FD.io VPP  v21.10.1-2-g0a485f517
Vector Packet Processing
ixge.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use supported DPDK driver instead.
21  */
22 
23 #if __x86_64__ || __i386__ || __aarch64__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver wont really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <ixge/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/plugin/plugin.h>
38 #include <vpp/app/version.h>
39 
40 #define IXGE_ALWAYS_POLL 0
41 
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
44 
45 #define PCI_VENDOR_ID_INTEL 0x8086
46 
47 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
54 
58 
/* NOTE(review): this listing is a documentation scrape; the line carrying
   this function's name/signature was dropped by the extraction.  Upstream
   ixge.c this is ixge_semaphore_get (ixge_device_t * xd): acquire the
   82599 software semaphore before touching registers shared with firmware. */
59 static void
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
  /* Wait for hardware to clear bit 0; after the first miss, yield the
     calling vlib process for 100us per retry instead of hard-spinning. */
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
  /* Claim bit 1 and read back until the write sticks. */
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
80 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_semaphore_release (ixge_device_t * xd).  Clears both semaphore
   bits (0 and 1) taken by the matching get. */
81 static void
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask): take the
   software half of the software/firmware sync register.  The firmware's
   bits sit 5 positions above the corresponding software bits. */
88 static void
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
  /* Free only when firmware is not holding the mirrored bits. */
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
  /* NOTE(review): a statement (orig. line 104, presumably the semaphore
     release) was dropped by the extraction here. */
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask):
   drop the software claim on the sync-register bits in sw_mask. */
110 static void
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
  /* NOTE(review): the matching ixge_semaphore_release call (orig. line
     116) appears to have been dropped by the extraction. */
117 }
118 
119 u32
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
136  x =
137  reg_index | (dev_type << 16) | (xd->
138  phys[xd->phy_index].mdio_address << 21);
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
174  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
187  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_read_eeprom (ixge_device_t * xd, u32 address): read one 16-bit
   word from the EEPROM through the eeprom_read register. */
195 static u16
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
  /* Start bit plus word address; the result lands in the high 16 bits. */
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable).
   The SFP laser is on when the SDP tx-disable pin (bit 3) is low. */
207 static void
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
/* NOTE(review): signature line lost in extraction; upstream this is
   ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable):
   select 10G link mode via SDP control bit 5. */
217 static void
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
265 
266 static void
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
283 
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
296 
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
308 
/* NOTE(review): the signature lines were lost in extraction; upstream this
   is get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi):
   return the DMA register block for RX/TX queue qi.  RX queues 0-63 live
   in rx_dma0, queues 64-127 in rx_dma1. */
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
319 
320 static clib_error_t *
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
325  ixge_main_t *xm = &ixge_main;
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
415 
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
434 
435  /* Wait for self-clearning reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
442 }
443 
444 static u8 *
446 {
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  u32 indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
466  s = format (s, "layer2 error");
467 
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
484  s = format (s, " checksum %s",
486  "bad" : "ok");
487  }
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
504  s = format (s, ", tcp checksum %s",
506  "ok");
508  s = format (s, ", udp checksum %s",
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  u32 indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
559 typedef struct
560 {
562 
564 
566 
568 
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
574 
575 static u8 *
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
585  u32 indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U", format_white_space, indent,
604 
605  s = format (s, "\n%U", format_white_space, indent);
606 
607  f = node->format_buffer;
608  if (!f || !t->is_start_of_packet)
610  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
611 
612  return s;
613 }
614 
615 #define foreach_ixge_error \
616  _ (none, "no error") \
617  _ (tx_full_drops, "tx ring full drops") \
618  _ (ip4_checksum_error, "ip4 checksum errors") \
619  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
620  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
621 
622 typedef enum
623 {
624 #define _(f,s) IXGE_ERROR_##f,
626 #undef _
628 } ixge_error_t;
629 
630 always_inline void
632  u32 s00, u32 s02,
633  u8 * next0, u8 * error0, u32 * flags0)
634 {
635  u8 is0_ip4, is0_ip6, n0, e0;
636  u32 f0;
637 
638  e0 = IXGE_ERROR_none;
640 
642  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
643 
644  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
645  ? IXGE_ERROR_ip4_checksum_error : e0);
646 
647  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
648  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
649 
650  n0 = (xd->per_interface_next_index != ~0) ?
651  xd->per_interface_next_index : n0;
652 
653  /* Check for error. */
654  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
655 
658  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
659 
662  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
663 
664  *error0 = e0;
665  *next0 = n0;
666  *flags0 = f0;
667 }
668 
669 always_inline void
671  u32 s00, u32 s02,
672  u32 s10, u32 s12,
673  u8 * next0, u8 * error0, u32 * flags0,
674  u8 * next1, u8 * error1, u32 * flags1)
675 {
676  u8 is0_ip4, is0_ip6, n0, e0;
677  u8 is1_ip4, is1_ip6, n1, e1;
678  u32 f0, f1;
679 
680  e0 = e1 = IXGE_ERROR_none;
681  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
682 
685 
686  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
687  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
688 
689  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
690  ? IXGE_ERROR_ip4_checksum_error : e0);
691  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
692  ? IXGE_ERROR_ip4_checksum_error : e1);
693 
694  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
695  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696 
697  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
698  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
699 
700  n0 = (xd->per_interface_next_index != ~0) ?
701  xd->per_interface_next_index : n0;
702  n1 = (xd->per_interface_next_index != ~0) ?
703  xd->per_interface_next_index : n1;
704 
705  /* Check for error. */
706  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
707  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
708 
709  *error0 = e0;
710  *error1 = e1;
711 
712  *next0 = n0;
713  *next1 = n1;
714 
717  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
720  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
721 
724  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
727  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
728 
729  *flags0 = f0;
730  *flags1 = f1;
731 }
732 
733 static void
735  ixge_device_t * xd,
736  ixge_dma_queue_t * dq,
737  ixge_descriptor_t * before_descriptors,
738  u32 * before_buffers,
739  ixge_descriptor_t * after_descriptors, uword n_descriptors)
740 {
741  vlib_main_t *vm = xm->vlib_main;
742  vlib_node_runtime_t *node = dq->rx.node;
745  u32 *b, n_left, is_sop, next_index_sop;
746 
747  n_left = n_descriptors;
748  b = before_buffers;
749  bd = &before_descriptors->rx_from_hw;
750  ad = &after_descriptors->rx_to_hw;
751  is_sop = dq->rx.is_start_of_packet;
752  next_index_sop = dq->rx.saved_start_of_packet_next_index;
753 
754  while (n_left >= 2)
755  {
756  u32 bi0, bi1, flags0, flags1;
757  vlib_buffer_t *b0, *b1;
758  ixge_rx_dma_trace_t *t0, *t1;
759  u8 next0, error0, next1, error1;
760 
761  bi0 = b[0];
762  bi1 = b[1];
763  n_left -= 2;
764 
765  b0 = vlib_get_buffer (vm, bi0);
766  b1 = vlib_get_buffer (vm, bi1);
767 
769  bd[0].status[0], bd[0].status[2],
770  bd[1].status[0], bd[1].status[2],
771  &next0, &error0, &flags0,
772  &next1, &error1, &flags1);
773 
774  next_index_sop = is_sop ? next0 : next_index_sop;
775  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
776  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
777  t0->is_start_of_packet = is_sop;
778  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
779 
780  next_index_sop = is_sop ? next1 : next_index_sop;
781  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
782  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
783  t1->is_start_of_packet = is_sop;
784  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
785 
786  t0->queue_index = dq->queue_index;
787  t1->queue_index = dq->queue_index;
788  t0->device_index = xd->device_index;
789  t1->device_index = xd->device_index;
790  t0->before.rx_from_hw = bd[0];
791  t1->before.rx_from_hw = bd[1];
792  t0->after.rx_to_hw = ad[0];
793  t1->after.rx_to_hw = ad[1];
794  t0->buffer_index = bi0;
795  t1->buffer_index = bi1;
796  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
797  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
798  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
799  sizeof (t0->buffer.pre_data));
800  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
801  sizeof (t1->buffer.pre_data));
802 
803  b += 2;
804  bd += 2;
805  ad += 2;
806  }
807 
808  while (n_left >= 1)
809  {
810  u32 bi0, flags0;
811  vlib_buffer_t *b0;
813  u8 next0, error0;
814 
815  bi0 = b[0];
816  n_left -= 1;
817 
818  b0 = vlib_get_buffer (vm, bi0);
819 
821  bd[0].status[0], bd[0].status[2],
822  &next0, &error0, &flags0);
823 
824  next_index_sop = is_sop ? next0 : next_index_sop;
825  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
826  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
827  t0->is_start_of_packet = is_sop;
828  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
829 
830  t0->queue_index = dq->queue_index;
831  t0->device_index = xd->device_index;
832  t0->before.rx_from_hw = bd[0];
833  t0->after.rx_to_hw = ad[0];
834  t0->buffer_index = bi0;
835  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
836  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
837  sizeof (t0->buffer.pre_data));
838 
839  b += 1;
840  bd += 1;
841  ad += 1;
842  }
843 }
844 
845 typedef struct
846 {
848 
850 
852 
854 
856 
857  /* Copy of VLIB buffer; packet data stored in pre_data. */
860 
861 static u8 *
862 format_ixge_tx_dma_trace (u8 * s, va_list * va)
863 {
864  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
865  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
866  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
867  vnet_main_t *vnm = vnet_get_main ();
868  ixge_main_t *xm = &ixge_main;
871  u32 indent = format_get_indent (s);
872 
873  {
874  vnet_sw_interface_t *sw =
876  s =
877  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
878  t->queue_index);
879  }
880 
881  s = format (s, "\n%Udescriptor: %U",
882  format_white_space, indent,
884 
885  s = format (s, "\n%Ubuffer 0x%x: %U", format_white_space, indent,
887 
888  s = format (s, "\n%U", format_white_space, indent);
889 
891  if (!f || !t->is_start_of_packet)
893  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
894 
895  return s;
896 }
897 
898 typedef struct
899 {
901 
903 
905 
908 
909 static void
911  ixge_device_t * xd,
912  ixge_dma_queue_t * dq,
913  ixge_tx_state_t * tx_state,
914  ixge_tx_descriptor_t * descriptors,
915  u32 * buffers, uword n_descriptors)
916 {
917  vlib_main_t *vm = xm->vlib_main;
918  vlib_node_runtime_t *node = tx_state->node;
920  u32 *b, n_left, is_sop;
921 
922  n_left = n_descriptors;
923  b = buffers;
924  d = descriptors;
925  is_sop = tx_state->is_start_of_packet;
926 
927  while (n_left >= 2)
928  {
929  u32 bi0, bi1;
930  vlib_buffer_t *b0, *b1;
931  ixge_tx_dma_trace_t *t0, *t1;
932 
933  bi0 = b[0];
934  bi1 = b[1];
935  n_left -= 2;
936 
937  b0 = vlib_get_buffer (vm, bi0);
938  b1 = vlib_get_buffer (vm, bi1);
939 
940  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
941  t0->is_start_of_packet = is_sop;
942  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
943 
944  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
945  t1->is_start_of_packet = is_sop;
946  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
947 
948  t0->queue_index = dq->queue_index;
949  t1->queue_index = dq->queue_index;
950  t0->device_index = xd->device_index;
951  t1->device_index = xd->device_index;
952  t0->descriptor = d[0];
953  t1->descriptor = d[1];
954  t0->buffer_index = bi0;
955  t1->buffer_index = bi1;
956  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
957  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
958  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
959  sizeof (t0->buffer.pre_data));
960  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
961  sizeof (t1->buffer.pre_data));
962 
963  b += 2;
964  d += 2;
965  }
966 
967  while (n_left >= 1)
968  {
969  u32 bi0;
970  vlib_buffer_t *b0;
972 
973  bi0 = b[0];
974  n_left -= 1;
975 
976  b0 = vlib_get_buffer (vm, bi0);
977 
978  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
979  t0->is_start_of_packet = is_sop;
980  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
981 
982  t0->queue_index = dq->queue_index;
983  t0->device_index = xd->device_index;
984  t0->descriptor = d[0];
985  t0->buffer_index = bi0;
986  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
987  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
988  sizeof (t0->buffer.pre_data));
989 
990  b += 1;
991  d += 1;
992  }
993 }
994 
/* NOTE(review): the signature lines were lost in extraction; upstream this
   is ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1): distance from
   index i0 forward to index i1 on the descriptor ring (modulo ring size). */
997 {
998  i32 d = i1 - i0;
999  ASSERT (i0 < q->n_descriptors);
1000  ASSERT (i1 < q->n_descriptors);
1001  return d < 0 ? q->n_descriptors + d : d;
1002 }
1003 
/* NOTE(review): the signature lines were lost in extraction; upstream this
   is ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1): index i0
   advanced by i1 positions, wrapped modulo the ring size. */
1006 {
1007  u32 d = i0 + i1;
1008  ASSERT (i0 < q->n_descriptors);
1009  ASSERT (i1 < q->n_descriptors);
1010  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1011  return d;
1012 }
1013 
1017 {
1018  u32 cmp;
1019 
1020  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1022  if (cmp)
1023  return 0;
1024  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1026  if (cmp)
1027  return 0;
1028 
1029  return 1;
1030 }
1031 
1032 static uword
1034  ixge_device_t * xd,
1035  ixge_dma_queue_t * dq,
1036  u32 * buffers,
1037  u32 start_descriptor_index,
1038  u32 n_descriptors, ixge_tx_state_t * tx_state)
1039 {
1040  vlib_main_t *vm = xm->vlib_main;
1041  ixge_tx_descriptor_t *d, *d_sop;
1042  u32 n_left = n_descriptors;
1043  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1044  u32 *to_tx =
1045  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1046  u32 is_sop = tx_state->is_start_of_packet;
1047  u32 len_sop = tx_state->n_bytes_in_packet;
1048  u16 template_status = xm->tx_descriptor_template.status0;
1049  u32 descriptor_prefetch_rotor = 0;
1050 
1051  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1052  d = &dq->descriptors[start_descriptor_index].tx;
1053  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1054 
1055  while (n_left >= 4)
1056  {
1057  vlib_buffer_t *b0, *b1;
1058  u32 bi0, fi0, len0;
1059  u32 bi1, fi1, len1;
1060  u8 is_eop0, is_eop1;
1061 
1062  /* Prefetch next iteration. */
1063  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1064  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1065 
1066  if ((descriptor_prefetch_rotor & 0x3) == 0)
1067  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1068 
1069  descriptor_prefetch_rotor += 2;
1070 
1071  bi0 = buffers[0];
1072  bi1 = buffers[1];
1073 
1074  to_free[0] = fi0 = to_tx[0];
1075  to_tx[0] = bi0;
1076  to_free += fi0 != 0;
1077 
1078  to_free[0] = fi1 = to_tx[1];
1079  to_tx[1] = bi1;
1080  to_free += fi1 != 0;
1081 
1082  buffers += 2;
1083  n_left -= 2;
1084  to_tx += 2;
1085 
1086  b0 = vlib_get_buffer (vm, bi0);
1087  b1 = vlib_get_buffer (vm, bi1);
1088 
1089  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1090  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1091 
1092  len0 = b0->current_length;
1093  len1 = b1->current_length;
1094 
1097 
1098  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1099  d[1].buffer_address = vlib_buffer_get_pa (vm, b1);
1100 
1101  d[0].n_bytes_this_buffer = len0;
1102  d[1].n_bytes_this_buffer = len1;
1103 
1104  d[0].status0 =
1105  template_status | (is_eop0 <<
1107  d[1].status0 =
1108  template_status | (is_eop1 <<
1110 
1111  len_sop = (is_sop ? 0 : len_sop) + len0;
1112  d_sop[0].status1 =
1114  d += 1;
1115  d_sop = is_eop0 ? d : d_sop;
1116 
1117  is_sop = is_eop0;
1118 
1119  len_sop = (is_sop ? 0 : len_sop) + len1;
1120  d_sop[0].status1 =
1122  d += 1;
1123  d_sop = is_eop1 ? d : d_sop;
1124 
1125  is_sop = is_eop1;
1126  }
1127 
1128  while (n_left > 0)
1129  {
1130  vlib_buffer_t *b0;
1131  u32 bi0, fi0, len0;
1132  u8 is_eop0;
1133 
1134  bi0 = buffers[0];
1135 
1136  to_free[0] = fi0 = to_tx[0];
1137  to_tx[0] = bi0;
1138  to_free += fi0 != 0;
1139 
1140  buffers += 1;
1141  n_left -= 1;
1142  to_tx += 1;
1143 
1144  b0 = vlib_get_buffer (vm, bi0);
1145 
1146  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1147 
1148  len0 = b0->current_length;
1149 
1151 
1152  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1153  d[0].n_bytes_this_buffer = len0;
1154 
1155  d[0].status0 =
1156  template_status | (is_eop0 <<
1158 
1159  len_sop = (is_sop ? 0 : len_sop) + len0;
1160  d_sop[0].status1 =
1162  d += 1;
1163  d_sop = is_eop0 ? d : d_sop;
1164 
1165  is_sop = is_eop0;
1166  }
1167 
1168  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1169  {
1170  to_tx =
1172  start_descriptor_index);
1173  ixge_tx_trace (xm, xd, dq, tx_state,
1174  &dq->descriptors[start_descriptor_index].tx, to_tx,
1175  n_descriptors);
1176  }
1177 
1178  _vec_len (xm->tx_buffers_pending_free) =
1179  to_free - xm->tx_buffers_pending_free;
1180 
1181  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1182  {
1183  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1184 
1185  ASSERT (d_sop - d_start <= dq->n_descriptors);
1186  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1187  }
1188 
1189  tx_state->is_start_of_packet = is_sop;
1190  tx_state->start_of_packet_descriptor = d_sop;
1191  tx_state->n_bytes_in_packet = len_sop;
1192 
1193  return n_descriptors;
1194 }
1195 
1196 static uword
1199 {
1200  ixge_main_t *xm = &ixge_main;
1201  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1203  ixge_dma_queue_t *dq;
1204  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1205  u32 queue_index = 0; /* fixme parameter */
1206  ixge_tx_state_t tx_state;
1207 
1208  tx_state.node = node;
1209  tx_state.is_start_of_packet = 1;
1210  tx_state.start_of_packet_descriptor = 0;
1211  tx_state.n_bytes_in_packet = 0;
1212 
1214 
1215  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1216 
1217  dq->head_index = dq->tx.head_index_write_back[0];
1218 
1219  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1220  n_left_tx = dq->n_descriptors - 1;
1221  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1222 
1223  _vec_len (xm->tx_buffers_pending_free) = 0;
1224 
1225  n_descriptors_to_tx = f->n_vectors;
1226  n_tail_drop = 0;
1227  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1228  {
1229  i32 i, n_ok, i_eop, i_sop;
1230 
1231  i_sop = i_eop = ~0;
1232  for (i = n_left_tx - 1; i >= 0; i--)
1233  {
1235  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1236  {
1237  if (i_sop != ~0 && i_eop != ~0)
1238  break;
1239  i_eop = i;
1240  i_sop = i + 1;
1241  }
1242  }
1243  if (i == 0)
1244  n_ok = 0;
1245  else
1246  n_ok = i_eop + 1;
1247 
1248  {
1249  ELOG_TYPE_DECLARE (e) =
1250  {
1251  .function = (char *) __FUNCTION__,.format =
1252  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1253  "i2i2i2i2",};
1254  struct
1255  {
1256  u16 instance, to_tx, head, tail;
1257  } *ed;
1258  ed = ELOG_DATA (&vm->elog_main, e);
1259  ed->instance = xd->device_index;
1260  ed->to_tx = n_descriptors_to_tx;
1261  ed->head = dq->head_index;
1262  ed->tail = dq->tail_index;
1263  }
1264 
1265  if (n_ok < n_descriptors_to_tx)
1266  {
1267  n_tail_drop = n_descriptors_to_tx - n_ok;
1268  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1270  IXGE_ERROR_tx_full_drops, n_tail_drop);
1271  }
1272 
1273  n_descriptors_to_tx = n_ok;
1274  }
1275 
1276  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1277 
1278  /* Process from tail to end of descriptor ring. */
1279  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1280  {
1281  u32 n =
1282  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1283  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1284  from += n;
1285  n_descriptors_to_tx -= n;
1286  dq->tail_index += n;
1287  ASSERT (dq->tail_index <= dq->n_descriptors);
1288  if (dq->tail_index == dq->n_descriptors)
1289  dq->tail_index = 0;
1290  }
1291 
1292  if (n_descriptors_to_tx > 0)
1293  {
1294  u32 n =
1295  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1296  from += n;
1297  ASSERT (n == n_descriptors_to_tx);
1298  dq->tail_index += n;
1299  ASSERT (dq->tail_index <= dq->n_descriptors);
1300  if (dq->tail_index == dq->n_descriptors)
1301  dq->tail_index = 0;
1302  }
1303 
1304  /* We should only get full packets. */
1305  ASSERT (tx_state.is_start_of_packet);
1306 
1307  /* Report status when last descriptor is done. */
1308  {
1309  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1310  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1312  }
1313 
1314  /* Give new descriptors to hardware. */
1315  {
1316  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1317 
1319 
1320  dr->tail_index = dq->tail_index;
1321  }
1322 
1323  /* Free any buffers that are done. */
1324  {
1325  u32 n = _vec_len (xm->tx_buffers_pending_free);
1326  if (n > 0)
1327  {
1329  _vec_len (xm->tx_buffers_pending_free) = 0;
1330  ASSERT (dq->tx.n_buffers_on_ring >= n);
1331  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1332  }
1333  }
1334 
1335  return f->n_vectors;
1336 }
1337 
/* Receive packets from a contiguous run of RX descriptors (no ring wrap).
   Each completed descriptor is recycled with a freshly allocated buffer,
   multi-descriptor packets are chained via next_buffer, and completed
   packets are enqueued to the next graph nodes.  Returns the number of
   whole packets enqueued.
   NOTE(review): this listing is a doxygen extraction with source lines
   elided (e.g. the function-name line 1339 and several condition lines);
   verify against the original ixge.c before relying on it. */
1338 static uword
 1340  ixge_device_t * xd,
 1341  ixge_dma_queue_t * dq,
 1342  u32 start_descriptor_index, u32 n_descriptors)
 1343 {
 1344  vlib_main_t *vm = xm->vlib_main;
 1345  vlib_node_runtime_t *node = dq->rx.node;
 1346  ixge_descriptor_t *d;
 /* static: trace staging vectors persist across calls (single-threaded use
    assumed — TODO confirm). */
 1347  static ixge_descriptor_t *d_trace_save;
 1348  static u32 *d_trace_buffers;
 1349  u32 n_descriptors_left = n_descriptors;
 1350  u32 *to_rx =
 1351  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
 1352  u32 *to_add;
 /* State saved by the previous call for a packet that spans calls. */
 1353  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
 1354  u32 bi_last = dq->rx.saved_last_buffer_index;
 1355  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
 1356  u32 is_sop = dq->rx.is_start_of_packet;
 1357  u32 next_index, n_left_to_next, *to_next;
 1358  u32 n_packets = 0;
 1359  u32 n_bytes = 0;
 1360  u32 n_trace = vlib_get_trace_count (vm, node);
 1361  vlib_buffer_t *b_last, b_placeholder;
 1362 
 1363  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
 1364  d = &dq->descriptors[start_descriptor_index];
 1365 
 /* Resume chaining a partially received packet; otherwise write the
    dead-end next_buffer stores into a placeholder. */
 1366  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
 1367  next_index = dq->rx.next_index;
 1368 
 /* Snapshot descriptors and buffer indices for tracing before they are
    overwritten by the refill below. */
 1369  if (n_trace > 0)
 1370  {
 1371  u32 n = clib_min (n_trace, n_descriptors);
 1372  if (d_trace_save)
 1373  {
 1374  _vec_len (d_trace_save) = 0;
 1375  _vec_len (d_trace_buffers) = 0;
 1376  }
 1377  vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
 1378  vec_add (d_trace_buffers, to_rx, n);
 1379  }
 1380 
 /* Make sure the free-buffer cache holds at least one replacement buffer
    per descriptor we may process; shrink the work set on alloc failure. */
 1381  {
 1382  uword l = vec_len (xm->rx_buffers_to_add);
 1383 
 1384  if (l < n_descriptors_left)
 1385  {
 1386  u32 n_to_alloc = 2 * dq->n_descriptors - l;
 1387  u32 n_allocated;
 1388 
 1389  vec_resize (xm->rx_buffers_to_add, n_to_alloc);
 1390 
 1391  _vec_len (xm->rx_buffers_to_add) = l;
 1392  n_allocated =
 1393  vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
 1394  _vec_len (xm->rx_buffers_to_add) += n_allocated;
 1395 
 1396  /* Handle transient allocation failure */
 1397  if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
 1398  {
 1399  if (n_allocated == 0)
 1401  IXGE_ERROR_rx_alloc_no_physmem, 1);
 1402  else
 1404  IXGE_ERROR_rx_alloc_fail, 1);
 1405 
 1406  n_descriptors_left = l + n_allocated;
 1407  }
 1408  n_descriptors = n_descriptors_left;
 1409  }
 1410 
 1411  /* Add buffers from end of vector going backwards. */
 1412  to_add = vec_end (xm->rx_buffers_to_add) - 1;
 1413  }
 1414 
 1415  while (n_descriptors_left > 0)
 1416  {
 1417  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 1418 
 /* Dual-descriptor fast path: process two descriptors per iteration while
    at least two frame slots remain. */
 1419  while (n_descriptors_left >= 4 && n_left_to_next >= 2)
 1420  {
 1421  vlib_buffer_t *b0, *b1;
 1422  vlib_buffer_t *f0, *f1;
 1423  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
 1424  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
 1425  u8 is_eop0, error0, next0;
 1426  u8 is_eop1, error1, next1;
 1427  ixge_descriptor_t d0, d1;
 1428 
 1429  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
 1430  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
 1431 
 1432  CLIB_PREFETCH (d + 2, 32, STORE);
 1433 
 /* Copy descriptors out before the refill overwrites them in place. */
 1434  d0.as_u32x4 = d[0].as_u32x4;
 1435  d1.as_u32x4 = d[1].as_u32x4;
 1436 
 1437  s20 = d0.rx_from_hw.status[2];
 1438  s21 = d1.rx_from_hw.status[2];
 1439 
 1440  s00 = d0.rx_from_hw.status[0];
 1441  s01 = d1.rx_from_hw.status[0];
 1442 
 /* NOTE(review): the ownership test expression (lines 1444) is elided in
    this extraction; presumably it checks the DONE bits of both statuses. */
 1443  if (!
 1445  goto found_hw_owned_descriptor_x2;
 1446 
 1447  bi0 = to_rx[0];
 1448  bi1 = to_rx[1];
 1449 
 /* Swap received buffers out of the ring, replacing them with fresh ones
    taken from the back of the free-buffer cache. */
 1450  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
 1451  fi0 = to_add[0];
 1452  fi1 = to_add[-1];
 1453 
 1454  to_rx[0] = fi0;
 1455  to_rx[1] = fi1;
 1456  to_rx += 2;
 1457  to_add -= 2;
 1458 
 1459 #if 0
 1464 #endif
 1465 
 1466  b0 = vlib_get_buffer (vm, bi0);
 1467  b1 = vlib_get_buffer (vm, bi1);
 1468 
 1471 
 1472  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1473  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1474 
 1475  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
 1476  &next0, &error0, &flags0,
 1477  &next1, &error1, &flags1);
 1478 
 /* Non-SOP descriptors inherit the next index chosen at start of packet. */
 1479  next0 = is_sop ? next0 : next_index_sop;
 1480  next1 = is_eop0 ? next1 : next0;
 1481  next_index_sop = next1;
 1482 
 1483  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1484  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1485 
 1486  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1487  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1488  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1489  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1490 
 1491  b0->error = node->errors[error0];
 1492  b1->error = node->errors[error1];
 1493 
 /* NOTE(review): len0/len1 extraction lines (1494-1495) are elided here;
    presumably taken from the descriptor length fields. */
 1496  n_bytes += len0 + len1;
 1497  n_packets += is_eop0 + is_eop1;
 1498 
 1499  /* Give new buffers to hardware. */
 1500  f0 = vlib_get_buffer (vm, fi0);
 1501  f1 = vlib_get_buffer (vm, fi1);
 1506  d[0].as_u32x4 = d0.as_u32x4;
 1507  d[1].as_u32x4 = d1.as_u32x4;
 1508 
 1509  d += 2;
 1510  n_descriptors_left -= 2;
 1511 
 1512  /* Point to either l2 or l3 header depending on next. */
 1513  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1515  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1517 
 1518  b0->current_length = len0 - l3_offset0;
 1519  b1->current_length = len1 - l3_offset1;
 1520  b0->current_data = l3_offset0;
 1521  b1->current_data = l3_offset1;
 1522 
 /* Chain continuation buffers onto the in-progress packet. */
 1523  b_last->next_buffer = is_sop ? ~0 : bi0;
 1524  b0->next_buffer = is_eop0 ? ~0 : bi1;
 1525  bi_last = bi1;
 1526  b_last = b1;
 1527 
 1528  if (CLIB_DEBUG > 0)
 1529  {
 1530  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
 1531  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
 1532 
 1533  if (is_eop0)
 1534  {
 1535  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
 1536  /* follow_buffer_next */ 1);
 1537  ASSERT (!msg);
 1538  }
 1539  if (is_eop1)
 1540  {
 1541  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
 1542  /* follow_buffer_next */ 1);
 1543  ASSERT (!msg);
 1544  }
 1545  }
 /* Dead code kept for reference: straightforward per-packet enqueue. */
 1546  if (0) /* "Dave" version */
 1547  {
 1548  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
 1549  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
 1550 
 1551  if (is_eop0)
 1552  {
 1553  to_next[0] = bi_sop0;
 1554  to_next++;
 1555  n_left_to_next--;
 1556 
 1558  to_next, n_left_to_next,
 1559  bi_sop0, next0);
 1560  }
 1561  if (is_eop1)
 1562  {
 1563  to_next[0] = bi_sop1;
 1564  to_next++;
 1565  n_left_to_next--;
 1566 
 1568  to_next, n_left_to_next,
 1569  bi_sop1, next1);
 1570  }
 1571  is_sop = is_eop1;
 1572  bi_sop = bi_sop1;
 1573  }
 /* Active path: speculatively enqueue both packets to the cached next
    index, then undo and redo carefully if either went elsewhere. */
 1574  if (1) /* "Eliot" version */
 1575  {
 1576  /* Speculatively enqueue to cached next. */
 1577  u8 saved_is_sop = is_sop;
 1578  u32 bi_sop_save = bi_sop;
 1579 
 1580  bi_sop = saved_is_sop ? bi0 : bi_sop;
 1581  to_next[0] = bi_sop;
 1582  to_next += is_eop0;
 1583  n_left_to_next -= is_eop0;
 1584 
 1585  bi_sop = is_eop0 ? bi1 : bi_sop;
 1586  to_next[0] = bi_sop;
 1587  to_next += is_eop1;
 1588  n_left_to_next -= is_eop1;
 1589 
 1590  is_sop = is_eop1;
 1591 
 1592  if (PREDICT_FALSE
 1593  (!(next0 == next_index && next1 == next_index)))
 1594  {
 1595  /* Undo speculation. */
 1596  to_next -= is_eop0 + is_eop1;
 1597  n_left_to_next += is_eop0 + is_eop1;
 1598 
 1599  /* Re-do both descriptors being careful about where we enqueue. */
 1600  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
 1601  if (is_eop0)
 1602  {
 1603  if (next0 != next_index)
 1604  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
 1605  else
 1606  {
 1607  to_next[0] = bi_sop;
 1608  to_next += 1;
 1609  n_left_to_next -= 1;
 1610  }
 1611  }
 1612 
 1613  bi_sop = is_eop0 ? bi1 : bi_sop;
 1614  if (is_eop1)
 1615  {
 1616  if (next1 != next_index)
 1617  vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
 1618  else
 1619  {
 1620  to_next[0] = bi_sop;
 1621  to_next += 1;
 1622  n_left_to_next -= 1;
 1623  }
 1624  }
 1625 
 1626  /* Switch cached next index when next for both packets is the same. */
 1627  if (is_eop0 && is_eop1 && next0 == next1)
 1628  {
 1630  n_left_to_next);
 1631  next_index = next0;
 1633  to_next, n_left_to_next);
 1634  }
 1635  }
 1636  }
 1637  }
 1638 
 1639  /* Bail out of dual loop and proceed with single loop. */
 1640  found_hw_owned_descriptor_x2:
 1641 
 /* Single-descriptor loop: same logic as above, one descriptor at a time. */
 1642  while (n_descriptors_left > 0 && n_left_to_next > 0)
 1643  {
 1644  vlib_buffer_t *b0;
 1645  vlib_buffer_t *f0;
 1646  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
 1647  u8 is_eop0, error0, next0;
 1648  ixge_descriptor_t d0;
 1649 
 1650  d0.as_u32x4 = d[0].as_u32x4;
 1651 
 1652  s20 = d0.rx_from_hw.status[2];
 1653  s00 = d0.rx_from_hw.status[0];
 1654 
 /* NOTE(review): ownership test (line 1655) elided in this extraction. */
 1656  goto found_hw_owned_descriptor_x1;
 1657 
 1658  bi0 = to_rx[0];
 1659  ASSERT (to_add >= xm->rx_buffers_to_add);
 1660  fi0 = to_add[0];
 1661 
 1662  to_rx[0] = fi0;
 1663  to_rx += 1;
 1664  to_add -= 1;
 1665 
 1666 #if 0
 1669 #endif
 1670 
 1671  b0 = vlib_get_buffer (vm, bi0);
 1672 
 1673  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
 1675  (xd, s00, s20, &next0, &error0, &flags0);
 1676 
 1677  next0 = is_sop ? next0 : next_index_sop;
 1678  next_index_sop = next0;
 1679 
 1680  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
 1681 
 1682  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
 1683  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
 1684 
 1685  b0->error = node->errors[error0];
 1686 
 1688  n_bytes += len0;
 1689  n_packets += is_eop0;
 1690 
 1691  /* Give new buffer to hardware. */
 1692  f0 = vlib_get_buffer (vm, fi0);
 1695  d[0].as_u32x4 = d0.as_u32x4;
 1696 
 1697  d += 1;
 1698  n_descriptors_left -= 1;
 1699 
 1700  /* Point to either l2 or l3 header depending on next. */
 1701  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
 1703  b0->current_length = len0 - l3_offset0;
 1704  b0->current_data = l3_offset0;
 1705 
 1706  b_last->next_buffer = is_sop ? ~0 : bi0;
 1707  bi_last = bi0;
 1708  b_last = b0;
 1709 
 1710  bi_sop = is_sop ? bi0 : bi_sop;
 1711 
 1712  if (CLIB_DEBUG > 0 && is_eop0)
 1713  {
 1714  u8 *msg =
 1715  vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
 1716  ASSERT (!msg);
 1717  }
 1718 
 1719  if (0) /* "Dave" version */
 1720  {
 1721  if (is_eop0)
 1722  {
 1723  to_next[0] = bi_sop;
 1724  to_next++;
 1725  n_left_to_next--;
 1726 
 1728  to_next, n_left_to_next,
 1729  bi_sop, next0);
 1730  }
 1731  }
 1732  if (1) /* "Eliot" version */
 1733  {
 1734  if (PREDICT_TRUE (next0 == next_index))
 1735  {
 1736  to_next[0] = bi_sop;
 1737  to_next += is_eop0;
 1738  n_left_to_next -= is_eop0;
 1739  }
 1740  else
 1741  {
 1742  if (next0 != next_index && is_eop0)
 1743  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
 1744 
 1745  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1746  next_index = next0;
 1748  to_next, n_left_to_next);
 1749  }
 1750  }
 1751  is_sop = is_eop0;
 1752  }
 1753  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1754  }
 1755 
 1756 found_hw_owned_descriptor_x1:
 1757  if (n_descriptors_left > 0)
 1758  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 1759 
 /* Shrink the free-buffer cache by however many buffers were consumed. */
 1760  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
 1761 
 1762  {
 1763  u32 n_done = n_descriptors - n_descriptors_left;
 1764 
 1765  if (n_trace > 0 && n_done > 0)
 1766  {
 1767  u32 n = clib_min (n_trace, n_done);
 1768  ixge_rx_trace (xm, xd, dq,
 1769  d_trace_save,
 1770  d_trace_buffers,
 1771  &dq->descriptors[start_descriptor_index], n);
 1772  vlib_set_trace_count (vm, node, n_trace - n);
 1773  }
 1774  if (d_trace_save)
 1775  {
 1776  _vec_len (d_trace_save) = 0;
 1777  _vec_len (d_trace_buffers) = 0;
 1778  }
 1779 
 1780  /* Don't keep a reference to b_last if we don't have to.
 1781  Otherwise we can over-write a next_buffer pointer after already haven
 1782  enqueued a packet. */
 1783  if (is_sop)
 1784  {
 1785  b_last->next_buffer = ~0;
 1786  bi_last = ~0;
 1787  }
 1788 
 /* Persist per-call state so a packet spanning calls is resumed correctly. */
 1789  dq->rx.n_descriptors_done_this_call = n_done;
 1790  dq->rx.n_descriptors_done_total += n_done;
 1791  dq->rx.is_start_of_packet = is_sop;
 1792  dq->rx.saved_start_of_packet_buffer_index = bi_sop;
 1793  dq->rx.saved_last_buffer_index = bi_last;
 1794  dq->rx.saved_start_of_packet_next_index = next_index_sop;
 1795  dq->rx.next_index = next_index;
 1796  dq->rx.n_bytes += n_bytes;
 1797 
 1798  return n_packets;
 1799  }
 1800 }
1801 
/* Service one RX DMA queue: compare the hardware head against our software
   head and process the completed descriptors, handling ring wrap by making
   up to two no-wrap passes.  Returns the number of packets received.
   NOTE(review): the function-name line (1803) and DMA-regs lookup (1809)
   are elided in this extraction; verify against the original ixge.c. */
1802 static uword
 1804  ixge_device_t * xd,
 1805  vlib_node_runtime_t * node, u32 queue_index)
 1806 {
 1807  ixge_dma_queue_t *dq =
 1808  vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
 1810  uword n_packets = 0;
 1811  u32 hw_head_index, sw_head_index;
 1812 
 1813  /* One time initialization. */
 1814  if (!dq->rx.node)
 1815  {
 1816  dq->rx.node = node;
 1817  dq->rx.is_start_of_packet = 1;
 1818  dq->rx.saved_start_of_packet_buffer_index = ~0;
 1819  dq->rx.saved_last_buffer_index = ~0;
 1820  }
 1821 
 1822  dq->rx.next_index = node->cached_next_index;
 1823 
 1824  dq->rx.n_descriptors_done_total = 0;
 1825  dq->rx.n_descriptors_done_this_call = 0;
 1826  dq->rx.n_bytes = 0;
 1827 
 1828  /* Fetch head from hardware and compare to where we think we are. */
 1829  hw_head_index = dr->head_index;
 1830  sw_head_index = dq->head_index;
 1831 
 1832  if (hw_head_index == sw_head_index)
 1833  goto done;
 1834 
 /* Hardware head wrapped past the ring end: first consume descriptors from
    the software head to the end of the ring. */
 1835  if (hw_head_index < sw_head_index)
 1836  {
 1837  u32 n_tried = dq->n_descriptors - sw_head_index;
 1838  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
 1839  sw_head_index =
 1840  ixge_ring_add (dq, sw_head_index,
 1841  dq->rx.n_descriptors_done_this_call);
 1842 
 /* Stopped early (e.g. buffer starvation): do not attempt the wrap. */
 1843  if (dq->rx.n_descriptors_done_this_call != n_tried)
 1844  goto done;
 1845  }
 /* Consume the remaining descriptors up to the hardware head. */
 1846  if (hw_head_index >= sw_head_index)
 1847  {
 1848  u32 n_tried = hw_head_index - sw_head_index;
 1849  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
 1850  sw_head_index =
 1851  ixge_ring_add (dq, sw_head_index,
 1852  dq->rx.n_descriptors_done_this_call);
 1853  }
 1854 
 1855 done:
 1856  dq->head_index = sw_head_index;
 1857  dq->tail_index =
 1858  ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
 1859 
 1860  /* Give tail back to hardware. */
 1862 
 1863  dr->tail_index = dq->tail_index;
 1864 
 /* Update combined rx counters for this interface (elided call prologue,
    lines 1865/1867, presumably vlib_increment_combined_counter). */
 1866  interface_main.combined_sw_if_counters +
 1868  0 /* thread_index */ ,
 1869  xd->vlib_sw_if_index, n_packets,
 1870  dq->rx.n_bytes);
 1871 
 1872  return n_packets;
 1873 }
1874 
/* Handle a non-queue interrupt status bit i: log it to the event log, and
   for bit 20 (link status change per the else branch) read the MAC link
   status register and signal the admin-up/down process.
   NOTE(review): the function-name line (1876) is elided in this
   extraction; verify against the original ixge.c. */
1875 static void
 1877 {
 1878  vlib_main_t *vm = xm->vlib_main;
 1879  ixge_regs_t *r = xd->regs;
 1880 
 1881  if (i != 20)
 1882  {
 /* Generic interrupt: elog with a symbolic name for bits 16..31. */
 1883  ELOG_TYPE_DECLARE (e) =
 1884  {
 1885  .function = (char *) __FUNCTION__,.format =
 1886  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
 1887  16,.enum_strings =
 1888  {
 1889  "flow director",
 1890  "rx miss",
 1891  "pci exception",
 1892  "mailbox",
 1893  "link status change",
 1894  "linksec key exchange",
 1895  "manageability event",
 1896  "reserved23",
 1897  "sdp0",
 1898  "sdp1",
 1899  "sdp2",
 1900  "sdp3",
 1901  "ecc", "descriptor handler error", "tcp timer", "other",},};
 1902  struct
 1903  {
 1904  u8 instance;
 1905  u8 index;
 1906  } *ed;
 1907  ed = ELOG_DATA (&vm->elog_main, e);
 1908  ed->instance = xd->device_index;
 1909  ed->index = i - 16;
 1910  }
 1911  else
 1912  {
 /* Link status change: bit 30 of the MAC link status register is the
    link-up flag (also used by format_ixge_link_status below). */
 1913  u32 v = r->xge_mac.link_status;
 1914  uword is_up = (v & (1 << 30)) != 0;
 1915 
 1916  ELOG_TYPE_DECLARE (e) =
 1917  {
 1918  .function = (char *) __FUNCTION__,.format =
 1919  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
 1920  struct
 1921  {
 1922  u32 instance, link_status;
 1923  } *ed;
 1924  ed = ELOG_DATA (&vm->elog_main, e);
 1925  ed->instance = xd->device_index;
 1926  ed->link_status = v;
 /* Signal the process handling link changes; encodes is_up in the top
    bit alongside the hw interface index (elided lines 1927/1929-1930). */
 1928 
 1931  ((is_up << 31) | xd->vlib_hw_if_index));
 1932  }
 1933 }
1934 
/* Compact the non-zero buffer indices from b[0..n_left) into t, zeroing
   each consumed slot of b, 4 entries per iteration then a scalar tail.
   Returns the number of indices written to t.
   NOTE(review): the function signature (lines 1935-1936, presumably
   clean_block (u32 * b, u32 * t, u32 n_left)) is elided in this
   extraction; verify against the original ixge.c. */
 1937 {
 1938  u32 *t0 = t;
 1939 
 1940  while (n_left >= 4)
 1941  {
 1942  u32 bi0, bi1, bi2, bi3;
 1943 
 /* Copy then conditionally advance: t only moves past slots that held a
    non-zero buffer index, so zero entries are squeezed out branch-free. */
 1944  t[0] = bi0 = b[0];
 1945  b[0] = 0;
 1946  t += bi0 != 0;
 1947 
 1948  t[0] = bi1 = b[1];
 1949  b[1] = 0;
 1950  t += bi1 != 0;
 1951 
 1952  t[0] = bi2 = b[2];
 1953  b[2] = 0;
 1954  t += bi2 != 0;
 1955 
 1956  t[0] = bi3 = b[3];
 1957  b[3] = 0;
 1958  t += bi3 != 0;
 1959 
 1960  b += 4;
 1961  n_left -= 4;
 1962  }
 1963 
 /* Scalar tail for the remaining 0-3 entries. */
 1964  while (n_left > 0)
 1965  {
 1966  u32 bi0;
 1967 
 1968  t[0] = bi0 = b[0];
 1969  b[0] = 0;
 1970  t += bi0 != 0;
 1971  b += 1;
 1972  n_left -= 1;
 1973  }
 1974 
 1975  return t - t0;
 1976 }
1977 
/* Reclaim TX descriptors the hardware has finished with on one TX queue:
   read the head-index write-back location, compute how many descriptors
   can be cleaned, gather their buffer indices and free them.
   NOTE(review): this extraction elides some lines (e.g. 2058, 2067);
   verify against the original ixge.c. */
1978 static void
 1979 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
 1980 {
 1981  vlib_main_t *vm = xm->vlib_main;
 1982  ixge_dma_queue_t *dq =
 1983  vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
 1984  u32 n_clean, *b, *t, *t0;
 1985  i32 n_hw_owned_descriptors;
 1986  i32 first_to_clean, last_to_clean;
 1987  u64 hwbp_race = 0;
 1988 
 1989  /* Handle case where head write back pointer update
 1990  * arrives after the interrupt during high PCI bus loads.
 1991  */
 1992  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
 1993  dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
 1994  {
 1995  hwbp_race++;
 1996  if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
 1997  {
 1998  ELOG_TYPE_DECLARE (e) =
 1999  {
 2000  .function = (char *) __FUNCTION__,.format =
 2001  "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
 2002  = "i4i4i4i4",};
 2003  struct
 2004  {
 2005  u32 instance, head_index, tail_index, n_buffers_on_ring;
 2006  } *ed;
 2007  ed = ELOG_DATA (&vm->elog_main, e);
 2008  ed->instance = xd->device_index;
 2009  ed->head_index = dq->head_index;
 2010  ed->tail_index = dq->tail_index;
 2011  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
 2012  }
 2013  }
 2014 
 /* Adopt the hardware-reported head; everything between the old software
    head and it is now software-owned and can be cleaned. */
 2015  dq->head_index = dq->tx.head_index_write_back[0];
 2016  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
 2017  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
 2018  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
 2019 
 2020  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
 2021  {
 2022  ELOG_TYPE_DECLARE (e) =
 2023  {
 2024  .function = (char *) __FUNCTION__,.format =
 2025  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
 2026  = "i4i4i4i4i4",};
 2027  struct
 2028  {
 2029  u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
 2030  } *ed;
 2031  ed = ELOG_DATA (&vm->elog_main, e);
 2032  ed->instance = xd->device_index;
 2033  ed->head_index = dq->head_index;
 2034  ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
 2035  ed->n_clean = n_clean;
 2036  ed->retries = hwbp_race;
 2037  }
 2038 
 2039  /*
 2040  * This function used to wait until hardware owned zero descriptors.
 2041  * At high PPS rates, that doesn't happen until the TX ring is
 2042  * completely full of descriptors which need to be cleaned up.
 2043  * That, in turn, causes TX ring-full drops and/or long RX service
 2044  * interruptions.
 2045  */
 2046  if (n_clean == 0)
 2047  return;
 2048 
 2049  /* Clean the n_clean descriptors prior to the reported hardware head */
 2050  last_to_clean = dq->head_index - 1;
 2051  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
 2052  last_to_clean;
 2053 
 2054  first_to_clean = (last_to_clean) - (n_clean - 1);
 2055  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
 2056  first_to_clean;
 2057 
 /* NOTE(review): a vec_resize/validate of tx_buffers_pending_free
    (line 2058) is elided in this extraction. */
 2059  t0 = t = xm->tx_buffers_pending_free;
 2060  b = dq->descriptor_buffer_indices + first_to_clean;
 2061 
 2062  /* Wrap case: clean from first to end, then start to last */
 2063  if (first_to_clean > last_to_clean)
 2064  {
 2065  t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
 2066  first_to_clean = 0;
 /* NOTE(review): reset of b to ring start (line 2067) elided here. */
 2068  }
 2069 
 2070  /* Typical case: clean from first to last */
 2071  if (first_to_clean <= last_to_clean)
 2072  t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
 2073 
 2074  if (t > t0)
 2075  {
 2076  u32 n = t - t0;
 /* Buffers on the TX ring are single-segment; free without following
    next_buffer chains. */
 2077  vlib_buffer_free_no_next (vm, t0, n);
 2078  ASSERT (dq->tx.n_buffers_on_ring >= n);
 2079  dq->tx.n_buffers_on_ring -= n;
 2080  _vec_len (xm->tx_buffers_pending_free) = 0;
 2081  }
 2082 }
2083 
2084 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
/* Predicate: interrupt status bit i belongs to an RX queue (bits 0-7).
   NOTE(review): signature line elided in this extraction; presumably
   ixge_interrupt_is_rx_queue (uword i). */
 2087 {
 2088  return i < 8;
 2089 }
2090 
/* Predicate: interrupt status bit i belongs to a TX queue (bits 8-15).
   NOTE(review): signature line elided in this extraction; presumably
   ixge_interrupt_is_tx_queue (uword i). */
 2093 {
 2094  return i >= 8 && i < 16;
 2095 }
2096 
/* Map TX queue index i to its interrupt status bit (TX queues use bits
   8-15 per the comment above).  NOTE(review): signature line elided in
   this extraction; presumably ixge_tx_queue_to_interrupt (uword i). */
 2099 {
 2100  return 8 + i;
 2101 }
2102 
/* Map RX queue index i to its interrupt status bit (RX queues use bits
   0-7 per the comment above).  NOTE(review): signature line elided in
   this extraction; presumably ixge_rx_queue_to_interrupt (uword i). */
 2105 {
 2106  return 0 + i;
 2107 }
2108 
/* Inverse of ixge_rx_queue_to_interrupt: interrupt bit i back to the RX
   queue index.  NOTE(review): the signature line and an ASSERT (line
   2112) are elided in this extraction. */
 2111 {
 2113  return i - 0;
 2114 }
2115 
/* Inverse of ixge_tx_queue_to_interrupt: interrupt bit i back to the TX
   queue index.  NOTE(review): the signature line and an ASSERT (line
   2119) are elided in this extraction. */
 2118 {
 2120  return i - 8;
 2121 }
2122 
/* Read and acknowledge the device's interrupt status, then dispatch each
   set bit: RX queue bits drain ixge_rx_queue, TX queue bits service the
   TX ring, anything else goes to ixge_interrupt.  Returns the number of
   RX packets processed.
   NOTE(review): the function-name line (2124-2125) and the RX-bit test /
   TX dispatch lines (2137, 2141) are elided in this extraction. */
2123 static uword
 2126 {
 2127  ixge_regs_t *r = xd->regs;
 2128  u32 i, s;
 2129  uword n_rx_packets = 0;
 2130 
 /* Write-1-to-clear: acknowledge exactly the status bits we observed. */
 2131  s = r->interrupt.status_write_1_to_set;
 2132  if (s)
 2133  r->interrupt.status_write_1_to_clear = s;
 2134 
 2135  /* *INDENT-OFF* */
 2136  foreach_set_bit (i, s, ({
 2138  n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
 2139 
 2140  else if (ixge_interrupt_is_tx_queue (i))
 2142 
 2143  else
 2144  ixge_interrupt (xm, xd, i);
 2145  }));
 2146  /* *INDENT-ON* */
 2147 
 2148  return n_rx_packets;
 2149 }
2150 
/* Input node function: in interrupt mode, service only devices whose bit
   is set in runtime_data[0] and re-arm their interrupts; in polling mode,
   service every device.  Returns the total RX packet count.
   NOTE(review): the function-name/parameter line (2152) and the polling
   re-enable condition lines (2186-2187) are elided in this extraction. */
2151 static uword
 2153 {
 2154  ixge_main_t *xm = &ixge_main;
 2155  ixge_device_t *xd;
 2156  uword n_rx_packets = 0;
 2157 
 2158  if (node->state == VLIB_NODE_STATE_INTERRUPT)
 2159  {
 2160  uword i;
 2161 
 2162  /* Loop over devices with interrupts. */
 2163  /* *INDENT-OFF* */
 2164  foreach_set_bit (i, node->runtime_data[0], ({
 2165  xd = vec_elt_at_index (xm->devices, i);
 2166  n_rx_packets += ixge_device_input (xm, xd, node);
 2167 
 2168  /* Re-enable interrupts since we're going to stay in interrupt mode. */
 2169  if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
 2170  xd->regs->interrupt.enable_write_1_to_set = ~0;
 2171  }));
 2172  /* *INDENT-ON* */
 2173 
 2174  /* Clear mask of devices with pending interrupts. */
 2175  node->runtime_data[0] = 0;
 2176  }
 2177  else
 2178  {
 2179  /* Poll all devices for input/interrupts. */
 2180  vec_foreach (xd, xm->devices)
 2181  {
 2182  n_rx_packets += ixge_device_input (xm, xd, node);
 2183 
 2184  /* Re-enable interrupts when switching out of polling mode. */
 2185  if (node->flags &
 2188  }
 2189  }
 2190 
 2191  return n_rx_packets;
 2192 }
2193 
/* Error counter strings for the ixge-input node, expanded from the error
   list macro.  NOTE(review): the macro invocation line (2196, presumably
   foreach_ixge_error) is elided in this extraction. */
2194 static char *ixge_error_strings[] = {
 2195 #define _(n,s) s,
 2197 #undef _
 2198 };
2199 
2200 /* *INDENT-OFF* */
/* Register the ixge-input graph node: disabled until hardware is found,
   with drop/ethernet/ip4/ip6 next nodes.  NOTE(review): one registration
   line (2205) is elided in this extraction. */
2201 VLIB_REGISTER_NODE (ixge_input_node, static) = {
 2202  .function = ixge_input,
 2203  .type = VLIB_NODE_TYPE_INPUT,
 2204  .name = "ixge-input",
 2206 
 2207  /* Will be enabled if/when hardware is detected. */
 2208  .state = VLIB_NODE_STATE_DISABLED,
 2209 
 2210  .format_buffer = format_ethernet_header_with_length,
 2211  .format_trace = format_ixge_rx_dma_trace,
 2212 
 2213  .n_errors = IXGE_N_ERROR,
 2214  .error_strings = ixge_error_strings,
 2215 
 2216  .n_next_nodes = IXGE_RX_N_NEXT,
 2217  .next_nodes = {
 2218  [IXGE_RX_NEXT_DROP] = "error-drop",
 2219  [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
 2220  [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
 2221  [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
 2222  },
 2223 };
2224 
2225 /* *INDENT-ON* */
2226 
/* Format the interface name "TenGigabitEthernetD/B/S/F" from the device's
   PCI address.  NOTE(review): the vlib_main lookup line (2230) is elided
   in this extraction. */
2228 static u8 *
 2229 format_ixge_device_name (u8 * s, va_list * args)
 2230 {
 2231  u32 i = va_arg (*args, u32);
 2232  ixge_main_t *xm = &ixge_main;
 2233  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
 2234  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
 2235  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
 2236  addr->domain, addr->bus, addr->slot, addr->function);
 2237 }
2238 
2239 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2240 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2241 
/* Per-counter flags (IXGE_COUNTER_IS_64_BIT / NOT_CLEAR_ON_READ) expanded
   from the counter list macro.  NOTE(review): the array declaration line
   (2242) and the macro invocation (2245, presumably foreach_ixge_counter)
   are elided in this extraction. */
2243 #define _(a,f) 0,
 2244 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
 2246 #undef _
 2247 #undef _64
 2248 };
2249 
/* Accumulate the device's hardware counter registers into xd->counters.
   Counter registers are addressed by byte offset converted to u32 index.
   NOTE(review): the function-name line (2251), the list-macro invocation
   (2257), and the flag tests for clear-on-read / 64-bit high word (lines
   2268, 2270) are elided in this extraction. */
2250 static void
 2252 {
 2253  /* Byte offset for counter registers. */
 2254  static u32 reg_offsets[] = {
 2255 #define _(a,f) (a) / sizeof (u32),
 2256 #define _64(a,f) _(a,f)
 2258 #undef _
 2259 #undef _64
 2260  };
 2261  volatile u32 *r = (volatile u32 *) xd->regs;
 2262  int i;
 2263 
 2264  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
 2265  {
 2266  u32 o = reg_offsets[i];
 2267  xd->counters[i] += r[o];
 /* Explicit clear for counters that are not clear-on-read. */
 2269  r[o] = 0;
 /* Fold in the high 32 bits for 64-bit counters. */
 2271  xd->counters[i] += (u64) r[o + 1] << (u64) 32;
 2272  }
 2273 }
2274 
/* Format a PCI device id as its symbolic name from the device-id list
   macro, or "unknown 0x%x".  NOTE(review): the macro invocation line
   (2283) is elided in this extraction. */
2276 static u8 *
 2277 format_ixge_device_id (u8 * s, va_list * args)
 2278 {
 2279  u32 device_id = va_arg (*args, u32);
 2280  char *t = 0;
 2281  switch (device_id)
 2282  {
 2283 #define _(f,n) case n: t = #f; break;
 2285 #undef _
 2286  default:
 2287  t = 0;
 2288  break;
 2289  }
 2290  if (t == 0)
 2291  s = format (s, "unknown 0x%x", device_id);
 2292  else
 2293  s = format (s, "%s", t);
 2294  return s;
 2295 }
2295 
/* Format the device's MAC link status: up/down (bit 30), link mode
   (bits 26-27) and speed (bits 28-29).
   NOTE(review): the register read into v (line 2300, presumably
   xd->regs->xge_mac.link_status) is elided in this extraction. */
2297 static u8 *
 2298 format_ixge_link_status (u8 * s, va_list * args)
 2299 {
 2300  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
 2301 
 2302  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
 2303 
 2304  {
 2305  char *modes[] = {
 2306  "1g", "10g parallel", "10g serial", "autoneg",
 2307  };
 2308  char *speeds[] = {
 2309  "unknown", "100m", "1g", "10g",
 2310  };
 2311  s = format (s, ", mode %s, speed %s",
 2312  modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
 2313  }
 2314 
 2315  return s;
 2316 }
2317 
/* Format a full device report: chip id, link status, PCI info, PHY/SFP,
   RX/TX queue 0 ring occupancy, and all non-zero hardware counters since
   the last clear.  NOTE(review): several lines are elided in this
   extraction (e.g. 2323, 2330, 2339, 2356, 2380). */
2319 static u8 *
 2320 format_ixge_device (u8 * s, va_list * args)
 2321 {
 2322  u32 dev_instance = va_arg (*args, u32);
 2323  CLIB_UNUSED (int verbose) = va_arg (*args, int);
 2324  ixge_main_t *xm = &ixge_main;
 2325  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
 2326  ixge_phy_t *phy = xd->phys + xd->phy_index;
 2327  u32 indent = format_get_indent (s);
 2328 
 /* Refresh counters so the report below shows current values. */
 2329  ixge_update_counters (xd);
 2331 
 2332  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
 2334  format_white_space, indent + 2, format_ixge_link_status, xd);
 2335 
 2336  {
 2337 
 2338  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
 2340 
 2341  if (d)
 2342  s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
 2344  }
 2345 
 /* PHY, SFP or neither, in that order of preference. */
 2346  s = format (s, "\n%U", format_white_space, indent + 2);
 2347  if (phy->mdio_address != ~0)
 2348  s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
 2349  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
 2350  s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
 2351  else
 2352  s = format (s, "PHY not found");
 2353 
 2354  /* FIXME */
 2355  {
 2357  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
 2358  u32 hw_head_index = dr->head_index;
 2359  u32 sw_head_index = dq->head_index;
 2360  u32 nitems;
 2361 
 2362  nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
 2363  s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
 2364  format_white_space, indent + 2, nitems, dq->n_descriptors);
 2365 
 2366  s = format (s, "\n%U%d buffers in driver rx cache",
 2367  format_white_space, indent + 2,
 2368  vec_len (xm->rx_buffers_to_add));
 2369 
 2370  s = format (s, "\n%U%d buffers on tx queue 0 ring",
 2371  format_white_space, indent + 2,
 2372  xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
 2373  }
 /* Print every counter that changed since the last clear. */
 2374  {
 2375  u32 i;
 2376  u64 v;
 2377  static char *names[] = {
 2378 #define _(a,f) #f,
 2379 #define _64(a,f) _(a,f)
 2381 #undef _
 2382 #undef _64
 2383  };
 2384 
 2385  for (i = 0; i < ARRAY_LEN (names); i++)
 2386  {
 2387  v = xd->counters[i] - xd->counters_last_clear[i];
 2388  if (v != 0)
 2389  s = format (s, "\n%U%-40U%16Ld",
 2390  format_white_space, indent + 2,
 2391  format_c_identifier, names[i], v);
 2392  }
 2393  }
 2394 
 2395  return s;
 2396 }
2397 
/* "Clear" interface counters by snapshotting current values into
   counters_last_clear (format_ixge_device reports the delta).
   NOTE(review): the function-name line (2399) and device lookup (2402)
   are elided in this extraction. */
2398 static void
 2400 {
 2401  ixge_main_t *xm = &ixge_main;
 2403  ixge_update_counters (xd);
 2404  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
 2405 }
2406 
2407 /*
2408  * Dynamically redirect all pkts from a specific interface
2409  * to the specified node
2410  */
/* rx_redirect_to_node callback: redirect all packets from this interface
   to the given node, or restore the default when node_index is ~0.
   NOTE(review): the signature line (2412) and the next-node rewrite
   lines (2417, 2422, 2426-2427) are elided in this extraction. */
2411 static void
 2413  u32 node_index)
 2414 {
 2415  ixge_main_t *xm = &ixge_main;
 2416  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
 2418 
 2419  /* Shut off redirection */
 2420  if (node_index == ~0)
 2421  {
 2423  return;
 2424  }
 2425 
 2428 }
2429 
2430 
2431 /* *INDENT-OFF* */
 /* Device class registration wiring the driver's tx, format, counter and
    admin callbacks into vnet.  NOTE(review): the declaration line (2432,
    presumably VNET_DEVICE_CLASS (ixge_device_class) = {) is elided in
    this extraction. */
 2433  .name = "ixge",
 2434  .tx_function = ixge_interface_tx,
 2435  .format_device_name = format_ixge_device_name,
 2436  .format_device = format_ixge_device,
 2437  .format_tx_trace = format_ixge_tx_dma_trace,
 2438  .clear_counters = ixge_clear_hw_interface_counters,
 2439  .admin_up_down_function = ixge_interface_admin_up_down,
 2440  .rx_redirect_to_node = ixge_set_interface_next_node,
 2441 };
2442 /* *INDENT-ON* */
2443 
2444 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2445 
/* Allocate and program one RX or TX DMA queue: physmem descriptor ring,
   buffer bookkeeping, head-index write-back (TX), and the per-queue DMA
   registers; finally enables the queue.  Returns 0 or a clib error.
   NOTE(review): the function-name line (2447) and several body lines
   (buffer allocation, descriptor initialization) are elided in this
   extraction; verify against the original ixge.c. */
2446 static clib_error_t *
 2448 {
 2449  ixge_main_t *xm = &ixge_main;
 2450  vlib_main_t *vm = xm->vlib_main;
 2451  ixge_dma_queue_t *dq;
 2452  clib_error_t *error = 0;
 2453 
 2454  vec_validate (xd->dma_queues[rt], queue_index);
 2455  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
 2456 
 2459  CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
 2460 
 /* Apply defaults when not configured. */
 2461  if (!xm->n_bytes_in_rx_buffer)
 2464 
 2465  if (!xm->n_descriptors[rt])
 2466  xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
 2467 
 2468  dq->queue_index = queue_index;
 2469  dq->n_descriptors =
 2471  dq->head_index = dq->tail_index = 0;
 2472 
 /* Descriptor ring must be physically contiguous; 128-byte alignment per
    chip spec (see comment retained below). */
 2474  sizeof (dq->descriptors[0]),
 2475  128 /* per chip spec */ );
 2476  if (!dq->descriptors)
 2477  return vlib_physmem_last_error (vm);
 2478 
 2479  clib_memset (dq->descriptors, 0,
 2480  dq->n_descriptors * sizeof (dq->descriptors[0]));
 2482 
 2483  if (rt == VLIB_RX)
 2484  {
 /* Pre-populate every RX descriptor with an allocated buffer. */
 2485  u32 n_alloc, i;
 2486 
 2489  ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
 2490  for (i = 0; i < n_alloc; i++)
 2491  {
 2495  }
 2496  }
 2497  else
 2498  {
 /* TX: allocate the head-index write-back word in physmem and mark every
    descriptor's owning buffer slot empty. */
 2499  u32 i;
 2500 
 2501  dq->tx.head_index_write_back =
 2503  if (!dq->tx.head_index_write_back)
 2504  return vlib_physmem_last_error (vm);
 2505 
 2506  for (i = 0; i < dq->n_descriptors; i++)
 2508 
 2510  }
 2511 
 2512  {
 2513  ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
 2514  u64 a;
 2515 
 /* Program ring base (physical address, split low/high) and size. */
 2517  dr->descriptor_address[0] = a & 0xFFFFFFFF;
 2518  dr->descriptor_address[1] = a >> (u64) 32;
 2519  dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
 2520  dq->head_index = dq->tail_index = 0;
 2521 
 2522  if (rt == VLIB_RX)
 2523  {
 2524  ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
 2525  dr->rx_split_control =
 2526  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
 2527  | ( /* lo free descriptor threshold (units of 64 descriptors) */
 2528  (1 << 22)) | ( /* descriptor type: advanced one buffer */
 2529  (1 << 25)) | ( /* drop if no descriptors available */
 2530  (1 << 28)));
 2531 
 2532  /* Give hardware all but last 16 cache lines' worth of descriptors. */
 2533  dq->tail_index = dq->n_descriptors -
 2535  }
 2536  else
 2537  {
 2538  /* Make sure its initialized before hardware can get to it. */
 2539  dq->tx.head_index_write_back[0] = dq->head_index;
 2540 
 2541  a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
 2542  dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
 2543  dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
 2544  }
 2545 
 2546  /* DMA on 82599 does not work with [13] rx data write relaxed ordering
 2547  and [12] undocumented set. */
 2548  if (rt == VLIB_RX)
 2549  dr->dca_control &= ~((1 << 13) | (1 << 12));
 2550 
 2552 
 2553  if (rt == VLIB_TX)
 2554  {
 2555  xd->regs->tx_dma_control |= (1 << 0);
 2556  dr->control |= ((32 << 0) /* prefetch threshold */
 2557  | (64 << 8) /* host threshold */
 2558  | (0 << 16) /* writeback threshold */ );
 2559  }
 2560 
 2561  /* Enable this queue and wait for hardware to initialize
 2562  before adding to tail. */
 2563  if (rt == VLIB_TX)
 2564  {
 /* Busy-wait on the enable bit; per the comment above this completes
    quickly, so no suspend is taken. */
 2565  dr->control |= 1 << 25;
 2566  while (!(dr->control & (1 << 25)))
 2567  ;
 2568  }
 2569 
 2570  /* Set head/tail indices and enable DMA. */
 2571  dr->head_index = dq->head_index;
 2572  dr->tail_index = dq->tail_index;
 2573  }
 2574 
 2575  return error;
 2576 }
2577 
/* Ethernet flag-change callback: toggle unicast promiscuous (filter
   control bit 9) and return the previous filter_control value, or ~0 for
   unsupported flag combinations.
   NOTE(review): the function-name line (2579) and the promiscuous-flag
   test (line 2591) are elided in this extraction. */
2578 static u32
 2580 {
 2581  ixge_device_t *xd;
 2582  ixge_regs_t *r;
 2583  u32 old;
 2584  ixge_main_t *xm = &ixge_main;
 2585 
 2586  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
 2587  r = xd->regs;
 2588 
 2589  old = r->filter_control;
 2590 
 2592  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
 2593  else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
 2594  r->filter_control = old & ~(1 << 9);
 2595  else
 2596  return ~0;
 2597 
 2598  return old;
 2599 }
2600 
2601 static void
/* Per the cross-reference index in this dump:
   static void ixge_device_init (ixge_main_t * xm) -- ixge.c:2602.
   Extraction dropped that signature line here.
   Resets every discovered chip, reads its MAC address, registers an
   ethernet interface, and brings up RX/TX DMA queue 0. */
2603 {
2604  vnet_main_t *vnm = vnet_get_main ();
2605  ixge_device_t *xd;
2606 
2607  /* Reset chip(s). */
2608  vec_foreach (xd, xm->devices)
2609  {
2610  ixge_regs_t *r = xd->regs;
2611  const u32 reset_bit = (1 << 26) | (1 << 3);
2612 
2613  r->control |= reset_bit;
2614 
2615  /* No need to suspend. Timed to take ~1e-6 secs */
2616  while (r->control & reset_bit)
2617  ;
2618 
2619  /* Software loaded. */
2620  r->extended_control |= (1 << 28);
2621 
2622  ixge_phy_init (xd);
2623 
2624  /* Register ethernet interface. */
2625  {
2626  u8 addr8[6];
2627  u32 i, addr32[2];
 /* NOTE(review): extraction dropped source line 2628 -- presumably the
    declaration of 'error' used below. TODO confirm upstream. */
2629 
 /* Unpack the 6-byte MAC address from the two 32-bit RAL/RAH words. */
2630  addr32[0] = r->rx_ethernet_address0[0][0];
2631  addr32[1] = r->rx_ethernet_address0[0][1];
2632  for (i = 0; i < 6; i++)
2633  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2634 
 /* NOTE(review): source line 2635 dropped -- presumably the start of the
    ethernet_register_interface(...) call whose arguments follow. */
2636  (vnm, ixge_device_class.index, xd->device_index,
2637  /* ethernet address */ addr8,
 /* NOTE(review): source line 2638 dropped (remaining call arguments). */
2639  if (error)
 /* NOTE(review): source line 2640 dropped -- presumably clib_error_report
    or similar error handling. TODO confirm upstream. */
2641  }
2642 
2643  {
2644  vnet_sw_interface_t *sw =
 /* NOTE(review): source line 2645 dropped -- presumably
    vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index). TODO confirm. */
2646  xd->vlib_sw_if_index = sw->sw_if_index;
2647  }
2648 
2649  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2650 
 /* NOTE(review): source line 2651 dropped between the RX and TX DMA
    initialization calls. TODO confirm upstream. */
2652 
2653  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2654 
2655  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2656  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2657  ixge_rx_queue_to_interrupt (0)) << 0);
2658 
2659  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2660  ixge_tx_queue_to_interrupt (0)) << 8);
2661 
2662  /* No use in getting too many interrupts.
2663  Limit them to one every 3/4 ring size at line rate
2664  min sized packets.
2665  No need for this since kernel/vlib main loop provides adequate interrupt
2666  limiting scheme. */
2667  if (0)
2668  {
2669  f64 line_rate_max_pps =
2670  10e9 / (8 * (64 + /* interframe padding */ 20));
 /* NOTE(review): source line 2671 dropped -- presumably the start of the
    interrupt throttle-rate assignment completed on the next two lines. */
2672  .75 * xm->n_descriptors[VLIB_RX] /
2673  line_rate_max_pps);
2674  }
2675 
2676  /* Accept all multicast and broadcast packets. Should really add them
2677  to the dst_ethernet_address register array. */
2678  r->filter_control |= (1 << 10) | (1 << 8);
2679 
2680  /* Enable frames up to size in mac frame size register. */
2681  r->xge_mac.control |= 1 << 2;
2682  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2683 
2684  /* Enable all interrupts. */
2685  if (!IXGE_ALWAYS_POLL)
2686  r->interrupt.enable_write_1_to_set = ~0;
2687  }
2688 }
2689 
2690 static uword
/* Per the cross-reference index in this dump:
   static uword ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
   vlib_frame_t * f) -- ixge.c:2691. Extraction dropped that signature here.
   Background process node: initializes all devices, then loops forever
   debouncing link-state changes (EVENT_SET_FLAGS) and polling the 36-bit
   hardware stats counters every 30 seconds before they can wrap. */
2692 {
2693  vnet_main_t *vnm = vnet_get_main ();
2694  ixge_main_t *xm = &ixge_main;
2695  ixge_device_t *xd;
2696  uword event_type, *event_data = 0;
2697  f64 timeout, link_debounce_deadline;
2698 
2699  ixge_device_init (xm);
2700 
2701  /* Clear all counters. */
2702  vec_foreach (xd, xm->devices)
2703  {
 /* Read once to latch clear-on-read counters, then zero the soft copy. */
2704  ixge_update_counters (xd);
2705  clib_memset (xd->counters, 0, sizeof (xd->counters));
2706  }
2707 
 /* 1e70 acts as "no debounce pending" (effectively +infinity). */
2708  timeout = 30.0;
2709  link_debounce_deadline = 1e70;
2710 
2711  while (1)
2712  {
2713  /* 36 bit stat counters could overflow in ~50 secs.
2714  We poll every 30 secs to be conservative. */
 /* NOTE(review): source line 2715 dropped -- presumably
    vlib_process_wait_for_event_or_clock (vm, timeout). TODO confirm. */
2716 
2717  event_type = vlib_process_get_events (vm, &event_data);
2718 
2719  switch (event_type)
2720  {
2721  case EVENT_SET_FLAGS:
2722  /* 1 ms */
2723  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2724  timeout = 1e-3;
2725  break;
2726 
2727  case ~0:
2728  /* No events found: timer expired. */
2729  if (vlib_time_now (vm) > link_debounce_deadline)
2730  {
2731  vec_foreach (xd, xm->devices)
2732  {
2733  ixge_regs_t *r = xd->regs;
2734  u32 v = r->xge_mac.link_status;
2735  uword is_up = (v & (1 << 30)) != 0;
2736 
 /* NOTE(review): source line 2737 dropped -- presumably the start of a
    vnet_hw_interface_set_flags(...) call taking the arguments below. */
2738  (vnm, xd->vlib_hw_if_index,
2739  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2740  }
 /* Debounce window over: re-arm for the slow 30s stats cadence. */
2741  link_debounce_deadline = 1e70;
2742  timeout = 30.0;
2743  }
2744  break;
2745 
2746  default:
2747  ASSERT (0);
2748  }
2749 
 /* Reset the event vector for reuse without freeing it. */
2750  if (event_data)
2751  _vec_len (event_data) = 0;
2752 
2753  /* Query stats every 30 secs. */
2754  {
2755  f64 now = vlib_time_now (vm);
2756  if (now - xm->time_last_stats_update > 30)
2757  {
 /* NOTE(review): source line 2758 dropped -- presumably
    xm->time_last_stats_update = now. TODO confirm upstream. */
2759  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2760  }
2761  }
2762  }
2763 
 /* Not reached: process loops forever. */
2764  return 0;
2765 }
2766 
/* NOTE(review): extraction dropped source line 2767 -- the opening of this
   registration, presumably VLIB_REGISTER_NODE (ixge_process_node,static) = {
   (the index lists ixge_process_node as a static vlib_node_registration_t).
   Registers ixge_process as the "ixge-process" background process node. */
2768  .function = ixge_process,
2769  .type = VLIB_NODE_TYPE_PROCESS,
2770  .name = "ixge-process",
2771 };
2772 
2773 clib_error_t *
/* Per the cross-reference index in this dump:
   clib_error_t * ixge_init (vlib_main_t * vm) -- ixge.c:2774.
   Extraction dropped that signature line here. Plugin init: records the
   vlib main pointer and sets up the TX descriptor template/mask used to
   recognize completed descriptors. */
2775 {
2776  ixge_main_t *xm = &ixge_main;
2777 
2778  xm->vlib_main = vm;
 /* NOTE(review): source line 2779 dropped -- presumably the start of a
    clib_memset of xm->tx_descriptor_template, completed on the next line. */
2780  sizeof (xm->tx_descriptor_template));
 /* NOTE(review): source line 2781 dropped -- presumably the start of a
    clib_memset of xm->tx_descriptor_template_mask, completed below. */
2782  sizeof (xm->tx_descriptor_template_mask));
 /* NOTE(review): source lines 2783-2786 dropped -- presumably the
    tx_descriptor_template status0/status1 initialization. TODO confirm. */
2787  xm->tx_descriptor_template_mask.status0 = 0xffff;
2788  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2789 
 /* NOTE(review): source lines 2790-2794 dropped -- presumably clearing
    the DONE/end-of-packet bits in the mask and/or recording
    xm->process_node_index. TODO confirm against upstream ixge.c. */
2795  return 0;
2796 }
2797 
2798 /* *INDENT-OFF* */
/* NOTE(review): extraction dropped source line 2799 -- presumably
   VLIB_INIT_FUNCTION (ixge_init) = (the VLIB_INIT_FUNCTION macro appears in
   the index). Orders ixge_init before pci_bus_init so devices are claimed
   during PCI bus enumeration. */
2800 {
2801  .runs_before = VLIB_INITS("pci_bus_init"),
2802 };
2803 /* *INDENT-ON* */
2804 
2805 
2806 static void
/* Per the cross-reference index in this dump:
   static void ixge_pci_intr_handler (vlib_main_t * vm,
   vlib_pci_dev_handle_t h) -- ixge.c:2807. Extraction dropped that
   signature line. PCI interrupt: marks the input node pending and flags
   which device interrupted via a bit in the node's runtime data. */
2808 {
2809  uword private_data = vlib_pci_get_private_data (vm, h);
2810 
 /* NOTE(review): source line 2811 dropped -- presumably
    vlib_node_set_interrupt_pending (vm, ixge_input_node.index).
    TODO confirm against upstream ixge.c. */
2812 
2813  /* Let node know which device is interrupting. */
2814  {
 /* NOTE(review): source lines 2815-2816 dropped -- presumably the
    declaration/lookup of the node runtime 'rt' used below. TODO confirm. */
2817  rt->runtime_data[0] |= 1 << private_data;
2818  }
2819 }
2820 
2821 static clib_error_t *
/* NOTE(review): extraction dropped source line 2822 -- the signature,
   presumably ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
   given the uses of vm/h below. TODO confirm upstream.
   Per-device PCI probe: maps BAR 0, allocates an ixge_device_t, records
   identity, and enables bus mastering + interrupts. */
2823 {
2824  ixge_main_t *xm = &ixge_main;
2825  clib_error_t *error = 0;
2826  void *r;
2827  ixge_device_t *xd;
2828  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
 /* NOTE(review): source line 2829 dropped -- presumably the declaration of
    'd' (device info) referenced as d->device_id below, e.g. via
    vlib_pci_get_device_info. TODO confirm against upstream ixge.c. */
2830 
 /* Map BAR 0: the chip's register window. */
2831  error = vlib_pci_map_region (vm, h, 0, &r);
2832  if (error)
2833  return error;
2834 
2835  vec_add2 (xm->devices, xd, 1);
2836 
 /* First device found: install the real input function. */
2837  if (vec_len (xm->devices) == 1)
2838  {
2839  ixge_input_node.function = ixge_input;
2840  }
2841 
2842  xd->pci_dev_handle = h;
2843  xd->device_id = d->device_id;
2844  xd->regs = r;
2845  xd->device_index = xd - xm->devices;
2846  xd->pci_function = addr->function;
2847  xd->per_interface_next_index = ~0;
2848 
 /* NOTE(review): source line 2849 dropped -- presumably
    vlib_pci_set_private_data (vm, h, xd->device_index), which the interrupt
    handler later reads back. TODO confirm upstream. */
2850 
2851  /* Chip found so enable node. */
2852  {
 /* NOTE(review): source lines 2853-2854 dropped -- presumably the start of
    a vlib_node_set_state (..., IXGE_ALWAYS_POLL ...) call completed by the
    two argument lines below. TODO confirm upstream. */
2855  ? VLIB_NODE_STATE_POLLING
2856  : VLIB_NODE_STATE_INTERRUPT));
2857 
2858  //dev->private_data = xd->device_index;
2859  }
2860 
2861  if (vec_len (xm->devices) == 1)
2862  {
 /* NOTE(review): source lines 2863-2864 dropped -- presumably recording
    the process node index / signaling first-device setup. TODO confirm. */
2865  }
2866 
 /* NOTE(review): source line 2867 dropped -- presumably
    error = vlib_pci_bus_master_enable (vm, h), checked just below. */
2868 
2869  if (error)
2870  return error;
2871 
2872  return vlib_pci_intr_enable (vm, h);
2873 }
2874 
2875 /* *INDENT-OFF* */
/* Registers this driver with vlib's PCI layer: probe/interrupt callbacks and
   the table of supported Intel device IDs. */
2876 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2877  .init_function = ixge_pci_init,
2878  .interrupt_handler = ixge_pci_intr_handler,
2879  .supported_devices = {
2880 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
 /* NOTE(review): source line 2881 dropped -- presumably the
    foreach_ixge_pci_device_id macro invocation that expands '_' above into
    one table entry per supported device. TODO confirm upstream. */
2882 #undef _
2883  { 0 },
2884  },
2885 };
2886 /* *INDENT-ON* */
2887 
2888 void
/* Per the cross-reference index in this dump:
   void ixge_set_next_node (ixge_rx_next_t next, char * name) -- ixge.c:2889.
   Extraction dropped that signature line. Redirects one of the ixge input
   node's RX next slots to a differently-named graph node. */
2890 {
 /* NOTE(review): source line 2891 dropped -- presumably
    vlib_node_registration_t *r = &ixge_input_node; ('r' is used below). */
2892 
2893  switch (next)
2894  {
 /* NOTE(review): source lines 2895-2897 dropped -- presumably the case
    labels (IXGE_RX_NEXT_IP4_INPUT / IP6_INPUT / ETHERNET_INPUT / DROP per
    the index) that fall through to the assignment below. TODO confirm. */
2898  r->next_nodes[next] = name;
2899  break;
2900 
2901  default:
 /* Reject out-of-range next indices loudly rather than corrupting the
    next_nodes table. */
2902  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2903  break;
2904  }
2905 }
2906 
2907 /* *INDENT-OFF* */
/* Plugin registration: disabled by default, matching the file-top warning
   that this driver is educational/unsupported. */
2908 VLIB_PLUGIN_REGISTER () = {
2909  .version = VPP_BUILD_VER,
2910  .default_disabled = 1,
2911  .description = "Intel 82599 Family Native Driver (experimental)",
2912 };
2913 #endif
2914 
2915 /* *INDENT-ON* */
2916 
2917 /*
2918  * fd.io coding-style-patch-verification: ON
2919  *
2920  * Local Variables:
2921  * eval: (c-set-style "gnu")
2922  * End:
2923  */
i2c_bus_t
Definition: i2c.h:33
vlib.h
ixge_dma_regs_t::tx
struct ixge_dma_regs_t::@790::@793 tx
XGE_PHY_CONTROL
#define XGE_PHY_CONTROL
Definition: ixge.c:52
ixge_descriptor_t::as_u32x4
u32x4 as_u32x4
Definition: ixge.h:171
ixge_rx_next_and_error_from_status_x1
static void ixge_rx_next_and_error_from_status_x1(ixge_device_t *xd, u32 s00, u32 s02, u8 *next0, u8 *error0, u32 *flags0)
Definition: ixge.c:631
ixge_dma_regs_t::dca_control
u32 dca_control
Definition: ixge.h:40
PCI_REGISTER_DEVICE
PCI_REGISTER_DEVICE(ixge_pci_device_registration, static)
vlib_pci_set_private_data
void vlib_pci_set_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h, uword private_data)
Definition: pci.c:155
vlib_buffer_t::next_buffer
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
ixge_rx_queue_no_wrap
static uword ixge_rx_queue_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 start_descriptor_index, u32 n_descriptors)
Definition: ixge.c:1339
vlib_frame_t::n_vectors
u16 n_vectors
Definition: node.h:387
ixge_rx_queue
static uword ixge_rx_queue(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node, u32 queue_index)
Definition: ixge.c:1803
vlib_in_process_context
static uword vlib_in_process_context(vlib_main_t *vm)
Definition: node_funcs.h:442
ixge_device_t::sfp_eeprom
sfp_eeprom_t sfp_eeprom
Definition: ixge.h:1234
IXGE_HWBP_RACE_ELOG
#define IXGE_HWBP_RACE_ELOG
Definition: ixge.c:43
ixge_regs_t::auto_negotiation_control
u32 auto_negotiation_control
Definition: ixge.h:427
ixge_sfp_device_up_down
static void ixge_sfp_device_up_down(ixge_device_t *xd, uword is_up)
Definition: ixge.c:267
vlib_physmem_alloc_aligned
static void * vlib_physmem_alloc_aligned(vlib_main_t *vm, uword n_bytes, uword alignment)
Definition: physmem_funcs.h:56
vlib_node_runtime_t::flags
u16 flags
Copy of main node flags.
Definition: node.h:491
IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR
#define IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR
Definition: ixge.h:121
vec_add
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
Definition: vec.h:688
ixge_pci_intr_handler
static void ixge_pci_intr_handler(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2807
ixge_tx_trace
static void ixge_tx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_tx_state_t *tx_state, ixge_tx_descriptor_t *descriptors, u32 *buffers, uword n_descriptors)
Definition: ixge.c:910
ixge_counter_flags
static u8 ixge_counter_flags[]
Definition: ixge.c:2242
ixge_regs_t::link_status
u32 link_status
Definition: ixge.h:340
ixge_regs_t::auto_negotiation_control2
u32 auto_negotiation_control2
Definition: ixge.h:465
vnet_sw_interface_t
Definition: interface.h:869
ixge_rx_to_hw_descriptor_t::tail_address
u64 tail_address
Definition: ixge.h:89
ixge_device_t::device_index
u16 device_index
Definition: ixge.h:1216
XGE_PHY_DEV_TYPE_PMA_PMD
#define XGE_PHY_DEV_TYPE_PMA_PMD
Definition: ixge.c:48
ixge_tx_queue
static void ixge_tx_queue(ixge_main_t *xm, ixge_device_t *xd, u32 queue_index)
Definition: ixge.c:1979
IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS
#define IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS
Definition: ixge.h:135
vlib_node_add_next
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1177
IXGE_RX_N_NEXT
@ IXGE_RX_N_NEXT
Definition: ixge.h:1279
ixge_input
static uword ixge_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:2152
next_index
nat44_ei_hairpin_src_next_t next_index
Definition: nat44_ei_hairpinning.c:412
ixge_tx_descriptor_t::buffer_address
u64 buffer_address
Definition: ixge.h:128
IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT
Definition: ixge.h:103
ixge_regs_t::tx_dma_control
u32 tx_dma_control
Definition: ixge.h:500
ixge_read_eeprom
static u16 ixge_read_eeprom(ixge_device_t *xd, u32 address)
Definition: ixge.c:196
clean_block
static u32 clean_block(u32 *b, u32 *t, u32 n_left)
Definition: ixge.c:1936
ixge_main_t::vlib_main
vlib_main_t * vlib_main
Definition: ixge.h:1242
vlib_get_buffer
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
f
vlib_frame_t * f
Definition: interface_output.c:1098
ixge_rx_dma_trace_t::before
ixge_descriptor_t before
Definition: ixge.c:561
vlib_node_set_state
static void vlib_node_set_state(vlib_main_t *vm, u32 node_index, vlib_node_state_t new_state)
Set node dispatch state.
Definition: node_funcs.h:175
name
string name[64]
Definition: fib.api:25
vlib_i2c_bus_timed_out
static int vlib_i2c_bus_timed_out(i2c_bus_t *bus)
Definition: i2c.h:54
format_ixge_rx_from_hw_descriptor
static u8 * format_ixge_rx_from_hw_descriptor(u8 *s, va_list *va)
Definition: ixge.c:445
ixge_device_t::device_id
ixge_pci_device_id_t device_id
Definition: ixge.h:1214
next
u16 * next
Definition: nat44_ei_out2in.c:718
ixge_tx_state_t::start_of_packet_descriptor
ixge_tx_descriptor_t * start_of_packet_descriptor
Definition: ixge.c:906
format_hex_bytes
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
VLIB_FRAME_SIZE
#define VLIB_FRAME_SIZE
Definition: node.h:368
vec_end
#define vec_end(v)
End (last data address) of vector.
Definition: vec_bootstrap.h:197
ixge_regs_t::core_analog_config
u32 core_analog_config
Definition: ixge.h:949
node
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
vlib_physmem_alloc
static void * vlib_physmem_alloc(vlib_main_t *vm, uword n_bytes)
Definition: physmem_funcs.h:73
ixge_phy_t::id
u32 id
Definition: ixge.h:1116
sfp_eeprom_is_valid
static uword sfp_eeprom_is_valid(sfp_eeprom_t *e)
Definition: sfp.h:90
foreach_ixge_counter
#define foreach_ixge_counter
Definition: ixge.h:1016
ixge_dma_regs_t::control
u32 control
Definition: ixge.h:62
ixge_device_t::regs
ixge_regs_t * regs
Definition: ixge.h:1205
VLIB_NODE_TYPE_INPUT
@ VLIB_NODE_TYPE_INPUT
Definition: node.h:76
ixge_rx_from_hw_descriptor_t::status
u32 status[3]
Definition: ixge.h:95
ixge.h
vlib_pci_dev_handle_t
u32 vlib_pci_dev_handle_t
Definition: pci.h:97
ixge_device_class
vnet_device_class_t ixge_device_class
IXGE_RX_NEXT_IP6_INPUT
@ IXGE_RX_NEXT_IP6_INPUT
Definition: ixge.h:1276
u16
unsigned short u16
Definition: types.h:57
ixge_dma_regs_t::head_index
u32 head_index
Definition: ixge.h:42
ixge_regs_t
Definition: ixge.h:174
VNET_SW_INTERFACE_FLAG_ADMIN_UP
@ VNET_SW_INTERFACE_FLAG_ADMIN_UP
Definition: interface.h:844
ixge_dma_queue_t::descriptors
ixge_descriptor_t * descriptors
Definition: ixge.h:1122
vm
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
VLIB_RX
@ VLIB_RX
Definition: defs.h:46
ixge_main_t::n_descriptors_per_cache_line
u32 n_descriptors_per_cache_line
Definition: ixge.h:1254
ixge_tx_state_t::n_bytes_in_packet
u32 n_bytes_in_packet
Definition: ixge.c:904
node_index
node node_index
Definition: interface_output.c:440
vnet_get_sw_interface
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
Definition: interface_funcs.h:58
VNET_HW_INTERFACE_FLAG_LINK_UP
@ VNET_HW_INTERFACE_FLAG_LINK_UP
Definition: interface.h:509
vlib_get_trace_count
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:212
ixge_rx_from_hw_descriptor_t
Definition: ixge.h:93
ixge_descriptor_t::rx_to_hw
ixge_rx_to_hw_descriptor_t rx_to_hw
Definition: ixge.h:168
ixge_sfp_enable_disable_10g
static void ixge_sfp_enable_disable_10g(ixge_device_t *xd, uword enable)
Definition: ixge.c:218
vlib_buffer_get_pa
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:488
clib_error_report
#define clib_error_report(e)
Definition: error.h:113
format_ixge_rx_dma_trace
static u8 * format_ixge_rx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:576
IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE
Definition: ixge.h:110
vnet_hw_interface_t::dev_instance
u32 dev_instance
Definition: interface.h:660
addr
vhost_vring_addr_t addr
Definition: vhost_user.h:130
foreach_ixge_error
#define foreach_ixge_error
Definition: ixge.c:615
ixge_clear_hw_interface_counters
static void ixge_clear_hw_interface_counters(u32 instance)
Definition: ixge.c:2399
r
vnet_hw_if_output_node_runtime_t * r
Definition: interface_output.c:1089
IXGE_RX_NEXT_IP4_INPUT
@ IXGE_RX_NEXT_IP4_INPUT
Definition: ixge.h:1275
vlib_buffer_is_known
static vlib_buffer_known_state_t vlib_buffer_is_known(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:529
vlib_error_count
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
vlib_frame_t
Definition: node.h:372
IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED
Definition: ixge.h:115
XGE_PHY_DEV_TYPE_PHY_XS
#define XGE_PHY_DEV_TYPE_PHY_XS
Definition: ixge.c:49
vlib_process_signal_event
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:1019
vlib_pci_map_region
clib_error_t * vlib_pci_map_region(vlib_main_t *vm, vlib_pci_dev_handle_t h, u32 resource, void **result)
Definition: pci.c:1182
ixge_dma_queue_t::queue_index
u32 queue_index
Definition: ixge.h:1131
IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
Definition: ixge.h:138
IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR
#define IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR
Definition: ixge.h:118
IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP
Definition: ixge.h:106
ixge_init
clib_error_t * ixge_init(vlib_main_t *vm)
Definition: ixge.c:2774
ethernet.h
h
h
Definition: flowhash_template.h:372
ixge_update_counters
static void ixge_update_counters(ixge_device_t *xd)
Definition: ixge.c:2251
error
Definition: cJSON.c:88
vlib_pci_intr_enable
static clib_error_t * vlib_pci_intr_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:239
IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4
Definition: ixge.h:102
i32
signed int i32
Definition: types.h:77
ixge_phy_init
static void ixge_phy_init(ixge_device_t *xd)
Definition: ixge.c:376
ixge_interrupt_tx_queue
static uword ixge_interrupt_tx_queue(uword i)
Definition: ixge.c:2117
ixge_device_t::dma_queues
ixge_dma_queue_t * dma_queues[VLIB_N_RX_TX]
Definition: ixge.h:1224
vlib_pci_device_info
Definition: pci.h:60
ixge_main_t::devices
ixge_device_t * devices
Definition: ixge.h:1245
i2c_bus_t::get_bits
void(* get_bits)(struct i2c_bus_t *b, int *scl, int *sda)
Definition: i2c.h:36
vlib_pci_bus_master_enable
static clib_error_t * vlib_pci_bus_master_enable(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.h:271
round_pow2
static uword round_pow2(uword x, uword pow2)
Definition: clib.h:279
ixge_software_firmware_sync_release
static void ixge_software_firmware_sync_release(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:111
CLIB_PREFETCH
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:76
vnet_sw_interface_t::sw_if_index
u32 sw_if_index
Definition: interface.h:876
vlib_process_get_events
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type,...
Definition: node_funcs.h:583
vlib_buffer_t::current_data
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
ixge_rx_to_hw_descriptor_t::head_address
u64 head_address
Definition: ixge.h:90
ixge_main_t::tx_descriptor_template_mask
ixge_tx_descriptor_t tx_descriptor_template_mask
Definition: ixge.h:1259
vlib_pci_get_private_data
uword vlib_pci_get_private_data(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:148
vlib_i2c_read_eeprom
void vlib_i2c_read_eeprom(i2c_bus_t *bus, u8 i2c_addr, u16 start_addr, u16 length, u8 *data)
Definition: i2c.c:201
format_ixge_device_name
static u8 * format_ixge_device_name(u8 *s, va_list *args)
Definition: ixge.c:2228
vlib_physmem_get_pa
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
Definition: physmem_funcs.h:103
clib_error_create
#define clib_error_create(args...)
Definition: error.h:96
vec_len
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
Definition: vec_bootstrap.h:142
ixge_main_t::tx_buffers_pending_free
u32 * tx_buffers_pending_free
Definition: ixge.h:1262
vlib_buffer_t::error
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
IXGE_TX_DESCRIPTOR_STATUS1_DONE
#define IXGE_TX_DESCRIPTOR_STATUS1_DONE
Definition: ixge.h:139
ELOG_TYPE_DECLARE
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:442
ixge_device_t::pci_function
u16 pci_function
Definition: ixge.h:1219
vnet_interface_output_runtime_t::dev_instance
u32 dev_instance
Definition: interface_funcs.h:479
ixge_device_t::phy_index
u32 phy_index
Definition: ixge.h:1227
vec_add2
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:644
ixge_regs_t::sdp_control
u32 sdp_control
Definition: ixge.h:201
ixge_descriptor_t::rx_from_hw
ixge_rx_from_hw_descriptor_t rx_from_hw
Definition: ixge.h:169
ixge_ring_sub
static uword ixge_ring_sub(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:996
vlib_buffer_alloc
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:702
CLIB_UNUSED
#define CLIB_UNUSED(x)
Definition: clib.h:90
vnet_buffer
#define vnet_buffer(b)
Definition: buffer.h:441
vec_elt_at_index
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
Definition: vec_bootstrap.h:203
vnet_get_hw_interface
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
Definition: interface_funcs.h:44
IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP
Definition: ixge.h:107
vnet_get_main
vnet_main_t * vnet_get_main(void)
Definition: pnat_test_stubs.h:56
format_sfp_eeprom
format_function_t format_sfp_eeprom
Definition: sfp.h:133
VLIB_NODE_FLAG_TRACE
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:291
ixge_rx_from_hw_descriptor_t::vlan_tag
u16 vlan_tag
Definition: ixge.h:97
ixge_input_node
static vlib_node_registration_t ixge_input_node
(constructor) VLIB_REGISTER_NODE (ixge_input_node)
Definition: ixge.c:56
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
ixge_interface_admin_up_down
static clib_error_t * ixge_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: ixge.c:321
ARRAY_LEN
#define ARRAY_LEN(x)
Definition: clib.h:70
vlib_frame_vector_args
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
ixge_i2c_get_bits
static void ixge_i2c_get_bits(i2c_bus_t *b, int *scl, int *sda)
Definition: ixge.c:184
vlib_prefetch_buffer_with_index
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:507
ixge_descriptor_t::tx
ixge_tx_descriptor_t tx
Definition: ixge.h:170
ixge_phy_t
Definition: ixge.h:1111
ixge_sfp_phy_init_from_eeprom
static clib_error_t * ixge_sfp_phy_init_from_eeprom(ixge_device_t *xd, u16 sfp_type)
Definition: ixge.c:228
IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
#define IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
Definition: ixge.h:120
uword
u64 uword
Definition: types.h:112
ixge_tx_descriptor_matches_template
static uword ixge_tx_descriptor_matches_template(ixge_main_t *xm, ixge_tx_descriptor_t *d)
Definition: ixge.c:1015
ixge_tx_queue_to_interrupt
static uword ixge_tx_queue_to_interrupt(uword i)
Definition: ixge.c:2098
if
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
vlib_rx_or_tx_t
vlib_rx_or_tx_t
Definition: defs.h:44
ixge_device_init
static void ixge_device_init(ixge_main_t *xm)
Definition: ixge.c:2602
ixge_tx_descriptor_t::n_bytes_this_buffer
u16 n_bytes_this_buffer
Definition: ixge.h:129
ixge_dma_regs_t::tail_index
u32 tail_index
Definition: ixge.h:53
ixge_software_firmware_sync
static void ixge_software_firmware_sync(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:89
ixge_interrupt_is_rx_queue
static uword ixge_interrupt_is_rx_queue(uword i)
Definition: ixge.c:2086
IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED
Definition: ixge.h:113
ixge_set_next_node
void ixge_set_next_node(ixge_rx_next_t next, char *name)
Definition: ixge.c:2889
ixge_rx_dma_trace_t::is_start_of_packet
u8 is_start_of_packet
Definition: ixge.c:569
IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN
Definition: ixge.h:112
VLIB_PLUGIN_REGISTER
VLIB_PLUGIN_REGISTER()
ixge_dma_regs_t
Definition: ixge.h:26
ixge_semaphore_get
static void ixge_semaphore_get(ixge_device_t *xd)
Definition: ixge.c:60
ixge_interrupt
static void ixge_interrupt(ixge_main_t *xm, ixge_device_t *xd, u32 i)
Definition: ixge.c:1876
IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET
Definition: ixge.h:111
VLIB_NODE_FLAG_TRACE_SUPPORTED
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:295
foreach_set_bit
#define foreach_set_bit(var, mask, body)
Definition: bitops.h:166
f64
double f64
Definition: types.h:142
ixge_main
ixge_main_t ixge_main
Definition: ixge.c:55
ixge_regs_t::link_partner_ability
u32 link_partner_ability
Definition: ixge.h:343
i2c_bus_t::private_data
uword private_data
Definition: i2c.h:44
ixge_process_node
static vlib_node_registration_t ixge_process_node
Definition: ixge.c:57
ixge_main_t::process_node_index
u32 process_node_index
Definition: ixge.h:1256
IXGE_COUNTER_NOT_CLEAR_ON_READ
#define IXGE_COUNTER_NOT_CLEAR_ON_READ
Definition: ixge.c:2240
ixge_device_t::counters_last_clear
u64 counters_last_clear[IXGE_N_COUNTER]
Definition: ixge.h:1237
vec_validate
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment)
Definition: vec.h:523
address
manual_print typedef address
Definition: ip_types.api:96
format_vnet_buffer_no_chain
format_function_t format_vnet_buffer_no_chain
Definition: buffer.h:520
format_ixge_link_status
static u8 * format_ixge_link_status(u8 *s, va_list *args)
Definition: ixge.c:2297
ixge_tx_dma_trace_t
Definition: ixge.c:845
ixge_main_t::rx_buffers_to_add
u32 * rx_buffers_to_add
Definition: ixge.h:1264
ixge_device_t
Definition: ixge.h:1202
ixge_tx_dma_trace_t::is_start_of_packet
u8 is_start_of_packet
Definition: ixge.c:855
ixge_device_input
static uword ixge_device_input(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node)
Definition: ixge.c:2124
ixge_tx_dma_trace_t::buffer_index
u32 buffer_index
Definition: ixge.c:849
vlib_pci_get_device_info
vlib_pci_device_info_t * vlib_pci_get_device_info(vlib_main_t *vm, vlib_pci_addr_t *addr, clib_error_t **error)
Definition: pci.c:202
ixge_device_t::link_status_at_last_link_change
u32 link_status_at_last_link_change
Definition: ixge.h:1231
CLIB_MEMORY_BARRIER
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:137
VNET_INTERFACE_COUNTER_RX
@ VNET_INTERFACE_COUNTER_RX
Definition: interface.h:915
ixge_read_phy_reg
static u32 ixge_read_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index)
Definition: ixge.c:157
clib_min
#define clib_min(x, y)
Definition: clib.h:342
ixge_process
static uword ixge_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
Definition: ixge.c:2691
CLIB_CACHE_LINE_BYTES
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:58
vlib_i2c_init
void vlib_i2c_init(i2c_bus_t *b)
Definition: i2c.c:150
vlib_set_trace_count
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:226
ixge_dma_init
static clib_error_t * ixge_dma_init(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 queue_index)
Definition: ixge.c:2447
IXGE_N_ERROR
@ IXGE_N_ERROR
Definition: ixge.c:627
ixge_rx_trace
static void ixge_rx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_descriptor_t *before_descriptors, u32 *before_buffers, ixge_descriptor_t *after_descriptors, uword n_descriptors)
Definition: ixge.c:734
vlib_node_registration_t
struct _vlib_node_registration vlib_node_registration_t
ixge_dma_regs_t::rx_split_control
u32 rx_split_control
Definition: ixge.h:51
ixge_rx_queue_to_interrupt
static uword ixge_rx_queue_to_interrupt(uword i)
Definition: ixge.c:2104
ixge_regs_t::interrupt
struct ixge_regs_t::@796 interrupt
vlib_buffer_t::current_length
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
ixge_phy_t::mdio_address
u32 mdio_address
Definition: ixge.h:1113
ixge_rx_to_hw_descriptor_t
Definition: ixge.h:87
ixge_rx_dma_trace_t::queue_index
u8 queue_index
Definition: ixge.c:567
IXGE_ALWAYS_POLL
#define IXGE_ALWAYS_POLL
Definition: ixge.c:40
ixge_dma_regs_t::n_descriptor_bytes
u32 n_descriptor_bytes
Definition: ixge.h:30
plugin.h
ixge_set_interface_next_node
static void ixge_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: ixge.c:2412
ixge_dma_queue_t::descriptor_buffer_indices
u32 * descriptor_buffer_indices
Definition: ixge.h:1134
XGE_PHY_CONTROL_RESET
#define XGE_PHY_CONTROL_RESET
Definition: ixge.c:53
IXGE_RX_NEXT_DROP
@ IXGE_RX_NEXT_DROP
Definition: ixge.h:1278
ixge_ring_add
static uword ixge_ring_add(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:1005
id
u8 id[64]
Definition: dhcp.api:160
ixge_sfp_enable_disable_laser
static void ixge_sfp_enable_disable_laser(ixge_device_t *xd, uword enable)
Definition: ixge.c:208
IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
Definition: ixge.h:114
is_ip6
bool is_ip6
Definition: ip.api:43
format_function_t
u8 *() format_function_t(u8 *s, va_list *args)
Definition: format.h:48
ixge_dma_regs_t::descriptor_address
u32 descriptor_address[2]
Definition: ixge.h:29
vnet_hw_interface_t
Definition: interface.h:638
vnet_main_t
Definition: vnet.h:76
ixge_tx_descriptor_t::status0
u16 status0
Definition: ixge.h:130
vlib_validate_buffer_enqueue_x1
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
index
u32 index
Definition: flow_types.api:221
always_inline
#define always_inline
Definition: rdma_mlx5dv.h:23
ixge_device_t::vlib_hw_if_index
u32 vlib_hw_if_index
Definition: ixge.h:1222
ixge_main_t
Definition: ixge.h:1240
ixge_interrupt_is_tx_queue
static uword ixge_interrupt_is_tx_queue(uword i)
Definition: ixge.c:2092
u64
unsigned long u64
Definition: types.h:89
IXGE_COUNTER_IS_64_BIT
#define IXGE_COUNTER_IS_64_BIT
Definition: ixge.c:2239
vlib_process_wait_for_event_or_clock
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:755
IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET
#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l)
Definition: ixge.h:145
IXGE_RX_NEXT_ETHERNET_INPUT
@ IXGE_RX_NEXT_ETHERNET_INPUT
Definition: ixge.h:1277
format
description fragment has unexpected format
Definition: map.api:433
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
format_c_identifier
u8 * format_c_identifier(u8 *s, va_list *va)
Definition: std-formats.c:329
ixge_main_t::time_last_stats_update
f64 time_last_stats_update
Definition: ixge.h:1266
format_get_indent
static u32 format_get_indent(u8 *s)
Definition: format.h:72
ixge_rx_from_hw_descriptor_t::n_packet_bytes_this_descriptor
u16 n_packet_bytes_this_descriptor
Definition: ixge.h:96
ixge_tx_descriptor_t::status1
u32 status1
Definition: ixge.h:131
vlib_put_next_frame
vlib_put_next_frame(vm, node, next_index, 0)
ixge_tx_dma_trace_t::descriptor
ixge_tx_descriptor_t descriptor
Definition: ixge.c:847
vlib_process_suspend
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
Definition: node_funcs.h:486
vnet_get_hw_sw_interface
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
Definition: interface_funcs.h:72
vlib_set_next_frame_buffer
static void vlib_set_next_frame_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 buffer_index)
Definition: node_funcs.h:428
ixge_main_t::tx_descriptor_template
ixge_tx_descriptor_t tx_descriptor_template
Definition: ixge.h:1259
u32
unsigned int u32
Definition: types.h:88
VLIB_INIT_FUNCTION
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:172
format_ethernet_header_with_length
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
format_ixge_tx_descriptor
static u8 * format_ixge_tx_descriptor(u8 *s, va_list *va)
Definition: ixge.c:516
vlib_node_set_interrupt_pending
static void vlib_node_set_interrupt_pending(vlib_main_t *vm, u32 node_index)
Definition: node_funcs.h:249
XGE_PHY_ID1
#define XGE_PHY_ID1
Definition: ixge.c:50
n_bytes
u32 n_bytes
Definition: interface_output.c:421
vlib_buffer_free_no_next
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:996
ixge_tx_no_wrap
static uword ixge_tx_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 *buffers, u32 start_descriptor_index, u32 n_descriptors, ixge_tx_state_t *tx_state)
Definition: ixge.c:1033
foreach_ixge_pci_device_id
@ foreach_ixge_pci_device_id
Definition: ixge.h:1198
ixge_tx_dma_trace_t::buffer
vlib_buffer_t buffer
Definition: ixge.c:858
ixge_dma_queue_t::rx
struct ixge_dma_queue_t::@819::@822 rx
ixge_tx_state_t::is_start_of_packet
u32 is_start_of_packet
Definition: ixge.c:902
ixge_tx_dma_trace_t::queue_index
u8 queue_index
Definition: ixge.c:853
vec_foreach
#define vec_foreach(var, vec)
Vector iterator.
Definition: vec_bootstrap.h:213
IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6
Definition: ixge.h:104
pci.h
vector.h
ixge_device_t::pci_dev_handle
vlib_pci_dev_handle_t pci_dev_handle
Definition: ixge.h:1211
for
for(i=1;i<=collision_buckets;i++)
Definition: flowhash_template.h:378
IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
Definition: ixge.h:133
n_left
u32 n_left
Definition: interface_output.c:1096
VLIB_NODE_TYPE_PROCESS
@ VLIB_NODE_TYPE_PROCESS
Definition: node.h:84
u32x4
unsigned long long u32x4
Definition: ixge.c:28
ixge_rx_dma_trace_t::buffer
vlib_buffer_t buffer
Definition: ixge.c:572
instance
u32 instance
Definition: gre.api:51
ixge_error_t
ixge_error_t
Definition: ixge.c:622
IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR
#define IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR
Definition: ixge.h:119
ixge_device_t::per_interface_next_index
u32 per_interface_next_index
Definition: ixge.h:1208
IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET
#define IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET(s)
Definition: ixge.h:108
ixge_main_t::n_bytes_in_rx_buffer
u32 n_bytes_in_rx_buffer
Definition: ixge.h:1252
ixge_rx_dma_trace_t::device_index
u16 device_index
Definition: ixge.c:565
ixge_dma_queue_t::tail_index
u32 tail_index
Definition: ixge.h:1128
VNET_DEVICE_CLASS
VNET_DEVICE_CLASS(ixge_device_class)
vlib_node_get_runtime
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:116
vec_resize
#define vec_resize(V, N)
Resize a vector (no header, unspecified alignment) Add N elements to end of given vector V,...
Definition: vec.h:296
ixge_rx_dma_trace_t::buffer_index
u32 buffer_index
Definition: ixge.c:563
ixge_tx_state_t::node
vlib_node_runtime_t * node
Definition: ixge.c:900
ixge_dma_queue_t::tx
struct ixge_dma_queue_t::@819::@821 tx
ixge_sfp_phy_init
static void ixge_sfp_phy_init(ixge_device_t *xd)
Definition: ixge.c:349
IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
#define IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
Definition: ixge.h:132
ixge_dma_queue_t
Definition: ixge.h:1119
IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2
Definition: ixge.h:100
now
f64 now
Definition: nat44_ei_out2in.c:710
format_ixge_device
static u8 * format_ixge_device(u8 *s, va_list *args)
Definition: ixge.c:2319
IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT
Definition: ixge.h:105
IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS
#define IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS
Definition: ixge.h:136
vnet_main
vnet_main_t vnet_main
Definition: misc.c:43
ixge_device_t::phys
ixge_phy_t phys[2]
Definition: ixge.h:1228
ixge_rx_next_and_error_from_status_x2
static void ixge_rx_next_and_error_from_status_x2(ixge_device_t *xd, u32 s00, u32 s02, u32 s10, u32 s12, u8 *next0, u8 *error0, u32 *flags0, u8 *next1, u8 *error1, u32 *flags1)
Definition: ixge.c:670
vlib_physmem_last_error
static clib_error_t * vlib_physmem_last_error(struct vlib_main_t *vm)
Definition: physmem_funcs.h:110
clib_memset
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
vlib_main_t
Definition: main.h:102
IXGE_N_BYTES_IN_RX_BUFFER
#define IXGE_N_BYTES_IN_RX_BUFFER
Definition: ixge.c:2444
vlib_node_t
Definition: node.h:247
vlib_add_trace
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
VLIB_INITS
#define VLIB_INITS(...)
Definition: init.h:352
ixge_rx_dma_trace_t
Definition: ixge.c:559
vlib_get_main
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
b
vlib_buffer_t ** b
Definition: nat44_ei_out2in.c:717
u8
unsigned char u8
Definition: types.h:56
clib_error_t
Definition: clib_error.h:21
vnet_hw_interface_set_flags
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:513
a
a
Definition: bitmap.h:525
unix.h
format_ixge_tx_dma_trace
static u8 * format_ixge_tx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:862
ixge_rx_dma_trace_t::after
ixge_descriptor_t after
Definition: ixge.c:561
rt
vnet_interface_output_runtime_t * rt
Definition: interface_output.c:419
ixge_interface_tx
static uword ixge_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:1197
ixge_tx_dma_trace_t::device_index
u16 device_index
Definition: ixge.c:851
vlib_init_function_t
clib_error_t *() vlib_init_function_t(struct vlib_main_t *vm)
Definition: init.h:51
ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
Definition: ethernet.h:162
vlib_buffer_t::data
u8 data[]
Packet data.
Definition: buffer.h:204
sfp_eeprom_t::id
u8 id
Definition: sfp.h:56
IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET
#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET
Definition: ixge.h:137
i
int i
Definition: flowhash_template.h:376
ixge_regs_t::enable_write_1_to_set
u32 enable_write_1_to_set
Definition: ixge.h:245
vlib_validate_buffer
u8 * vlib_validate_buffer(vlib_main_t *vm, u32 bi, uword follow_buffer_next)
Definition: buffer.c:254
ixge_descriptor_t
Definition: ixge.h:166
get_dma_regs
static ixge_dma_regs_t * get_dma_regs(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 qi)
Definition: ixge.c:310
vlib_pci_get_addr
vlib_pci_addr_t * vlib_pci_get_addr(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: pci.c:163
ixge_device_t::i2c_bus
i2c_bus_t i2c_bus
Definition: ixge.h:1233
clib_warning
#define clib_warning(format, args...)
Definition: error.h:59
format_ixge_device_id
static u8 * format_ixge_device_id(u8 *s, va_list *args)
Definition: ixge.c:2276
vlib_register_node
u32 vlib_register_node(vlib_main_t *vm, vlib_node_registration_t *r)
Definition: node.c:519
ixge_dma_queue_t::head_index
u32 head_index
Definition: ixge.h:1128
VLIB_BUFFER_KNOWN_ALLOCATED
@ VLIB_BUFFER_KNOWN_ALLOCATED
Definition: buffer_funcs.h:520
format_vlib_pci_link_speed
format_function_t format_vlib_pci_link_speed
Definition: pci.h:327
ELOG_DATA
#define ELOG_DATA(em, f)
Definition: elog.h:484
ixge_device_t::counters
u64 counters[IXGE_N_COUNTER]
Definition: ixge.h:1237
i2c_bus_t::put_bits
void(* put_bits)(struct i2c_bus_t *b, int scl, int sda)
Definition: i2c.h:35
vlib_time_now
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:327
vlib_pci_device_info::device_id
u16 device_id
Definition: pci.h:74
vlib_buffer_t::pre_data
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:201
vnet.h
ixge_i2c_put_bits
static void ixge_i2c_put_bits(i2c_bus_t *b, int scl, int sda)
Definition: ixge.c:171
vlib_node_runtime_t
Definition: node.h:454
ixge_dma_queue_t::n_descriptors
u32 n_descriptors
Definition: ixge.h:1125
vlib_trace_buffer
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:153
ixge_regs_t::rx_enable
u32 rx_enable
Definition: ixge.h:308
ixge_regs_t::i2c_control
u32 i2c_control
Definition: ixge.h:208
ixge_tx_state_t
Definition: ixge.c:898
from
from
Definition: nat44_ei_hairpinning.c:415
PREDICT_TRUE
#define PREDICT_TRUE(x)
Definition: clib.h:125
ixge_tx_descriptor_t
Definition: ixge.h:126
ixge_flag_change
static u32 ixge_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
Definition: ixge.c:2579
ixge_interrupt_rx_queue
static uword ixge_interrupt_rx_queue(uword i)
Definition: ixge.c:2110
vlib_get_next_frame
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
ixge_throttle_queue_interrupt
static void ixge_throttle_queue_interrupt(ixge_regs_t *r, u32 queue_interrupt_index, f64 inter_interrupt_interval_in_secs)
Definition: ixge.h:994
XGE_PHY_ID2
#define XGE_PHY_ID2
Definition: ixge.c:51
VLIB_TX
@ VLIB_TX
Definition: defs.h:47
ixge_regs_t::xge_mac
struct ixge_regs_t::@798 xge_mac
ixge_write_phy_reg
static void ixge_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v)
Definition: ixge.c:164
ixge_rx_next_t
ixge_rx_next_t
Definition: ixge.h:1273
ixge_error_strings
static char * ixge_error_strings[]
Definition: ixge.c:2194
ixge_semaphore_release
static void ixge_semaphore_release(ixge_device_t *xd)
Definition: ixge.c:82
EVENT_SET_FLAGS
#define EVENT_SET_FLAGS
Definition: ixge.c:42
ethernet_register_interface
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, const u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
Definition: interface.c:348
ixge_main_t::n_descriptors
u32 n_descriptors[VLIB_N_RX_TX]
Definition: ixge.h:1248
vlib_increment_combined_counter
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
format_white_space
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
vlib_buffer_t::flags
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
Definition: buffer.h:133
ixge_read_write_phy_reg
u32 ixge_read_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
Definition: ixge.c:120
format_vnet_sw_interface_name
format_function_t format_vnet_sw_interface_name
Definition: interface_funcs.h:456
vlib_buffer_t
VLIB buffer representation.
Definition: buffer.h:111
vnet_interface_output_runtime_t
Definition: interface_funcs.h:475
VLIB_REGISTER_NODE
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
Definition: node.h:294
ixge_device_t::vlib_sw_if_index
u32 vlib_sw_if_index
Definition: ixge.h:1222
ixge_pci_init
static clib_error_t * ixge_pci_init(vlib_main_t *vm, vlib_pci_dev_handle_t h)
Definition: ixge.c:2822
flags
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105