/*
 * ixge.c — Intel 82599 ("ixge") driver, FD.io VPP v17.04.2-2-ga8f93f8.
 *
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * WARNING!
 * This driver is not intended for production use and it is unsupported.
 * It is provided for educational use only.
 * Please use supported DPDK driver instead.
 */
#if __x86_64__
#include <vppinfra/vector.h>

#ifndef CLIB_HAVE_VEC128
#warning HACK: ixge driver wont really work, missing u32x4
typedef unsigned long long u32x4;
#endif

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/vnet.h>
#include <ixge/ixge.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>

#define IXGE_ALWAYS_POLL 0

#define EVENT_SET_FLAGS 0
#define IXGE_HWBP_RACE_ELOG 0

#define PCI_VENDOR_ID_INTEL 0x8086

/* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
#define XGE_PHY_DEV_TYPE_PMA_PMD 1
#define XGE_PHY_DEV_TYPE_PHY_XS 4
#define XGE_PHY_ID1 0x2
#define XGE_PHY_ID2 0x3
#define XGE_PHY_CONTROL 0x0
#define XGE_PHY_CONTROL_RESET (1 << 15)

58 
59 static void
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
80 
81 static void
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
88 static void
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
110 static void
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
117 }
118 
119 u32
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
136  x =
137  reg_index | (dev_type << 16) | (xd->
138  phys[xd->phy_index].mdio_address << 21);
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
195 static u16
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
207 static void
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
217 static void
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
265 
266 static void
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
283 
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
296 
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
308 
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
319 
320 static clib_error_t *
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
324  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
325  ixge_main_t *xm = &ixge_main;
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_unknown;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
415 
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
423  u32 instance, id, address;
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
434 
435  /* Wait for self-clearning reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
442 }
443 
444 static u8 *
446 {
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  uword indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
466  s = format (s, "layer2 error");
467 
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
484  s = format (s, " checksum %s",
486  "bad" : "ok");
487  }
488  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
504  s = format (s, ", tcp checksum %s",
506  "ok");
508  s = format (s, ", udp checksum %s",
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  uword indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
559 typedef struct
560 {
562 
564 
566 
568 
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
574 
575 static u8 *
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
585  uword indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U",
603  format_white_space, indent,
605 
606  s = format (s, "\n%U", format_white_space, indent);
607 
608  f = node->format_buffer;
609  if (!f || !t->is_start_of_packet)
610  f = format_hex_bytes;
611  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612 
613  return s;
614 }
615 
/* Error counters exported by this driver: _(symbol, description). */
#define foreach_ixge_error \
  _ (none, "no error") \
  _ (tx_full_drops, "tx ring full drops") \
  _ (ip4_checksum_error, "ip4 checksum errors") \
  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
622 
623 typedef enum
624 {
625 #define _(f,s) IXGE_ERROR_##f,
627 #undef _
629 } ixge_error_t;
630 
631 always_inline void
633  u32 s00, u32 s02,
634  u8 * next0, u8 * error0, u32 * flags0)
635 {
636  u8 is0_ip4, is0_ip6, n0, e0;
637  u32 f0;
638 
639  e0 = IXGE_ERROR_none;
641 
643  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644 
645  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
646  ? IXGE_ERROR_ip4_checksum_error : e0);
647 
648  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650 
651  n0 = (xd->per_interface_next_index != ~0) ?
652  xd->per_interface_next_index : n0;
653 
654  /* Check for error. */
655  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656 
660 
664 
665  *error0 = e0;
666  *next0 = n0;
667  *flags0 = f0;
668 }
669 
670 always_inline void
672  u32 s00, u32 s02,
673  u32 s10, u32 s12,
674  u8 * next0, u8 * error0, u32 * flags0,
675  u8 * next1, u8 * error1, u32 * flags1)
676 {
677  u8 is0_ip4, is0_ip6, n0, e0;
678  u8 is1_ip4, is1_ip6, n1, e1;
679  u32 f0, f1;
680 
681  e0 = e1 = IXGE_ERROR_none;
682  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683 
686 
687  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689 
690  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e0);
692  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
693  ? IXGE_ERROR_ip4_checksum_error : e1);
694 
695  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697 
698  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700 
701  n0 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n0;
703  n1 = (xd->per_interface_next_index != ~0) ?
704  xd->per_interface_next_index : n1;
705 
706  /* Check for error. */
707  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709 
710  *error0 = e0;
711  *error1 = e1;
712 
713  *next0 = n0;
714  *next1 = n1;
715 
722 
729 
730  *flags0 = f0;
731  *flags1 = f1;
732 }
733 
734 static void
736  ixge_device_t * xd,
737  ixge_dma_queue_t * dq,
738  ixge_descriptor_t * before_descriptors,
739  u32 * before_buffers,
740  ixge_descriptor_t * after_descriptors, uword n_descriptors)
741 {
742  vlib_main_t *vm = xm->vlib_main;
743  vlib_node_runtime_t *node = dq->rx.node;
746  u32 *b, n_left, is_sop, next_index_sop;
747 
748  n_left = n_descriptors;
749  b = before_buffers;
750  bd = &before_descriptors->rx_from_hw;
751  ad = &after_descriptors->rx_to_hw;
752  is_sop = dq->rx.is_start_of_packet;
753  next_index_sop = dq->rx.saved_start_of_packet_next_index;
754 
755  while (n_left >= 2)
756  {
757  u32 bi0, bi1, flags0, flags1;
758  vlib_buffer_t *b0, *b1;
759  ixge_rx_dma_trace_t *t0, *t1;
760  u8 next0, error0, next1, error1;
761 
762  bi0 = b[0];
763  bi1 = b[1];
764  n_left -= 2;
765 
766  b0 = vlib_get_buffer (vm, bi0);
767  b1 = vlib_get_buffer (vm, bi1);
768 
770  bd[0].status[0], bd[0].status[2],
771  bd[1].status[0], bd[1].status[2],
772  &next0, &error0, &flags0,
773  &next1, &error1, &flags1);
774 
775  next_index_sop = is_sop ? next0 : next_index_sop;
776  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
777  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
778  t0->is_start_of_packet = is_sop;
779  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
780 
781  next_index_sop = is_sop ? next1 : next_index_sop;
782  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
783  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
784  t1->is_start_of_packet = is_sop;
785  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
786 
787  t0->queue_index = dq->queue_index;
788  t1->queue_index = dq->queue_index;
789  t0->device_index = xd->device_index;
790  t1->device_index = xd->device_index;
791  t0->before.rx_from_hw = bd[0];
792  t1->before.rx_from_hw = bd[1];
793  t0->after.rx_to_hw = ad[0];
794  t1->after.rx_to_hw = ad[1];
795  t0->buffer_index = bi0;
796  t1->buffer_index = bi1;
797  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
798  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
799  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
800  sizeof (t0->buffer.pre_data));
801  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
802  sizeof (t1->buffer.pre_data));
803 
804  b += 2;
805  bd += 2;
806  ad += 2;
807  }
808 
809  while (n_left >= 1)
810  {
811  u32 bi0, flags0;
812  vlib_buffer_t *b0;
814  u8 next0, error0;
815 
816  bi0 = b[0];
817  n_left -= 1;
818 
819  b0 = vlib_get_buffer (vm, bi0);
820 
822  bd[0].status[0], bd[0].status[2],
823  &next0, &error0, &flags0);
824 
825  next_index_sop = is_sop ? next0 : next_index_sop;
826  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
827  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
828  t0->is_start_of_packet = is_sop;
829  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
830 
831  t0->queue_index = dq->queue_index;
832  t0->device_index = xd->device_index;
833  t0->before.rx_from_hw = bd[0];
834  t0->after.rx_to_hw = ad[0];
835  t0->buffer_index = bi0;
836  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
837  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
838  sizeof (t0->buffer.pre_data));
839 
840  b += 1;
841  bd += 1;
842  ad += 1;
843  }
844 }
845 
846 typedef struct
847 {
849 
851 
853 
855 
857 
858  /* Copy of VLIB buffer; packet data stored in pre_data. */
861 
862 static u8 *
863 format_ixge_tx_dma_trace (u8 * s, va_list * va)
864 {
865  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
867  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868  vnet_main_t *vnm = vnet_get_main ();
869  ixge_main_t *xm = &ixge_main;
872  uword indent = format_get_indent (s);
873 
874  {
875  vnet_sw_interface_t *sw =
877  s =
878  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879  t->queue_index);
880  }
881 
882  s = format (s, "\n%Udescriptor: %U",
883  format_white_space, indent,
885 
886  s = format (s, "\n%Ubuffer 0x%x: %U",
887  format_white_space, indent,
889 
890  s = format (s, "\n%U", format_white_space, indent);
891 
893  if (!f || !t->is_start_of_packet)
894  f = format_hex_bytes;
895  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896 
897  return s;
898 }
899 
900 typedef struct
901 {
903 
905 
907 
910 
911 static void
913  ixge_device_t * xd,
914  ixge_dma_queue_t * dq,
915  ixge_tx_state_t * tx_state,
916  ixge_tx_descriptor_t * descriptors,
917  u32 * buffers, uword n_descriptors)
918 {
919  vlib_main_t *vm = xm->vlib_main;
920  vlib_node_runtime_t *node = tx_state->node;
922  u32 *b, n_left, is_sop;
923 
924  n_left = n_descriptors;
925  b = buffers;
926  d = descriptors;
927  is_sop = tx_state->is_start_of_packet;
928 
929  while (n_left >= 2)
930  {
931  u32 bi0, bi1;
932  vlib_buffer_t *b0, *b1;
933  ixge_tx_dma_trace_t *t0, *t1;
934 
935  bi0 = b[0];
936  bi1 = b[1];
937  n_left -= 2;
938 
939  b0 = vlib_get_buffer (vm, bi0);
940  b1 = vlib_get_buffer (vm, bi1);
941 
942  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943  t0->is_start_of_packet = is_sop;
944  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945 
946  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947  t1->is_start_of_packet = is_sop;
948  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949 
950  t0->queue_index = dq->queue_index;
951  t1->queue_index = dq->queue_index;
952  t0->device_index = xd->device_index;
953  t1->device_index = xd->device_index;
954  t0->descriptor = d[0];
955  t1->descriptor = d[1];
956  t0->buffer_index = bi0;
957  t1->buffer_index = bi1;
958  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961  sizeof (t0->buffer.pre_data));
962  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963  sizeof (t1->buffer.pre_data));
964 
965  b += 2;
966  d += 2;
967  }
968 
969  while (n_left >= 1)
970  {
971  u32 bi0;
972  vlib_buffer_t *b0;
974 
975  bi0 = b[0];
976  n_left -= 1;
977 
978  b0 = vlib_get_buffer (vm, bi0);
979 
980  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981  t0->is_start_of_packet = is_sop;
982  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983 
984  t0->queue_index = dq->queue_index;
985  t0->device_index = xd->device_index;
986  t0->descriptor = d[0];
987  t0->buffer_index = bi0;
988  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990  sizeof (t0->buffer.pre_data));
991 
992  b += 1;
993  d += 1;
994  }
995 }
996 
999 {
1000  i32 d = i1 - i0;
1001  ASSERT (i0 < q->n_descriptors);
1002  ASSERT (i1 < q->n_descriptors);
1003  return d < 0 ? q->n_descriptors + d : d;
1004 }
1005 
1008 {
1009  u32 d = i0 + i1;
1010  ASSERT (i0 < q->n_descriptors);
1011  ASSERT (i1 < q->n_descriptors);
1012  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013  return d;
1014 }
1015 
1019 {
1020  u32 cmp;
1021 
1022  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1024  if (cmp)
1025  return 0;
1026  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1028  if (cmp)
1029  return 0;
1030 
1031  return 1;
1032 }
1033 
1034 static uword
1036  ixge_device_t * xd,
1037  ixge_dma_queue_t * dq,
1038  u32 * buffers,
1039  u32 start_descriptor_index,
1040  u32 n_descriptors, ixge_tx_state_t * tx_state)
1041 {
1042  vlib_main_t *vm = xm->vlib_main;
1043  ixge_tx_descriptor_t *d, *d_sop;
1044  u32 n_left = n_descriptors;
1045  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1046  u32 *to_tx =
1047  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1048  u32 is_sop = tx_state->is_start_of_packet;
1049  u32 len_sop = tx_state->n_bytes_in_packet;
1050  u16 template_status = xm->tx_descriptor_template.status0;
1051  u32 descriptor_prefetch_rotor = 0;
1052 
1053  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1054  d = &dq->descriptors[start_descriptor_index].tx;
1055  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1056 
1057  while (n_left >= 4)
1058  {
1059  vlib_buffer_t *b0, *b1;
1060  u32 bi0, fi0, len0;
1061  u32 bi1, fi1, len1;
1062  u8 is_eop0, is_eop1;
1063 
1064  /* Prefetch next iteration. */
1065  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1066  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1067 
1068  if ((descriptor_prefetch_rotor & 0x3) == 0)
1069  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1070 
1071  descriptor_prefetch_rotor += 2;
1072 
1073  bi0 = buffers[0];
1074  bi1 = buffers[1];
1075 
1076  to_free[0] = fi0 = to_tx[0];
1077  to_tx[0] = bi0;
1078  to_free += fi0 != 0;
1079 
1080  to_free[0] = fi1 = to_tx[1];
1081  to_tx[1] = bi1;
1082  to_free += fi1 != 0;
1083 
1084  buffers += 2;
1085  n_left -= 2;
1086  to_tx += 2;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  b1 = vlib_get_buffer (vm, bi1);
1090 
1091  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1093 
1094  len0 = b0->current_length;
1095  len1 = b1->current_length;
1096 
1099 
1100  d[0].buffer_address =
1102  d[1].buffer_address =
1104 
1105  d[0].n_bytes_this_buffer = len0;
1106  d[1].n_bytes_this_buffer = len1;
1107 
1108  d[0].status0 =
1109  template_status | (is_eop0 <<
1111  d[1].status0 =
1112  template_status | (is_eop1 <<
1114 
1115  len_sop = (is_sop ? 0 : len_sop) + len0;
1116  d_sop[0].status1 =
1118  d += 1;
1119  d_sop = is_eop0 ? d : d_sop;
1120 
1121  is_sop = is_eop0;
1122 
1123  len_sop = (is_sop ? 0 : len_sop) + len1;
1124  d_sop[0].status1 =
1126  d += 1;
1127  d_sop = is_eop1 ? d : d_sop;
1128 
1129  is_sop = is_eop1;
1130  }
1131 
1132  while (n_left > 0)
1133  {
1134  vlib_buffer_t *b0;
1135  u32 bi0, fi0, len0;
1136  u8 is_eop0;
1137 
1138  bi0 = buffers[0];
1139 
1140  to_free[0] = fi0 = to_tx[0];
1141  to_tx[0] = bi0;
1142  to_free += fi0 != 0;
1143 
1144  buffers += 1;
1145  n_left -= 1;
1146  to_tx += 1;
1147 
1148  b0 = vlib_get_buffer (vm, bi0);
1149 
1150  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1151 
1152  len0 = b0->current_length;
1153 
1155 
1156  d[0].buffer_address =
1158 
1159  d[0].n_bytes_this_buffer = len0;
1160 
1161  d[0].status0 =
1162  template_status | (is_eop0 <<
1164 
1165  len_sop = (is_sop ? 0 : len_sop) + len0;
1166  d_sop[0].status1 =
1168  d += 1;
1169  d_sop = is_eop0 ? d : d_sop;
1170 
1171  is_sop = is_eop0;
1172  }
1173 
1174  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1175  {
1176  to_tx =
1178  start_descriptor_index);
1179  ixge_tx_trace (xm, xd, dq, tx_state,
1180  &dq->descriptors[start_descriptor_index].tx, to_tx,
1181  n_descriptors);
1182  }
1183 
1184  _vec_len (xm->tx_buffers_pending_free) =
1185  to_free - xm->tx_buffers_pending_free;
1186 
1187  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1188  {
1189  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1190 
1191  ASSERT (d_sop - d_start <= dq->n_descriptors);
1192  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1193  }
1194 
1195  tx_state->is_start_of_packet = is_sop;
1196  tx_state->start_of_packet_descriptor = d_sop;
1197  tx_state->n_bytes_in_packet = len_sop;
1198 
1199  return n_descriptors;
1200 }
1201 
1202 static uword
1204  vlib_node_runtime_t * node, vlib_frame_t * f)
1205 {
1206  ixge_main_t *xm = &ixge_main;
1207  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1209  ixge_dma_queue_t *dq;
1210  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1211  u32 queue_index = 0; /* fixme parameter */
1212  ixge_tx_state_t tx_state;
1213 
1214  tx_state.node = node;
1215  tx_state.is_start_of_packet = 1;
1216  tx_state.start_of_packet_descriptor = 0;
1217  tx_state.n_bytes_in_packet = 0;
1218 
1219  from = vlib_frame_vector_args (f);
1220 
1221  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1222 
1223  dq->head_index = dq->tx.head_index_write_back[0];
1224 
1225  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1226  n_left_tx = dq->n_descriptors - 1;
1227  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1228 
1229  _vec_len (xm->tx_buffers_pending_free) = 0;
1230 
1231  n_descriptors_to_tx = f->n_vectors;
1232  n_tail_drop = 0;
1233  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1234  {
1235  i32 i, n_ok, i_eop, i_sop;
1236 
1237  i_sop = i_eop = ~0;
1238  for (i = n_left_tx - 1; i >= 0; i--)
1239  {
1240  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1241  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1242  {
1243  if (i_sop != ~0 && i_eop != ~0)
1244  break;
1245  i_eop = i;
1246  i_sop = i + 1;
1247  }
1248  }
1249  if (i == 0)
1250  n_ok = 0;
1251  else
1252  n_ok = i_eop + 1;
1253 
1254  {
1255  ELOG_TYPE_DECLARE (e) =
1256  {
1257  .function = (char *) __FUNCTION__,.format =
1258  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1259  "i2i2i2i2",};
1260  struct
1261  {
1262  u16 instance, to_tx, head, tail;
1263  } *ed;
1264  ed = ELOG_DATA (&vm->elog_main, e);
1265  ed->instance = xd->device_index;
1266  ed->to_tx = n_descriptors_to_tx;
1267  ed->head = dq->head_index;
1268  ed->tail = dq->tail_index;
1269  }
1270 
1271  if (n_ok < n_descriptors_to_tx)
1272  {
1273  n_tail_drop = n_descriptors_to_tx - n_ok;
1274  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1275  vlib_error_count (vm, ixge_input_node.index,
1276  IXGE_ERROR_tx_full_drops, n_tail_drop);
1277  }
1278 
1279  n_descriptors_to_tx = n_ok;
1280  }
1281 
1282  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1283 
1284  /* Process from tail to end of descriptor ring. */
1285  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1286  {
1287  u32 n =
1288  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1289  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1290  from += n;
1291  n_descriptors_to_tx -= n;
1292  dq->tail_index += n;
1293  ASSERT (dq->tail_index <= dq->n_descriptors);
1294  if (dq->tail_index == dq->n_descriptors)
1295  dq->tail_index = 0;
1296  }
1297 
1298  if (n_descriptors_to_tx > 0)
1299  {
1300  u32 n =
1301  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1302  from += n;
1303  ASSERT (n == n_descriptors_to_tx);
1304  dq->tail_index += n;
1305  ASSERT (dq->tail_index <= dq->n_descriptors);
1306  if (dq->tail_index == dq->n_descriptors)
1307  dq->tail_index = 0;
1308  }
1309 
1310  /* We should only get full packets. */
1311  ASSERT (tx_state.is_start_of_packet);
1312 
1313  /* Report status when last descriptor is done. */
1314  {
1315  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1316  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1318  }
1319 
1320  /* Give new descriptors to hardware. */
1321  {
1322  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1323 
1325 
1326  dr->tail_index = dq->tail_index;
1327  }
1328 
1329  /* Free any buffers that are done. */
1330  {
1331  u32 n = _vec_len (xm->tx_buffers_pending_free);
1332  if (n > 0)
1333  {
1335  _vec_len (xm->tx_buffers_pending_free) = 0;
1336  ASSERT (dq->tx.n_buffers_on_ring >= n);
1337  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1338  }
1339  }
1340 
1341  return f->n_vectors;
1342 }
1343 
/* RX worker: process up to N_DESCRIPTORS receive descriptors starting at
   START_DESCRIPTOR_INDEX, never wrapping past the end of the ring.
   Completed packets are enqueued to next nodes; each consumed descriptor
   is refilled with a fresh buffer and handed back to hardware.  Returns
   the number of complete packets seen.  Packet state (start-of-packet,
   chained-buffer tail, per-packet next index) is saved in dq->rx between
   calls so multi-descriptor packets can span calls.
   NOTE(review): this text was recovered from a doxygen rendering; the
   function name line and several identifier-bearing lines were stripped
   (presumably ixge_rx_queue_no_wrap) -- verify against the original
   ixge.c before relying on the exact statements marked below.  */
1344 static uword
1346 				       ixge_device_t * xd,
1347 				       ixge_dma_queue_t * dq,
1348 				       u32 start_descriptor_index, u32 n_descriptors)
1349 {
1350   vlib_main_t *vm = xm->vlib_main;
1351   vlib_node_runtime_t *node = dq->rx.node;
1352   ixge_descriptor_t *d;
   /* Static scratch vectors reused across calls for packet tracing. */
1353   static ixge_descriptor_t *d_trace_save;
1354   static u32 *d_trace_buffers;
1355   u32 n_descriptors_left = n_descriptors;
1356   u32 *to_rx =
1357     vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1358   u32 *to_add;
   /* Resume state saved by the previous call (packet may straddle calls). */
1359   u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1360   u32 bi_last = dq->rx.saved_last_buffer_index;
1361   u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1362   u32 is_sop = dq->rx.is_start_of_packet;
1363   u32 next_index, n_left_to_next, *to_next;
1364   u32 n_packets = 0;
1365   u32 n_bytes = 0;
1366   u32 n_trace = vlib_get_trace_count (vm, node);
1367   vlib_buffer_t *b_last, b_dummy;
1368 
1369   ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1370   d = &dq->descriptors[start_descriptor_index];
1371 
1372   b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
1373   next_index = dq->rx.next_index;
1374 
   /* Snapshot descriptors + buffer indices before we overwrite them,
      so tracing at the end can see pre-refill contents. */
1375   if (n_trace > 0)
1376     {
1377       u32 n = clib_min (n_trace, n_descriptors);
1378       if (d_trace_save)
1379 	{
1380 	  _vec_len (d_trace_save) = 0;
1381 	  _vec_len (d_trace_buffers) = 0;
1382 	}
1383       vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1384       vec_add (d_trace_buffers, to_rx, n);
1385     }
1386 
   /* Make sure the replacement-buffer cache holds enough free buffers to
      refill every descriptor we may consume this call. */
1387   {
1388     uword l = vec_len (xm->rx_buffers_to_add);
1389 
1390     if (l < n_descriptors_left)
1391       {
1392 	u32 n_to_alloc = 2 * dq->n_descriptors - l;
1393 	u32 n_allocated;
1394 
1395 	vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1396 
1397 	_vec_len (xm->rx_buffers_to_add) = l;
1398 	n_allocated = vlib_buffer_alloc_from_free_list
1399 	  (vm, xm->rx_buffers_to_add + l, n_to_alloc,
1401 	_vec_len (xm->rx_buffers_to_add) += n_allocated;
1402 
1403 	/* Handle transient allocation failure */
1404 	if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
1405 	  {
1406 	    if (n_allocated == 0)
1407 	      vlib_error_count (vm, ixge_input_node.index,
1408 				IXGE_ERROR_rx_alloc_no_physmem, 1);
1409 	    else
1410 	      vlib_error_count (vm, ixge_input_node.index,
1411 				IXGE_ERROR_rx_alloc_fail, 1);
1412 
   /* Shrink the amount of work to what we can actually refill. */
1413 	    n_descriptors_left = l + n_allocated;
1414 	  }
1415 	n_descriptors = n_descriptors_left;
1416       }
1417 
1418     /* Add buffers from end of vector going backwards. */
1419     to_add = vec_end (xm->rx_buffers_to_add) - 1;
1420   }
1421 
1422   while (n_descriptors_left > 0)
1423     {
1424       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1425 
   /* Dual loop: handle two descriptors per iteration with prefetch.
      NOTE(review): the hw-ownership test below (line 1449/1450) lost its
      condition expression in extraction -- it checks whether hardware
      still owns either descriptor.  */
1426       while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1427 	{
1428 	  vlib_buffer_t *b0, *b1;
1429 	  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1430 	  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1431 	  u8 is_eop0, error0, next0;
1432 	  u8 is_eop1, error1, next1;
1433 	  ixge_descriptor_t d0, d1;
1434 
1435 	  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1436 	  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1437 
1438 	  CLIB_PREFETCH (d + 2, 32, STORE);
1439 
1440 	  d0.as_u32x4 = d[0].as_u32x4;
1441 	  d1.as_u32x4 = d[1].as_u32x4;
1442 
1443 	  s20 = d0.rx_from_hw.status[2];
1444 	  s21 = d1.rx_from_hw.status[2];
1445 
1446 	  s00 = d0.rx_from_hw.status[0];
1447 	  s01 = d1.rx_from_hw.status[0];
1448 
1449 	  if (!
1451 	    goto found_hw_owned_descriptor_x2;
1452 
   /* Swap the received buffer out of the ring and refill the slot with a
      fresh buffer index taken from the back of rx_buffers_to_add. */
1453 	  bi0 = to_rx[0];
1454 	  bi1 = to_rx[1];
1455 
1456 	  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1457 	  fi0 = to_add[0];
1458 	  fi1 = to_add[-1];
1459 
1460 	  to_rx[0] = fi0;
1461 	  to_rx[1] = fi1;
1462 	  to_rx += 2;
1463 	  to_add -= 2;
1464 
1466 		  vlib_buffer_is_known (vm, bi0));
1468 		  vlib_buffer_is_known (vm, bi1));
1470 		  vlib_buffer_is_known (vm, fi0));
1472 		  vlib_buffer_is_known (vm, fi1));
1473 
1474 	  b0 = vlib_get_buffer (vm, bi0);
1475 	  b1 = vlib_get_buffer (vm, bi1);
1476 
1477 	  /*
1478 	   * Turn this on if you run into
1479 	   * "bad monkey" contexts, and you want to know exactly
1480 	   * which nodes they've visited... See main.c...
1481 	   */
1484 
1487 
1488 	  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1489 	  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1490 
1491 	  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1492 						 &next0, &error0, &flags0,
1493 						 &next1, &error1, &flags1);
1494 
   /* Continuation descriptors inherit the next index chosen at SOP. */
1495 	  next0 = is_sop ? next0 : next_index_sop;
1496 	  next1 = is_eop0 ? next1 : next0;
1497 	  next_index_sop = next1;
1498 
1499 	  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1500 	  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1501 
1502 	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1503 	  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1504 	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1505 	  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1506 
1507 	  b0->error = node->errors[error0];
1508 	  b1->error = node->errors[error1];
1509 
   /* NOTE(review): the len0/len1 extraction from the descriptors
      (lines 1510-1511) was stripped here.  */
1512 	  n_bytes += len0 + len1;
1513 	  n_packets += is_eop0 + is_eop1;
1514 
1515 	  /* Give new buffers to hardware. */
1516 	  d0.rx_to_hw.tail_address =
1518 	  d1.rx_to_hw.tail_address =
1522 	  d[0].as_u32x4 = d0.as_u32x4;
1523 	  d[1].as_u32x4 = d1.as_u32x4;
1524 
1525 	  d += 2;
1526 	  n_descriptors_left -= 2;
1527 
1528 	  /* Point to either l2 or l3 header depending on next. */
1529 	  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1531 	  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
1533 
1534 	  b0->current_length = len0 - l3_offset0;
1535 	  b1->current_length = len1 - l3_offset1;
1536 	  b0->current_data = l3_offset0;
1537 	  b1->current_data = l3_offset1;
1538 
   /* Maintain the buffer chain for multi-descriptor packets. */
1539 	  b_last->next_buffer = is_sop ? ~0 : bi0;
1540 	  b0->next_buffer = is_eop0 ? ~0 : bi1;
1541 	  bi_last = bi1;
1542 	  b_last = b1;
1543 
1544 	  if (CLIB_DEBUG > 0)
1545 	    {
1546 	      u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1547 	      u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1548 
1549 	      if (is_eop0)
1550 		{
1551 		  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1552 						  /* follow_buffer_next */ 1);
1553 		  ASSERT (!msg);
1554 		}
1555 	      if (is_eop1)
1556 		{
1557 		  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1558 						  /* follow_buffer_next */ 1);
1559 		  ASSERT (!msg);
1560 		}
1561 	    }
   /* Two alternate enqueue strategies follow; only the "Eliot" one is
      compiled in (if (1)), the "Dave" one is kept for reference.  */
1562 	  if (0)		/* "Dave" version */
1563 	    {
1564 	      u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1565 	      u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1566 
1567 	      if (is_eop0)
1568 		{
1569 		  to_next[0] = bi_sop0;
1570 		  to_next++;
1571 		  n_left_to_next--;
1572 
1573 		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1574 						   to_next, n_left_to_next,
1575 						   bi_sop0, next0);
1576 		}
1577 	      if (is_eop1)
1578 		{
1579 		  to_next[0] = bi_sop1;
1580 		  to_next++;
1581 		  n_left_to_next--;
1582 
1583 		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1584 						   to_next, n_left_to_next,
1585 						   bi_sop1, next1);
1586 		}
1587 	      is_sop = is_eop1;
1588 	      bi_sop = bi_sop1;
1589 	    }
1590 	  if (1)		/* "Eliot" version */
1591 	    {
1592 	      /* Speculatively enqueue to cached next. */
1593 	      u8 saved_is_sop = is_sop;
1594 	      u32 bi_sop_save = bi_sop;
1595 
1596 	      bi_sop = saved_is_sop ? bi0 : bi_sop;
1597 	      to_next[0] = bi_sop;
1598 	      to_next += is_eop0;
1599 	      n_left_to_next -= is_eop0;
1600 
1601 	      bi_sop = is_eop0 ? bi1 : bi_sop;
1602 	      to_next[0] = bi_sop;
1603 	      to_next += is_eop1;
1604 	      n_left_to_next -= is_eop1;
1605 
1606 	      is_sop = is_eop1;
1607 
1608 	      if (PREDICT_FALSE
1609 		  (!(next0 == next_index && next1 == next_index)))
1610 		{
1611 		  /* Undo speculation. */
1612 		  to_next -= is_eop0 + is_eop1;
1613 		  n_left_to_next += is_eop0 + is_eop1;
1614 
1615 		  /* Re-do both descriptors being careful about where we enqueue. */
1616 		  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1617 		  if (is_eop0)
1618 		    {
1619 		      if (next0 != next_index)
1620 			vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1621 		      else
1622 			{
1623 			  to_next[0] = bi_sop;
1624 			  to_next += 1;
1625 			  n_left_to_next -= 1;
1626 			}
1627 		    }
1628 
1629 		  bi_sop = is_eop0 ? bi1 : bi_sop;
1630 		  if (is_eop1)
1631 		    {
1632 		      if (next1 != next_index)
1633 			vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1634 		      else
1635 			{
1636 			  to_next[0] = bi_sop;
1637 			  to_next += 1;
1638 			  n_left_to_next -= 1;
1639 			}
1640 		    }
1641 
1642 		  /* Switch cached next index when next for both packets is the same. */
1643 		  if (is_eop0 && is_eop1 && next0 == next1)
1644 		    {
1645 		      vlib_put_next_frame (vm, node, next_index,
1646 					   n_left_to_next);
1647 		      next_index = next0;
1648 		      vlib_get_next_frame (vm, node, next_index,
1649 					   to_next, n_left_to_next);
1650 		    }
1651 		}
1652 	    }
1653 	}
1654 
1655       /* Bail out of dual loop and proceed with single loop. */
1656     found_hw_owned_descriptor_x2:
1657 
   /* Single loop: one descriptor at a time; same logic as above.
      NOTE(review): the ownership test before the goto (line 1670) was
      stripped in extraction.  */
1658       while (n_descriptors_left > 0 && n_left_to_next > 0)
1659 	{
1660 	  vlib_buffer_t *b0;
1661 	  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1662 	  u8 is_eop0, error0, next0;
1663 	  ixge_descriptor_t d0;
1664 
1665 	  d0.as_u32x4 = d[0].as_u32x4;
1666 
1667 	  s20 = d0.rx_from_hw.status[2];
1668 	  s00 = d0.rx_from_hw.status[0];
1669 
1671 	    goto found_hw_owned_descriptor_x1;
1672 
1673 	  bi0 = to_rx[0];
1674 	  ASSERT (to_add >= xm->rx_buffers_to_add);
1675 	  fi0 = to_add[0];
1676 
1677 	  to_rx[0] = fi0;
1678 	  to_rx += 1;
1679 	  to_add -= 1;
1680 
1682 		  vlib_buffer_is_known (vm, bi0));
1684 		  vlib_buffer_is_known (vm, fi0));
1685 
1686 	  b0 = vlib_get_buffer (vm, bi0);
1687 
1688 	  /*
1689 	   * Turn this on if you run into
1690 	   * "bad monkey" contexts, and you want to know exactly
1691 	   * which nodes they've visited...
1692 	   */
1694 
1695 	  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1697 	    (xd, s00, s20, &next0, &error0, &flags0);
1698 
1699 	  next0 = is_sop ? next0 : next_index_sop;
1700 	  next_index_sop = next0;
1701 
1702 	  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1703 
1704 	  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1705 	  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1706 
1707 	  b0->error = node->errors[error0];
1708 
1710 	  n_bytes += len0;
1711 	  n_packets += is_eop0;
1712 
1713 	  /* Give new buffer to hardware. */
1714 	  d0.rx_to_hw.tail_address =
1717 	  d[0].as_u32x4 = d0.as_u32x4;
1718 
1719 	  d += 1;
1720 	  n_descriptors_left -= 1;
1721 
1722 	  /* Point to either l2 or l3 header depending on next. */
1723 	  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1725 	  b0->current_length = len0 - l3_offset0;
1726 	  b0->current_data = l3_offset0;
1727 
1728 	  b_last->next_buffer = is_sop ? ~0 : bi0;
1729 	  bi_last = bi0;
1730 	  b_last = b0;
1731 
1732 	  bi_sop = is_sop ? bi0 : bi_sop;
1733 
1734 	  if (CLIB_DEBUG > 0 && is_eop0)
1735 	    {
1736 	      u8 *msg =
1737 		vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1738 	      ASSERT (!msg);
1739 	    }
1740 
1741 	  if (0)		/* "Dave" version */
1742 	    {
1743 	      if (is_eop0)
1744 		{
1745 		  to_next[0] = bi_sop;
1746 		  to_next++;
1747 		  n_left_to_next--;
1748 
1749 		  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1750 						   to_next, n_left_to_next,
1751 						   bi_sop, next0);
1752 		}
1753 	    }
1754 	  if (1)		/* "Eliot" version */
1755 	    {
1756 	      if (PREDICT_TRUE (next0 == next_index))
1757 		{
1758 		  to_next[0] = bi_sop;
1759 		  to_next += is_eop0;
1760 		  n_left_to_next -= is_eop0;
1761 		}
1762 	      else
1763 		{
1764 		  if (next0 != next_index && is_eop0)
1765 		    vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1766 
1767 		  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1768 		  next_index = next0;
1769 		  vlib_get_next_frame (vm, node, next_index,
1770 				       to_next, n_left_to_next);
1771 		}
1772 	    }
1773 	  is_sop = is_eop0;
1774 	}
1775       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1776     }
1777 
1778 found_hw_owned_descriptor_x1:
1779   if (n_descriptors_left > 0)
1780     vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1781 
   /* Shrink the refill vector by however many buffers we consumed. */
1782   _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1783 
   /* Final bookkeeping: trace, detach b_last when a packet completed,
      and persist resume state into dq->rx for the next call. */
1784   {
1785     u32 n_done = n_descriptors - n_descriptors_left;
1786 
1787     if (n_trace > 0 && n_done > 0)
1788       {
1789 	u32 n = clib_min (n_trace, n_done);
1790 	ixge_rx_trace (xm, xd, dq,
1791 		       d_trace_save,
1792 		       d_trace_buffers,
1793 		       &dq->descriptors[start_descriptor_index], n);
1794 	vlib_set_trace_count (vm, node, n_trace - n);
1795       }
1796     if (d_trace_save)
1797       {
1798 	_vec_len (d_trace_save) = 0;
1799 	_vec_len (d_trace_buffers) = 0;
1800       }
1801 
1802     /* Don't keep a reference to b_last if we don't have to.
1803        Otherwise we can over-write a next_buffer pointer after already haven
1804        enqueued a packet. */
1805     if (is_sop)
1806       {
1807 	b_last->next_buffer = ~0;
1808 	bi_last = ~0;
1809       }
1810 
1811     dq->rx.n_descriptors_done_this_call = n_done;
1812     dq->rx.n_descriptors_done_total += n_done;
1813     dq->rx.is_start_of_packet = is_sop;
1814     dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1815     dq->rx.saved_last_buffer_index = bi_last;
1816     dq->rx.saved_start_of_packet_next_index = next_index_sop;
1817     dq->rx.next_index = next_index;
1818     dq->rx.n_bytes += n_bytes;
1819 
1820     return n_packets;
1821   }
1822 }
1823 
/* Per-queue RX dispatch (presumably ixge_rx_queue; the name line was
   stripped by doxygen extraction): reads the hardware head pointer,
   processes descriptors between the software head and the hardware head
   in at most two no-wrap spans, advances the ring indices, returns the
   tail to hardware and bumps interface RX counters.  Returns the number
   of packets received.  */
1824 static uword
1826 	       ixge_device_t * xd,
1827 	       vlib_node_runtime_t * node, u32 queue_index)
1828 {
1829   ixge_dma_queue_t *dq =
1830     vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
1832   uword n_packets = 0;
1833   u32 hw_head_index, sw_head_index;
1834 
1835   /* One time initialization. */
1836   if (!dq->rx.node)
1837     {
1838       dq->rx.node = node;
1839       dq->rx.is_start_of_packet = 1;
1840       dq->rx.saved_start_of_packet_buffer_index = ~0;
1841       dq->rx.saved_last_buffer_index = ~0;
1842     }
1843 
1844   dq->rx.next_index = node->cached_next_index;
1845 
1846   dq->rx.n_descriptors_done_total = 0;
1847   dq->rx.n_descriptors_done_this_call = 0;
1848   dq->rx.n_bytes = 0;
1849 
1850   /* Fetch head from hardware and compare to where we think we are. */
   /* NOTE(review): the declaration/assignment of dr (dma regs for this
      queue, line 1831) was stripped above.  */
1851   hw_head_index = dr->head_index;
1852   sw_head_index = dq->head_index;
1853 
1854   if (hw_head_index == sw_head_index)
1855     goto done;
1856 
   /* Hardware head wrapped: first process from sw head to ring end... */
1857   if (hw_head_index < sw_head_index)
1858     {
1859       u32 n_tried = dq->n_descriptors - sw_head_index;
1860       n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1861       sw_head_index =
1862 	ixge_ring_add (dq, sw_head_index,
1863 		       dq->rx.n_descriptors_done_this_call);
1864 
   /* Stopped early (hw-owned descriptor or buffer shortage): bail. */
1865       if (dq->rx.n_descriptors_done_this_call != n_tried)
1866 	goto done;
1867     }
   /* ...then (or in the simple case) process up to the hardware head. */
1868   if (hw_head_index >= sw_head_index)
1869     {
1870       u32 n_tried = hw_head_index - sw_head_index;
1871       n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1872       sw_head_index =
1873 	ixge_ring_add (dq, sw_head_index,
1874 		       dq->rx.n_descriptors_done_this_call);
1875     }
1876 
1877 done:
1878   dq->head_index = sw_head_index;
1879   dq->tail_index =
1880     ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
1881 
1882   /* Give tail back to hardware. */
   /* NOTE(review): a memory-barrier call (line 1883) was stripped here. */
1884 
1885   dr->tail_index = dq->tail_index;
1886 
   /* Credit packets/bytes to the interface's combined RX counter
      (lines 1887/1889 lost the vlib_increment_combined_counter call and
      its first arguments in extraction).  */
1888 					 interface_main.combined_sw_if_counters +
1890 					 0 /* cpu_index */ ,
1891 					 xd->vlib_sw_if_index, n_packets,
1892 					 dq->rx.n_bytes);
1893 
1894   return n_packets;
1895 }
1896 
/* Non-queue interrupt handler (presumably ixge_interrupt (xm, xd, i);
   the signature line was stripped by extraction).  Interrupt bits 16..31
   are miscellaneous events; bit 20 is link status change.  Everything is
   recorded in the event log; a link change additionally signals the
   process node so the interface state gets updated.  */
1897 static void
1899 {
1900   vlib_main_t *vm = xm->vlib_main;
1901   ixge_regs_t *r = xd->regs;
1902 
   /* All events except link-status-change (bit 20): elog only. */
1903   if (i != 20)
1904     {
1905       ELOG_TYPE_DECLARE (e) =
1906       {
1907 	.function = (char *) __FUNCTION__,.format =
1908 	  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
1909 	  16,.enum_strings =
1910 	{
1911 	  "flow director",
1912 	    "rx miss",
1913 	    "pci exception",
1914 	    "mailbox",
1915 	    "link status change",
1916 	    "linksec key exchange",
1917 	    "manageability event",
1918 	    "reserved23",
1919 	    "sdp0",
1920 	    "sdp1",
1921 	    "sdp2",
1922 	    "sdp3",
1923 	    "ecc", "descriptor handler error", "tcp timer", "other",},};
1924       struct
1925       {
1926 	u8 instance;
1927 	u8 index;
1928       } *ed;
1929       ed = ELOG_DATA (&vm->elog_main, e);
1930       ed->instance = xd->device_index;
   /* Enum strings above are indexed from interrupt bit 16. */
1931       ed->index = i - 16;
1932     }
1933   else
1934     {
   /* Link status change: bit 30 of the MAC link status register is
      link-up.  */
1935       u32 v = r->xge_mac.link_status;
1936       uword is_up = (v & (1 << 30)) != 0;
1937 
1938       ELOG_TYPE_DECLARE (e) =
1939       {
1940 	.function = (char *) __FUNCTION__,.format =
1941 	  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
1942       struct
1943       {
1944 	u32 instance, link_status;
1945       } *ed;
1946       ed = ELOG_DATA (&vm->elog_main, e);
1947       ed->instance = xd->device_index;
1948       ed->link_status = v;
   /* NOTE(review): lines 1949/1951-1952 were stripped -- they record the
      pending link state and signal the ixge process with the hw interface
      index, with bit 31 carrying the up/down flag (see the argument
      expression below).  */
1950 
1953 				 ((is_up << 31) | xd->vlib_hw_if_index));
1954     }
1955 }
1956 
1958 clean_block (u32 * b, u32 * t, u32 n_left)
1959 {
1960  u32 *t0 = t;
1961 
1962  while (n_left >= 4)
1963  {
1964  u32 bi0, bi1, bi2, bi3;
1965 
1966  t[0] = bi0 = b[0];
1967  b[0] = 0;
1968  t += bi0 != 0;
1969 
1970  t[0] = bi1 = b[1];
1971  b[1] = 0;
1972  t += bi1 != 0;
1973 
1974  t[0] = bi2 = b[2];
1975  b[2] = 0;
1976  t += bi2 != 0;
1977 
1978  t[0] = bi3 = b[3];
1979  b[3] = 0;
1980  t += bi3 != 0;
1981 
1982  b += 4;
1983  n_left -= 4;
1984  }
1985 
1986  while (n_left > 0)
1987  {
1988  u32 bi0;
1989 
1990  t[0] = bi0 = b[0];
1991  b[0] = 0;
1992  t += bi0 != 0;
1993  b += 1;
1994  n_left -= 1;
1995  }
1996 
1997  return t - t0;
1998 }
1999 
/* TX cleanup for one queue: read the head-index write-back location to
   find how far hardware has progressed, then free the buffers attached
   to every descriptor hardware has finished with.  */
1999 
2000 static void
2001 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
2002 {
2003   vlib_main_t *vm = xm->vlib_main;
2004   ixge_dma_queue_t *dq =
2005     vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
2006   u32 n_clean, *b, *t, *t0;
2007   i32 n_hw_owned_descriptors;
2008   i32 first_to_clean, last_to_clean;
2009   u64 hwbp_race = 0;
2010 
2011   /* Handle case where head write back pointer update
2012    * arrives after the interrupt during high PCI bus loads.
2013    */
   /* Spin until the write-back head moves (or nothing is outstanding);
      count the retries so the race can be elogged below.  */
2014   while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2015 	 dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
2016     {
2017       hwbp_race++;
2018       if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
2019 	{
2020 	  ELOG_TYPE_DECLARE (e) =
2021 	  {
2022 	    .function = (char *) __FUNCTION__,.format =
2023 	      "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
2024 	      = "i4i4i4i4",};
2025 	  struct
2026 	  {
2027 	    u32 instance, head_index, tail_index, n_buffers_on_ring;
2028 	  } *ed;
2029 	  ed = ELOG_DATA (&vm->elog_main, e);
2030 	  ed->instance = xd->device_index;
2031 	  ed->head_index = dq->head_index;
2032 	  ed->tail_index = dq->tail_index;
2033 	  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2034 	}
2035     }
2036 
2037   dq->head_index = dq->tx.head_index_write_back[0];
2038   n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
2039   ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
2040   n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2041 
2042   if (IXGE_HWBP_RACE_ELOG && hwbp_race)
2043     {
2044       ELOG_TYPE_DECLARE (e) =
2045       {
2046 	.function = (char *) __FUNCTION__,.format =
2047 	  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
2048 	  = "i4i4i4i4i4",};
2049       struct
2050       {
2051 	u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2052       } *ed;
2053       ed = ELOG_DATA (&vm->elog_main, e);
2054       ed->instance = xd->device_index;
2055       ed->head_index = dq->head_index;
2056       ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2057       ed->n_clean = n_clean;
2058       ed->retries = hwbp_race;
2059     }
2060 
2061   /*
2062    * This function used to wait until hardware owned zero descriptors.
2063    * At high PPS rates, that doesn't happen until the TX ring is
2064    * completely full of descriptors which need to be cleaned up.
2065    * That, in turn, causes TX ring-full drops and/or long RX service
2066    * interruptions.
2067    */
2068   if (n_clean == 0)
2069     return;
2070 
2071   /* Clean the n_clean descriptors prior to the reported hardware head */
2072   last_to_clean = dq->head_index - 1;
2073   last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
2074     last_to_clean;
2075 
2076   first_to_clean = (last_to_clean) - (n_clean - 1);
2077   first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
2078     first_to_clean;
2079 
   /* NOTE(review): line 2080 (vec_validate of tx_buffers_pending_free)
      was stripped by extraction.  */
2081   t0 = t = xm->tx_buffers_pending_free;
2082   b = dq->descriptor_buffer_indices + first_to_clean;
2083 
2084   /* Wrap case: clean from first to end, then start to last */
2085   if (first_to_clean > last_to_clean)
2086     {
2087       t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
2088       first_to_clean = 0;
2089       b = dq->descriptor_buffer_indices;
2090     }
2091 
2092   /* Typical case: clean from first to last */
2093   if (first_to_clean <= last_to_clean)
2094     t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
2095 
2096   if (t > t0)
2097     {
2098       u32 n = t - t0;
2099       vlib_buffer_free_no_next (vm, t0, n);
2100       ASSERT (dq->tx.n_buffers_on_ring >= n);
2101       dq->tx.n_buffers_on_ring -= n;
2102       _vec_len (xm->tx_buffers_pending_free) = 0;
2103     }
2104 }
2105 
2106 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
/* NOTE(review): the six helper signatures below were stripped by the
   doxygen extraction.  From the call sites further down they are,
   in order: ixge_interrupt_is_rx_queue, ixge_interrupt_is_tx_queue,
   ixge_interrupt_tx_queue, ixge_interrupt_rx_queue, and the inverse
   mappings from interrupt bit back to rx/tx queue index -- all
   always_inline uword f (uword i).  Verify against the original ixge.c.  */
/* Interrupt bit i carries an RX queue iff i in [0, 8). */
2109 {
2110   return i < 8;
2111 }
2112 
/* Interrupt bit i carries a TX queue iff i in [8, 16). */
2115 {
2116   return i >= 8 && i < 16;
2117 }
2118 
/* TX queue i -> interrupt bit. */
2121 {
2122   return 8 + i;
2123 }
2124 
/* RX queue i -> interrupt bit. */
2127 {
2128   return 0 + i;
2129 }
2130 
/* Interrupt bit i -> RX queue index (asserts stripped at line 2134). */
2133 {
2135   return i - 0;
2136 }
2137 
/* Interrupt bit i -> TX queue index (asserts stripped at line 2141). */
2140 {
2142   return i - 8;
2143 }
2144 
/* Per-device input (presumably ixge_device_input; the name line was
   stripped): read-and-clear the interrupt status register, then for
   each set bit dispatch to the RX queue handler, the TX cleanup, or the
   miscellaneous interrupt handler.  Returns total RX packets.  */
2145 static uword
2147 		   ixge_device_t * xd, vlib_node_runtime_t * node)
2148 {
2149   ixge_regs_t *r = xd->regs;
2150   u32 i, s;
2151   uword n_rx_packets = 0;
2152 
   /* NOTE(review): line 2153 (read of the interrupt status register into
      s) and line 2155 (write-1-to-clear acknowledgement) were stripped.  */
2154   if (s)
2156 
2157   /* *INDENT-OFF* */
2158   foreach_set_bit (i, s, ({
   /* NOTE(review): line 2159 "if (ixge_interrupt_is_rx_queue (i))" was
      stripped here.  */
2160     n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
2161 
2162     else if (ixge_interrupt_is_tx_queue (i))
2163       ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
2164 
2165     else
2166       ixge_interrupt (xm, xd, i);
2167   }));
2168   /* *INDENT-ON* */
2169 
2170   return n_rx_packets;
2171 }
2172 
/* Input node function (presumably ixge_input; the signature line with
   vm/node/frame parameters was stripped).  In interrupt mode, services
   only the devices whose bit is set in runtime_data[0]; in polling mode,
   services every device.  Re-enables device interrupts as appropriate
   for the mode being (or about to be) in effect.  */
2173 static uword
2175 {
2176   ixge_main_t *xm = &ixge_main;
2177   ixge_device_t *xd;
2178   uword n_rx_packets = 0;
2179 
2180   if (node->state == VLIB_NODE_STATE_INTERRUPT)
2181     {
2182       uword i;
2183 
2184       /* Loop over devices with interrupts. */
2185       /* *INDENT-OFF* */
2186       foreach_set_bit (i, node->runtime_data[0], ({
2187 	xd = vec_elt_at_index (xm->devices, i);
2188 	n_rx_packets += ixge_device_input (xm, xd, node);
2189 
2190 	/* Re-enable interrupts since we're going to stay in interrupt mode. */
2191 	if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2192 	  xd->regs->interrupt.enable_write_1_to_set = ~0;
2193       }));
2194       /* *INDENT-ON* */
2195 
2196       /* Clear mask of devices with pending interrupts. */
2197       node->runtime_data[0] = 0;
2198     }
2199   else
2200     {
2201       /* Poll all devices for input/interrupts. */
2202       vec_foreach (xd, xm->devices)
2203       {
2204 	n_rx_packets += ixge_device_input (xm, xd, node);
2205 
2206 	/* Re-enable interrupts when switching out of polling mode. */
2207 	if (node->flags &
   /* NOTE(review): lines 2208-2209 (flag test continuation and the
      interrupt enable write) were stripped by extraction.  */
2210       }
2211     }
2212 
2213   return n_rx_packets;
2214 }
2215 
/* Error string table for the ixge input node, generated from the error
   list macro (the foreach invocation on line 2218 was stripped).  */
2216 static char *ixge_error_strings[] = {
2217 #define _(n,s) s,
2219 #undef _
2220 };
2221 
2222 /* *INDENT-OFF* */
/* Register "ixge-input" as a (initially disabled) input node; enabled
   only when supported hardware is detected.  */
2223 VLIB_REGISTER_NODE (ixge_input_node, static) = {
2224   .function = ixge_input,
2225   .type = VLIB_NODE_TYPE_INPUT,
2226   .name = "ixge-input",
2227 
2228   /* Will be enabled if/when hardware is detected. */
2229   .state = VLIB_NODE_STATE_DISABLED,
2230 
2231   .format_buffer = format_ethernet_header_with_length,
2232   .format_trace = format_ixge_rx_dma_trace,
2233 
2234   .n_errors = IXGE_N_ERROR,
2235   .error_strings = ixge_error_strings,
2236 
2237   .n_next_nodes = IXGE_RX_N_NEXT,
2238   .next_nodes = {
2239     [IXGE_RX_NEXT_DROP] = "error-drop",
2240     [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
2241     [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
2242     [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
2243   },
2244 };
2245 
/* NOTE(review): line 2246-2247 (VLIB_NODE_FUNCTION_MULTIARCH for
   ixge_input) was stripped by extraction.  */
2248 /* *INDENT-ON* */
2249 
/* Format the interface name "TenGigabitEthernetX/Y/Z" for device
   instance I (the PCI-address format arguments on line 2257 were
   stripped by extraction).  */
2250 static u8 *
2251 format_ixge_device_name (u8 * s, va_list * args)
2252 {
2253   u32 i = va_arg (*args, u32);
2254   ixge_main_t *xm = &ixge_main;
2255   ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
2256   return format (s, "TenGigabitEthernet%U",
2258 }
2259 
/* Per-counter attribute flags: 64-bit counters occupy two consecutive
   32-bit registers; some registers are not clear-on-read and must be
   cleared explicitly.  */
2260 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2261 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2262 
/* NOTE(review): the array declaration line (2263, presumably
   static u8 ixge_counter_flags[] = {) and the foreach counter macro
   invocation (2266) were stripped by extraction.  */
2264 #define _(a,f) 0,
2265 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2267 #undef _
2268 #undef _64
2269 };
2270 
/* Accumulate hardware statistics registers into xd->counters
   (presumably ixge_update_counters (ixge_device_t *xd); the signature
   line 2272 was stripped).  */
2271 static void
2273 {
2274   /* Byte offset for counter registers. */
2275   static u32 reg_offsets[] = {
2276 #define _(a,f) (a) / sizeof (u32),
2277 #define _64(a,f) _(a,f)
2279 #undef _
2280 #undef _64
2281   };
2282   volatile u32 *r = (volatile u32 *) xd->regs;
2283   int i;
2284 
2285   for (i = 0; i < ARRAY_LEN (xd->counters); i++)
2286     {
2287       u32 o = reg_offsets[i];
2288       xd->counters[i] += r[o];
   /* NOTE(review): the flag tests on lines 2289 and 2291 were stripped:
      clear registers that are not clear-on-read, and fold in the high
      32 bits for 64-bit counters.  */
2290 	r[o] = 0;
2292 	xd->counters[i] += (u64) r[o + 1] << (u64) 32;
2293     }
2294 }
2295 
/* Format a PCI device id as its symbolic name, falling back to
   "unknown 0x%x".  The case list is generated from the device-id macro
   (the foreach invocation on line 2304 was stripped).  */
2296 static u8 *
2297 format_ixge_device_id (u8 * s, va_list * args)
2298 {
2299   u32 device_id = va_arg (*args, u32);
2300   char *t = 0;
2301   switch (device_id)
2302     {
2303 #define _(f,n) case n: t = #f; break;
2305 #undef _
2306     default:
2307       t = 0;
2308       break;
2309     }
2310   if (t == 0)
2311     s = format (s, "unknown 0x%x", device_id);
2312   else
2313     s = format (s, "%s", t);
2314   return s;
2315 }
2316 
/* Format the device link state as "up/down, mode ..., speed ..." from
   the MAC link status register: bit 30 = link up, bits 26-27 = mode,
   bits 28-29 = speed.  */
2317 static u8 *
2318 format_ixge_link_status (u8 * s, va_list * args)
2319 {
2320   ixge_device_t *xd = va_arg (*args, ixge_device_t *);
   /* NOTE(review): line 2321 (u32 v = xd->link_status_at_last_link_change
      or similar read of the saved link status) was stripped.  */
2322 
2323   s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2324 
2325   {
2326     char *modes[] = {
2327       "1g", "10g parallel", "10g serial", "autoneg",
2328     };
2329     char *speeds[] = {
2330       "unknown", "100m", "1g", "10g",
2331     };
2332     s = format (s, ", mode %s, speed %s",
2333 		modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
2334   }
2335 
2336   return s;
2337 }
2338 
/* Format a full device report for "show hardware": id, link status,
   PCIe status, PHY/SFP info, ring occupancy, and all non-zero hardware
   counters (relative to the last counter clear).  */
2339 static u8 *
2340 format_ixge_device (u8 * s, va_list * args)
2341 {
2342   u32 dev_instance = va_arg (*args, u32);
2343   CLIB_UNUSED (int verbose) = va_arg (*args, int);
2344   ixge_main_t *xm = &ixge_main;
2345   ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
2346   ixge_phy_t *phy = xd->phys + xd->phy_index;
2347   uword indent = format_get_indent (s);
2348 
2349   ixge_update_counters (xd);
   /* NOTE(review): line 2350 (saving the current link status into xd)
      was stripped by extraction.  */
2351 
2352   s = format (s, "Intel 8259X: id %U\n%Ulink %U",
   /* NOTE(review): line 2353 (format_ixge_device_id + device id args)
      was stripped.  */
2354 	      format_white_space, indent + 2, format_ixge_link_status, xd);
2355 
2356   {
   /* NOTE(review): line 2357 (lookup of the vlib PCI device) was
      stripped; line 2359 held the format_vlib_pci_link_speed args.  */
2358     s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
2360   }
2361 
2362   s = format (s, "\n%U", format_white_space, indent + 2);
2363   if (phy->mdio_address != ~0)
2364     s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
2365   else if (xd->sfp_eeprom.id == SFP_ID_sfp)
2366     s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
2367   else
2368     s = format (s, "PHY not found");
2369 
2370   /* FIXME */
2371   {
   /* NOTE(review): line 2372 (dq = RX queue 0 lookup) was stripped.  */
2373     ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
2374     u32 hw_head_index = dr->head_index;
2375     u32 sw_head_index = dq->head_index;
2376     u32 nitems;
2377 
2378     nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
2379     s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
2380 		format_white_space, indent + 2, nitems, dq->n_descriptors);
2381 
2382     s = format (s, "\n%U%d buffers in driver rx cache",
2383 		format_white_space, indent + 2,
2384 		vec_len (xm->rx_buffers_to_add));
2385 
2386     s = format (s, "\n%U%d buffers on tx queue 0 ring",
2387 		format_white_space, indent + 2,
2388 		xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
2389   }
2390   {
2391     u32 i;
2392     u64 v;
2393     static char *names[] = {
2394 #define _(a,f) #f,
2395 #define _64(a,f) _(a,f)
2397 #undef _
2398 #undef _64
2399     };
2400 
2401     for (i = 0; i < ARRAY_LEN (names); i++)
2402       {
   /* Show each counter net of the last "clear counters" snapshot. */
2403 	v = xd->counters[i] - xd->counters_last_clear[i];
2404 	if (v != 0)
2405 	  s = format (s, "\n%U%-40U%16Ld",
2406 		      format_white_space, indent + 2,
2407 		      format_c_identifier, names[i], v);
2408       }
2409   }
2410 
2411   return s;
2412 }
2413 
/* "clear counters" hook (presumably ixge_clear_hw_interface_counters
   (u32 instance); the name line was stripped): latch the current
   hardware counters as the new zero baseline.  */
2414 static void
2416 {
2417   ixge_main_t *xm = &ixge_main;
2418   ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
2419   ixge_update_counters (xd);
2420   memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2421 }
2422 
2423 /*
2424  * Dynamically redirect all pkts from a specific interface
2425  * to the specified node
2426  */
/* NOTE(review): the first signature line (2428, presumably
   ixge_set_interface_next_node (vnet_main_t *vnm, u32 hw_if_index,)
   was stripped by extraction.  */
2427 static void
2429 			     u32 node_index)
2430 {
2431   ixge_main_t *xm = &ixge_main;
2432   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   /* NOTE(review): line 2433 (xd = device lookup from hw->dev_instance)
      was stripped.  */
2434 
2435   /* Shut off redirection */
2436   if (node_index == ~0)
2437     {
2438       xd->per_interface_next_index = node_index;
2439       return;
2440     }
2441 
   /* Register NODE_INDEX as a next of ixge-input and cache the slot
      (the "xd->per_interface_next_index =" start of line 2442 was
      stripped).  */
2443     vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2444 }
2445 
2446 
2447 /* *INDENT-OFF* */
/* Device class registration: wires the ixge TX function, format and
   counter hooks into vnet.  NOTE(review): the
   VNET_DEVICE_CLASS (ixge_device_class) = { line (2448) was stripped
   by extraction.  */
2449   .name = "ixge",
2450   .tx_function = ixge_interface_tx,
2451   .format_device_name = format_ixge_device_name,
2452   .format_device = format_ixge_device,
2453   .format_tx_trace = format_ixge_tx_dma_trace,
2454   .clear_counters = ixge_clear_hw_interface_counters,
2455   .admin_up_down_function = ixge_interface_admin_up_down,
2456   .rx_redirect_to_node = ixge_set_interface_next_node,
2457   .flatten_output_chains = 1,
2458 };
2459 /* *INDENT-ON* */
2460 
2461 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2462 
/* Initialize one DMA queue (presumably ixge_dma_init (ixge_device_t *xd,
   vlib_rx_or_tx_t rt, u32 queue_index); the name line was stripped):
   allocate descriptor ring memory, pre-populate RX descriptors with
   buffers or TX descriptors with the template, then program the
   per-queue DMA registers and enable the queue.  */
2463 static clib_error_t *
2465 {
2466   ixge_main_t *xm = &ixge_main;
2467   vlib_main_t *vm = xm->vlib_main;
2468   ixge_dma_queue_t *dq;
2469   clib_error_t *error = 0;
2470 
2471   vec_validate (xd->dma_queues[rt], queue_index);
2472   dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2473 
   /* NOTE(review): lines 2474-2475 (n_descriptors_per_cache_line
      computation start) were stripped here.  */
2476     CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
2477 
   /* Lazily establish defaults for RX buffer size, the dedicated RX
      free list, and the ring sizes (several assignment lines in this
      section -- 2479-2480, 2483-2486 -- were stripped).  */
2478   if (!xm->n_bytes_in_rx_buffer)
2481   if (!xm->vlib_buffer_free_list_index)
2482     {
2485 	 "ixge rx");
2487     }
2488 
2489   if (!xm->n_descriptors[rt])
2490     xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2491 
2492   dq->queue_index = queue_index;
2493   dq->n_descriptors =
2495   dq->head_index = dq->tail_index = 0;
2496 
   /* Descriptor ring must live in physmem (DMA-able) and be 128-byte
      aligned per the 82599 spec.  */
2497   dq->descriptors = vlib_physmem_alloc_aligned (vm, &error,
2498 						dq->n_descriptors *
2499 						sizeof (dq->descriptors[0]),
2500 						128 /* per chip spec */ );
2501   if (error)
2502     return error;
2503 
2504   memset (dq->descriptors, 0,
2505 	  dq->n_descriptors * sizeof (dq->descriptors[0]));
2507 
2508   if (rt == VLIB_RX)
2509     {
   /* Allocate one buffer per RX descriptor and point each descriptor at
      its buffer's physical address (allocation call and loop body lines
      2512-2515, 2520-2522 were stripped).  */
2510       u32 n_alloc, i;
2511 
2513 	(vm, dq->descriptor_buffer_indices,
2516       ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2517       for (i = 0; i < n_alloc; i++)
2518 	{
2519 	  vlib_buffer_t *b =
2523 	}
2524     }
2525   else
2526     {
   /* TX: allocate the head-index write-back word and seed every
      descriptor from the template.  */
2527       u32 i;
2528 
2529       dq->tx.head_index_write_back =
2531 
2532       for (i = 0; i < dq->n_descriptors; i++)
2533 	dq->descriptors[i].tx = xm->tx_descriptor_template;
2534 
2536     }
2537 
2538   {
2539     ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
2540     u64 a;
2541 
   /* NOTE(review): line 2542 (a = physical address of the descriptor
      ring) was stripped.  */
2543     dr->descriptor_address[0] = a & 0xFFFFFFFF;
2544     dr->descriptor_address[1] = a >> (u64) 32;
2545     dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2546     dq->head_index = dq->tail_index = 0;
2547 
2548     if (rt == VLIB_RX)
2549       {
2550 	ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2551 	dr->rx_split_control =
2552 	  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2553 	   | ( /* lo free descriptor threshold (units of 64 descriptors) */
2554 	      (1 << 22)) | ( /* descriptor type: advanced one buffer */
2555 			     (1 << 25)) | ( /* drop if no descriptors available */
2556 					    (1 << 28)));
2557 
2558 	/* Give hardware all but last 16 cache lines' worth of descriptors. */
2559 	dq->tail_index = dq->n_descriptors -
2561       }
2562     else
2563       {
2564 	/* Make sure its initialized before hardware can get to it. */
2565 	dq->tx.head_index_write_back[0] = dq->head_index;
2566 
2567 	a =
2568 	  vlib_physmem_virtual_to_physical (vm, dq->tx.head_index_write_back);
2569 	dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2570 	dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2571       }
2572 
2573     /* DMA on 82599 does not work with [13] rx data write relaxed ordering
2574        and [12] undocumented set. */
2575     if (rt == VLIB_RX)
2576       dr->dca_control &= ~((1 << 13) | (1 << 12));
2577 
   /* NOTE(review): line 2578 (a memory barrier before enabling) was
      stripped here.  */
2579 
2580     if (rt == VLIB_TX)
2581       {
2582 	xd->regs->tx_dma_control |= (1 << 0);
2583 	dr->control |= ((32 << 0)	/* prefetch threshold */
2584 			| (64 << 8)	/* host threshold */
2585 			| (0 << 16)	/* writeback threshold */ );
2586       }
2587 
2588     /* Enable this queue and wait for hardware to initialize
2589        before adding to tail. */
2590     if (rt == VLIB_TX)
2591       {
2592 	dr->control |= 1 << 25;
2593 	while (!(dr->control & (1 << 25)))
2594 	  ;
2595       }
2596 
2597     /* Set head/tail indices and enable DMA. */
2598     dr->head_index = dq->head_index;
2599     dr->tail_index = dq->tail_index;
2600   }
2601 
2602   return error;
2603 }
2604 
/* ixge_flag_change: vnet flag-change callback for the device class.
   Toggles the "unicast promiscuous" bit (1 << 9) in the chip's
   filter_control register and returns the register's previous value.
   NOTE(review): this listing is a doxygen extraction with interior source
   lines missing -- the signature line (2606) and the `if` condition line
   (2618) that selects between the set/clear branches below are absent.
   Consult the original ixge.c before editing. */
2605 static u32
2607 {
2608  ixge_device_t *xd;
2609  ixge_regs_t *r;
2610  u32 old;
2611  ixge_main_t *xm = &ixge_main;
2612 
2613  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2614  r = xd->regs;
2615 
2616  old = r->filter_control;
2617 
/* NOTE(review): the `if (...)` guarding this pair is on missing line 2618;
   presumably it tests the requested promiscuous flag -- verify upstream. */
2619  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2620  else
2621  r->filter_control = old & ~(1 << 9);
2622 
2623  return old;
2624 }
2625 
/* ixge_device_init: one-time bring-up of every discovered 82599 device.
   For each device: soft-resets the chip, marks firmware "software loaded",
   initializes the PHY, reads the burned-in MAC address out of
   rx_ethernet_address0 and registers an ethernet interface, sets up RX and
   TX DMA queue 0, maps both queues onto interrupt bits 0 and 8, opens the
   MAC filter for multicast/broadcast, raises the max frame size, and
   (unless IXGE_ALWAYS_POLL) enables interrupts.
   NOTE(review): doxygen extraction dropped interior lines 2627 (signature),
   2660/2663 (the ethernet_register_interface call head/tail), 2670, 2676,
   2696 and 2711 -- do not edit without the original source. */
2626 static void
2628 {
2629  vnet_main_t *vnm = vnet_get_main ();
2630  ixge_device_t *xd;
2631 
2632  /* Reset chip(s). */
2633  vec_foreach (xd, xm->devices)
2634  {
2635  ixge_regs_t *r = xd->regs;
2636  const u32 reset_bit = (1 << 26) | (1 << 3);
2637 
2638  r->control |= reset_bit;
2639 
2640  /* No need to suspend. Timed to take ~1e-6 secs */
2641  while (r->control & reset_bit)
2642  ;
2643 
2644  /* Software loaded. */
2645  r->extended_control |= (1 << 28);
2646 
2647  ixge_phy_init (xd);
2648 
2649  /* Register ethernet interface. */
2650  {
2651  u8 addr8[6];
2652  u32 i, addr32[2];
2653  clib_error_t *error;
2654 
2655  addr32[0] = r->rx_ethernet_address0[0][0];
2656  addr32[1] = r->rx_ethernet_address0[0][1];
/* Unpack the 48-bit MAC from two little-endian 32-bit registers. */
2657  for (i = 0; i < 6; i++)
2658  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2659 
/* NOTE(review): the `error = ethernet_register_interface` call head is on
   missing line 2660; its final arguments are on missing line 2663. */
2661  (vnm, ixge_device_class.index, xd->device_index,
2662  /* ethernet address */ addr8,
2664  if (error)
2665  clib_error_report (error);
2666  }
2667 
2668  {
2669  vnet_sw_interface_t *sw =
2671  xd->vlib_sw_if_index = sw->sw_if_index;
2672  }
2673 
2674  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2675 
2677 
2678  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2679 
2680  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2681  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2682  ixge_rx_queue_to_interrupt (0)) << 0);
2683 
2684  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2685  ixge_tx_queue_to_interrupt (0)) << 8);
2686 
2687  /* No use in getting too many interrupts.
2688  Limit them to one every 3/4 ring size at line rate
2689  min sized packets.
2690  No need for this since kernel/vlib main loop provides adequate interrupt
2691  limiting scheme. */
2692  if (0)
2693  {
/* 10 Gb/s over (64-byte minimum frame + 20 bytes interframe overhead). */
2694  f64 line_rate_max_pps =
2695  10e9 / (8 * (64 + /* interframe padding */ 20));
2697  .75 * xm->n_descriptors[VLIB_RX] /
2698  line_rate_max_pps);
2699  }
2700 
2701  /* Accept all multicast and broadcast packets. Should really add them
2702  to the dst_ethernet_address register array. */
2703  r->filter_control |= (1 << 10) | (1 << 8);
2704 
2705  /* Enable frames up to size in mac frame size register. */
2706  r->xge_mac.control |= 1 << 2;
2707  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2708 
2709  /* Enable all interrupts. */
2710  if (!IXGE_ALWAYS_POLL)
2712  }
2713 }
2714 
/* ixge_process: vlib process-node loop for the driver.
   Initializes all devices, zeroes their counters, then loops forever:
   waits for events (EVENT_SET_FLAGS switches to a 1 ms link-debounce
   poll), reads xge_mac.link_status bit 30 on timeout to push link
   up/down state into vnet, and refreshes hardware stats every 30 s
   (the 36-bit counters would overflow in roughly 50 s at line rate).
   NOTE(review): doxygen extraction dropped interior lines 2716
   (signature), 2740 (presumably the vlib_process_wait_for_event_or_clock
   call -- confirm upstream) and 2762 (the vnet_hw_interface_set_flags
   call head). */
2715 static uword
2717 {
2718  vnet_main_t *vnm = vnet_get_main ();
2719  ixge_main_t *xm = &ixge_main;
2720  ixge_device_t *xd;
2721  uword event_type, *event_data = 0;
2722  f64 timeout, link_debounce_deadline;
2723 
2724  ixge_device_init (xm);
2725 
2726  /* Clear all counters. */
2727  vec_foreach (xd, xm->devices)
2728  {
2729  ixge_update_counters (xd);
2730  memset (xd->counters, 0, sizeof (xd->counters));
2731  }
2732 
2733  timeout = 30.0;
2734  link_debounce_deadline = 1e70;
2735 
2736  while (1)
2737  {
2738  /* 36 bit stat counters could overflow in ~50 secs.
2739  We poll every 30 secs to be conservative. */
2741 
2742  event_type = vlib_process_get_events (vm, &event_data);
2743 
2744  switch (event_type)
2745  {
2746  case EVENT_SET_FLAGS:
2747  /* 1 ms */
2748  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2749  timeout = 1e-3;
2750  break;
2751 
2752  case ~0:
2753  /* No events found: timer expired. */
2754  if (vlib_time_now (vm) > link_debounce_deadline)
2755  {
2756  vec_foreach (xd, xm->devices)
2757  {
2758  ixge_regs_t *r = xd->regs;
2759  u32 v = r->xge_mac.link_status;
2760  uword is_up = (v & (1 << 30)) != 0;
2761 
/* NOTE(review): the call head (vnet_hw_interface_set_flags) is on
   missing line 2762. */
2763  (vnm, xd->vlib_hw_if_index,
2764  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2765  }
/* Debounce done: fall back to the slow 30 s stats cadence. */
2766  link_debounce_deadline = 1e70;
2767  timeout = 30.0;
2768  }
2769  break;
2770 
2771  default:
2772  ASSERT (0);
2773  }
2774 
2775  if (event_data)
2776  _vec_len (event_data) = 0;
2777 
2778  /* Query stats every 30 secs. */
2779  {
2780  f64 now = vlib_time_now (vm);
2781  if (now - xm->time_last_stats_update > 30)
2782  {
2783  xm->time_last_stats_update = now;
2784  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2785  }
2786  }
2787  }
2788 
2789  return 0;
2790 }
2791 
2793  .function = ixge_process,
2794  .type = VLIB_NODE_TYPE_PROCESS,
2795  .name = "ixge-process",
2796 };
2797 
/* ixge_init: plugin init function.
   Records the vlib main pointer, zeroes the TX descriptor template and its
   mask, sets the mask's status fields (status0 = 0xffff,
   status1 = 0x00003fff), then chains to pci_bus_init.
   NOTE(review): doxygen extraction dropped the signature line (2799) and
   interior lines 2809-2812 and 2816-2820 (template field setup between the
   memsets and the mask assignments) -- consult the original source. */
2798 clib_error_t *
2800 {
2801  ixge_main_t *xm = &ixge_main;
2802  clib_error_t *error;
2803 
2804  xm->vlib_main = vm;
2805  memset (&xm->tx_descriptor_template, 0,
2806  sizeof (xm->tx_descriptor_template));
2807  memset (&xm->tx_descriptor_template_mask, 0,
2808  sizeof (xm->tx_descriptor_template_mask));
2813  xm->tx_descriptor_template_mask.status0 = 0xffff;
2814  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2815 
2821 
2822  error = vlib_call_init_function (vm, pci_bus_init);
2823 
2824  return error;
2825 }
2826 
2828 
2829 
/* ixge_pci_intr_handler: PCI interrupt callback.
   Marks the interrupting device in the input node's runtime_data bitmask
   (bit index = dev->private_data, set to the device index at probe time).
   NOTE(review): doxygen extraction dropped the signature line (2831),
   line 2836 (presumably vlib_node_set_interrupt_pending on the input
   node -- confirm upstream) and line 2841 (the vlib_node_get_runtime
   argument line). */
2830 static void
2832 {
2833  ixge_main_t *xm = &ixge_main;
2834  vlib_main_t *vm = xm->vlib_main;
2835 
2837 
2838  /* Let node know which device is interrupting. */
2839  {
2840  vlib_node_runtime_t *rt =
2842  rt->runtime_data[0] |= 1 << dev->private_data;
2843  }
2844 }
2845 
/* ixge_pci_init: PCI probe callback for a supported 82599 device.
   Verifies real physical memory is available for DMA, maps BAR 0 for the
   register window, allocates a device slot, records PCI identity, hooks
   the input node (multiarch-selected on the first device), sets the node's
   polling/interrupt state, then enables bus mastering and interrupts.
   NOTE(review): doxygen extraction dropped the signature line (2847),
   line 2870 (a device-id field assignment), lines 2879-2880 (the
   vlib_node_set_state call head) and 2889-2890 (first-device-only setup).
   Consult the original source before editing. */
2846 static clib_error_t *
2848 {
2849  ixge_main_t *xm = &ixge_main;
2850  clib_error_t *error;
2851  void *r;
2852  ixge_device_t *xd;
2853 
2854  /* Device found: make sure we have dma memory. */
2855  if (unix_physmem_is_fake (vm))
2856  return clib_error_return (0, "no physical memory available");
2857 
2858  error = vlib_pci_map_resource (dev, 0, &r);
2859  if (error)
2860  return error;
2861 
2862  vec_add2 (xm->devices, xd, 1);
2863 
2864  if (vec_len (xm->devices) == 1)
2865  {
2866  ixge_input_node.function = ixge_input_multiarch_select ();
2867  }
2868 
2869  xd->pci_device = dev[0];
2871  xd->regs = r;
2872  xd->device_index = xd - xm->devices;
2873  xd->pci_function = dev->bus_address.function;
2874  xd->per_interface_next_index = ~0;
2875 
2876 
2877  /* Chip found so enable node. */
2878  {
2881  ? VLIB_NODE_STATE_POLLING
2882  : VLIB_NODE_STATE_INTERRUPT));
2883 
/* Remember the device index so the interrupt handler can find us. */
2884  dev->private_data = xd->device_index;
2885  }
2886 
2887  if (vec_len (xm->devices) == 1)
2888  {
2891  }
2892 
2893  error = vlib_pci_bus_master_enable (dev);
2894 
2895  if (error)
2896  return error;
2897 
2898  return vlib_pci_intr_enable (dev);
2899 }
2900 
/* PCI device registration: probe + interrupt handlers plus the Intel
   device-id table.
   NOTE(review): line 2907, the foreach macro invocation that expands `_`
   into the supported device-id entries, is missing from this extraction. */
2901 /* *INDENT-OFF* */
2902 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2903  .init_function = ixge_pci_init,
2904  .interrupt_handler = ixge_pci_intr_handler,
2905  .supported_devices = {
2906 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2908 #undef _
2909  { 0 },
2910  },
2911 };
2912 /* *INDENT-ON* */
2913 
/* ixge_set_next_node: external hook letting other code rename one of the
   input node's next-node edges for a given ixge_rx_next_t index; unknown
   indices are rejected with a warning.
   NOTE(review): doxygen extraction dropped the signature line (2915),
   line 2917 (presumably fetching the node registration pointer `r`) and
   the case labels on lines 2921-2923 -- consult the original source. */
2914 void
2916 {
2918 
2919  switch (next)
2920  {
2924  r->next_nodes[next] = name;
2925  break;
2926 
2927  default:
2928  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2929  break;
2930  }
2931 }
2932 #endif
2933 
/* Plugin registration: disabled by default (experimental, unsupported
   educational driver -- see the warning at the top of the file). */
2934 /* *INDENT-OFF* */
2935 VLIB_PLUGIN_REGISTER () = {
2936  .version = VPP_BUILD_VER,
2937  .default_disabled = 1,
2938  .description = "Intel 82599 Family Native Driver (experimental)",
2939 };
2940 
2941 /* *INDENT-ON* */
2942 /*
2943  * fd.io coding-style-patch-verification: ON
2944  *
2945  * Local Variables:
2946  * eval: (c-set-style "gnu")
2947  * End:
2948  */
u32 mdio_address
Definition: ixge.h:1113
static void ixge_update_counters(ixge_device_t *xd)
Definition: ixge.c:2272
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:436
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP
Definition: ixge.h:107
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
Definition: ixge.h:138
u32 process_node_index
Definition: ixge.h:1258
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT
Definition: ixge.h:105
static clib_error_t * ixge_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: ixge.c:321
#define IXGE_RX_DESCRIPTOR_STATUS2_ETHERNET_ERROR
Definition: ixge.h:119
sll srl srl sll sra u16x4 i
Definition: vector_sse2.h:343
static void ixge_pci_intr_handler(vlib_pci_device_t *dev)
Definition: ixge.c:2831
#define clib_min(x, y)
Definition: clib.h:332
static void ixge_software_firmware_sync_release(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:111
#define CLIB_UNUSED(x)
Definition: clib.h:79
ixge_rx_to_hw_descriptor_t rx_to_hw
Definition: ixge.h:168
static void ixge_phy_init(ixge_device_t *xd)
Definition: ixge.c:376
static u64 vlib_physmem_virtual_to_physical(vlib_main_t *vm, void *mem)
Definition: buffer_funcs.h:461
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: interface.c:530
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:143
ixge_tx_descriptor_t tx_descriptor_template
Definition: ixge.h:1261
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:683
u32 enable_write_1_to_set
Definition: ixge.h:245
a
Definition: bitmap.h:516
u32 n_descriptor_bytes
Definition: ixge.h:30
static u32 ixge_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
Definition: ixge.c:2606
#define IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
Definition: ixge.h:120
u8 runtime_data[0]
Function dependent node-runtime data.
Definition: node.h:463
u32 vlib_hw_if_index
Definition: ixge.h:1222
u32 n_descriptors[VLIB_N_RX_TX]
Definition: ixge.h:1248
u32 link_status_at_last_link_change
Definition: ixge.h:1231
static void vlib_set_next_frame_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 buffer_index)
Definition: node_funcs.h:383
u32 head_index
Definition: ixge.h:42
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
static void * vlib_physmem_alloc(vlib_main_t *vm, clib_error_t **error, uword n_bytes)
Definition: buffer_funcs.h:448
static void ixge_semaphore_release(ixge_device_t *xd)
Definition: ixge.c:82
#define PREDICT_TRUE(x)
Definition: clib.h:98
format_function_t format_vlib_buffer
Definition: buffer_funcs.h:705
u8 is_start_of_packet
Definition: ixge.c:569
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED
Definition: ixge.h:115
u32 * descriptor_buffer_indices
Definition: ixge.h:1134
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
static void vlib_node_set_interrupt_pending(vlib_main_t *vm, u32 node_index)
Definition: node_funcs.h:181
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6
Definition: ixge.h:104
u32 queue_mapping[64]
Definition: ixge.h:266
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:185
vlib_buffer_t buffer
Definition: ixge.c:572
static u8 * format_ixge_link_status(u8 *s, va_list *args)
Definition: ixge.c:2318
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:459
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 phy_command
Definition: ixge.h:368
u32 * rx_buffers_to_add
Definition: ixge.h:1266
#define XGE_PHY_CONTROL
Definition: ixge.c:52
static vlib_node_registration_t ixge_input_node
(constructor) VLIB_REGISTER_NODE (ixge_input_node)
Definition: ixge.c:56
PCI_REGISTER_DEVICE(ixge_pci_device_registration, static)
static uword ixge_rx_queue_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 start_descriptor_index, u32 n_descriptors)
Definition: ixge.c:1345
ixge_device_t * devices
Definition: ixge.h:1245
static uword sfp_eeprom_is_valid(sfp_eeprom_t *e)
Definition: sfp.h:70
struct _vlib_node_registration vlib_node_registration_t
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:561
ixge_descriptor_t before
Definition: ixge.c:561
#define IXGE_N_BYTES_IN_RX_BUFFER
Definition: ixge.c:2461
static void ixge_throttle_queue_interrupt(ixge_regs_t *r, u32 queue_interrupt_index, f64 inter_interrupt_interval_in_secs)
Definition: ixge.h:994
#define XGE_PHY_ID2
Definition: ixge.c:51
static void ixge_i2c_put_bits(i2c_bus_t *b, int scl, int sda)
Definition: ixge.c:171
u32 per_interface_next_index
Definition: ixge.h:1208
static uword ixge_interrupt_is_rx_queue(uword i)
Definition: ixge.c:2108
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
u32 extended_control
Definition: ixge.h:192
u32 software_semaphore
Definition: ixge.h:890
static void ixge_i2c_get_bits(i2c_bus_t *b, int *scl, int *sda)
Definition: ixge.c:184
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:418
static void ixge_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v)
Definition: ixge.c:164
ixge_rx_next_t
Definition: ixge.h:1274
static u8 ixge_counter_flags[]
Definition: ixge.c:2263
u8 *( format_function_t)(u8 *s, va_list *args)
Definition: format.h:48
#define VNET_HW_INTERFACE_FLAG_LINK_UP
Definition: interface.h:379
#define CLIB_MULTIARCH_SELECT_FN(fn,...)
Definition: cpu.h:47
static vlib_buffer_known_state_t vlib_buffer_is_known(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:209
u32 rx_ethernet_address0[16][2]
Definition: ixge.h:581
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:418
u32 tx_dma_control
Definition: ixge.h:500
static uword ixge_ring_add(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:1007
vlib_pci_addr_t bus_address
Definition: pci.h:58
static u64 vlib_get_buffer_data_physical_address(vlib_main_t *vm, u32 buffer_index)
Definition: buffer_funcs.h:153
struct ixge_dma_queue_t::@284::@286 tx
u32 * tx_buffers_pending_free
Definition: ixge.h:1264
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1065
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET
Definition: ixge.h:111
u32 filter_control
Definition: ixge.h:522
#define foreach_set_bit(var, mask, body)
Definition: bitops.h:158
u32 i2c_control
Definition: ixge.h:208
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 phy_data
Definition: ixge.h:369
static void ixge_rx_next_and_error_from_status_x1(ixge_device_t *xd, u32 s00, u32 s02, u8 *next0, u8 *error0, u32 *flags0)
Definition: ixge.c:632
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
Definition: vec.h:599
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:104
static u32 clean_block(u32 *b, u32 *t, u32 n_left)
Definition: ixge.c:1958
static int unix_physmem_is_fake(vlib_main_t *vm)
Definition: unix.h:190
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
Definition: node_funcs.h:432
u32 ixge_read_write_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index, u32 v, u32 is_read)
Definition: ixge.c:120
u32 rx_split_control
Definition: ixge.h:51
static uword ixge_rx_queue_to_interrupt(uword i)
Definition: ixge.c:2126
vlib_rx_or_tx_t
Definition: defs.h:44
static clib_error_t * ixge_pci_init(vlib_main_t *vm, vlib_pci_device_t *dev)
Definition: ixge.c:2847
ixge_tx_descriptor_t descriptor
Definition: ixge.c:848
#define VLIB_BUFFER_NEXT_PRESENT
Definition: buffer.h:87
i2c_bus_t i2c_bus
Definition: ixge.h:1233
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:67
static u8 * format_ixge_rx_from_hw_descriptor(u8 *s, va_list *va)
Definition: ixge.c:445
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index The first 64 bytes of buffer contains most header informatio...
Definition: buffer_funcs.h:170
#define VLIB_BUFFER_LOG2_NEXT_PRESENT
Definition: buffer.h:86
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:111
static clib_error_t * ixge_sfp_phy_init_from_eeprom(ixge_device_t *xd, u16 sfp_type)
Definition: ixge.c:228
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:526
#define always_inline
Definition: clib.h:84
#define IP_BUFFER_L4_CHECKSUM_CORRECT
Definition: buffer.h:50
static void ixge_rx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_descriptor_t *before_descriptors, u32 *before_buffers, ixge_descriptor_t *after_descriptors, uword n_descriptors)
Definition: ixge.c:735
static uword format_get_indent(u8 *s)
Definition: format.h:72
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
u32 dca_control
Definition: ixge.h:40
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
Definition: ixge.h:114
unsigned long long u32x4
Definition: ixge.c:28
ixge_phy_t phys[2]
Definition: ixge.h:1228
int i32
Definition: types.h:81
#define IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR
Definition: ixge.h:121
ixge_dma_regs_t tx_dma[128]
Definition: ixge.h:616
u32 link_partner_ability
Definition: ixge.h:343
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN
Definition: ixge.h:112
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u8 * format_hex_bytes(u8 *s, va_list *va)
Definition: std-formats.c:84
#define clib_error_return(e, args...)
Definition: error.h:111
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:144
vnet_device_class_t ixge_device_class
Definition: ixge.h:1272
unsigned long u64
Definition: types.h:89
ixge_main_t ixge_main
Definition: ixge.c:55
static vlib_node_registration_t ixge_process_node
Definition: ixge.c:57
u32 queue_index
Definition: ixge.h:1131
#define vec_resize(V, N)
Resize a vector (no header, unspecified alignment) Add N elements to end of given vector V...
Definition: vec.h:241
struct ixge_dma_queue_t::@284::@287 rx
#define vec_end(v)
End (last data address) of vector.
#define clib_error_create(args...)
Definition: error.h:108
#define vlib_call_init_function(vm, x)
Definition: init.h:162
f64 time_last_stats_update
Definition: ixge.h:1268
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2
Definition: ixge.h:100
static void ixge_rx_next_and_error_from_status_x2(ixge_device_t *xd, u32 s00, u32 s02, u32 s10, u32 s12, u8 *next0, u8 *error0, u32 *flags0, u8 *next1, u8 *error1, u32 *flags1)
Definition: ixge.c:671
u8 * format_c_identifier(u8 *s, va_list *va)
Definition: std-formats.c:258
u32 vlib_buffer_free_list_index
Definition: ixge.h:1256
static uword ixge_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
Definition: ixge.c:2716
static void ixge_sfp_enable_disable_10g(ixge_device_t *xd, uword enable)
Definition: ixge.c:218
u8 id
Definition: sfp.h:36
uword private_data
Definition: i2c.h:44
static u8 * format_ixge_tx_descriptor(u8 *s, va_list *va)
Definition: ixge.c:516
format_function_t format_vnet_sw_interface_name
#define IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET(l)
Definition: ixge.h:145
static uword ixge_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:1203
u16 state
Input node state.
Definition: node.h:451
static void ixge_clear_hw_interface_counters(u32 instance)
Definition: ixge.c:2415
static void ixge_sfp_device_up_down(ixge_device_t *xd, uword is_up)
Definition: ixge.c:267
#define foreach_ixge_counter
Definition: ixge.h:1016
static uword ixge_tx_no_wrap(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, u32 *buffers, u32 start_descriptor_index, u32 n_descriptors, ixge_tx_state_t *tx_state)
Definition: ixge.c:1035
u64 counters[IXGE_N_COUNTER]
Definition: ixge.h:1237
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:71
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:930
ixge_tx_descriptor_t tx
Definition: ixge.h:170
#define IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET
Definition: ixge.h:137
#define IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED
Definition: ixge.h:133
static void ixge_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: ixge.c:2428
#define v
Definition: acl.c:246
#define XGE_PHY_ID1
Definition: ixge.c:50
static u8 * format_ixge_device_id(u8 *s, va_list *args)
Definition: ixge.c:2297
#define XGE_PHY_DEV_TYPE_PMA_PMD
Definition: ixge.c:48
#define ELOG_DATA(em, f)
Definition: elog.h:392
static clib_error_t * vlib_pci_intr_enable(vlib_pci_device_t *dev)
Definition: pci.h:175
#define IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED
Definition: ixge.h:132
format_function_t format_sfp_eeprom
Definition: sfp.h:107
#define PREDICT_FALSE(x)
Definition: clib.h:97
u16 n_bytes_this_buffer
Definition: ixge.h:129
sfp_eeprom_t sfp_eeprom
Definition: ixge.h:1234
vnet_main_t vnet_main
Definition: misc.c:43
u32 control
Definition: ixge.h:62
#define VLIB_FRAME_SIZE
Definition: node.h:328
u32 descriptor_address[2]
Definition: ixge.h:29
static u16 ixge_read_eeprom(ixge_device_t *xd, u32 address)
Definition: ixge.c:196
format_function_t * format_buffer
Definition: node.h:311
VNET_DEVICE_CLASS(ixge_device_class)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:216
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:350
u32 software_firmware_sync
Definition: ixge.h:894
format_function_t format_vlib_pci_handle
Definition: pci.h:239
static void ixge_sfp_phy_init(ixge_device_t *xd)
Definition: ixge.c:349
vlib_node_runtime_t * node
Definition: ixge.c:902
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE
Definition: ixge.h:110
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:113
u32 vlib_sw_if_index
Definition: ixge.h:1222
uword private_data
Definition: pci.h:88
u32 head_index
Definition: ixge.h:1128
u32 auto_negotiation_control2
Definition: ixge.h:465
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:91
ixge_tx_descriptor_t * start_of_packet_descriptor
Definition: ixge.c:908
static ixge_dma_regs_t * get_dma_regs(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 qi)
Definition: ixge.c:310
static u8 * format_ixge_tx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:863
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4
Definition: ixge.h:102
static void vlib_buffer_free_no_next(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers, does not free the buffer chain for each buffer.
Definition: buffer_funcs.h:311
u32 n_descriptors
Definition: ixge.h:1125
u16 n_vectors
Definition: node.h:344
#define XGE_PHY_DEV_TYPE_PHY_XS
Definition: ixge.c:49
u32 status_write_1_to_set
Definition: ixge.h:232
Definition: i2c.h:33
static uword ixge_interrupt_tx_queue(uword i)
Definition: ixge.c:2139
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:82
vlib_main_t * vm
Definition: buffer.c:276
ixge_descriptor_t after
Definition: ixge.c:561
u32 vlib_buffer_get_or_create_free_list(vlib_main_t *vm, u32 n_data_bytes, char *fmt,...)
Definition: buffer.c:433
u32x4 as_u32x4
Definition: ixge.h:171
format_function_t format_vlib_pci_link_speed
Definition: pci.h:240
struct ixge_regs_t::@263 xge_mac
u8 * vlib_validate_buffer(vlib_main_t *vm, u32 bi, uword follow_buffer_next)
Definition: buffer.c:207
#define IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET(s)
Definition: ixge.h:108
#define clib_warning(format, args...)
Definition: error.h:59
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:88
void(* get_bits)(struct i2c_bus_t *b, int *scl, int *sda)
Definition: i2c.h:36
static u32 vlib_buffer_alloc_from_free_list(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u32 free_list_index)
Allocate buffers from specific freelist into supplied array.
Definition: buffer_funcs.h:269
elog_main_t elog_main
Definition: main.h:141
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
Definition: ethernet.h:113
static u8 * format_ixge_device_name(u8 *s, va_list *args)
Definition: ixge.c:2251
#define IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS
Definition: ixge.h:136
#define ARRAY_LEN(x)
Definition: clib.h:59
u16 device_index
Definition: ixge.h:1216
#define ELOG_TYPE_DECLARE(f)
Definition: elog.h:350
static uword ixge_interrupt_rx_queue(uword i)
Definition: ixge.c:2132
u32 n_bytes_in_packet
Definition: ixge.c:906
static uword round_pow2(uword x, uword pow2)
Definition: clib.h:278
#define VLIB_NODE_FUNCTION_MULTIARCH_CLONE(fn)
Definition: node.h:157
void vlib_i2c_init(i2c_bus_t *b)
Definition: i2c.c:150
u32 sdp_control
Definition: ixge.h:201
static u8 * format_ixge_rx_dma_trace(u8 *s, va_list *va)
Definition: ixge.c:576
u32 tail_index
Definition: ixge.h:1128
static char * ixge_error_strings[]
Definition: ixge.c:2216
static void * vlib_physmem_alloc_aligned(vlib_main_t *vm, clib_error_t **error, uword n_bytes, uword alignment)
Definition: buffer_funcs.h:432
#define EVENT_SET_FLAGS
Definition: ixge.c:42
pci_config_type0_regs_t config0
Definition: pci.h:63
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:455
#define VNET_SW_INTERFACE_FLAG_ADMIN_UP
Definition: interface.h:536
#define ASSERT(truth)
#define IXGE_COUNTER_NOT_CLEAR_ON_READ
Definition: ixge.c:2261
unsigned int u32
Definition: types.h:88
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 cpu_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:211
ixge_pci_device_id_t device_id
Definition: ixge.h:1214
ixge_descriptor_t * descriptors
Definition: ixge.h:1122
static void ixge_tx_queue(ixge_main_t *xm, ixge_device_t *xd, u32 queue_index)
Definition: ixge.c:2001
#define IP_BUFFER_L4_CHECKSUM_COMPUTED
Definition: buffer.h:49
u32 vlib_register_node(vlib_main_t *vm, vlib_node_registration_t *r)
Definition: node.c:469
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:109
clib_error_t * pci_bus_init(vlib_main_t *vm)
Definition: pci.c:251
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
Definition: interface.c:246
u32 auto_negotiation_control
Definition: ixge.h:427
#define clib_error_report(e)
Definition: error.h:125
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:259
static void vlib_node_set_state(vlib_main_t *vm, u32 node_index, vlib_node_state_t new_state)
Set node dispatch state.
Definition: node_funcs.h:146
u32 control
Definition: ixge.h:179
clib_error_t * vlib_pci_map_resource(vlib_pci_device_t *dev, u32 resource, void **result)
Definition: linux_pci.c:381
ixge_regs_t * regs
Definition: ixge.h:1205
u32 id
Definition: ixge.h:1116
u32 rx_enable
Definition: ixge.h:308
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static void ixge_semaphore_get(ixge_device_t *xd)
Definition: ixge.c:60
static void ixge_interrupt(ixge_main_t *xm, ixge_device_t *xd, u32 i)
Definition: ixge.c:1898
static uword ixge_tx_descriptor_matches_template(ixge_main_t *xm, ixge_tx_descriptor_t *d)
Definition: ixge.c:1017
static uword ixge_tx_queue_to_interrupt(uword i)
Definition: ixge.c:2120
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
void vlib_i2c_read_eeprom(i2c_bus_t *bus, u8 i2c_addr, u16 start_addr, u16 length, u8 *data)
Definition: i2c.c:201
void(* put_bits)(struct i2c_bus_t *b, int scl, int sda)
Definition: i2c.h:35
struct ixge_dma_regs_t::@255::@258 tx
static void ixge_device_init(ixge_main_t *xm)
Definition: ixge.c:2627
static void ixge_software_firmware_sync(ixge_device_t *xd, u32 sw_mask)
Definition: ixge.c:89
Definition: defs.h:47
ixge_tx_descriptor_t tx_descriptor_template_mask
Definition: ixge.h:1261
#define IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED
Definition: ixge.h:113
void ixge_set_next_node(ixge_rx_next_t next, char *name)
Definition: ixge.c:2915
ixge_dma_queue_t * dma_queues[VLIB_N_RX_TX]
Definition: ixge.h:1224
unsigned short u16
Definition: types.h:57
VLIB_PLUGIN_REGISTER()
static uword ixge_ring_sub(ixge_dma_queue_t *q, u32 i0, u32 i1)
Definition: ixge.c:998
static void ixge_tx_trace(ixge_main_t *xm, ixge_device_t *xd, ixge_dma_queue_t *dq, ixge_tx_state_t *tx_state, ixge_tx_descriptor_t *descriptors, u32 *buffers, uword n_descriptors)
Definition: ixge.c:912
u32 tail_index
Definition: ixge.h:53
u32 rx_max_frame_size
Definition: ixge.h:373
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
double f64
Definition: types.h:142
unsigned char u8
Definition: types.h:56
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:484
static uword ixge_interrupt_is_tx_queue(uword i)
Definition: ixge.c:2114
#define IXGE_COUNTER_IS_64_BIT
Definition: ixge.c:2260
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
Definition: node.h:262
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:253
vlib_buffer_t buffer
Definition: ixge.c:859
static uword ixge_device_input(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node)
Definition: ixge.c:2146
static uword ixge_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
Definition: ixge.c:2174
u32 n_descriptors_per_cache_line
Definition: ixge.h:1254
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT
Definition: ixge.h:103
vlib_main_t * vlib_main
Definition: ixge.h:1242
u16 n_packet_bytes_this_descriptor
Definition: ixge.h:96
static uword ixge_rx_queue(ixge_main_t *xm, ixge_device_t *xd, vlib_node_runtime_t *node, u32 queue_index)
Definition: ixge.c:1825
struct ixge_regs_t::@261 interrupt
static clib_error_t * vlib_pci_bus_master_enable(vlib_pci_device_t *dev)
Definition: pci.h:207
#define XGE_PHY_CONTROL_RESET
Definition: ixge.c:53
static uword vlib_in_process_context(vlib_main_t *vm)
Definition: node_funcs.h:404
#define vnet_buffer(b)
Definition: buffer.h:294
u32 is_start_of_packet
Definition: ixge.c:904
#define IXGE_HWBP_RACE_ELOG
Definition: ixge.c:43
vlib_pci_device_t pci_device
Definition: ixge.h:1211
static int vlib_i2c_bus_timed_out(i2c_bus_t *bus)
Definition: i2c.h:54
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
ixge_dma_regs_t rx_dma0[64]
Definition: ixge.h:281
u8 is_start_of_packet
Definition: ixge.c:856
#define IXGE_ALWAYS_POLL
Definition: ixge.c:40
u8 data[0]
Packet data.
Definition: buffer.h:152
u32 core_analog_config
Definition: ixge.h:949
#define vec_foreach(var, vec)
Vector iterator.
#define IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP
Definition: ixge.h:106
clib_error_t * ixge_init(vlib_main_t *vm)
Definition: ixge.c:2799
u16 flags
Copy of main node flags.
Definition: node.h:449
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:101
static void ixge_sfp_enable_disable_laser(ixge_device_t *xd, uword enable)
Definition: ixge.c:208
u16 pci_function
Definition: ixge.h:1219
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:159
u32 status_write_1_to_clear
Definition: ixge.h:230
#define IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS
Definition: ixge.h:135
u32 flags
Definition: vhost-user.h:78
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
static u8 * format_ixge_device(u8 *s, va_list *args)
Definition: ixge.c:2340
u32 phy_index
Definition: ixge.h:1227
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:74
u32 eeprom_read
Definition: ixge.h:881
static u32 ixge_read_phy_reg(ixge_device_t *xd, u32 dev_type, u32 reg_index)
Definition: ixge.c:157
u32 n_bytes_in_rx_buffer
Definition: ixge.h:1252
pci_config_header_t header
Definition: pci_config.h:240
u32 link_status
Definition: ixge.h:340
static clib_error_t * ixge_dma_init(ixge_device_t *xd, vlib_rx_or_tx_t rt, u32 queue_index)
Definition: ixge.c:2464
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
u64 counters_last_clear[IXGE_N_COUNTER]
Definition: ixge.h:1237
ixge_dma_regs_t rx_dma1[64]
Definition: ixge.h:747
ixge_rx_from_hw_descriptor_t rx_from_hw
Definition: ixge.h:169
#define foreach_ixge_error
Definition: ixge.c:616
Definition: defs.h:46
#define IXGE_TX_DESCRIPTOR_STATUS1_DONE
Definition: ixge.h:139
#define IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR
Definition: ixge.h:118
ixge_error_t
Definition: ixge.c:623