FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
ixge.c
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use the supported DPDK driver instead.
21  */
22 
23 #if __x86_64__ || __i386__ || __aarch64__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver won't really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <ixge/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/plugin/plugin.h>
38 #include <vpp/app/version.h>
39 
40 #define IXGE_ALWAYS_POLL 0
41 
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
44 
45 #define PCI_VENDOR_ID_INTEL 0x8086
46 
47 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
54 
55 ixge_main_t ixge_main;
56 static vlib_node_registration_t ixge_input_node;
57 static vlib_node_registration_t ixge_process_node;
58 
59 static void
60 ixge_semaphore_get (ixge_device_t * xd)
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
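/* Note (editorial, per the Intel 82599 datasheet's SWSM register layout):
   bit 0 (SMBI) is the hardware-arbitrated software semaphore and bit 1
   (SWESMBI) arbitrates with firmware, which is why the code first polls
   bit 0, then sets bit 1 and reads it back. */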
80 
81 static void
82 ixge_semaphore_release (ixge_device_t * xd)
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
88 static void
89 ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
104  ixge_semaphore_release (xd);
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
110 static void
111 ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
116  ixge_semaphore_release (xd);
117 }
118 
119 u32
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
136  x = (reg_index
137  | (dev_type << 16)
138  | (xd->phys[xd->phy_index].mdio_address << 21));
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
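/* Note (editorial): this is an IEEE 802.3 clause-45 style two-phase MDIO
   access: the first command latches the register address, the second
   carries the operation ((is_read ? 2 : 1) << 26) and the data; bit 30
   is the busy flag that is busy-waited on above. */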
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
174  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
187  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
195 static u16
196 ixge_read_eeprom (ixge_device_t * xd, u32 address)
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
207 static void
208 ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
217 static void
218 ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
228 ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
256  ixge_software_firmware_sync_release (xd, 1 << 3);
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
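/* Note (editorial): the EEPROM init sequence read above is a table of
   (sfp_type id, data address) pairs starting at word 0x2b; each data
   block is a 0xffff-terminated list of values written to
   core_analog_config. */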
265 
266 static void
267 ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
277  v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL);
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
282  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL, v);
283 
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
287  v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL);
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
295  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL, v);
296 
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
308 
309 static ixge_dma_regs_t *
310 get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
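/* Note (editorial): the 82599 splits its 128 RX queue register sets
   across two banks (rx_dma0 for queues 0-63, rx_dma1 for 64-127), while
   TX queue registers form one array; this helper hides that split. */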
319 
320 static clib_error_t *
321 ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
324  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
325  ixge_main_t *xm = &ixge_main;
326  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
349 ixge_sfp_phy_init (ixge_device_t * xd)
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
355  ib->put_bits = ixge_i2c_put_bits;
356  ib->get_bits = ixge_i2c_get_bits;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_UNKNOWN;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
367  ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->pci_function);
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
376 ixge_phy_init (ixge_device_t * xd)
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
402  v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
413  ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
414  ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
415 
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
423  u32 instance, id, address;
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
432  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
433  XGE_PHY_CONTROL_RESET);
434 
435  /* Wait for self-clearing reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
440  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
441  XGE_PHY_CONTROL_RESET);
442 }
443 
444 static u8 *
445 format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
446 {
447  ixge_rx_from_hw_descriptor_t *d =
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  u32 indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
454  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
458  d->n_packet_bytes_this_descriptor,
459  IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
460  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
465  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_LAYER2_ERROR)
466  s = format (s, "layer2 error");
467 
468  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
474  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_VLAN)
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
483  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
484  s = format (s, " checksum %s",
485  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
486  "bad" : "ok");
487  }
488  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
503  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED)
504  s = format (s, ", tcp checksum %s",
505  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
506  "ok");
507  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED)
508  s = format (s, ", udp checksum %s",
509  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  u32 indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
559 typedef struct
560 {
561  ixge_descriptor_t before, after;
562 
563  u32 buffer_index;
564 
565  u16 device_index;
566 
567  u8 queue_index;
568 
569  u8 is_start_of_packet;
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
572  vlib_buffer_t buffer;
573 } ixge_rx_dma_trace_t;
574 
575 static u8 *
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
583  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
584  format_function_t *f;
585  u32 indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
589  vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
597  format_ixge_rx_from_hw_descriptor, &t->before);
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
600  t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U",
603  format_white_space, indent,
604  t->buffer_index, format_vnet_buffer, &t->buffer);
605 
606  s = format (s, "\n%U", format_white_space, indent);
607 
608  f = node->format_buffer;
609  if (!f || !t->is_start_of_packet)
610  f = format_hex_bytes;
611  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612 
613  return s;
614 }
615 
616 #define foreach_ixge_error \
617  _ (none, "no error") \
618  _ (tx_full_drops, "tx ring full drops") \
619  _ (ip4_checksum_error, "ip4 checksum errors") \
620  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
621  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
622 
623 typedef enum
624 {
625 #define _(f,s) IXGE_ERROR_##f,
626  foreach_ixge_error
627 #undef _
628  IXGE_N_ERROR,
629 } ixge_error_t;
630 
631 always_inline void
632 ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
633  u32 s00, u32 s02,
634  u8 * next0, u8 * error0, u32 * flags0)
635 {
636  u8 is0_ip4, is0_ip6, n0, e0;
637  u32 f0;
638 
639  e0 = IXGE_ERROR_none;
640  n0 = IXGE_RX_NEXT_IP4_INPUT;
641 
642  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
643  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644 
645  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
646  ? IXGE_ERROR_ip4_checksum_error : e0);
647 
648  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650 
651  n0 = (xd->per_interface_next_index != ~0) ?
652  xd->per_interface_next_index : n0;
653 
654  /* Check for error. */
655  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656 
657  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
658  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
659  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
660 
661  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
662  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
663  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
664 
665  *error0 = e0;
666  *next0 = n0;
667  *flags0 = f0;
668 }
669 
670 always_inline void
671 ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
672  u32 s00, u32 s02,
673  u32 s10, u32 s12,
674  u8 * next0, u8 * error0, u32 * flags0,
675  u8 * next1, u8 * error1, u32 * flags1)
676 {
677  u8 is0_ip4, is0_ip6, n0, e0;
678  u8 is1_ip4, is1_ip6, n1, e1;
679  u32 f0, f1;
680 
681  e0 = e1 = IXGE_ERROR_none;
682  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683 
684  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
685  is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
686 
687  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689 
690  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e0);
692  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
693  ? IXGE_ERROR_ip4_checksum_error : e1);
694 
695  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697 
698  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700 
701  n0 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n0;
703  n1 = (xd->per_interface_next_index != ~0) ?
704  xd->per_interface_next_index : n1;
705 
706  /* Check for error. */
707  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709 
710  *error0 = e0;
711  *error1 = e1;
712 
713  *next0 = n0;
714  *next1 = n1;
715 
716  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
717  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
718  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
719  f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
720  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
721  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
722 
723  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
724  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
725  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
726  f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
727  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
728  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
729 
730  *flags0 = f0;
731  *flags1 = f1;
732 }
733 
734 static void
735 ixge_rx_trace (ixge_main_t * xm,
736  ixge_device_t * xd,
737  ixge_dma_queue_t * dq,
738  ixge_descriptor_t * before_descriptors,
739  u32 * before_buffers,
740  ixge_descriptor_t * after_descriptors, uword n_descriptors)
741 {
742  vlib_main_t *vm = xm->vlib_main;
743  vlib_node_runtime_t *node = dq->rx.node;
744  ixge_rx_from_hw_descriptor_t *bd;
745  ixge_rx_to_hw_descriptor_t *ad;
746  u32 *b, n_left, is_sop, next_index_sop;
747 
748  n_left = n_descriptors;
749  b = before_buffers;
750  bd = &before_descriptors->rx_from_hw;
751  ad = &after_descriptors->rx_to_hw;
752  is_sop = dq->rx.is_start_of_packet;
753  next_index_sop = dq->rx.saved_start_of_packet_next_index;
754 
755  while (n_left >= 2)
756  {
757  u32 bi0, bi1, flags0, flags1;
758  vlib_buffer_t *b0, *b1;
759  ixge_rx_dma_trace_t *t0, *t1;
760  u8 next0, error0, next1, error1;
761 
762  bi0 = b[0];
763  bi1 = b[1];
764  n_left -= 2;
765 
766  b0 = vlib_get_buffer (vm, bi0);
767  b1 = vlib_get_buffer (vm, bi1);
768 
769  ixge_rx_next_and_error_from_status_x2 (xd,
770  bd[0].status[0], bd[0].status[2],
771  bd[1].status[0], bd[1].status[2],
772  &next0, &error0, &flags0,
773  &next1, &error1, &flags1);
774 
775  next_index_sop = is_sop ? next0 : next_index_sop;
776  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
777  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
778  t0->is_start_of_packet = is_sop;
779  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
780 
781  next_index_sop = is_sop ? next1 : next_index_sop;
782  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
783  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
784  t1->is_start_of_packet = is_sop;
785  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
786 
787  t0->queue_index = dq->queue_index;
788  t1->queue_index = dq->queue_index;
789  t0->device_index = xd->device_index;
790  t1->device_index = xd->device_index;
791  t0->before.rx_from_hw = bd[0];
792  t1->before.rx_from_hw = bd[1];
793  t0->after.rx_to_hw = ad[0];
794  t1->after.rx_to_hw = ad[1];
795  t0->buffer_index = bi0;
796  t1->buffer_index = bi1;
797  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
798  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
799  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
800  sizeof (t0->buffer.pre_data));
801  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
802  sizeof (t1->buffer.pre_data));
803 
804  b += 2;
805  bd += 2;
806  ad += 2;
807  }
808 
809  while (n_left >= 1)
810  {
811  u32 bi0, flags0;
812  vlib_buffer_t *b0;
813  ixge_rx_dma_trace_t *t0;
814  u8 next0, error0;
815 
816  bi0 = b[0];
817  n_left -= 1;
818 
819  b0 = vlib_get_buffer (vm, bi0);
820 
821  ixge_rx_next_and_error_from_status_x1 (xd,
822  bd[0].status[0], bd[0].status[2],
823  &next0, &error0, &flags0);
824 
825  next_index_sop = is_sop ? next0 : next_index_sop;
826  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
827  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
828  t0->is_start_of_packet = is_sop;
829  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
830 
831  t0->queue_index = dq->queue_index;
832  t0->device_index = xd->device_index;
833  t0->before.rx_from_hw = bd[0];
834  t0->after.rx_to_hw = ad[0];
835  t0->buffer_index = bi0;
836  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
837  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
838  sizeof (t0->buffer.pre_data));
839 
840  b += 1;
841  bd += 1;
842  ad += 1;
843  }
844 }
845 
846 typedef struct
847 {
848  ixge_tx_descriptor_t descriptor;
849 
850  u32 buffer_index;
851 
852  u16 device_index;
853 
854  u8 queue_index;
855 
856  u8 is_start_of_packet;
857 
858  /* Copy of VLIB buffer; packet data stored in pre_data. */
859  vlib_buffer_t buffer;
860 } ixge_tx_dma_trace_t;
861 
862 static u8 *
863 format_ixge_tx_dma_trace (u8 * s, va_list * va)
864 {
865  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
867  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868  vnet_main_t *vnm = vnet_get_main ();
869  ixge_main_t *xm = &ixge_main;
870  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
871  format_function_t *f;
872  u32 indent = format_get_indent (s);
873 
874  {
875  vnet_sw_interface_t *sw =
876  vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
877  s =
878  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879  t->queue_index);
880  }
881 
882  s = format (s, "\n%Udescriptor: %U",
883  format_white_space, indent,
884  format_ixge_tx_descriptor, &t->descriptor);
885 
886  s = format (s, "\n%Ubuffer 0x%x: %U",
887  format_white_space, indent,
888  t->buffer_index, format_vnet_buffer, &t->buffer);
889 
890  s = format (s, "\n%U", format_white_space, indent);
891 
892  f = format_ethernet_header_with_length;
893  if (!f || !t->is_start_of_packet)
894  f = format_hex_bytes;
895  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896 
897  return s;
898 }
899 
900 typedef struct
901 {
902  vlib_node_runtime_t *node;
903 
904  u32 is_start_of_packet;
905 
906  u32 n_bytes_in_packet;
907 
908  ixge_tx_descriptor_t *start_of_packet_descriptor;
909 } ixge_tx_state_t;
910 
911 static void
912 ixge_tx_trace (ixge_main_t * xm,
913  ixge_device_t * xd,
914  ixge_dma_queue_t * dq,
915  ixge_tx_state_t * tx_state,
916  ixge_tx_descriptor_t * descriptors,
917  u32 * buffers, uword n_descriptors)
918 {
919  vlib_main_t *vm = xm->vlib_main;
920  vlib_node_runtime_t *node = tx_state->node;
921  ixge_tx_descriptor_t *d;
922  u32 *b, n_left, is_sop;
923 
924  n_left = n_descriptors;
925  b = buffers;
926  d = descriptors;
927  is_sop = tx_state->is_start_of_packet;
928 
929  while (n_left >= 2)
930  {
931  u32 bi0, bi1;
932  vlib_buffer_t *b0, *b1;
933  ixge_tx_dma_trace_t *t0, *t1;
934 
935  bi0 = b[0];
936  bi1 = b[1];
937  n_left -= 2;
938 
939  b0 = vlib_get_buffer (vm, bi0);
940  b1 = vlib_get_buffer (vm, bi1);
941 
942  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943  t0->is_start_of_packet = is_sop;
944  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945 
946  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947  t1->is_start_of_packet = is_sop;
948  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949 
950  t0->queue_index = dq->queue_index;
951  t1->queue_index = dq->queue_index;
952  t0->device_index = xd->device_index;
953  t1->device_index = xd->device_index;
954  t0->descriptor = d[0];
955  t1->descriptor = d[1];
956  t0->buffer_index = bi0;
957  t1->buffer_index = bi1;
958  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961  sizeof (t0->buffer.pre_data));
962  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963  sizeof (t1->buffer.pre_data));
964 
965  b += 2;
966  d += 2;
967  }
968 
969  while (n_left >= 1)
970  {
971  u32 bi0;
972  vlib_buffer_t *b0;
973  ixge_tx_dma_trace_t *t0;
974 
975  bi0 = b[0];
976  n_left -= 1;
977 
978  b0 = vlib_get_buffer (vm, bi0);
979 
980  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981  t0->is_start_of_packet = is_sop;
982  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983 
984  t0->queue_index = dq->queue_index;
985  t0->device_index = xd->device_index;
986  t0->descriptor = d[0];
987  t0->buffer_index = bi0;
988  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990  sizeof (t0->buffer.pre_data));
991 
992  b += 1;
993  d += 1;
994  }
995 }
996 
997 always_inline uword
998 ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
999 {
1000  i32 d = i1 - i0;
1001  ASSERT (i0 < q->n_descriptors);
1002  ASSERT (i1 < q->n_descriptors);
1003  return d < 0 ? q->n_descriptors + d : d;
1004 }
1005 
1006 always_inline uword
1007 ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1008 {
1009  u32 d = i0 + i1;
1010  ASSERT (i0 < q->n_descriptors);
1011  ASSERT (i1 < q->n_descriptors);
1012  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013  return d;
1014 }
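/* Note (editorial): worked example of the branch-free ring arithmetic
   above: with q->n_descriptors == 256, ixge_ring_sub (q, 250, 10)
   returns 16 (the wrap-around distance) and ixge_ring_add (q, 250, 10)
   returns 4. */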
1015 
1016 always_inline uword
1017 ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1018  ixge_tx_descriptor_t * d)
1019 {
1020  u32 cmp;
1021 
1022  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1023  ^ xm->tx_descriptor_template.status0);
1024  if (cmp)
1025  return 0;
1026  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1027  ^ xm->tx_descriptor_template.status1);
1028  if (cmp)
1029  return 0;
1030 
1031  return 1;
1032 }
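/* Note (editorial): TX descriptors are recycled in place, so this check
   verifies that the masked status words still match the values
   programmed from the template at ring-initialization time. */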
1033 
1034 static uword
1035 ixge_tx_no_wrap (ixge_main_t * xm,
1036  ixge_device_t * xd,
1037  ixge_dma_queue_t * dq,
1038  u32 * buffers,
1039  u32 start_descriptor_index,
1040  u32 n_descriptors, ixge_tx_state_t * tx_state)
1041 {
1042  vlib_main_t *vm = xm->vlib_main;
1043  ixge_tx_descriptor_t *d, *d_sop;
1044  u32 n_left = n_descriptors;
1045  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1046  u32 *to_tx =
1047  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1048  u32 is_sop = tx_state->is_start_of_packet;
1049  u32 len_sop = tx_state->n_bytes_in_packet;
1050  u16 template_status = xm->tx_descriptor_template.status0;
1051  u32 descriptor_prefetch_rotor = 0;
1052 
1053  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1054  d = &dq->descriptors[start_descriptor_index].tx;
1055  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1056 
1057  while (n_left >= 4)
1058  {
1059  vlib_buffer_t *b0, *b1;
1060  u32 bi0, fi0, len0;
1061  u32 bi1, fi1, len1;
1062  u8 is_eop0, is_eop1;
1063 
1064  /* Prefetch next iteration. */
1065  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1066  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1067 
1068  if ((descriptor_prefetch_rotor & 0x3) == 0)
1069  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1070 
1071  descriptor_prefetch_rotor += 2;
1072 
1073  bi0 = buffers[0];
1074  bi1 = buffers[1];
1075 
1076  to_free[0] = fi0 = to_tx[0];
1077  to_tx[0] = bi0;
1078  to_free += fi0 != 0;
1079 
1080  to_free[0] = fi1 = to_tx[1];
1081  to_tx[1] = bi1;
1082  to_free += fi1 != 0;
1083 
1084  buffers += 2;
1085  n_left -= 2;
1086  to_tx += 2;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  b1 = vlib_get_buffer (vm, bi1);
1090 
1091  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1093 
1094  len0 = b0->current_length;
1095  len1 = b1->current_length;
1096 
1097  ASSERT (ixge_tx_descriptor_matches_template (xm, &d[0]));
1098  ASSERT (ixge_tx_descriptor_matches_template (xm, &d[1]));
1099 
1100  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1101  d[1].buffer_address = vlib_buffer_get_pa (vm, b1);
1102 
1103  d[0].n_bytes_this_buffer = len0;
1104  d[1].n_bytes_this_buffer = len1;
1105 
1106  d[0].status0 =
1107  template_status | (is_eop0 <<
1108  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1109  d[1].status0 =
1110  template_status | (is_eop1 <<
1111  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1112 
1113  len_sop = (is_sop ? 0 : len_sop) + len0;
1114  d_sop[0].status1 =
1115  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1116  d += 1;
1117  d_sop = is_eop0 ? d : d_sop;
1118 
1119  is_sop = is_eop0;
1120 
1121  len_sop = (is_sop ? 0 : len_sop) + len1;
1122  d_sop[0].status1 =
1123  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1124  d += 1;
1125  d_sop = is_eop1 ? d : d_sop;
1126 
1127  is_sop = is_eop1;
1128  }
1129 
1130  while (n_left > 0)
1131  {
1132  vlib_buffer_t *b0;
1133  u32 bi0, fi0, len0;
1134  u8 is_eop0;
1135 
1136  bi0 = buffers[0];
1137 
1138  to_free[0] = fi0 = to_tx[0];
1139  to_tx[0] = bi0;
1140  to_free += fi0 != 0;
1141 
1142  buffers += 1;
1143  n_left -= 1;
1144  to_tx += 1;
1145 
1146  b0 = vlib_get_buffer (vm, bi0);
1147 
1148  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1149 
1150  len0 = b0->current_length;
1151 
1152  ASSERT (ixge_tx_descriptor_matches_template (xm, &d[0]));
1153 
1154  d[0].buffer_address = vlib_buffer_get_pa (vm, b0);
1155  d[0].n_bytes_this_buffer = len0;
1156 
1157  d[0].status0 =
1158  template_status | (is_eop0 <<
1159  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1160 
1161  len_sop = (is_sop ? 0 : len_sop) + len0;
1162  d_sop[0].status1 =
1163  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop);
1164  d += 1;
1165  d_sop = is_eop0 ? d : d_sop;
1166 
1167  is_sop = is_eop0;
1168  }
1169 
1170  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1171  {
1172  to_tx =
1173  vec_elt_at_index (dq->descriptor_buffer_indices,
1174  start_descriptor_index);
1175  ixge_tx_trace (xm, xd, dq, tx_state,
1176  &dq->descriptors[start_descriptor_index].tx, to_tx,
1177  n_descriptors);
1178  }
1179 
1180  _vec_len (xm->tx_buffers_pending_free) =
1181  to_free - xm->tx_buffers_pending_free;
1182 
1183  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1184  {
1185  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1186 
1187  ASSERT (d_sop - d_start <= dq->n_descriptors);
1188  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1189  }
1190 
1191  tx_state->is_start_of_packet = is_sop;
1192  tx_state->start_of_packet_descriptor = d_sop;
1193  tx_state->n_bytes_in_packet = len_sop;
1194 
1195  return n_descriptors;
1196 }
1197 
1198 static uword
1199 ixge_interface_tx (vlib_main_t * vm,
1200  vlib_node_runtime_t * node, vlib_frame_t * f)
1201 {
1202  ixge_main_t *xm = &ixge_main;
1203  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1204  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
1205  ixge_dma_queue_t *dq;
1206  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1207  u32 queue_index = 0; /* fixme parameter */
1208  ixge_tx_state_t tx_state;
1209 
1210  tx_state.node = node;
1211  tx_state.is_start_of_packet = 1;
1212  tx_state.start_of_packet_descriptor = 0;
1213  tx_state.n_bytes_in_packet = 0;
1214 
1215  from = vlib_frame_vector_args (f);
1216 
1217  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1218 
1219  dq->head_index = dq->tx.head_index_write_back[0];
1220 
1221  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1222  n_left_tx = dq->n_descriptors - 1;
1223  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
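/* Note (editorial): one descriptor slot is deliberately left unused;
   with e.g. 512 descriptors at most 511 may be outstanding, since a
   full ring with head == tail would be indistinguishable from an empty
   one. */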
1224 
1225  _vec_len (xm->tx_buffers_pending_free) = 0;
1226 
1227  n_descriptors_to_tx = f->n_vectors;
1228  n_tail_drop = 0;
1229  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1230  {
1231  i32 i, n_ok, i_eop, i_sop;
1232 
1233  i_sop = i_eop = ~0;
1234  for (i = n_left_tx - 1; i >= 0; i--)
1235  {
1236  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1237  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1238  {
1239  if (i_sop != ~0 && i_eop != ~0)
1240  break;
1241  i_eop = i;
1242  i_sop = i + 1;
1243  }
1244  }
1245  if (i == 0)
1246  n_ok = 0;
1247  else
1248  n_ok = i_eop + 1;
1249 
1250  {
1251  ELOG_TYPE_DECLARE (e) =
1252  {
1253  .function = (char *) __FUNCTION__,.format =
1254  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1255  "i2i2i2i2",};
1256  struct
1257  {
1258  u16 instance, to_tx, head, tail;
1259  } *ed;
1260  ed = ELOG_DATA (&vm->elog_main, e);
1261  ed->instance = xd->device_index;
1262  ed->to_tx = n_descriptors_to_tx;
1263  ed->head = dq->head_index;
1264  ed->tail = dq->tail_index;
1265  }
1266 
1267  if (n_ok < n_descriptors_to_tx)
1268  {
1269  n_tail_drop = n_descriptors_to_tx - n_ok;
1270  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1271  vlib_error_count (vm, ixge_input_node.index,
1272  IXGE_ERROR_tx_full_drops, n_tail_drop);
1273  }
1274 
1275  n_descriptors_to_tx = n_ok;
1276  }
1277 
1278  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1279 
1280  /* Process from tail to end of descriptor ring. */
1281  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1282  {
1283  u32 n =
1284  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1285  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1286  from += n;
1287  n_descriptors_to_tx -= n;
1288  dq->tail_index += n;
1289  ASSERT (dq->tail_index <= dq->n_descriptors);
1290  if (dq->tail_index == dq->n_descriptors)
1291  dq->tail_index = 0;
1292  }
1293 
1294  if (n_descriptors_to_tx > 0)
1295  {
1296  u32 n =
1297  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1298  from += n;
1299  ASSERT (n == n_descriptors_to_tx);
1300  dq->tail_index += n;
1301  ASSERT (dq->tail_index <= dq->n_descriptors);
1302  if (dq->tail_index == dq->n_descriptors)
1303  dq->tail_index = 0;
1304  }
1305 
1306  /* We should only get full packets. */
1307  ASSERT (tx_state.is_start_of_packet);
1308 
1309  /* Report status when last descriptor is done. */
1310  {
1311  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1312  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1313  d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
1314  }
1315 
1316  /* Give new descriptors to hardware. */
1317  {
1318  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1319 
1320  CLIB_MEMORY_BARRIER ();
1321 
1322  dr->tail_index = dq->tail_index;
1323  }
1324 
1325  /* Free any buffers that are done. */
1326  {
1327  u32 n = _vec_len (xm->tx_buffers_pending_free);
1328  if (n > 0)
1329  {
1330  vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
1331  _vec_len (xm->tx_buffers_pending_free) = 0;
1332  ASSERT (dq->tx.n_buffers_on_ring >= n);
1333  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1334  }
1335  }
1336 
1337  return f->n_vectors;
1338 }
1339 
1340 static uword
1341 ixge_rx_queue_no_wrap (ixge_main_t * xm,
1342  ixge_device_t * xd,
1343  ixge_dma_queue_t * dq,
1344  u32 start_descriptor_index, u32 n_descriptors)
1345 {
1346  vlib_main_t *vm = xm->vlib_main;
1347  vlib_node_runtime_t *node = dq->rx.node;
1348  ixge_descriptor_t *d;
1349  static ixge_descriptor_t *d_trace_save;
1350  static u32 *d_trace_buffers;
1351  u32 n_descriptors_left = n_descriptors;
1352  u32 *to_rx =
1353  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1354  u32 *to_add;
1355  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1356  u32 bi_last = dq->rx.saved_last_buffer_index;
1357  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1358  u32 is_sop = dq->rx.is_start_of_packet;
1359  u32 next_index, n_left_to_next, *to_next;
1360  u32 n_packets = 0;
1361  u32 n_bytes = 0;
1362  u32 n_trace = vlib_get_trace_count (vm, node);
1363  vlib_buffer_t *b_last, b_placeholder;
1364 
1365  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1366  d = &dq->descriptors[start_descriptor_index];
1367 
1368  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_placeholder;
1369  next_index = dq->rx.next_index;
1370 
1371  if (n_trace > 0)
1372  {
1373  u32 n = clib_min (n_trace, n_descriptors);
1374  if (d_trace_save)
1375  {
1376  _vec_len (d_trace_save) = 0;
1377  _vec_len (d_trace_buffers) = 0;
1378  }
1379  vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1380  vec_add (d_trace_buffers, to_rx, n);
1381  }
1382 
1383  {
1384  uword l = vec_len (xm->rx_buffers_to_add);
1385 
1386  if (l < n_descriptors_left)
1387  {
1388  u32 n_to_alloc = 2 * dq->n_descriptors - l;
1389  u32 n_allocated;
1390 
1391  vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1392 
1393  _vec_len (xm->rx_buffers_to_add) = l;
1394  n_allocated =
1395  vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
1396  _vec_len (xm->rx_buffers_to_add) += n_allocated;
1397 
1398  /* Handle transient allocation failure */
1399  if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
1400  {
1401  if (n_allocated == 0)
1402  vlib_error_count (vm, ixge_input_node.index,
1403  IXGE_ERROR_rx_alloc_no_physmem, 1);
1404  else
1405  vlib_error_count (vm, ixge_input_node.index,
1406  IXGE_ERROR_rx_alloc_fail, 1);
1407 
1408  n_descriptors_left = l + n_allocated;
1409  }
1410  n_descriptors = n_descriptors_left;
1411  }
1412 
1413  /* Add buffers from end of vector going backwards. */
1414  to_add = vec_end (xm->rx_buffers_to_add) - 1;
1415  }
1416 
1417  while (n_descriptors_left > 0)
1418  {
1419  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1420 
1421  while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1422  {
1423  vlib_buffer_t *b0, *b1;
1424  vlib_buffer_t *f0, *f1;
1425  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1426  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1427  u8 is_eop0, error0, next0;
1428  u8 is_eop1, error1, next1;
1429  ixge_descriptor_t d0, d1;
1430 
1431  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1432  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1433 
1434  CLIB_PREFETCH (d + 2, 32, STORE);
1435 
1436  d0.as_u32x4 = d[0].as_u32x4;
1437  d1.as_u32x4 = d[1].as_u32x4;
1438 
1439  s20 = d0.rx_from_hw.status[2];
1440  s21 = d1.rx_from_hw.status[2];
1441 
1442  s00 = d0.rx_from_hw.status[0];
1443  s01 = d1.rx_from_hw.status[0];
1444 
1445  if (!
1446  ((s20 & s21) & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1447  goto found_hw_owned_descriptor_x2;
1448 
1449  bi0 = to_rx[0];
1450  bi1 = to_rx[1];
1451 
1452  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1453  fi0 = to_add[0];
1454  fi1 = to_add[-1];
1455 
1456  to_rx[0] = fi0;
1457  to_rx[1] = fi1;
1458  to_rx += 2;
1459  to_add -= 2;
1460 
1461 #if 0
1466 #endif
1467 
1468  b0 = vlib_get_buffer (vm, bi0);
1469  b1 = vlib_get_buffer (vm, bi1);
1470 
1471  /*
1472  * Turn this on if you run into
1473  * "bad monkey" contexts, and you want to know exactly
1474  * which nodes they've visited... See main.c...
1475  */
1476  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1477  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
1478 
1481 
1482  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1483  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1484 
1485  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1486  &next0, &error0, &flags0,
1487  &next1, &error1, &flags1);
1488 
1489  next0 = is_sop ? next0 : next_index_sop;
1490  next1 = is_eop0 ? next1 : next0;
1491  next_index_sop = next1;
1492 
1493  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1494  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1495 
1496  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1497  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1498  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1499  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1500 
1501  b0->error = node->errors[error0];
1502  b1->error = node->errors[error1];
1503 
1504  len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1505  len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1506  n_bytes += len0 + len1;
1507  n_packets += is_eop0 + is_eop1;
1508 
1509  /* Give new buffers to hardware. */
1510  f0 = vlib_get_buffer (vm, fi0);
1511  f1 = vlib_get_buffer (vm, fi1);
1512  d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
1513  d1.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f1);
1514  d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1515  d1.rx_to_hw.head_address = d1.rx_to_hw.tail_address;
1516  d[0].as_u32x4 = d0.as_u32x4;
1517  d[1].as_u32x4 = d1.as_u32x4;
1518 
1519  d += 2;
1520  n_descriptors_left -= 2;
1521 
1522  /* Point to either l2 or l3 header depending on next. */
1523  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1524  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1525  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
1526  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
1527 
1528  b0->current_length = len0 - l3_offset0;
1529  b1->current_length = len1 - l3_offset1;
1530  b0->current_data = l3_offset0;
1531  b1->current_data = l3_offset1;
1532 
1533  b_last->next_buffer = is_sop ? ~0 : bi0;
1534  b0->next_buffer = is_eop0 ? ~0 : bi1;
1535  bi_last = bi1;
1536  b_last = b1;
1537 
1538  if (CLIB_DEBUG > 0)
1539  {
1540  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1541  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1542 
1543  if (is_eop0)
1544  {
1545  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1546  /* follow_buffer_next */ 1);
1547  ASSERT (!msg);
1548  }
1549  if (is_eop1)
1550  {
1551  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1552  /* follow_buffer_next */ 1);
1553  ASSERT (!msg);
1554  }
1555  }
1556  if (0) /* "Dave" version */
1557  {
1558  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1559  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1560 
1561  if (is_eop0)
1562  {
1563  to_next[0] = bi_sop0;
1564  to_next++;
1565  n_left_to_next--;
1566 
1567  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1568  to_next, n_left_to_next,
1569  bi_sop0, next0);
1570  }
1571  if (is_eop1)
1572  {
1573  to_next[0] = bi_sop1;
1574  to_next++;
1575  n_left_to_next--;
1576 
1577  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1578  to_next, n_left_to_next,
1579  bi_sop1, next1);
1580  }
1581  is_sop = is_eop1;
1582  bi_sop = bi_sop1;
1583  }
1584  if (1) /* "Eliot" version */
1585  {
1586  /* Speculatively enqueue to cached next. */
1587  u8 saved_is_sop = is_sop;
1588  u32 bi_sop_save = bi_sop;
1589 
1590  bi_sop = saved_is_sop ? bi0 : bi_sop;
1591  to_next[0] = bi_sop;
1592  to_next += is_eop0;
1593  n_left_to_next -= is_eop0;
1594 
1595  bi_sop = is_eop0 ? bi1 : bi_sop;
1596  to_next[0] = bi_sop;
1597  to_next += is_eop1;
1598  n_left_to_next -= is_eop1;
1599 
1600  is_sop = is_eop1;
1601 
1602  if (PREDICT_FALSE
1603  (!(next0 == next_index && next1 == next_index)))
1604  {
1605  /* Undo speculation. */
1606  to_next -= is_eop0 + is_eop1;
1607  n_left_to_next += is_eop0 + is_eop1;
1608 
1609  /* Re-do both descriptors being careful about where we enqueue. */
1610  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1611  if (is_eop0)
1612  {
1613  if (next0 != next_index)
1614  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1615  else
1616  {
1617  to_next[0] = bi_sop;
1618  to_next += 1;
1619  n_left_to_next -= 1;
1620  }
1621  }
1622 
1623  bi_sop = is_eop0 ? bi1 : bi_sop;
1624  if (is_eop1)
1625  {
1626  if (next1 != next_index)
1627  vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1628  else
1629  {
1630  to_next[0] = bi_sop;
1631  to_next += 1;
1632  n_left_to_next -= 1;
1633  }
1634  }
1635 
1636  /* Switch cached next index when next for both packets is the same. */
1637  if (is_eop0 && is_eop1 && next0 == next1)
1638  {
1639  vlib_put_next_frame (vm, node, next_index,
1640  n_left_to_next);
1641  next_index = next0;
1642  vlib_get_next_frame (vm, node, next_index,
1643  to_next, n_left_to_next);
1644  }
1645  }
1646  }
1647  }
1648 
1649  /* Bail out of dual loop and proceed with single loop. */
1650  found_hw_owned_descriptor_x2:
1651 
1652  while (n_descriptors_left > 0 && n_left_to_next > 0)
1653  {
1654  vlib_buffer_t *b0;
1655  vlib_buffer_t *f0;
1656  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1657  u8 is_eop0, error0, next0;
1658  ixge_descriptor_t d0;
1659 
1660  d0.as_u32x4 = d[0].as_u32x4;
1661 
1662  s20 = d0.rx_from_hw.status[2];
1663  s00 = d0.rx_from_hw.status[0];
1664 
1665  if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1666  goto found_hw_owned_descriptor_x1;
1667 
1668  bi0 = to_rx[0];
1669  ASSERT (to_add >= xm->rx_buffers_to_add);
1670  fi0 = to_add[0];
1671 
1672  to_rx[0] = fi0;
1673  to_rx += 1;
1674  to_add -= 1;
1675 
1676 #if 0
1679 #endif
1680 
1681  b0 = vlib_get_buffer (vm, bi0);
1682 
1683  /*
1684  * Turn this on if you run into
1685  * "bad monkey" contexts, and you want to know exactly
1686  * which nodes they've visited...
1687  */
1688  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1689 
1690  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1691  ixge_rx_next_and_error_from_status_x1
1692  (xd, s00, s20, &next0, &error0, &flags0);
1693 
1694  next0 = is_sop ? next0 : next_index_sop;
1695  next_index_sop = next0;
1696 
1697  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1698 
1699  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1700  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1701 
1702  b0->error = node->errors[error0];
1703 
1704  len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1705  n_bytes += len0;
1706  n_packets += is_eop0;
1707 
1708  /* Give new buffer to hardware. */
1709  f0 = vlib_get_buffer (vm, fi0);
1710  d0.rx_to_hw.tail_address = vlib_buffer_get_pa (vm, f0);
1711  d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1712  d[0].as_u32x4 = d0.as_u32x4;
1713 
1714  d += 1;
1715  n_descriptors_left -= 1;
1716 
1717  /* Point to either l2 or l3 header depending on next. */
1718  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1719  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1720  b0->current_length = len0 - l3_offset0;
1721  b0->current_data = l3_offset0;
1722 
1723  b_last->next_buffer = is_sop ? ~0 : bi0;
1724  bi_last = bi0;
1725  b_last = b0;
1726 
1727  bi_sop = is_sop ? bi0 : bi_sop;
1728 
1729  if (CLIB_DEBUG > 0 && is_eop0)
1730  {
1731  u8 *msg =
1732  vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1733  ASSERT (!msg);
1734  }
1735 
1736  if (0) /* "Dave" version */
1737  {
1738  if (is_eop0)
1739  {
1740  to_next[0] = bi_sop;
1741  to_next++;
1742  n_left_to_next--;
1743 
1744  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1745  to_next, n_left_to_next,
1746  bi_sop, next0);
1747  }
1748  }
1749  if (1) /* "Eliot" version */
1750  {
1751  if (PREDICT_TRUE (next0 == next_index))
1752  {
1753  to_next[0] = bi_sop;
1754  to_next += is_eop0;
1755  n_left_to_next -= is_eop0;
1756  }
1757  else
1758  {
1759  if (next0 != next_index && is_eop0)
1760  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1761 
1762  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1763  next_index = next0;
1764  vlib_get_next_frame (vm, node, next_index,
1765  to_next, n_left_to_next);
1766  }
1767  }
1768  is_sop = is_eop0;
1769  }
1770  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1771  }
1772 
1773 found_hw_owned_descriptor_x1:
1774  if (n_descriptors_left > 0)
1775  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1776 
1777  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1778 
1779  {
1780  u32 n_done = n_descriptors - n_descriptors_left;
1781 
1782  if (n_trace > 0 && n_done > 0)
1783  {
1784  u32 n = clib_min (n_trace, n_done);
1785  ixge_rx_trace (xm, xd, dq,
1786  d_trace_save,
1787  d_trace_buffers,
1788  &dq->descriptors[start_descriptor_index], n);
1789  vlib_set_trace_count (vm, node, n_trace - n);
1790  }
1791  if (d_trace_save)
1792  {
1793  _vec_len (d_trace_save) = 0;
1794  _vec_len (d_trace_buffers) = 0;
1795  }
1796 
1797  /* Don't keep a reference to b_last if we don't have to.
1798  Otherwise we can over-write a next_buffer pointer after having
1799  already enqueued a packet. */
1800  if (is_sop)
1801  {
1802  b_last->next_buffer = ~0;
1803  bi_last = ~0;
1804  }
1805 
1806  dq->rx.n_descriptors_done_this_call = n_done;
1807  dq->rx.n_descriptors_done_total += n_done;
1808  dq->rx.is_start_of_packet = is_sop;
1809  dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1810  dq->rx.saved_last_buffer_index = bi_last;
1811  dq->rx.saved_start_of_packet_next_index = next_index_sop;
1812  dq->rx.next_index = next_index;
1813  dq->rx.n_bytes += n_bytes;
1814 
1815  return n_packets;
1816  }
1817 }
1818 
1819 static uword
1820 ixge_rx_queue (ixge_main_t * xm,
1821  ixge_device_t * xd,
1822  vlib_node_runtime_t * node, u32 queue_index)
1823 {
1824  ixge_dma_queue_t *dq =
1825  vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
1826  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, queue_index);
1827  uword n_packets = 0;
1828  u32 hw_head_index, sw_head_index;
1829 
1830  /* One time initialization. */
1831  if (!dq->rx.node)
1832  {
1833  dq->rx.node = node;
1834  dq->rx.is_start_of_packet = 1;
1835  dq->rx.saved_start_of_packet_buffer_index = ~0;
1836  dq->rx.saved_last_buffer_index = ~0;
1837  }
1838 
1839  dq->rx.next_index = node->cached_next_index;
1840 
1841  dq->rx.n_descriptors_done_total = 0;
1842  dq->rx.n_descriptors_done_this_call = 0;
1843  dq->rx.n_bytes = 0;
1844 
1845  /* Fetch head from hardware and compare to where we think we are. */
1846  hw_head_index = dr->head_index;
1847  sw_head_index = dq->head_index;
1848 
1849  if (hw_head_index == sw_head_index)
1850  goto done;
1851 
1852  if (hw_head_index < sw_head_index)
1853  {
1854  u32 n_tried = dq->n_descriptors - sw_head_index;
1855  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1856  sw_head_index =
1857  ixge_ring_add (dq, sw_head_index,
1858  dq->rx.n_descriptors_done_this_call);
1859 
1860  if (dq->rx.n_descriptors_done_this_call != n_tried)
1861  goto done;
1862  }
1863  if (hw_head_index >= sw_head_index)
1864  {
1865  u32 n_tried = hw_head_index - sw_head_index;
1866  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1867  sw_head_index =
1868  ixge_ring_add (dq, sw_head_index,
1869  dq->rx.n_descriptors_done_this_call);
1870  }
1871 
1872 done:
1873  dq->head_index = sw_head_index;
1874  dq->tail_index =
1875  ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
1876 
1877  /* Give tail back to hardware. */
1878  CLIB_MEMORY_BARRIER ();
1879 
1880  dr->tail_index = dq->tail_index;
1881 
1882  vlib_increment_combined_counter (vnet_main.
1883  interface_main.combined_sw_if_counters +
1884  VNET_INTERFACE_COUNTER_RX,
1885  0 /* thread_index */ ,
1886  xd->vlib_sw_if_index, n_packets,
1887  dq->rx.n_bytes);
1888 
1889  return n_packets;
1890 }
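/* Note (editorial): RX is head-chasing: software consumes descriptors
   from its cached head up to the hardware head (in at most two no-wrap
   chunks), then advances the tail by the number of descriptors it
   refilled, handing those slots back to the NIC. */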
1891 
1892 static void
1893 ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
1894 {
1895  vlib_main_t *vm = xm->vlib_main;
1896  ixge_regs_t *r = xd->regs;
1897 
1898  if (i != 20)
1899  {
1900  ELOG_TYPE_DECLARE (e) =
1901  {
1902  .function = (char *) __FUNCTION__,.format =
1903  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
1904  16,.enum_strings =
1905  {
1906  "flow director",
1907  "rx miss",
1908  "pci exception",
1909  "mailbox",
1910  "link status change",
1911  "linksec key exchange",
1912  "manageability event",
1913  "reserved23",
1914  "sdp0",
1915  "sdp1",
1916  "sdp2",
1917  "sdp3",
1918  "ecc", "descriptor handler error", "tcp timer", "other",},};
1919  struct
1920  {
1921  u8 instance;
1922  u8 index;
1923  } *ed;
1924  ed = ELOG_DATA (&vm->elog_main, e);
1925  ed->instance = xd->device_index;
1926  ed->index = i - 16;
1927  }
1928  else
1929  {
1930  u32 v = r->xge_mac.link_status;
1931  uword is_up = (v & (1 << 30)) != 0;
1932 
1933  ELOG_TYPE_DECLARE (e) =
1934  {
1935  .function = (char *) __FUNCTION__,.format =
1936  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
1937  struct
1938  {
1939  u32 instance, link_status;
1940  } *ed;
1941  ed = ELOG_DATA (&vm->elog_main, e);
1942  ed->instance = xd->device_index;
1943  ed->link_status = v;
1944  xd->link_status_at_last_link_change = v;
1945 
1946  vlib_process_signal_event (vm, ixge_process_node.index,
1947  EVENT_SET_FLAGS,
1948  ((is_up << 31) | xd->vlib_hw_if_index));
1949  }
1950 }
1951 
1952 always_inline u32
1953 clean_block (u32 * b, u32 * t, u32 n_left)
1954 {
1955  u32 *t0 = t;
1956 
1957  while (n_left >= 4)
1958  {
1959  u32 bi0, bi1, bi2, bi3;
1960 
1961  t[0] = bi0 = b[0];
1962  b[0] = 0;
1963  t += bi0 != 0;
1964 
1965  t[0] = bi1 = b[1];
1966  b[1] = 0;
1967  t += bi1 != 0;
1968 
1969  t[0] = bi2 = b[2];
1970  b[2] = 0;
1971  t += bi2 != 0;
1972 
1973  t[0] = bi3 = b[3];
1974  b[3] = 0;
1975  t += bi3 != 0;
1976 
1977  b += 4;
1978  n_left -= 4;
1979  }
1980 
1981  while (n_left > 0)
1982  {
1983  u32 bi0;
1984 
1985  t[0] = bi0 = b[0];
1986  b[0] = 0;
1987  t += bi0 != 0;
1988  b += 1;
1989  n_left -= 1;
1990  }
1991 
1992  return t - t0;
1993 }
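/* Note (editorial): clean_block compacts the non-zero buffer indices
   from b[] into t[], zeroing b[] as it goes, and returns the number
   copied; zero entries mark ring slots whose buffers were never
   assigned or were already reclaimed. */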
1994 
1995 static void
1996 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
1997 {
1998  vlib_main_t *vm = xm->vlib_main;
1999  ixge_dma_queue_t *dq =
2000  vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
2001  u32 n_clean, *b, *t, *t0;
2002  i32 n_hw_owned_descriptors;
2003  i32 first_to_clean, last_to_clean;
2004  u64 hwbp_race = 0;
2005 
2006  /* Handle case where head write back pointer update
2007  * arrives after the interrupt during high PCI bus loads.
2008  */
2009  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2010  dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
2011  {
2012  hwbp_race++;
2013  if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
2014  {
2015  ELOG_TYPE_DECLARE (e) =
2016  {
2017  .function = (char *) __FUNCTION__,.format =
2018  "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
2019  = "i4i4i4i4",};
2020  struct
2021  {
2022  u32 instance, head_index, tail_index, n_buffers_on_ring;
2023  } *ed;
2024  ed = ELOG_DATA (&vm->elog_main, e);
2025  ed->instance = xd->device_index;
2026  ed->head_index = dq->head_index;
2027  ed->tail_index = dq->tail_index;
2028  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2029  }
2030  }
2031 
2032  dq->head_index = dq->tx.head_index_write_back[0];
2033  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
2034  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
2035  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2036 
2037  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
2038  {
2039  ELOG_TYPE_DECLARE (e) =
2040  {
2041  .function = (char *) __FUNCTION__,.format =
2042  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
2043  = "i4i4i4i4i4",};
2044  struct
2045  {
2046  u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2047  } *ed;
2048  ed = ELOG_DATA (&vm->elog_main, e);
2049  ed->instance = xd->device_index;
2050  ed->head_index = dq->head_index;
2051  ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2052  ed->n_clean = n_clean;
2053  ed->retries = hwbp_race;
2054  }
2055 
2056  /*
2057  * This function used to wait until hardware owned zero descriptors.
2058  * At high PPS rates, that doesn't happen until the TX ring is
2059  * completely full of descriptors which need to be cleaned up.
2060  * That, in turn, causes TX ring-full drops and/or long RX service
2061  * interruptions.
2062  */
2063  if (n_clean == 0)
2064  return;
2065 
2066  /* Clean the n_clean descriptors prior to the reported hardware head */
2067  last_to_clean = dq->head_index - 1;
2068  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
2069  last_to_clean;
2070 
2071  first_to_clean = (last_to_clean) - (n_clean - 1);
2072  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
2073  first_to_clean;
2074 
2075  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2076  t0 = t = xm->tx_buffers_pending_free;
2077  b = dq->descriptor_buffer_indices + first_to_clean;
2078 
2079  /* Wrap case: clean from first to end, then start to last */
2080  if (first_to_clean > last_to_clean)
2081  {
2082  t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
2083  first_to_clean = 0;
2084  b = dq->descriptor_buffer_indices;
2085  }
2086 
2087  /* Typical case: clean from first to last */
2088  if (first_to_clean <= last_to_clean)
2089  t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
2090 
2091  if (t > t0)
2092  {
2093  u32 n = t - t0;
2094  vlib_buffer_free_no_next (vm, t0, n);
2095  ASSERT (dq->tx.n_buffers_on_ring >= n);
2096  dq->tx.n_buffers_on_ring -= n;
2097  _vec_len (xm->tx_buffers_pending_free) = 0;
2098  }
2099 }
2100 
2101 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
2102 always_inline uword
2103 ixge_interrupt_is_rx_queue (uword i)
2104 {
2105  return i < 8;
2106 }
2107 
2108 always_inline uword
2109 ixge_interrupt_is_tx_queue (uword i)
2110 {
2111  return i >= 8 && i < 16;
2112 }
2113 
2114 always_inline uword
2115 ixge_tx_queue_to_interrupt (uword i)
2116 {
2117  return 8 + i;
2118 }
2119 
2120 always_inline uword
2121 ixge_rx_queue_to_interrupt (uword i)
2122 {
2123  return 0 + i;
2124 }
2125 
2126 always_inline uword
2127 ixge_interrupt_rx_queue (uword i)
2128 {
2129  ASSERT (ixge_interrupt_is_rx_queue (i));
2130  return i - 0;
2131 }
2132 
2133 always_inline uword
2134 ixge_interrupt_tx_queue (uword i)
2135 {
2136  ASSERT (ixge_interrupt_is_tx_queue (i));
2137  return i - 8;
2138 }
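/* Note (editorial, inferred from the handlers above and the enum
   strings in ixge_interrupt): interrupt status bits 0-7 map to RX
   queues, bits 8-15 to TX queues, and bits 16 and up are miscellaneous
   causes; bit 20 (enum index 4, "link status change") is special-cased. */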
2139 
2140 static uword
2141 ixge_device_input (ixge_main_t * xm,
2142  ixge_device_t * xd, vlib_node_runtime_t * node)
2143 {
2144  ixge_regs_t *r = xd->regs;
2145  u32 i, s;
2146  uword n_rx_packets = 0;
2147 
2148  s = r->interrupt.status_write_1_to_clear;
2149  if (s)
2150  r->interrupt.status_write_1_to_clear = s;
2151 
2152  /* *INDENT-OFF* */
2153  foreach_set_bit (i, s, ({
2154  if (ixge_interrupt_is_rx_queue (i))
2155  n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
2156 
2157  else if (ixge_interrupt_is_tx_queue (i))
2158  ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
2159 
2160  else
2161  ixge_interrupt (xm, xd, i);
2162  }));
2163  /* *INDENT-ON* */
2164 
2165  return n_rx_packets;
2166 }
2167 
2168 static uword
2169 ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
2170 {
2171  ixge_main_t *xm = &ixge_main;
2172  ixge_device_t *xd;
2173  uword n_rx_packets = 0;
2174 
2175  if (node->state == VLIB_NODE_STATE_INTERRUPT)
2176  {
2177  uword i;
2178 
2179  /* Loop over devices with interrupts. */
2180  /* *INDENT-OFF* */
2181  foreach_set_bit (i, node->runtime_data[0], ({
2182  xd = vec_elt_at_index (xm->devices, i);
2183  n_rx_packets += ixge_device_input (xm, xd, node);
2184 
2185  /* Re-enable interrupts since we're going to stay in interrupt mode. */
2186  if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2187  xd->regs->interrupt.enable_write_1_to_set = ~0;
2188  }));
2189  /* *INDENT-ON* */
2190 
2191  /* Clear mask of devices with pending interrupts. */
2192  node->runtime_data[0] = 0;
2193  }
2194  else
2195  {
2196  /* Poll all devices for input/interrupts. */
2197  vec_foreach (xd, xm->devices)
2198  {
2199  n_rx_packets += ixge_device_input (xm, xd, node);
2200 
2201  /* Re-enable interrupts when switching out of polling mode. */
2202  if (node->flags &
2203  VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
2204  xd->regs->interrupt.enable_write_1_to_set = ~0;
2205  }
2206  }
2207 
2208  return n_rx_packets;
2209 }
2210 
2211 static char *ixge_error_strings[] = {
2212 #define _(n,s) s,
2213  foreach_ixge_error
2214 #undef _
2215 };
2216 
2217 /* *INDENT-OFF* */
2218 VLIB_REGISTER_NODE (ixge_input_node, static) = {
2219  .function = ixge_input,
2220  .type = VLIB_NODE_TYPE_INPUT,
2221  .name = "ixge-input",
2222  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
2223 
2224  /* Will be enabled if/when hardware is detected. */
2225  .state = VLIB_NODE_STATE_DISABLED,
2226 
2227  .format_buffer = format_ethernet_header_with_length,
2228  .format_trace = format_ixge_rx_dma_trace,
2229 
2230  .n_errors = IXGE_N_ERROR,
2231  .error_strings = ixge_error_strings,
2232 
2233  .n_next_nodes = IXGE_RX_N_NEXT,
2234  .next_nodes = {
2235  [IXGE_RX_NEXT_DROP] = "error-drop",
2236  [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
2237  [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
2238  [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
2239  },
2240 };
2241 
2242 /* *INDENT-ON* */
2243 
2244 static u8 *
2245 format_ixge_device_name (u8 * s, va_list * args)
2246 {
2247  vlib_main_t *vm = vlib_get_main ();
2248  u32 i = va_arg (*args, u32);
2249  ixge_main_t *xm = &ixge_main;
2250  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
2251  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
2252  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2253  addr->domain, addr->bus, addr->slot, addr->function);
2254 }
2255 
2256 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2257 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2258 
2259 static u8 ixge_counter_flags[] = {
2260 #define _(a,f) 0,
2261 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2262  foreach_ixge_counter
2263 #undef _
2264 #undef _64
2265 };
2266 
2267 static void
2268 ixge_update_counters (ixge_device_t * xd)
2269 {
2270  /* Byte offset for counter registers. */
2271  static u32 reg_offsets[] = {
2272 #define _(a,f) (a) / sizeof (u32),
2273 #define _64(a,f) _(a,f)
2274  foreach_ixge_counter
2275 #undef _
2276 #undef _64
2277  };
2278  volatile u32 *r = (volatile u32 *) xd->regs;
2279  int i;
2280 
2281  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
2282  {
2283  u32 o = reg_offsets[i];
2284  xd->counters[i] += r[o];
2285  if (!(ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ))
2286  r[o] = 0;
2287  if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
2288  xd->counters[i] += (u64) r[o + 1] << (u64) 32;
2289  }
2290 }
2291 
2292 static u8 *
2293 format_ixge_device_id (u8 * s, va_list * args)
2294 {
2295  u32 device_id = va_arg (*args, u32);
2296  char *t = 0;
2297  switch (device_id)
2298  {
2299 #define _(f,n) case n: t = #f; break;
2300  foreach_ixge_pci_device_id;
2301 #undef _
2302  default:
2303  t = 0;
2304  break;
2305  }
2306  if (t == 0)
2307  s = format (s, "unknown 0x%x", device_id);
2308  else
2309  s = format (s, "%s", t);
2310  return s;
2311 }
2312 
2313 static u8 *
2314 format_ixge_link_status (u8 * s, va_list * args)
2315 {
2316  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
2317  u32 v = xd->link_status_at_last_link_change;
2318 
2319  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2320 
2321  {
2322  char *modes[] = {
2323  "1g", "10g parallel", "10g serial", "autoneg",
2324  };
2325  char *speeds[] = {
2326  "unknown", "100m", "1g", "10g",
2327  };
2328  s = format (s, ", mode %s, speed %s",
2329  modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
2330  }
2331 
2332  return s;
2333 }
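/* Editor's note: this decodes the cached link-status (LINKS) register
 * value: bit 30 is link-up, bits [27:26] index the mode table and bits
 * [29:28] the speed table above. */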
2334 
2335 static u8 *
2336 format_ixge_device (u8 * s, va_list * args)
2337 {
2338  u32 dev_instance = va_arg (*args, u32);
2339  CLIB_UNUSED (int verbose) = va_arg (*args, int);
2340  vlib_main_t *vm = vlib_get_main ();
2341  ixge_main_t *xm = &ixge_main;
2342  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
2343  ixge_phy_t *phy = xd->phys + xd->phy_index;
2344  u32 indent = format_get_indent (s);
2345 
2346  ixge_update_counters (xd);
2347  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
2348 
2349  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
2350  format_ixge_device_id, xd->device_id,
2351  format_white_space, indent + 2, format_ixge_link_status, xd);
2352 
2353  {
2354 
2355  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, xd->pci_dev_handle);
2356  vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
2357 
2358  if (d)
2359  s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
2360  format_vlib_pci_link_speed, d);
2361  }
2362 
2363  s = format (s, "\n%U", format_white_space, indent + 2);
2364  if (phy->mdio_address != ~0)
2365  s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
2366  else if (xd->sfp_eeprom.id == SFP_ID_SFP)
2367  s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
2368  else
2369  s = format (s, "PHY not found");
2370 
2371  /* FIXME */
2372  {
2373  ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
2374  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
2375  u32 hw_head_index = dr->head_index;
2376  u32 sw_head_index = dq->head_index;
2377  u32 nitems;
2378 
2379  nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
2380  s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
2381  format_white_space, indent + 2, nitems, dq->n_descriptors);
2382 
2383  s = format (s, "\n%U%d buffers in driver rx cache",
2384  format_white_space, indent + 2,
2385  vec_len (xm->rx_buffers_to_add));
2386 
2387  s = format (s, "\n%U%d buffers on tx queue 0 ring",
2388  format_white_space, indent + 2,
2389  xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
2390  }
2391  {
2392  u32 i;
2393  u64 v;
2394  static char *names[] = {
2395 #define _(a,f) #f,
2396 #define _64(a,f) _(a,f)
2397  foreach_ixge_counter
2398 #undef _
2399 #undef _64
2400  };
2401 
2402  for (i = 0; i < ARRAY_LEN (names); i++)
2403  {
2404  v = xd->counters[i] - xd->counters_last_clear[i];
2405  if (v != 0)
2406  s = format (s, "\n%U%-40U%16Ld",
2407  format_white_space, indent + 2,
2408  format_c_identifier, names[i], v);
2409  }
2410  }
2411 
2412  return s;
2413 }
2414 
2415 static void
2416 ixge_clear_hw_interface_counters (u32 instance)
2417 {
2418  ixge_main_t *xm = &ixge_main;
2419  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
2420  ixge_update_counters (xd);
2421  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2422 }
2423 
2424 /*
2425  * Dynamically redirect all pkts from a specific interface
2426  * to the specified node
2427  */
2428 static void
2429 ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2430  u32 node_index)
2431 {
2432  ixge_main_t *xm = &ixge_main;
2433  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
2434  ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2435 
2436  /* Shut off redirection */
2437  if (node_index == ~0)
2438  {
2439  xd->per_interface_next_index = node_index;
2440  return;
2441  }
2442 
2443  xd->per_interface_next_index =
2444  vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2445 }
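/* Usage sketch (editor's note, not part of ixge.c): given some registered
 * graph node index my_node_index (hypothetical), redirect all rx packets
 * from an interface to it, or pass ~0 to restore normal dispatch:
 *
 *   ixge_set_interface_next_node (vnm, hw_if_index, my_node_index);
 *   ixge_set_interface_next_node (vnm, hw_if_index, ~0);
 */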
2446 
2447 
2448 /* *INDENT-OFF* */
2449 VNET_DEVICE_CLASS (ixge_device_class) = {
2450  .name = "ixge",
2451  .tx_function = ixge_interface_tx,
2452  .format_device_name = format_ixge_device_name,
2453  .format_device = format_ixge_device,
2454  .format_tx_trace = format_ixge_tx_dma_trace,
2455  .clear_counters = ixge_clear_hw_interface_counters,
2456  .admin_up_down_function = ixge_interface_admin_up_down,
2457  .rx_redirect_to_node = ixge_set_interface_next_node,
2458 };
2459 /* *INDENT-ON* */
2460 
2461 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
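/* Editor's note: ixge_dma_init below rounds this size to a 1 kbyte
 * multiple and programs it into rx_split_control in 1 kbyte units, so it
 * must stay below 32 kbytes (see the ASSERT there). */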
2462 
2463 static clib_error_t *
2464 ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2465 {
2466  ixge_main_t *xm = &ixge_main;
2467  vlib_main_t *vm = xm->vlib_main;
2468  ixge_dma_queue_t *dq;
2469  clib_error_t *error = 0;
2470 
2471  vec_validate (xd->dma_queues[rt], queue_index);
2472  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2473 
2474  if (!xm->n_descriptors_per_cache_line)
2475  xm->n_descriptors_per_cache_line =
2476  CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
2477 
2478  if (!xm->n_bytes_in_rx_buffer)
2479  xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2480  xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
2481 
2482  if (!xm->n_descriptors[rt])
2483  xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2484 
2485  dq->queue_index = queue_index;
2486  dq->n_descriptors =
2487  round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
2488  dq->head_index = dq->tail_index = 0;
2489 
2490  dq->descriptors = vlib_physmem_alloc_aligned (vm, dq->n_descriptors *
2491  sizeof (dq->descriptors[0]),
2492  128 /* per chip spec */ );
2493  if (!dq->descriptors)
2494  return vlib_physmem_last_error (vm);
2495 
2496  clib_memset (dq->descriptors, 0,
2497  dq->n_descriptors * sizeof (dq->descriptors[0]));
2498  vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2499 
2500  if (rt == VLIB_RX)
2501  {
2502  u32 n_alloc, i;
2503 
2504  n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
2505  vec_len (dq->descriptor_buffer_indices));
2506  ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2507  for (i = 0; i < n_alloc; i++)
2508  {
2509  dq->descriptors[i].rx_to_hw.tail_address =
2510  vlib_buffer_get_pa
2511  (vm, vlib_get_buffer (vm, dq->descriptor_buffer_indices[i]));
2512  }
2513  }
2514  else
2515  {
2516  u32 i;
2517 
2518  dq->tx.head_index_write_back =
2519  vlib_physmem_alloc (vm, CLIB_CACHE_LINE_BYTES);
2520  if (!dq->tx.head_index_write_back)
2521  return vlib_physmem_last_error (vm);
2522 
2523  for (i = 0; i < dq->n_descriptors; i++)
2524  dq->descriptors[i].tx = xm->tx_descriptor_template;
2525 
2526  vec_validate (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2527  }
2528 
2529  {
2530  ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
2531  u64 a;
2532 
2533  a = vlib_physmem_get_pa (vm, dq->descriptors);
2534  dr->descriptor_address[0] = a & 0xFFFFFFFF;
2535  dr->descriptor_address[1] = a >> (u64) 32;
2536  dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2537  dq->head_index = dq->tail_index = 0;
2538 
2539  if (rt == VLIB_RX)
2540  {
2541  ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2542  dr->rx_split_control =
2543  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2544  | ( /* lo free descriptor threshold (units of 64 descriptors) */
2545  (1 << 22)) | ( /* descriptor type: advanced one buffer */
2546  (1 << 25)) | ( /* drop if no descriptors available */
2547  (1 << 28)));
2548 
2549  /* Give hardware all but last 16 cache lines' worth of descriptors. */
2550  dq->tail_index = dq->n_descriptors -
2551  16 * xm->n_descriptors_per_cache_line;
2552  }
2553  else
2554  {
2555  /* Make sure it's initialized before hardware can get to it. */
2556  dq->tx.head_index_write_back[0] = dq->head_index;
2557 
2558  a = vlib_physmem_get_pa (vm, dq->tx.head_index_write_back);
2559  dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2560  dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2561  }
2562 
2563  /* DMA on the 82599 does not work with bit [13] (rx data write relaxed
2564  ordering) or the undocumented bit [12] set. */
2565  if (rt == VLIB_RX)
2566  dr->dca_control &= ~((1 << 13) | (1 << 12));
2567 
2568  CLIB_MEMORY_BARRIER ();
2569 
2570  if (rt == VLIB_TX)
2571  {
2572  xd->regs->tx_dma_control |= (1 << 0);
2573  dr->control |= ((32 << 0) /* prefetch threshold */
2574  | (64 << 8) /* host threshold */
2575  | (0 << 16) /* writeback threshold */ );
2576  }
2577 
2578  /* Enable this queue and wait for hardware to initialize
2579  before adding to tail. */
2580  if (rt == VLIB_TX)
2581  {
2582  dr->control |= 1 << 25;
2583  while (!(dr->control & (1 << 25)))
2584  ;
2585  }
2586 
2587  /* Set head/tail indices and enable DMA. */
2588  dr->head_index = dq->head_index;
2589  dr->tail_index = dq->tail_index;
2590  }
2591 
2592  return error;
2593 }
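/* Editor's note: a worked example of the ring sizing above, assuming
 * 64-byte cache lines and 16-byte descriptors: n_descriptors_per_cache_line
 * is 64 / 16 = 4, the requested 4 * VLIB_FRAME_SIZE rx descriptors are
 * rounded to a multiple of 4, and the initial rx tail is parked 16 cache
 * lines (64 descriptors) before the ring end so software keeps a reserve
 * of descriptors it still owns. */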
2594 
2595 static u32
2596 ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
2597 {
2598  ixge_device_t *xd;
2599  ixge_regs_t *r;
2600  u32 old;
2601  ixge_main_t *xm = &ixge_main;
2602 
2603  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2604  r = xd->regs;
2605 
2606  old = r->filter_control;
2607 
2608  if (flags == ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
2609  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2610  else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
2611  r->filter_control = old & ~(1 << 9);
2612  else
2613  return ~0;
2614 
2615  return old;
2616 }
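/* Editor's note: bit 9 of filter_control is the unicast promiscuous
 * enable; the previous register value is returned so a caller could
 * restore it, and ~0 signals an unsupported flag combination. */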
2617 
2618 static void
2619 ixge_device_init (ixge_main_t * xm)
2620 {
2621  vnet_main_t *vnm = vnet_get_main ();
2622  ixge_device_t *xd;
2623 
2624  /* Reset chip(s). */
2625  vec_foreach (xd, xm->devices)
2626  {
2627  ixge_regs_t *r = xd->regs;
2628  const u32 reset_bit = (1 << 26) | (1 << 3);
2629 
2630  r->control |= reset_bit;
2631 
2632  /* No need to suspend. Timed to take ~1e-6 secs */
2633  while (r->control & reset_bit)
2634  ;
2635 
2636  /* Software loaded. */
2637  r->extended_control |= (1 << 28);
2638 
2639  ixge_phy_init (xd);
2640 
2641  /* Register ethernet interface. */
2642  {
2643  u8 addr8[6];
2644  u32 i, addr32[2];
2645  clib_error_t *error;
2646 
2647  addr32[0] = r->rx_ethernet_address0[0][0];
2648  addr32[1] = r->rx_ethernet_address0[0][1];
2649  for (i = 0; i < 6; i++)
2650  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2651 
2652  error = ethernet_register_interface
2653  (vnm, ixge_device_class.index, xd->device_index,
2654  /* ethernet address */ addr8,
2655  &xd->vlib_hw_if_index, ixge_flag_change);
2656  if (error)
2657  clib_error_report (error);
2658  }
2659 
2660  {
2661  vnet_sw_interface_t *sw =
2662  vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
2663  xd->vlib_sw_if_index = sw->sw_if_index;
2664  }
2665 
2666  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2667 
2667 
2668  xm->n_descriptors[VLIB_TX] = 20 * VLIB_FRAME_SIZE;
2669 
2670  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2671 
2672  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2673  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2674  ixge_rx_queue_to_interrupt (0)) << 0);
2675 
2676  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2677  ixge_tx_queue_to_interrupt (0)) << 8);
2678 
2679  /* No use in taking too many interrupts.
2680  Limit them to one per 3/4 ring size at line rate
2681  with min-sized packets.
2682  Not needed since the kernel/vlib main loop provides an adequate
2683  interrupt limiting scheme. */
2684  if (0)
2685  {
2686  f64 line_rate_max_pps =
2687  10e9 / (8 * (64 + /* interframe padding */ 20));
2688  ixge_throttle_queue_interrupt (r, 0,
2689  .75 * xm->n_descriptors[VLIB_RX] /
2690  line_rate_max_pps);
2691  }
2692 
2693  /* Accept all multicast and broadcast packets. Should really add them
2694  to the dst_ethernet_address register array. */
2695  r->filter_control |= (1 << 10) | (1 << 8);
2696 
2697  /* Enable frames up to size in mac frame size register. */
2698  r->xge_mac.control |= 1 << 2;
2699  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2700 
2701  /* Enable all interrupts. */
2702  if (!IXGE_ALWAYS_POLL)
2703  r->interrupt.enable_write_1_to_set = ~0;
2704  }
2705 }
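/* Editor's note: the MAC address bytes above are extracted from the two
 * rx_ethernet_address0 words least-significant byte first; e.g. a
 * hypothetical register pair 0x44332211 / 0x00006655 yields the address
 * 11:22:33:44:55:66. */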
2706 
2707 static uword
2708 ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
2709 {
2710  vnet_main_t *vnm = vnet_get_main ();
2711  ixge_main_t *xm = &ixge_main;
2712  ixge_device_t *xd;
2713  uword event_type, *event_data = 0;
2714  f64 timeout, link_debounce_deadline;
2715 
2716  ixge_device_init (xm);
2717 
2718  /* Clear all counters. */
2719  vec_foreach (xd, xm->devices)
2720  {
2721  ixge_update_counters (xd);
2722  clib_memset (xd->counters, 0, sizeof (xd->counters));
2723  }
2724 
2725  timeout = 30.0;
2726  link_debounce_deadline = 1e70;
2727 
2728  while (1)
2729  {
2730  /* 36-bit stat counters could overflow in ~50 secs.
2731  We poll every 30 secs to be conservative. */
2732  vlib_process_wait_for_event_or_clock (vm, timeout);
2733 
2734  event_type = vlib_process_get_events (vm, &event_data);
2735 
2736  switch (event_type)
2737  {
2738  case EVENT_SET_FLAGS:
2739  /* 1 ms */
2740  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2741  timeout = 1e-3;
2742  break;
2743 
2744  case ~0:
2745  /* No events found: timer expired. */
2746  if (vlib_time_now (vm) > link_debounce_deadline)
2747  {
2748  vec_foreach (xd, xm->devices)
2749  {
2750  ixge_regs_t *r = xd->regs;
2751  u32 v = r->xge_mac.link_status;
2752  uword is_up = (v & (1 << 30)) != 0;
2753 
2754  vnet_hw_interface_set_flags
2755  (vnm, xd->vlib_hw_if_index,
2756  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2757  }
2758  link_debounce_deadline = 1e70;
2759  timeout = 30.0;
2760  }
2761  break;
2762 
2763  default:
2764  ASSERT (0);
2765  }
2766 
2767  if (event_data)
2768  _vec_len (event_data) = 0;
2769 
2770  /* Query stats every 30 secs. */
2771  {
2772  f64 now = vlib_time_now (vm);
2773  if (now - xm->time_last_stats_update > 30)
2774  {
2775  xm->time_last_stats_update = now;
2776  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2777  }
2778  }
2779  }
2780 
2781  return 0;
2782 }
2783 
2784 static vlib_node_registration_t ixge_process_node = {
2785  .function = ixge_process,
2786  .type = VLIB_NODE_TYPE_PROCESS,
2787  .name = "ixge-process",
2788 };
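/* Editor's note: unlike ixge_input_node this registration is not run at
 * load time; ixge_pci_init below calls vlib_register_node for it when the
 * first device is discovered, so the process only exists when hardware is
 * present. */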
2789 
2790 clib_error_t *
2791 ixge_init (vlib_main_t * vm)
2792 {
2793  ixge_main_t *xm = &ixge_main;
2794 
2795  xm->vlib_main = vm;
2796  clib_memset (&xm->tx_descriptor_template, 0,
2797  sizeof (xm->tx_descriptor_template));
2798  clib_memset (&xm->tx_descriptor_template_mask, 0,
2799  sizeof (xm->tx_descriptor_template_mask));
2800  xm->tx_descriptor_template.status0 =
2801  (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2802  IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2803  IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
2804  xm->tx_descriptor_template_mask.status0 = 0xffff;
2805  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2806 
2806 
2807  xm->tx_descriptor_template_mask.status0 &=
2808  ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2809  | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2810  xm->tx_descriptor_template_mask.status1 &=
2811  ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
2812  return 0;
2813 }
2814 
2815 /* *INDENT-OFF* */
2816 VLIB_INIT_FUNCTION (ixge_init) =
2817 {
2818  .runs_before = VLIB_INITS("pci_bus_init"),
2819 };
2820 /* *INDENT-ON* */
2821 
2822 
2823 static void
2824 ixge_pci_intr_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h)
2825 {
2826  uword private_data = vlib_pci_get_private_data (vm, h);
2827 
2829 
2830  /* Let node know which device is interrupting. */
2831  {
2832  vlib_node_runtime_t *rt =
2833  vlib_node_get_runtime (vm, ixge_input_node.index);
2834  rt->runtime_data[0] |= 1 << private_data;
2835  }
2836 }
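/* Editor's note: the handler marks the input node interrupt-pending and
 * records which device fired by setting bit 'private_data' (the device
 * index stored via vlib_pci_set_private_data in ixge_pci_init) in
 * runtime_data[0], the bitmask that ixge_input walks with
 * foreach_set_bit. */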
2837 
2838 static clib_error_t *
2839 ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
2840 {
2841  ixge_main_t *xm = &ixge_main;
2842  clib_error_t *error = 0;
2843  void *r;
2844  ixge_device_t *xd;
2845  vlib_pci_addr_t *addr = vlib_pci_get_addr (vm, h);
2846  vlib_pci_device_info_t *d = vlib_pci_get_device_info (vm, addr, 0);
2847 
2848  error = vlib_pci_map_region (vm, h, 0, &r);
2849  if (error)
2850  return error;
2851 
2852  vec_add2 (xm->devices, xd, 1);
2853 
2854  if (vec_len (xm->devices) == 1)
2855  {
2856  ixge_input_node.function = ixge_input;
2857  }
2858 
2859  xd->pci_dev_handle = h;
2860  xd->device_id = d->device_id;
2861  xd->regs = r;
2862  xd->device_index = xd - xm->devices;
2863  xd->pci_function = addr->function;
2864  xd->per_interface_next_index = ~0;
2865 
2866  vlib_pci_set_private_data (vm, h, xd->device_index);
2867 
2868  /* Chip found so enable node. */
2869  {
2870  vlib_node_set_state (vm, ixge_input_node.index,
2871  (IXGE_ALWAYS_POLL
2872  ? VLIB_NODE_STATE_POLLING
2873  : VLIB_NODE_STATE_INTERRUPT));
2874 
2875  //dev->private_data = xd->device_index;
2876  }
2877 
2878  if (vec_len (xm->devices) == 1)
2879  {
2880  vlib_register_node (vm, &ixge_process_node);
2881  xm->process_node_index = ixge_process_node.index;
2882  }
2883 
2884  error = vlib_pci_bus_master_enable (vm, h);
2885 
2886  if (error)
2887  return error;
2888 
2889  return vlib_pci_intr_enable (vm, h);
2890 }
2891 
2892 /* *INDENT-OFF* */
2893 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2894  .init_function = ixge_pci_init,
2895  .interrupt_handler = ixge_pci_intr_handler,
2896  .supported_devices = {
2897 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2898  foreach_ixge_pci_device_id
2899 #undef _
2900  { 0 },
2901  },
2902 };
2903 /* *INDENT-ON* */
2904 
2905 void
2906 ixge_set_next_node (ixge_rx_next_t next, char *name)
2907 {
2908  vlib_node_registration_t *r = &ixge_input_node;
2909 
2910  switch (next)
2911  {
2912  case IXGE_RX_NEXT_IP4_INPUT:
2913  case IXGE_RX_NEXT_IP6_INPUT:
2914  case IXGE_RX_NEXT_ETHERNET_INPUT:
2915  r->next_nodes[next] = name;
2916  break;
2917 
2918  default:
2919  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2920  break;
2921  }
2922 }
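/* Usage sketch (editor's note, not part of ixge.c): another plugin could
 * steer ip4 packets into its own node, assuming "my-ip4-feature" is the
 * name of a registered node:
 *
 *   ixge_set_next_node (IXGE_RX_NEXT_IP4_INPUT, "my-ip4-feature");
 */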
2923 
2924 /* *INDENT-OFF* */
2925 VLIB_PLUGIN_REGISTER () = {
2926  .version = VPP_BUILD_VER,
2927  .default_disabled = 1,
2928  .description = "Intel 82599 Family Native Driver (experimental)",
2929 };
2930 #endif
2931 
2932 /* *INDENT-ON* */
2933 
2934 /*
2935  * fd.io coding-style-patch-verification: ON
2936  *
2937  * Local Variables:
2938  * eval: (c-set-style "gnu")
2939  * End:
2940  */