ixge.c (FD.io VPP v18.01.2-1-g9b554f3)
1 /*
2  * Copyright (c) 2016 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /*
17  * WARNING!
18  * This driver is not intended for production use and it is unsupported.
19  * It is provided for educational use only.
20  * Please use supported DPDK driver instead.
21  */
22 
23 #if __x86_64__ || __i386__
24 #include <vppinfra/vector.h>
25 
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver won't really work, missing u32x4
28 typedef unsigned long long u32x4;
29 #endif
30 
31 #include <vlib/vlib.h>
32 #include <vlib/unix/unix.h>
33 #include <vlib/pci/pci.h>
34 #include <vnet/vnet.h>
35 #include <ixge/ixge.h>
36 #include <vnet/ethernet/ethernet.h>
37 #include <vnet/plugin/plugin.h>
38 #include <vpp/app/version.h>
39 
40 #define IXGE_ALWAYS_POLL 0
41 
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
44 
45 #define PCI_VENDOR_ID_INTEL 0x8086
46 
47 /* 10 GIG E (XGE) PHY IEEE 802.3 clause 45 definitions. */
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
54 
54 
55 ixge_main_t ixge_main;
56 static vlib_node_registration_t ixge_input_node;
57 static vlib_node_registration_t ixge_process_node;
58 
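/* Editor's note: the semaphore helpers below implement the 82599
   software semaphore handshake: bit 0 of software_semaphore is the
   hardware-arbitrated take bit and bit 1 is the software-ownership
   confirm bit, which is why release simply clears both low bits.
   Exact semantics are per the device datasheet; this is a reading
   aid, not a spec. */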
59 static void
60 ixge_semaphore_get (ixge_device_t * xd)
61 {
62  ixge_main_t *xm = &ixge_main;
63  vlib_main_t *vm = xm->vlib_main;
64  ixge_regs_t *r = xd->regs;
65  u32 i;
66 
67  i = 0;
68  while (!(r->software_semaphore & (1 << 0)))
69  {
70  if (i > 0)
71  vlib_process_suspend (vm, 100e-6);
72  i++;
73  }
74  do
75  {
76  r->software_semaphore |= 1 << 1;
77  }
78  while (!(r->software_semaphore & (1 << 1)));
79 }
80 
81 static void
82 ixge_semaphore_release (ixge_device_t * xd)
83 {
84  ixge_regs_t *r = xd->regs;
85  r->software_semaphore &= ~3;
86 }
87 
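/* Editor's note: software/firmware synchronization uses paired claim
   bits in software_firmware_sync: software bits occupy the low bit
   positions and the matching firmware bits are the same mask shifted
   left by 5 (hence fw_mask = sw_mask << 5 below).  The claim is
   retried every 10 ms until firmware has dropped its bit. */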
88 static void
89 ixge_software_firmware_sync (ixge_device_t * xd, u32 sw_mask)
90 {
91  ixge_main_t *xm = &ixge_main;
92  vlib_main_t *vm = xm->vlib_main;
93  ixge_regs_t *r = xd->regs;
94  u32 fw_mask = sw_mask << 5;
95  u32 m, done = 0;
96 
97  while (!done)
98  {
99  ixge_semaphore_get (xd);
100  m = r->software_firmware_sync;
101  done = (m & fw_mask) == 0;
102  if (done)
103  r->software_firmware_sync = m | sw_mask;
104  ixge_semaphore_release (xd);
105  if (!done)
106  vlib_process_suspend (vm, 10e-3);
107  }
108 }
109 
110 static void
111 ixge_software_firmware_sync_release (ixge_device_t * xd, u32 sw_mask)
112 {
113  ixge_regs_t *r = xd->regs;
114  ixge_semaphore_get (xd);
115  r->software_firmware_sync &= ~sw_mask;
116  ixge_semaphore_release (xd);
117 }
118 
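/* Editor's note: PHY access below follows IEEE 802.3 clause 45 MDIO:
   an address cycle is issued first, then a separate read (op 2) or
   write (op 1) command cycle, each busy-waited on bit 30 of
   phy_command.  Illustrative use, matching ixge_phy_init further
   down:

     u32 id = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD,
                                 XGE_PHY_ID1);
*/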
119 u32
120 ixge_read_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index,
121  u32 v, u32 is_read)
122 {
123  ixge_regs_t *r = xd->regs;
124  const u32 busy_bit = 1 << 30;
125  u32 x;
126 
127  ASSERT (xd->phy_index < 2);
128  ixge_software_firmware_sync (xd, 1 << (1 + xd->phy_index));
129 
130  ASSERT (reg_index < (1 << 16));
131  ASSERT (dev_type < (1 << 5));
132  if (!is_read)
133  r->xge_mac.phy_data = v;
134 
135  /* Address cycle. */
136  x =
137  reg_index | (dev_type << 16) | (xd->
138  phys[xd->phy_index].mdio_address << 21);
139  r->xge_mac.phy_command = x | busy_bit;
140  /* Busy wait timed to take 28e-6 secs. No suspend. */
141  while (r->xge_mac.phy_command & busy_bit)
142  ;
143 
144  r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145  while (r->xge_mac.phy_command & busy_bit)
146  ;
147 
148  if (is_read)
149  v = r->xge_mac.phy_data >> 16;
150 
151  ixge_software_firmware_sync_release (xd, 1 << (1 + xd->phy_index));
152 
153  return v;
154 }
155 
156 static u32
157 ixge_read_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index)
158 {
159  return ixge_read_write_phy_reg (xd, dev_type, reg_index, 0, /* is_read */
160  1);
161 }
162 
163 static void
164 ixge_write_phy_reg (ixge_device_t * xd, u32 dev_type, u32 reg_index, u32 v)
165 {
166  (void) ixge_read_write_phy_reg (xd, dev_type, reg_index, v, /* is_read */
167  0);
168 }
169 
170 static void
171 ixge_i2c_put_bits (i2c_bus_t * b, int scl, int sda)
172 {
173  ixge_main_t *xm = &ixge_main;
174  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
175  u32 v;
176 
177  v = 0;
178  v |= (sda != 0) << 3;
179  v |= (scl != 0) << 1;
180  xd->regs->i2c_control = v;
181 }
182 
183 static void
184 ixge_i2c_get_bits (i2c_bus_t * b, int *scl, int *sda)
185 {
186  ixge_main_t *xm = &ixge_main;
187  ixge_device_t *xd = vec_elt_at_index (xm->devices, b->private_data);
188  u32 v;
189 
190  v = xd->regs->i2c_control;
191  *sda = (v & (1 << 2)) != 0;
192  *scl = (v & (1 << 0)) != 0;
193 }
194 
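/* Editor's note: eeprom_read is a self-contained command register:
   bit 0 starts a read, the word address is placed at bit 2, bit 1
   reports completion, and the 16-bit result comes back in the top
   half of the same register (hence the final >> 16). */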
195 static u16
196 ixge_read_eeprom (ixge_device_t * xd, u32 address)
197 {
198  ixge_regs_t *r = xd->regs;
199  u32 v;
200  r->eeprom_read = (( /* start bit */ (1 << 0)) | (address << 2));
201  /* Wait for done bit. */
202  while (!((v = r->eeprom_read) & (1 << 1)))
203  ;
204  return v >> 16;
205 }
206 
207 static void
208 ixge_sfp_enable_disable_laser (ixge_device_t * xd, uword enable)
209 {
210  u32 tx_disable_bit = 1 << 3;
211  if (enable)
212  xd->regs->sdp_control &= ~tx_disable_bit;
213  else
214  xd->regs->sdp_control |= tx_disable_bit;
215 }
216 
217 static void
218 ixge_sfp_enable_disable_10g (ixge_device_t * xd, uword enable)
219 {
220  u32 is_10g_bit = 1 << 5;
221  if (enable)
222  xd->regs->sdp_control |= is_10g_bit;
223  else
224  xd->regs->sdp_control &= ~is_10g_bit;
225 }
226 
227 static clib_error_t *
228 ixge_sfp_phy_init_from_eeprom (ixge_device_t * xd, u16 sfp_type)
229 {
230  u16 a, id, reg_values_addr = 0;
231 
232  a = ixge_read_eeprom (xd, 0x2b);
233  if (a == 0 || a == 0xffff)
234  return clib_error_create ("no init sequence in eeprom");
235 
236  while (1)
237  {
238  id = ixge_read_eeprom (xd, ++a);
239  if (id == 0xffff)
240  break;
241  reg_values_addr = ixge_read_eeprom (xd, ++a);
242  if (id == sfp_type)
243  break;
244  }
245  if (id != sfp_type)
246  return clib_error_create ("failed to find id 0x%x", sfp_type);
247 
248  ixge_software_firmware_sync (xd, 1 << 3);
249  while (1)
250  {
251  u16 v = ixge_read_eeprom (xd, ++reg_values_addr);
252  if (v == 0xffff)
253  break;
254  xd->regs->core_analog_config = v;
255  }
256  ixge_software_firmware_sync_release (xd, 1 << 3);
257 
258  /* Make sure laser is off. We'll turn on the laser when
259  the interface is brought up. */
260  ixge_sfp_enable_disable_laser (xd, /* enable */ 0);
261  ixge_sfp_enable_disable_10g (xd, /* is_10g */ 1);
262 
263  return 0;
264 }
265 
266 static void
267 ixge_sfp_device_up_down (ixge_device_t * xd, uword is_up)
268 {
269  u32 v;
270 
271  if (is_up)
272  {
273  /* pma/pmd 10g serial SFI. */
274  xd->regs->xge_mac.auto_negotiation_control2 &= ~(3 << 16);
275  xd->regs->xge_mac.auto_negotiation_control2 |= 2 << 16;
276 
277  v = xd->regs->xge_mac.auto_negotiation_control;
278  v &= ~(7 << 13);
279  v |= (0 << 13);
280  /* Restart autoneg. */
281  v |= (1 << 12);
282  xd->regs->xge_mac.auto_negotiation_control = v;
283 
284  while (!(xd->regs->xge_mac.link_partner_ability[0] & 0xf0000))
285  ;
286 
287  v = xd->regs->xge_mac.auto_negotiation_control;
288 
289  /* link mode 10g sfi serdes */
290  v &= ~(7 << 13);
291  v |= (3 << 13);
292 
293  /* Restart autoneg. */
294  v |= (1 << 12);
295  xd->regs->xge_mac.auto_negotiation_control = v;
296 
297  xd->regs->xge_mac.link_status;
298  }
299 
300  ixge_sfp_enable_disable_laser (xd, /* enable */ is_up);
301 
302  /* Give time for link partner to notice that we're up. */
303  if (is_up && vlib_in_process_context (vlib_get_main ()))
304  {
305  vlib_process_suspend (vlib_get_main (), 300e-3);
306  }
307 }
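/* Editor's note: per-queue DMA control registers are banked; receive
   queues 0-63 and 64-127 sit in two separate register blocks
   (rx_dma0/rx_dma1) while all transmit queues share a single array,
   which is what the helper below encodes. */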
308 
309 static ixge_dma_regs_t *
310 get_dma_regs (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 qi)
311 {
312  ixge_regs_t *r = xd->regs;
313  ASSERT (qi < 128);
314  if (rt == VLIB_RX)
315  return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
316  else
317  return &r->tx_dma[qi];
318 }
319 
320 static clib_error_t *
321 ixge_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
322 {
323  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
324  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
325  ixge_main_t *xm = &ixge_main;
326  ixge_device_t *xd = vec_elt_at_index (xm->devices, hif->dev_instance);
327  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
328 
329  if (is_up)
330  {
331  xd->regs->rx_enable |= 1;
332  xd->regs->tx_dma_control |= 1;
333  dr->control |= 1 << 25;
334  while (!(dr->control & (1 << 25)))
335  ;
336  }
337  else
338  {
339  xd->regs->rx_enable &= ~1;
340  xd->regs->tx_dma_control &= ~1;
341  }
342 
343  ixge_sfp_device_up_down (xd, is_up);
344 
345  return /* no error */ 0;
346 }
347 
348 static void
349 ixge_sfp_phy_init (ixge_device_t * xd)
350 {
351  ixge_phy_t *phy = xd->phys + xd->phy_index;
352  i2c_bus_t *ib = &xd->i2c_bus;
353 
354  ib->private_data = xd->device_index;
355  ib->put_bits = ixge_i2c_put_bits;
356  ib->get_bits = ixge_i2c_get_bits;
357  vlib_i2c_init (ib);
358 
359  vlib_i2c_read_eeprom (ib, 0x50, 0, 128, (u8 *) & xd->sfp_eeprom);
360 
362  xd->sfp_eeprom.id = SFP_ID_unknown;
363  else
364  {
365  /* FIXME 5 => SR/LR eeprom ID. */
366  clib_error_t *e =
367  ixge_sfp_phy_init_from_eeprom (xd, 5 + xd->device_index);
368  if (e)
369  clib_error_report (e);
370  }
371 
372  phy->mdio_address = ~0;
373 }
374 
375 static void
376 ixge_phy_init (ixge_device_t * xd)
377 {
378  ixge_main_t *xm = &ixge_main;
379  vlib_main_t *vm = xm->vlib_main;
380  ixge_phy_t *phy = xd->phys + xd->phy_index;
381 
382  switch (xd->device_id)
383  {
384  case IXGE_82599_sfp:
385  case IXGE_82599_sfp_em:
386  case IXGE_82599_sfp_fcoe:
387  /* others? */
388  return ixge_sfp_phy_init (xd);
389 
390  default:
391  break;
392  }
393 
394  /* Probe address of phy. */
395  {
396  u32 i, v;
397 
398  phy->mdio_address = ~0;
399  for (i = 0; i < 32; i++)
400  {
401  phy->mdio_address = i;
402  v = ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1);
403  if (v != 0xffff && v != 0)
404  break;
405  }
406 
407  /* No PHY found? */
408  if (i >= 32)
409  return;
410  }
411 
412  phy->id =
413  ((ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID1) << 16) |
414  ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PMA_PMD, XGE_PHY_ID2));
415 
416  {
417  ELOG_TYPE_DECLARE (e) =
418  {
419  .function = (char *) __FUNCTION__,.format =
420  "ixge %d, phy id 0x%d mdio address %d",.format_args = "i4i4i4",};
421  struct
422  {
423  u32 instance, id, address;
424  } *ed;
425  ed = ELOG_DATA (&vm->elog_main, e);
426  ed->instance = xd->device_index;
427  ed->id = phy->id;
428  ed->address = phy->mdio_address;
429  }
430 
431  /* Reset phy. */
432  ixge_write_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL,
433  XGE_PHY_CONTROL_RESET);
434 
435  /* Wait for self-clearing reset bit to clear. */
436  do
437  {
438  vlib_process_suspend (vm, 1e-3);
439  }
440  while (ixge_read_phy_reg (xd, XGE_PHY_DEV_TYPE_PHY_XS, XGE_PHY_CONTROL) &
441  XGE_PHY_CONTROL_RESET);
442 }
443 
444 static u8 *
445 format_ixge_rx_from_hw_descriptor (u8 * s, va_list * va)
446 {
447  ixge_rx_from_hw_descriptor_t *d =
448  va_arg (*va, ixge_rx_from_hw_descriptor_t *);
449  u32 s0 = d->status[0], s2 = d->status[2];
450  u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
451  u32 indent = format_get_indent (s);
452 
453  s = format (s, "%s-owned",
454  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE) ? "sw" :
455  "hw");
456  s =
457  format (s, ", length this descriptor %d, l3 offset %d",
458  d->n_packet_bytes_this_descriptor,
459  IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s0));
460  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET)
461  s = format (s, ", end-of-packet");
462 
463  s = format (s, "\n%U", format_white_space, indent);
464 
465  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_LAYER2_ERROR)
466  s = format (s, "layer2 error");
467 
468  if (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_LAYER2)
469  {
470  s = format (s, "layer 2 type %d", (s0 & 0x1f));
471  return s;
472  }
473 
474  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_VLAN)
475  s = format (s, "vlan header 0x%x\n%U", d->vlan_tag,
476  format_white_space, indent);
477 
478  if ((is_ip4 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4)))
479  {
480  s = format (s, "ip4%s",
481  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP4_EXT) ? " options" :
482  "");
483  if (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED)
484  s = format (s, " checksum %s",
485  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR) ?
486  "bad" : "ok");
487  }
488  if ((is_ip6 = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6)))
489  s = format (s, "ip6%s",
490  (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6_EXT) ? " extended" :
491  "");
492  is_tcp = is_udp = 0;
493  if ((is_ip = (is_ip4 | is_ip6)))
494  {
495  is_tcp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_TCP) != 0;
496  is_udp = (s0 & IXGE_RX_DESCRIPTOR_STATUS0_IS_UDP) != 0;
497  if (is_tcp)
498  s = format (s, ", tcp");
499  if (is_udp)
500  s = format (s, ", udp");
501  }
502 
503  if (is_tcp && (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED))
504  s = format (s, ", tcp checksum %s",
505  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR) ? "bad" :
506  "ok");
507  if (is_udp && (s2 & IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
508  s = format (s, ", udp checksum %s",
509  (s2 & IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR) ? "bad" :
510  "ok");
511 
512  return s;
513 }
514 
515 static u8 *
516 format_ixge_tx_descriptor (u8 * s, va_list * va)
517 {
518  ixge_tx_descriptor_t *d = va_arg (*va, ixge_tx_descriptor_t *);
519  u32 s0 = d->status0, s1 = d->status1;
520  u32 indent = format_get_indent (s);
521  u32 v;
522 
523  s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
524  d->buffer_address, s1 >> 14, d->n_bytes_this_buffer);
525 
526  s = format (s, "\n%U", format_white_space, indent);
527 
528  if ((v = (s0 >> 0) & 3))
529  s = format (s, "reserved 0x%x, ", v);
530 
531  if ((v = (s0 >> 2) & 3))
532  s = format (s, "mac 0x%x, ", v);
533 
534  if ((v = (s0 >> 4) & 0xf) != 3)
535  s = format (s, "type 0x%x, ", v);
536 
537  s = format (s, "%s%s%s%s%s%s%s%s",
538  (s0 & (1 << 8)) ? "eop, " : "",
539  (s0 & (1 << 9)) ? "insert-fcs, " : "",
540  (s0 & (1 << 10)) ? "reserved26, " : "",
541  (s0 & (1 << 11)) ? "report-status, " : "",
542  (s0 & (1 << 12)) ? "reserved28, " : "",
543  (s0 & (1 << 13)) ? "is-advanced, " : "",
544  (s0 & (1 << 14)) ? "vlan-enable, " : "",
545  (s0 & (1 << 15)) ? "tx-segmentation, " : "");
546 
547  if ((v = s1 & 0xf) != 0)
548  s = format (s, "status 0x%x, ", v);
549 
550  if ((v = (s1 >> 4) & 0xf))
551  s = format (s, "context 0x%x, ", v);
552 
553  if ((v = (s1 >> 8) & 0x3f))
554  s = format (s, "options 0x%x, ", v);
555 
556  return s;
557 }
558 
559 typedef struct
560 {
561  ixge_descriptor_t before, after;
562 
563  u32 buffer_index;
564 
565  u16 device_index;
566 
567  u8 queue_index;
568 
569  u8 is_start_of_packet;
570 
571  /* Copy of VLIB buffer; packet data stored in pre_data. */
572  vlib_buffer_t buffer;
573 } ixge_rx_dma_trace_t;
574 
575 static u8 *
576 format_ixge_rx_dma_trace (u8 * s, va_list * va)
577 {
578  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
579  vlib_node_t *node = va_arg (*va, vlib_node_t *);
580  vnet_main_t *vnm = vnet_get_main ();
581  ixge_rx_dma_trace_t *t = va_arg (*va, ixge_rx_dma_trace_t *);
582  ixge_main_t *xm = &ixge_main;
583  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
584  format_function_t *f;
585  u32 indent = format_get_indent (s);
586 
587  {
588  vnet_sw_interface_t *sw =
589  vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
590  s =
591  format (s, "%U rx queue %d", format_vnet_sw_interface_name, vnm, sw,
592  t->queue_index);
593  }
594 
595  s = format (s, "\n%Ubefore: %U",
596  format_white_space, indent,
597  format_ixge_rx_from_hw_descriptor, &t->before);
598  s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
599  format_white_space, indent,
600  t->after.rx_to_hw.head_address, t->after.rx_to_hw.tail_address);
601 
602  s = format (s, "\n%Ubuffer 0x%x: %U",
603  format_white_space, indent,
604  t->buffer_index, format_vnet_buffer, &t->buffer);
605 
606  s = format (s, "\n%U", format_white_space, indent);
607 
608  f = node->format_buffer;
609  if (!f || !t->is_start_of_packet)
610  f = format_hex_bytes;
611  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
612 
613  return s;
614 }
615 
616 #define foreach_ixge_error \
617  _ (none, "no error") \
618  _ (tx_full_drops, "tx ring full drops") \
619  _ (ip4_checksum_error, "ip4 checksum errors") \
620  _ (rx_alloc_fail, "rx buf alloc from free list failed") \
621  _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
622 
623 typedef enum
624 {
625 #define _(f,s) IXGE_ERROR_##f,
626  foreach_ixge_error
627 #undef _
628  IXGE_N_ERROR,
629 } ixge_error_t;
630 
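/* Editor's note: the two helpers below fold the hardware RX status
   words into three per-packet results: the next vlib node (ip4/ip6
   input, or drop on error), an ixge_error_t, and buffer flags saying
   whether the NIC computed and validated an L4 checksum.  The x2
   variant is the same logic unrolled for two descriptors. */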
631 always_inline void
632 ixge_rx_next_and_error_from_status_x1 (ixge_device_t * xd,
633  u32 s00, u32 s02,
634  u8 * next0, u8 * error0, u32 * flags0)
635 {
636  u8 is0_ip4, is0_ip6, n0, e0;
637  u32 f0;
638 
639  e0 = IXGE_ERROR_none;
640  n0 = IXGE_RX_NEXT_IP4_INPUT;
641 
642  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
643  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
644 
645  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
646  ? IXGE_ERROR_ip4_checksum_error : e0);
647 
648  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
649  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
650 
651  n0 = (xd->per_interface_next_index != ~0) ?
652  xd->per_interface_next_index : n0;
653 
654  /* Check for error. */
655  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
656 
657  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
658  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
659  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
660 
661  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
662  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
663  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
664 
665  *error0 = e0;
666  *next0 = n0;
667  *flags0 = f0;
668 }
669 
670 always_inline void
671 ixge_rx_next_and_error_from_status_x2 (ixge_device_t * xd,
672  u32 s00, u32 s02,
673  u32 s10, u32 s12,
674  u8 * next0, u8 * error0, u32 * flags0,
675  u8 * next1, u8 * error1, u32 * flags1)
676 {
677  u8 is0_ip4, is0_ip6, n0, e0;
678  u8 is1_ip4, is1_ip6, n1, e1;
679  u32 f0, f1;
680 
681  e0 = e1 = IXGE_ERROR_none;
682  n0 = n1 = IXGE_RX_NEXT_IP4_INPUT;
683 
684  is0_ip4 = s02 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
685  is1_ip4 = s12 & IXGE_RX_DESCRIPTOR_STATUS2_IS_IP4_CHECKSUMMED;
686 
687  n0 = is0_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n0;
688  n1 = is1_ip4 ? IXGE_RX_NEXT_IP4_INPUT : n1;
689 
690  e0 = (is0_ip4 && (s02 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
691  ? IXGE_ERROR_ip4_checksum_error : e0);
692  e1 = (is1_ip4 && (s12 & IXGE_RX_DESCRIPTOR_STATUS2_IP4_CHECKSUM_ERROR)
693  ? IXGE_ERROR_ip4_checksum_error : e1);
694 
695  is0_ip6 = s00 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
696  is1_ip6 = s10 & IXGE_RX_DESCRIPTOR_STATUS0_IS_IP6;
697 
698  n0 = is0_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n0;
699  n1 = is1_ip6 ? IXGE_RX_NEXT_IP6_INPUT : n1;
700 
701  n0 = (xd->per_interface_next_index != ~0) ?
702  xd->per_interface_next_index : n0;
703  n1 = (xd->per_interface_next_index != ~0) ?
704  xd->per_interface_next_index : n1;
705 
706  /* Check for error. */
707  n0 = e0 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n0;
708  n1 = e1 != IXGE_ERROR_none ? IXGE_RX_NEXT_DROP : n1;
709 
710  *error0 = e0;
711  *error1 = e1;
712 
713  *next0 = n0;
714  *next1 = n1;
715 
716  f0 = ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
717  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
718  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
719  f1 = ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_IS_TCP_CHECKSUMMED
720  | IXGE_RX_DESCRIPTOR_STATUS2_IS_UDP_CHECKSUMMED))
721  ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
722 
723  f0 |= ((s02 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
724  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
725  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
726  f1 |= ((s12 & (IXGE_RX_DESCRIPTOR_STATUS2_TCP_CHECKSUM_ERROR
727  | IXGE_RX_DESCRIPTOR_STATUS2_UDP_CHECKSUM_ERROR))
728  ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
729 
730  *flags0 = f0;
731  *flags1 = f1;
732 }
733 
734 static void
735 ixge_rx_trace (ixge_main_t * xm,
736  ixge_device_t * xd,
737  ixge_dma_queue_t * dq,
738  ixge_descriptor_t * before_descriptors,
739  u32 * before_buffers,
740  ixge_descriptor_t * after_descriptors, uword n_descriptors)
741 {
742  vlib_main_t *vm = xm->vlib_main;
743  vlib_node_runtime_t *node = dq->rx.node;
744  ixge_rx_from_hw_descriptor_t *bd;
745  ixge_rx_to_hw_descriptor_t *ad;
746  u32 *b, n_left, is_sop, next_index_sop;
747 
748  n_left = n_descriptors;
749  b = before_buffers;
750  bd = &before_descriptors->rx_from_hw;
751  ad = &after_descriptors->rx_to_hw;
752  is_sop = dq->rx.is_start_of_packet;
753  next_index_sop = dq->rx.saved_start_of_packet_next_index;
754 
755  while (n_left >= 2)
756  {
757  u32 bi0, bi1, flags0, flags1;
758  vlib_buffer_t *b0, *b1;
759  ixge_rx_dma_trace_t *t0, *t1;
760  u8 next0, error0, next1, error1;
761 
762  bi0 = b[0];
763  bi1 = b[1];
764  n_left -= 2;
765 
766  b0 = vlib_get_buffer (vm, bi0);
767  b1 = vlib_get_buffer (vm, bi1);
768 
769  ixge_rx_next_and_error_from_status_x2 (xd,
770  bd[0].status[0], bd[0].status[2],
771  bd[1].status[0], bd[1].status[2],
772  &next0, &error0, &flags0,
773  &next1, &error1, &flags1);
774 
775  next_index_sop = is_sop ? next0 : next_index_sop;
776  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
777  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
778  t0->is_start_of_packet = is_sop;
779  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
780 
781  next_index_sop = is_sop ? next1 : next_index_sop;
782  vlib_trace_buffer (vm, node, next_index_sop, b1, /* follow_chain */ 0);
783  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
784  t1->is_start_of_packet = is_sop;
785  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
786 
787  t0->queue_index = dq->queue_index;
788  t1->queue_index = dq->queue_index;
789  t0->device_index = xd->device_index;
790  t1->device_index = xd->device_index;
791  t0->before.rx_from_hw = bd[0];
792  t1->before.rx_from_hw = bd[1];
793  t0->after.rx_to_hw = ad[0];
794  t1->after.rx_to_hw = ad[1];
795  t0->buffer_index = bi0;
796  t1->buffer_index = bi1;
797  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
798  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
799  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
800  sizeof (t0->buffer.pre_data));
801  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
802  sizeof (t1->buffer.pre_data));
803 
804  b += 2;
805  bd += 2;
806  ad += 2;
807  }
808 
809  while (n_left >= 1)
810  {
811  u32 bi0, flags0;
812  vlib_buffer_t *b0;
813  ixge_rx_dma_trace_t *t0;
814  u8 next0, error0;
815 
816  bi0 = b[0];
817  n_left -= 1;
818 
819  b0 = vlib_get_buffer (vm, bi0);
820 
821  ixge_rx_next_and_error_from_status_x1 (xd,
822  bd[0].status[0], bd[0].status[2],
823  &next0, &error0, &flags0);
824 
825  next_index_sop = is_sop ? next0 : next_index_sop;
826  vlib_trace_buffer (vm, node, next_index_sop, b0, /* follow_chain */ 0);
827  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
828  t0->is_start_of_packet = is_sop;
829  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
830 
831  t0->queue_index = dq->queue_index;
832  t0->device_index = xd->device_index;
833  t0->before.rx_from_hw = bd[0];
834  t0->after.rx_to_hw = ad[0];
835  t0->buffer_index = bi0;
836  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
837  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
838  sizeof (t0->buffer.pre_data));
839 
840  b += 1;
841  bd += 1;
842  ad += 1;
843  }
844 }
845 
846 typedef struct
847 {
848  ixge_tx_descriptor_t descriptor;
849 
850  u32 buffer_index;
851 
852  u16 device_index;
853 
854  u8 queue_index;
855 
856  u8 is_start_of_packet;
857 
858  /* Copy of VLIB buffer; packet data stored in pre_data. */
859  vlib_buffer_t buffer;
860 } ixge_tx_dma_trace_t;
861 
862 static u8 *
863 format_ixge_tx_dma_trace (u8 * s, va_list * va)
864 {
865  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
866  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
867  ixge_tx_dma_trace_t *t = va_arg (*va, ixge_tx_dma_trace_t *);
868  vnet_main_t *vnm = vnet_get_main ();
869  ixge_main_t *xm = &ixge_main;
870  ixge_device_t *xd = vec_elt_at_index (xm->devices, t->device_index);
871  format_function_t *f;
872  u32 indent = format_get_indent (s);
873 
874  {
875  vnet_sw_interface_t *sw =
876  vnet_get_sw_interface (vnm, xd->vlib_sw_if_index);
877  s =
878  format (s, "%U tx queue %d", format_vnet_sw_interface_name, vnm, sw,
879  t->queue_index);
880  }
881 
882  s = format (s, "\n%Udescriptor: %U",
883  format_white_space, indent,
884  format_ixge_tx_descriptor, &t->descriptor);
885 
886  s = format (s, "\n%Ubuffer 0x%x: %U",
887  format_white_space, indent,
888  t->buffer_index, format_vnet_buffer, &t->buffer);
889 
890  s = format (s, "\n%U", format_white_space, indent);
891 
892  f = format_ethernet_header_with_length;
893  if (!f || !t->is_start_of_packet)
894  f = format_hex_bytes;
895  s = format (s, "%U", f, t->buffer.pre_data, sizeof (t->buffer.pre_data));
896 
897  return s;
898 }
899 
900 typedef struct
901 {
902  vlib_node_runtime_t *node;
903 
904  u32 is_start_of_packet;
905 
906  u32 n_bytes_in_packet;
907 
908  ixge_tx_descriptor_t *start_of_packet_descriptor;
909 } ixge_tx_state_t;
910 
911 static void
912 ixge_tx_trace (ixge_main_t * xm,
913  ixge_device_t * xd,
914  ixge_dma_queue_t * dq,
915  ixge_tx_state_t * tx_state,
916  ixge_tx_descriptor_t * descriptors,
917  u32 * buffers, uword n_descriptors)
918 {
919  vlib_main_t *vm = xm->vlib_main;
920  vlib_node_runtime_t *node = tx_state->node;
921  ixge_tx_descriptor_t *d;
922  u32 *b, n_left, is_sop;
923 
924  n_left = n_descriptors;
925  b = buffers;
926  d = descriptors;
927  is_sop = tx_state->is_start_of_packet;
928 
929  while (n_left >= 2)
930  {
931  u32 bi0, bi1;
932  vlib_buffer_t *b0, *b1;
933  ixge_tx_dma_trace_t *t0, *t1;
934 
935  bi0 = b[0];
936  bi1 = b[1];
937  n_left -= 2;
938 
939  b0 = vlib_get_buffer (vm, bi0);
940  b1 = vlib_get_buffer (vm, bi1);
941 
942  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
943  t0->is_start_of_packet = is_sop;
944  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
945 
946  t1 = vlib_add_trace (vm, node, b1, sizeof (t1[0]));
947  t1->is_start_of_packet = is_sop;
948  is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
949 
950  t0->queue_index = dq->queue_index;
951  t1->queue_index = dq->queue_index;
952  t0->device_index = xd->device_index;
953  t1->device_index = xd->device_index;
954  t0->descriptor = d[0];
955  t1->descriptor = d[1];
956  t0->buffer_index = bi0;
957  t1->buffer_index = bi1;
958  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
959  memcpy (&t1->buffer, b1, sizeof (b1[0]) - sizeof (b0->pre_data));
960  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
961  sizeof (t0->buffer.pre_data));
962  memcpy (t1->buffer.pre_data, b1->data + b1->current_data,
963  sizeof (t1->buffer.pre_data));
964 
965  b += 2;
966  d += 2;
967  }
968 
969  while (n_left >= 1)
970  {
971  u32 bi0;
972  vlib_buffer_t *b0;
973  ixge_tx_dma_trace_t *t0;
974 
975  bi0 = b[0];
976  n_left -= 1;
977 
978  b0 = vlib_get_buffer (vm, bi0);
979 
980  t0 = vlib_add_trace (vm, node, b0, sizeof (t0[0]));
981  t0->is_start_of_packet = is_sop;
982  is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
983 
984  t0->queue_index = dq->queue_index;
985  t0->device_index = xd->device_index;
986  t0->descriptor = d[0];
987  t0->buffer_index = bi0;
988  memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
989  memcpy (t0->buffer.pre_data, b0->data + b0->current_data,
990  sizeof (t0->buffer.pre_data));
991 
992  b += 1;
993  d += 1;
994  }
995 }
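/* Editor's note: ring indices use modular arithmetic.  For example,
   with n_descriptors == 512, ixge_ring_sub (q, 510, 4) == 6 (the
   distance from 510 forward to 4, wrapping past zero) and
   ixge_ring_add (q, 510, 4) == 2.  One slot is always kept empty so
   that head == tail unambiguously means an empty ring. */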
996 
997 always_inline uword
998 ixge_ring_sub (ixge_dma_queue_t * q, u32 i0, u32 i1)
999 {
1000  i32 d = i1 - i0;
1001  ASSERT (i0 < q->n_descriptors);
1002  ASSERT (i1 < q->n_descriptors);
1003  return d < 0 ? q->n_descriptors + d : d;
1004 }
1005 
1006 always_inline uword
1007 ixge_ring_add (ixge_dma_queue_t * q, u32 i0, u32 i1)
1008 {
1009  u32 d = i0 + i1;
1010  ASSERT (i0 < q->n_descriptors);
1011  ASSERT (i1 < q->n_descriptors);
1012  d -= d >= q->n_descriptors ? q->n_descriptors : 0;
1013  return d;
1014 }
1015 
1016 always_inline uword
1017 ixge_tx_descriptor_matches_template (ixge_main_t * xm,
1018  ixge_tx_descriptor_t * d)
1019 {
1020  u32 cmp;
1021 
1022  cmp = ((d->status0 & xm->tx_descriptor_template_mask.status0)
1023  ^ xm->tx_descriptor_template.status0);
1024  if (cmp)
1025  return 0;
1026  cmp = ((d->status1 & xm->tx_descriptor_template_mask.status1)
1027  ^ xm->tx_descriptor_template.status1);
1028  if (cmp)
1029  return 0;
1030 
1031  return 1;
1032 }
1033 
1034 static uword
1035 ixge_tx_no_wrap (ixge_main_t * xm,
1036  ixge_device_t * xd,
1037  ixge_dma_queue_t * dq,
1038  u32 * buffers,
1039  u32 start_descriptor_index,
1040  u32 n_descriptors, ixge_tx_state_t * tx_state)
1041 {
1042  vlib_main_t *vm = xm->vlib_main;
1043  ixge_tx_descriptor_t *d, *d_sop;
1044  u32 n_left = n_descriptors;
1045  u32 *to_free = vec_end (xm->tx_buffers_pending_free);
1046  u32 *to_tx =
1047  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1048  u32 is_sop = tx_state->is_start_of_packet;
1049  u32 len_sop = tx_state->n_bytes_in_packet;
1050  u16 template_status = xm->tx_descriptor_template.status0;
1051  u32 descriptor_prefetch_rotor = 0;
1052 
1053  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1054  d = &dq->descriptors[start_descriptor_index].tx;
1055  d_sop = is_sop ? d : tx_state->start_of_packet_descriptor;
1056 
1057  while (n_left >= 4)
1058  {
1059  vlib_buffer_t *b0, *b1;
1060  u32 bi0, fi0, len0;
1061  u32 bi1, fi1, len1;
1062  u8 is_eop0, is_eop1;
1063 
1064  /* Prefetch next iteration. */
1065  vlib_prefetch_buffer_with_index (vm, buffers[2], LOAD);
1066  vlib_prefetch_buffer_with_index (vm, buffers[3], LOAD);
1067 
1068  if ((descriptor_prefetch_rotor & 0x3) == 0)
1069  CLIB_PREFETCH (d + 4, CLIB_CACHE_LINE_BYTES, STORE);
1070 
1071  descriptor_prefetch_rotor += 2;
1072 
1073  bi0 = buffers[0];
1074  bi1 = buffers[1];
1075 
1076  to_free[0] = fi0 = to_tx[0];
1077  to_tx[0] = bi0;
1078  to_free += fi0 != 0;
1079 
1080  to_free[0] = fi1 = to_tx[1];
1081  to_tx[1] = bi1;
1082  to_free += fi1 != 0;
1083 
1084  buffers += 2;
1085  n_left -= 2;
1086  to_tx += 2;
1087 
1088  b0 = vlib_get_buffer (vm, bi0);
1089  b1 = vlib_get_buffer (vm, bi1);
1090 
1091  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092  is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1093 
1094  len0 = b0->current_length;
1095  len1 = b1->current_length;
1096 
1097  ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
1098  ASSERT (ixge_tx_descriptor_matches_template (xm, d + 1));
1099 
1100  d[0].buffer_address =
1101  vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
1102  d[1].buffer_address =
1103  vlib_get_buffer_data_physical_address (vm, bi1) + b1->current_data;
1104 
1105  d[0].n_bytes_this_buffer = len0;
1106  d[1].n_bytes_this_buffer = len1;
1107 
1108  d[0].status0 =
1109  template_status | (is_eop0 <<
1110  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1111  d[1].status0 =
1112  template_status | (is_eop1 <<
1113  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1114 
1115  len_sop = (is_sop ? 0 : len_sop) + len0;
1116  d_sop[0].status1 =
1117  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop * is_eop0);
1118  d += 1;
1119  d_sop = is_eop0 ? d : d_sop;
1120 
1121  is_sop = is_eop0;
1122 
1123  len_sop = (is_sop ? 0 : len_sop) + len1;
1124  d_sop[0].status1 =
1125  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop * is_eop1);
1126  d += 1;
1127  d_sop = is_eop1 ? d : d_sop;
1128 
1129  is_sop = is_eop1;
1130  }
1131 
1132  while (n_left > 0)
1133  {
1134  vlib_buffer_t *b0;
1135  u32 bi0, fi0, len0;
1136  u8 is_eop0;
1137 
1138  bi0 = buffers[0];
1139 
1140  to_free[0] = fi0 = to_tx[0];
1141  to_tx[0] = bi0;
1142  to_free += fi0 != 0;
1143 
1144  buffers += 1;
1145  n_left -= 1;
1146  to_tx += 1;
1147 
1148  b0 = vlib_get_buffer (vm, bi0);
1149 
1150  is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1151 
1152  len0 = b0->current_length;
1153 
1154  ASSERT (ixge_tx_descriptor_matches_template (xm, d + 0));
1155 
1156  d[0].buffer_address =
1157  vlib_get_buffer_data_physical_address (vm, bi0) + b0->current_data;
1158 
1159  d[0].n_bytes_this_buffer = len0;
1160 
1161  d[0].status0 =
1162  template_status | (is_eop0 <<
1163  IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1164 
1165  len_sop = (is_sop ? 0 : len_sop) + len0;
1166  d_sop[0].status1 =
1167  IXGE_TX_DESCRIPTOR_STATUS1_N_BYTES_IN_PACKET (len_sop * is_eop0);
1168  d += 1;
1169  d_sop = is_eop0 ? d : d_sop;
1170 
1171  is_sop = is_eop0;
1172  }
1173 
1174  if (tx_state->node->flags & VLIB_NODE_FLAG_TRACE)
1175  {
1176  to_tx =
1177  vec_elt_at_index (dq->descriptor_buffer_indices,
1178  start_descriptor_index);
1179  ixge_tx_trace (xm, xd, dq, tx_state,
1180  &dq->descriptors[start_descriptor_index].tx, to_tx,
1181  n_descriptors);
1182  }
1183 
1184  _vec_len (xm->tx_buffers_pending_free) =
1185  to_free - xm->tx_buffers_pending_free;
1186 
1187  /* When we are done d_sop can point to end of ring. Wrap it if so. */
1188  {
1189  ixge_tx_descriptor_t *d_start = &dq->descriptors[0].tx;
1190 
1191  ASSERT (d_sop - d_start <= dq->n_descriptors);
1192  d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1193  }
1194 
1195  tx_state->is_start_of_packet = is_sop;
1196  tx_state->start_of_packet_descriptor = d_sop;
1197  tx_state->n_bytes_in_packet = len_sop;
1198 
1199  return n_descriptors;
1200 }
1201 
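/* Editor's note: the transmit path below proceeds in three steps:
   (1) refresh the ring head from the descriptor write-back area and
   compute the free-slot budget, (2) if the frame exceeds it, drop
   whole packets from the tail of the frame (never splitting a buffer
   chain), and (3) fill descriptors in at most two no-wrap runs, then
   publish the new tail to the hardware doorbell after a memory
   barrier. */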
1202 static uword
1203 ixge_interface_tx (vlib_main_t * vm,
1204  vlib_node_runtime_t * node, vlib_frame_t * f)
1205 {
1206  ixge_main_t *xm = &ixge_main;
1207  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
1208  ixge_device_t *xd = vec_elt_at_index (xm->devices, rd->dev_instance);
1209  ixge_dma_queue_t *dq;
1210  u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1211  u32 queue_index = 0; /* fixme parameter */
1212  ixge_tx_state_t tx_state;
1213 
1214  tx_state.node = node;
1215  tx_state.is_start_of_packet = 1;
1216  tx_state.start_of_packet_descriptor = 0;
1217  tx_state.n_bytes_in_packet = 0;
1218 
1219  from = vlib_frame_vector_args (f);
1220 
1221  dq = vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
1222 
1223  dq->head_index = dq->tx.head_index_write_back[0];
1224 
1225  /* Since head == tail means ring is empty we can send up to dq->n_descriptors - 1. */
1226  n_left_tx = dq->n_descriptors - 1;
1227  n_left_tx -= ixge_ring_sub (dq, dq->head_index, dq->tail_index);
1228 
1229  _vec_len (xm->tx_buffers_pending_free) = 0;
1230 
1231  n_descriptors_to_tx = f->n_vectors;
1232  n_tail_drop = 0;
1233  if (PREDICT_FALSE (n_descriptors_to_tx > n_left_tx))
1234  {
1235  i32 i, n_ok, i_eop, i_sop;
1236 
1237  i_sop = i_eop = ~0;
1238  for (i = n_left_tx - 1; i >= 0; i--)
1239  {
1240  vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
1241  if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1242  {
1243  if (i_sop != ~0 && i_eop != ~0)
1244  break;
1245  i_eop = i;
1246  i_sop = i + 1;
1247  }
1248  }
1249  if (i == 0)
1250  n_ok = 0;
1251  else
1252  n_ok = i_eop + 1;
1253 
1254  {
1255  ELOG_TYPE_DECLARE (e) =
1256  {
1257  .function = (char *) __FUNCTION__,.format =
1258  "ixge %d, ring full to tx %d head %d tail %d",.format_args =
1259  "i2i2i2i2",};
1260  struct
1261  {
1262  u16 instance, to_tx, head, tail;
1263  } *ed;
1264  ed = ELOG_DATA (&vm->elog_main, e);
1265  ed->instance = xd->device_index;
1266  ed->to_tx = n_descriptors_to_tx;
1267  ed->head = dq->head_index;
1268  ed->tail = dq->tail_index;
1269  }
1270 
1271  if (n_ok < n_descriptors_to_tx)
1272  {
1273  n_tail_drop = n_descriptors_to_tx - n_ok;
1274  vec_add (xm->tx_buffers_pending_free, from + n_ok, n_tail_drop);
1275  vlib_error_count (vm, ixge_input_node.index,
1276  IXGE_ERROR_tx_full_drops, n_tail_drop);
1277  }
1278 
1279  n_descriptors_to_tx = n_ok;
1280  }
1281 
1282  dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1283 
1284  /* Process from tail to end of descriptor ring. */
1285  if (n_descriptors_to_tx > 0 && dq->tail_index < dq->n_descriptors)
1286  {
1287  u32 n =
1288  clib_min (dq->n_descriptors - dq->tail_index, n_descriptors_to_tx);
1289  n = ixge_tx_no_wrap (xm, xd, dq, from, dq->tail_index, n, &tx_state);
1290  from += n;
1291  n_descriptors_to_tx -= n;
1292  dq->tail_index += n;
1293  ASSERT (dq->tail_index <= dq->n_descriptors);
1294  if (dq->tail_index == dq->n_descriptors)
1295  dq->tail_index = 0;
1296  }
1297 
1298  if (n_descriptors_to_tx > 0)
1299  {
1300  u32 n =
1301  ixge_tx_no_wrap (xm, xd, dq, from, 0, n_descriptors_to_tx, &tx_state);
1302  from += n;
1303  ASSERT (n == n_descriptors_to_tx);
1304  dq->tail_index += n;
1305  ASSERT (dq->tail_index <= dq->n_descriptors);
1306  if (dq->tail_index == dq->n_descriptors)
1307  dq->tail_index = 0;
1308  }
1309 
1310  /* We should only get full packets. */
1311  ASSERT (tx_state.is_start_of_packet);
1312 
1313  /* Report status when last descriptor is done. */
1314  {
1315  u32 i = dq->tail_index == 0 ? dq->n_descriptors - 1 : dq->tail_index - 1;
1316  ixge_tx_descriptor_t *d = &dq->descriptors[i].tx;
1317  d->status0 |= IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS;
1318  }
1319 
1320  /* Give new descriptors to hardware. */
1321  {
1322  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_TX, queue_index);
1323 
1324  CLIB_MEMORY_BARRIER ();
1325 
1326  dr->tail_index = dq->tail_index;
1327  }
1328 
1329  /* Free any buffers that are done. */
1330  {
1331  u32 n = _vec_len (xm->tx_buffers_pending_free);
1332  if (n > 0)
1333  {
1334  vlib_buffer_free_no_next (vm, xm->tx_buffers_pending_free, n);
1335  _vec_len (xm->tx_buffers_pending_free) = 0;
1336  ASSERT (dq->tx.n_buffers_on_ring >= n);
1337  dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1338  }
1339  }
1340 
1341  return f->n_vectors;
1342 }
1343 
1344 static uword
1345 ixge_rx_queue_no_wrap (ixge_main_t * xm,
1346  ixge_device_t * xd,
1347  ixge_dma_queue_t * dq,
1348  u32 start_descriptor_index, u32 n_descriptors)
1349 {
1350  vlib_main_t *vm = xm->vlib_main;
1351  vlib_node_runtime_t *node = dq->rx.node;
1352  ixge_descriptor_t *d;
1353  static ixge_descriptor_t *d_trace_save;
1354  static u32 *d_trace_buffers;
1355  u32 n_descriptors_left = n_descriptors;
1356  u32 *to_rx =
1357  vec_elt_at_index (dq->descriptor_buffer_indices, start_descriptor_index);
1358  u32 *to_add;
1359  u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1360  u32 bi_last = dq->rx.saved_last_buffer_index;
1361  u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1362  u32 is_sop = dq->rx.is_start_of_packet;
1363  u32 next_index, n_left_to_next, *to_next;
1364  u32 n_packets = 0;
1365  u32 n_bytes = 0;
1366  u32 n_trace = vlib_get_trace_count (vm, node);
1367  vlib_buffer_t *b_last, b_dummy;
1368 
1369  ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1370  d = &dq->descriptors[start_descriptor_index];
1371 
1372  b_last = bi_last != ~0 ? vlib_get_buffer (vm, bi_last) : &b_dummy;
1373  next_index = dq->rx.next_index;
1374 
1375  if (n_trace > 0)
1376  {
1377  u32 n = clib_min (n_trace, n_descriptors);
1378  if (d_trace_save)
1379  {
1380  _vec_len (d_trace_save) = 0;
1381  _vec_len (d_trace_buffers) = 0;
1382  }
1383  vec_add (d_trace_save, (ixge_descriptor_t *) d, n);
1384  vec_add (d_trace_buffers, to_rx, n);
1385  }
1386 
1387  {
1388  uword l = vec_len (xm->rx_buffers_to_add);
1389 
1390  if (l < n_descriptors_left)
1391  {
1392  u32 n_to_alloc = 2 * dq->n_descriptors - l;
1393  u32 n_allocated;
1394 
1395  vec_resize (xm->rx_buffers_to_add, n_to_alloc);
1396 
1397  _vec_len (xm->rx_buffers_to_add) = l;
1398  n_allocated =
1399  vlib_buffer_alloc (vm, xm->rx_buffers_to_add + l, n_to_alloc);
1400  _vec_len (xm->rx_buffers_to_add) += n_allocated;
1401 
1402  /* Handle transient allocation failure */
1403  if (PREDICT_FALSE (l + n_allocated <= n_descriptors_left))
1404  {
1405  if (n_allocated == 0)
1406  vlib_error_count (vm, ixge_input_node.index,
1407  IXGE_ERROR_rx_alloc_no_physmem, 1);
1408  else
1409  vlib_error_count (vm, ixge_input_node.index,
1410  IXGE_ERROR_rx_alloc_fail, 1);
1411 
1412  n_descriptors_left = l + n_allocated;
1413  }
1414  n_descriptors = n_descriptors_left;
1415  }
1416 
1417  /* Add buffers from end of vector going backwards. */
1418  to_add = vec_end (xm->rx_buffers_to_add) - 1;
1419  }
1420 
1421  while (n_descriptors_left > 0)
1422  {
1423  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1424 
1425  while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1426  {
1427  vlib_buffer_t *b0, *b1;
1428  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1429  u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1430  u8 is_eop0, error0, next0;
1431  u8 is_eop1, error1, next1;
1432  ixge_descriptor_t d0, d1;
1433 
1434  vlib_prefetch_buffer_with_index (vm, to_rx[2], STORE);
1435  vlib_prefetch_buffer_with_index (vm, to_rx[3], STORE);
1436 
1437  CLIB_PREFETCH (d + 2, 32, STORE);
1438 
1439  d0.as_u32x4 = d[0].as_u32x4;
1440  d1.as_u32x4 = d[1].as_u32x4;
1441 
1442  s20 = d0.rx_from_hw.status[2];
1443  s21 = d1.rx_from_hw.status[2];
1444 
1445  s00 = d0.rx_from_hw.status[0];
1446  s01 = d1.rx_from_hw.status[0];
1447 
1448  if (!
1449  (s20 & s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1450  goto found_hw_owned_descriptor_x2;
1451 
1452  bi0 = to_rx[0];
1453  bi1 = to_rx[1];
1454 
1455  ASSERT (to_add - 1 >= xm->rx_buffers_to_add);
1456  fi0 = to_add[0];
1457  fi1 = to_add[-1];
1458 
1459  to_rx[0] = fi0;
1460  to_rx[1] = fi1;
1461  to_rx += 2;
1462  to_add -= 2;
1463 
1464 #if 0
1469 #endif
1470 
1471  b0 = vlib_get_buffer (vm, bi0);
1472  b1 = vlib_get_buffer (vm, bi1);
1473 
1474  /*
1475  * Turn this on if you run into
1476  * "bad monkey" contexts, and you want to know exactly
1477  * which nodes they've visited... See main.c...
1478  */
1479  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1480  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
1481 
1482  CLIB_PREFETCH (b0->data, CLIB_CACHE_LINE_BYTES, LOAD);
1483  CLIB_PREFETCH (b1->data, CLIB_CACHE_LINE_BYTES, LOAD);
1484 
1485  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1486  is_eop1 = (s21 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1487 
1488  ixge_rx_next_and_error_from_status_x2 (xd, s00, s20, s01, s21,
1489  &next0, &error0, &flags0,
1490  &next1, &error1, &flags1);
1491 
1492  next0 = is_sop ? next0 : next_index_sop;
1493  next1 = is_eop0 ? next1 : next0;
1494  next_index_sop = next1;
1495 
1496  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1497  b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1498 
1499  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1500  vnet_buffer (b1)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1501  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1502  vnet_buffer (b1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1503 
1504  b0->error = node->errors[error0];
1505  b1->error = node->errors[error1];
1506 
1507  len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1508  len1 = d1.rx_from_hw.n_packet_bytes_this_descriptor;
1509  n_bytes += len0 + len1;
1510  n_packets += is_eop0 + is_eop1;
1511 
1512  /* Give new buffers to hardware. */
1513  d0.rx_to_hw.tail_address =
1514  vlib_get_buffer_data_physical_address (vm, fi0);
1515  d1.rx_to_hw.tail_address =
1516  vlib_get_buffer_data_physical_address (vm, fi1);
1517  d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1518  d1.rx_to_hw.head_address = d1.rx_to_hw.tail_address;
1519  d[0].as_u32x4 = d0.as_u32x4;
1520  d[1].as_u32x4 = d1.as_u32x4;
1521 
1522  d += 2;
1523  n_descriptors_left -= 2;
1524 
1525  /* Point to either l2 or l3 header depending on next. */
1526  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1527  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1528  l3_offset1 = (is_eop0 && (next1 != IXGE_RX_NEXT_ETHERNET_INPUT))
1529  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s01) : 0;
1530 
1531  b0->current_length = len0 - l3_offset0;
1532  b1->current_length = len1 - l3_offset1;
1533  b0->current_data = l3_offset0;
1534  b1->current_data = l3_offset1;
1535 
1536  b_last->next_buffer = is_sop ? ~0 : bi0;
1537  b0->next_buffer = is_eop0 ? ~0 : bi1;
1538  bi_last = bi1;
1539  b_last = b1;
1540 
1541  if (CLIB_DEBUG > 0)
1542  {
1543  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1544  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1545 
1546  if (is_eop0)
1547  {
1548  u8 *msg = vlib_validate_buffer (vm, bi_sop0,
1549  /* follow_buffer_next */ 1);
1550  ASSERT (!msg);
1551  }
1552  if (is_eop1)
1553  {
1554  u8 *msg = vlib_validate_buffer (vm, bi_sop1,
1555  /* follow_buffer_next */ 1);
1556  ASSERT (!msg);
1557  }
1558  }
1559  if (0) /* "Dave" version */
1560  {
1561  u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1562  u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1563 
1564  if (is_eop0)
1565  {
1566  to_next[0] = bi_sop0;
1567  to_next++;
1568  n_left_to_next--;
1569 
1570  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1571  to_next, n_left_to_next,
1572  bi_sop0, next0);
1573  }
1574  if (is_eop1)
1575  {
1576  to_next[0] = bi_sop1;
1577  to_next++;
1578  n_left_to_next--;
1579 
1580  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1581  to_next, n_left_to_next,
1582  bi_sop1, next1);
1583  }
1584  is_sop = is_eop1;
1585  bi_sop = bi_sop1;
1586  }
1587  if (1) /* "Eliot" version */
1588  {
1589  /* Speculatively enqueue to cached next. */
1590  u8 saved_is_sop = is_sop;
1591  u32 bi_sop_save = bi_sop;
1592 
1593  bi_sop = saved_is_sop ? bi0 : bi_sop;
1594  to_next[0] = bi_sop;
1595  to_next += is_eop0;
1596  n_left_to_next -= is_eop0;
1597 
1598  bi_sop = is_eop0 ? bi1 : bi_sop;
1599  to_next[0] = bi_sop;
1600  to_next += is_eop1;
1601  n_left_to_next -= is_eop1;
1602 
1603  is_sop = is_eop1;
1604 
1605  if (PREDICT_FALSE
1606  (!(next0 == next_index && next1 == next_index)))
1607  {
1608  /* Undo speculation. */
1609  to_next -= is_eop0 + is_eop1;
1610  n_left_to_next += is_eop0 + is_eop1;
1611 
1612  /* Re-do both descriptors being careful about where we enqueue. */
1613  bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1614  if (is_eop0)
1615  {
1616  if (next0 != next_index)
1617  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1618  else
1619  {
1620  to_next[0] = bi_sop;
1621  to_next += 1;
1622  n_left_to_next -= 1;
1623  }
1624  }
1625 
1626  bi_sop = is_eop0 ? bi1 : bi_sop;
1627  if (is_eop1)
1628  {
1629  if (next1 != next_index)
1630  vlib_set_next_frame_buffer (vm, node, next1, bi_sop);
1631  else
1632  {
1633  to_next[0] = bi_sop;
1634  to_next += 1;
1635  n_left_to_next -= 1;
1636  }
1637  }
1638 
1639  /* Switch cached next index when next for both packets is the same. */
1640  if (is_eop0 && is_eop1 && next0 == next1)
1641  {
1642  vlib_put_next_frame (vm, node, next_index,
1643  n_left_to_next);
1644  next_index = next0;
1645  vlib_get_next_frame (vm, node, next_index,
1646  to_next, n_left_to_next);
1647  }
1648  }
1649  }
1650  }
1651 
1652  /* Bail out of dual loop and proceed with single loop. */
1653  found_hw_owned_descriptor_x2:
1654 
1655  while (n_descriptors_left > 0 && n_left_to_next > 0)
1656  {
1657  vlib_buffer_t *b0;
1658  u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1659  u8 is_eop0, error0, next0;
1660  ixge_descriptor_t d0;
1661 
1662  d0.as_u32x4 = d[0].as_u32x4;
1663 
1664  s20 = d0.rx_from_hw.status[2];
1665  s00 = d0.rx_from_hw.status[0];
1666 
1667  if (!(s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_OWNED_BY_SOFTWARE))
1668  goto found_hw_owned_descriptor_x1;
1669 
1670  bi0 = to_rx[0];
1671  ASSERT (to_add >= xm->rx_buffers_to_add);
1672  fi0 = to_add[0];
1673 
1674  to_rx[0] = fi0;
1675  to_rx += 1;
1676  to_add -= 1;
1677 
1678 #if 0
1681 #endif
1682 
1683  b0 = vlib_get_buffer (vm, bi0);
1684 
1685  /*
1686  * Turn this on if you run into
1687  * "bad monkey" contexts, and you want to know exactly
1688  * which nodes they've visited...
1689  */
1690  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
1691 
1692  is_eop0 = (s20 & IXGE_RX_DESCRIPTOR_STATUS2_IS_END_OF_PACKET) != 0;
1693  ixge_rx_next_and_error_from_status_x1
1694  (xd, s00, s20, &next0, &error0, &flags0);
1695 
1696  next0 = is_sop ? next0 : next_index_sop;
1697  next_index_sop = next0;
1698 
1699  b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1700 
1701  vnet_buffer (b0)->sw_if_index[VLIB_RX] = xd->vlib_sw_if_index;
1702  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
1703 
1704  b0->error = node->errors[error0];
1705 
1706  len0 = d0.rx_from_hw.n_packet_bytes_this_descriptor;
1707  n_bytes += len0;
1708  n_packets += is_eop0;
1709 
1710  /* Give new buffer to hardware. */
1711  d0.rx_to_hw.tail_address =
1712  vlib_get_buffer_data_physical_address (vm, fi0);
1713  d0.rx_to_hw.head_address = d0.rx_to_hw.tail_address;
1714  d[0].as_u32x4 = d0.as_u32x4;
1715 
1716  d += 1;
1717  n_descriptors_left -= 1;
1718 
1719  /* Point to either l2 or l3 header depending on next. */
1720  l3_offset0 = (is_sop && (next0 != IXGE_RX_NEXT_ETHERNET_INPUT))
1721  ? IXGE_RX_DESCRIPTOR_STATUS0_L3_OFFSET (s00) : 0;
1722  b0->current_length = len0 - l3_offset0;
1723  b0->current_data = l3_offset0;
1724 
1725  b_last->next_buffer = is_sop ? ~0 : bi0;
1726  bi_last = bi0;
1727  b_last = b0;
1728 
1729  bi_sop = is_sop ? bi0 : bi_sop;
1730 
1731  if (CLIB_DEBUG > 0 && is_eop0)
1732  {
1733  u8 *msg =
1734  vlib_validate_buffer (vm, bi_sop, /* follow_buffer_next */ 1);
1735  ASSERT (!msg);
1736  }
1737 
1738  if (0) /* "Dave" version */
1739  {
1740  if (is_eop0)
1741  {
1742  to_next[0] = bi_sop;
1743  to_next++;
1744  n_left_to_next--;
1745 
1746  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1747  to_next, n_left_to_next,
1748  bi_sop, next0);
1749  }
1750  }
1751  if (1) /* "Eliot" version */
1752  {
1753  if (PREDICT_TRUE (next0 == next_index))
1754  {
1755  to_next[0] = bi_sop;
1756  to_next += is_eop0;
1757  n_left_to_next -= is_eop0;
1758  }
1759  else
1760  {
1761  if (next0 != next_index && is_eop0)
1762  vlib_set_next_frame_buffer (vm, node, next0, bi_sop);
1763 
1764  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1765  next_index = next0;
1766  vlib_get_next_frame (vm, node, next_index,
1767  to_next, n_left_to_next);
1768  }
1769  }
1770  is_sop = is_eop0;
1771  }
1772  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1773  }
1774 
1775 found_hw_owned_descriptor_x1:
1776  if (n_descriptors_left > 0)
1777  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1778 
1779  _vec_len (xm->rx_buffers_to_add) = (to_add + 1) - xm->rx_buffers_to_add;
1780 
1781  {
1782  u32 n_done = n_descriptors - n_descriptors_left;
1783 
1784  if (n_trace > 0 && n_done > 0)
1785  {
1786  u32 n = clib_min (n_trace, n_done);
1787  ixge_rx_trace (xm, xd, dq,
1788  d_trace_save,
1789  d_trace_buffers,
1790  &dq->descriptors[start_descriptor_index], n);
1791  vlib_set_trace_count (vm, node, n_trace - n);
1792  }
1793  if (d_trace_save)
1794  {
1795  _vec_len (d_trace_save) = 0;
1796  _vec_len (d_trace_buffers) = 0;
1797  }
1798 
1799  /* Don't keep a reference to b_last if we don't have to.
1800  Otherwise we can overwrite a next_buffer pointer after having already
1801  enqueued a packet. */
1802  if (is_sop)
1803  {
1804  b_last->next_buffer = ~0;
1805  bi_last = ~0;
1806  }
1807 
1808  dq->rx.n_descriptors_done_this_call = n_done;
1809  dq->rx.n_descriptors_done_total += n_done;
1810  dq->rx.is_start_of_packet = is_sop;
1811  dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1812  dq->rx.saved_last_buffer_index = bi_last;
1813  dq->rx.saved_start_of_packet_next_index = next_index_sop;
1814  dq->rx.next_index = next_index;
1815  dq->rx.n_bytes += n_bytes;
1816 
1817  return n_packets;
1818  }
1819 }
1820 
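/* Editor's note: RX completion likewise runs in at most two no-wrap
   passes: from the software head to the end of the ring, then from
   slot 0 up to the hardware head.  The tail is then handed back so
   the hardware can reuse the replenished descriptors. */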
1821 static uword
1822 ixge_rx_queue (ixge_main_t * xm,
1823  ixge_device_t * xd,
1824  vlib_node_runtime_t * node, u32 queue_index)
1825 {
1826  ixge_dma_queue_t *dq =
1827  vec_elt_at_index (xd->dma_queues[VLIB_RX], queue_index);
1828  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, dq->queue_index);
1829  uword n_packets = 0;
1830  u32 hw_head_index, sw_head_index;
1831 
1832  /* One time initialization. */
1833  if (!dq->rx.node)
1834  {
1835  dq->rx.node = node;
1836  dq->rx.is_start_of_packet = 1;
1837  dq->rx.saved_start_of_packet_buffer_index = ~0;
1838  dq->rx.saved_last_buffer_index = ~0;
1839  }
1840 
1841  dq->rx.next_index = node->cached_next_index;
1842 
1843  dq->rx.n_descriptors_done_total = 0;
1844  dq->rx.n_descriptors_done_this_call = 0;
1845  dq->rx.n_bytes = 0;
1846 
1847  /* Fetch head from hardware and compare to where we think we are. */
1848  hw_head_index = dr->head_index;
1849  sw_head_index = dq->head_index;
1850 
1851  if (hw_head_index == sw_head_index)
1852  goto done;
1853 
1854  if (hw_head_index < sw_head_index)
1855  {
1856  u32 n_tried = dq->n_descriptors - sw_head_index;
1857  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1858  sw_head_index =
1859  ixge_ring_add (dq, sw_head_index,
1860  dq->rx.n_descriptors_done_this_call);
1861 
1862  if (dq->rx.n_descriptors_done_this_call != n_tried)
1863  goto done;
1864  }
1865  if (hw_head_index >= sw_head_index)
1866  {
1867  u32 n_tried = hw_head_index - sw_head_index;
1868  n_packets += ixge_rx_queue_no_wrap (xm, xd, dq, sw_head_index, n_tried);
1869  sw_head_index =
1870  ixge_ring_add (dq, sw_head_index,
1871  dq->rx.n_descriptors_done_this_call);
1872  }
1873 
1874 done:
1875  dq->head_index = sw_head_index;
1876  dq->tail_index =
1877  ixge_ring_add (dq, dq->tail_index, dq->rx.n_descriptors_done_total);
1878 
1879  /* Give tail back to hardware. */
1880  CLIB_MEMORY_BARRIER ();
1881 
1882  dr->tail_index = dq->tail_index;
1883 
1884  vlib_increment_combined_counter (vnet_main.
1885  interface_main.combined_sw_if_counters +
1886  VNET_INTERFACE_COUNTER_RX,
1887  0 /* thread_index */ ,
1888  xd->vlib_sw_if_index, n_packets,
1889  dq->rx.n_bytes);
1890 
1891  return n_packets;
1892 }
1893 
1894 static void
1895 ixge_interrupt (ixge_main_t * xm, ixge_device_t * xd, u32 i)
1896 {
1897  vlib_main_t *vm = xm->vlib_main;
1898  ixge_regs_t *r = xd->regs;
1899 
1900  if (i != 20)
1901  {
1902  ELOG_TYPE_DECLARE (e) =
1903  {
1904  .function = (char *) __FUNCTION__,.format =
1905  "ixge %d, %s",.format_args = "i1t1",.n_enum_strings =
1906  16,.enum_strings =
1907  {
1908  "flow director",
1909  "rx miss",
1910  "pci exception",
1911  "mailbox",
1912  "link status change",
1913  "linksec key exchange",
1914  "manageability event",
1915  "reserved23",
1916  "sdp0",
1917  "sdp1",
1918  "sdp2",
1919  "sdp3",
1920  "ecc", "descriptor handler error", "tcp timer", "other",},};
1921  struct
1922  {
1923  u8 instance;
1924  u8 index;
1925  } *ed;
1926  ed = ELOG_DATA (&vm->elog_main, e);
1927  ed->instance = xd->device_index;
1928  ed->index = i - 16;
1929  }
1930  else
1931  {
1932  u32 v = r->xge_mac.link_status;
1933  uword is_up = (v & (1 << 30)) != 0;
1934 
1935  ELOG_TYPE_DECLARE (e) =
1936  {
1937  .function = (char *) __FUNCTION__,.format =
1938  "ixge %d, link status change 0x%x",.format_args = "i4i4",};
1939  struct
1940  {
1941  u32 instance, link_status;
1942  } *ed;
1943  ed = ELOG_DATA (&vm->elog_main, e);
1944  ed->instance = xd->device_index;
1945  ed->link_status = v;
1946  xd->link_status_at_last_link_change = v;
1947 
1948  vlib_process_signal_event (vm, ixge_process_node.index,
1949  EVENT_SET_FLAGS,
1950  ((is_up << 31) | xd->vlib_hw_if_index));
1951  }
1952 }
1953 
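/* Editor's note: clean_block compacts the non-zero buffer indices in
   b[0..n_left) into t, zeroing b as it goes, and returns the number
   copied; e.g. b = {5, 0, 7} yields t = {5, 7} and a return of 2.
   The 4-way unrolling is purely a speed optimization. */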
1954 static uword
1955 clean_block (u32 * b, u32 * t, u32 n_left)
1956 {
1957  u32 *t0 = t;
1958 
1959  while (n_left >= 4)
1960  {
1961  u32 bi0, bi1, bi2, bi3;
1962 
1963  t[0] = bi0 = b[0];
1964  b[0] = 0;
1965  t += bi0 != 0;
1966 
1967  t[0] = bi1 = b[1];
1968  b[1] = 0;
1969  t += bi1 != 0;
1970 
1971  t[0] = bi2 = b[2];
1972  b[2] = 0;
1973  t += bi2 != 0;
1974 
1975  t[0] = bi3 = b[3];
1976  b[3] = 0;
1977  t += bi3 != 0;
1978 
1979  b += 4;
1980  n_left -= 4;
1981  }
1982 
1983  while (n_left > 0)
1984  {
1985  u32 bi0;
1986 
1987  t[0] = bi0 = b[0];
1988  b[0] = 0;
1989  t += bi0 != 0;
1990  b += 1;
1991  n_left -= 1;
1992  }
1993 
1994  return t - t0;
1995 }
1996 
1997 static void
1998 ixge_tx_queue (ixge_main_t * xm, ixge_device_t * xd, u32 queue_index)
1999 {
2000  vlib_main_t *vm = xm->vlib_main;
2001  ixge_dma_queue_t *dq =
2002  vec_elt_at_index (xd->dma_queues[VLIB_TX], queue_index);
2003  u32 n_clean, *b, *t, *t0;
2004  i32 n_hw_owned_descriptors;
2005  i32 first_to_clean, last_to_clean;
2006  u64 hwbp_race = 0;
2007 
2008  /* Handle case where head write back pointer update
2009  * arrives after the interrupt during high PCI bus loads.
2010  */
2011  while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2012  dq->tx.n_buffers_on_ring && (dq->head_index != dq->tail_index))
2013  {
2014  hwbp_race++;
2015  if (IXGE_HWBP_RACE_ELOG && (hwbp_race == 1))
2016  {
2017  ELOG_TYPE_DECLARE (e) =
2018  {
2019  .function = (char *) __FUNCTION__,.format =
2020  "ixge %d tx head index race: head %4d, tail %4d, buffs %4d",.format_args
2021  = "i4i4i4i4",};
2022  struct
2023  {
2024  u32 instance, head_index, tail_index, n_buffers_on_ring;
2025  } *ed;
2026  ed = ELOG_DATA (&vm->elog_main, e);
2027  ed->instance = xd->device_index;
2028  ed->head_index = dq->head_index;
2029  ed->tail_index = dq->tail_index;
2030  ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2031  }
2032  }
2033 
2034  dq->head_index = dq->tx.head_index_write_back[0];
2035  n_hw_owned_descriptors = ixge_ring_sub (dq, dq->head_index, dq->tail_index);
2036  ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
2037  n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2038 
2039  if (IXGE_HWBP_RACE_ELOG && hwbp_race)
2040  {
2041  ELOG_TYPE_DECLARE (e) =
2042  {
2043  .function = (char *) __FUNCTION__,.format =
2044  "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d",.format_args
2045  = "i4i4i4i4i4",};
2046  struct
2047  {
2048  u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2049  } *ed;
2050  ed = ELOG_DATA (&vm->elog_main, e);
2051  ed->instance = xd->device_index;
2052  ed->head_index = dq->head_index;
2053  ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2054  ed->n_clean = n_clean;
2055  ed->retries = hwbp_race;
2056  }
2057 
2058  /*
2059  * This function used to wait until hardware owned zero descriptors.
2060  * At high PPS rates, that doesn't happen until the TX ring is
2061  * completely full of descriptors which need to be cleaned up.
2062  * That, in turn, causes TX ring-full drops and/or long RX service
2063  * interruptions.
2064  */
2065  if (n_clean == 0)
2066  return;
2067 
2068  /* Clean the n_clean descriptors prior to the reported hardware head */
2069  last_to_clean = dq->head_index - 1;
2070  last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors :
2071  last_to_clean;
2072 
2073  first_to_clean = (last_to_clean) - (n_clean - 1);
2074  first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors :
2075  first_to_clean;
2076 
2077  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2078  t0 = t = xm->tx_buffers_pending_free;
2079  b = dq->descriptor_buffer_indices + first_to_clean;
2080 
2081  /* Wrap case: clean from first to end, then start to last */
2082  if (first_to_clean > last_to_clean)
2083  {
2084  t += clean_block (b, t, (dq->n_descriptors - 1) - first_to_clean);
2085  first_to_clean = 0;
2086  b = dq->descriptor_buffer_indices;
2087  }
2088 
2089  /* Typical case: clean from first to last */
2090  if (first_to_clean <= last_to_clean)
2091  t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
2092 
2093  if (t > t0)
2094  {
2095  u32 n = t - t0;
2096  vlib_buffer_free_no_next (vm, t0, n);
2097  ASSERT (dq->tx.n_buffers_on_ring >= n);
2098  dq->tx.n_buffers_on_ring -= n;
2099  _vec_len (xm->tx_buffers_pending_free) = 0;
2100  }
2101 }
2102 
2103 /* RX queue interrupts 0 thru 7; TX 8 thru 15. */
2104 always_inline uword
2105 ixge_interrupt_is_rx_queue (uword i)
2106 {
2107  return i < 8;
2108 }
2109 
2110 always_inline uword
2111 ixge_interrupt_is_tx_queue (uword i)
2112 {
2113  return i >= 8 && i < 16;
2114 }
2115 
2116 always_inline uword
2117 ixge_tx_queue_to_interrupt (uword i)
2118 {
2119  return 8 + i;
2120 }
2121 
2122 always_inline uword
2123 ixge_rx_queue_to_interrupt (uword i)
2124 {
2125  return 0 + i;
2126 }
2127 
2128 always_inline uword
2129 ixge_interrupt_rx_queue (uword i)
2130 {
2131  ASSERT (ixge_interrupt_is_rx_queue (i));
2132  return i - 0;
2133 }
2134 
2135 always_inline uword
2136 ixge_interrupt_tx_queue (uword i)
2137 {
2138  ASSERT (ixge_interrupt_is_tx_queue (i));
2139  return i - 8;
2140 }
2141 
2142 static uword
2143 ixge_device_input (ixge_main_t * xm,
2144  ixge_device_t * xd, vlib_node_runtime_t * node)
2145 {
2146  ixge_regs_t *r = xd->regs;
2147  u32 i, s;
2148  uword n_rx_packets = 0;
2149 
2150  s = r->interrupt.status_write_1_to_set;
2151  if (s)
2152  r->interrupt.status_write_1_to_clear = s;
2153 
2154  /* *INDENT-OFF* */
2155  foreach_set_bit (i, s, ({
2156  if (ixge_interrupt_is_rx_queue (i))
2157  n_rx_packets += ixge_rx_queue (xm, xd, node, ixge_interrupt_rx_queue (i));
2158 
2159  else if (ixge_interrupt_is_tx_queue (i))
2160  ixge_tx_queue (xm, xd, ixge_interrupt_tx_queue (i));
2161 
2162  else
2163  ixge_interrupt (xm, xd, i);
2164  }));
2165  /* *INDENT-ON* */
2166 
2167  return n_rx_packets;
2168 }
2169 
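/* Editor's note: ixge_input has two operating modes.  In interrupt
   mode it services only the devices flagged in runtime_data[0] and
   re-arms their interrupts; in polling mode it sweeps every device on
   each dispatch and re-enables interrupts only when the node is about
   to switch back to interrupt mode. */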
2170 static uword
2171 ixge_input (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * f)
2172 {
2173  ixge_main_t *xm = &ixge_main;
2174  ixge_device_t *xd;
2175  uword n_rx_packets = 0;
2176 
2177  if (node->state == VLIB_NODE_STATE_INTERRUPT)
2178  {
2179  uword i;
2180 
2181  /* Loop over devices with interrupts. */
2182  /* *INDENT-OFF* */
2183  foreach_set_bit (i, node->runtime_data[0], ({
2184  xd = vec_elt_at_index (xm->devices, i);
2185  n_rx_packets += ixge_device_input (xm, xd, node);
2186 
2187  /* Re-enable interrupts since we're going to stay in interrupt mode. */
2188  if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2189  xd->regs->interrupt.enable_write_1_to_set = ~0;
2190  }));
2191  /* *INDENT-ON* */
2192 
2193  /* Clear mask of devices with pending interrupts. */
2194  node->runtime_data[0] = 0;
2195  }
2196  else
2197  {
2198  /* Poll all devices for input/interrupts. */
2199  vec_foreach (xd, xm->devices)
2200  {
2201  n_rx_packets += ixge_device_input (xm, xd, node);
2202 
2203  /* Re-enable interrupts when switching out of polling mode. */
2204  if (node->flags &
2205  VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)
2206  xd->regs->interrupt.enable_write_1_to_set = ~0;
2207  }
2208  }
2209 
2210  return n_rx_packets;
2211 }
2212 
2213 static char *ixge_error_strings[] = {
2214 #define _(n,s) s,
2215  foreach_ixge_error
2216 #undef _
2217 };
2218 
2219 /* *INDENT-OFF* */
2220 VLIB_REGISTER_NODE (ixge_input_node, static) = {
2221  .function = ixge_input,
2222  .type = VLIB_NODE_TYPE_INPUT,
2223  .name = "ixge-input",
2224 
2225  /* Will be enabled if/when hardware is detected. */
2226  .state = VLIB_NODE_STATE_DISABLED,
2227 
2228  .format_buffer = format_ethernet_header_with_length,
2229  .format_trace = format_ixge_rx_dma_trace,
2230 
2231  .n_errors = IXGE_N_ERROR,
2232  .error_strings = ixge_error_strings,
2233 
2234  .n_next_nodes = IXGE_RX_N_NEXT,
2235  .next_nodes = {
2236  [IXGE_RX_NEXT_DROP] = "error-drop",
2237  [IXGE_RX_NEXT_ETHERNET_INPUT] = "ethernet-input",
2238  [IXGE_RX_NEXT_IP4_INPUT] = "ip4-input",
2239  [IXGE_RX_NEXT_IP6_INPUT] = "ip6-input",
2240  },
2241 };
2242 
2243 VLIB_NODE_FUNCTION_MULTIARCH_CLONE (ixge_input);
2244 CLIB_MULTIARCH_SELECT_FN (ixge_input);
2245 /* *INDENT-ON* */
2246 
2247 static u8 *
2248 format_ixge_device_name (u8 * s, va_list * args)
2249 {
2250  u32 i = va_arg (*args, u32);
2251  ixge_main_t *xm = &ixge_main;
2252  ixge_device_t *xd = vec_elt_at_index (xm->devices, i);
2253  vlib_pci_addr_t *addr = vlib_pci_get_addr (xd->pci_dev_handle);
2254  return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2255  addr->domain, addr->bus, addr->slot, addr->function);
2256 }
2257 
2258 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2259 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2260 
2261 static u8 ixge_counter_flags[] = {
2262 #define _(a,f) 0,
2263 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2264  foreach_ixge_counter
2265 #undef _
2266 #undef _64
2267 };
2268 
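/* Editor's note: most 8259x statistics registers clear on read, so
   ixge_update_counters accumulates them into xd->counters; 64-bit
   counters are read as two 32-bit halves (low word at the register,
   high word at the next one), and registers that do not clear on read
   are zeroed explicitly to keep the running sums correct. */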
2269 static void
2270 ixge_update_counters (ixge_device_t * xd)
2271 {
2272  /* Byte offset for counter registers. */
2273  static u32 reg_offsets[] = {
2274 #define _(a,f) (a) / sizeof (u32),
2275 #define _64(a,f) _(a,f)
2276  foreach_ixge_counter
2277 #undef _
2278 #undef _64
2279  };
2280  volatile u32 *r = (volatile u32 *) xd->regs;
2281  int i;
2282 
2283  for (i = 0; i < ARRAY_LEN (xd->counters); i++)
2284  {
2285  u32 o = reg_offsets[i];
2286  xd->counters[i] += r[o];
2287  if (ixge_counter_flags[i] & IXGE_COUNTER_NOT_CLEAR_ON_READ)
2288  r[o] = 0;
2289  if (ixge_counter_flags[i] & IXGE_COUNTER_IS_64_BIT)
2290  xd->counters[i] += (u64) r[o + 1] << (u64) 32;
2291  }
2292 }
2293 
2294 static u8 *
2295 format_ixge_device_id (u8 * s, va_list * args)
2296 {
2297  u32 device_id = va_arg (*args, u32);
2298  char *t = 0;
2299  switch (device_id)
2300  {
2301 #define _(f,n) case n: t = #f; break;
2302  foreach_ixge_pci_device_id;
2303 #undef _
2304  default:
2305  t = 0;
2306  break;
2307  }
2308  if (t == 0)
2309  s = format (s, "unknown 0x%x", device_id);
2310  else
2311  s = format (s, "%s", t);
2312  return s;
2313 }
2314 
2315 static u8 *
2316 format_ixge_link_status (u8 * s, va_list * args)
2317 {
2318  ixge_device_t *xd = va_arg (*args, ixge_device_t *);
2319  u32 v = xd->link_status_at_last_link_change;
2320 
2321  s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2322 
2323  {
2324  char *modes[] = {
2325  "1g", "10g parallel", "10g serial", "autoneg",
2326  };
2327  char *speeds[] = {
2328  "unknown", "100m", "1g", "10g",
2329  };
2330  s = format (s, ", mode %s, speed %s",
2331  modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
2332  }
2333 
2334  return s;
2335 }
2336 
2337 static u8 *
2338 format_ixge_device (u8 * s, va_list * args)
2339 {
2340  u32 dev_instance = va_arg (*args, u32);
2341  CLIB_UNUSED (int verbose) = va_arg (*args, int);
2342  ixge_main_t *xm = &ixge_main;
2343  ixge_device_t *xd = vec_elt_at_index (xm->devices, dev_instance);
2344  ixge_phy_t *phy = xd->phys + xd->phy_index;
2345  u32 indent = format_get_indent (s);
2346 
2347  ixge_update_counters (xd);
2348  xd->link_status_at_last_link_change = xd->regs->xge_mac.link_status;
2349 
2350  s = format (s, "Intel 8259X: id %U\n%Ulink %U",
2351  format_ixge_device_id, xd->device_id,
2352  format_white_space, indent + 2, format_ixge_link_status, xd);
2353 
2354  {
2355 
2356  vlib_pci_addr_t *addr = vlib_pci_get_addr (xd->pci_dev_handle);
2357  vlib_pci_device_info_t *d = vlib_pci_get_device_info (addr, 0);
2358 
2359  if (d)
2360  s = format (s, "\n%UPCIe %U", format_white_space, indent + 2,
2361  format_vlib_pci_link_speed, d);
2362  }
2363 
2364  s = format (s, "\n%U", format_white_space, indent + 2);
2365  if (phy->mdio_address != ~0)
2366  s = format (s, "PHY address %d, id 0x%x", phy->mdio_address, phy->id);
2367  else if (xd->sfp_eeprom.id == SFP_ID_sfp)
2368  s = format (s, "SFP %U", format_sfp_eeprom, &xd->sfp_eeprom);
2369  else
2370  s = format (s, "PHY not found");
2371 
2372  /* FIXME */
2373  {
2374  ixge_dma_queue_t *dq = vec_elt_at_index (xd->dma_queues[VLIB_RX], 0);
2375  ixge_dma_regs_t *dr = get_dma_regs (xd, VLIB_RX, 0);
2376  u32 hw_head_index = dr->head_index;
2377  u32 sw_head_index = dq->head_index;
2378  u32 nitems;
2379 
2380  nitems = ixge_ring_sub (dq, hw_head_index, sw_head_index);
2381  s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
2382  format_white_space, indent + 2, nitems, dq->n_descriptors);
2383 
2384  s = format (s, "\n%U%d buffers in driver rx cache",
2385  format_white_space, indent + 2,
2386  vec_len (xm->rx_buffers_to_add));
2387 
2388  s = format (s, "\n%U%d buffers on tx queue 0 ring",
2389  format_white_space, indent + 2,
2390  xd->dma_queues[VLIB_TX][0].tx.n_buffers_on_ring);
2391  }
2392  {
2393  u32 i;
2394  u64 v;
2395  static char *names[] = {
2396 #define _(a,f) #f,
2397 #define _64(a,f) _(a,f)
2398  foreach_ixge_counter
2399 #undef _
2400 #undef _64
2401  };
2402 
2403  for (i = 0; i < ARRAY_LEN (names); i++)
2404  {
2405  v = xd->counters[i] - xd->counters_last_clear[i];
2406  if (v != 0)
2407  s = format (s, "\n%U%-40U%16Ld",
2408  format_white_space, indent + 2,
2409  format_c_identifier, names[i], v);
2410  }
2411  }
2412 
2413  return s;
2414 }
2415 
2416 static void
2417 ixge_clear_hw_interface_counters (u32 instance)
2418 {
2419  ixge_main_t *xm = &ixge_main;
2420  ixge_device_t *xd = vec_elt_at_index (xm->devices, instance);
2421  ixge_update_counters (xd);
2422  memcpy (xd->counters_last_clear, xd->counters, sizeof (xd->counters));
2423 }
2424 
2425 /*
2426  * Dynamically redirect all pkts from a specific interface
2427  * to the specified node
2428  */
2429 static void
2430 ixge_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
2431  u32 node_index)
2432 {
2433  ixge_main_t *xm = &ixge_main;
2434  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
2435  ixge_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2436 
2437  /* Shut off redirection */
2438  if (node_index == ~0)
2439  {
2440  xd->per_interface_next_index = node_index;
2441  return;
2442  }
2443 
2444  xd->per_interface_next_index =
2445  vlib_node_add_next (xm->vlib_main, ixge_input_node.index, node_index);
2446 }
2447 
2448 
2449 /* *INDENT-OFF* */
2450 VNET_DEVICE_CLASS (ixge_device_class) = {
2451  .name = "ixge",
2452  .tx_function = ixge_interface_tx,
2453  .format_device_name = format_ixge_device_name,
2454  .format_device = format_ixge_device,
2455  .format_tx_trace = format_ixge_tx_dma_trace,
2456  .clear_counters = ixge_clear_hw_interface_counters,
2457  .admin_up_down_function = ixge_interface_admin_up_down,
2458  .rx_redirect_to_node = ixge_set_interface_next_node,
2459 };
2460 /* *INDENT-ON* */
2461 
2462 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
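/* Note: the rx_split_control buffer-size field programmed in ixge_dma_init
   below is in 1 KiB units, so 2048 bytes becomes a field value of 2; any
   frame up to a standard MTU then fits in a single descriptor. */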
2463 
2464 static clib_error_t *
2465 ixge_dma_init (ixge_device_t * xd, vlib_rx_or_tx_t rt, u32 queue_index)
2466 {
2467  ixge_main_t *xm = &ixge_main;
2468  vlib_main_t *vm = xm->vlib_main;
2469  ixge_dma_queue_t *dq;
2470  clib_error_t *error = 0;
2471 
2472  vec_validate (xd->dma_queues[rt], queue_index);
2473  dq = vec_elt_at_index (xd->dma_queues[rt], queue_index);
2474 
2475  if (!xm->n_descriptors_per_cache_line)
2476  xm->n_descriptors_per_cache_line =
2477  CLIB_CACHE_LINE_BYTES / sizeof (dq->descriptors[0]);
2478 
2479  if (!xm->n_bytes_in_rx_buffer)
2480  xm->n_bytes_in_rx_buffer = IXGE_N_BYTES_IN_RX_BUFFER;
2481  xm->n_bytes_in_rx_buffer = round_pow2 (xm->n_bytes_in_rx_buffer, 1024);
2482 
2483  if (!xm->n_descriptors[rt])
2484  xm->n_descriptors[rt] = 4 * VLIB_FRAME_SIZE;
2485 
2486  dq->queue_index = queue_index;
2487  dq->n_descriptors =
2488  round_pow2 (xm->n_descriptors[rt], xm->n_descriptors_per_cache_line);
2489  dq->head_index = dq->tail_index = 0;
2490 
2491  dq->descriptors =
2492  vlib_physmem_alloc_aligned (vm, xm->physmem_region, &error,
2493  dq->n_descriptors *
2494  sizeof (dq->descriptors[0]),
2495  128 /* per chip spec */ );
2496  if (error)
2497  return error;
2498 
2499  memset (dq->descriptors, 0,
2500  dq->n_descriptors * sizeof (dq->descriptors[0]));
2501  vec_resize (dq->descriptor_buffer_indices, dq->n_descriptors);
2502 
2503  if (rt == VLIB_RX)
2504  {
2505  u32 n_alloc, i;
2506 
2507  n_alloc = vlib_buffer_alloc (vm, dq->descriptor_buffer_indices,
2508  vec_len (dq->descriptor_buffer_indices));
2509  ASSERT (n_alloc == vec_len (dq->descriptor_buffer_indices));
2510  for (i = 0; i < n_alloc; i++)
2511  {
2512  dq->descriptors[i].rx_to_hw.tail_address =
2513  vlib_get_buffer_data_physical_address (vm,
2514  dq->descriptor_buffer_indices
2515  [i]);
2516  }
2517  }
2518  else
2519  {
2520  u32 i;
2521 
2522  dq->tx.head_index_write_back = vlib_physmem_alloc (vm,
2523  xm->physmem_region,
2524  &error,
2525  CLIB_CACHE_LINE_BYTES);
2526 
2527  for (i = 0; i < dq->n_descriptors; i++)
2528  dq->descriptors[i].tx = xm->tx_descriptor_template;
2529 
2530  vec_resize (xm->tx_buffers_pending_free, dq->n_descriptors - 1);
2531  }
2532 
2533  {
2534  ixge_dma_regs_t *dr = get_dma_regs (xd, rt, queue_index);
2535  u64 a;
2536 
2537  a =
2538  vlib_physmem_virtual_to_physical (vm, xm->physmem_region,
2539  dq->descriptors);
2540  dr->descriptor_address[0] = a & 0xFFFFFFFF;
2541  dr->descriptor_address[1] = a >> (u64) 32;
2542  dr->n_descriptor_bytes = dq->n_descriptors * sizeof (dq->descriptors[0]);
2543  dq->head_index = dq->tail_index = 0;
2544 
2545  if (rt == VLIB_RX)
2546  {
2547  ASSERT ((xm->n_bytes_in_rx_buffer / 1024) < 32);
2548  dr->rx_split_control =
2549  ( /* buffer size */ ((xm->n_bytes_in_rx_buffer / 1024) << 0)
2550  | ( /* lo free descriptor threshold (units of 64 descriptors) */
2551  (1 << 22)) | ( /* descriptor type: advanced one buffer */
2552  (1 << 25)) | ( /* drop if no descriptors available */
2553  (1 << 28)));
2554 
2555  /* Give hardware all but last 16 cache lines' worth of descriptors. */
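	/* With the defaults above (4 * VLIB_FRAME_SIZE = 1024 rx descriptors,
	   16-byte descriptors, and assuming 64-byte cache lines, i.e. 4
	   descriptors per line), this leaves tail_index = 1024 - 64 = 960. */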
2556  dq->tail_index = dq->n_descriptors -
2557  16 * xm->n_descriptors_per_cache_line;
2558  }
2559  else
2560  {
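	/* TX head-index write-back: the NIC periodically DMAs its current
	   head index into this host memory word, so the driver can find
	   completed descriptors by reading RAM instead of a device register. */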
2561  /* Make sure it's initialized before hardware can get to it. */
2562  dq->tx.head_index_write_back[0] = dq->head_index;
2563 
2564  a = vlib_physmem_virtual_to_physical (vm, xm->physmem_region,
2565  dq->tx.head_index_write_back);
2566  dr->tx.head_index_write_back_address[0] = /* enable bit */ 1 | a;
2567  dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2568  }
2569 
2570  /* DMA on the 82599 does not work with bit [13] (rx data write relaxed
2571  ordering) or the undocumented bit [12] set. */
2572  if (rt == VLIB_RX)
2573  dr->dca_control &= ~((1 << 13) | (1 << 12));
2574 
2575  CLIB_MEMORY_BARRIER ();
2576 
2577  if (rt == VLIB_TX)
2578  {
2579  xd->regs->tx_dma_control |= (1 << 0);
2580  dr->control |= ((32 << 0) /* prefetch threshold */
2581  | (64 << 8) /* host threshold */
2582  | (0 << 16) /* writeback threshold */ );
2583  }
2584 
2585  /* Enable this queue and wait for hardware to initialize
2586  before adding to tail. */
2587  if (rt == VLIB_TX)
2588  {
2589  dr->control |= 1 << 25;
2590  while (!(dr->control & (1 << 25)))
2591  ;
2592  }
2593 
2594  /* Set head/tail indices and enable DMA. */
2595  dr->head_index = dq->head_index;
2596  dr->tail_index = dq->tail_index;
2597  }
2598 
2599  return error;
2600 }
2601 
2602 static u32
2603 ixge_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
2604 {
2605  ixge_device_t *xd;
2606  ixge_regs_t *r;
2607  u32 old;
2608  ixge_main_t *xm = &ixge_main;
2609 
2610  xd = vec_elt_at_index (xm->devices, hw->dev_instance);
2611  r = xd->regs;
2612 
2613  old = r->filter_control;
2614 
2615  if (flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
2616  r->filter_control = old | (1 << 9) /* unicast promiscuous */ ;
2617  else
2618  r->filter_control = old & ~(1 << 9);
2619 
2620  return old;
2621 }
2622 
2623 static void
2624 ixge_device_init (ixge_main_t * xm)
2625 {
2626  vnet_main_t *vnm = vnet_get_main ();
2627  ixge_device_t *xd;
2628 
2629  /* Reset chip(s). */
2630  vec_foreach (xd, xm->devices)
2631  {
2632  ixge_regs_t *r = xd->regs;
2633  const u32 reset_bit = (1 << 26) | (1 << 3);
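      /* Presumably RST (bit 26, master reset) and LRST (bit 3, link reset)
	 in the 82599 CTRL register; both self-clear once the reset
	 completes, which is what the busy-wait below relies on. */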
2634 
2635  r->control |= reset_bit;
2636 
2637  /* No need to suspend. Timed to take ~1e-6 secs */
2638  while (r->control & reset_bit)
2639  ;
2640 
2641  /* Software loaded. */
2642  r->extended_control |= (1 << 28);
2643 
2644  ixge_phy_init (xd);
2645 
2646  /* Register ethernet interface. */
2647  {
2648  u8 addr8[6];
2649  u32 i, addr32[2];
2650  clib_error_t *error;
2651 
2652  addr32[0] = r->rx_ethernet_address0[0][0];
2653  addr32[1] = r->rx_ethernet_address0[0][1];
2654  for (i = 0; i < 6; i++)
2655  addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
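    /* The unpack above assumes the usual RAL0/RAH0 layout: the first
       dword holds MAC bytes 0-3 and the low 16 bits of the second dword
       hold bytes 4-5, least-significant byte first. */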
2656 
2657  error = ethernet_register_interface
2658  (vnm, ixge_device_class.index, xd->device_index,
2659  /* ethernet address */ addr8,
2660  &xd->vlib_hw_if_index, ixge_flag_change);
2661  if (error)
2662  clib_error_report (error);
2663  }
2664 
2665  {
2666  vnet_sw_interface_t *sw =
2667  vnet_get_hw_sw_interface (vnm, xd->vlib_hw_if_index);
2668  xd->vlib_sw_if_index = sw->sw_if_index;
2669  }
2670 
2671  ixge_dma_init (xd, VLIB_RX, /* queue_index */ 0);
2672 
2674 
2675  ixge_dma_init (xd, VLIB_TX, /* queue_index */ 0);
2676 
2677  /* RX/TX queue 0 gets mapped to interrupt bits 0 & 8. */
2678  r->interrupt.queue_mapping[0] = (( /* valid bit */ (1 << 7) |
2679  ixge_rx_queue_to_interrupt (0)) << 0);
2680 
2681  r->interrupt.queue_mapping[0] |= (( /* valid bit */ (1 << 7) |
2682  ixge_tx_queue_to_interrupt (0)) << 8);
2683 
2684  /* No use in getting too many interrupts.
2685  Limit them to one every 3/4 ring size at line rate
2686  with min sized packets.
2687  Not needed, since the kernel/vlib main loop provides an adequate
2688  interrupt limiting scheme. */
2689  if (0)
2690  {
2691  f64 line_rate_max_pps =
2692  10e9 / (8 * (64 + /* interframe padding */ 20));
2693  ixge_throttle_queue_interrupt (r, 0,
2694  .75 * xm->n_descriptors[VLIB_RX] /
2695  line_rate_max_pps);
2696  }
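      /* For reference: a minimal frame occupies 64 + 20 bytes on the wire
	 (preamble plus inter-frame gap), so line_rate_max_pps = 10e9 / (8 * 84),
	 about 14.9 Mpps; the throttle above would space interrupts at least
	 0.75 * ring-size packet times apart. */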
2697 
2698  /* Accept all multicast and broadcast packets. Should really add them
2699  to the dst_ethernet_address register array. */
2700  r->filter_control |= (1 << 10) | (1 << 8);
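  /* In the 82599 FCTRL register, bit 10 is broadcast accept (BAM) and
     bit 8 is multicast promiscuous (MPE); unicast promiscuous (bit 9,
     UPE) is toggled separately in ixge_flag_change() above. */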
2701 
2702  /* Enable frames up to size in mac frame size register. */
2703  r->xge_mac.control |= 1 << 2;
2704  r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
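  /* 9216-byte jumbo payload plus a 14-byte ethernet header; the maximum
     frame size field occupies the upper 16 bits of the register, hence
     the << 16. */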
2705 
2706  /* Enable all interrupts. */
2707  if (!IXGE_ALWAYS_POLL)
2708  xd->regs->interrupt.enable_write_1_to_set = ~0;
2709  }
2710 }
2711 
2712 static uword
2713 ixge_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
2714 {
2715  vnet_main_t *vnm = vnet_get_main ();
2716  ixge_main_t *xm = &ixge_main;
2717  ixge_device_t *xd;
2718  uword event_type, *event_data = 0;
2719  f64 timeout, link_debounce_deadline;
2720 
2721  ixge_device_init (xm);
2722 
2723  /* Clear all counters. */
2724  vec_foreach (xd, xm->devices)
2725  {
2726  ixge_update_counters (xd);
2727  memset (xd->counters, 0, sizeof (xd->counters));
2728  }
2729 
2730  timeout = 30.0;
2731  link_debounce_deadline = 1e70;
2732 
2733  while (1)
2734  {
2735  /* 36 bit stat counters could overflow in ~50 secs.
2736  We poll every 30 secs to be conservative. */
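      /* At 10 Gb/s a 36-bit octet counter can wrap after 2^36 bytes /
	 1.25e9 bytes per second, i.e. in roughly 55 seconds, hence "~50 secs". */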
2737  vlib_process_wait_for_event_or_clock (vm, timeout);
2738 
2739  event_type = vlib_process_get_events (vm, &event_data);
2740 
2741  switch (event_type)
2742  {
2743  case EVENT_SET_FLAGS:
2744  /* 1 ms */
2745  link_debounce_deadline = vlib_time_now (vm) + 1e-3;
2746  timeout = 1e-3;
2747  break;
2748 
2749  case ~0:
2750  /* No events found: timer expired. */
2751  if (vlib_time_now (vm) > link_debounce_deadline)
2752  {
2753  vec_foreach (xd, xm->devices)
2754  {
2755  ixge_regs_t *r = xd->regs;
2756  u32 v = r->xge_mac.link_status;
2757  uword is_up = (v & (1 << 30)) != 0;
2758 
2759  vnet_hw_interface_set_flags
2760  (vnm, xd->vlib_hw_if_index,
2761  is_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
2762  }
2763  link_debounce_deadline = 1e70;
2764  timeout = 30.0;
2765  }
2766  break;
2767 
2768  default:
2769  ASSERT (0);
2770  }
2771 
2772  if (event_data)
2773  _vec_len (event_data) = 0;
2774 
2775  /* Query stats every 30 secs. */
2776  {
2777  f64 now = vlib_time_now (vm);
2778  if (now - xm->time_last_stats_update > 30)
2779  {
2780  xm->time_last_stats_update = now;
2781  vec_foreach (xd, xm->devices) ixge_update_counters (xd);
2782  }
2783  }
2784  }
2785 
2786  return 0;
2787 }
2788 
2789 static vlib_node_registration_t ixge_process_node = {
2790  .function = ixge_process,
2791  .type = VLIB_NODE_TYPE_PROCESS,
2792  .name = "ixge-process",
2793 };
2794 
2795 clib_error_t *
2796 ixge_init (vlib_main_t * vm)
2797 {
2798  ixge_main_t *xm = &ixge_main;
2799  clib_error_t *error;
2800 
2801  xm->vlib_main = vm;
2802  memset (&xm->tx_descriptor_template, 0,
2803  sizeof (xm->tx_descriptor_template));
2804  memset (&xm->tx_descriptor_template_mask, 0,
2805  sizeof (xm->tx_descriptor_template_mask));
2806  xm->tx_descriptor_template.status0 =
2807  (IXGE_TX_DESCRIPTOR_STATUS0_ADVANCED |
2808  IXGE_TX_DESCRIPTOR_STATUS0_IS_ADVANCED |
2809  IXGE_TX_DESCRIPTOR_STATUS0_INSERT_FCS);
2810  xm->tx_descriptor_template_mask.status0 = 0xffff;
2811  xm->tx_descriptor_template_mask.status1 = 0x00003fff;
2812 
2813  xm->tx_descriptor_template_mask.status0 &=
2814  ~(IXGE_TX_DESCRIPTOR_STATUS0_IS_END_OF_PACKET
2815  | IXGE_TX_DESCRIPTOR_STATUS0_REPORT_STATUS);
2816  xm->tx_descriptor_template_mask.status1 &=
2817  ~(IXGE_TX_DESCRIPTOR_STATUS1_DONE);
2818 
2819  error = vlib_call_init_function (vm, pci_bus_init);
2820 
2821  return error;
2822 }
2823 
2824 VLIB_INIT_FUNCTION (ixge_init);
2825 
2826 
2827 static void
2828 ixge_pci_intr_handler (vlib_pci_dev_handle_t h)
2829 {
2830  ixge_main_t *xm = &ixge_main;
2831  vlib_main_t *vm = xm->vlib_main;
2832  uword private_data = vlib_pci_get_private_data (h);
2833 
2835 
2836  /* Let node know which device is interrupting. */
2837  {
2838  vlib_node_runtime_t *rt =
2839  vlib_node_get_runtime (vm, ixge_input_node.index);
2840  rt->runtime_data[0] |= 1 << private_data;
2841  }
2842 }
2843 
2844 static clib_error_t *
2845 ixge_pci_init (vlib_main_t * vm, vlib_pci_dev_handle_t h)
2846 {
2847  ixge_main_t *xm = &ixge_main;
2848  clib_error_t *error = 0;
2849  void *r;
2850  ixge_device_t *xd;
2851  vlib_pci_addr_t *addr = vlib_pci_get_addr (h);
2852  vlib_pci_device_info_t *d = vlib_pci_get_device_info (addr, 0);
2853 
2854  /* Allocate physmem region for DMA buffers */
2855  if (xm->physmem_region_allocated == 0)
2856  {
2857  error = vlib_physmem_region_alloc (vm, "ixge descriptors", 2 << 20, 0,
2858  VLIB_PHYSMEM_F_INIT_MHEAP,
2859  &xm->physmem_region);
2860  xm->physmem_region_allocated = 1;
2861  }
2862  if (error)
2863  return error;
2864 
2865  error = vlib_pci_map_resource (h, 0, &r);
2866  if (error)
2867  return error;
2868 
2869  vec_add2 (xm->devices, xd, 1);
2870 
2871  if (vec_len (xm->devices) == 1)
2872  {
2873  ixge_input_node.function = ixge_input_multiarch_select ();
2874  }
2875 
2876  xd->pci_dev_handle = h;
2877  xd->device_id = d->device_id;
2878  xd->regs = r;
2879  xd->device_index = xd - xm->devices;
2880  xd->pci_function = addr->function;
2881  xd->per_interface_next_index = ~0;
2882 
2883  vlib_pci_set_private_data (h, xd->device_index);
2884 
2885  /* Chip found so enable node. */
2886  {
2887  vlib_node_set_state (vm, ixge_input_node.index,
2888  (IXGE_ALWAYS_POLL
2889  ? VLIB_NODE_STATE_POLLING
2890  : VLIB_NODE_STATE_INTERRUPT));
2891 
2892  //dev->private_data = xd->device_index;
2893  }
2894 
2895  if (vec_len (xm->devices) == 1)
2896  {
2897  vlib_register_node (vm, &ixge_process_node);
2898  xm->process_node_index = ixge_process_node.index;
2899  }
2900 
2901  error = vlib_pci_bus_master_enable (h);
2902 
2903  if (error)
2904  return error;
2905 
2906  return vlib_pci_intr_enable (h);
2907 }
2908 
2909 /* *INDENT-OFF* */
2910 PCI_REGISTER_DEVICE (ixge_pci_device_registration,static) = {
2911  .init_function = ixge_pci_init,
2912  .interrupt_handler = ixge_pci_intr_handler,
2913  .supported_devices = {
2914 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2915  foreach_ixge_pci_device_id
2916 #undef _
2917  { 0 },
2918  },
2919 };
2920 /* *INDENT-ON* */
2921 
2922 void
2923 ixge_set_next_node (ixge_rx_next_t next, char *name)
2924 {
2925  vlib_node_registration_t *r = &ixge_input_node;
2926 
2927  switch (next)
2928  {
2929  case IXGE_RX_NEXT_IP4_INPUT:
2930  case IXGE_RX_NEXT_IP6_INPUT:
2931  case IXGE_RX_NEXT_ETHERNET_INPUT:
2932  r->next_nodes[next] = name;
2933  break;
2934 
2935  default:
2936  clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
2937  break;
2938  }
2939 }
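/* Hypothetical usage sketch (node name is illustrative, not from this
   file): a plugin that registers its own graph node can steer ixge ip4
   traffic to it instead of ip4-input with:

     ixge_set_next_node (IXGE_RX_NEXT_IP4_INPUT, "my-ip4-node");
*/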
2940 
2941 /* *INDENT-OFF* */
2942 VLIB_PLUGIN_REGISTER () = {
2943  .version = VPP_BUILD_VER,
2944  .default_disabled = 1,
2945  .description = "Intel 82599 Family Native Driver (experimental)",
2946 };
2947 #endif
2948 
2949 /* *INDENT-ON* */
2950 
2951 /*
2952  * fd.io coding-style-patch-verification: ON
2953  *
2954  * Local Variables:
2955  * eval: (c-set-style "gnu")
2956  * End:
2957  */