FD.io VPP
v21.06-3-gbb25fbf28
Vector Packet Processing
23 #if __x86_64__ || __i386__ || __aarch64__
26 #ifndef CLIB_HAVE_VEC128
27 #warning HACK: ixge driver wont really work, missing u32x4
28 typedef unsigned long long u32x4;
38 #include <vpp/app/version.h>
40 #define IXGE_ALWAYS_POLL 0
42 #define EVENT_SET_FLAGS 0
43 #define IXGE_HWBP_RACE_ELOG 0
45 #define PCI_VENDOR_ID_INTEL 0x8086
48 #define XGE_PHY_DEV_TYPE_PMA_PMD 1
49 #define XGE_PHY_DEV_TYPE_PHY_XS 4
50 #define XGE_PHY_ID1 0x2
51 #define XGE_PHY_ID2 0x3
52 #define XGE_PHY_CONTROL 0x0
53 #define XGE_PHY_CONTROL_RESET (1 << 15)
68 while (!(r->software_semaphore & (1 << 0)))
76 r->software_semaphore |= 1 << 1;
78 while (!(r->software_semaphore & (1 << 1)));
85 r->software_semaphore &= ~3;
94 u32 fw_mask = sw_mask << 5;
100 m = r->software_firmware_sync;
101 done = (m & fw_mask) == 0;
103 r->software_firmware_sync = m | sw_mask;
115 r->software_firmware_sync &= ~sw_mask;
124 const u32 busy_bit = 1 << 30;
130 ASSERT (reg_index < (1 << 16));
131 ASSERT (dev_type < (1 << 5));
133 r->xge_mac.phy_data = v;
137 reg_index | (dev_type << 16) | (xd->phys[xd->phy_index].mdio_address << 21);
139 r->xge_mac.phy_command = x | busy_bit;
141 while (r->xge_mac.phy_command & busy_bit)
144 r->xge_mac.phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
145 while (r->xge_mac.phy_command & busy_bit)
149 v = r->xge_mac.phy_data >> 16;
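
The fragments at source lines 124-149 implement the MDIO PHY register access: an address cycle is written to phy_command with a busy bit (bit 30) set, the code busy-waits for the bit to clear, an operation cycle follows with opcode 2 for a read or 1 for a write, and a read result is taken from the upper 16 bits of phy_data. A minimal standalone sketch of that sequence follows; the simplified register struct and the mdio_address parameter are assumptions, and it is not a verbatim copy of the driver, which also appears to bracket the access with the semaphore helpers shown at source lines 68-85.

  /* Sketch only: register layout and field names assumed from the fragments above. */
  typedef struct { volatile unsigned int phy_command, phy_data; } xge_mac_sketch_t;

  static unsigned int
  mdio_read_write_sketch (xge_mac_sketch_t * m, unsigned int dev_type,
                          unsigned int reg_index, unsigned int mdio_address,
                          unsigned int v, int is_read)
  {
    const unsigned int busy_bit = 1 << 30;
    unsigned int x;

    if (!is_read)
      m->phy_data = v;

    /* Address cycle: register index, MDIO device type, PHY address. */
    x = reg_index | (dev_type << 16) | (mdio_address << 21);
    m->phy_command = x | busy_bit;
    while (m->phy_command & busy_bit)
      ;

    /* Operation cycle: opcode 2 = read, 1 = write. */
    m->phy_command = x | ((is_read ? 2 : 1) << 26) | busy_bit;
    while (m->phy_command & busy_bit)
      ;

    return is_read ? (m->phy_data >> 16) : v;
  }
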
178 v |= (sda != 0) << 3;
179 v |= (scl != 0) << 1;
191 *sda = (v & (1 << 2)) != 0;
192 *scl = (v & (1 << 0)) != 0;
200 r->eeprom_read = (((1 << 0)) | (address << 2));
202 while (!((v = r->eeprom_read) & (1 << 1)))
210 u32 tx_disable_bit = 1 << 3;
220 u32 is_10g_bit = 1 << 5;
230 u16 a, id, reg_values_addr = 0;
233 if (a == 0 || a == 0xffff)
315 return qi < 64 ? &r->rx_dma0[qi] : &r->rx_dma1[qi - 64];
317 return &r->tx_dma[qi];
334 while (!(dr->control & (1 << 25)))
385 case IXGE_82599_sfp_em:
386 case IXGE_82599_sfp_fcoe:
399 for (i = 0; i < 32; i++)
403 if (v != 0xffff && v != 0)
419 .function = (char *) __FUNCTION__, .format =
420 "ixge %d, phy id 0x%d mdio address %d", .format_args = "i4i4i4",};
450 u32 is_ip4, is_ip6, is_ip, is_tcp, is_udp;
453 s = format (s, "%s-owned",
457 format (s, ", length this descriptor %d, l3 offset %d",
461 s = format (s, ", end-of-packet");
466 s = format (s, "layer2 error");
470 s = format (s, "layer 2 type %d", (s0 & 0x1f));
484 s = format (s, " checksum %s",
493 if ((is_ip = (is_ip4 | is_ip6)))
504 s = format (s, ", tcp checksum %s",
508 s = format (s, ", udp checksum %s",
523 s = format (s, "buffer 0x%Lx, %d packet bytes, %d bytes this buffer",
528 if ((v = (s0 >> 0) & 3))
529 s = format (s, "reserved 0x%x, ", v);
531 if ((v = (s0 >> 2) & 3))
532 s = format (s, "mac 0x%x, ", v);
534 if ((v = (s0 >> 4) & 0xf) != 3)
535 s = format (s, "type 0x%x, ", v);
537 s = format (s, "%s%s%s%s%s%s%s%s",
538 (s0 & (1 << 8)) ? "eop, " : "",
539 (s0 & (1 << 9)) ? "insert-fcs, " : "",
540 (s0 & (1 << 10)) ? "reserved26, " : "",
541 (s0 & (1 << 11)) ? "report-status, " : "",
542 (s0 & (1 << 12)) ? "reserved28, " : "",
543 (s0 & (1 << 13)) ? "is-advanced, " : "",
544 (s0 & (1 << 14)) ? "vlan-enable, " : "",
545 (s0 & (1 << 15)) ? "tx-segmentation, " : "");
547 if ((v = s1 & 0xf) != 0)
548 s = format (s, "status 0x%x, ", v);
550 if ((v = (s1 >> 4) & 0xf))
551 s = format (s, "context 0x%x, ", v);
553 if ((v = (s1 >> 8) & 0x3f))
554 s = format (s, "options 0x%x, ", v);
595 s = format (s, "\n%Ubefore: %U",
598 s = format (s, "\n%Uafter : head/tail address 0x%Lx/0x%Lx",
602 s = format (s, "\n%Ubuffer 0x%x: %U",
608 f = node->format_buffer;
616 #define foreach_ixge_error \
617 _ (none, "no error") \
618 _ (tx_full_drops, "tx ring full drops") \
619 _ (ip4_checksum_error, "ip4 checksum errors") \
620 _ (rx_alloc_fail, "rx buf alloc from free list failed") \
621 _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")
625 #define _(f,s) IXGE_ERROR_##f,
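
The foreach_ixge_error table above is an X-macro: it is expanded once with _ mapped to an enum constant (source line 625) and, in the usual VPP pattern, a second time to build the matching error-string array used by the input node. A minimal sketch of both expansions, assuming the conventional wrapper code that the listing elides:

  #define foreach_ixge_error \
    _ (none, "no error") \
    _ (tx_full_drops, "tx ring full drops") \
    _ (ip4_checksum_error, "ip4 checksum errors") \
    _ (rx_alloc_fail, "rx buf alloc from free list failed") \
    _ (rx_alloc_no_physmem, "rx buf alloc failed no physmem")

  /* First expansion: one enum constant per table row. */
  typedef enum
  {
  #define _(f,s) IXGE_ERROR_##f,
    foreach_ixge_error
  #undef _
      IXGE_N_ERROR,
  } ixge_error_t;

  /* Second expansion: the human-readable counter string for each constant. */
  static char *ixge_error_strings[] = {
  #define _(f,s) s,
    foreach_ixge_error
  #undef _
  };
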
634 u8 * next0, u8 * error0, u32 * flags0)
636 u8 is0_ip4, is0_ip6, n0, e0;
639 e0 = IXGE_ERROR_none;
646 ? IXGE_ERROR_ip4_checksum_error : e0);
659 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
663 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
674 u8 * next0, u8 * error0, u32 * flags0,
675 u8 * next1, u8 * error1, u32 * flags1)
677 u8 is0_ip4, is0_ip6, n0, e0;
678 u8 is1_ip4, is1_ip6, n1, e1;
681 e0 = e1 = IXGE_ERROR_none;
691 ? IXGE_ERROR_ip4_checksum_error : e0);
693 ? IXGE_ERROR_ip4_checksum_error : e1);
718 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
721 ? VNET_BUFFER_F_L4_CHECKSUM_COMPUTED : 0);
725 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
728 ? 0 : VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
739 u32 * before_buffers,
752 is_sop = dq->rx.is_start_of_packet;
753 next_index_sop = dq->rx.saved_start_of_packet_next_index;
757 u32 bi0, bi1, flags0, flags1;
760 u8 next0, error0, next1, error1;
770 bd[0].status[0], bd[0].status[2],
771 bd[1].status[0], bd[1].status[2],
772 &next0, &error0, &flags0,
773 &next1, &error1, &flags1);
775 next_index_sop = is_sop ? next0 : next_index_sop;
779 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
781 next_index_sop = is_sop ? next1 : next_index_sop;
785 is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
822 bd[0].status[0], bd[0].status[2],
823 &next0, &error0, &flags0);
825 next_index_sop = is_sop ? next0 : next_index_sop;
829 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
882 s = format (s, "\n%Udescriptor: %U",
886 s = format (s, "\n%Ubuffer 0x%x: %U",
944 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
948 is_sop = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
982 is_sop = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1001 ASSERT (i0 < q->n_descriptors);
1002 ASSERT (i1 < q->n_descriptors);
1010 ASSERT (i0 < q->n_descriptors);
1011 ASSERT (i1 < q->n_descriptors);
1039 u32 start_descriptor_index,
1051 u32 descriptor_prefetch_rotor = 0;
1053 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1062 u8 is_eop0, is_eop1;
1068 if ((descriptor_prefetch_rotor & 0x3) == 0)
1071 descriptor_prefetch_rotor += 2;
1076 to_free[0] = fi0 = to_tx[0];
1078 to_free += fi0 != 0;
1080 to_free[0] = fi1 = to_tx[1];
1082 to_free += fi1 != 0;
1091 is_eop0 = (b0->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1092 is_eop1 = (b1->flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1107 template_status | (is_eop0 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1110 template_status | (is_eop1 << IXGE_TX_DESCRIPTOR_STATUS0_LOG2_IS_END_OF_PACKET);
1113 len_sop = (is_sop ? 0 : len_sop) + len0;
1117 d_sop = is_eop0 ? d : d_sop;
1121 len_sop = (is_sop ? 0 : len_sop) + len1;
1125 d_sop = is_eop1 ? d : d_sop;
1138 to_free[0] = fi0 = to_tx[0];
1140 to_free += fi0 != 0;
1148 is_eop0 = (b0->
flags & VLIB_BUFFER_NEXT_PRESENT) == 0;
1158 template_status | (is_eop0 <<
1161 len_sop = (is_sop ? 0 : len_sop) + len0;
1165 d_sop = is_eop0 ? d : d_sop;
1174 start_descriptor_index);
1187 ASSERT (d_sop - d_start <= dq->n_descriptors);
1188 d_sop = d_sop - d_start == dq->n_descriptors ? d_start : d_sop;
1195 return n_descriptors;
1206 u32 *from, n_left_tx, n_descriptors_to_tx, n_tail_drop;
1207 u32 queue_index = 0;
1231 i32 i, n_ok, i_eop, i_sop;
1234 for (i = n_left_tx - 1; i >= 0; i--)
1237 if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
1239 if (i_sop != ~0 && i_eop != ~0)
1253 .function = (char *) __FUNCTION__, .format =
1254 "ixge %d, ring full to tx %d head %d tail %d", .format_args =
1261 ed->instance = xd->device_index;
1262 ed->to_tx = n_descriptors_to_tx;
1267 if (n_ok < n_descriptors_to_tx)
1269 n_tail_drop = n_descriptors_to_tx - n_ok;
1272 IXGE_ERROR_tx_full_drops, n_tail_drop);
1275 n_descriptors_to_tx = n_ok;
1278 dq->tx.n_buffers_on_ring += n_descriptors_to_tx;
1287 n_descriptors_to_tx -= n;
1294 if (n_descriptors_to_tx > 0)
1299 ASSERT (n == n_descriptors_to_tx);
1332 ASSERT (dq->tx.n_buffers_on_ring >= n);
1333 dq->tx.n_buffers_on_ring -= (n - n_tail_drop);
1344 u32 start_descriptor_index, u32 n_descriptors)
1350 static u32 *d_trace_buffers;
1351 u32 n_descriptors_left = n_descriptors;
1355 u32 bi_sop = dq->rx.saved_start_of_packet_buffer_index;
1356 u32 bi_last = dq->rx.saved_last_buffer_index;
1357 u32 next_index_sop = dq->rx.saved_start_of_packet_next_index;
1358 u32 is_sop = dq->rx.is_start_of_packet;
1365 ASSERT (start_descriptor_index + n_descriptors <= dq->n_descriptors);
1376 _vec_len (d_trace_save) = 0;
1377 _vec_len (d_trace_buffers) = 0;
1380 vec_add (d_trace_buffers, to_rx, n);
1386 if (l < n_descriptors_left)
1401 if (n_allocated == 0)
1403 IXGE_ERROR_rx_alloc_no_physmem, 1);
1406 IXGE_ERROR_rx_alloc_fail, 1);
1408 n_descriptors_left = l + n_allocated;
1410 n_descriptors = n_descriptors_left;
1417 while (n_descriptors_left > 0)
1421 while (n_descriptors_left >= 4 && n_left_to_next >= 2)
1425 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1426 u32 bi1, fi1, len1, l3_offset1, s21, s01, flags1;
1427 u8 is_eop0, error0, next0;
1428 u8 is_eop1, error1, next1;
1447 goto found_hw_owned_descriptor_x2;
1478 &next0, &error0, &flags0,
1479 &next1, &error1, &flags1);
1481 next0 = is_sop ? next0 : next_index_sop;
1482 next1 = is_eop0 ? next1 : next0;
1483 next_index_sop = next1;
1485 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1486 b1->flags |= flags1 | (!is_eop1 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1499 n_packets += is_eop0 + is_eop1;
1512 n_descriptors_left -= 2;
1532 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1533 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1550 u32 bi_sop0 = is_sop ? bi0 : bi_sop;
1551 u32 bi_sop1 = is_eop0 ? bi1 : bi_sop0;
1555 to_next[0] = bi_sop0;
1560 to_next, n_left_to_next,
1565 to_next[0] = bi_sop1;
1570 to_next, n_left_to_next,
1579 u8 saved_is_sop = is_sop;
1580 u32 bi_sop_save = bi_sop;
1582 bi_sop = saved_is_sop ? bi0 : bi_sop;
1583 to_next[0] = bi_sop;
1585 n_left_to_next -= is_eop0;
1587 bi_sop = is_eop0 ? bi1 : bi_sop;
1588 to_next[0] = bi_sop;
1590 n_left_to_next -= is_eop1;
1598 to_next -= is_eop0 + is_eop1;
1599 n_left_to_next += is_eop0 + is_eop1;
1602 bi_sop = saved_is_sop ? bi0 : bi_sop_save;
1609 to_next[0] = bi_sop;
1611 n_left_to_next -= 1;
1615 bi_sop = is_eop0 ? bi1 : bi_sop;
1622 to_next[0] = bi_sop;
1624 n_left_to_next -= 1;
1629 if (is_eop0 && is_eop1 && next0 == next1)
1635 to_next, n_left_to_next);
1642 found_hw_owned_descriptor_x2:
1644 while (n_descriptors_left > 0 && n_left_to_next > 0)
1648 u32 bi0, fi0, len0, l3_offset0, s20, s00, flags0;
1649 u8 is_eop0, error0, next0;
1658 goto found_hw_owned_descriptor_x1;
1677 (xd, s00, s20, &next0, &error0, &flags0);
1679 next0 = is_sop ? next0 : next_index_sop;
1680 next_index_sop = next0;
1682 b0->flags |= flags0 | (!is_eop0 << VLIB_BUFFER_LOG2_NEXT_PRESENT);
1691 n_packets += is_eop0;
1700 n_descriptors_left -= 1;
1712 bi_sop = is_sop ? bi0 : bi_sop;
1714 if (CLIB_DEBUG > 0 && is_eop0)
1725 to_next[0] = bi_sop;
1730 to_next, n_left_to_next,
1738 to_next[0] = bi_sop;
1740 n_left_to_next -= is_eop0;
1750 to_next, n_left_to_next);
1758 found_hw_owned_descriptor_x1:
1759 if (n_descriptors_left > 0)
1765 u32 n_done = n_descriptors - n_descriptors_left;
1767 if (n_trace > 0 && n_done > 0)
1778 _vec_len (d_trace_save) = 0;
1779 _vec_len (d_trace_buffers) = 0;
1791 dq->rx.n_descriptors_done_this_call = n_done;
1792 dq->rx.n_descriptors_done_total += n_done;
1793 dq->rx.is_start_of_packet = is_sop;
1794 dq->rx.saved_start_of_packet_buffer_index = bi_sop;
1795 dq->rx.saved_last_buffer_index = bi_last;
1796 dq->rx.saved_start_of_packet_next_index = next_index_sop;
1812 uword n_packets = 0;
1813 u32 hw_head_index, sw_head_index;
1819 dq->rx.is_start_of_packet = 1;
1820 dq->rx.saved_start_of_packet_buffer_index = ~0;
1821 dq->rx.saved_last_buffer_index = ~0;
1824 dq->rx.next_index = node->cached_next_index;
1826 dq->rx.n_descriptors_done_total = 0;
1827 dq->rx.n_descriptors_done_this_call = 0;
1834 if (hw_head_index == sw_head_index)
1837 if (hw_head_index < sw_head_index)
1843 dq->rx.n_descriptors_done_this_call);
1845 if (dq->rx.n_descriptors_done_this_call != n_tried)
1848 if (hw_head_index >= sw_head_index)
1850 u32 n_tried = hw_head_index - sw_head_index;
1854 dq->rx.n_descriptors_done_this_call);
1868 interface_main.combined_sw_if_counters +
1887 .function = (char *) __FUNCTION__, .format =
1888 "ixge %d, %s", .format_args = "i1t1", .n_enum_strings =
1895 "link status change",
1896 "linksec key exchange",
1897 "manageability event",
1903 "ecc",
"descriptor handler error",
"tcp timer",
"other",},};
1915 u32 v = r->xge_mac.link_status;
1916 uword is_up = (v & (1 << 30)) != 0;
1920 .function = (char *) __FUNCTION__, .format =
1921 "ixge %d, link status change 0x%x", .format_args = "i4i4",};
1928 ed->link_status = v;
1944 u32 bi0, bi1, bi2, bi3;
1986 u32 n_clean, *b, *t, *t0;
1987 i32 n_hw_owned_descriptors;
1988 i32 first_to_clean, last_to_clean;
1994 while ((dq->head_index == dq->tx.head_index_write_back[0]) &&
2002 .function = (char *) __FUNCTION__, .format =
2003 "ixge %d tx head index race: head %4d, tail %4d, buffs %4d", .format_args
2007 u32 instance, head_index, tail_index, n_buffers_on_ring;
2013 ed->n_buffers_on_ring = dq->tx.n_buffers_on_ring;
2019 ASSERT (dq->tx.n_buffers_on_ring >= n_hw_owned_descriptors);
2020 n_clean = dq->tx.n_buffers_on_ring - n_hw_owned_descriptors;
2026 .function = (char *) __FUNCTION__, .format =
2027 "ixge %d tx head index race: head %4d, hw_owned %4d, n_clean %4d, retries %d", .format_args
2031 u32 instance, head_index, n_hw_owned_descriptors, n_clean, retries;
2036 ed->n_hw_owned_descriptors = n_hw_owned_descriptors;
2037 ed->n_clean = n_clean;
2038 ed->retries = hwbp_race;
2053 last_to_clean = (last_to_clean < 0) ? last_to_clean + dq->n_descriptors : last_to_clean;
2056 first_to_clean = (last_to_clean) - (n_clean - 1);
2057 first_to_clean = (first_to_clean < 0) ? first_to_clean + dq->n_descriptors : first_to_clean;
2065 if (first_to_clean > last_to_clean)
2073 if (first_to_clean <= last_to_clean)
2074 t += clean_block (b, t, (last_to_clean - first_to_clean) + 1);
2080 ASSERT (dq->tx.n_buffers_on_ring >= n);
2081 dq->tx.n_buffers_on_ring -= n;
2096 return i >= 8 && i < 16;
2131 uword n_rx_packets = 0;
2133 s = r->interrupt.status_write_1_to_set;
2135 r->interrupt.status_write_1_to_clear = s;
2150 return n_rx_packets;
2158 uword n_rx_packets = 0;
2160 if (node->state == VLIB_NODE_STATE_INTERRUPT)
2167 xd = vec_elt_at_index (xm->devices, i);
2168 n_rx_packets += ixge_device_input (xm, xd, node);
2171 if (! (node->flags & VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
2172 xd->regs->interrupt.enable_write_1_to_set = ~0;
2177 node->runtime_data[0] = 0;
2193 return n_rx_packets;
2206 .name = "ixge-input",
2210 .state = VLIB_NODE_STATE_DISABLED,
2237 return format (s, "TenGigabitEthernet%x/%x/%x/%x",
2241 #define IXGE_COUNTER_IS_64_BIT (1 << 0)
2242 #define IXGE_COUNTER_NOT_CLEAR_ON_READ (1 << 1)
2246 #define _64(a,f) IXGE_COUNTER_IS_64_BIT,
2256 static u32 reg_offsets[] = {
2257 #define _(a,f) (a) / sizeof (u32),
2258 #define _64(a,f) _(a,f)
2268 u32 o = reg_offsets[i];
2280 u32 device_id = va_arg (*args, u32);
2284 #define _(f,n) case n: t = #f; break;
2292 s = format (s, "unknown 0x%x", device_id);
2304 s = format (s, "%s", (v & (1 << 30)) ? "up" : "down");
2308 "1g",
"10g parallel",
"10g serial",
"autoneg",
2311 "unknown",
"100m",
"1g",
"10g",
2313 s = format (s, ", mode %s, speed %s",
2314 modes[(v >> 26) & 3], speeds[(v >> 28) & 3]);
2323 u32 dev_instance = va_arg (*args, u32);
2334 s = format (s, "Intel 8259X: id %U\n%Ulink %U",
2354 s = format (s, "PHY not found");
2365 s = format (s, "\n%U%d unprocessed, %d total buffers on rx queue 0 ring",
2368 s = format (s, "\n%U%d buffers in driver rx cache",
2372 s = format (s, "\n%U%d buffers on tx queue 0 ring",
2379 static char *names[] = {
2381 #define _64(a,f) _(a,f)
2391 s = format (s, "\n%U%-40U%16Ld",
2446 #define IXGE_N_BYTES_IN_RX_BUFFER (2048) // DAW-HACK: Set Rx buffer size so all packets < ETH_MTU_SIZE fit in the buffer (i.e. sop & eop for all descriptors).
2492 for (i = 0; i < n_alloc; i++)
2503 dq->tx.head_index_write_back =
2505 if (!dq->tx.head_index_write_back)
2544 dr->tx.head_index_write_back_address[0] = 1 | a;
2545 dr->tx.head_index_write_back_address[1] = (u64) a >> (u64) 32;
2568 while (!(dr->control & (1 << 25)))
2591 old = r->filter_control;
2594 r->filter_control = old | (1 << 9);
2595 else if (flags == ETHERNET_INTERFACE_FLAGS_DEFAULT_L3)
2596 r->filter_control = old & ~(1 << 9);
2613 const u32 reset_bit = (1 << 26) | (1 << 3);
2615 r->control |= reset_bit;
2618 while (r->control & reset_bit)
2622 r->extended_control |= (1 << 28);
2632 addr32[0] = r->rx_ethernet_address0[0][0];
2633 addr32[1] = r->rx_ethernet_address0[0][1];
2634 for (i = 0; i < 6; i++)
2635 addr8[i] = addr32[i / 4] >> ((i % 4) * 8);
2658 r->interrupt.queue_mapping[0] = (((1 << 7) |
2661 r->interrupt.queue_mapping[0] |= (((1 << 7) |
2671 f64 line_rate_max_pps =
2672 10e9 / (8 * (64 + 20));
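
The constant computed at source lines 2671-2672 is the standard 10GbE worst-case packet rate: 10 Gb/s divided by the bits in a 64-byte minimum frame plus 20 bytes of preamble, start-of-frame delimiter and inter-frame gap. A quick standalone check of the arithmetic (illustration only, not driver code); the figure appears to be used nearby when scaling the receive-queue interrupt throttle:

  #include <stdio.h>

  int
  main (void)
  {
    /* 10 Gb/s over (64 + 20) bytes * 8 bits per minimum-size frame. */
    double line_rate_max_pps = 10e9 / (8 * (64 + 20));
    printf ("%.0f pps (~%.2f Mpps)\n", line_rate_max_pps, line_rate_max_pps / 1e6);
    /* Prints roughly 14880952 pps (~14.88 Mpps). */
    return 0;
  }
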
2680 r->filter_control |= (1 << 10) | (1 << 8);
2683 r->xge_mac.control |= 1 << 2;
2684 r->xge_mac.rx_max_frame_size = (9216 + 14) << 16;
2688 r->interrupt.enable_write_1_to_set = ~0;
2698 uword event_type, *event_data = 0;
2699 f64 timeout, link_debounce_deadline;
2711 link_debounce_deadline = 1e70;
2736 u32 v = r->xge_mac.link_status;
2737 uword is_up = (v & (1 << 30)) != 0;
2743 link_debounce_deadline = 1e70;
2753 _vec_len (event_data) = 0;
2772 .name = "ixge-process",
2819 rt->runtime_data[0] |= 1 << private_data;
2857 ? VLIB_NODE_STATE_POLLING
2858 : VLIB_NODE_STATE_INTERRUPT));
2881 .supported_devices = {
2882 #define _(t,i) { .vendor_id = PCI_VENDOR_ID_INTEL, .device_id = i, },
2911 .version = VPP_BUILD_VER,
2912 .default_disabled = 1,
2913 .description = "Intel 82599 Family Native Driver (experimental)",