#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define MIN(a,b) ((a) < (b) ? (a) : (b))

#define VMWARE_LENGTH_BUG_WORKAROUND 0

/* format_handoff_dispatch_trace */
s = format (s, "HANDOFF_DISPATCH: sw_if_index %d next_index %d buffer 0x%x",
            /* ... */);

#define foreach_handoff_dispatch_error \
_(EXAMPLE, "example packets")

#define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,   /* enum expansion */
#define _(sym,string) string,                      /* error-string expansion */
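The two _() definitions above are the standard vlib error pattern: the same foreach list is expanded once into an error enum and once into a counter-string table. A minimal sketch of the expansion (handoff_dispatch_error_t and the HANDOFF_DISPATCH_N_ERROR terminator follow the usual vlib convention and are assumptions here; handoff_dispatch_error_strings appears in the index below):

    typedef enum
    {
    #define _(sym,str) HANDOFF_DISPATCH_ERROR_##sym,
      foreach_handoff_dispatch_error
    #undef _
      HANDOFF_DISPATCH_N_ERROR,    /* assumed terminator, per vlib convention */
    } handoff_dispatch_error_t;

    static char *handoff_dispatch_error_strings[] = {
    #define _(sym,string) string,
      foreach_handoff_dispatch_error
    #undef _
    };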
/* handoff_dispatch_node_fn */
u32 n_left_from, * from, * to_next;
/* ... */
while (n_left_from > 0)
  {
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from >= 4 && n_left_to_next >= 2)
      {
        u32 sw_if_index0, sw_if_index1;
        /* ... */
        to_next[0] = bi0 = from[0];
        to_next[1] = bi1 = from[1];
        /* ... */
        vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                         to_next, n_left_to_next,
                                         bi0, bi1, next0, next1);
      }

    while (n_left_from > 0 && n_left_to_next > 0)
      {
        /* ... */
        vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                         to_next, n_left_to_next,
                                         bi0, next0);
      }
  }
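The shape above is the standard vlib dispatch skeleton: buffer indices are speculatively copied into the next frame two at a time while at least four packets remain (so prefetching stays ahead), and the vlib_validate_buffer_enqueue_x2/_x1 macros repair the enqueue whenever a packet's computed next0/next1 differs from the speculated next_index. A minimal sketch of the surrounding bookkeeping, assuming the usual frame accessors:

    /* Sketch: how a dispatch function drains its input frame. */
    from = vlib_frame_vector_args (frame);   /* buffer indices in this frame */
    n_left_from = frame->n_vectors;
    next_index = node->cached_next_index;    /* assumed: usual speculation seed */

    while (n_left_from > 0)
      {
        vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
        /* ... dual- and single-packet loops as above ... */
        vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      }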
/* VLIB_REGISTER_NODE (handoff_dispatch_node) */
.name = "handoff-dispatch",
.vector_size = sizeof (u32),
/* dpdk_rx_next_and_error_from_mb_flags_x1 (..., u8 * next0, u8 * error0) */
u8 is0_ip4, is0_ip6, is0_mpls, n0;
/* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
    PKT_EXT_RX_PKT_ERROR | PKT_EXT_RX_BAD_FCS |
#endif
    PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
/* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
    (mb_flags & PKT_EXT_RX_PKT_ERROR) ? DPDK_ERROR_RX_PACKET_ERROR :
    (mb_flags & PKT_EXT_RX_BAD_FCS) ? DPDK_ERROR_RX_BAD_FCS :
#endif
    (mb_flags & PKT_RX_IP_CKSUM_BAD) ? DPDK_ERROR_IP_CHECKSUM_ERROR :
    (mb_flags & PKT_RX_L4_CKSUM_BAD) ? DPDK_ERROR_L4_CHECKSUM_ERROR :
/* ... */
*error0 = DPDK_ERROR_NONE;
/* ... */
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
is0_ip4 = RTE_ETH_IS_IPV4_HDR (mb->packet_type) != 0;
#else
is0_ip4 = (mb_flags & (PKT_RX_IPV4_HDR | PKT_RX_IPV4_HDR_EXT)) != 0;
#endif
/* ... */
#if RTE_VERSION >= RTE_VERSION_NUM(2, 1, 0, 0)
is0_ip6 = RTE_ETH_IS_IPV6_HDR (mb->packet_type) != 0;
#else
is0_ip6 = (mb_flags & (PKT_RX_IPV6_HDR | PKT_RX_IPV6_HDR_EXT)) != 0;
#endif
/* ... */
is0_mpls = (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST));
mb = rte_mbuf_from_vlib_buffer (b0);
/* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif
/* is_efd_discardable */
if (eh->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4))
  {
    /* ... */
    u8 pkt_prec = (ipv4->tos >> 5);   /* precedence: top 3 bits of ToS */
    return (/* ... */ ?
            DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE);
  }
else if (eh->type == clib_net_to_host_u16 (ETHERNET_TYPE_IP6))
  {
    /* ... */
    return (/* ... */ ?
            DPDK_ERROR_IPV6_EFD_DROP_PKTS : DPDK_ERROR_NONE);
  }
else if (eh->type == clib_net_to_host_u16 (ETHERNET_TYPE_MPLS_UNICAST))
  {
    /* ... */
    return (/* ... */ ?
            DPDK_ERROR_MPLS_EFD_DROP_PKTS : DPDK_ERROR_NONE);
  }
else if ((eh->type == clib_net_to_host_u16 (ETHERNET_TYPE_VLAN)) ||
         (eh->type == clib_net_to_host_u16 (ETHERNET_TYPE_DOT1AD)))
  {
    /* ... */
    return (/* ... */ ?
            DPDK_ERROR_VLAN_EFD_DROP_PKTS : DPDK_ERROR_NONE);
  }
return DPDK_ERROR_NONE;
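For the IPv4 arm, the elided test checks the packet's precedence against a configured discard bitmap. A sketch, assuming the tm->efd.ip_prec_bitmap field name (only the pkt_prec extraction and the error codes are visible in the fragment; the bitmap is presumably built by set_efd_bitmap, documented below):

    /* Sketch: discard if the precedence bit is set in the configured bitmap. */
    u8 pkt_prec = (ipv4->tos >> 5);
    return (tm->efd.ip_prec_bitmap & (1 << pkt_prec)) ?
      DPDK_ERROR_IPV4_EFD_DROP_PKTS : DPDK_ERROR_NONE;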
/* dpdk_device_input */
u32 n_left_to_next, * to_next;
/* ... */
uword n_rx_bytes = 0;
u32 n_trace, trace_cnt __attribute__((unused));
/* ... */
u8 efd_discard_burst = 0;
u16 ip_align_offset = 0;
u32 buffer_flags_template;
/* ... */
if (xd->pmd == VNET_DPDK_PMD_THUNDERX)
/* ... */
/* drop-all path: free the whole burst */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... */
/* EFD path: free the burst, then count the drops */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... */
    DPDK_ERROR_VLAN_EFD_DROP_PKTS,   /* counter index in the elided call */
/* ... */
efd_discard_burst = 1;
while (n_buffers > 0)
  {
    /* ... */
    while (n_buffers > 0 && n_left_to_next > 0)
      {
        struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
        struct rte_mbuf *mb_seg = mb->next;
        /* ... */
        struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; /* prefetch */
        /* ... */
        b0 = vlib_buffer_from_rte_mbuf (mb);
        /* ... EFD-discardable packets are freed here ... */
        rte_pktmbuf_free (mb);
        /* ... */
        struct rte_mbuf *pfmb = mb->next;   /* prefetch the next segment */
        /* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
        mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif
        /* ... */
        b0->flags = buffer_flags_template;
        /* ... */
        n_rx_bytes += mb->pkt_len;
        /* chain any additional mbuf segments onto the vlib buffer */
        while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
          {
            b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
            /* ... */
            b_seg->current_data =
              (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
            /* ... */
            mb_seg = mb_seg->next;
          }
        /* ... */
        vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                         to_next, n_left_to_next,
                                         bi0, next0);
      }
    /* ... */
  }
/* ... tail of vlib_increment_combined_counter (...): */
mb_index, n_rx_bytes);
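vlib_buffer_from_rte_mbuf and rte_mbuf_from_vlib_buffer, used throughout the loop above, convert in O(1) because the two headers sit back to back in each buffer's metadata area. A sketch of how such conversions are typically defined (the exact layout is an assumption here):

    /* Sketch, assuming the vlib_buffer_t immediately follows the rte_mbuf. */
    #define rte_mbuf_from_vlib_buffer(x) (((struct rte_mbuf *) (x)) - 1)
    #define vlib_buffer_from_rte_mbuf(x) ((vlib_buffer_t *) ((x) + 1))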
/* guard reconstructed; only the two alternative definitions survive here */
#if VIRL > 0
#define VIRL_SPEED_LIMIT()                        \
{                                                 \
  struct timespec ts, tsrem;                      \
  /* ... */                                       \
  ts.tv_nsec = 1000*1000;   /* 1ms */             \
  /* ... */                                       \
  while (nanosleep (&ts, &tsrem) < 0)             \
    /* ... retry with the remaining time ... */   \
}
#else
#define VIRL_SPEED_LIMIT()
#endif

/* dpdk_input */
uword n_rx_packets = 0;

/* dpdk_input_rss */
uword n_rx_packets = 0;
/* VLIB_REGISTER_NODE (dpdk_input_node) */
.name = "dpdk-input",
/* ... */
.state = VLIB_NODE_STATE_DISABLED,
/* dpdk_set_next_node */
r->next_nodes[next] = name;
r_io->next_nodes[next] = name;
r_handoff->next_nodes[next] = name;
/* ... */
clib_warning ("%s: illegal next %d\n", __FUNCTION__, next);
/* vlib_get_handoff_queue_elt */
new_tail = __sync_add_and_fetch (&fq->tail, 1);
/* ... */
elt = fq->elts + (new_tail & (fq->nelts - 1));
/* dpdk_get_handoff_queue_elt (u32 vlib_worker_index, ...) */
if (handoff_queue_elt_by_worker_index[vlib_worker_index])
  return handoff_queue_elt_by_worker_index[vlib_worker_index];
/* ... */
handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

/* is_vlib_handoff_queue_congested (u32 vlib_worker_index, ...) */
fq = handoff_queue_by_worker_index[vlib_worker_index];
/* ... */
handoff_queue_by_worker_index[vlib_worker_index] = fq;
#define MPLS_BOTTOM_OF_STACK_BIT_MASK 0x00000100U
#define MPLS_LABEL_MASK               0xFFFFF000U

/* mpls_get_key: probe up to five label entries for the bottom of stack,
   jumping to bottom_lbl_found as soon as the S bit is seen */
goto bottom_lbl_found;
/* ... (the probe is unrolled five times in the source) ... */
goto bottom_lbl_found;
/* ... */
ip_ver = (*((u8 *) m) >> 4);   /* nibble after the stack: IP version */
/* ... */
hash_key = (m - 1)->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_LABEL_MASK);
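Each unrolled probe presumably tests the bottom-of-stack (S) bit with the mask defined above; a sketch of one probe (the unrolling is visible in the fragment, the exact test is an assumption):

    /* Sketch: stop at the label entry whose S bit is set, else advance. */
    if (m->label_exp_s_ttl & clib_net_to_host_u32 (MPLS_BOTTOM_OF_STACK_BIT_MASK))
      goto bottom_lbl_found;
    m++;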
/* eth_get_key */
  }
else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
  {
    /* ... */
  }
else if (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
  {
    /* ... */
  }
else if ((h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ||
         (h0->type == clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD)))
  {
    /* step past one or two VLAN tags */
    outer = (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_VLAN)) ?
            /* ... */;
    if (PREDICT_TRUE (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
      {
        /* ... */
      }
    else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_IP6))
      {
        /* ... */
      }
    else if (outer->type == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS_UNICAST))
      {
        /* ... */
      }
    else
      hash_key = outer->type;
  }
/* dpdk_io_thread */
u32 n_left_to_next_worker = 0, * to_next_worker = 0;
u32 next_worker_index = 0;
u32 current_worker_index = ~0;
/* ... */
u32 num_workers = 0;
u32 num_devices = 0;
/* ... */
u32 first_worker_index = 0;
u32 buffer_flags_template;
/* ... */
num_workers = tr->count;
/* ... */
instances = tr->count;
/* ... */
ASSERT (instance_id < instances);
/* ... workers occupy [first_worker_index, first_worker_index + num_workers - 1] ... */
uword n_rx_bytes = 0;
u32 n_trace, trace_cnt __attribute__((unused));
/* ... */
u8 efd_discard_burst;
/* ... */
fprintf (stderr, "i/o thread %d (cpu %d) takes port %d\n", /* ... */);
/* ... */
for (i = 0; i < vec_len (my_devices); i++)
/* ... */
trace_cnt = n_trace = 0;
/* ... drop-all path: free the whole burst ... */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... */
efd_discard_burst = 0;
/* ... EFD path: free the burst, then count the drops ... */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... */
    DPDK_ERROR_VLAN_EFD_DROP_PKTS,   /* counter index in the elided call */
/* ... */
efd_discard_burst = 1;
while (n_buffers > 0)
  {
    /* same per-mbuf conversion as in dpdk_device_input above */
    struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
    struct rte_mbuf *mb_seg = mb->next;
    /* ... */
    struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; /* prefetch */
    /* ... */
    b0 = vlib_buffer_from_rte_mbuf (mb);
    /* ... EFD-discardable packets are freed here ... */
    rte_pktmbuf_free (mb);
    /* ... */
    struct rte_mbuf *pfmb = mb->next;   /* prefetch the next segment */
    /* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
    mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif
    /* ... */
    b0->flags = buffer_flags_template;
    /* ... */
    n_rx_bytes += mb->pkt_len;
    /* chain any additional mbuf segments onto the vlib buffer */
    while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
      {
        b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
        /* ... */
        b_seg->current_data =
          (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
        /* ... */
        mb_seg = mb_seg->next;
      }
    next_worker_index = first_worker_index;
    /* ... */
    /* branch reconstructed: mask when num_workers is a power of two */
    if (PREDICT_TRUE (is_pow2 (num_workers)))
      next_worker_index += hash & (num_workers - 1);
    else
      next_worker_index += hash % num_workers;
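Both arms land in [first_worker_index, first_worker_index + num_workers - 1]; the mask arm works because a power-of-two modulus keeps exactly the low bits. A tiny worked check:

    /* With num_workers == 4 (a power of two), & and % agree: */
    u32 hash = 47;
    ASSERT ((hash & (4 - 1)) == (hash % 4));   /* both are 3 */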
    /* ... is_vlib_handoff_queue_congested (...,
           congested_handoff_queue_by_worker_index); */
    /* congested: drop the packet */
    rte_pktmbuf_free (mb);
    /* ... */
    if (next_worker_index != current_worker_index)
      {
        /* ... dpdk_get_handoff_queue_elt (...,
               handoff_queue_elt_by_worker_index); */
        current_worker_index = next_worker_index;
      }
    /* enqueue the buffer index onto the worker's frame */
    to_next_worker[0] = bi0;
    /* ... */
    n_left_to_next_worker--;
    if (n_left_to_next_worker == 0)
      {
        /* ... */
        current_worker_index = ~0;
        handoff_queue_elt_by_worker_index[next_worker_index] = 0;
      }
    /* ... tail of vlib_increment_combined_counter (...): */
    mb_index, n_rx_bytes);
/* flush any partially filled handoff frames */
for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
  {
    if (handoff_queue_elt_by_worker_index[i])
      {
        hf = handoff_queue_elt_by_worker_index[i];
        /* ... */
        handoff_queue_elt_by_worker_index[i] = 0;
      }
  }
current_worker_index = ~0;
/* dpdk_io_input */
uword n_rx_packets = 0;
/* ... */
u32 n_left_to_next_worker = 0, * to_next_worker = 0;
u32 next_worker_index = 0;
u32 current_worker_index = ~0;
/* ... */
static int num_workers_set;
static u32 num_workers;
/* ... */
static u32 first_worker_index;
u32 buffer_flags_template;
/* ... */
num_workers = tr->count;
/* ... */
num_workers_set = 1;
/* ... workers occupy [first_worker_index, first_worker_index + num_workers - 1] ... */
uword n_rx_bytes = 0;
u32 n_trace, trace_cnt __attribute__((unused));
/* ... */
u8 efd_discard_burst = 0;
/* ... drop-all path: free the whole burst ... */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... EFD path: free the burst, then count the drops ... */
for (mb_index = 0; mb_index < n_buffers; mb_index++)
  rte_pktmbuf_free (xd->rx_vectors[queue_id][mb_index]);
/* ... */
    DPDK_ERROR_VLAN_EFD_DROP_PKTS,   /* counter index in the elided call */
/* ... */
efd_discard_burst = 1;
/* ... */
while (n_buffers > 0)
  {
    /* same per-mbuf conversion as in dpdk_device_input above */
    struct rte_mbuf *mb = xd->rx_vectors[queue_id][mb_index];
    struct rte_mbuf *mb_seg = mb->next;
    /* ... */
    struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2]; /* prefetch */
    /* ... */
    b0 = vlib_buffer_from_rte_mbuf (mb);
    /* ... */
    rte_pktmbuf_free (mb);
    /* ... */
    struct rte_mbuf *pfmb = mb->next;   /* prefetch the next segment */
    /* ... */
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
    mb->ol_flags &= PKT_EXT_RX_CLR_TX_FLAGS_MASK;
#endif
    /* ... */
    b0->flags = buffer_flags_template;
    /* ... */
    n_rx_bytes += mb->pkt_len;
    while ((mb->nb_segs > 1) && (nb_seg < mb->nb_segs))
      {
        b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
        /* ... */
        b_seg->current_data =
          (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
        /* ... */
        mb_seg = mb_seg->next;
      }
    next_worker_index = first_worker_index;
    /* ... */
    if (PREDICT_TRUE (is_pow2 (num_workers)))
      next_worker_index += hash & (num_workers - 1);
    else
      next_worker_index += hash % num_workers;
    /* ... is_vlib_handoff_queue_congested (...,
           congested_handoff_queue_by_worker_index); */
    /* congested: drop the packet */
    rte_pktmbuf_free (mb);
    /* ... */
    if (next_worker_index != current_worker_index)
      {
        /* ... dpdk_get_handoff_queue_elt (...,
               handoff_queue_elt_by_worker_index); */
        current_worker_index = next_worker_index;
      }
    to_next_worker[0] = bi0;
    /* ... */
    n_left_to_next_worker--;
    if (n_left_to_next_worker == 0)
      {
        /* ... */
        current_worker_index = ~0;
        handoff_queue_elt_by_worker_index[next_worker_index] = 0;
      }
    /* ... tail of vlib_increment_combined_counter (...): */
    mb_index, n_rx_bytes);
n_rx_packets += mb_index;
/* ... */
/* flush any partially filled handoff frames */
for (i = 0; i < vec_len (handoff_queue_elt_by_worker_index); i++)
  {
    if (handoff_queue_elt_by_worker_index[i])
      {
        hf = handoff_queue_elt_by_worker_index[i];
        /* ... */
        handoff_queue_elt_by_worker_index[i] = 0;
      }
  }
current_worker_index = ~0;
return n_rx_packets;
/* VLIB_REGISTER_NODE (dpdk_io_input_node) */
.name = "dpdk-io-input",
/* ... */
.state = VLIB_NODE_STATE_DISABLED,
/* set_efd_bitmap */
for (ix = 0; ix < 8; ix++) {
  /* ... */
  (*bitmap) |= (1 << ix);
}

/* efd_config (..., u32 mpls_exp, u32 mpls_op,
               u32 vlan_cos, u32 vlan_op) */
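set_efd_bitmap turns a (value, operation) pair into an 8-bit mask with one bit per precedence/class value; the EFD_OPERATION_* names appear in the index below. A sketch consistent with the surviving loop (the comparison wiring is an assumption):

    void set_efd_bitmap (u8 * bitmap, u32 value, u32 op)
    {
      int ix;

      *bitmap = 0;
      for (ix = 0; ix < 8; ix++)
        {
          /* set the bit for every value matching the requested comparison */
          if (((op == EFD_OPERATION_LESS_THAN) && (ix < value)) ||
              ((op == EFD_OPERATION_GREATER_OR_EQUAL) && (ix >= value)))
            (*bitmap) |= (1 << ix);
        }
    }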
#define VIRL_SPEED_LIMIT()
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment).
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
void dpdk_rx_trace(dpdk_main_t *dm, vlib_node_runtime_t *node, dpdk_device_t *xd, u16 queue_id, u32 *buffers, uword n_buffers)
always_inline vlib_thread_main_t * vlib_get_thread_main()
uword dpdk_input_rss(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static u64 ipv4_get_key(ip4_header_t *ip)
void dpdk_efd_update_counters(dpdk_device_t *xd, u32 n_buffers, u16 enabled)
static u8 * format_handoff_dispatch_trace(u8 *s, va_list *args)
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
static void vlib_worker_thread_barrier_check(void)
void dpdk_set_next_node(dpdk_rx_next_t next, char *name)
#define foreach_dpdk_error
void(* dpdk_io_thread_callback_t)(vlib_main_t *vm)
always_inline void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
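A small usage sketch of these vector macros (values hypothetical):

    u32 *v = 0;                  /* a NULL pointer is a valid empty vector */
    vec_add1 (v, 42);            /* v[0] == 42, vec_len (v) == 1 */
    vec_validate (v, 9);         /* grow so index 9 is valid; new slots zeroed */
    ASSERT (vec_len (v) == 10);  /* vec_len is NULL tolerant: vec_len (0) == 0 */
    vec_reset_length (v);        /* length back to zero, storage retained */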
static u64 clib_xxhash(u64 key)
struct _vlib_node_registration vlib_node_registration_t
u32 per_interface_next_index
static u64 mpls_get_key(mpls_unicast_header_t *m)
vlib_node_registration_t dpdk_io_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_io_input_node)
static char * dpdk_error_strings[]
u32 buffer_index[VLIB_FRAME_SIZE]
always_inline vlib_main_t * vlib_get_main(void)
#define vec_reset_length(v)
Reset vector length to zero (NULL-pointer tolerant).
vnet_main_t * vnet_get_main(void)
i16 current_data
Signed offset in data[], pre_data[] that we are currently processing.
always_inline u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static u64 eth_get_key(ethernet_header_t *h0)
#define VLIB_INIT_FUNCTION(x)
u32 dpdk_get_handoff_node_index(void)
void efd_config(u32 enabled, u32 ip_prec, u32 ip_op, u32 mpls_exp, u32 mpls_op, u32 vlan_cos, u32 vlan_op)
always_inline void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 cpu_index, u32 index, u32 packet_increment, u32 byte_increment)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
clib_error_t * handoff_dispatch_init(vlib_main_t *vm)
always_inline uword rotate_left(uword x, uword i)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
#define clib_warning(format, args...)
vlib_frame_queue_elt_t * elts
always_inline vlib_buffer_free_list_t * vlib_buffer_get_free_list(vlib_main_t *vm, u32 free_list_index)
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
#define VMWARE_LENGTH_BUG_WORKAROUND
always_inline void * vlib_frame_vector_args(vlib_frame_t *f)
static u64 ipv6_get_key(ip6_header_t *ip)
#define VLIB_BUFFER_NEXT_PRESENT
u16 consec_full_frames_hi_thresh
#define VLIB_EFD_DISCARD_ENABLED
format_function_t format_dpdk_rx_dma_trace
u16 current_length
Number of bytes between current data and the end of this buffer.
dpdk_device_and_queue_t ** devices_by_cpu
static_always_inline void increment_efd_drop_counter(vlib_main_t *vm, u32 counter_index, u32 count)
#define VLIB_NODE_FLAG_IS_HANDOFF
u32 consec_full_frames_cnt
static char * handoff_dispatch_error_strings[]
#define DPDK_EFD_DISCARD_ENABLED
uword os_get_cpu_number(void)
static void vlib_put_handoff_queue_elt(vlib_frame_queue_elt_t *hf)
unsigned short int uint16_t
#define DPDK_EFD_MONITOR_ENABLED
static u32 dpdk_device_input(dpdk_main_t *dm, dpdk_device_t *xd, vlib_node_runtime_t *node, u32 cpu_index, u16 queue_id)
vlib_frame_queue_t ** vlib_frame_queues
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
void clib_time_init(clib_time_t *c)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static uword handoff_dispatch_node_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
#define CLIB_PREFETCH(addr, size, type)
static uword dpdk_io_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
#define foreach_handoff_dispatch_error
struct rte_mbuf *** rx_vectors
#define clib_memcpy(a, b, c)
#define EFD_OPERATION_GREATER_OR_EQUAL
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
always_inline uword is_pow2(uword x)
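is_pow2 is the usual bit trick, shown here as a sketch (the exact definition is assumed; it is what makes the hash & (num_workers - 1) fast path above valid):

    always_inline uword is_pow2 (uword x)
    {
      /* a power of two has a single set bit; note this also accepts x == 0 */
      return 0 == (x & (x - 1));
    }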
u32 next_buffer
Next buffer for this linked-list of buffers.
u32 clone_count
Specifies whether this buffer should be reinitialized when freed.
vlib_trace_main_t trace_main
uword * thread_registrations_by_name
void dpdk_io_thread(vlib_worker_thread_t *w, u32 instances, u32 instance_id, char *worker_name, dpdk_io_thread_callback_t callback)
#define VLIB_BUFFER_IS_TRACED
always_inline void vlib_buffer_init_for_free_list(vlib_buffer_t *_dst, vlib_buffer_free_list_t *fl)
vlib_node_registration_t handoff_dispatch_node
(constructor) VLIB_REGISTER_NODE (handoff_dispatch_node)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
handoff_dispatch_main_t handoff_dispatch_main
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant).
#define MPLS_BOTTOM_OF_STACK_BIT_MASK
dpdk_efd_agent_t efd_agent
static vlib_frame_queue_elt_t * dpdk_get_handoff_queue_elt(u32 vlib_worker_index, vlib_frame_queue_elt_t **handoff_queue_elt_by_worker_index)
#define DPDK_EFD_DROPALL_ENABLED
always_inline void vlib_increment_main_loop_counter(vlib_main_t *vm)
static u32 dpdk_rx_burst(dpdk_main_t *dm, dpdk_device_t *xd, u16 queue_id)
always_inline void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define hash_get_mem(h, key)
u32 buffer_flags_template
vlib_frame_queue_elt_t * vlib_get_handoff_queue_elt(u32 vlib_worker_index)
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define VLIB_REGISTER_NODE(x,...)
always_inline vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
volatile u32 io_thread_release
#define vec_foreach(var, vec)
Vector iterator.
static vlib_frame_queue_t * is_vlib_handoff_queue_congested(u32 vlib_worker_index, u32 queue_hi_thresh, vlib_frame_queue_t **handoff_queue_by_worker_index)
#define EFD_OPERATION_LESS_THAN
#define CLIB_MEMORY_BARRIER()
always_inline void dpdk_rx_next_and_error_from_mb_flags_x1(dpdk_device_t *xd, struct rte_mbuf *mb, vlib_buffer_t *b0, u8 *next0, u8 *error0)
#define vec_validate_init_empty(V, I, INIT)
Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment)
u32 is_efd_discardable(vlib_thread_main_t *tm, vlib_buffer_t *b0, struct rte_mbuf *mb)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
always_inline u64 clib_cpu_time_now(void)
static uword dpdk_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
vlib_thread_registration_t * registration
always_inline void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
always_inline vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
always_inline u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
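Buffer indices, not pointers, are what travel in vlib frames and handoff queues; nodes translate at the edges. A usage sketch:

    u32 bi0 = from[0];                               /* index taken from the frame */
    vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);   /* index -> pointer */
    /* ... per-packet work on b0 ... */
    ASSERT (vlib_get_buffer_index (vm, b0) == bi0);  /* pointer -> index */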
CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".
void set_efd_bitmap(u8 *bitmap, u32 value, u32 op)