FD.io VPP v21.06-3-gbb25fbf28 — Vector Packet Processing
Source fragments: packet-generator (pg) input path
/* Compare buffer bytes, under the stream's fixed-data mask, with the
   expected fixed packet data. */
if ((bd[i] & pm[i]) != pd[i])
/* set_1: write one n_bits-wide value at a0 (range-checked), converting
   to network byte order when requested; only the u16/u32/u64 arms need
   a byte swap. */
ASSERT (v0 >= v_min && v0 <= v_max);
/* n_bits == BITS (u16) */
if (is_net_byte_order)
  v0 = clib_host_to_net_u16 (v0);
/* n_bits == BITS (u32) */
if (is_net_byte_order)
  v0 = clib_host_to_net_u32 (v0);
/* n_bits == BITS (u64) */
if (is_net_byte_order)
  v0 = clib_host_to_net_u64 (v0);
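The fragment above dispatches on field width and byte-swaps before storing.
A minimal standalone sketch of the same store pattern, using standard
htons/htonl in place of VPP's clib_host_to_net_* and memcpy in place of
clib_mem_unaligned (every name below is an illustrative stand-in, not the
listing's API):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Store a 16- or 32-bit value at an arbitrary (possibly unaligned)
   offset, optionally converting to network byte order first. */
static void store_u16 (void *a0, uint16_t v0, int is_net_byte_order)
{
  if (is_net_byte_order)
    v0 = htons (v0);
  memcpy (a0, &v0, sizeof (v0));   /* memcpy tolerates unaligned stores */
}

static void store_u32 (void *a0, uint32_t v0, int is_net_byte_order)
{
  if (is_net_byte_order)
    v0 = htonl (v0);
  memcpy (a0, &v0, sizeof (v0));
}

Using memcpy keeps the sketch free of alignment faults on strict
architectures, which is the same concern clib_mem_unaligned addresses.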
/* set_2: write a value pair (v0, v1); for increment edits v1 may
   overshoot v_max by one before the wrap is handled, hence the relaxed
   second assert. */
... u32 n_bits, u32 is_net_byte_order, u32 is_increment)
ASSERT (v0 >= v_min && v0 <= v_max);
ASSERT (v1 >= v_min && v1 <= (v_max + is_increment));
/* n_bits == BITS (u16) */
if (is_net_byte_order)
  {
    v0 = clib_host_to_net_u16 (v0);
    v1 = clib_host_to_net_u16 (v1);
  }
/* n_bits == BITS (u32) */
if (is_net_byte_order)
  {
    v0 = clib_host_to_net_u32 (v0);
    v1 = clib_host_to_net_u32 (v1);
  }
/* n_bits == BITS (u64) */
if (is_net_byte_order)
  {
    v0 = clib_host_to_net_u64 (v0);
    v1 = clib_host_to_net_u64 (v1);
  }
/* do_set_fixed: store the fixed value at byte_offset in every buffer.
   The main loop writes two buffers per iteration while prefetching the
   write targets of the two buffers after them. */
... u32 byte_offset, u32 is_net_byte_order, u64 v_min, u64 v_max)

a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
set_2 (a0, a1, v_min, v_min, v_min, v_max, n_bits, is_net_byte_order,
       /* is_increment */ 0);

/* single-buffer tail */
a0 = (void *) b0 + byte_offset;
set_1 (a0, v_min, v_min, v_max, n_bits, is_net_byte_order);
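This is VPP's usual software-pipelined loop shape: process two buffers,
prefetch the write targets two buffers ahead, finish with a scalar tail.
A schematic sketch of that shape over a plain array of buffer pointers;
PREFETCH_WRITE is a hypothetical stand-in for CLIB_PREFETCH (..., WRITE):

#include <stdint.h>
#include <string.h>

#define PREFETCH_WRITE(p) __builtin_prefetch ((p), 1 /* write */)

static void
set_fixed_all (void **bufs, int n, unsigned byte_offset, uint64_t v)
{
  int i = 0;
  for (; i + 4 <= n; i += 2)          /* main loop: 2 stores, 2 prefetches */
    {
      void *a0 = (char *) bufs[i + 0] + byte_offset;
      void *a1 = (char *) bufs[i + 1] + byte_offset;
      PREFETCH_WRITE ((char *) bufs[i + 2] + byte_offset);
      PREFETCH_WRITE ((char *) bufs[i + 3] + byte_offset);
      memcpy (a0, &v, sizeof (v));
      memcpy (a1, &v, sizeof (v));
    }
  for (; i < n; i++)                  /* tail: one buffer at a time */
    memcpy ((char *) bufs[i] + byte_offset, &v, sizeof (v));
}

The prefetch distance (two buffers) hides the cache-miss latency of the
next iteration's stores behind the current iteration's work.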
/* do_set_increment: write v, v+1, ... at byte_offset, wrapping from
   v_max back to v_min; a running sum of the written values is kept for
   length edits (a pair v_old, v_old+1 contributes 2*v_old + 1). */
... u32 is_net_byte_order,
ASSERT (v >= v_min && v <= v_max);
...
a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

v = v > v_max ? v_min : v;
set_2 (a0, a1, v_old + 0, v_old + 1, v_min, v_max, n_bits, is_net_byte_order,
       /* is_increment */ 1);
sum += 2 * v_old + 1;

/* wrap fell inside the pair: undo the pair sum and redo one at a time */
sum -= 2 * v_old + 1;
set_1 (a0, v + 0, v_min, v_max, n_bits, is_net_byte_order);
...
v = v > v_max ? v_min : v;
set_1 (a1, v + 0, v_min, v_max, n_bits, is_net_byte_order);

/* single-buffer tail */
a0 = (void *) b0 + byte_offset;
v = v > v_max ? v_min : v;
ASSERT (v_old >= v_min && v_old <= v_max);
set_1 (a0, v_old, v_min, v_max, n_bits, is_net_byte_order);
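The core of the increment edit is a counter that walks [v_min, v_max] and
wraps. A minimal sketch of that logic in isolation, assuming a flat array
of destination slots (fill_increment and its parameters are illustrative,
not the listing's API):

#include <stdint.h>

/* Write an incrementing value into each slot, wrapping from v_max back
   to v_min, mirroring the listing's "v = v > v_max ? v_min : v". */
static uint64_t
fill_increment (uint64_t *slots, int n, uint64_t v,
                uint64_t v_min, uint64_t v_max)
{
  for (int i = 0; i < n; i++)
    {
      if (v > v_max)       /* wrap around the configured range */
        v = v_min;
      slots[i] = v++;
    }
  return v;                /* next value, carried into the next batch */
}

Returning the next value lets successive batches continue the sequence
seamlessly, the same way the pg stream carries its edit state forward.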
/* do_set_random: fill from the stream's random buffer, reducing each
   word into [v_min, v_max].  The conditional subtraction is applied
   twice to guarantee the masked value lands inside the range. */
... u32 is_net_byte_order,
u64 v_diff = v_max - v_min + 1;
...
a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

/* width-dispatched macro: consume two random words per pair */
u##n * r = random_data; \
random_data = r + 2; \
...
v0 = v0 > v_max ? v0 - v_diff : v0;
v1 = v1 > v_max ? v1 - v_diff : v1;
v0 = v0 > v_max ? v0 - v_diff : v0;
v1 = v1 > v_max ? v1 - v_diff : v1;
set_2 (a0, a1, v0, v1, v_min, v_max, n_bits, is_net_byte_order,
       /* is_increment */ 0);

/* single-buffer tail: one random word */
a0 = (void *) b0 + byte_offset;
u##n * r = random_data; \
random_data = r + 1; \
...
v0 = v0 > v_max ? v0 - v_diff : v0;
v0 = v0 > v_max ? v0 - v_diff : v0;
set_1 (a0, v0, v_min, v_max, n_bits, is_net_byte_order);
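The range reduction works by masking the random word to the next power of
two at or above the range size, then conditionally subtracting the range.
A standalone sketch of that reduction (random_in_range is hypothetical;
it assumes v_max >= v_min and a range well below 2^63 so the power-of-two
search cannot overflow):

#include <stdint.h>

static uint64_t
random_in_range (uint64_t r, uint64_t v_min, uint64_t v_max)
{
  uint64_t v_diff = v_max - v_min + 1;
  uint64_t p2 = 1;
  while (p2 < v_diff)       /* smallest power of two >= v_diff */
    p2 <<= 1;
  uint64_t v = v_min + (r & (p2 - 1));   /* v <= v_min + p2 - 1 */
  v = v > v_max ? v - v_diff : v;        /* conditional subtract */
  v = v > v_max ? v - v_diff : v;        /* second pass, as in the listing */
  return v;
}

With this mask choice the masked value exceeds v_max by less than v_diff,
so a single subtraction already suffices; the listing's doubled line is a
cheap branch-free safeguard. Note the mask-and-subtract scheme introduces
a slight bias toward the low end of the range when v_diff is not a power
of two, which is acceptable for packet-generator fields.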
/* Width-dispatched read-modify-write helper: load the container word in
   network byte order, clear the field's mask, OR in the shifted value,
   and store it back. */
clib_mem_unaligned (a##i, t) =                                      \
  clib_host_to_net_##t ((clib_net_to_host_mem_##t (a##i) &~ mask)   \
                        | (v##i << shift))

/* setbits_1: one value */
ASSERT (v0 >= v_min && v0 <= v_max);
if (max_bits == BITS (u8))
  ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
else if (max_bits == BITS (u16))
  ...
else if (max_bits == BITS (u32))
  ...
else if (max_bits == BITS (u64))
  ...

/* setbits_2: a value pair; increment edits may overshoot v_max by one */
ASSERT (v0 >= v_min && v0 <= v_max);
ASSERT (v1 >= v_min && v1 <= v_max + is_increment);
if (max_bits == BITS (u8))
  {
    ((u8 *) a0)[0] = (((u8 *) a0)[0] & ~mask) | (v0 << shift);
    ((u8 *) a1)[0] = (((u8 *) a1)[0] & ~mask) | (v1 << shift);
  }
else if (max_bits == BITS (u16))
  ...
else if (max_bits == BITS (u32))
  ...
else if (max_bits == BITS (u64))
  ...
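A self-contained sketch of the same read-modify-write pattern for a field
inside a 16-bit big-endian word, with htons/ntohs standing in for the
clib byte-order helpers (setbits_u16 is illustrative; the caller is
assumed to pass a v that already fits the field):

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Clear the field under mask, then OR in the new value at its shift,
   mirroring the listing's (x & ~mask) | (v << shift). */
static void
setbits_u16 (void *a0, uint16_t v, uint16_t mask, int shift)
{
  uint16_t x;
  memcpy (&x, a0, sizeof (x));     /* unaligned-safe load */
  x = ntohs (x);                   /* to host byte order */
  x = (x & ~mask) | (uint16_t) (v << shift);
  x = htons (x);                   /* back to network order */
  memcpy (a0, &x, sizeof (x));
}

Doing the merge in host order keeps mask and shift independent of the
machine's endianness; only the load and store care about wire format.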
/* do_setbits_fixed: the same prefetched 2 + 1 loop shape, routed
   through the mask/shift writers for fields that are not byte-sized. */
a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);
setbits_2 (a0, a1, v_min, v_min, v_min, v_max, max_bits, n_bits,
           mask, shift, /* is_increment */ 0);

/* single-buffer tail */
a0 = (void *) b0 + byte_offset;
setbits_1 (a0, v_min, v_min, v_max, max_bits, n_bits, mask, shift);
/* do_setbits_increment: incrementing values through the mask/shift
   writers, with the same wrap handling as do_set_increment. */
ASSERT (v >= v_min && v <= v_max);
...
a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

v = v > v_max ? v_min : v;
setbits_2 (a0, a1, v_old + 0, v_old + 1, v_min, v_max, max_bits, n_bits,
           mask, shift, /* is_increment */ 1);

/* wrap fell inside the pair: redo one value at a time */
setbits_1 (a0, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);
...
v = v > v_max ? v_min : v;
setbits_1 (a1, v + 0, v_min, v_max, max_bits, n_bits, mask, shift);

/* single-buffer tail */
a0 = (void *) b0 + byte_offset;
v = v > v_max ? v_min : v;
ASSERT (v_old >= v_min && v_old <= v_max);
setbits_1 (a0, v_old, v_min, v_max, max_bits, n_bits, mask, shift);
/* do_setbits_random: random values through the mask/shift writers,
   using the same range reduction as do_set_random. */
u64 v_diff = v_max - v_min + 1;
...
a0 = (void *) b0 + byte_offset;
a1 = (void *) b1 + byte_offset;
CLIB_PREFETCH ((void *) b2 + byte_offset, sizeof (v_min), WRITE);
CLIB_PREFETCH ((void *) b3 + byte_offset, sizeof (v_min), WRITE);

/* width-dispatched macro: consume two random words per pair */
u##n * r = random_data; \
random_data = r + 2; \
...
v0 = v0 > v_max ? v0 - v_diff : v0;
v1 = v1 > v_max ? v1 - v_diff : v1;
v0 = v0 > v_max ? v0 - v_diff : v0;
v1 = v1 > v_max ? v1 - v_diff : v1;
setbits_2 (a0, a1, v0, v1, v_min, v_max, max_bits, n_bits, mask, shift,
           /* is_increment */ 0);

/* single-buffer tail: one random word */
a0 = (void *) b0 + byte_offset;
u##n * r = random_data; \
random_data = r + 1; \
...
v0 = v0 > v_max ? v0 - v_diff : v0;
v0 = v0 > v_max ? v0 - v_diff : v0;
setbits_1 (a0, v0, v_min, v_max, max_bits, n_bits, mask, shift);
/* Edit dispatch: compute the field's bit geometry, then pick the fixed,
   increment, or random writer.  l0 is the field's first byte; l1/h1 are
   the bit remainders at either end of the field. */
u32 max_bits, l0, l1, h1, start_bit;
...
start_bit = l0 * BITS (u8);
max_bits = hi_bit - start_bit;

/* width-dispatched macro for byte-aligned fields */
if (edit_type == PG_EDIT_INCREMENT)                  \
  v = do_set_increment (pg, s, buffers, n_buffers,   \
                        ...);                        \
else if (edit_type == PG_EDIT_RANDOM)                \
  do_set_random (pg, s, buffers, n_buffers,          \
                 ...);                               \
else /* PG_EDIT_FIXED */                             \
  do_set_fixed (pg, s, buffers, n_buffers,           \
                ...);

if (l1 == 0 && h1 == 0)
  /* field starts and ends on byte boundaries: direct-store path */
  ...
else
  {
    /* sub-byte field: position mask and shift inside the container */
    u32 n_bits = max_bits;
    ...
    mask <<= max_bits - n_bits;
    shift += max_bits - n_bits;

    if (edit_type == PG_EDIT_INCREMENT)                    \
      v = do_setbits_increment (pg, s, buffers, n_buffers, \
                                BITS (u##n), n_bits,       \
                                l0, v_min, v_max, v,       \
                                ...);                      \
    else if (edit_type == PG_EDIT_RANDOM)                  \
      do_setbits_random (pg, s, buffers, n_buffers,        \
                         BITS (u##n), n_bits,              \
                         ...);                             \
    else /* PG_EDIT_FIXED */                               \
      do_setbits_fixed (pg, s, buffers, n_buffers,         \
                        BITS (u##n), n_bits,               \
                        ...);
  }
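A worked example of the mask/shift setup used by the sub-byte path.
Consider IPv4's 4-bit version field in the first header byte: the
container is 8 bits wide and the field must sit 4 bits above the LSB,
which is exactly what the listing's "mask <<= max_bits - n_bits;
shift += max_bits - n_bits;" accomplishes (this program is illustrative,
not part of the listing):

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  unsigned container_bits = 8, n_bits = 4;
  unsigned pad = container_bits - n_bits;       /* bits below the field */
  uint64_t mask = (((uint64_t) 1 << n_bits) - 1) << pad;
  unsigned shift = pad;
  printf ("mask 0x%llx shift %u\n", (unsigned long long) mask, shift);

  uint8_t byte = 0;                             /* set version = 4 */
  byte = (byte & ~mask) | (4u << shift);
  printf ("byte 0x%02x\n", byte);               /* prints 0x40 */
  return 0;
}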
/* Length edits: distribute each packet's length over its buffer chain;
   buffers past the needed length lose the chain flag and are collected
   in unused_buffers. */
u64 v_min, v_max, length_sum;
...
static u32 *unused_buffers = 0;
...
if (n_bytes_left > 0)
  b->flags |= VLIB_BUFFER_NEXT_PRESENT;
else
  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
...
ASSERT (n_bytes_left == 0);
...
if (vec_len (unused_buffers) > 0)
  {
    /* return the collected buffers, then reset the vector for reuse */
    ...
    _vec_len (unused_buffers) = 0;
  }
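The chain walk above sets the next-present flag on every segment except
the last. A self-contained sketch of that walk over a simplified buffer
type (buf_t, NEXT_PRESENT, and set_chain_lengths are stand-ins for the
vlib_buffer_t machinery, assumed pre-linked via next pointers):

#include <stdint.h>

#define NEXT_PRESENT (1u << 0)   /* stand-in for VLIB_BUFFER_NEXT_PRESENT */

typedef struct buf
{
  uint32_t flags;
  uint16_t current_length;
  struct buf *next;
} buf_t;

/* Distribute n_bytes over a chain of buf_sz-byte buffers, flagging
   exactly the non-final segments as having a next buffer. */
static void
set_chain_lengths (buf_t *b, uint32_t n_bytes, uint16_t buf_sz)
{
  uint32_t n_bytes_left = n_bytes;
  while (b)
    {
      uint16_t len = n_bytes_left > buf_sz ? buf_sz : (uint16_t) n_bytes_left;
      b->current_length = len;
      n_bytes_left -= len;
      if (n_bytes_left > 0)
        b->flags |= NEXT_PRESENT;
      else
        {
          b->flags &= ~NEXT_PRESENT;   /* final segment ends the chain */
          break;
        }
      b = b->next;
    }
}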
/* An edit's low bit is derived from its width and high bit. */
lo_bit = hi_bit - e->n_bits;

/* Next-buffer stitching: link pre-allocated chain buffers, two packets
   at a time, marking each head buffer as having a next. */
ni0 = next_buffers[0];
ni1 = next_buffers[1];
...
b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
b1->flags |= VLIB_BUFFER_NEXT_PRESENT;

/* single-buffer tail */
ni0 = next_buffers[0];
...
b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
/* Buffer init: allocate buffers, then copy packet template data across
   each chain one buffer-sized chunk at a time. */
... u32 * buffers, u32 * next_buffers, u32 n_alloc)
...
if (n_allocated == 0)
  ...
n_alloc = n_allocated;
...
if (is_start_of_packet)
  ...

u32 buffer_alloc_request = 0;
u32 buffer_alloc_result;
u32 current_buffer_index;
...
/* round each replay template's length up to whole buffers */
buffer_alloc_request += (vec_len (d0) + (buf_sz - 1)) / buf_sz;
...
i = ((i + 1) == l) ? 0 : i + 1;   /* advance the replay index, wrapping */
...
ASSERT (buffer_alloc_request > 0);
...
if (buffer_alloc_result < buffer_alloc_request)
  {
    clib_warning ("alloc failure, got %d not %d", buffer_alloc_result,
                  buffer_alloc_request);
    ...
  }
...
current_buffer_index = 0;
...
u32 bytes_to_copy, bytes_this_chunk;
...
while (bytes_to_copy)
  {
    bytes_this_chunk = clib_min (bytes_to_copy, buf_sz);
    ...
    not_last = bytes_this_chunk < bytes_to_copy;
    ...
    b->flags |= VLIB_BUFFER_NEXT_PRESENT;
    ...
    bytes_to_copy -= bytes_this_chunk;
    data_offset += bytes_this_chunk;
    current_buffer_index++;
  }
...
i = ((i + 1) == l) ? 0 : i + 1;
...
for (i = 0; i < n_alloc; i++)
  ...
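The copy loop splits one template across as many fixed-size buffers as it
needs, which is also why the allocation request rounds up with
(len + buf_sz - 1) / buf_sz. A standalone sketch of that loop
(copy_template and its parameters are illustrative, assuming destination
buffers already allocated in buffer_data):

#include <stdint.h>
#include <string.h>

/* Copy a template into a run of buf_sz-byte buffers, one chunk per
   buffer; returns the number of buffers consumed. */
static uint32_t
copy_template (const uint8_t *template_data, uint32_t template_len,
               uint8_t **buffer_data, uint32_t buf_sz)
{
  uint32_t bytes_to_copy = template_len, data_offset = 0;
  uint32_t current_buffer_index = 0;
  while (bytes_to_copy)
    {
      uint32_t bytes_this_chunk =
        bytes_to_copy < buf_sz ? bytes_to_copy : buf_sz;
      memcpy (buffer_data[current_buffer_index],
              template_data + data_offset, bytes_this_chunk);
      /* not_last = bytes_this_chunk < bytes_to_copy, i.e. another
         segment follows -> the chain flag would be set here */
      bytes_to_copy -= bytes_this_chunk;
      data_offset += bytes_this_chunk;
      current_buffer_index++;
    }
  return current_buffer_index;
}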
/* Stream fill (pg_stream_fill helper): append freshly initialized
   buffers to the per-stream buffer fifo; when the run would pass the
   fifo's end, the copy is split at the wrap point. */
u32 *tail, *start, *end, *last_tail, *last_start;
...
last_tail = last_start = 0;
...
if (tail + n_alloc <= end)
  ...
...
if (n_added == n && n_alloc > n_added)
  /* second pass for the wrapped remainder */
  ... (pg, s, bi, start, last_start, n_alloc - n_added);
...
return n_in_fifo + n_added;
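The wrap test is the classic circular-buffer split. A minimal sketch of
the same idea over a u32 ring (fifo_put is hypothetical; start/end bound
the ring's storage and *tail_p is the current write position):

#include <stdint.h>
#include <string.h>

static void
fifo_put (uint32_t *start, uint32_t *end, uint32_t **tail_p,
          const uint32_t *src, uint32_t n_alloc)
{
  uint32_t *tail = *tail_p;
  if (tail + n_alloc <= end)          /* contiguous: single copy */
    {
      memcpy (tail, src, n_alloc * sizeof (src[0]));
      tail += n_alloc;
    }
  else                                /* wraps: copy in two pieces */
    {
      uint32_t n0 = (uint32_t) (end - tail);
      memcpy (tail, src, n0 * sizeof (src[0]));
      memcpy (start, src + n0, (n_alloc - n0) * sizeof (src[0]));
      tail = start + (n_alloc - n0);
    }
  *tail_p = (tail == end) ? start : tail;   /* normalize at the edge */
}

The caller must ensure n_alloc never exceeds the free space, which the
listing guarantees by clamping against clib_fifo_free_elts first.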
/* Tracing: copy buffer metadata (everything except pre_data) into the
   trace record, two packets at a time, and report how many trace slots
   remain. */
u32 n_trace0 = 0, n_trace1 = 0;
...
clib_memcpy_fast (..., b0, sizeof (b0[0]) - sizeof (b0->pre_data));
clib_memcpy_fast (..., b1, sizeof (b1[0]) - sizeof (b1->pre_data));

/* single-buffer tail */
clib_memcpy_fast (..., b0, sizeof (b0[0]) - sizeof (b0->pre_data));
...
return n_trace - n_trace0 - n_trace1;
/* Offload setup: find the real ethertype (skipping up to two VLAN
   tags), then set the IP4/IP6 header-offset flags and the requested
   checksum and GSO offloads. */
... u32 buffer_oflags, int gso_enabled, u32 gso_size)
...
u16 ethertype = clib_net_to_host_u16 (eh->type);
...
/* outer VLAN tag */
ethertype = clib_net_to_host_u16 (vlan->type);
l2hdr_sz += sizeof (*vlan);
if (ethertype == ETHERNET_TYPE_VLAN)
  {
    /* inner (QinQ) tag */
    ethertype = clib_net_to_host_u16 (vlan->type);
    l2hdr_sz += sizeof (*vlan);
  }
...
if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
  {
    ...
    l4_proto = ip4->protocol;
    b0->flags |=
      (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
       VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
       VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
    if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
      oflags |= VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
  }
else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
  {
    ...
    l4_proto = ip6->protocol;
    b0->flags |=
      (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
       VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
       VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
  }

if (l4_proto == IP_PROTOCOL_TCP)
  {
    if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
      oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
    ...
    if (gso_enabled && (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
      {
        b0->flags |= VNET_BUFFER_F_GSO;
        ...
      }
  }
else if (l4_proto == IP_PROTOCOL_UDP)
  {
    if (buffer_oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
      oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
  }
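The ethertype walk has to look past up to two stacked VLAN tags before it
can classify the L3 protocol. A self-contained sketch of that walk; the
struct layouts and ETH_TYPE_VLAN constant are simplified stand-ins for
VPP's ethernet types, and real code must mind alignment when casting
packet bytes:

#include <stdint.h>
#include <arpa/inet.h>

#define ETH_TYPE_VLAN 0x8100

struct eth_hdr  { uint8_t dst[6], src[6]; uint16_t type; };
struct vlan_hdr { uint16_t tci, type; };

/* Return the inner ethertype and the L2 header size (14, 18, or 22). */
static uint16_t
parse_ethertype (const uint8_t *pkt, uint32_t *l2hdr_sz)
{
  const struct eth_hdr *eh = (const void *) pkt;
  uint16_t ethertype = ntohs (eh->type);
  uint32_t sz = sizeof (*eh);
  if (ethertype == ETH_TYPE_VLAN)          /* outer tag */
    {
      const struct vlan_hdr *vlan = (const void *) (pkt + sz);
      ethertype = ntohs (vlan->type);
      sz += sizeof (*vlan);
      if (ethertype == ETH_TYPE_VLAN)      /* inner (QinQ) tag */
        {
          vlan = (const void *) (pkt + sz);
          ethertype = ntohs (vlan->type);
          sz += sizeof (*vlan);
        }
    }
  *l2hdr_sz = sz;
  return ethertype;
}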
/* pg_generate_packets: pull packets from the stream's fifo into node
   frames, attach feature-arc config when enabled, trace, and hand off
   to the next node. */
u32 *to_next, n_this_frame, n_left, n_trace, n_packets_in_fifo;
uword n_packets_generated;
...
u32 current_config_index = ~(u32) 0;
...
n_packets_in_fifo = pg_stream_fill (pg, s, n_packets_to_generate);
n_packets_to_generate = clib_min (n_packets_in_fifo, n_packets_to_generate);
n_packets_generated = 0;
...
current_config_index = ...;
...
while (n_packets_to_generate > 0)
  {
    ...
    n_this_frame = n_packets_to_generate;
    if (n_this_frame > n_left)
      ...
    if (head + n_this_frame <= end)
      ...
    if (current_config_index != ~(u32) 0)
      for (i = 0; i < n_this_frame; i++)
        {
          ...
          vnet_buffer (b)->feature_arc_index = feature_arc_index;
        }
    ...
    /* trace up to n_trace packets of this frame */
    ... n_this_frame, n_trace);
    ...
    n_packets_to_generate -= n_this_frame;
    n_packets_generated += n_this_frame;
    ...
    for (i = 0; i < n_this_frame; i++)
      ...
  }
...
return n_packets_generated;
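The outer loop clips each burst to the frame's free slots and keeps going
until the request is exhausted. A schematic sketch of just that batching
skeleton (generate and frame_capacity are illustrative; the elided body
is where buffers are initialized, edited, and enqueued):

#include <stdint.h>

static uint64_t
generate (uint32_t n_packets_to_generate, uint32_t frame_capacity)
{
  uint64_t n_packets_generated = 0;
  while (n_packets_to_generate > 0)
    {
      uint32_t n_left = frame_capacity;      /* free slots in this frame */
      uint32_t n_this_frame = n_packets_to_generate;
      if (n_this_frame > n_left)
        n_this_frame = n_left;               /* clip burst to the frame */
      /* ... init buffers, apply edits, enqueue to the next node ... */
      n_packets_to_generate -= n_this_frame;
      n_packets_generated += n_this_frame;
    }
  return n_packets_generated;
}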
/* Node and CLI registrations: pg-input runs as a sibling of
   device-input and starts disabled; the pg-input-mac-filter feature
   hangs off the device-input arc and is toggled per interface via the
   packet-generator mac-filter command. */
uword n_packets = 0;
u32 worker_index = 0;
...
.sibling_of = "device-input",
...
.state = VLIB_NODE_STATE_DISABLED,
...
return (frame->n_vectors);
...
.name = "pg-input-mac-filter",
.vector_size = sizeof (u32),
...
.arc_name = "device-input",
.node_name = "pg-input-mac-filter",
...
else if (unformat (line_input, "%U", ...))
...
"pg-input-mac-filter",
...
.path = "packet-generator mac-filter",
.short_help = "packet-generator mac-filter <INTERFACE> <on|off>",