FD.io VPP
v21.10.1-2-g0a485f517
Vector Packet Processing
#include <sys/ioctl.h>

/* ... */

#define foreach_memif_input_error                                            \
  _ (BUFFER_ALLOC_FAIL, buffer_alloc, ERROR, "buffer allocation failed")     \
  _ (BAD_DESC, bad_desc, ERROR, "bad descriptor")                            \
  _ (NOT_IP, not_ip, INFO, "not ip packet")

#define _(f, n, s, d) MEMIF_INPUT_ERROR_##f,
/* ... */
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
/* ... */
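/*
 * The two `_' definitions above are the X-macro expanders for
 * foreach_memif_input_error: the first generates the MEMIF_INPUT_ERROR_*
 * enum values, the second the memif_input_error_counters[] descriptors,
 * e.g. { "bad_desc", "bad descriptor", VL_COUNTER_SEVERITY_ERROR }.
 */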
static __clib_unused u8 *
format_memif_input_trace (u8 *s, va_list *args)
{
  /* ... */
  s = format (s, "memif: hw_if_index %d next-index %d", /* ... */);
  /* ... */
}
  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
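/*
 * This line sits in memif_next_from_ip_hdr (), which classifies a raw IP
 * packet by its version nibble: 0x4 selects
 * VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT, 0x6 selects
 * VNET_DEVICE_INPUT_NEXT_IP6_INPUT; anything else is counted as NOT_IP,
 * as above, and sent to VNET_DEVICE_INPUT_NEXT_DROP.
 */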
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t *ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  /* ... */
}
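/*
 * In the copy path, descriptor payloads are not copied inline; each
 * (data, len, buffer_offset, buffer_vec_index) tuple is appended to
 * ptd->copy_ops, and the batch is replayed with clib_memcpy_fast () once
 * all destination buffers are allocated.
 */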
  seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
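/*
 * This is memif_add_to_chain (): a packet that exceeds one vlib buffer is
 * spilled into a chain, each segment getting VLIB_BUFFER_NEXT_PRESENT set
 * and its next_buffer pointed at the next index from the supplied list.
 */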
static_always_inline uword
memif_device_input_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                           memif_if_t *mif, memif_ring_type_t type, u16 qid,
                           memif_interface_mode_t mode)
{
  /* ... */
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  /* ... */
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  /* ... */
  void *last_region_shm = 0;
  void *last_region_max = 0;
  /* ... */
  mask = ring_size - 1;
  /* ... */
  if (type == MEMIF_RING_S2M)
    last_slot = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  else
    last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);

  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
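/*
 * Synchronization with the producer: head (S2M rings) or tail (M2S rings)
 * is written by the peer with __ATOMIC_RELEASE and read here with
 * __ATOMIC_ACQUIRE, so descriptor contents are visible before the index
 * moves.  The counters are free-running u16 values: last_slot - cur_slot
 * is the number of pending slots even across wraparound, and `slot & mask'
 * maps a counter onto the power-of-two ring
 * (ring_size == 1 << log2_ring_size).
 */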
      u32 dst_off, src_off, n_bytes_left;
      /* ... */
      dst_off = start_offset;
      /* ... */
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;
      /* ... */
      mb0 = last_region_shm + d0->offset;
      /* ... */
      u32 dst_free = buffer_size - dst_off;
      /* ... */
      dst_free = buffer_size;
      /* ... */
      u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
      /* ... */
      n_bytes_left -= bytes_to_copy;
      src_off += bytes_to_copy;
      dst_off += bytes_to_copy;
      /* ... */

  /* ... */
  vlib_error_count (vm, node->node_index,
                    MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
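/*
 * Each descriptor is sliced into copy ops of at most
 * clib_min (dst_free, n_bytes_left) bytes; when a destination buffer
 * fills up (dst_free reaches 0) the copy continues at offset 0 of the
 * next buffer, hence the dst_free = buffer_size reset above.  If buffer
 * allocation comes up short, the shortfall is recorded against the
 * buffer_alloc_fail counter via vlib_error_count ().
 */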
  /* ... */
  __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
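/*
 * Consumed slots are handed back to the producer by publishing the new
 * tail with a release store; this applies on the S2M ring, while for M2S
 * presumably only the local mq->last_tail bookkeeping advances here.
 */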
  /* ... */
  u32 n_from = n_rx_packets;
  /* ... */
  n_left_to_next -= n_rx_packets;
  /* ... */
  n_slots = ring_size - head + mq->last_tail;
  /* ... */
  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
  /* ... */
}
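/*
 * Refill: ring_size - head + mq->last_tail is the number of descriptors
 * the consumer may hand back; once they are re-armed, head is published
 * with a release store so the producer can start filling them again.
 */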
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                              memif_if_t *mif, u16 qid,
                              memif_interface_mode_t mode)
{
  /* ... */
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  /* ... */
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  /* ... */
  mask = ring_size - 1;
  /* ... */
  last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
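/*
 * Zero-copy variant: the packet memory consists of vlib buffers shared
 * with the peer, so rx only collects buffer indices from the ring with no
 * memcpy.  Only ring->tail is acquire-loaded here; in zero-copy mode this
 * side posts buffers at head and the peer returns filled descriptors
 * through tail.
 */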
      s0 = cur_slot & mask;
      /* ... */
      ptd->buffers[n_rx_packets++] = bi0;
      /* ... */
      d0 = &ring->desc[s0];
      /* ... */
      hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      /* ... */
          s0 = cur_slot & mask;
          d0 = &ring->desc[s0];
          /* ... */
          b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
      /* ... */
  n_from = n_rx_packets;
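/*
 * Descriptors carrying MEMIF_DESC_FLAG_NEXT continue in the following
 * slot; the loop above stitches those slots into a vlib buffer chain,
 * with VLIB_BUFFER_NEXT_PRESENT on each segment and
 * VLIB_BUFFER_TOTAL_LENGTH_VALID on the head buffer hb.
 */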
  u32 next0, next1, next2, next3;
  /* ... */
  while (n_from >= 8 && n_left_to_next >= 4)
    {
      /* ... */
      to_next[0] = bi0 = buffers[0];
      to_next[1] = bi1 = buffers[1];
      to_next[2] = bi2 = buffers[2];
      to_next[3] = bi3 = buffers[3];
      /* ... */
      vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, bi1, bi2, bi3,
                                       next0, next1, next2, next3);
    }
  while (n_from && n_left_to_next)
    {
      to_next[0] = bi0 = buffers[0];
      /* ... */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, next0);
    }
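/*
 * Standard VPP dispatch pattern: buffer indices are speculatively copied
 * into the current next frame four at a time (n_from >= 8 leaves room to
 * prefetch ahead), and vlib_validate_buffer_enqueue_x4 () repairs the
 * enqueue whenever one of next0..next3 disagrees with the speculated
 * next_index; the scalar loop drains the remainder one packet at a time.
 */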
refill:
  /* ... */
  n_slots = ring_size - head + mq->last_tail;
  /* ... */
  vlib_error_count (vm, node->node_index,
                    MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
  /* ... */
  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
  /* ... */
}
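/*
 * Zero-copy refill: fresh vlib buffers are allocated directly into the
 * descriptor ring (cf. vlib_buffer_alloc_to_ring_from_pool () below);
 * allocation shortfalls increment BUFFER_ALLOC_FAIL, and the new head is
 * released to the producer.
 */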
VLIB_NODE_FN (memif_input_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  /* ... */
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
          (mif->flags & MEMIF_IF_FLAG_CONNECTED))
        {
          if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
            /* ... */
          else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            /* ... */
          /* ... */
        }
  /* ... */
}
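/*
 * Per connected interface, the node function selects the zero-copy path
 * when MEMIF_IF_FLAG_ZERO_COPY is set; otherwise it presumably picks the
 * ring type from the role: a slave polls the master-to-slave ring, a
 * master the slave-to-master ring.
 */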
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  /* ... */
  .sibling_of = "device-input",
  /* ... */
  .state = VLIB_NODE_STATE_INTERRUPT,
  /* ... */
};
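/*
 * Registered as a sibling of "device-input", the node shares the
 * device-input next-node arcs (ethernet-input, ip4/ip6 input, drop) and
 * starts in interrupt state rather than polling.
 */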
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
u32 next_buffer
Next buffer for this linked-list of buffers.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers; frees the entire buffer chain for each buffer.
u8 buffer_pool_index
Index of the buffer pool this buffer belongs to.
static __clib_unused u8 * format_memif_input_trace(u8 *s, va_list *args)
static_always_inline void memif_add_to_chain(vlib_main_t *vm, vlib_buffer_t *b, u32 *buffers, u32 buffer_size)
vlib_frame_t * frame
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
static __clib_warn_unused_result u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
u32 per_interface_next_index
memif_region_size_t region_size
@ VNET_DEVICE_INPUT_NEXT_DROP
vlib_node_runtime_t * node
#define foreach_memif_input_error
@ MEMIF_INTERFACE_MODE_IP
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
memif_per_thread_data_t * per_thread_data
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
vlib_main_t * vm
memif_packet_op_t packet_ops[MEMIF_RX_VECTOR_SZ]
memif_queue_t * rx_queues
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
u16 first_buffer_vec_index
vlib_buffer_enqueue_to_next(vm, node, from, (u16 *) nexts, frame->n_vectors)
@ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static_always_inline void memif_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, memif_if_t *mif, vlib_buffer_t *b, u32 next, u16 qid, uword *n_tracep)
static_always_inline u32 memif_next_from_ip_hdr(vlib_node_runtime_t *node, vlib_buffer_t *b)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
memif_region_index_t region
memif_log2_ring_size_t log2_ring_size
static void vlib_frame_no_append(vlib_frame_t *f)
i16 current_data
Signed offset into data[], pre_data[] that we are currently processing.
struct memif_if_t::run (anonymous struct member)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
#define VLIB_NODE_FN(node)
memif_interface_mode_t mode
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vnet_main_t * vnet_get_main(void)
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
@ VNET_DEVICE_INPUT_NEXT_IP6_INPUT
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
#define static_always_inline
#define MEMIF_DESC_FLAG_NEXT
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
static vlib_error_desc_t memif_input_error_counters[]
volatile u8 ref_count
Reference count for this buffer.
vlib_buffer_t buffer_template
@ VNET_INTERFACE_COUNTER_RX
uint16_t memif_region_index_t
#define CLIB_CACHE_LINE_BYTES
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
memif_copy_op_t * copy_ops
struct _vlib_node_registration vlib_node_registration_t
vlib_combined_counter_main_t * combined_sw_if_counters
u16 current_length
Number of bytes between current data and the end of this buffer.
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
static_always_inline uword memif_device_input_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, memif_if_t *mif, u16 qid, memif_interface_mode_t mode)
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
@ VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
static_always_inline uword memif_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, memif_if_t *mif, memif_ring_type_t type, u16 qid, memif_interface_mode_t mode)
vlib_put_next_frame(vm, node, next_index, 0)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
static_always_inline void clib_prefetch_load(void *p)
static_always_inline vnet_hw_if_rxq_poll_vector_t * vnet_hw_if_get_rxq_poll_vector(vlib_main_t *vm, vlib_node_runtime_t *node)
#define MEMIF_RX_VECTOR_SZ
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
@ MEMIF_INTERFACE_MODE_ETHERNET
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
u16 nexts[VLIB_FRAME_SIZE]
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
memif_region_offset_t offset
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static __clib_warn_unused_result u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
vnet_interface_main_t interface_main
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)