#include <sys/ioctl.h>

/* ... */

#define foreach_memif_input_error \
  _(BUFFER_ALLOC_FAIL, "buffer allocation failed") \
  _(NOT_IP, "not ip packet")

typedef enum
{
#define _(f,s) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

/* ... */

static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  /* ... */
  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  /* ... */
}
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  /* ... */
}
/* in memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b,
   u32 * buffers, u32 buffer_size): */
  seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
/* in memif_desc_is_invalid (memif_if_t * mif, memif_desc_t * d,
   u32 buffer_length): */
  rv = d->flags & (~valid_flags);
  /* ... */
  mif->flags |= MEMIF_IF_FLAG_ERROR;
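/* Aside: memif_desc_is_invalid rejects a descriptor by AND-ing its flags
 * with the complement of the permitted set; any unknown bit makes the
 * result non-zero.  A standalone sketch of that flag-mask technique; the
 * DEMO_* names are invented and this is not the memif API: */
#include <stdint.h>
#include <stdio.h>

#define DEMO_FLAG_NEXT (1 << 0) /* the only flag we accept */

static int
demo_desc_is_invalid (uint16_t flags)
{
  uint16_t valid_flags = DEMO_FLAG_NEXT;
  return (flags & (uint16_t) ~valid_flags) != 0;
}

int
main (void)
{
  printf ("%d\n", demo_desc_is_invalid (DEMO_FLAG_NEXT));       /* 0 */
  printf ("%d\n", demo_desc_is_invalid (DEMO_FLAG_NEXT | 0x8)); /* 1 */
  return 0;
}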
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, memif_if_t * mif,
                           memif_ring_type_t type, u16 qid,
                           memif_interface_mode_t mode)
{
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, *to_next = 0;
  u32 bi0, bi1, bi2, bi3;
  /* ... */
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  /* ... */
  u16 n_buffers = 0, n_alloc;
  /* ... */
  void *last_region_shm = 0;
  /* ... */
  mask = ring_size - 1;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
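/* Aside: cur_slot and last_slot are free-running u16 counters; only the
 * low log2_ring_size bits ever index the ring (via `& mask`).  Because the
 * subtraction is done in unsigned 16-bit arithmetic, last_slot - cur_slot
 * stays correct even after the counters wrap.  A standalone illustration,
 * assuming a power-of-two ring size as memif requires: */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t ring_size = 1 << 10;            /* 1024 slots */
  uint16_t mask = ring_size - 1;
  uint16_t cur_slot = 65530;               /* about to wrap */
  uint16_t last_slot = 4;                  /* already wrapped */
  uint16_t n_slots = last_slot - cur_slot; /* modulo 2^16: 10 */

  printf ("n_slots = %u\n", n_slots);
  while (n_slots--)
    printf ("slot %u\n", cur_slot++ & mask); /* 1018..1023, then 0..3 */
  return 0;
}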
  u32 dst_off, src_off, n_bytes_left;
  /* ... */
  dst_off = start_offset;
  /* ... */
  s0 = cur_slot & mask;
  d0 = &ring->desc[s0];
  n_bytes_left = d0->length;
  /* ... */
  mb0 = last_region_shm + d0->offset;
  /* ... */
  u32 dst_free = buffer_size - dst_off;
  /* ... */
  dst_free = buffer_size;
  /* ... */
  u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
  /* ... */
  n_bytes_left -= bytes_to_copy;
  src_off += bytes_to_copy;
  dst_off += bytes_to_copy;
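/* Aside: the copy loop above splits one descriptor's payload across as
 * many fixed-size destination buffers as needed: each pass copies
 * clib_min (dst_free, n_bytes_left) and advances both offsets, opening a
 * fresh buffer whenever dst_free reaches zero.  A standalone sketch of the
 * same chunking logic; the buffer size and names are invented: */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 8

int
main (void)
{
  const char src[] = "a 19-byte payload..";
  uint32_t n_bytes_left = sizeof (src); /* 20 bytes incl. NUL */
  uint32_t src_off = 0, dst_off = 0, n_buffers = 1;
  char dst[4][BUF_SIZE];

  while (n_bytes_left)
    {
      uint32_t dst_free = BUF_SIZE - dst_off;
      if (dst_free == 0)
        {
          dst_off = 0; /* move to a fresh buffer */
          dst_free = BUF_SIZE;
          n_buffers++;
        }
      uint32_t bytes_to_copy =
        n_bytes_left < dst_free ? n_bytes_left : dst_free; /* clib_min */
      memcpy (&dst[n_buffers - 1][dst_off], src + src_off, bytes_to_copy);
      n_bytes_left -= bytes_to_copy;
      src_off += bytes_to_copy;
      dst_off += bytes_to_copy;
    }
  printf ("used %u buffers\n", n_buffers); /* 3: 8 + 8 + 4 bytes */
  return 0;
}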
  vlib_error_count (vm, node->node_index,
                    MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
  u32 n_from = n_rx_packets;
  /* ... */
  u32 next0, next1, next2, next3;
  /* ... */
  while (n_from >= 8 && n_left_to_next >= 4)
    {
      /* ... */
      to_next[0] = bi0 = ptd->buffers[fbvi0];
      to_next[1] = bi1 = ptd->buffers[fbvi1];
      to_next[2] = bi2 = ptd->buffers[fbvi2];
      to_next[3] = bi3 = ptd->buffers[fbvi3];
      /* ... */
      next0 = next1 = next2 = next3 = next_index;
      /* ... */
      vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, bi1, bi2, bi3,
                                       next0, next1, next2, next3);
    }
  while (n_from && n_left_to_next)
    {
      to_next[0] = bi0 = ptd->buffers[fbvi0];
      /* ... */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, next0);
    }
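/* Aside: the two loops above are the usual vlib dispatch shape: a main
 * loop that enqueues four buffers per iteration (requiring n_from >= 8 so
 * the next four can still be prefetched), then a scalar loop for the
 * remainder.  A skeletal standalone version of just that control flow,
 * with the buffer work reduced to integer sums and no prefetching: */
#include <stdio.h>

int
main (void)
{
  int items[11] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
  int n_from = 11, i = 0, sum = 0;

  while (n_from >= 8) /* quad loop: always leaves >= 4 for lookahead */
    {
      sum += items[i] + items[i + 1] + items[i + 2] + items[i + 3];
      i += 4;
      n_from -= 4;
    }
  while (n_from) /* scalar remainder */
    {
      sum += items[i++];
      n_from--;
    }
  printf ("sum = %d\n", sum); /* 55 */
  return 0;
}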
refill:
  /* ... */
  n_slots = ring_size - head + mq->last_tail;
  /* ... */
  u16 s = head++ & mask;
  /* ... */
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                              vlib_frame_t * frame, memif_if_t * mif,
                              u16 qid, memif_interface_mode_t mode)
{
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  /* ... */
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  /* ... */
  mask = ring_size - 1;
  /* ... */
  last_slot = ring->tail;
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;
  /* ... */
  s0 = cur_slot & mask;
  /* ... */
  ptd->buffers[n_rx_packets++] = bi0;
  /* ... */
  d0 = &ring->desc[s0];
  /* ... */
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  /* ... */
  s0 = cur_slot & mask;
  d0 = &ring->desc[s0];
  /* ... */
  b0->flags |= VLIB_BUFFER_NEXT_PRESENT;
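/* Aside: a packet that spans several descriptors becomes a chain of vlib
 * buffers: each buffer stores the index of the next one and sets
 * VLIB_BUFFER_NEXT_PRESENT, while the head buffer carries the total
 * length.  A minimal standalone model of such an index-linked chain; the
 * struct layout and DEMO_* flag are invented for the example: */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NEXT_PRESENT (1 << 0)

typedef struct
{
  uint32_t flags;
  uint32_t next_buffer;    /* index of the next buffer in the chain */
  uint16_t current_length; /* bytes held in this buffer */
} demo_buffer_t;

int
main (void)
{
  demo_buffer_t pool[3] = {
    { DEMO_NEXT_PRESENT, 1, 100 },
    { DEMO_NEXT_PRESENT, 2, 100 },
    { 0, 0, 40 },
  };
  uint32_t bi = 0, total = 0;

  for (;;)
    {
      total += pool[bi].current_length;
      if (!(pool[bi].flags & DEMO_NEXT_PRESENT))
        break;
      bi = pool[bi].next_buffer;
    }
  printf ("chain length = %u bytes\n", total); /* 240 */
  return 0;
}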
  n_from = n_rx_packets;
  /* ... */
  u32 next0, next1, next2, next3;
  /* ... */
  while (n_from >= 8 && n_left_to_next >= 4)
    {
      /* ... */
      to_next[0] = bi0 = buffers[0];
      to_next[1] = bi1 = buffers[1];
      to_next[2] = bi2 = buffers[2];
      to_next[3] = bi3 = buffers[3];
      /* ... */
      next0 = next1 = next2 = next3 = next_index;
      /* ... */
      vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, bi1, bi2, bi3,
                                       next0, next1, next2, next3);
    }
  while (n_from && n_left_to_next)
    {
      to_next[0] = bi0 = buffers[0];
      /* ... */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                       n_left_to_next, bi0, next0);
    }
refill:
  /* ... */
  n_slots = ring_size - head + mq->last_tail;
  /* ... */
  dt->length = buffer_length;
  /* ... */
  vlib_error_count (vm, node->node_index,
                    MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
  /* ... */
  while (n_alloc >= 32)
    {
      bi0 = mq->buffers[(head + 4) & mask];
      bi1 = mq->buffers[(head + 5) & mask];
      bi2 = mq->buffers[(head + 6) & mask];
      bi3 = mq->buffers[(head + 7) & mask];
      /* ... */
      d0 = &ring->desc[s0];
      d1 = &ring->desc[s1];
      d2 = &ring->desc[s2];
      d3 = &ring->desc[s3];
      /* ... */
    }
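/* Aside: the refill loop above reads buffer indices four slots ahead of
 * the descriptors it is filling, so their metadata is already in cache by
 * the time it is needed.  A standalone sketch of that lookahead shape,
 * using the GCC/Clang __builtin_prefetch; the ring contents and lookahead
 * distance are illustrative only: */
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 256
#define MASK (RING_SIZE - 1)

int
main (void)
{
  static uint32_t ring[RING_SIZE];
  uint16_t head = 0;
  int n = RING_SIZE;

  while (n >= 8)
    {
      /* touch entries 4..7 slots ahead of the ones written below */
      __builtin_prefetch (&ring[(head + 4) & MASK]);
      __builtin_prefetch (&ring[(head + 5) & MASK]);
      __builtin_prefetch (&ring[(head + 6) & MASK]);
      __builtin_prefetch (&ring[(head + 7) & MASK]);

      ring[(head + 0) & MASK] = head + 0;
      ring[(head + 1) & MASK] = head + 1;
      ring[(head + 2) & MASK] = head + 2;
      ring[(head + 3) & MASK] = head + 3;
      head += 4;
      n -= 4;
    }
  while (n--) /* scalar remainder, no lookahead */
    {
      ring[head & MASK] = head;
      head++;
    }
  printf ("last = %u\n", ring[MASK]); /* 255 */
  return 0;
}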
  /* ... one-slot-at-a-time remainder ... */
  d0 = &ring->desc[s0];
  /* ... */
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
          (mif->flags & MEMIF_IF_FLAG_CONNECTED))
        {
          if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
                                                      dq->queue_id, mode_ip);
              else
                n_rx += memif_device_input_zc_inline (vm, node, frame, mif,
                                                      dq->queue_id, mode_eth);
            }
          else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            /* ... */
#ifndef CLIB_MARCH_VARIANT
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .sibling_of = "device-input",
  /* ... */
  .state = VLIB_NODE_STATE_INTERRUPT,
  /* ... */
};
#endif