|
FD.io VPP
v21.06-3-gbb25fbf28
Vector Packet Processing
|
Go to the documentation of this file.
21 #include <sys/ioctl.h>
31 #define foreach_memif_tx_func_error \
32 _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots") \
33 _ (ROLLBACK, rollback, ERROR, "no enough space in tx buffers")
37 #define _(f, n, s, d) MEMIF_TX_ERROR_##f,
44 #define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
49 #ifndef CLIB_MARCH_VARIANT
53 u32 dev_instance = va_arg (*args,
u32);
67 u32 dev_instance = va_arg (*args,
u32);
68 int verbose = va_arg (*args,
int);
71 s =
format (s,
"MEMIF interface");
83 s =
format (s,
"Unimplemented...");
89 u16 buffer_offset,
u16 buffer_vec_index)
112 void *last_region_shm = 0;
117 mask = ring_size - 1;
124 tail = __atomic_load_n (&ring->
tail, __ATOMIC_ACQUIRE);
126 free_slots = ring_size - head + mq->
last_tail;
131 head = __atomic_load_n (&ring->
head, __ATOMIC_ACQUIRE);
133 free_slots = head - tail;
136 while (
n_left && free_slots)
141 u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
142 u32 saved_ptd_copy_ops_len = _vec_len (ptd->
copy_ops);
143 u32 saved_ptd_buffers_len = _vec_len (ptd->
buffers);
155 mb0 = last_region_shm + d0->
offset;
192 mb0 = last_region_shm + d0->
offset;
197 _vec_len (ptd->
buffers) = saved_ptd_buffers_len;
198 _vec_len (ptd->
copy_ops) = saved_ptd_copy_ops_len;
200 MEMIF_TX_ERROR_ROLLBACK, 1);
205 bytes_to_copy =
clib_min (src_left, dst_left);
209 src_off += bytes_to_copy;
210 dst_off += bytes_to_copy;
211 src_left -= bytes_to_copy;
212 dst_left -= bytes_to_copy;
235 while (n_copy_op >= 8)
272 __atomic_store_n (&ring->
head,
slot, __ATOMIC_RELEASE);
274 __atomic_store_n (&ring->
tail,
slot, __ATOMIC_RELEASE);
276 if (
n_left && n_retries--)
296 tail = __atomic_load_n (&ring->
tail, __ATOMIC_ACQUIRE);
308 free_slots = ring_size - head + mq->
last_tail;
310 while (
n_left && free_slots)
313 u16 slots_in_packet = 1;
327 d0 = &ring->
desc[s0];
344 free_slots += slots_in_packet;
345 slot -= slots_in_packet;
365 __atomic_store_n (&ring->
head,
slot, __ATOMIC_RELEASE);
367 if (
n_left && n_retries--)
398 if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
401 else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
417 int __clib_unused
r = write (mq->
int_fd, &
b, sizeof (
b));
421 if ((mif->flags & MEMIF_IF_FLAG_ZERO_COPY) == 0)
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
u32 next_buffer
Next buffer for this linked-list of buffers.
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers: frees the entire buffer chain for each buffer.
u8 buffer_pool_index
Index of the buffer pool this buffer belongs to.
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static_always_inline uword memif_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, memif_if_t *mif, memif_ring_type_t type, memif_queue_t *mq, memif_per_thread_data_t *ptd, u32 n_left)
u32 per_interface_next_index
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
nat44_ei_interface_t * interfaces
vlib_main_t vlib_node_runtime_t * node
memif_socket_file_t * socket_files
VNET_DEVICE_CLASS(af_xdp_device_class)
memif_per_thread_data_t * per_thread_data
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
vl_api_tunnel_mode_t mode
@ VNET_HW_IF_RX_MODE_POLLING
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
memif_queue_t * rx_queues
clib_error_t * memif_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
vnet_hw_if_output_node_runtime_t * r
static u8 * format_memif_device(u8 *s, va_list *args)
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
memif_region_index_t region
#define VNET_DEVICE_CLASS_TX_FN(devclass)
memif_log2_ring_size_t log2_ring_size
static void memif_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
#define CLIB_PREFETCH(addr, size, type)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
struct memif_if_t::@719 run
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define static_always_inline
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
#define MEMIF_DESC_FLAG_NEXT
VNET_DEVICE_CLASS_TX_FN() memif_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u8 * format_memif_device_name(u8 *s, va_list *args)
uint16_t memif_region_index_t
#define CLIB_CACHE_LINE_BYTES
memif_copy_op_t * copy_ops
u16 current_length
Number of bytes between current data and the end of this buffer.
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
description fragment has unexpected format
#define vec_add1_aligned(V, E, A)
Add 1 element to end of vector (alignment specified).
static clib_error_t * memif_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
static u32 vlib_get_n_threads()
static vlib_main_t * vlib_get_main(void)
static void memif_clear_hw_interface_counters(u32 instance)
#define MEMIF_RING_FLAG_MASK_INT
#define foreach_memif_tx_func_error
static_always_inline uword memif_interface_tx_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, memif_if_t *mif, memif_queue_t *mq, memif_per_thread_data_t *ptd, u32 n_left)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
memif_region_offset_t offset
static u8 * format_memif_tx_trace(u8 *s, va_list *args)
static clib_error_t * memif_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
vl_api_fib_path_type_t type
static vlib_error_desc_t memif_tx_func_error_counters[]
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.