FD.io VPP v21.10.1-2-g0a485f517 (Vector Packet Processing)
Partial annotated source listing of the rdma plugin input node ("rdma-input"). Original source line numbers are kept to show where each fragment sits in the file.
27 #define foreach_rdma_input_error \
28 _(BUFFER_ALLOC, "buffer alloc error")
32 #define _(f,s) RDMA_INPUT_ERROR_##f,
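The two _() definitions above follow the standard VPP error-counter pattern: the same foreach macro is expanded once to build an error enum and once to build the matching string table. A minimal sketch of the conventional expansion (the enum, typedef and table names below are assumptions based on that convention, not quotes from the elided lines):

    typedef enum
    {
    #define _(f, s) RDMA_INPUT_ERROR_##f,
      foreach_rdma_input_error
    #undef _
        RDMA_INPUT_N_ERROR,
    } rdma_input_error_t;

    static char *rdma_input_error_strings[] = {
    #define _(n, s) s,
      foreach_rdma_input_error
    #undef _
    };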
50 s[0].length = data_size;
63 u32 first_slot, u32 n_alloc)
70 for (i = 0; i < n_alloc; i++)
75 rxq->wqes + ((first_slot + i) << log_wqe_sz);
88 for (int j = 0; j < chain_sz - 1; j++)
92 bufs[j]->flags |= VLIB_BUFFER_NEXT_PRESENT;
97 if (chain_sz < rxq->n_ds_per_wqe - 1)
100 bufs[chain_sz - 1]->flags |= VLIB_BUFFER_NEXT_PRESENT;
104 bufs[chain_sz - 1]->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
108 for (int j = 0; j < chain_sz; j++)
114 current_wqe[j + 1].addr = clib_host_to_net_u64 (addr);
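The loops above link the buffers that back one multi-segment WQE through next_buffer and then store each buffer's DMA address into the WQE data segments in network byte order. A self-contained sketch of the chaining half, using only the generic vlib buffer APIs (not code from this file):

    #include <vlib/vlib.h>

    /* Link n buffers head-to-tail; links that are in use keep
       VLIB_BUFFER_NEXT_PRESENT set and the last buffer terminates the chain. */
    static void
    chain_buffers (vlib_main_t *vm, u32 *bi, int n)
    {
      for (int j = 0; j < n - 1; j++)
        {
          vlib_buffer_t *b = vlib_get_buffer (vm, bi[j]);
          b->next_buffer = bi[j + 1];
          b->flags |= VLIB_BUFFER_NEXT_PRESENT;
        }
      vlib_get_buffer (vm, bi[n - 1])->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
    }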
126 const int is_mlx5dv, const int is_striding)
151 rxq->tail += n_completed;
154 if (n_completed != n_incomplete)
159 u32 alloc_multiple = 1 << (clib_max (3, log_stride_per_wqe));
166 if (n_alloc < 2 * alloc_multiple)
172 n_alloc &= ~(alloc_multiple - 1);
180 if (n < alloc_multiple)
188 n_free = n & (alloc_multiple - 1);
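alloc_multiple is always a power of two (at least 8), so the refill size can be kept aligned with plain mask arithmetic. A worked example with illustrative values (not from the listing):

    /* With log_stride_per_wqe = 2: alloc_multiple = 1 << clib_max (3, 2) = 8. */
    u32 alloc_multiple = 8;
    u32 n_alloc = 61;
    n_alloc &= ~(alloc_multiple - 1);       /* 61 & ~7 = 56: rounded down to a multiple of 8 */
    u32 n_rem = 61 & (alloc_multiple - 1);  /* 61 &  7 =  5: remainder below one multiple    */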
199 u64 __clib_aligned (32) va[8];
203 u32 wqes_slot = slot << (log_wqe_sz - log_stride_per_wqe);
206 const int wqe_sz = 1 << log_wqe_sz;
207 const int stride_per_wqe = 1 << log_stride_per_wqe;
208 int current_data_seg = 0;
212 const int log_skip_wqe = is_striding ? 0 : log_wqe_sz;
218 #ifdef CLIB_HAVE_VEC256
222 for (int i = 0; i < 8; i++)
223 va[i] = clib_host_to_net_u64 (va[i]);
232 if (is_striding && !(current_data_seg & (wqe_sz - 1)))
236 .next_wqe_index = clib_host_to_net_u16 (((wqes_slot >> log_wqe_sz) + 1) & (wqe_cnt - 1)),
244 if (!is_striding || !(current_data_seg & ~(stride_per_wqe - 1)))
246 wqe[(0 << log_skip_wqe) + is_striding].addr = va[0];
247 wqe[(1 << log_skip_wqe) + is_striding].addr = va[1];
248 wqe[(2 << log_skip_wqe) + is_striding].addr = va[2];
249 wqe[(3 << log_skip_wqe) + is_striding].addr = va[3];
250 wqe[(4 << log_skip_wqe) + is_striding].addr = va[4];
251 wqe[(5 << log_skip_wqe) + is_striding].addr = va[5];
252 wqe[(6 << log_skip_wqe) + is_striding].addr = va[6];
253 wqe[(7 << log_skip_wqe) + is_striding].addr = va[7];
257 wqe += 8 << log_skip_wqe;
258 wqes_slot += 8 << log_skip_wqe;
259 current_data_seg += 8;
260 current_data_seg &= wqe_sz - 1;
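The eight unrolled stores above are equivalent to the loop below. With a striding RQ, log_skip_wqe is 0, so all eight addresses fill consecutive data segments of the current WQE (the + is_striding offset appears to step past the leading next-WQE segment written a few lines earlier); without striding, log_skip_wqe equals log_wqe_sz, so each address lands in the single data segment of a separate WQE. Sketch only, restating the listing:

    for (int k = 0; k < 8; k++)
      wqe[(k << log_skip_wqe) + is_striding].addr = va[k];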
266 int first_slot = slot - n_alloc;
275 rxq->tail += n_alloc;
279 rxq->wq_db[MLX5_RCV_DBR] =
283 rxq->wq_db[MLX5_RCV_DBR] = clib_host_to_net_u32 (rxq->tail);
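Both branches end by publishing the new producer tail through the receive-queue doorbell record. A minimal sketch of the usual ordering around such a write (field and macro names follow the listing; the exact barrier placement in the elided lines is an assumption):

    /* WQE and buffer writes must be globally visible before the NIC can
       observe the updated tail in the doorbell record. */
    CLIB_MEMORY_STORE_BARRIER ();
    rxq->wq_db[MLX5_RCV_DBR] = clib_host_to_net_u32 (rxq->tail);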
315 if (ibv_post_wq_recv (rxq->wq, wr, &w) != 0)
345 tr->cqe_flags = is_mlx5dv ? clib_net_to_host_u16 (cqe_flags[0]) : 0;
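The tr->cqe_flags store is part of per-packet tracing. A sketch of the surrounding pattern used by VPP input nodes (the trace struct name rdma_input_trace_t is an assumption; vlib_trace_buffer and vlib_add_trace are the generic tracing APIs):

    if (PREDICT_FALSE (vlib_trace_buffer (vm, node, next_index, b,
                                          /* follow_chain */ 0)))
      {
        rdma_input_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
        tr->cqe_flags = is_mlx5dv ? clib_net_to_host_u16 (cqe_flags[0]) : 0;
      }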
435 u32 mcqe_array_index = (cq_ci + 1) & mask;
438 mcqe_array_index = cq_ci;
446 mcqe_array_index = (mcqe_array_index + n) & mask;
454 for (int i = 0; i < n; i++)
455 byte_cnt[i] = mcqe[skip + i].byte_count;
456 mcqe_array_index = (mcqe_array_index + 8) & mask;
466 for (int i = 0; i < 8; i++)
467 byte_cnt[i] = mcqe[i].byte_count;
471 mcqe_array_index = (mcqe_array_index + 8) & mask;
478 byte_cnt[i] = mcqe[i].byte_count;
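A compressed completion consists of one full CQE followed by an array of mini CQEs that carry little more than a byte count (plus consumed-stride information on a striding RQ); the loops above copy those byte counts into a flat array, wrapping the mini-CQE index with the CQ size mask. A toy illustration of that copy (the struct here is hypothetical and far simpler than the real mlx5 layout):

    typedef struct
    {
      u32 byte_count;   /* big-endian on the wire, converted to host order later */
    } mini_cqe_t;

    static void
    copy_byte_counts (mini_cqe_t *mcqe, u32 *byte_cnt, int n)
    {
      for (int i = 0; i < n; i++)
        byte_cnt[i] = mcqe[i].byte_count;
    }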
519 owner = 0xf0 | ((cq_ci >> log2_cq_size) & 1);
521 if (offset + n_mini_cqes < cq_size)
536 u32 * byte_cnt, u16 * cqe_flags)
538 u32 n_rx_packets = 0;
549 n_mini_cqes_left, cq_ci, mask, byte_cnt);
553 n_rx_packets = n_mini_cqes_left;
554 byte_cnt += n_mini_cqes_left;
555 cqe_flags += n_mini_cqes_left;
557 rxq->cq_ci = cq_ci = cq_ci + n_mini_cqes;
562 u8 cqe_last_byte, owner;
567 owner = (cq_ci >> log2_cq_size) & 1;
570 if ((cqe_last_byte & 0x1) != owner)
573 cqe_last_byte &= 0xfc;
575 if (cqe_last_byte == 0x2c)
581 if (n_left >= n_mini_cqes)
587 n_rx_packets += n_mini_cqes;
588 byte_cnt += n_mini_cqes;
589 cqe_flags += n_mini_cqes;
590 cq_ci += n_mini_cqes;
605 if (cqe_last_byte == 0x20)
608 cqe_flags[0] = cqe->flags;
615 rd->flags |= RDMA_DEVICE_F_ERROR;
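The poll loop above relies on the CQE ownership bit: the hardware flips the owner bit in the last byte of each CQE every time it wraps the completion ring, so software owns an entry only when that bit matches the wrap parity of its consumer index. Restated as a sketch:

    u8 owner = (cq_ci >> log2_cq_size) & 1;  /* expected owner bit for this wrap of the ring */
    if ((cqe_last_byte & 0x1) != owner)
      break;                                 /* CQE not yet written back by hardware */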
631 for (int i = 0; i < n_rx_packets; i++)
638 return n_rx_packets != *n_rx_segs || filler;
645 #if defined CLIB_HAVE_VEC256
646 u32x8 thresh8 = u32x8_splat (buf_sz);
647 for (int i = 0; i < n_rx_packets; i += 8)
648 if (!u32x8_is_all_zero (*(u32x8 *) (bc + i) > thresh8))
650 #elif defined CLIB_HAVE_VEC128
651 u32x4 thresh4 = u32x4_splat (buf_sz);
652 for (int i = 0; i < n_rx_packets; i += 4)
653 if (!u32x4_is_all_zero (*(u32x4 *) (bc + i) > thresh4))
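Both vector variants answer the same question: does any completion report more bytes than one buffer can hold, in which case the multi-segment slow path is required. A scalar equivalent of the check (sketch, assuming bc[] holds the same per-packet values the vector code compares):

    int slow_path_needed = 0;
    for (int i = 0; i < n_rx_packets; i++)
      if (bc[i] > buf_sz)
        {
          slow_path_needed = 1;
          break;
        }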
670 * ptd, int n_rx_packets, u32 * bc)
677 int skip_ip4_cksum = 1;
679 #if defined CLIB_HAVE_VEC256
681 u16x16 match16 = u16x16_splat (match);
684 for (int i = 0; i * 16 < n_rx_packets; i++)
687 if (!u16x16_is_all_zero (r))
690 for (int i = 0; i < n_rx_packets; i += 8)
692 #elif defined CLIB_HAVE_VEC128
694 u16x8 match8 = u16x8_splat (match);
697 for (int i = 0; i * 8 < n_rx_packets; i++)
700 if (!u16x8_is_all_zero (r))
703 for (int i = 0; i < n_rx_packets; i += 4)
706 for (int i = 0; i < n_rx_packets; i++)
710 for (int i = 0; i < n_rx_packets; i++)
711 bc[i] = clib_net_to_host_u32 (bc[i]);
713 return skip_ip4_cksum;
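The function returns 1 only if every CQE reports an IPv4 L3 header, which lets the node mark the whole frame as having valid IPv4 checksums (presumably via ETH_INPUT_FRAME_F_IP4_CKSUM_OK) so ethernet-input can skip verification. A scalar sketch of the test; the mask and shift names come from this file's defines, but how 'match' is composed here is an assumption:

    u16 match = CQE_FLAG_L3_HDR_TYPE_IP4 << CQE_FLAG_L3_HDR_TYPE_SHIFT;  /* assumed composition */
    int skip_ip4_cksum = 1;
    for (int i = 0; i < n_rx_packets; i++)
      if ((cqe_flags[i] & CQE_FLAG_L3_HDR_TYPE_MASK) != match)
        skip_ip4_cksum = 0;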
729 rxq->head += n_rx_segs;
770 u32 n_wrap_around = (slot + n) & (qs_mask + 1) ? (slot + n) & qs_mask : 0;
777 if (total_length > buf_sz)
780 u8 current_chain_sz = 0;
782 total_length -= buf_sz;
784 current_buf->flags |= VLIB_BUFFER_NEXT_PRESENT;
793 while (total_length > 0);
794 current_buf->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
798 n_used_per_chain[0] = current_chain_sz;
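On a striding RQ a single completion can cover more bytes than one buffer holds, so the do/while above keeps attaching buffers until the byte count is exhausted. The resulting chain length is just a ceiling division (illustrative arithmetic, not code from the listing):

    /* Buffers consumed by one packet of total_length bytes, buf_sz bytes each. */
    u32 n_segs = (total_length + buf_sz - 1) / buf_sz;
    /* e.g. total_length = 9000, buf_sz = 2048  ->  n_segs = 5 */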
820 int n_rx_segs, int *n_rx_packets,
821 u32 * bc, int slow_path_needed)
838 uword n_segs_remaining = 0;
839 u32 n_bytes_remaining = 0;
840 u32 *next_in_frame = to_next;
858 if (!n_segs_remaining)
873 pkt_prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
874 pkt[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
876 if (n_segs_remaining == 1)
881 (next_to_free++)[0] = pkt_head_idx;
887 (next_in_frame++)[0] = pkt_head_idx;
906 while (n_rx_segs > 0);
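In this slow path, segment buffers are stitched into packets and the head index of each finished packet goes either into the frame handed to the next node or onto a to-free list; the latter is released in one batch. A sketch of that bookkeeping (the keep/drop predicate and the local pointers are hypothetical):

    if (keep_packet)                        /* hypothetical predicate */
      (next_in_frame++)[0] = pkt_head_idx;
    else
      (next_to_free++)[0] = pkt_head_idx;

    /* after the loop, release everything queued for dropping in one call */
    vlib_buffer_free (vm, to_free_buffers, next_to_free - to_free_buffers);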
916 u16 qid, const int use_mlx5dv)
927 int n_rx_packets, skip_ip4_cksum = 0;
929 const int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);
955 int slow_path_needed;
977 vm, rxq, bufs, mask, &bt, to_next, n_rx_packets, bc, ~0);
991 rxq->size, n_rx_packets);
993 rxq->head += n_rx_packets;
1007 combined_sw_if_counters +
1010 n_rx_packets, n_rx_bytes);
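The combined_sw_if_counters fragment above is the tail of the standard VPP receive accounting. A sketch of the full call as it is typically written (the exact sw_if_index and thread-index arguments used here are assumptions):

    vlib_increment_combined_counter (
      vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
      vlib_get_thread_index (), rd->sw_if_index, n_rx_packets, n_rx_bytes);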
1013 return n_rx_packets;
1046 .name = "rdma-input",
1048 .sibling_of = "device-input",
1051 .state = VLIB_NODE_STATE_DISABLED,
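Only three initializers of the node registration survive in this excerpt. For orientation, here is a sketch of how a complete registration for this node conventionally looks; every field not shown above (type, trace formatter, error strings, flags) is filled in from the usual VPP pattern and from identifiers visible earlier in this file, so treat it as an assumption rather than a quote:

    VLIB_REGISTER_NODE (rdma_input_node) = {
      .name = "rdma-input",
      .sibling_of = "device-input",
      .type = VLIB_NODE_TYPE_INPUT,            /* assumed */
      .state = VLIB_NODE_STATE_DISABLED,
      .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED, /* assumed */
      .format_trace = format_rdma_input_trace, /* assumed */
      .n_errors = RDMA_INPUT_N_ERROR,          /* assumed enum name */
      .error_strings = rdma_input_error_strings,
    };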