FD.io VPP v21.10.1-2-g0a485f517
Vector Packet Processing
RDMA plugin: device output (TX) path excerpts.
#define RDMA_TX_RETRIES 5

#define RDMA_TXQ_DV_DSEG_SZ(txq) (RDMA_MLX5_WQE_DS * RDMA_TXQ_DV_SQ_SZ(txq))
#define RDMA_TXQ_DV_DSEG2WQE(d) (((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)
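RDMA_TXQ_DV_DSEG2WQE() is a ceiling division: it converts a count of data segments into the number of WQE basic blocks needed to hold them. A minimal self-contained check of that arithmetic, assuming RDMA_MLX5_WQE_DS == 4 (its definition is not part of this excerpt):

#include <assert.h>

/* Hedged sketch: ceiling division of data segments into WQE basic blocks,
 * assuming RDMA_MLX5_WQE_DS == 4 (four 16-byte dsegs per 64-byte WQE BB). */
#define RDMA_MLX5_WQE_DS 4
#define RDMA_TXQ_DV_DSEG2WQE(d) (((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)

int
main (void)
{
  assert (RDMA_TXQ_DV_DSEG2WQE (1) == 1);	/* 1 dseg still occupies a full BB */
  assert (RDMA_TXQ_DV_DSEG2WQE (4) == 1);	/* exactly one BB */
  assert (RDMA_TXQ_DV_DSEG2WQE (5) == 2);	/* spills into a second BB */
  return 0;
}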
  /* rdma_device_output_free_mlx5 (): walk the completion queue */
  struct mlx5_cqe64 *cqes = txq->dv_cq_cqes, *cur = cqes + (idx & cq_mask);
  /* ... */
  /* compare the CQE owner bit with the wrap parity of our consumer index
     and stop at the first entry not yet written by hardware */
  op_own = *(volatile u8 *) &cur->op_own;
  if (((idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK) !=
      (op_own & MLX5_CQE_OWNER_MASK) || (op_own >> 4) == MLX5_CQE_INVALID)
  /* ... */
  cur = cqes + (idx & cq_mask);
  /* ... */
  cur = cqes + ((idx - 1) & cq_mask);
  /* ... */
  /* map the newest completion back to its WQE via the big-endian wqe_counter */
  wqe = txq->dv_sq_wqes + (be16toh (cur->wqe_counter) & sq_mask);
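The owner-bit convention above is how an mlx5 consumer detects valid CQEs without a shared producer index: hardware flips the owner bit it writes on every wrap of the ring. A minimal self-contained sketch of the same test; the constant values mirror the mlx5 headers but are restated here as assumptions:

#include <stdint.h>

#define MLX5_CQE_OWNER_MASK 0x1	/* assumption: value as in the mlx5 headers */
#define MLX5_CQE_INVALID    0xf	/* assumption: value as in the mlx5 headers */

/* Return non-zero if the CQE at consumer index 'idx' of a ring with
 * 2^log2_cq_sz entries was written during the current pass and holds a
 * valid opcode. Illustrative only. */
static int
cqe_is_valid (uint8_t op_own, uint32_t idx, uint32_t log2_cq_sz)
{
  uint8_t sw_owner = (idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK;	/* wrap parity */
  uint8_t hw_owner = op_own & MLX5_CQE_OWNER_MASK;	/* bit written by HW */
  uint8_t opcode = op_own >> 4;	/* opcode lives in the high nibble */
  return sw_owner == hw_owner && opcode != MLX5_CQE_INVALID;
}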
static_always_inline void
rdma_device_output_tx_mlx5_doorbell (rdma_txq_t * txq, rdma_mlx5_wqe_t * last,
				     const u16 tail, u32 sq_mask)
{
  last->ctrl.imm = tail;	/* register item to free on completion */
  last->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;	/* request a CQE so buffers can be freed */
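The function presumably finishes by ringing the doorbell. A hedged sketch of that sequence: dv_sq_dbrec appears in the symbol list below, while dv_sq_db and MLX5_SND_DBR are assumptions borrowed from the usual mlx5 direct-verbs queue layout.

  /* Hedged sketch: make the WQE and ctrl-segment stores globally visible
   * before updating the doorbell record, then keep the MMIO doorbell write
   * ordered after it. */
  CLIB_MEMORY_STORE_BARRIER ();
  txq->dv_sq_dbrec[MLX5_SND_DBR] = htobe32 (tail);	/* MLX5_SND_DBR: assumption */
  CLIB_COMPILER_BARRIER ();
  txq->dv_sq_db[0] = *(u64 *) (txq->dv_sq_wqes + (tail & sq_mask));	/* dv_sq_db: assumption */
}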
  wqe->eseg.inline_hdr_sz = htobe16 (sz);
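That store comes from rdma_mlx5_wqe_init() (signature in the symbol list below). A plausible reconstruction of its logic, hedged since only the one line above survives in the excerpt: the WQE is copied from a pre-built template, the first MLX5_ETH_L2_INLINE_HEADER_SIZE bytes of the frame are inlined into the Ethernet segment, and runt packets shorter than that shrink inline_hdr_sz to the actual length. RDMA_MLX5_WQE_SZ, the inline_hdr_start field, and the dseg handling are assumptions, not the verbatim implementation.

/* Hedged sketch of rdma_mlx5_wqe_init(): initialize one WQE from a template
 * and inline the start of the packet into the Ethernet segment. */
static_always_inline void
rdma_mlx5_wqe_init (rdma_mlx5_wqe_t * wqe, const void *tmpl,
		    vlib_buffer_t * b, const u16 tail)
{
  u16 sz = b->current_length;
  uword addr = pointer_to_uword (vlib_buffer_get_current (b));

  clib_memcpy_fast (wqe, tmpl, RDMA_MLX5_WQE_SZ);	/* RDMA_MLX5_WQE_SZ: assumption */
  clib_memcpy_fast (wqe->eseg.inline_hdr_start,	/* inline_hdr_start: assumption */
		    vlib_buffer_get_current (b),
		    MLX5_ETH_L2_INLINE_HEADER_SIZE);
  if (sz >= MLX5_ETH_L2_INLINE_HEADER_SIZE)
    {
      /* point the data segment at the non-inlined remainder */
      wqe->dseg.byte_count = htobe32 (sz - MLX5_ETH_L2_INLINE_HEADER_SIZE);
      wqe->dseg.addr = htobe64 (addr + MLX5_ETH_L2_INLINE_HEADER_SIZE);
    }
  else
    wqe->eseg.inline_hdr_sz = htobe16 (sz);	/* runt: fully inlined */
}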
  const u32 lkey = clib_host_to_net_u32 (rd->lkey);
  /* ... */
  /* one WQE per packet: stop when out of packets or SQ slots */
  while (n >= 1 && wqe_n >= 1)
    {
      /* ... */
      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
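A buffer with VLIB_BUFFER_NEXT_PRESENT cannot be described by the single data segment of the fast path, so the batch is handed to the chained variant. A hedged sketch of that hand-off, built from the rdma_device_output_tx_mlx5_chained() signature in the symbol list; the return handling is an assumption:

	{
	  /* delegate the remaining buffers to the chained-buffer path,
	     which emits one data segment per chain element */
	  return rdma_device_output_tx_mlx5_chained (vm, node, rd, txq,
						     n_left_from, bi, b, tail);
	}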
#define RDMA_MLX5_WQE_DS_MAX (1 << 5)

  /* RDMA_MLX5_WQE_DS data segments must fill exactly one 64-byte basic block */
  STATIC_ASSERT (RDMA_MLX5_WQE_DS * sizeof (struct mlx5_wqe_data_seg) ==
		 MLX5_SEND_WQE_BB, "wrong size");
      /* walk the chain: one data segment per buffer segment, bounded by
	 dseg_max */
      while (chained_n < dseg_max
	     && chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  struct mlx5_wqe_data_seg *dseg = (void *) txq->dv_sq_wqes;
	  /* ... */
	  chained_b->flags &= ~(VLIB_BUFFER_NEXT_PRESENT |
				VLIB_BUFFER_TOTAL_LENGTH_VALID);
	  /* ... */
	}

      if (chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  /* chain could not be fully consumed: count why and drop */
	  vlib_error_count (vm, node->node_index,
			    dseg_max == chained_n ?
			    RDMA_TX_ERROR_SEGMENT_SIZE_EXCEEDED :
			    RDMA_TX_ERROR_NO_FREE_SLOTS, 1);
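The elided loop body presumably writes one data segment per chain element into the data-segment ring, wrapping with dseg_mask. A hedged sketch of one iteration; the index arithmetic is an assumption, while byte_count, lkey, and addr are the standard struct mlx5_wqe_data_seg fields:

	  /* hedged sketch of one iteration of the elided loop body */
	  struct mlx5_wqe_data_seg *dseg = (void *) txq->dv_sq_wqes;
	  dseg += ((tail + wqe_n) * RDMA_MLX5_WQE_DS + chained_n) & dseg_mask; /* assumption */
	  dseg->byte_count = htobe32 (chained_b->current_length);
	  dseg->lkey = lkey;	/* already big-endian (see the lkey setup above) */
	  dseg->addr = htobe64 (vlib_buffer_get_current_va (chained_b));
	  chained_b = vlib_get_buffer (vm, chained_b->next_buffer);
	  chained_n++;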
  tail = wc[n - 1].wr_id;	/* wr_id of the newest good completion */
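In the ibverb completion handler, the wr_id registered at send time carries the ring tail, so everything between head and tail can be retired at once. A hedged sketch of the free that presumably follows, built from the ring helpers in the symbol list below; the mask over txq->bufs is an assumption:

  /* free all buffers completed up to 'tail', then advance the ring head */
  vlib_buffer_free_from_ring (vm, txq->bufs, txq->head & mask,
			      RDMA_TXQ_BUF_SZ (txq),
			      RDMA_TXQ_USED_SZ (txq->head, tail));
  txq->head = tail;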
      /* 4x unrolled: one single-SGE send work request per buffer, each WR
	 linked to the next (SGE addr/length and num_sge elided in excerpt) */
      s[0].lkey = rd->lkey;
      s[1].lkey = rd->lkey;
      s[2].lkey = rd->lkey;
      s[3].lkey = rd->lkey;

      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].opcode = IBV_WR_SEND;

      w[1].next = &w[1] + 1;
      w[1].sg_list = &s[1];
      w[1].opcode = IBV_WR_SEND;

      w[2].next = &w[2] + 1;
      w[2].sg_list = &s[2];
      w[2].opcode = IBV_WR_SEND;

      w[3].next = &w[3] + 1;
      w[3].sg_list = &s[3];
      w[3].opcode = IBV_WR_SEND;
      /* remainder loop: same setup, one buffer at a time */
      s[0].lkey = rd->lkey;
      /* ... */
      w[0].next = &w[0] + 1;
      w[0].sg_list = &s[0];
      w[0].opcode = IBV_WR_SEND;
      /* ... */

  /* fix up the last WR of the chain: it carries the ring tail in wr_id and
     is the only signaled WR, so a single CQE retires the whole batch */
  w[-1].wr_id = txq->tail;
  /* ... */
  w[-1].send_flags = IBV_SEND_SIGNALED;
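Signaling only the last WR keeps completion overhead at one CQE per batch. A hedged sketch of the final submission step: the chain is terminated and handed to the standard verb ibv_post_send(); 'wr' (the first WR of the batch), txq->ibv_qp, and the SUBMISSION error counter are assumptions.

  struct ibv_send_wr *w_bad;
  w[-1].next = 0;	/* assumption: terminate the WR linked list */
  if (ibv_post_send (txq->ibv_qp, wr, &w_bad))
    {
      /* w_bad points at the first WR the provider refused */
      vlib_error_count (vm, node->node_index, RDMA_TX_ERROR_SUBMISSION, 1);
    }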
  for (i = 0; i < RDMA_TX_RETRIES && n_left_from > 0; i++)
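This retry loop lives in rdma_device_output_tx(). A hedged sketch of its structure, assembled from the rdma_device_output_tx() and rdma_device_output_tx_try() signatures in the symbol list below; the txq->lock member and the exact return convention are assumptions:

/* Hedged sketch: retry the batch a bounded number of times, shrinking it by
 * whatever each attempt managed to enqueue. */
static uword
rdma_device_output_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
		       rdma_device_t * rd, rdma_txq_t * txq,
		       u32 * from, u32 n_left_from)
{
  const u32 n_total = n_left_from;
  int i;

  clib_spinlock_lock_if_init (&txq->lock);	/* assumption: per-queue lock */
  for (i = 0; i < RDMA_TX_RETRIES && n_left_from > 0; i++)
    {
      /* each attempt frees completed slots and enqueues what fits */
      u32 n_enq = rdma_device_output_tx_try (vm, node, rd, txq,
					     n_left_from, from);
      n_left_from -= n_enq;
      from += n_enq;
    }
  clib_spinlock_unlock_if_init (&txq->lock);

  return n_total - n_left_from;	/* assumption: number of packets enqueued */
}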
Symbols referenced above:
VNET_DEVICE_CLASS_TX_FN (rdma_device_class) (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static void rdma_device_output_free(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq)
u32 next_buffer
Next buffer for this linked-list of buffers.
static_always_inline void rdma_device_output_tx_mlx5_doorbell(rdma_txq_t *txq, rdma_mlx5_wqe_t *last, const u16 tail, u32 sq_mask)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define RDMA_MLX5_WQE_DS_MAX
#define RDMA_TXQ_DV_DSEG_SZ(txq)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword pow2_mask(uword x)
static uword pointer_to_uword(const void *p)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
vlib_get_buffers(vm, from, b, n_left_from)
vlib_main_t *vm, vlib_node_runtime_t *node
static void vlib_buffer_copy_indices_to_ring(u32 *ring, u32 *src, u32 start, u32 ring_size, u32 n_buffers)
volatile u32 * dv_sq_dbrec
vlib_main_t * vm
struct mlx5_wqe_data_seg dseg
#define RDMA_TXQ_AVAIL_SZ(txq, head, tail)
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define RDMA_TXQ_DV_DSEG2WQE(d)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
volatile u32 * dv_cq_dbrec
#define RDMA_TXQ_USED_SZ(head, tail)
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
#define CLIB_COMPILER_BARRIER()
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define CLIB_MEMORY_STORE_BARRIER()
#define RDMA_TXQ_BUF_SZ(txq)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define static_always_inline
static_always_inline void rdma_mlx5_wqe_init(rdma_mlx5_wqe_t *wqe, const void *tmpl, vlib_buffer_t *b, const u16 tail)
static_always_inline u32 rdma_device_output_tx_mlx5(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, const u32 n_left_from, const u32 *bi, vlib_buffer_t **b)
#define STATIC_ASSERT(truth,...)
static_always_inline void rdma_device_output_free_ibverb(vlib_main_t *vm, const vlib_node_runtime_t *node, rdma_txq_t *txq)
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
static_always_inline void rdma_device_output_free_mlx5(vlib_main_t *vm, const vlib_node_runtime_t *node, rdma_txq_t *txq)
u16 current_length
Nbytes between current data and the end of this buffer.
struct mlx5_wqe_eth_seg eseg
#define MLX5_ETH_L2_INLINE_HEADER_SIZE
rdma_mlx5_wqe_t * dv_sq_wqes
#define RDMA_TXQ_DV_SQ_SZ(txq)
static_always_inline u32 rdma_device_output_tx_ibverb(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, u32 n_left_from, u32 *bi, vlib_buffer_t **b)
static uword rdma_device_output_tx(vlib_main_t *vm, vlib_node_runtime_t *node, rdma_device_t *rd, rdma_txq_t *txq, u32 *from, u32 n_left_from)
static uword vlib_buffer_get_current_va(vlib_buffer_t *b)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
struct mlx5_wqe_ctrl_seg ctrl
struct mlx5_cqe64 * dv_cq_cqes
static_always_inline u32 rdma_device_output_tx_mlx5_chained(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, const u32 n_left_from, const u32 *bi, vlib_buffer_t **b, u16 tail)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
#define RDMA_TXQ_DV_INVALID_ID
#define STRUCT_SIZE_OF(t, f)
static u32 rdma_device_output_tx_try(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, u32 n_left_from, u32 *bi)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.