FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Source listing excerpts: RDMA plugin TX path (plugins/rdma/output.c).
Compile-time knobs and WQE bookkeeping macros from the top of the file:

#define RDMA_TX_RETRIES 5

#define RDMA_TXQ_DV_DSEG_SZ(txq) (RDMA_MLX5_WQE_DS * RDMA_TXQ_DV_SQ_SZ(txq))
#define RDMA_TXQ_DV_DSEG2WQE(d) (((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)
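RDMA_TXQ_DV_DSEG2WQE() is the usual round-up (ceiling) division: it converts a
count of 16-byte data segments into the number of whole WQE slots they occupy.
A minimal standalone check of that arithmetic, assuming RDMA_MLX5_WQE_DS is 4
(four data segments per 64-byte WQE basic block, as the STATIC_ASSERT further
down implies):

#include <assert.h>

#define RDMA_MLX5_WQE_DS 4	/* assumption: 4 x 16B dsegs per 64B WQE BB */
#define RDMA_TXQ_DV_DSEG2WQE(d) (((d) + RDMA_MLX5_WQE_DS - 1) / RDMA_MLX5_WQE_DS)

int
main (void)
{
  assert (RDMA_TXQ_DV_DSEG2WQE (1) == 1);	/* a lone dseg still costs a WQE */
  assert (RDMA_TXQ_DV_DSEG2WQE (4) == 1);	/* exact fit */
  assert (RDMA_TXQ_DV_DSEG2WQE (5) == 2);	/* rounds up, never truncates */
  return 0;
}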
In rdma_device_output_free_mlx5 (), TX completions are harvested by scanning
the completion queue ring in place:

struct mlx5_cqe64 *cqes = txq->dv_cq_cqes, *cur = cqes + (idx & cq_mask);
/* ... */
op_own = *(volatile u8 *) &cur->op_own;
if (((idx >> log2_cq_sz) & MLX5_CQE_OWNER_MASK) !=
    (op_own & MLX5_CQE_OWNER_MASK) || (op_own >> 4) == MLX5_CQE_INVALID)
  break;			/* CQE not (yet) written by hardware */
/* ... */
cur = cqes + (idx & cq_mask);
/* ... */
cur = cqes + ((idx - 1) & cq_mask);	/* last CQE actually consumed */
/* ... */
wqe = txq->dv_sq_wqes + (be16toh (cur->wqe_counter) & sq_mask);
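The owner-bit test above is how mlx5 software distinguishes fresh completions
from stale ring contents: hardware flips the ownership bit each time it wraps
the CQ, so the consumer compares the bit expected for the current pass against
the one stored in the CQE. A minimal standalone sketch of just that test (the
invalid-opcode check is omitted, and cqe_t plus the parameter names are
hypothetical stand-ins for the txq fields above):

#include <stdint.h>

#define OWNER_MASK 0x1	/* assumption: mirrors MLX5_CQE_OWNER_MASK */

/* Hypothetical CQE: only the ownership byte matters for this test. */
typedef struct { volatile uint8_t op_own; } cqe_t;

/* Return 1 when the CQE at idx was written by hardware on the current
   pass over the ring, 0 when it is left over from a previous pass. */
static int
cqe_is_valid (const cqe_t *cq, uint32_t log2_sz, uint32_t idx)
{
  uint32_t mask = (1u << log2_sz) - 1;
  uint8_t op_own = cq[idx & mask].op_own;
  /* bit 0 of (idx >> log2_sz) is the owner value expected on this wrap */
  return ((idx >> log2_sz) & OWNER_MASK) == (op_own & OWNER_MASK);
}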
The doorbell helper stamps the last WQE of a batch and rings the hardware:

static_always_inline void
rdma_device_output_tx_mlx5_doorbell (rdma_txq_t * txq, rdma_mlx5_wqe_t * last,
				     const u16 tail, u32 sq_mask)
{
  last->ctrl.imm = tail;	/* stored so completion handling can free up to tail */
  last->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;	/* request a CQE for this WQE */
  /* ... store barrier, then doorbell record / doorbell register updates ... */
}
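The elided remainder must enforce ordering: all WQE stores have to be visible
before the doorbell record update, and the record update before the MMIO
doorbell write, which is why the file uses CLIB_MEMORY_STORE_BARRIER() and
CLIB_COMPILER_BARRIER() (both appear in the reference list below). A hedged
sketch of that classic sequence, with hypothetical names (sq_dbrec, sq_db,
wqe64) standing in for the txq->dv_sq_* fields:

#include <stdint.h>
#include <endian.h>

static inline void
ring_sq_doorbell (volatile uint32_t *sq_dbrec, volatile uint64_t *sq_db,
		  const uint64_t *wqe64, uint16_t tail)
{
  /* 1. make all prior WQE stores visible before the doorbell record update */
  __atomic_thread_fence (__ATOMIC_RELEASE);
  *sq_dbrec = htobe32 (tail);	/* doorbell record: new producer index */
  /* 2. keep the compiler from reordering the MMIO write above the record */
  __asm__ volatile ("" ::: "memory");
  *sq_db = *wqe64;		/* doorbell: first 8 bytes of the last WQE */
}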
In rdma_mlx5_wqe_init (), the start of the packet is inlined into the WQE's
Ethernet segment and its length advertised:

wqe->eseg.inline_hdr_sz = htobe16 (sz);
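mlx5 requires at least the start of the L2 header to be inlined in the WQE's
eth segment instead of being fetched by DMA. A simplified sketch of the
copy-and-advertise step; eseg_t is a stand-in for struct mlx5_wqe_eth_seg, and
the 18-byte capacity (Ethernet header plus one VLAN tag) is an assumption
mirroring MLX5_ETH_L2_INLINE_HEADER_SIZE:

#include <string.h>
#include <stdint.h>
#include <endian.h>

/* Simplified stand-in for struct mlx5_wqe_eth_seg. */
typedef struct
{
  uint16_t inline_hdr_sz;	/* big endian on the wire */
  uint8_t inline_hdr[18];	/* assumption: L2 header + VLAN tag */
} eseg_t;

static void
eseg_inline_l2 (eseg_t *eseg, const void *pkt, uint16_t sz)
{
  /* sz must not exceed sizeof (eseg->inline_hdr) */
  memcpy (eseg->inline_hdr, pkt, sz);
  eseg->inline_hdr_sz = htobe16 (sz);	/* advertise the inlined length */
}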
rdma_device_output_tx_mlx5_chained () handles vectors that contain chained
buffers: it converts the memory region key once, then consumes one buffer and
at least one WQE per iteration:

const u32 lkey = clib_host_to_net_u32 (rd->lkey);
/* ... */
while (n >= 1 && wqe_n >= 1)
  {
    /* ... */
    if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
      {
	/* chained buffer: emit one data segment per chain element */
A chain may use at most RDMA_MLX5_WQE_DS_MAX data segments:

#define RDMA_MLX5_WQE_DS_MAX (1 << 5)

	STATIC_ASSERT (RDMA_MLX5_WQE_DS * sizeof (struct mlx5_wqe_data_seg) ==
		       MLX5_SEND_WQE_BB, "wrong size");
The per-chain loop then emits one data segment per buffer in the chain:

	  while (chained_n < dseg_max
		 && chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      struct mlx5_wqe_data_seg *dseg = (void *) txq->dv_sq_wqes;
	      /* ... point dseg at the next free slot, fill it from chained_b ... */
	      chained_b->flags &= ~(VLIB_BUFFER_NEXT_PRESENT |
				    VLIB_BUFFER_TOTAL_LENGTH_VALID);
	      /* ... advance to the next buffer in the chain ... */
	    }
After the loop, a still-set NEXT_PRESENT flag means the chain could not be
fully queued, and the failure is counted by cause:

	  if (chained_b->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      /* ... */
	      vlib_error_count (vm, node->node_index,
				dseg_max == chained_n ?
				RDMA_TX_ERROR_SEGMENT_SIZE_EXCEEDED :
				RDMA_TX_ERROR_NO_FREE_SLOTS, 1);
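The walk over chained_b above is the generic vlib pattern for buffer chains:
follow next_buffer while VLIB_BUFFER_NEXT_PRESENT is set, bounded here by the
data segments still available. A hedged standalone sketch (buf_t and the pool
lookup are simplified stand-ins for vlib_buffer_t and vlib_get_buffer ()):

#include <stdint.h>

#define NEXT_PRESENT (1u << 0)	/* assumption: stands in for VLIB_BUFFER_NEXT_PRESENT */

typedef struct buf { uint32_t flags; uint32_t next_buffer; } buf_t;

static uint32_t
chain_dseg_count (buf_t *pool, buf_t *b, uint32_t dseg_max)
{
  uint32_t n = 1;
  while (n < dseg_max && (b->flags & NEXT_PRESENT))
    {
      b = &pool[b->next_buffer];	/* follow the linked list of buffers */
      n++;
    }
  return n;	/* == dseg_max means the chain may not fit in one WQE set */
}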
In the plain ibverbs variant, rdma_device_output_free_ibverb () recovers the
ring tail from the last completed (signaled) work request:

tail = wc[n - 1].wr_id;
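On the ibverbs path completions are drained with ibv_poll_cq(), and because
only the last work request of each batch is signaled (see below), the wr_id of
the newest completion is enough to free every buffer up to that point. A
hedged sketch, assuming cq is the send CQ created for this queue:

#include <stdint.h>
#include <infiniband/verbs.h>

static int
tx_cq_poll_tail (struct ibv_cq *cq, uint16_t *tail_out)
{
  struct ibv_wc wc[32];
  int n = ibv_poll_cq (cq, 32, wc);
  if (n <= 0)
    return n;			/* nothing completed (or error if < 0) */
  /* the producer stored the ring tail in wr_id of each signaled WR */
  *tail_out = (uint16_t) wc[n - 1].wr_id;
  return n;
}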
rdma_device_output_tx_ibverb () builds scatter-gather entries and work
requests four at a time (the addr/length stores and remaining WR fields are
elided here):

s[0].lkey = rd->lkey;
/* ... */
s[1].lkey = rd->lkey;
/* ... */
s[2].lkey = rd->lkey;
/* ... */
s[3].lkey = rd->lkey;

w[0].next = &w[0] + 1;
w[0].sg_list = &s[0];
/* ... */
w[0].opcode = IBV_WR_SEND;

w[1].next = &w[1] + 1;
w[1].sg_list = &s[1];
/* ... */
w[1].opcode = IBV_WR_SEND;

w[2].next = &w[2] + 1;
w[2].sg_list = &s[2];
/* ... */
w[2].opcode = IBV_WR_SEND;

w[3].next = &w[3] + 1;
w[3].sg_list = &s[3];
/* ... */
w[3].opcode = IBV_WR_SEND;
The scalar tail of the loop does the same one buffer at a time, and only the
final work request of the batch is signaled, carrying the new ring tail as its
wr_id:

s[0].lkey = rd->lkey;
/* ... */
w[0].next = &w[0] + 1;
w[0].sg_list = &s[0];
/* ... */
w[0].opcode = IBV_WR_SEND;
/* ... */
w[-1].wr_id = txq->tail;
/* ... */
w[-1].send_flags = IBV_SEND_SIGNALED;
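The fragment above is a 4-wide unrolled version of standard ibverbs posting:
one SGE per packet, work requests chained through .next, and ibv_post_send()
called once per burst. A compact, hedged rendering of the same pattern (qp,
lkey and the packet arrays are assumed to come from the usual verbs setup; n
is at most 8 here):

#include <string.h>
#include <stdint.h>
#include <infiniband/verbs.h>

static int
post_send_burst (struct ibv_qp *qp, uint32_t lkey,
		 void *pkt[], uint32_t len[], int n, uint64_t tail)
{
  struct ibv_sge s[8];
  struct ibv_send_wr w[8], *bad = 0;
  memset (w, 0, sizeof (w[0]) * n);
  for (int i = 0; i < n; i++)
    {
      s[i].addr = (uintptr_t) pkt[i];
      s[i].length = len[i];
      s[i].lkey = lkey;
      w[i].next = &w[i] + 1;	/* chain to the next WR */
      w[i].sg_list = &s[i];
      w[i].num_sge = 1;
      w[i].opcode = IBV_WR_SEND;
    }
  w[n - 1].next = 0;		/* terminate the chain */
  w[n - 1].wr_id = tail;	/* recovered later from the CQE */
  w[n - 1].send_flags = IBV_SEND_SIGNALED;
  return ibv_post_send (qp, w, &bad);	/* 0 on success */
}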
Finally, rdma_device_output_tx () retries the enqueue a bounded number of
times, to cope with a send queue that frees up as completions arrive:

for (i = 0; i < RDMA_TX_RETRIES && n_left_from > 0; i++)
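The loop gives the device a few chances to drain before packets are dropped.
A toy standalone version of the retry pattern, with tx_try() as a hypothetical
stand-in for rdma_device_output_tx_try() that queues at most 3 packets per
attempt:

#include <stdio.h>
#include <stdint.h>

#define TX_RETRIES 5	/* assumption: mirrors RDMA_TX_RETRIES above */

static uint32_t
tx_try (uint32_t n_left)
{
  return n_left < 3 ? n_left : 3;	/* pretend the queue takes 3 per try */
}

int
main (void)
{
  uint32_t n_left = 10, i;
  for (i = 0; i < TX_RETRIES && n_left > 0; i++)
    n_left -= tx_try (n_left);
  printf ("left after retries: %u\n", n_left);	/* 10 -> 7 -> 4 -> 1 -> 0 */
  return 0;
}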
Definitions referenced by this listing:

VNET_DEVICE_CLASS_TX_FN() rdma_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u32 next_buffer
Next buffer for this linked-list of buffers.
static_always_inline void rdma_device_output_tx_mlx5_doorbell(rdma_txq_t *txq, rdma_mlx5_wqe_t *last, const u16 tail, u32 sq_mask)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
vlib_frame_t *frame
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define RDMA_MLX5_WQE_DS_MAX
#define RDMA_TXQ_DV_DSEG_SZ(txq)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword pow2_mask(uword x)
static uword pointer_to_uword(const void *p)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
vlib_get_buffers(vm, from, b, n_left_from)
vlib_node_runtime_t *node
static void vlib_buffer_copy_indices_to_ring(u32 *ring, u32 *src, u32 start, u32 ring_size, u32 n_buffers)
volatile u32 * dv_sq_dbrec
vlib_main_t * vm
struct mlx5_wqe_data_seg dseg
#define RDMA_TXQ_AVAIL_SZ(txq, head, tail)
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
#define RDMA_TXQ_DV_DSEG2WQE(d)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
volatile u32 * dv_cq_dbrec
#define RDMA_TXQ_USED_SZ(head, tail)
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
#define CLIB_COMPILER_BARRIER()
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define CLIB_MEMORY_STORE_BARRIER()
#define RDMA_TXQ_BUF_SZ(txq)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static_always_inline u32 rdma_device_output_tx_try(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, u32 n_left_from, u32 *bi, int is_mlx5dv)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define static_always_inline
static heap_elt_t * last(heap_header_t *h)
static_always_inline void rdma_mlx5_wqe_init(rdma_mlx5_wqe_t *wqe, const void *tmpl, vlib_buffer_t *b, const u16 tail)
#define STATIC_ASSERT(truth,...)
static_always_inline void rdma_device_output_free_ibverb(vlib_main_t *vm, const vlib_node_runtime_t *node, rdma_txq_t *txq)
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
static_always_inline void rdma_device_output_free_mlx5(vlib_main_t *vm, const vlib_node_runtime_t *node, rdma_txq_t *txq)
u16 current_length
Nbytes between current data and the end of this buffer.
static_always_inline uword rdma_device_output_tx(vlib_main_t *vm, vlib_node_runtime_t *node, rdma_device_t *rd, rdma_txq_t *txq, u32 *from, u32 n_left_from, int is_mlx5dv)
struct mlx5_wqe_eth_seg eseg
#define MLX5_ETH_L2_INLINE_HEADER_SIZE
rdma_mlx5_wqe_t * dv_sq_wqes
#define RDMA_TXQ_DV_SQ_SZ(txq)
static_always_inline u32 rdma_device_output_tx_ibverb(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, u32 n_left_from, u32 *bi, vlib_buffer_t **b)
static_always_inline u32 rdma_device_output_tx_mlx5(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, const u32 n_left_from, u32 *bi, vlib_buffer_t **b)
static uword vlib_buffer_get_current_va(vlib_buffer_t *b)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
struct mlx5_wqe_ctrl_seg ctrl
struct mlx5_cqe64 * dv_cq_cqes
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
#define RDMA_TXQ_DV_INVALID_ID
static_always_inline u32 rdma_device_output_tx_mlx5_chained(vlib_main_t *vm, const vlib_node_runtime_t *node, const rdma_device_t *rd, rdma_txq_t *txq, u32 n_left_from, u32 n, u32 *bi, vlib_buffer_t **b, rdma_mlx5_wqe_t *wqe, u16 tail)
#define STRUCT_SIZE_OF(t, f)
static_always_inline void rdma_device_output_free(vlib_main_t *vm, const vlib_node_runtime_t *node, rdma_txq_t *txq, int is_mlx5dv)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.