FD.io VPP v21.06-3-gbb25fbf28 - Vector Packet Processing
rdma plugin device header - source listing
#include <infiniband/verbs.h>
#include <infiniband/mlx5dv.h>	/* mlx5 direct-verbs types used below (struct mlx5_wqe_*, struct mlx5_cqe64, MLX5_SEND_WQE_BB) */

#define foreach_rdma_device_flags \
  _(0, ERROR, "error") \
  _(1, ADMIN_UP, "admin-up") \
  _(2, LINK_UP, "link-up") \
  _(3, PROMISC, "promiscuous") \
  _(4, MLX5DV, "mlx5dv") \
  _(5, STRIDING_RQ, "striding-rq")

/* per-entry expansion used to generate the RDMA_DEVICE_F_* flag bits */
#define _(a, b, c) RDMA_DEVICE_F_##b = (1 << a),
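For reference, the list above is consumed with the usual VPP X-macro pattern: the per-entry _() definition turns each (bit, NAME, "string") tuple into one flag value. A minimal sketch of that expansion, using only what the two macros above already state (the enum wrapper itself is illustrative):

enum
{
#define _(a, b, c) RDMA_DEVICE_F_##b = (1 << a),
  foreach_rdma_device_flags
#undef _
};
/* yields: RDMA_DEVICE_F_ERROR = 1, RDMA_DEVICE_F_ADMIN_UP = 2,
   RDMA_DEVICE_F_LINK_UP = 4, RDMA_DEVICE_F_PROMISC = 8,
   RDMA_DEVICE_F_MLX5DV = 16, RDMA_DEVICE_F_STRIDING_RQ = 32 */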
/* fallback when the mlx5 headers do not provide this constant */
#ifndef MLX5_ETH_L2_INLINE_HEADER_SIZE
#define MLX5_ETH_L2_INLINE_HEADER_SIZE 18
#endif
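As illustrative arithmetic only: 18 bytes is enough to inline a 14-byte Ethernet header plus one 4-byte 802.1Q tag (14 + 4 = 18) into the send WQE's Ethernet segment; reading the constant this way is an interpretation, not something stated in this listing.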
/* members of rdma_mlx5_wqe_t, the 64-byte mlx5 send WQE building block
   (only the segments shown in this listing): */
  struct mlx5_wqe_ctrl_seg ctrl;
  struct mlx5_wqe_eth_seg eseg;
  struct mlx5_wqe_data_seg dseg;

#define RDMA_MLX5_WQE_SZ sizeof(rdma_mlx5_wqe_t)
#define RDMA_MLX5_WQE_DS (RDMA_MLX5_WQE_SZ/sizeof(struct mlx5_wqe_data_seg))
STATIC_ASSERT (RDMA_MLX5_WQE_SZ == MLX5_SEND_WQE_BB &&
               RDMA_MLX5_WQE_SZ % sizeof (struct mlx5_wqe_data_seg) == 0,
               "bad size");
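RDMA_MLX5_WQE_DS is the number of 16-byte units that make up one building block; with the usual rdma-core sizes it evaluates to 4 (64 / 16). The checks below are an illustrative restatement of that arithmetic, not part of this header; they assume rdma-core's 64-byte MLX5_SEND_WQE_BB and 16-byte data segment:

/* illustrative compile-time arithmetic only */
STATIC_ASSERT (MLX5_SEND_WQE_BB == 64, "64-byte send WQE building block");
STATIC_ASSERT (sizeof (struct mlx5_wqe_data_seg) == 16, "16-byte data segment");
STATIC_ASSERT (RDMA_MLX5_WQE_DS == 4, "4 x 16-byte units per building block");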
#define RDMA_TXQ_DV_INVALID_ID 0xffffffff

#define RDMA_TXQ_BUF_SZ(txq)   (1U << (txq)->bufs_log2sz)
#define RDMA_TXQ_DV_SQ_SZ(txq) (1U << (txq)->dv_sq_log2sz)
#define RDMA_TXQ_DV_CQ_SZ(txq) (1U << (txq)->dv_cq_log2sz)

#define RDMA_TXQ_USED_SZ(head, tail)       ((u16)((u16)(tail) - (u16)(head)))
#define RDMA_TXQ_AVAIL_SZ(txq, head, tail) ((u16)(RDMA_TXQ_BUF_SZ (txq) - RDMA_TXQ_USED_SZ (head, tail)))
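The head/tail values fed to these macros are free-running 16-bit indices, so the unsigned subtraction in RDMA_TXQ_USED_SZ stays correct across wrap-around as long as the ring size is a power of two no larger than 2^16. An illustrative sketch (the concrete values and the masking idiom are examples, and txq is assumed to point at a tx queue with the bufs_log2sz field used by the macros above):

u16 head = 65530;                                /* oldest in-flight slot (example value)  */
u16 tail = 10;                                   /* next slot to fill (example value)      */
u16 used  = RDMA_TXQ_USED_SZ (head, tail);       /* == 16, despite the u16 wrap-around     */
u16 avail = RDMA_TXQ_AVAIL_SZ (txq, head, tail); /* == ring size - 16                      */
u32 slot  = tail & (RDMA_TXQ_BUF_SZ (txq) - 1);  /* power-of-two ring indexing             */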
#define RDMA_RXQ_MAX_CHAIN_LOG_SZ 3
#define RDMA_RXQ_MAX_CHAIN_SZ (1U << RDMA_RXQ_MAX_CHAIN_LOG_SZ)
#define RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ 5
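These limits bound how many buffers a single received packet may be chained across. As rough, illustrative arithmetic only (assuming VPP's default 2048-byte buffer data size, which is configuration-dependent): RDMA_RXQ_MAX_CHAIN_SZ (8) buffers cover frames up to 8 * 2048 = 16384 bytes, while the legacy-mode limit of 5 buffers covers up to 5 * 2048 = 10240 bytes.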
#define foreach_rdma_tx_func_error \
  _(SEGMENT_SIZE_EXCEEDED, "segment size exceeded") \
  _(NO_FREE_SLOTS, "no free tx slots") \
  _(SUBMISSION, "tx submission errors") \
  _(COMPLETION, "tx completion errors")

/* per-entry expansion used to generate the RDMA_TX_ERROR_* error codes */
#define _(f,s) RDMA_TX_ERROR_##f,
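As with the device flags, this list follows the conventional VPP error-counter pattern: it is typically expanded once into an enum of error codes and once into the matching string table. A minimal sketch of that pattern; the type and array names below follow the usual convention and are not part of this listing:

typedef enum
{
#define _(f, s) RDMA_TX_ERROR_##f,
  foreach_rdma_tx_func_error
#undef _
    RDMA_TX_N_ERROR,
} rdma_tx_func_error_t;

static char *rdma_tx_func_error_strings[] = {
#define _(f, s) s,
  foreach_rdma_tx_func_error
#undef _
};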
Symbols referenced by this header (from the cross-reference index):

Functions and externs declared here:
  void rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args);
  void rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd);
  extern vnet_device_class_t rdma_device_class;
  extern vlib_node_registration_t rdma_input_node;  /* (constructor) VLIB_REGISTER_NODE (rdma_input_node) */
  format_function_t format_rdma_device;
  format_function_t format_rdma_device_name;
  format_function_t format_rdma_input_trace;
  format_function_t format_rdma_rxq;
  unformat_function_t unformat_rdma_create_if_args;

Structure members and variables appearing in the cross-reference:
  rdma_per_thread_data_t * per_thread_data;
  vlib_buffer_t buffer_template;               /* VLIB buffer representation */
  u32 per_interface_next_index;
  u16 n_total_additional_segs;
  u32 async_event_clib_file_index;
  vlib_main_t * vm;
  vlib_pci_device_info_t * pci;
  vlib_log_class_t log_class;
  struct ibv_rwq_ind_table * rx_rwq_ind_tbl;   /* RSS indirection table */
  struct ibv_flow * flow_ucast4;               /* IPv4 unicast steering rule */
  struct ibv_flow * flow_mcast4;               /* IPv4 multicast steering rule */
  struct ibv_flow * flow_ucast6;               /* IPv6 unicast steering rule */
  struct ibv_flow * flow_mcast6;               /* IPv6 multicast steering rule */
  rdma_mlx5_wqe_t * dv_sq_wqes;                /* direct-verbs SQ WQE ring */
  volatile u32 * dv_sq_dbrec;                  /* SQ doorbell record */
  struct mlx5_cqe64 * dv_cq_cqes;              /* direct-verbs CQ entries */
  volatile u32 * dv_cq_dbrec;                  /* CQ doorbell record */

Compile-time checks:
  STATIC_ASSERT_OFFSET_OF (rdma_txq_t, cacheline1, 64);
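To show how the two entry points above fit together, here is a hedged usage sketch of rdma_create_if () from a control-plane handler. The rdma_create_if_args_t fields used below (name, rxq_num, rxq_size, txq_size, rv, error, sw_if_index) are assumptions about that structure and are not shown in this listing; it also assumes the usual VPP plugin includes plus this header.

/* hedged usage sketch only - field names are assumptions, see lead-in */
static clib_error_t *
rdma_attach_example (vlib_main_t * vm)
{
  rdma_create_if_args_t args = { 0 };

  args.name = (u8 *) "enp1s0f0";  /* linux netdev to attach to (assumed field) */
  args.rxq_num = 2;               /* number of rx queues (assumed field)       */
  args.rxq_size = 1024;           /* ring sizes, powers of two (assumed)       */
  args.txq_size = 1024;

  rdma_create_if (vm, &args);     /* results reported back through args (assumed) */
  if (args.rv != 0)
    return args.error;

  /* args.sw_if_index would now identify the new interface; the matching
     teardown path is rdma_delete_if (vm, rd) with the device pointer. */
  return 0;
}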