rdma.h
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef _RDMA_H_
#define _RDMA_H_

#include <infiniband/verbs.h>
#include <vlib/log.h>
#include <vlib/pci/pci.h>
#include <vnet/interface.h>
#include <vnet/ethernet/mac_address.h>
#include <rdma/rdma_mlx5dv.h>

#define foreach_rdma_device_flags \
  _(0, ERROR, "error") \
  _(1, ADMIN_UP, "admin-up") \
  _(2, LINK_UP, "link-up") \
  _(3, PROMISC, "promiscuous") \
  _(4, MLX5DV, "mlx5dv") \
  _(5, STRIDING_RQ, "striding-rq")

enum
{
#define _(a, b, c) RDMA_DEVICE_F_##b = (1 << a),
  foreach_rdma_device_flags
#undef _
};
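
/* Illustration: expanding the X-macro above yields flag bits roughly
 * equivalent to
 *
 *   RDMA_DEVICE_F_ERROR       = (1 << 0),
 *   RDMA_DEVICE_F_ADMIN_UP    = (1 << 1),
 *   RDMA_DEVICE_F_LINK_UP     = (1 << 2),
 *   RDMA_DEVICE_F_PROMISC     = (1 << 3),
 *   RDMA_DEVICE_F_MLX5DV      = (1 << 4),
 *   RDMA_DEVICE_F_STRIDING_RQ = (1 << 5),
 *
 * while the string column of foreach_rdma_device_flags remains available
 * to formatting/debug code. */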

#ifndef MLX5_ETH_L2_INLINE_HEADER_SIZE
#define MLX5_ETH_L2_INLINE_HEADER_SIZE 18
#endif

typedef struct
{
  CLIB_ALIGN_MARK (align0, MLX5_SEND_WQE_BB);
  union
  {
    struct mlx5_wqe_ctrl_seg ctrl;
    struct
    {
      u8 opc_mod;
      u8 wqe_index_hi;
      u8 wqe_index_lo;
      u8 opcode;
    };
  };
  struct mlx5_wqe_eth_seg eseg;
  struct mlx5_wqe_data_seg dseg;
} rdma_mlx5_wqe_t;
#define RDMA_MLX5_WQE_SZ sizeof(rdma_mlx5_wqe_t)
#define RDMA_MLX5_WQE_DS (RDMA_MLX5_WQE_SZ/sizeof(struct mlx5_wqe_data_seg))
STATIC_ASSERT (RDMA_MLX5_WQE_SZ == MLX5_SEND_WQE_BB &&
	       RDMA_MLX5_WQE_SZ % sizeof (struct mlx5_wqe_data_seg) == 0,
	       "bad size");
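
/* Illustration, assuming the usual mlx5 definitions MLX5_SEND_WQE_BB == 64
 * and sizeof (struct mlx5_wqe_data_seg) == 16: the assert above then pins
 * RDMA_MLX5_WQE_SZ to one 64-byte send WQE basic block, and
 * RDMA_MLX5_WQE_DS to 64 / 16 == 4 data segments per WQE. */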

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  struct ibv_cq *cq;
  struct ibv_wq *wq;
  u32 *bufs;
  u32 size;
  u32 head;
  u32 tail;
  u32 cq_ci;
  u16 log2_cq_size;
  u16 n_mini_cqes;
  u16 n_mini_cqes_left;
  u16 last_cqe_flags;
  mlx5dv_cqe_t *cqes;
  mlx5dv_wqe_ds_t *wqes;

  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  volatile u32 *wq_db;
  volatile u32 *cq_db;
  u32 cqn;
  u32 wqe_cnt;
  u32 wq_stride;
  u32 buf_sz;
  union
  {
    struct
    {
      u32 striding_wqe_tail;	/* Striding RQ: number of released whole WQE */
      u8 log_stride_per_wqe;	/* Striding RQ: number of strides in a single WQE */
    };

    struct
    {
      u8 *n_used_per_chain;	/* Legacy RQ: for each buffer chain, how many additional segments are needed */
      u32 *second_bufs;		/* Legacy RQ: ring of second buffers of each chain */
      u32 incomplete_tail;	/* Legacy RQ: tail index in bufs,
				   corresponds to buffer chains with recycled valid head buffer,
				   but whose other buffers are not yet recycled (due to pool exhaustion). */
      u16 n_total_additional_segs;
      u8 n_ds_per_wqe;		/* Legacy RQ: number of nonnull data segs per WQE */
    };
  };
  u8 log_wqe_sz;		/* log-size of a single WQE (in data segments) */
} rdma_rxq_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);

  /* following fields are accessed in datapath */
  clib_spinlock_t lock;

  union
  {
    struct
    {
      /* ibverb datapath. Cache of cq, sq below */
      struct ibv_cq *ibv_cq;
      struct ibv_qp *ibv_qp;
    };
    struct
    {
      /* direct verbs datapath */
      rdma_mlx5_wqe_t *dv_sq_wqes;
      volatile u32 *dv_sq_dbrec;
      volatile u64 *dv_sq_db;
      struct mlx5_cqe64 *dv_cq_cqes;
      volatile u32 *dv_cq_dbrec;
    };
  };

  u32 *bufs;			/* vlib_buffer ring buffer */
  u16 head;
  u16 tail;
  u16 dv_cq_idx;		/* monotonic CQE index (valid only for direct verbs) */
  u8 bufs_log2sz;		/* log2 vlib_buffer entries */
  u8 dv_sq_log2sz:4;		/* log2 SQ WQE entries (valid only for direct verbs) */
  u8 dv_cq_log2sz:4;		/* log2 CQ CQE entries (valid only for direct verbs) */
  STRUCT_MARK (cacheline1);

  /* WQE template (valid only for direct verbs) */
  u8 dv_wqe_tmpl[64];

  /* end of 2nd 64-bytes cacheline (or 1st 128-bytes cacheline) */
  STRUCT_MARK (cacheline2);

  /* fields below are not accessed in datapath */
  struct ibv_cq *cq;
  struct ibv_qp *qp;

} rdma_txq_t;
STATIC_ASSERT_OFFSET_OF (rdma_txq_t, cacheline1, 64);
STATIC_ASSERT_OFFSET_OF (rdma_txq_t, cacheline2, 128);

#define RDMA_TXQ_DV_INVALID_ID 0xffffffff

#define RDMA_TXQ_BUF_SZ(txq) (1U << (txq)->bufs_log2sz)
#define RDMA_TXQ_DV_SQ_SZ(txq) (1U << (txq)->dv_sq_log2sz)
#define RDMA_TXQ_DV_CQ_SZ(txq) (1U << (txq)->dv_cq_log2sz)

#define RDMA_TXQ_USED_SZ(head, tail) ((u16)((u16)(tail) - (u16)(head)))
#define RDMA_TXQ_AVAIL_SZ(txq, head, tail) ((u16)(RDMA_TXQ_BUF_SZ (txq) - RDMA_TXQ_USED_SZ (head, tail)))
#define RDMA_RXQ_MAX_CHAIN_LOG_SZ 3 /* This should NOT be lower than 3! */
#define RDMA_RXQ_MAX_CHAIN_SZ (1U << RDMA_RXQ_MAX_CHAIN_LOG_SZ)
#define RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ 5
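
/* Illustration: RDMA_TXQ_USED_SZ and RDMA_TXQ_AVAIL_SZ rely on u16 modular
 * arithmetic, so they stay correct across ring-index wrap-around.  For
 * example, with bufs_log2sz == 8 (256 slots), head == 0xfff8 and
 * tail == 0x0008:
 *
 *   RDMA_TXQ_USED_SZ (head, tail)       == (u16) (0x0008 - 0xfff8) == 16
 *   RDMA_TXQ_AVAIL_SZ (txq, head, tail) == 256 - 16                == 240
 */
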
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);

  /* following fields are accessed in datapath */
  rdma_rxq_t *rxqs;
  rdma_txq_t *txqs;
  u32 flags;
  u32 per_interface_next_index;
  u32 sw_if_index;
  u32 hw_if_index;
  u32 lkey;			/* cache of mr->lkey */
  u8 pool;			/* buffer pool index */

  /* fields below are not accessed in datapath */
  vlib_pci_device_info_t *pci;
  u8 *name;
  u8 *linux_ifname;
  mac_address_t hwaddr;
  u32 async_event_clib_file_index;
  u32 dev_instance;

  struct ibv_context *ctx;
  struct ibv_pd *pd;
  struct ibv_mr *mr;
  struct ibv_qp *rx_qp4;
  struct ibv_qp *rx_qp6;
  struct ibv_rwq_ind_table *rx_rwq_ind_tbl;
  struct ibv_flow *flow_ucast4;
  struct ibv_flow *flow_mcast4;
  struct ibv_flow *flow_ucast6;
  struct ibv_flow *flow_mcast6;

  clib_error_t *error;
} rdma_device_t;

typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  union
  {
    u16 cqe_flags[VLIB_FRAME_SIZE];
    u16x8 cqe_flags8[VLIB_FRAME_SIZE / 8];
    u16x16 cqe_flags16[VLIB_FRAME_SIZE / 16];
  };
  union
  {
    struct
    {
      u32 current_segs[VLIB_FRAME_SIZE];
      u32 to_free_buffers[VLIB_FRAME_SIZE];
    };				/* Specific to STRIDING RQ mode */
    struct
    {
      u32 tmp_bi[VLIB_FRAME_SIZE];
      vlib_buffer_t *tmp_bufs[VLIB_FRAME_SIZE];
    };				/* Specific to LEGACY RQ mode */
  };

  vlib_buffer_t buffer_template;
} rdma_per_thread_data_t;

typedef struct
{
  rdma_per_thread_data_t *per_thread_data;
  rdma_device_t *devices;
  vlib_log_class_t log_class;
  u16 msg_id_base;
} rdma_main_t;

extern rdma_main_t rdma_main;

typedef enum
{
  RDMA_MODE_AUTO = 0,
  RDMA_MODE_IBV,
  RDMA_MODE_DV,
} rdma_mode_t;

typedef struct
{
  rdma_mode_t mode;

  /* return */
  int rv;
  u32 sw_if_index;
  clib_error_t *error;
} rdma_create_if_args_t;

void rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args);
void rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd);

extern vlib_node_registration_t rdma_input_node;
extern vnet_device_class_t rdma_device_class;

format_function_t format_rdma_device;
format_function_t format_rdma_device_name;
format_function_t format_rdma_input_trace;
format_function_t format_rdma_rxq;
unformat_function_t unformat_rdma_create_if_args;

typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 cqe_flags;
} rdma_input_trace_t;

#define foreach_rdma_tx_func_error \
_(SEGMENT_SIZE_EXCEEDED, "segment size exceeded") \
_(NO_FREE_SLOTS, "no free tx slots") \
_(SUBMISSION, "tx submission errors") \
_(COMPLETION, "tx completion errors")

typedef enum
{
#define _(f,s) RDMA_TX_ERROR_##f,
  foreach_rdma_tx_func_error
#undef _
    RDMA_TX_N_ERROR,
} rdma_tx_func_error_t;

#endif /* _RDMA_H_ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */