FD.io VPP  v21.01.1
Vector Packet Processing
device.c
/*
 *------------------------------------------------------------------
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <unistd.h>
#include <fcntl.h>
#include <net/if.h>
#include <linux/if_link.h>
#include <linux/if_ether.h>

#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <rdma/rdma.h>

/* Default RSS hash key (from DPDK MLX driver) */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};

rdma_main_t rdma_main;

#define rdma_log__(lvl, dev, f, ...) \
  do { \
      vlib_log((lvl), rdma_main.log_class, "%s: " f, \
               &(dev)->name, ##__VA_ARGS__); \
  } while (0)

#define rdma_log(lvl, dev, f, ...) \
   rdma_log__((lvl), (dev), "%s (%d): " f, strerror(errno), errno, ##__VA_ARGS__)

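/* Install an ibv flow steering rule on a RX queue pair: match packets by
 * destination MAC (value/mask) and optional ethertype, and deliver them to
 * the QP. Used below to implement unicast, multicast and promiscuous mode. */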
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u16 ether_type, u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  fa.attr.port = 1;
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  if (ether_type)
    {
      fa.spec_eth.val.ether_type = ether_type;
      fa.spec_eth.mask.ether_type = 0xffff;
    }

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");
  return flow;
}

static u32
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  if (!*flow)
    return 0;

  if (ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return ~0;
    }

  *flow = 0;
  return 0;
}

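/* Promiscuous mode: replace the device-specific flows with a pair of
 * match-all rules (zero MAC, zero mask) so every frame is delivered,
 * one rule on the IPv6 hash QP and one on the IPv4/other QP. */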
static u32
rdma_dev_set_promisc (rdma_device_t * rd)
{
  const mac_address_t all = {.bytes = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &all, &all, ntohs (ETH_P_IPV6), 0);
  rd->flow_ucast4 = rdma_rxq_init_flow (rd, rd->rx_qp4, &all, &all, 0, 0);
  if (!rd->flow_ucast6 || !rd->flow_ucast4)
    return ~0;

  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}

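/* L3 (non-promiscuous) mode: accept frames addressed to our unicast MAC
 * plus all multicast; the multicast rules use IBV_FLOW_ATTR_FLAGS_DONT_TRAP
 * so the kernel stack keeps receiving those packets as well. */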
static u32
rdma_dev_set_ucast (rdma_device_t * rd)
{
  const mac_address_t ucast = {.bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
  };
  const mac_address_t mcast = {.bytes = {0x1, 0x0, 0x0, 0x0, 0x0, 0x0} };
  int err;

  err = rdma_rxq_destroy_flow (rd, &rd->flow_mcast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast6);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_mcast4);
  err |= rdma_rxq_destroy_flow (rd, &rd->flow_ucast4);
  if (err)
    return ~0;

  rd->flow_ucast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &rd->hwaddr, &ucast,
			ntohs (ETH_P_IPV6), 0);
  rd->flow_mcast6 =
    rdma_rxq_init_flow (rd, rd->rx_qp6, &mcast, &mcast, ntohs (ETH_P_IPV6),
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packet too (eg. Linux) */
    );
  rd->flow_ucast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &rd->hwaddr, &ucast, 0, 0);
  rd->flow_mcast4 =
    rdma_rxq_init_flow (rd, rd->rx_qp4, &mcast, &mcast, 0,
			IBV_FLOW_ATTR_FLAGS_DONT_TRAP
			/* let others receive mcast packet too (eg. Linux) */
    );
  if (!rd->flow_ucast6 || !rd->flow_mcast6 || !rd->flow_ucast4
      || !rd->flow_mcast4)
    return ~0;

  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}

static clib_error_t *
rdma_mac_change (vnet_hw_interface_t * hw, const u8 * old, const u8 * new)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);
  mac_address_from_bytes (&rd->hwaddr, new);
  if (!(rd->flags & RDMA_DEVICE_F_PROMISC) && rdma_dev_set_ucast (rd))
    {
      mac_address_from_bytes (&rd->hwaddr, old);
      return clib_error_return_unix (0, "MAC update failed");
    }
  return 0;
}

static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}

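/* Ethernet flag-change callback: invoked by the ethernet layer when the
 * interface is switched between L3 and promiscuous mode or gets a new MTU.
 * Returns ~0 on failure. */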
static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, hw->dev_instance);

  switch (flags)
    {
    case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
      return rdma_dev_set_ucast (rd);
    case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
      return rdma_dev_set_promisc (rd);
    case ETHERNET_INTERFACE_FLAG_MTU:
      return rdma_dev_change_mtu (rd);
    }

  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}

static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;
  u32 width = 0;
  u32 speed = 0;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      return;
    }

  /* update state */
  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      break;
    }

  /* update speed */
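  /* ibv_query_port() reports width and speed as bit flags:
   * active_width 1/2/4/8 = 1x/4x/8x/12x lanes,
   * active_speed 1/2/4 = SDR/DDR/QDR, 8 = FDR10, 16 = FDR, 32 = EDR.
   * The link speed handed to VPP (in Kbps) is lanes * per-lane speed. */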
  switch (attr.active_width)
    {
    case 1:
      width = 1;
      break;
    case 2:
      width = 4;
      break;
    case 4:
      width = 8;
      break;
    case 8:
      width = 12;
      break;
    }
  switch (attr.active_speed)
    {
    case 1:
      speed = 2500000;
      break;
    case 2:
      speed = 5000000;
      break;
    case 4:			/* fallthrough */
    case 8:
      speed = 10000000;
      break;
    case 16:
      speed = 14000000;
      break;
    case 32:
      speed = 25000000;
      break;
    }
  vnet_hw_interface_set_link_speed (vnm, rd->hw_if_index, width * speed);
}

static clib_error_t *
rdma_async_event_error_ready (clib_file_t * f)
{
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  return clib_error_return (0, "RDMA: %s: async event error", rd->name);
}

static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = vec_elt_at_index (rm->devices, f->private_data);
  int ret;
  struct ibv_async_event event;
  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      vlib_log_emerg (rm->log_class, "%s: fatal error", rd->name);
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %i",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}

static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  clib_file_t t = { 0 };
  int ret;

  /* make RDMA async event fd non-blocking */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_GETFL) failed");

  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "fcntl(F_SETFL, O_NONBLOCK) failed");

  /* register RDMA async event fd */
  t.read_function = rdma_async_event_read_ready;
  t.file_descriptor = rd->ctx->async_fd;
  t.error_function = rdma_async_event_error_ready;
  t.private_data = rd->dev_instance;
  t.description = format (0, "%v async event", rd->name);

  rd->async_event_clib_file_index = clib_file_add (&file_main, &t);
  return 0;
}

static void
rdma_async_event_cleanup (rdma_device_t * rd)
{
  clib_file_del_by_index (&file_main, rd->async_event_clib_file_index);
}

static clib_error_t *
rdma_register_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  clib_error_t *err =
    ethernet_register_interface (vnm, rdma_device_class.index,
				 rd->dev_instance, rd->hwaddr.bytes,
				 &rd->hw_if_index, rdma_flag_change);

  /* Indicate ability to support L3 DMAC filtering and
   * initialize interface to L3 non-promisc mode */
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rd->hw_if_index);
  hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER;
  ethernet_set_flags (vnm, rd->hw_if_index,
		      ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
  return err;
}

static void
rdma_unregister_interface (vnet_main_t * vnm, rdma_device_t * rd)
{
  vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
  vnet_hw_interface_unassign_rx_thread (vnm, rd->hw_if_index, 0);
  ethernet_delete_interface (vnm, rd->hw_if_index);
}

static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_main_t *rm = &rdma_main;
  rdma_rxq_t *rxq;
  rdma_txq_t *txq;

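  /* tear everything down in reverse order of creation; each helper below is
   * a no-op when the corresponding object was never created */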
#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  _(ibv_destroy_flow, rd->flow_mcast6);
  _(ibv_destroy_flow, rd->flow_ucast6);
  _(ibv_destroy_flow, rd->flow_mcast4);
  _(ibv_destroy_flow, rd->flow_ucast4);
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp6);
  _(ibv_destroy_qp, rd->rx_qp4);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _

  clib_error_free (rd->error);

  vec_free (rd->rxqs);
  vec_free (rd->txqs);
  vec_free (rd->name);
  vec_free (rd->linux_ifname);
  pool_put (rm->devices, rd);
}

static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
	       u8 no_multi_seg, u16 max_pktlen)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;
  struct mlx5dv_wq_init_attr dv_wqia = { };
  int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
  int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);

  vec_validate_aligned (rd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
  rxq = vec_elt_at_index (rd->rxqs, qid);
  rxq->size = n_desc;
  rxq->log_wqe_sz = 0;
  rxq->buf_sz = vlib_buffer_get_default_data_size (vm);
  vec_validate_aligned (rxq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  cqa.cqe = n_desc;
  if (is_mlx5dv)
    {
      struct mlx5dv_cq_init_attr dvcq = { };
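      /* request the compressed-CQE format (hash-based mini CQEs): the NIC
       * can then coalesce consecutive completions, which reduces PCIe
       * bandwidth at high packet rates */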
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "Create mlx5dv rx CQ Failed");
    }
  else
    {
      if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
	return clib_error_return_unix (0, "Create CQ Failed");
    }

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;
  wqia.max_sge = 1;
  wqia.pd = rd->pd;
  wqia.cq = rxq->cq;
  if (is_mlx5dv)
    {
      if (is_striding)
	{
	  /* In STRIDING_RQ mode, map a descriptor to a stride, not a full WQE buffer */
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));
	  rxq->buf_sz = 1 << data_seg_log2_sz;
	  /* The trick is also to map a descriptor to a data segment in the WQE SG list
	     The number of strides per WQE and the size of a WQE (in 16-bytes words) both
	     must be powers of two.
	     Moreover, in striding RQ mode, WQEs must include the SRQ header, which occupies
	     one 16-bytes word. That is why WQEs have 2*RDMA_RXQ_MAX_CHAIN_SZ 16-bytes words:
	     - One for the SRQ Header
	     - RDMA_RXQ_MAX_CHAIN_SZ for the different data segments (each mapped to
	     a stride, and a vlib_buffer)
	     - RDMA_RXQ_MAX_CHAIN_SZ-1 null data segments
	   */
	  int max_chain_log_sz =
	    max_pktlen ? max_log2 ((max_pktlen /
				    (rxq->buf_sz)) +
				   1) : RDMA_RXQ_MAX_CHAIN_LOG_SZ;
	  max_chain_log_sz = clib_max (max_chain_log_sz, 3);
	  wqia.max_sge = 1 << max_chain_log_sz;
	  dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
	  dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
	  dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
	    max_chain_log_sz;
	  dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
	    data_seg_log2_sz;
	  wqia.max_wr >>= max_chain_log_sz;
	  rxq->log_wqe_sz = max_chain_log_sz + 1;
	  rxq->log_stride_per_wqe = max_chain_log_sz;
	}
      else
	{
	  /* In non STRIDING_RQ mode and if multiseg is not disabled, each WQE is a SG list of data
	     segments, each pointing to a vlib_buffer. */
	  if (no_multi_seg)
	    {
	      wqia.max_sge = 1;
	      rxq->log_wqe_sz = 0;
	      rxq->n_ds_per_wqe = 1;
	    }
	  else
	    {
	      int max_chain_sz =
		max_pktlen ? (max_pktlen /
			      (rxq->buf_sz)) +
		1 : RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
	      int max_chain_log_sz = max_log2 (max_chain_sz);
	      wqia.max_sge = 1 << max_chain_log_sz;
	      rxq->log_wqe_sz = max_chain_log_sz;
	      rxq->n_ds_per_wqe = max_chain_sz;
	    }

	}

      if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
	{
	  rxq->wq->events_completed = 0;
	  pthread_mutex_init (&rxq->wq->mutex, NULL);
	  pthread_cond_init (&rxq->wq->cond, NULL);
	}
      else
	return clib_error_return_unix (0, "Create WQ Failed");
    }
  else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "Create WQ Failed");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "Modify WQ (RDY) Failed");

  if (is_mlx5dv)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0;
      u64 qw0_nullseg;
      u32 wqe_sz_mask = (1 << rxq->log_wqe_sz) - 1;

      obj.cq.in = rxq->cq;
      obj.cq.out = &dv_cq;
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "mlx5dv: failed to init rx obj");

      if (dv_cq.cqe_size != sizeof (mlx5dv_cqe_t))
	return clib_error_return_unix (0, "mlx5dv: incompatible rx CQE size");

      rxq->log2_cq_size = max_log2 (dv_cq.cqe_cnt);
      rxq->cqes = (mlx5dv_cqe_t *) dv_cq.buf;
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;

      rxq->wqes = (mlx5dv_wqe_ds_t *) dv_rwq.buf;
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      rxq->wq_stride = dv_rwq.stride;
      rxq->wqe_cnt = dv_rwq.wqe_cnt;

      qw0 = clib_host_to_net_u32 (rxq->buf_sz);
      qw0_nullseg = 0;
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;

/* Prefill the different 16 bytes words of the WQ.
 - If not in striding RQ mode, for each WQE, init with qw0 the first
 RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ, and init the rest of the WQE
 with null segments.
 - If in striding RQ mode, for each WQE, the RDMA_RXQ_MAX_CHAIN_SZ + 1
 first 16-bytes words are initialised with qw0, the rest are null segments */

      for (int i = 0; i < rxq->wqe_cnt << rxq->log_wqe_sz; i++)
	if ((!is_striding
	     && ((i & wqe_sz_mask) < rxq->n_ds_per_wqe))
	    || (is_striding
		&& ((i == 0)
		    || !(((i - 1) >> rxq->log_stride_per_wqe) & 0x1))))
	  rxq->wqes[i].dsz_and_lkey = qw0;
	else
	  rxq->wqes[i].dsz_and_lkey = qw0_nullseg;

      for (int i = 0; i < (1 << rxq->log2_cq_size); i++)
	rxq->cqes[i].opcode_cqefmt_se_owner = 0xff;

      if (!is_striding)
	{
	  vec_validate_aligned (rxq->second_bufs, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  vec_validate_aligned (rxq->n_used_per_chain, n_desc - 1,
				CLIB_CACHE_LINE_BYTES);
	  rxq->n_total_additional_segs = n_desc * (rxq->n_ds_per_wqe - 1);
	  for (int i = 0; i < n_desc; i++)
	    rxq->n_used_per_chain[i] = rxq->n_ds_per_wqe - 1;
	}
    }

  return 0;
}

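/* All RX WQs feed a single RSS fanout: an indirection table over the WQs
 * plus two hash QPs (one for IPv4, one for IPv6) computing a Toeplitz hash
 * over the TCP 4-tuple with the 40-byte key defined above. */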
static clib_error_t *
rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  u32 i;

  ASSERT (is_pow2 (vec_len (rd->rxqs))
	  && "rxq number should be a power of 2");

  ind_tbl = vec_new (struct ibv_wq *, vec_len (rd->rxqs));
  vec_foreach_index (i, rd->rxqs)
    ind_tbl[i] = vec_elt_at_index (rd->rxqs, i)->wq;
  memset (&rwqia, 0, sizeof (rwqia));
  rwqia.log_ind_tbl_size = min_log2 (vec_len (ind_tbl));
  rwqia.ind_tbl = ind_tbl;
  if ((rd->rx_rwq_ind_tbl = ibv_create_rwq_ind_table (rd->ctx, &rwqia)) == 0)
    return clib_error_return_unix (0, "RWQ indirection table create failed");
  vec_free (ind_tbl);

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  qpia.pd = rd->pd;
  qpia.rwq_ind_tbl = rd->rx_rwq_ind_tbl;
  STATIC_ASSERT_SIZEOF (rdma_rss_hash_key, 40);
  qpia.rx_hash_conf.rx_hash_key_len = sizeof (rdma_rss_hash_key);
  qpia.rx_hash_conf.rx_hash_key = rdma_rss_hash_key;
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv4 Queue Pair create failed");

  qpia.rx_hash_conf.rx_hash_fields_mask =
    IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 | IBV_RX_HASH_SRC_PORT_TCP |
    IBV_RX_HASH_DST_PORT_TCP;
  if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "IPv6 Queue Pair create failed");

  if (rdma_dev_set_ucast (rd))
    return clib_error_return_unix (0, "Set unicast mode failed");

  return 0;
}

static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  vec_validate_aligned (rd->txqs, qid, CLIB_CACHE_LINE_BYTES);
  txq = vec_elt_at_index (rd->txqs, qid);
  ASSERT (is_pow2 (n_desc));
  txq->bufs_log2sz = min_log2 (n_desc);
  vec_validate_aligned (txq->bufs, n_desc - 1, CLIB_CACHE_LINE_BYTES);

  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "Create CQ Failed");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "Queue Pair create failed");

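  /* a freshly created QP is in the RESET state; the verbs spec requires
   * stepping it through INIT and RTR (ready-to-receive) before it reaches
   * RTS (ready-to-send) and can post transmits */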
  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (init) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (receive) Failed");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "Modify QP (send) Failed");

  txq->ibv_cq = txq->cq;
  txq->ibv_qp = txq->qp;

  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = (void *) txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      obj.cq.in = txq->cq;
      obj.cq.out = &dv_cq;
      obj.qp.in = txq->qp;
      obj.qp.out = &dv_qp;

      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "DV init obj failed");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_qp.sq.wqe_cnt
	  || !is_pow2 (dv_qp.sq.wqe_cnt)
	  || sizeof (rdma_mlx5_wqe_t) != dv_qp.sq.stride
	  || (uword) dv_qp.sq.buf % sizeof (rdma_mlx5_wqe_t))
	return clib_error_return (0, "Unsupported DV SQ parameters");

      if (RDMA_TXQ_BUF_SZ (txq) > dv_cq.cqe_cnt
	  || !is_pow2 (dv_cq.cqe_cnt)
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "Unsupported DV CQ parameters");

      /* get SQ and doorbell addresses */
      txq->dv_sq_wqes = dv_qp.sq.buf;
      txq->dv_sq_dbrec = dv_qp.dbrec;
      txq->dv_sq_db = dv_qp.bf.reg;
      txq->dv_sq_log2sz = min_log2 (dv_qp.sq.wqe_cnt);

      /* get CQ and doorbell addresses */
      txq->dv_cq_cqes = dv_cq.buf;
      txq->dv_cq_dbrec = dv_cq.dbrec;
      txq->dv_cq_log2sz = min_log2 (dv_cq.cqe_cnt);

      /* init tx desc template */
      STATIC_ASSERT_SIZEOF (txq->dv_wqe_tmpl, sizeof (*tmpl));
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   txq->qp->qp_num, 0, RDMA_MLX5_WQE_DS, 0,
			   RDMA_TXQ_DV_INVALID_ID);
      tmpl->eseg.inline_hdr_sz = htobe16 (MLX5_ETH_L2_INLINE_HEADER_SIZE);
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }

  return 0;
}

static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
	       rdma_create_if_args_t * args)
{
  clib_error_t *err;
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 rxq_num = args->rxq_num;
  u32 rxq_size = args->rxq_size;
  u32 txq_size = args->txq_size;
  u32 i;

  if (rd->ctx == 0)
    return clib_error_return_unix (0, "Device Open Failed");

  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "PD Alloc Failed");

  if ((rd->mr = ibv_reg_mr (rd->pd, (void *) bm->buffer_mem_start,
			    bm->buffer_mem_size,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "Register MR Failed");

  rd->lkey = rd->mr->lkey;	/* avoid indirection in datapath */

  ethernet_mac_address_generate (rd->hwaddr.bytes);

  /*
   * /!\ WARNING /!\ creation order is important
   * We *must* create TX queues *before* RX queues, otherwise we will receive
   * the broadcast packets we sent
   */
  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((err = rdma_txq_init (vm, rd, i, txq_size)))
      return err;

  for (i = 0; i < rxq_num; i++)
    if ((err =
	 rdma_rxq_init (vm, rd, i, rxq_size,
			args->no_multi_seg, args->max_pktlen)))
      return err;
  if ((err = rdma_rxq_finalize (vm, rd)))
    return err;

  return 0;
}

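/* Resolve a sysfs path (e.g. /sys/class/net/<ifname>/device) to the PCI
 * address of the underlying device by reading the symlink target. */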
static uword
sysfs_path_to_pci_addr (char *path, vlib_pci_addr_t * addr)
{
  uword rv;
  unformat_input_t in;
  u8 *s;

  s = clib_sysfs_link_to_name (path);
  if (!s)
    return 0;

  unformat_init_string (&in, (char *) s, strlen ((char *) s));
  rv = unformat (&in, "%U", unformat_vlib_pci_addr, addr);
  unformat_free (&in);
  vec_free (s);
  return rv;
}

void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vnet_main_t *vnm = vnet_get_main ();
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd;
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  int n_devs;
  u8 *s;
  u16 qid;
  int i;

  args->rxq_size = args->rxq_size ? args->rxq_size : 1024;
  args->txq_size = args->txq_size ? args->txq_size : 1024;
  args->rxq_num = args->rxq_num ? args->rxq_num : 2;

  if (!is_pow2 (args->rxq_num))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error =
	clib_error_return (0, "rx queue number must be a power of two");
      goto err0;
    }

  if (args->rxq_size < VLIB_FRAME_SIZE || args->txq_size < VLIB_FRAME_SIZE ||
      args->rxq_size > 65535 || args->txq_size > 65535 ||
      !is_pow2 (args->rxq_size) || !is_pow2 (args->txq_size))
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0, "queue size must be a power of two "
				       "between %i and 65535",
				       VLIB_FRAME_SIZE);
      goto err0;
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error =
	clib_error_return_unix (0,
				"no RDMA devices available. Is the ib_uverbs module loaded?");
      goto err0;
    }

  /* get PCI address */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  if (sysfs_path_to_pci_addr ((char *) s, &pci_addr) == 0)
    {
      args->error =
	clib_error_return (0, "cannot find PCI address for device ");
      goto err1;
    }

  pool_get_zero (rm->devices, rd);
  rd->dev_instance = rd - rm->devices;
  rd->per_interface_next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  rd->linux_ifname = format (0, "%s", args->ifname);

  if (!args->name || 0 == args->name[0])
    rd->name = format (0, "%s/%d", args->ifname, rd->dev_instance);
  else
    rd->name = format (0, "%s", args->name);

  rd->pci = vlib_pci_get_device_info (vm, &pci_addr, &args->error);
  if (!rd->pci)
    goto err2;

  /* if we failed to parse NUMA node, default to 0 */
  if (-1 == rd->pci->numa_node)
    rd->pci->numa_node = 0;

  rd->pool = vlib_buffer_pool_get_default_for_numa (vm, rd->pci->numa_node);

  if (strncmp ((char *) rd->pci->driver_name, "mlx5_core", 9))
    {
      args->error =
	clib_error_return (0,
			   "invalid interface (only mlx5 supported for now)");
      goto err2;
    }

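  /* walk the verbs device list and open the one whose sysfs PCI address
   * matches the netdev we were given */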
  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;

      if (addr.as_u32 != rd->pci->addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }

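  /* probe mlx5 Direct Verbs support unless plain libibverbs mode was
   * requested; striding RQ is only enabled when the device capabilities
   * cover our buffer size and chain length limits */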
  if (args->mode != RDMA_MODE_IBV)
    {
      struct mlx5dv_context mlx5dv_attrs = { };
      mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;

      if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
	{
	  uword data_seg_log2_sz =
	    min_log2 (vlib_buffer_get_default_data_size (vm));

	  if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	    rd->flags |= RDMA_DEVICE_F_MLX5DV;

/* Enable striding RQ if neither multiseg nor striding rq
are explicitly disabled, and if the interface supports it.*/
	  if (!args->no_multi_seg && !args->disable_striding_rq
	      && data_seg_log2_sz <=
	      mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
	      && data_seg_log2_sz >=
	      mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ >=
	      mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
	      && RDMA_RXQ_MAX_CHAIN_LOG_SZ <=
	      mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
	    rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
	}
      else
	{
	  if (args->mode == RDMA_MODE_DV)
	    {
	      args->error = clib_error_return (0, "Direct Verbs mode not "
					       "supported on this interface");
	      goto err2;
	    }
	}
    }

  if ((args->error = rdma_dev_init (vm, rd, args)))
    goto err2;

  if ((args->error = rdma_register_interface (vnm, rd)))
    goto err2;

  if ((args->error = rdma_async_event_init (rd)))
    goto err3;

  rdma_update_state (vnm, rd, 1);

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, rd->hw_if_index);
  args->sw_if_index = rd->sw_if_index = sw->sw_if_index;
  /*
   * FIXME: add support for interrupt mode
   * vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, rd->hw_if_index);
   * hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
   */
  vnet_hw_interface_set_input_node (vnm, rd->hw_if_index,
				    rdma_input_node.index);
  vec_foreach_index (qid, rd->rxqs)
    vnet_hw_interface_assign_rx_thread (vnm, rd->hw_if_index, qid, ~0);

  vec_free (s);
  return;

err3:
  rdma_unregister_interface (vnm, rd);
err2:
  rdma_dev_cleanup (rd);
err1:
  ibv_free_device_list (dev_list);
  vec_free (s);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
err0:
  vlib_log_err (rm->log_class, "%U", format_clib_error, args->error);
}

void
rdma_delete_if (vlib_main_t * vm, rdma_device_t * rd)
{
  rdma_async_event_cleanup (rd);
  rdma_unregister_interface (vnet_get_main (), rd);
  rdma_dev_cleanup (rd);
}

static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_main_t *rm = &rdma_main;
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hi->dev_instance);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "device is in error state");

  if (is_up)
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index,
				   VNET_HW_INTERFACE_FLAG_LINK_UP);
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, rd->hw_if_index, 0);
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }
  return 0;
}

static void
rdma_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  rdma_main_t *rm = &rdma_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  rdma_device_t *rd = pool_elt_at_index (rm->devices, hw->dev_instance);
  rd->per_interface_next_index =
    ~0 ==
    node_index ? VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT :
    vlib_node_add_next (vlib_get_main (), rdma_input_node.index, node_index);
}

static char *rdma_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_rdma_tx_func_error
#undef _
};

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  .format_device = format_rdma_device,
  .format_device_name = format_rdma_device_name,
  .admin_up_down_function = rdma_interface_admin_up_down,
  .rx_redirect_to_node = rdma_set_interface_next_node,
  .tx_function_n_errors = RDMA_TX_N_ERROR,
  .tx_function_error_strings = rdma_tx_func_error_strings,
  .mac_addr_change_function = rdma_mac_change,
};
/* *INDENT-ON* */

clib_error_t *
rdma_init (vlib_main_t * vm)
{
  rdma_main_t *rm = &rdma_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  rm->log_class = vlib_log_register_class ("rdma", 0);

  /* vlib_buffer_t template */
  vec_validate_aligned (rm->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  for (int i = 0; i < tm->n_vlib_mains; i++)
    {
      rdma_per_thread_data_t *ptd =
	vec_elt_at_index (rm->per_thread_data, i);
      clib_memset (&ptd->buffer_template, 0, sizeof (vlib_buffer_t));
      ptd->buffer_template.flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
      ptd->buffer_template.ref_count = 1;
      vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_TX] = (u32) ~ 0;
    }

  return 0;
}

/* *INDENT-OFF* */
VLIB_INIT_FUNCTION (rdma_init) =
{
  .runs_after = VLIB_INITS ("pci_bus_init"),
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */