FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Annotated source listing: RDMA plugin device code (excerpts; elided ranges are marked /* ... */).
#include <linux/if_link.h>
#include <linux/if_ether.h>
/* Toeplitz RSS hash key, handed to the RX hash QPs in rdma_rxq_finalize () */
static u8 rdma_rss_hash_key[] = {
  0x2c, 0xc6, 0x81, 0xd1,
  0x5b, 0xdb, 0xf4, 0xf7,
  0xfc, 0xa2, 0x83, 0x19,
  0xdb, 0x1a, 0x3e, 0x94,
  0x6b, 0x9e, 0x38, 0xd9,
  0x2c, 0x9c, 0x03, 0xd1,
  0xad, 0x99, 0x44, 0xa7,
  0xd9, 0x56, 0x3d, 0x59,
  0x06, 0x3c, 0x25, 0xf3,
  0xfc, 0x1f, 0xdc, 0x2a,
};
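The 40 bytes above are the key consumed by the Toeplitz hash function the device is configured with later (IBV_RX_HASH_FUNC_TOEPLITZ, see rdma_rxq_finalize () below). For illustration only, not code from this file, a minimal Toeplitz hash over an input tuple looks like the sketch below (VPP's u8/u32/u64 typedefs assumed); a 40-byte key covers inputs up to 36 bytes, enough for the IPv6 source/destination/ports tuple, and for IPv4 TCP the hashed input is the 12-byte (src ip, dst ip, src port, dst port) tuple.

/* Illustrative sketch: for every input bit that is set, XOR in the 32-bit
   window of the key starting at that bit position. */
static u32
toeplitz_hash (const u8 * key, const u8 * input, int input_len)
{
  u32 hash = 0;
  /* 40-bit window over the key; during iteration i it holds key bits
     [8*i .. 8*i+39], so every 32-bit sub-window for that byte is available */
  u64 window = ((u64) key[0] << 24) | ((u64) key[1] << 16) |
    ((u64) key[2] << 8) | key[3];
  for (int i = 0; i < input_len; i++)
    {
      window = ((window << 8) | key[i + 4]) & 0xffffffffffULL;
      for (int b = 7; b >= 0; b--)	/* input bits, MSB first */
	if (input[i] & (1 << b))
	  hash ^= (u32) (window >> (b + 1));
    }
  return hash;
}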
#define rdma_log__(lvl, dev, f, ...)                                        \
  do                                                                        \
    {                                                                       \
      vlib_log ((lvl), rdma_main.log_class, "%s: " f, (dev)->name,          \
		##__VA_ARGS__);                                             \
    }                                                                       \
  while (0)

#define rdma_log(lvl, dev, f, ...) \
  rdma_log__ ((lvl), (dev), "%s (%d): " f, strerror (errno), errno, ##__VA_ARGS__)
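Taken together, the two macros prefix every message with the device name and the current errno, for example (hypothetical device name and errno value):

/* rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");
   logs, when errno == ENOTSUP (95) and the device is named "rdma-0":
   "rdma-0: Operation not supported (95): ibv_create_flow() failed" */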
static struct ibv_flow *
rdma_rxq_init_flow (const rdma_device_t * rd, struct ibv_qp *qp,
		    const mac_address_t * mac, const mac_address_t * mask,
		    u16 ether_type, u32 flags)
{
  struct ibv_flow *flow;
  struct raw_eth_flow_attr
  {
    struct ibv_flow_attr attr;
    struct ibv_flow_spec_eth spec_eth;
  } __attribute__ ((packed)) fa;

  memset (&fa, 0, sizeof (fa));
  fa.attr.num_of_specs = 1;
  /* ... */
  fa.attr.flags = flags;
  fa.spec_eth.type = IBV_FLOW_SPEC_ETH;
  fa.spec_eth.size = sizeof (struct ibv_flow_spec_eth);

  memcpy (fa.spec_eth.val.dst_mac, mac, sizeof (fa.spec_eth.val.dst_mac));
  memcpy (fa.spec_eth.mask.dst_mac, mask, sizeof (fa.spec_eth.mask.dst_mac));

  if (ether_type)
    {
      fa.spec_eth.val.ether_type = ether_type;
      fa.spec_eth.mask.ether_type = 0xffff;
    }

  flow = ibv_create_flow (qp, &fa.attr);
  if (!flow)
    rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_create_flow() failed");

  return flow;
}
static u32
rdma_rxq_destroy_flow (const rdma_device_t * rd, struct ibv_flow **flow)
{
  /* ... */
  if (ibv_destroy_flow (*flow))
    {
      rdma_log (VLIB_LOG_LEVEL_ERR, rd, "ibv_destroy_flow() failed");
      return ~0;
    }
  *flow = 0;
  return 0;
}
static u32
rdma_dev_set_promisc (rdma_device_t * rd)
{
  /* ... destroy the current steering flows, install match-all flows ... */
  rd->flags |= RDMA_DEVICE_F_PROMISC;
  return 0;
}

static u32
rdma_dev_set_ucast (rdma_device_t * rd)
{
  /* ... destroy the current flows, then re-create them; unicast flows
     match the interface MAC exactly, eg. for IPv6: */
  rd->flow_ucast6 = rdma_rxq_init_flow (rd, rd->rx_qp6, /* ... */,
					ntohs (ETH_P_IPV6), 0);
  /* ... the multicast flows are created with
     IBV_FLOW_ATTR_FLAGS_DONT_TRAP so other consumers (eg. the Linux
     netdev) still receive multicast frames; same pattern on rx_qp4 with
     IBV_FLOW_ATTR_FLAGS_DONT_TRAP ... */
  rd->flags &= ~RDMA_DEVICE_F_PROMISC;
  return 0;
}
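The four handles involved here are device state (see the symbol index at the end of this page): flow_ucast4 and flow_mcast4 attach to rx_qp4, flow_ucast6 and flow_mcast6 to rx_qp6, so IPv4 and IPv6 traffic are steered to separately-hashed queue pairs. Switching to promiscuous mode swaps the exact-match unicast flows for match-all ones.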
static u32
rdma_dev_change_mtu (rdma_device_t * rd)
{
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "MTU change not supported");
  return ~0;
}

static u32
rdma_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
{
  /* ... dispatch ETHERNET_INTERFACE_FLAG_* to the helpers above ... */
  rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unknown flag %x requested", flags);
  return ~0;
}
static void
rdma_update_state (vnet_main_t * vnm, rdma_device_t * rd, int port)
{
  struct ibv_port_attr attr;

  if (ibv_query_port (rd->ctx, port, &attr))
    {
      /* ... report the interface as down ... */
      return;
    }

  switch (attr.state)
    {
    case IBV_PORT_ACTIVE:	/* fallthrough */
    case IBV_PORT_ACTIVE_DEFER:
      rd->flags |= RDMA_DEVICE_F_LINK_UP;
      break;
    default:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      break;
    }

  switch (attr.active_width)
    { /* ... decode the lane count ... */ }
  switch (attr.active_speed)
    { /* ... decode the per-lane rate, then report the product via
	 vnet_hw_interface_set_link_speed () ... */ }
}
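The two switches translate ibverbs port attributes into a link speed: active_width encodes the lane count, active_speed the per-lane rate. A sketch of the decode, assuming the standard ibverbs encodings (the exact scaling the driver applies is elided above):

/* Sketch only: standard ibverbs width/speed encodings. */
u32 width = 0, speed = 0;	/* speed in Mbps per lane in this sketch */
switch (attr.active_width)
  {
  case 1: width = 1;  break;	/* 1x */
  case 2: width = 4;  break;	/* 4x */
  case 4: width = 8;  break;	/* 8x */
  case 8: width = 12; break;	/* 12x */
  }
switch (attr.active_speed)
  {
  case 1:  speed = 2500;  break;	/* SDR */
  case 2:  speed = 5000;  break;	/* DDR */
  case 4:				/* QDR */
  case 8:  speed = 10000; break;	/* FDR10, ~10.3 Gbps rounded */
  case 16: speed = 14000; break;	/* FDR */
  case 32: speed = 25000; break;	/* EDR */
  }
/* link rate ~ width * speed (lane count times per-lane rate) */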
static clib_error_t *
rdma_async_event_read_ready (clib_file_t * f)
{
  /* ... look up the device from the file descriptor ... */
  int ret;
  struct ibv_async_event event;
  ret = ibv_get_async_event (rd->ctx, &event);
  if (ret < 0)
    return clib_error_return_unix (0, "ibv_get_async_event() failed");

  switch (event.event_type)
    {
    case IBV_EVENT_PORT_ACTIVE:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_PORT_ERR:
      rdma_update_state (vnm, rd, event.element.port_num);
      break;
    case IBV_EVENT_DEVICE_FATAL:
      rd->flags &= ~RDMA_DEVICE_F_LINK_UP;
      /* ... mark the interface down ... */
      break;
    default:
      rdma_log__ (VLIB_LOG_LEVEL_ERR, rd, "unhandled RDMA async event %d",
		  event.event_type);
      break;
    }

  ibv_ack_async_event (&event);
  return 0;
}
static clib_error_t *
rdma_async_event_init (rdma_device_t * rd)
{
  /* switch the async event fd to non-blocking before handing it to the
     main-loop file poller */
  ret = fcntl (rd->ctx->async_fd, F_GETFL);
  if (ret < 0)
    return clib_error_return_unix (0, "...");
  ret = fcntl (rd->ctx->async_fd, F_SETFL, ret | O_NONBLOCK);
  if (ret < 0)
    return clib_error_return_unix (0, "...");
  /* ... register rd->ctx->async_fd with clib_file_add () ... */
}
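ibv_get_async_event () normally blocks, so the fd has to be made non-blocking before the event loop polls it (the read/error callbacks registered via clib_file_add () are listed in the symbol index below). A minimal standalone version of the same pattern, assuming a valid struct ibv_context obtained from ibv_open_device ():

/* Sketch only, outside VPP: non-blocking ibverbs async event handling. */
#include <fcntl.h>
#include <infiniband/verbs.h>

static int
async_fd_setup (struct ibv_context *ctx)
{
  int flags = fcntl (ctx->async_fd, F_GETFL);
  if (flags < 0)
    return -1;
  return fcntl (ctx->async_fd, F_SETFL, flags | O_NONBLOCK);
}

static void
async_fd_drain (struct ibv_context *ctx)
{
  struct ibv_async_event event;
  /* returns -1 with errno == EAGAIN once no event is pending */
  while (ibv_get_async_event (ctx, &event) == 0)
    ibv_ack_async_event (&event);
}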
static void
rdma_dev_cleanup (rdma_device_t * rd)
{
  rdma_txq_t *txq;
  rdma_rxq_t *rxq;

#define _(fn, arg) if (arg) \
  { \
    int rv; \
    if ((rv = fn (arg))) \
      rdma_log (VLIB_LOG_LEVEL_DEBUG, rd, #fn "() failed (rv = %d)", rv); \
  }

  /* ... destroy the steering flows ... */
  _(ibv_dereg_mr, rd->mr);
  vec_foreach (txq, rd->txqs)
  {
    _(ibv_destroy_qp, txq->qp);
    _(ibv_destroy_cq, txq->cq);
  }
  vec_foreach (rxq, rd->rxqs)
  {
    _(ibv_destroy_wq, rxq->wq);
    _(ibv_destroy_cq, rxq->cq);
  }
  _(ibv_destroy_rwq_ind_table, rd->rx_rwq_ind_tbl);
  _(ibv_destroy_qp, rd->rx_qp6);
  _(ibv_destroy_qp, rd->rx_qp4);
  _(ibv_dealloc_pd, rd->pd);
  _(ibv_close_device, rd->ctx);
#undef _
  /* ... free the queue vectors and the device slot itself ... */
}
static clib_error_t *
rdma_rxq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc,
	       u8 no_multi_seg, u16 max_pktlen)
{
  rdma_rxq_t *rxq;
  struct ibv_wq_init_attr wqia;
  struct ibv_cq_init_attr_ex cqa = { };
  struct ibv_wq_attr wqa;
  struct ibv_cq_ex *cqex;
  struct mlx5dv_wq_init_attr dv_wqia = { };
  int is_mlx5dv = ! !(rd->flags & RDMA_DEVICE_F_MLX5DV);
  int is_striding = ! !(rd->flags & RDMA_DEVICE_F_STRIDING_RQ);

  /* ... allocate the rxq, size the CQ ... */

  if (is_mlx5dv)
    {
      /* request compressed (hash-format) CQEs from the direct verbs CQ */
      struct mlx5dv_cq_init_attr dvcq = { };
      dvcq.comp_mask = MLX5DV_CQ_INIT_ATTR_MASK_COMPRESSED_CQE;
      dvcq.cqe_comp_res_format = MLX5DV_CQE_RES_FORMAT_HASH;

      if ((cqex = mlx5dv_create_cq (rd->ctx, &cqa, &dvcq)) == 0)
	return clib_error_return_unix (0, "...");
    }
  else if ((cqex = ibv_create_cq_ex (rd->ctx, &cqa)) == 0)
    return clib_error_return_unix (0, "...");

  rxq->cq = ibv_cq_ex_to_cq (cqex);

  memset (&wqia, 0, sizeof (wqia));
  wqia.wq_type = IBV_WQT_RQ;
  wqia.max_wr = n_desc;

  if (is_striding)
    {
      /* each stride is one data segment; a packet spans a power-of-two
	 chain of strides within a single WQE */
      uword data_seg_log2_sz =
	min_log2 (vlib_buffer_get_default_data_size (vm));
      rxq->buf_sz = 1 << data_seg_log2_sz;
      /* ... */
      int max_chain_log_sz =
	max_pktlen ? max_log2 ((max_pktlen / rxq->buf_sz) + 1) :
	RDMA_RXQ_MAX_CHAIN_LOG_SZ;
      max_chain_log_sz = clib_max (max_chain_log_sz, 3);
      wqia.max_sge = 1 << max_chain_log_sz;
      dv_wqia.comp_mask = MLX5DV_WQ_INIT_ATTR_MASK_STRIDING_RQ;
      dv_wqia.striding_rq_attrs.two_byte_shift_en = 0;
      dv_wqia.striding_rq_attrs.single_wqe_log_num_of_strides =
	max_chain_log_sz;
      dv_wqia.striding_rq_attrs.single_stride_log_num_of_bytes =
	data_seg_log2_sz;
      wqia.max_wr >>= max_chain_log_sz;
    }
  else
    {
      /* legacy (non-striding) mode; rxq->buf_sz set from the buffer pool */
      int max_chain_sz =
	max_pktlen ? (max_pktlen / rxq->buf_sz) + 1 :
	RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ;
      int max_chain_log_sz = max_log2 (max_chain_sz);
      wqia.max_sge = 1 << max_chain_log_sz;
      /* ... */
    }
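Worked example of the striding arithmetic above, with illustrative numbers: 2048-byte data segments give data_seg_log2_sz = 11; with max_pktlen = 9000, max_pktlen / buf_sz = 4, so a packet needs at most 5 segments and max_log2 (5) = 3. The clib_max (..., 3) clamp enforces the hardware minimum of 2^3 strides per WQE anyway, so each WQE carries 8 strides and wqia.max_wr is shifted right by 3 (divided by 8) to keep the total number of posted buffers at n_desc.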
  if (is_mlx5dv)
    {
      if ((rxq->wq = mlx5dv_create_wq (rd->ctx, &wqia, &dv_wqia)))
	{
	  rxq->wq->events_completed = 0;
	  pthread_mutex_init (&rxq->wq->mutex, NULL);
	  pthread_cond_init (&rxq->wq->cond, NULL);
	}
      else
	return clib_error_return_unix (0, "...");
    }
  else if ((rxq->wq = ibv_create_wq (rd->ctx, &wqia)) == 0)
    return clib_error_return_unix (0, "...");

  memset (&wqa, 0, sizeof (wqa));
  wqa.attr_mask = IBV_WQ_ATTR_STATE;
  wqa.wq_state = IBV_WQS_RDY;
  if (ibv_modify_wq (rxq->wq, &wqa) != 0)
    return clib_error_return_unix (0, "...");

  if (is_mlx5dv)
    {
      struct mlx5dv_obj obj = { };
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_rwq dv_rwq;
      u64 qw0, qw0_nullseg;

      /* ... */
      obj.rwq.in = rxq->wq;
      obj.rwq.out = &dv_rwq;

      if ((mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_RWQ)))
	return clib_error_return_unix (0, "...");

      /* direct access to the CQ/RWQ rings and doorbell records */
      rxq->cq_db = (volatile u32 *) dv_cq.dbrec;
      rxq->cqn = dv_cq.cqn;
      /* ... */
      rxq->wq_db = (volatile u32 *) dv_rwq.dbrec;
      /* ... */

      /* pre-compute the first quadword of RX data segments: byte count
	 plus the local memory key */
      qw0 = clib_host_to_net_u32 (rxq->buf_sz);
      /* ... */
      qw0 |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      qw0_nullseg |= (u64) clib_host_to_net_u32 (rd->lkey) << 32;
      /* ... */
    }

  /* ... */
  for (int i = 0; i < n_desc; i++)
    /* ... seed the ring with descriptors ... */;

  return 0;
}
static uint64_t
rdma_rss42ibv (const rdma_rss4_t rss4)
{
  switch (rss4)
    {
      /* ... ip: */
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4;
      /* ... ip+udp: */
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
	IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
      /* ... default, ip+tcp: */
      return IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
	IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
    }
  /* ... */
}

static uint64_t
rdma_rss62ibv (const rdma_rss6_t rss6)
{
  switch (rss6)
    {
      /* ... ip: */
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6;
      /* ... ip+udp: */
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
	IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP;
      /* ... default, ip+tcp: */
      return IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6 |
	IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP;
    }
  /* ... */
}
static clib_error_t *
rdma_rxq_finalize (vlib_main_t * vm, rdma_device_t * rd)
{
  struct ibv_rwq_ind_table_init_attr rwqia;
  struct ibv_qp_init_attr_ex qpia;
  struct ibv_wq **ind_tbl;
  const u32 rxq_sz = vec_len (rd->rxqs);
  u32 ind_tbl_sz = rxq_sz;
  u32 i;

  if (!is_pow2 (ind_tbl_sz))
    {
      /* the indirection table size must be a power of two: fall back to
	 the device maximum */
      struct ibv_device_attr_ex attr;
      if (ibv_query_device_ex (rd->ctx, 0, &attr))
	return clib_error_return_unix (0, "...");
      ind_tbl_sz = attr.rss_caps.max_rwq_indirection_table_size;
      if (ind_tbl_sz < rxq_sz)
	return clib_error_create (/* ... */ "max indirection table size (%d)",
				  ind_tbl_sz);
    }

  /* fill the indirection table round-robin over the RX work queues */
  ind_tbl = vec_new (struct ibv_wq *, ind_tbl_sz);
  /* ... vec_foreach_index / vec_elt_at_index over rd->rxqs ... */

  memset (&rwqia, 0, sizeof (rwqia));
  /* ... */
  rwqia.ind_tbl = ind_tbl;
  /* ... create rd->rx_rwq_ind_tbl, then vec_free (ind_tbl) ... */

  memset (&qpia, 0, sizeof (qpia));
  qpia.qp_type = IBV_QPT_RAW_PACKET;
  qpia.comp_mask =
    IBV_QP_INIT_ATTR_PD | IBV_QP_INIT_ATTR_IND_TABLE |
    IBV_QP_INIT_ATTR_RX_HASH;
  /* ... pd, indirection table, rdma_rss_hash_key ... */
  qpia.rx_hash_conf.rx_hash_function = IBV_RX_HASH_FUNC_TOEPLITZ;

  /* one hash QP per address family, with the field masks computed above */
  if ((rd->rx_qp4 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "...");
  /* ... switch rx_hash_fields_mask to the IPv6 selection ... */
  if ((rd->rx_qp6 = ibv_create_qp_ex (rd->ctx, &qpia)) == 0)
    return clib_error_return_unix (0, "...");
  /* ... */
}
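The round-robin fill makes a non-power-of-two queue count work: suppose 3 RX queues and a device maximum of 8 indirection entries, then the table becomes {0, 1, 2, 0, 1, 2, 0, 1}, so Toeplitz hash buckets spread over all queues (queues 0 and 1 simply receive one extra bucket each).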
static clib_error_t *
rdma_txq_init (vlib_main_t * vm, rdma_device_t * rd, u16 qid, u32 n_desc)
{
  rdma_txq_t *txq;
  struct ibv_qp_init_attr qpia;
  struct ibv_qp_attr qpa;
  int qp_flags;

  /* ... */
  if ((txq->cq = ibv_create_cq (rd->ctx, n_desc, NULL, NULL, 0)) == 0)
    return clib_error_return_unix (0, "...");

  memset (&qpia, 0, sizeof (qpia));
  qpia.send_cq = txq->cq;
  qpia.recv_cq = txq->cq;
  qpia.cap.max_send_wr = n_desc;
  qpia.cap.max_send_sge = 1;
  qpia.qp_type = IBV_QPT_RAW_PACKET;

  if ((txq->qp = ibv_create_qp (rd->pd, &qpia)) == 0)
    return clib_error_return_unix (0, "...");

  /* walk the QP through INIT -> RTR -> RTS */
  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE | IBV_QP_PORT;
  qpa.qp_state = IBV_QPS_INIT;
  /* ... */
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "...");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "...");

  memset (&qpa, 0, sizeof (qpa));
  qp_flags = IBV_QP_STATE;
  qpa.qp_state = IBV_QPS_RTS;
  if (ibv_modify_qp (txq->qp, &qpa, qp_flags) != 0)
    return clib_error_return_unix (0, "...");
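Even raw packet QPs must walk the standard verbs state machine, RESET -> INIT -> RTR -> RTS, one ibv_modify_qp () call per step. A compact standalone sketch of the same sequence, assuming a QP from ibv_create_qp () and port number 1:

/* Sketch only: drive a raw packet QP into the send-ready state. */
#include <string.h>
#include <infiniband/verbs.h>

static int
qp_to_rts (struct ibv_qp *qp)
{
  struct ibv_qp_attr qpa;

  memset (&qpa, 0, sizeof (qpa));
  qpa.qp_state = IBV_QPS_INIT;
  qpa.port_num = 1;		/* assumption for this sketch */
  if (ibv_modify_qp (qp, &qpa, IBV_QP_STATE | IBV_QP_PORT))
    return -1;

  memset (&qpa, 0, sizeof (qpa));
  qpa.qp_state = IBV_QPS_RTR;
  if (ibv_modify_qp (qp, &qpa, IBV_QP_STATE))
    return -1;

  memset (&qpa, 0, sizeof (qpa));
  qpa.qp_state = IBV_QPS_RTS;
  return ibv_modify_qp (qp, &qpa, IBV_QP_STATE) ? -1 : 0;
}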
  if (rd->flags & RDMA_DEVICE_F_MLX5DV)
    {
      rdma_mlx5_wqe_t *tmpl = &txq->dv_wqe_tmpl;
      struct mlx5dv_cq dv_cq;
      struct mlx5dv_qp dv_qp;
      struct mlx5dv_obj obj = { };

      /* ... fill obj.cq / obj.qp in and out pointers ... */
      if (mlx5dv_init_obj (&obj, MLX5DV_OBJ_CQ | MLX5DV_OBJ_QP))
	return clib_error_return_unix (0, "...");

      /* sanity-check the direct-verbs layout before bypassing libibverbs */
      if (/* ... */
	  || sizeof (struct mlx5_cqe64) != dv_cq.cqe_size
	  || (uword) dv_cq.buf % sizeof (struct mlx5_cqe64))
	return clib_error_return (0, "...");

      /* pre-build the send WQE template: control segment ... */
      mlx5dv_set_ctrl_seg (&tmpl->ctrl, 0, MLX5_OPCODE_SEND, 0,
			   /* ... */);
      /* ... Ethernet inline header segment, then the data segment */
      mlx5dv_set_data_seg (&tmpl->dseg, 0, rd->lkey, 0);
    }
  return 0;
}
static clib_error_t *
rdma_dev_init (vlib_main_t * vm, rdma_device_t * rd,
	       rdma_create_if_args_t * args)
{
  /* ... */
  if ((rd->pd = ibv_alloc_pd (rd->ctx)) == 0)
    return clib_error_return_unix (0, "...");

  /* register the VPP buffer memory so the NIC can DMA into it */
  if ((rd->mr = ibv_reg_mr (rd->pd, /* ... buffer memory base, size ... */,
			    IBV_ACCESS_LOCAL_WRITE)) == 0)
    return clib_error_return_unix (0, "...");

  /* ... */
  for (i = 0; i < rxq_num; i++)
    /* ... rdma_rxq_init () per queue, then rdma_rxq_finalize () ... */;
  /* ... */
}
void
rdma_create_if (vlib_main_t * vm, rdma_create_if_args_t * args)
{
  vlib_pci_addr_t pci_addr;
  struct ibv_device **dev_list;
  rdma_device_t *rd;
  u8 *s = 0;
  int i, n_devs;

  /* ... validate arguments ... */
  if (/* ... queue sizes not a power of two ... */)
    {
      args->rv = VNET_API_ERROR_INVALID_VALUE;
      args->error = clib_error_return (0,
				       "queue size must be a power of two "
				       "between %d and 65535", /* ... */);
      /* ... */
    }

  dev_list = ibv_get_device_list (&n_devs);
  if (n_devs == 0)
    {
      args->error = clib_error_return_unix (0,
	"no RDMA devices available. Is the ib_uverbs module loaded?");
      /* ... */
    }

  /* map the Linux netdev name to its PCI address via sysfs */
  s = format (0, "/sys/class/net/%s/device%c", args->ifname, 0);
  /* ... sysfs_path_to_pci_addr ((char *) s, &pci_addr) ... */

  /* derive the interface name when none was given */
  if (!args->name || 0 == args->name[0])
    /* ... */;

  /* ... bail out unless the PCI device is an mlx5 NIC:
     args->error = clib_error_return (0,
       "invalid interface (only mlx5 supported for now)"); ... */
  /* scan the ibverbs devices for the one whose PCI address matches */
  for (i = 0; i < n_devs; i++)
    {
      vlib_pci_addr_t addr;

      vec_reset_length (s);
      s = format (s, "%s/device%c", dev_list[i]->dev_path, 0);

      if (sysfs_path_to_pci_addr ((char *) s, &addr) == 0)
	continue;
      if (addr.as_u32 != pci_addr.as_u32)
	continue;

      if ((rd->ctx = ibv_open_device (dev_list[i])))
	break;
    }
  /* probe mlx5 direct verbs and striding RQ support */
  {
    struct mlx5dv_context mlx5dv_attrs = { };
    mlx5dv_attrs.comp_mask |= MLX5DV_CONTEXT_MASK_STRIDING_RQ;

    if (mlx5dv_query_device (rd->ctx, &mlx5dv_attrs) == 0)
      {
	uword data_seg_log2_sz =
	  min_log2 (vlib_buffer_get_default_data_size (vm));

	if ((mlx5dv_attrs.flags & MLX5DV_CONTEXT_FLAGS_CQE_V1))
	  rd->flags |= RDMA_DEVICE_F_MLX5DV;

	/* enable the striding RQ only when the buffer size and chain
	   length fit within the device stride limits */
	if (/* ... */
	    && data_seg_log2_sz <=
	    mlx5dv_attrs.striding_rq_caps.max_single_stride_log_num_of_bytes
	    && data_seg_log2_sz >=
	    mlx5dv_attrs.striding_rq_caps.min_single_stride_log_num_of_bytes
	    && /* ... chain length bounded below by ... */
	    mlx5dv_attrs.striding_rq_caps.min_single_wqe_log_num_of_strides
	    && /* ... and above by ... */
	    mlx5dv_attrs.striding_rq_caps.max_single_wqe_log_num_of_strides)
	  rd->flags |= RDMA_DEVICE_F_STRIDING_RQ;
      }
    else
      {
	args->error = clib_error_return (0, /* ... */
					 "supported on this interface");
	/* ... */
      }
  }

  /* ... register the ethernet interface, start the queues ... */
  ibv_free_device_list (dev_list);
  /* ... on failure: */
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  /* ... */
}
static clib_error_t *
rdma_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  /* ... */
  if (rd->flags & RDMA_DEVICE_F_ERROR)
    return clib_error_return (0, "...");

  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      rd->flags |= RDMA_DEVICE_F_ADMIN_UP;
      /* ... */
    }
  else
    {
      /* ... */
      rd->flags &= ~RDMA_DEVICE_F_ADMIN_UP;
    }
  return 0;
}
VNET_DEVICE_CLASS (rdma_device_class) =
{
  .name = "RDMA interface",
  /* ... format functions, admin up/down and flag-change callbacks ... */
};
Referenced symbols (doxygen cross-reference index):

#define vec_reset_length(v): Reset vector length to zero (NULL-pointer tolerant).
#define ETHERNET_INTERFACE_FLAG_DEFAULT_L3
static clib_error_t * rdma_dev_init(vlib_main_t *vm, rdma_device_t *rd, rdma_create_if_args_t *args)
#define VNET_HW_IF_RXQ_THREAD_ANY
clib_file_main_t file_main
__clib_export u8 * clib_sysfs_link_to_name(char *link)
struct ibv_flow * flow_mcast4
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static clib_error_t * rdma_async_event_init(rdma_device_t *rd)
#define foreach_rdma_tx_func_error
#define vec_new(T, N): Create new vector of given type and length (unspecified alignment, no header).
VNET_HW_INTERFACE_CAP_SUPPORTS_MAC_FILTER (enum value)
void rdma_create_if(vlib_main_t *vm, rdma_create_if_args_t *args)
static clib_error_t * rdma_async_event_error_ready(clib_file_t *f)
static void rdma_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
static void rdma_async_event_cleanup(rdma_device_t *rd)
u32 ethernet_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
struct ibv_rwq_ind_table * rx_rwq_ind_tbl
void rdma_delete_if(vlib_main_t *vm, rdma_device_t *rd)
#define pool_elt_at_index(p, i): Returns pointer to element at given index.
vnet_device_class_t rdma_device_class
vlib_log_class_t vlib_log_register_class(char *class, char *subclass)
static uword sysfs_path_to_pci_addr(char *path, vlib_pci_addr_t *addr)
#define clib_error_return(e, args...)
VNET_DEVICE_CLASS(af_xdp_device_class)
volatile u32 * dv_sq_dbrec
clib_file_function_t * read_function
static void rdma_dev_cleanup(rdma_device_t *rd)
static clib_error_t * rdma_txq_init(vlib_main_t *vm, rdma_device_t *rd, u16 qid, u32 n_desc)
unformat_function_t unformat_vlib_pci_addr
static uint64_t rdma_rss42ibv(const rdma_rss4_t rss4)
VNET_SW_INTERFACE_FLAG_ADMIN_UP (enum value)
#define pool_put(P, E): Free an object E in pool P.
vlib_buffer_main_t * buffer_main
vlib_main_t * vm
VNET_HW_INTERFACE_FLAG_LINK_UP (enum value)
struct ibv_flow * flow_mcast6
static u32 rdma_rxq_destroy_flow(const rdma_device_t *rd, struct ibv_flow **flow)
static clib_error_t * rdma_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
#define RDMA_RXQ_LEGACY_MODE_MAX_CHAIN_SZ
static void vnet_hw_interface_set_link_speed(vnet_main_t *vnm, u32 hw_if_index, u32 link_speed)
VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT (enum value)
rdma_per_thread_data_t * per_thread_data
#define vlib_log_err(...)
static u32 rdma_dev_change_mtu(rdma_device_t *rd)
__clib_export u8 * format_clib_error(u8 *s, va_list *va)
#define rdma_log(lvl, dev, f,...)
static uword max_log2(uword x)
volatile u32 * dv_cq_dbrec
#define vec_elt(v, i): Get vector value at index i.
static u32 rdma_dev_set_promisc(rdma_device_t *rd)
static clib_error_t * rdma_rxq_init(vlib_main_t *vm, rdma_device_t *rd, u16 qid, u32 n_desc, u8 no_multi_seg, u16 max_pktlen)
static uint64_t rdma_rss62ibv(const rdma_rss6_t rss6)
#define clib_error_create(args...)
#define vec_len(v): Number of elements in vector (rvalue-only, NULL tolerant).
format_function_t format_rdma_device_name
#define RDMA_TXQ_BUF_SZ(txq)
vlib_pci_device_info_t * pci
vlib_log_class_t log_class
#define vec_elt_at_index(v, i): Get vector value at index i, checking that i is in bounds.
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
vnet_main_t * vnet_get_main(void)
#define RDMA_RXQ_MAX_CHAIN_LOG_SZ
static void rdma_unregister_interface(vnet_main_t *vnm, rdma_device_t *rd)
u8 opcode_cqefmt_se_owner
#define rdma_log__(lvl, dev, f,...)
#define vec_validate_aligned(V, I, A): Make sure vector is long enough for given index (no header, specified alignment).
#define vec_foreach_index(var, v): Iterate over vector indices.
void vnet_hw_if_update_runtime_data(vnet_main_t *vnm, u32 hw_if_index)
#define STATIC_ASSERT_SIZEOF(d, s)
#define ETHERNET_INTERFACE_FLAG_MTU
u32 async_event_clib_file_index
volatile u8 ref_count: Reference count for this buffer.
vlib_pci_device_info_t * vlib_pci_get_device_info(vlib_main_t *vm, vlib_pci_addr_t *addr, clib_error_t **error)
#define CLIB_CACHE_LINE_BYTES
clib_error_t * rdma_init(vlib_main_t *vm)
#define vec_free(V): Free vector's memory (no header).
#define MLX5_ETH_L2_INLINE_HEADER_SIZE
static char * rdma_tx_func_error_strings[]
static void ethernet_mac_address_generate(u8 *mac)
rdma_mlx5_wqe_t * dv_sq_wqes
static u8 rdma_rss_hash_key[]
static clib_error_t * rdma_register_interface(vnet_main_t *vnm, rdma_device_t *rd)
static void rdma_update_state(vnet_main_t *vnm, rdma_device_t *rd, int port)
static vnet_sw_interface_t * vnet_get_hw_sw_interface(vnet_main_t *vnm, u32 hw_if_index)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define VLIB_INIT_FUNCTION(x)
void ethernet_delete_interface(vnet_main_t *vnm, u32 hw_if_index)
static clib_error_t * rdma_async_event_read_ready(clib_file_t *f)
static clib_error_t * rdma_mac_change(vnet_hw_interface_t *hw, const u8 *old, const u8 *new)
u32 per_interface_next_index
#define vec_foreach(var, vec): Vector iterator.
#define clib_error_return_unix(e, args...)
static uword clib_file_add(clib_file_main_t *um, clib_file_t *template)
struct ibv_flow * flow_ucast4
vlib_buffer_t buffer_template
static u8 vlib_buffer_pool_get_default_for_numa(vlib_main_t *vm, u32 numa_node)
u32 vnet_hw_if_register_rx_queue(vnet_main_t *vnm, u32 hw_if_index, u32 queue_id, u32 thread_index)
static vlib_main_t * vlib_get_main(void)
#define pool_get_zero(P, E): Allocate an object E from a pool P and zero it.
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
clib_error_t * vlib_init_function_t (struct vlib_main_t *vm)
#define ETHERNET_INTERFACE_FLAG_ACCEPT_ALL
u16 n_total_additional_segs
static void clib_file_del_by_index(clib_file_main_t *um, uword index)
#define clib_error_free(e)
struct mlx5_cqe64 * dv_cq_cqes
static u32 rdma_dev_set_ucast(rdma_device_t *rd)
clib_file_function_t * error_function
static uword min_log2(uword x)
static void vlib_pci_free_device_info(vlib_pci_device_info_t *di)
#define RDMA_TXQ_DV_INVALID_ID
static vlib_thread_main_t * vlib_get_thread_main()
static uword is_pow2(uword x)
struct ibv_flow * flow_ucast6
static u32 rdma_flag_change(vnet_main_t *vnm, vnet_hw_interface_t *hw, u32 flags)
#define vlib_log_emerg(...)
format_function_t format_rdma_device
clib_error_t * ethernet_register_interface(vnet_main_t *vnm, u32 dev_class_index, u32 dev_instance, const u8 *address, u32 *hw_if_index_return, ethernet_flag_change_function_t flag_change)
void vnet_hw_if_set_input_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
u32 flags: buffer flags (VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, ...)
static struct ibv_flow * rdma_rxq_init_flow(const rdma_device_t *rd, struct ibv_qp *qp, const mac_address_t *mac, const mac_address_t *mask, u16 ether_type, u32 flags)
static_always_inline void mac_address_from_bytes(mac_address_t *mac, const u8 *bytes)
static clib_error_t * rdma_rxq_finalize(vlib_main_t *vm, rdma_device_t *rd)
vl_api_wireguard_peer_flags_t flags