#define FLOW_IS_ETHERNET_CLASS(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP6) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

/* check if flow has a VLAN tag to match */
#define FLOW_HAS_VLAN_TAG(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))

/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
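
/* All rules built in this file are attached on the ingress path only;
   the rte_flow_attr above is passed unchanged to rte_flow_validate()
   and rte_flow_create() for every flow. */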
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}

static inline void
dpdk_flow_convert_rss_types (u64 type, u64 *dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64) 1 << (b))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s)                     \
  if (n != -1 && BIT_IS_SET (type, n)) \
    *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
}

static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum rte_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
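
/* RTE_ETH_HASH_FUNCTION_MAX doubles as an "invalid" sentinel: the caller
   in dpdk_flow_add() rejects the flow with VNET_FLOW_ERROR_NOT_SUPPORTED
   when it sees it. */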
static int
dpdk_flow_add (dpdk_device_t *xd, vnet_flow_t *f, dpdk_flow_entry_t *fe)
{
  struct rte_flow_item_eth eth[2] = {};
  struct rte_flow_item_ipv4 ip4[2] = {};
  struct rte_flow_item_ipv6 ip6[2] = {};
  struct rte_flow_item_udp udp[2] = {};
  struct rte_flow_item_tcp tcp[2] = {};
  struct rte_flow_item_gtp gtp[2] = {};
  struct rte_flow_item_l2tpv3oip l2tp[2] = {};
  struct rte_flow_item_esp esp[2] = {};
  struct rte_flow_item_ah ah[2] = {};
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;
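
  /* Each rte_flow_item_* above is a two-element array: index 0 is filled
     in as the item "spec" (the values to match) and index 1 as the item
     "mask" (which bits of the spec are significant), matching the
     spec/mask pairs rte_flow expects. */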
  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];
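
  /* The union overlays a rte_flow_item_raw header with enough trailing
     storage to hold a VXLAN header, so raw[0]/raw[1] can carry the
     spec/mask pattern bytes for the RAW item built in the VXLAN case
     below. */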
  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Layer 2, Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (flow_class == FLOW_ETHERNET_CLASS)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC fields are specified */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
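
  /* An all-0xFF mask selects every bit of the corresponding spec field,
     i.e. an exact match; fields left at zero in both spec and mask are
     wildcarded. */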
  /* currently only a single VLAN tag is supported; the VID itself is
     not matched */
  if (FLOW_HAS_VLAN_TAG (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_ETHERNET_CLASS (f))
    goto pattern_end;
  /* Layer 3, IP */
  vec_add2 (items, item, 1);
  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      if ((!ip4_ptr->src_addr.mask.as_u32) &&
          (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
          ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
          ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

          src_port = ip4_n_ptr->src_port.port;
          dst_port = ip4_n_ptr->dst_port.port;
          src_port_mask = ip4_n_ptr->src_port.mask;
          dst_port_mask = ip4_n_ptr->dst_port.mask;
        }

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
          (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
          (!ip6_ptr->protocol.mask))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
                       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
          clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
                       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
          clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
                       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
          clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
                       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
          ip6[0].hdr.proto = ip6_ptr->protocol.prot;
          ip6[1].hdr.proto = ip6_ptr->protocol.mask;
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

          src_port = ip6_n_ptr->src_port.port;
          dst_port = ip6_n_ptr->dst_port.port;
          src_port_mask = ip6_n_ptr->src_port.mask;
          dst_port_mask = ip6_n_ptr->dst_port.mask;
        }

      protocol = ip6_ptr->protocol.prot;
    }
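
  /* VPP already stores IP addresses in network byte order, so the as_u32
     / as_u8 values above can be copied into the rte header fields without
     a byte swap; only the host-order ports and tunnel IDs below need
     clib_host_to_net_u16/u32 conversion. */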
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* Layer 4 */
  vec_add2 (items, item, 1);
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
      l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
      break;
    case IP_PROTOCOL_IPSEC_ESP:
      item->type = RTE_FLOW_ITEM_TYPE_ESP;
      esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
      esp[1].hdr.spi = ~0;

      item->spec = esp;
      item->mask = esp + 1;
      break;
    case IP_PROTOCOL_IPSEC_AH:
      item->type = RTE_FLOW_ITEM_TYPE_AH;
      ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
      ah[1].spi = ~0;

      item->spec = ah;
      item->mask = ah + 1;
      break;
    case IP_PROTOCOL_TCP:
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = tcp;
          item->mask = tcp + 1;
        }
      break;
    case IP_PROTOCOL_UDP:
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }

      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
        {
          gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid);
          gtp[1].teid = ~0;

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_GTPC;
          item->spec = gtp;
          item->mask = gtp + 1;
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
        {
          gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid);
          gtp[1].teid = ~0;

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_GTPU;
          item->spec = gtp;
          item->mask = gtp + 1;
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
        {
          u32 vni = f->ip4_vxlan.vni;

          vxlan_header_t spec_hdr = {
            .flags = VXLAN_FLAGS_I,
            .vni_reserved = clib_host_to_net_u32 (vni << 8)
          };
          vxlan_header_t mask_hdr = {
            .flags = 0xff,
            .vni_reserved = clib_host_to_net_u32 (((u32) -1) << 8)
          };

          clib_memset (raw, 0, sizeof raw);
          raw[0].item.relative = 1;
          raw[0].item.length = vxlan_hdr_sz;

          clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
          raw[0].item.pattern = raw[0].val + raw_sz;
          clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
          raw[1].item.pattern = raw[1].val + raw_sz;

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_RAW;
          item->spec = raw;
          item->mask = raw + 1;
        }
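
      /* No VXLAN-specific item is used in this path: the VXLAN header is
         matched as opaque bytes with a RAW item. relative = 1 anchors the
         pattern right after the previous (UDP) item, and the VNI occupies
         the top 24 bits of vni_reserved, hence the << 8 shifts above. */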
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions */
  /* Only one 'fate' action (queue, drop or RSS) can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert vnet rss types to the DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      rss.types = rss_type;
      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
          RTE_ETH_HASH_FUNCTION_MAX)
        {
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
  if (fate == false)
    {
      /* if no fate was requested, add a passthru action to keep the
         rule valid */
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);
  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
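
/* rte_flow_validate() only asks the PMD whether it could accept the rule;
   nothing is programmed into hardware until rte_flow_create(), which
   returns the handle that rte_flow_destroy() later uses to remove the
   rule. Both calls fill xd->last_flow_error on failure, which
   format_dpdk_flow() below can display. */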
int
dpdk_flow_ops_fn (vnet_main_t *vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword *private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  /* a flow with no requested action is rejected up front */
  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* allocate a flow entry to track the DPDK handle */
  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* allocate a lookup entry; its pool index becomes the mark id
         carried in the packet descriptor */
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->index;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4:
    case VNET_FLOW_TYPE_IP6:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;
done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return 0;
}
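
/* Rx flow offload is effectively reference counted through the
   flow_entries pool: the flag is set on the first added flow (with a
   device reconfigure via dpdk_device_setup) and cleared again once the
   pool is empty. */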
u8 *
format_dpdk_flow (u8 *s, va_list *args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  fe = vec_elt_at_index (xd->flow_entries, private_data);

  if (!fe)
    return format (s, "unknown flow");

  s = format (s, "flow index: %d\n", fe->flow_index);

  return s;
}