#define FLOW_IS_ETHERNET_CLASS(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)
#define FLOW_IS_IPV4_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
#define FLOW_IS_IPV6_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP6) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
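
/* check if flow is VLAN sensitive */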
#define FLOW_HAS_VLAN_TAG(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
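
/* check if flow is L3 type */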
#define FLOW_IS_L3_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP6))
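
/* check if flow is L4 type */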
#define FLOW_IS_L4_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
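
/* check if flow is L4 tunnel type */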
#define FLOW_IS_L4_TUNNEL_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))
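
/* constant structs: every rule is programmed on the ingress path */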
static const struct rte_flow_attr ingress = { .ingress = 1 };
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
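
/* translate the vnet RSS type bitmap into the DPDK RSS hash-field bitmap */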
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & ((u64) 1 << (b)))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s) \
  if (n != -1 && BIT_IS_SET (type, n)) \
    *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
}
static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum rte_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
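
/* translate a vnet flow into a DPDK rte_flow rule and install it;
   element [0] of each item array below holds the match spec,
   element [1] the corresponding mask */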
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
  struct rte_flow_item_esp esp[2] = { };
  struct rte_flow_item_ah ah[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;
  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;
  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items: Layer 2, Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;

  if (flow_class == FLOW_ETHERNET_CLASS)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
	{
	  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
			    sizeof (eth[0].dst));
	  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
	}

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
	{
	  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
			    sizeof (eth[0].src));
	  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
	}

      if (te->eth_hdr.type)
	{
	  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
	  eth[1].type = clib_host_to_net_u16 (0xFFFF);
	}

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
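
  /* currently only a single empty vlan tag is supported */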
  if (FLOW_HAS_VLAN_TAG (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_ETHERNET_CLASS (f))
    goto pattern_end;

  /* Layer 3, IP */
  vec_add2 (items, item, 1);
  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      if ((!ip4_ptr->src_addr.mask.as_u32) &&
	  (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
	  ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
	  ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
	  ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
	  ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
	  ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;

	  item->spec = ip4;
	  item->mask = ip4 + 1;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

	  src_port = ip4_n_ptr->src_port.port;
	  dst_port = ip4_n_ptr->dst_port.port;
	  src_port_mask = ip4_n_ptr->src_port.mask;
	  dst_port_mask = ip4_n_ptr->dst_port.mask;
	}

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
	  (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
	  (ip6_ptr->dst_addr.mask.as_u64[0] == 0) &&
	  (ip6_ptr->dst_addr.mask.as_u64[1] == 0) && (!ip6_ptr->protocol.mask))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
		       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
	  clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
		       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
	  clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
		       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
	  clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
		       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
	  ip6[0].hdr.proto = ip6_ptr->protocol.prot;
	  ip6[1].hdr.proto = ip6_ptr->protocol.mask;

	  item->spec = ip6;
	  item->mask = ip6 + 1;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

	  src_port = ip6_n_ptr->src_port.port;
	  dst_port = ip6_n_ptr->dst_port.port;
	  src_port_mask = ip6_n_ptr->src_port.mask;
	  dst_port_mask = ip6_n_ptr->dst_port.mask;
	}

      protocol = ip6_ptr->protocol.prot;
    }
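
  /* L3-only flows are complete at this point; everything else adds an
     L4 item selected by the IP protocol field */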
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* Layer 4 */
  vec_add2 (items, item, 1);
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
      l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      item->type = RTE_FLOW_ITEM_TYPE_ESP;
      esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
      esp[1].hdr.spi = ~0;

      item->spec = esp;
      item->mask = esp + 1;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      item->type = RTE_FLOW_ITEM_TYPE_AH;
      ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
      ah[1].spi = ~0;

      item->spec = ah;
      item->mask = ah + 1;
      break;
    case IP_PROTOCOL_TCP:
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);

	  item->spec = tcp;
	  item->mask = tcp + 1;
	}
      break;
    case IP_PROTOCOL_UDP:
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);

	  item->spec = udp;
	  item->mask = udp + 1;
	}
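
      /* handle the UDP tunnels */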
      if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
	{
	  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid);
	  gtp[1].teid = ~0;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_GTPC;
	  item->spec = gtp;
	  item->mask = gtp + 1;
	}
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
	{
	  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid);
	  gtp[1].teid = ~0;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
	  item->spec = gtp;
	  item->mask = gtp + 1;
	}
      else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
	{
	  u32 vni = f->ip4_vxlan.vni;

	  vxlan_header_t spec_hdr = {
	    .flags = VXLAN_FLAGS_I,
	    .vni_reserved = clib_host_to_net_u32 (vni << 8)
	  };
	  vxlan_header_t mask_hdr = {
	    .flags = 0xff,
	    .vni_reserved = clib_host_to_net_u32 (((u32) -1) << 8)
	  };

	  clib_memset (raw, 0, sizeof raw);
	  raw[0].item.relative = 1;
	  raw[0].item.length = vxlan_hdr_sz;

	  clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
	  raw[0].item.pattern = raw[0].val + raw_sz;
	  clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
	  raw[1].item.pattern = raw[1].val + raw_sz;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_RAW;
	  item->spec = raw;
	  item->mask = raw + 1;
	}
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
pattern_end:
  /* an extra ESP item is needed to hash on the ESP SPI */
  if ((f->actions & VNET_FLOW_ACTION_RSS) &&
      (f->rss_types & (1ULL << VNET_FLOW_RSS_TYPES_ESP)))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_ESP;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert the vnet RSS types to the DPDK bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
      rss.types = rss_type;
      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
	  RTE_ETH_HASH_FUNCTION_MAX)
	{
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      fate = true;
    }
  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
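
  /* validate the rule first; only create it on the device if
     validation succeeds */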
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
			  &xd->last_flow_error);
  if (rv)
    {
      if (rv == -EINVAL)
	rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
	rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
	rv = VNET_FLOW_ERROR_INTERNAL;

      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);
  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
static int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* flow actions are required */
  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* allocate a lookup entry and install it in the lookup table */
      pool_get (xd->flow_lookup_entries, fle);
      fe->mark = fle - xd->flow_lookup_entries;
      clib_memset (fle, -1, sizeof (*fle));

      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->index;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4:
    case VNET_FLOW_TYPE_IP6:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
		  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
		  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
		  xd->last_flow_error.message ? xd->last_flow_error.message :
		  "n/a");
      return s;
    }

  fe = vec_elt_at_index (xd->flow_entries, private_data);

  if (!fe)
    return format (s, "unknown flow");