/* Layer classification helpers for vnet flow types. */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))

/* Tunnel flow types are contiguous in the vnet_flow_type_t enum, so the
   range check needs both bounds (logical AND, not OR). */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))

/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
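The macros above only classify a flow by its vnet type. As a rough
illustration of how that classification steers the pattern building further
down, here is a small helper; it is a sketch written for this listing and is
not part of the original file:

/* Illustrative only: does this flow type need an outer IP pattern item? */
static inline int
flow_needs_outer_ip_item (const vnet_flow_t * f)
{
  if (FLOW_IS_L2_LAYER (f))
    return 0;                   /* pure L2 match: ETH item only */
  /* n-tuple flows and all tunnel flows carry an outer IPv4/IPv6 item */
  return FLOW_IS_L4_LAYER (f) || FLOW_IS_L4_TUNNEL_LAYER (f);
}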
static bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
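  /* Note on the arrays above: every xxx[2] pair follows the rte_flow
   * spec/mask convention - element [0] holds the value to match, element [1]
   * the bit mask applied to it.  A hypothetical standalone equivalent of a
   * single pattern entry (dst_be32 is a made-up variable):
   *
   *   struct rte_flow_item_ipv4 spec = { .hdr.dst_addr = dst_be32 };
   *   struct rte_flow_item_ipv4 mask = { .hdr.dst_addr = 0xffffffff };
   *   struct rte_flow_item it = { .type = RTE_FLOW_ITEM_TYPE_IPV4,
   *                               .spec = &spec, .mask = &mask };
   *
   * 'items' and 'actions' are CLIB vectors grown one element at a time with
   * vec_add2 () and handed to rte_flow_validate ()/rte_flow_create () at the
   * end of this function. */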
  bool fate = false;
  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };
  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];
  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match ethernet header in all cases */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      /* add src/dst mac and ethertype to spec and mask only when set */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
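  /* For reference, a hypothetical vnet flow that takes the branch above
   * (values made up; eth_hdr.type stays in host byte order and is swapped
   * with clib_host_to_net_u16 () here):
   *
   *   vnet_flow_t flow = {
   *     .type = VNET_FLOW_TYPE_ETHERNET,
   *     .actions = VNET_FLOW_ACTION_MARK,
   *     .ethernet.eth_hdr.type = 0x88f7,   // PTP over ethernet
   *   };
   */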
  /* a single (empty) VLAN item is added for n-tuple flows */
  if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;
  /* outer IPv6 header */
  if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
          !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
          clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;

      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
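  /* A hypothetical IPv4 5-tuple flow feeding the n-tuple branch above,
   * shown with the spec/mask pairs it produces (addresses and ports are
   * examples only):
   *
   *   src 10.0.0.0/24, any port -> ip4[0].hdr.src_addr = 10.0.0.0
   *                                ip4[1].hdr.src_addr = 255.255.255.0
   *   dst 10.0.0.1/32, port 80  -> ip4[0].hdr.dst_addr = 10.0.0.1
   *                                ip4[1].hdr.dst_addr = 255.255.255.255
   *                                dst_port = 80, dst_port_mask = 0xffff
   *   protocol TCP              -> the TCP item below carries the ports
   */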
  if (protocol == IP_PROTOCOL_UDP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = tcp;
          item->mask = tcp + 1;
        }
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* tunnel header per flow type */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
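  /* The raw item above matches the 8-byte VXLAN header that follows the
   * outer UDP header (relative = 1).  Layout for reference:
   *
   *   byte 0    : flags (0x08 = VNI present)
   *   bytes 1-3 : reserved
   *   bytes 4-6 : VNI
   *   byte 7    : reserved
   *
   * which is why the 24-bit VNI is shifted left by 8 before the
   * clib_host_to_net_u32 () conversion, and why the mask uses ~0 << 8. */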
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
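  /* The GTP items only carry the TEID here; the PMD distinguishes GTP-C
   * from GTP-U by the item type, which conventionally corresponds to an
   * outer UDP destination port of 2123 (GTP-C) or 2152 (GTP-U).  A teid
   * mask of ~0 requests an exact TEID match. */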
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IPv4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
        {
          /* inner IPv6 header */
          vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IPv4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
        {
          /* inner IPv6 header */
          vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
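  /* Putting the tunnel cases together, the pattern chain built for e.g.
   * VNET_FLOW_TYPE_IP6_GTPU_IP4 ends up as (illustrative):
   *
   *   ETH -> IPV6 (outer) -> UDP (dst port) -> GTPU (TEID) -> IPV4 (inner) -> END
   *
   * with each item carrying its own spec/mask pair, or NULL when the
   * corresponding mask is all-zero. */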
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions: only one fate (queue / drop / passthru) is allowed */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      fate = true;
    }
  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);
  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);
  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
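Both rte_flow_validate () and rte_flow_create () fill xd->last_flow_error on
failure, and format_dpdk_flow () further below prints its type and message.
A minimal sketch of the same idea written directly against the public
rte_flow API (the helper name is made up):

#include <stdio.h>
#include <rte_flow.h>

/* Hypothetical helper, not part of the plugin: report a failed
 * rte_flow_validate ()/rte_flow_create () call. */
static void
log_rte_flow_error (int rv, const struct rte_flow_error *e)
{
  fprintf (stderr, "rte_flow failed: rv=%d type=%d msg=%s\n",
           rv, (int) e->type, e->message ? e->message : "n/a");
}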
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe = 0;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv = 0;

  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      /* ... park the mark lookup entry and free the flow entry ... */
      goto disable_rx_offload;
    }

  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* add path */
  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* a lookup entry is allocated per mark id; the RX path uses it to
         recover flow_id / next_index / buffer_advance from the mark */
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;

  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;
done:
  if (rv)
    {
      /* roll back the entries allocated above */
      if (fe)
        pool_put (xd->flow_entries, fe);
      if (fle)
        pool_put (xd->flow_lookup_entries, fle);
    }

disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
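For context, this is roughly how a caller reaches dpdk_flow_ops_fn () through
the generic vnet flow layer. The sketch below assumes the vnet_flow_add ()
and vnet_flow_enable () signatures from vnet/flow/flow.h; verify them against
the tree in use:

/* Hedged sketch: register an IPv4 n-tuple drop flow and enable it on one
 * DPDK-backed interface. */
static int
add_example_flow (vnet_main_t * vnm, u32 hw_if_index)
{
  vnet_flow_t flow = {
    .type = VNET_FLOW_TYPE_IP4_N_TUPLE,
    .actions = VNET_FLOW_ACTION_DROP,
  };
  u32 flow_index;
  int rv;

  flow.ip4_n_tuple.protocol = IP_PROTOCOL_UDP;
  flow.ip4_n_tuple.dst_port.port = 4789;      /* example port */
  flow.ip4_n_tuple.dst_port.mask = 0xffff;

  if ((rv = vnet_flow_add (vnm, &flow, &flow_index)))
    return rv;

  /* ends up in dpdk_flow_ops_fn (VNET_FLOW_DEV_OP_ADD_FLOW, ...) and then
     dpdk_flow_add () for DPDK interfaces */
  return vnet_flow_enable (vnm, flow_index, hw_if_index);
}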
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  /* ... */
}
Symbols referenced above:

  static int dpdk_flow_add (dpdk_device_t *xd, vnet_flow_t *f, dpdk_flow_entry_t *fe)
  int dpdk_flow_ops_fn (vnet_main_t *vnm, vnet_flow_dev_op_t op, u32 dev_instance, u32 flow_index, uword *private_data)
  u8 *format_dpdk_flow (u8 *s, va_list *args)
  static bool mac_address_is_all_zero (const u8 addr[6])
  void dpdk_device_setup (dpdk_device_t *xd)
  vnet_flow_t *vnet_get_flow (u32 flow_index)
  static const struct rte_flow_attr ingress
  const ip46_address_t zero_addr
  format_function_t format_flow_actions
  dpdk_flow_entry_t *flow_entries
  dpdk_flow_lookup_entry_t *flow_lookup_entries
  u32 *parked_lookup_indexes
  u32 supported_flow_actions
  u32 redirect_device_input_next_index
  dpdk_portid_t device_index
  struct rte_flow_error last_flow_error
  volatile u32 main_loop_count
  vl_api_ip_proto_t protocol
  #define FLOW_IS_L2_LAYER(f)
  #define CLIB_CACHE_LINE_BYTES
  #define clib_memcpy_fast(a, b, c)
  #define clib_memcmp(s1, s2, m1)

CLIB vectors are ubiquitous dynamically resized arrays with user-defined
"headers"; the pool_* macros build a fixed-index object allocator on top of
them.

  #define vec_add1(V, E)              Add 1 element to end of vector (unspecified alignment).
  #define vec_add2(V, P, N)           Add N elements to end of vector V, return pointer to new elements in P.
  #define vec_len(v)                  Number of elements in vector (rvalue-only, NULL tolerant).
  #define vec_elt_at_index(v, i)      Get vector value at index i, checking that i is in bounds.
  #define vec_foreach(var, vec)       Vector iterator.
  #define vec_reset_length(v)         Reset vector length to zero (NULL-pointer tolerant).
  #define vec_free(V)                 Free vector's memory (no header).
  #define pool_get(P, E)              Allocate an object E from a pool P (unspecified alignment).
  #define pool_get_aligned(P, E, A)   Allocate an object E from a pool P with alignment A.
  #define pool_elt_at_index(p, i)     Returns pointer to element at given index.
  #define pool_put(P, E)              Free an object E in pool P.
  #define pool_put_index(p, i)        Free pool element with given index.
  static uword pool_elts (void *v)    Number of active elements in a pool.