static const struct rte_flow_attr ingress = {.ingress = 1 };
static const struct rte_flow_item_eth any_eth[2] = { };
static const struct rte_flow_item_vlan any_vlan[2] = { };
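/* Everything below builds an rte_flow rule. Each two-element array
   (any_eth, any_vlan, ip4, ip6, udp, tcp, raw) holds the rte_flow
   "spec" in element [0] and the corresponding "mask" in element [1],
   which is why the items point at x and x + 1 throughout. */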
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];
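  /* The VXLAN header is matched with a RAW item. In the DPDK release
     this was written against, struct rte_flow_item_raw ends in a
     flexible pattern[] array, so each raw[i].val buffer holds the item
     header (raw_sz bytes) immediately followed by a vxlan_header_t.
     Setting relative = 1 below anchors the pattern right after the
     preceding (UDP) item rather than at a fixed packet offset. */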
  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;
  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  item->spec = any_eth;
  item->mask = any_eth + 1;
  /* VLAN */
  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = any_vlan;
      item->mask = any_vlan + 1;
    }
  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      clib_memcpy (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
      clib_memcpy (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
      clib_memcpy (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
      clib_memcpy (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
      item->spec = ip6;
      item->mask = ip6 + 1;

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      item->spec = udp;
      item->mask = udp + 1;
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      item->spec = tcp;
      item->mask = tcp + 1;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Tunnel header match (VXLAN) */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      clib_memcpy (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions */
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
  vec_add2 (actions, action, 1);
  mark.id = fe->mark;
  action->type = RTE_FLOW_ACTION_TYPE_MARK;
  action->conf = &mark;
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
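For readers unfamiliar with the rte_flow API, the same pattern/action lists
can also be built with plain arrays instead of CLIB vectors. A minimal
sketch, assuming an initialized DPDK port; make_udp_mark_rule, port_id and
mark_id are illustrative names, not part of the code above:

#include <rte_flow.h>

static struct rte_flow *
make_udp_mark_rule (uint16_t port_id, uint32_t mark_id,
                    struct rte_flow_error *err)
{
  static const struct rte_flow_attr attr = { .ingress = 1 };
  struct rte_flow_item_udp udp_spec = { .hdr.dst_port = RTE_BE16 (4789) };
  struct rte_flow_item_udp udp_mask = { .hdr.dst_port = RTE_BE16 (0xffff) };
  struct rte_flow_item pattern[] = {
    { .type = RTE_FLOW_ITEM_TYPE_ETH },        /* NULL spec = match any */
    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
    { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec, .mask = &udp_mask },
    { .type = RTE_FLOW_ITEM_TYPE_END },
  };
  struct rte_flow_action_mark mark = { .id = mark_id };
  struct rte_flow_action actions[] = {
    { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
    { .type = RTE_FLOW_ACTION_TYPE_END },
  };

  return rte_flow_create (port_id, &attr, pattern, actions, err);
}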
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);
      if (rte_flow_destroy (xd->device_index, fe->handle,
                            &xd->last_flow_error))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          memset (fle, -1, sizeof (*fle));
        }
      memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* pool index 0 is reserved; the entry index becomes the mark id */
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->index;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
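  /* The lookup-entry pool index doubles as the rte_flow MARK id; on
     receive, the input node can use the mark carried in the mbuf to
     recover the flow id, next node and buffer advance stored here
     (an inference from the fields set above, not shown in this
     excerpt). */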
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
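dpdk_flow_ops_fn is reached through the generic vnet flow layer. A hedged
sketch of a caller, assuming the vnet_flow_add / vnet_flow_enable entry
points from vnet/flow/flow.h; exact field spellings may differ by release,
and vnm / hw_if_index come from the caller's context:

  vnet_flow_t flow = {
    .type = VNET_FLOW_TYPE_IP4_N_TUPLE,
    .actions = VNET_FLOW_ACTION_MARK,
    .ip4_n_tuple = {
      .protocol = IP_PROTOCOL_UDP,
      .dst_port = { .port = 4789, .mask = 0xffff },
    },
  };
  u32 flow_index;

  if (vnet_flow_add (vnm, &flow, &flow_index) == 0)
    /* enabling on a DPDK interface lands in dpdk_flow_ops_fn with
       op == VNET_FLOW_DEV_OP_ADD_FLOW */
    vnet_flow_enable (vnm, flow_index, hw_if_index);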
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message
                  : "n/a");
      return s;
    }

  fe = vec_elt_at_index (xd->flow_entries, private_data);

  if (!fe)
    return format (s, "unknown flow");

  /* per-flow details follow in the full source */
  return s;
}
Referenced CLIB macros and functions:

vec_add1 (V, E): Add 1 element to end of vector (unspecified alignment).
vec_add2 (V, P, N): Add N elements to end of vector V; return pointer to new elements in P.
vec_elt_at_index (v, i): Get vector value at index i, checking that i is in bounds.
vec_len (v): Number of elements in vector (rvalue-only, NULL tolerant).
vec_reset_length (v): Reset vector length to zero; NULL-pointer tolerant.
vec_free (V): Free vector's memory (no header).
vec_foreach (var, vec): Vector iterator.
pool_get (P, E): Allocate an object E from a pool P (unspecified alignment).
pool_get_aligned (P, E, A): Allocate an object E from a pool P (general version).
pool_put (P, E): Free an object E in pool P.
pool_put_index (p, i): Free pool element with given index.
pool_elt_at_index (p, i): Return pointer to pool element at given index.
pool_elts (v): Number of active elements in a pool.

CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".

Other referenced symbols: clib_memcpy (a, b, c); CLIB_CACHE_LINE_BYTES;
static const struct rte_flow_attr ingress;
static const struct rte_flow_item_eth any_eth[2];
static const struct rte_flow_item_vlan any_vlan[2];
u32 supported_flow_actions; dpdk_portid_t device_index;
struct rte_flow_error last_flow_error;
dpdk_flow_entry_t *flow_entries;
dpdk_flow_lookup_entry_t *flow_lookup_entries;
u32 *parked_lookup_indexes;
u32 redirect_device_input_next_index;
format_function_t format_flow_actions;
void dpdk_device_setup (dpdk_device_t *xd);
vnet_flow_t *vnet_get_flow (u32 flow_index);
static int dpdk_flow_add (dpdk_device_t *xd, vnet_flow_t *f, dpdk_flow_entry_t *fe);
int dpdk_flow_ops_fn (vnet_main_t *vnm, vnet_flow_dev_op_t op, u32 dev_instance, u32 flow_index, uword *private_data);
u8 *format_dpdk_flow (u8 *s, va_list *args).
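As a quick illustration of the vec_add2 idiom that builds the item and
action lists above (a sketch assuming vppinfra is available):

  struct rte_flow_item *item, *items = 0;

  vec_add2 (items, item, 1);    /* grow by one; item points at the new slot */
  item->type = RTE_FLOW_ITEM_TYPE_ETH;

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;

  /* items is a contiguous array, directly usable by rte_flow_create () */
  vec_free (items);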