FD.io VPP  v20.05.1-6-gf53edbc3b
Vector Packet Processing
flow.c
1 /*
2  * Copyright (c) 2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vnet/vnet.h>
17 #include <vppinfra/vec.h>
18 #include <vppinfra/format.h>
19 #include <assert.h>
20 
21 #include <vnet/ip/ip.h>
22 #include <vnet/ethernet/ethernet.h>
24 #include <vnet/vxlan/vxlan.h>
25 #include <dpdk/device/dpdk.h>
26 
27 #include <dpdk/device/dpdk_priv.h>
28 #include <vppinfra/error.h>
29 
30 /* check if flow is L2 flow */
31 #define FLOW_IS_L2_LAYER(f) \
32  (f->type == VNET_FLOW_TYPE_ETHERNET)
33 
34 /* check if flow is VLAN sensitive */
35 #define FLOW_IS_VLAN_TAGGED(f) \
36  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
37  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
38 
39 /* check if flow is L4 type */
40 #define FLOW_IS_L4_LAYER(f) \
41  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
42  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
43 
44 /* check if flow is L4 tunnel type */
45 #define FLOW_IS_L4_TUNNEL_LAYER(f) \
46  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
47  (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
48 
49 /* constant structs */
50 static const struct rte_flow_attr ingress = {.ingress = 1 };
51 
52 static inline bool
53 mac_address_is_all_zero (const u8 addr[6])
54 {
55  int i = 0;
56 
57  for (i = 0; i < 6; i++)
58  if (addr[i] != 0)
59  return false;
60 
61  return true;
62 }
63 
64 static inline void
65 dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
66 {
67 #define BIT_IS_SET(v, b) \
68  ((v) & (u64)1<<(b))
69 
70  *dpdk_rss_type = 0;
71 
72 #undef _
73 #define _(n, f, s) \
74  if (n != -1 && BIT_IS_SET(type, n)) \
75  *dpdk_rss_type |= f;
76 
77  foreach_dpdk_rss_hf
78 #undef _
79  return;
80 }
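/*
 * How the conversion above works: foreach_dpdk_rss_hf (dpdk.h) supplies
 * entries of the form _(bit, dpdk-flag, name).  As a hedged illustration
 * only (the entry below is hypothetical, not copied from dpdk.h), an
 * entry such as
 *
 *   _(1, ETH_RSS_NONFRAG_IPV4_TCP, "ipv4-tcp")
 *
 * expands under the local _ macro to
 *
 *   if (1 != -1 && BIT_IS_SET (type, 1))
 *     *dpdk_rss_type |= ETH_RSS_NONFRAG_IPV4_TCP;
 *
 * so every vnet RSS bit set in 'type' ORs the matching DPDK ETH_RSS_*
 * flag into *dpdk_rss_type.
 */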
81 
82 static inline enum rte_eth_hash_function
83 dpdk_flow_convert_rss_func (vnet_rss_function_t func)
84 {
85  enum rte_eth_hash_function rss_func;
86 
87  switch (func)
88  {
89  case VNET_RSS_FUNC_DEFAULT:
90  rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
91  break;
92  case VNET_RSS_FUNC_TOEPLITZ:
93  rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
94  break;
95  case VNET_RSS_FUNC_SIMPLE_XOR:
96  rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
97  break;
98  case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
99  rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
100  break;
101  default:
102  rss_func = RTE_ETH_HASH_FUNCTION_MAX;
103  break;
104  }
105 
106  return rss_func;
107 }
108 
109 static int
110 dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
111 {
112  struct rte_flow_item_eth eth[2] = { };
113  struct rte_flow_item_ipv4 ip4[2] = { };
114  struct rte_flow_item_ipv4 inner_ip4[2] = { };
115  struct rte_flow_item_ipv6 ip6[2] = { };
116  struct rte_flow_item_ipv6 inner_ip6[2] = { };
117  struct rte_flow_item_udp udp[2] = { };
118  struct rte_flow_item_tcp tcp[2] = { };
119  struct rte_flow_item_gtp gtp[2] = { };
120  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
121  struct rte_flow_action_mark mark = { 0 };
122  struct rte_flow_action_queue queue = { 0 };
123  struct rte_flow_action_rss rss = { 0 };
124  struct rte_flow_item *item, *items = 0;
125  struct rte_flow_action *action, *actions = 0;
126  bool fate = false;
127 
128  enum
129  {
130  vxlan_hdr_sz = sizeof (vxlan_header_t),
131  raw_sz = sizeof (struct rte_flow_item_raw)
132  };
133 
134  union
135  {
136  struct rte_flow_item_raw item;
137  u8 val[raw_sz + vxlan_hdr_sz];
138  } raw[2];
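 /* raw[0] is the RAW item spec and raw[1] its mask: each slot keeps the
    rte_flow_item_raw header at the front of 'val' and the VXLAN header
    bytes to match right behind it, so item.pattern can simply point at
    val + raw_sz (filled in for VNET_FLOW_TYPE_IP4_VXLAN below). */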
139 
140  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
141  u8 protocol = IP_PROTOCOL_RESERVED;
142  int rv = 0;
143 
144  if (f->actions & (~xd->supported_flow_actions))
145  return VNET_FLOW_ERROR_NOT_SUPPORTED;
146 
147  /* Match items */
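 /* Each eth/ip4/ip6/udp/tcp/gtp/l2tp array below is a { spec, mask }
    pair: element [0] holds the values to match, element [1] the mask
    DPDK applies to them; spec = mask = NULL means "match any". */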
148  /* Ethernet */
149  vec_add2 (items, item, 1);
150  item->type = RTE_FLOW_ITEM_TYPE_ETH;
151  if (f->type == VNET_FLOW_TYPE_ETHERNET)
152  {
153  vnet_flow_ethernet_t *te = &f->ethernet;
154 
155  clib_memset (&eth[0], 0, sizeof (eth[0]));
156  clib_memset (&eth[1], 0, sizeof (eth[1]));
157 
158  /* check if SMAC/DMAC/Ether_type assigned */
159  if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
160  {
161  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
162  sizeof (eth[0].dst));
163  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
164  }
165 
166  if (!mac_address_is_all_zero (te->eth_hdr.src_address))
167  {
168  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
169  sizeof (eth[0].src));
170  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
171  }
172 
173  if (te->eth_hdr.type)
174  {
175  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
176  eth[1].type = clib_host_to_net_u16 (0xFFFF);
177  }
178 
179  item->spec = eth;
180  item->mask = eth + 1;
181  }
182  else
183  {
184  item->spec = NULL;
185  item->mask = NULL;
186  }
187 
188  if (FLOW_IS_VLAN_TAGGED (f))
189  {
190  vec_add2 (items, item, 1);
191  item->type = RTE_FLOW_ITEM_TYPE_VLAN;
192  item->spec = NULL;
193  item->mask = NULL;
194  }
195 
196  if (FLOW_IS_L2_LAYER (f))
197  goto pattern_end;
198 
199  /* IP */
200  vec_add2 (items, item, 1);
201  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
202  {
203  vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
204  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
205 
206  if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
207  {
208  item->spec = NULL;
209  item->mask = NULL;
210  }
211  else
212  {
213  ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
214  ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
215  ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
216  ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
217  item->spec = ip4;
218  item->mask = ip4 + 1;
219  }
220  protocol = l2tp->protocol;
221  }
222  else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
223  (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
224  (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
225  (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
226  (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
227  {
228  vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
229  item->type = RTE_FLOW_ITEM_TYPE_IPV6;
230 
231  if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
232  !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
233  {
234  item->spec = NULL;
235  item->mask = NULL;
236  }
237  else
238  {
239  clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
240  clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
241  clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
242  clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
243  item->spec = ip6;
244  item->mask = ip6 + 1;
245  }
246 
247  src_port = t6->src_port.port;
248  dst_port = t6->dst_port.port;
249  src_port_mask = t6->src_port.mask;
250  dst_port_mask = t6->dst_port.mask;
251  protocol = t6->protocol;
252  }
253  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
254  (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
255  (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
256  (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
257  (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
258  {
259  vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
260  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
261 
262  if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
263  {
264  item->spec = NULL;
265  item->mask = NULL;
266  }
267  else
268  {
269  ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
270  ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
271  ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
272  ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
273  item->spec = ip4;
274  item->mask = ip4 + 1;
275  }
276 
277  src_port = t4->src_port.port;
278  dst_port = t4->dst_port.port;
279  src_port_mask = t4->src_port.mask;
280  dst_port_mask = t4->dst_port.mask;
281  protocol = t4->protocol;
282  }
283  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
284  {
285  vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
286  ip4[0].hdr.src_addr = v4->src_addr.as_u32;
287  ip4[1].hdr.src_addr = -1;
288  ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
289  ip4[1].hdr.dst_addr = -1;
290  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
291  item->spec = ip4;
292  item->mask = ip4 + 1;
293 
294  dst_port = v4->dst_port;
295  dst_port_mask = -1;
296  src_port = 0;
297  src_port_mask = 0;
298  protocol = IP_PROTOCOL_UDP;
299  }
300  else
301  {
302  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
303  goto done;
304  }
305 
306  /* Layer 4 */
307  if (protocol == IP_PROTOCOL_UDP)
308  {
309  vec_add2 (items, item, 1);
310  item->type = RTE_FLOW_ITEM_TYPE_UDP;
311 
312  if ((src_port_mask == 0) && (dst_port_mask == 0))
313  {
314  item->spec = NULL;
315  item->mask = NULL;
316  }
317  else
318  {
319  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
320  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
321  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
322  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
323  item->spec = udp;
324  item->mask = udp + 1;
325  }
326  }
327  else if (protocol == IP_PROTOCOL_TCP)
328  {
329  vec_add2 (items, item, 1);
330  item->type = RTE_FLOW_ITEM_TYPE_TCP;
331 
332  if ((src_port_mask == 0) && (dst_port_mask == 0))
333  {
334  item->spec = NULL;
335  item->mask = NULL;
336  }
337  else
338  {
339  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
340  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
341  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
342  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
343  item->spec = tcp;
344  item->mask = tcp + 1;
345  }
346  }
347  else if (protocol == IP_PROTOCOL_RESERVED)
348  {
349  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
350  goto done;
351  }
352 
353  /* Tunnel header match */
354  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
355  {
356  vec_add2 (items, item, 1);
357  item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
358 
359  vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
360  l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
361  l2tp[1].session_id = ~0;
362 
363  item->spec = l2tp;
364  item->mask = l2tp + 1;
365  }
366  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
367  {
368  u32 vni = f->ip4_vxlan.vni;
369  vxlan_header_t spec_hdr = {
370  .flags = VXLAN_FLAGS_I,
371  .vni_reserved = clib_host_to_net_u32 (vni << 8)
372  };
373  vxlan_header_t mask_hdr = {
374  .flags = 0xff,
375  .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
376  };
377 
378  clib_memset (raw, 0, sizeof raw);
379  raw[0].item.relative = 1;
380  raw[0].item.length = vxlan_hdr_sz;
381 
382  clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
383  raw[0].item.pattern = raw[0].val + raw_sz;
384  clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
385  raw[1].item.pattern = raw[1].val + raw_sz;
386 
387  vec_add2 (items, item, 1);
388  item->type = RTE_FLOW_ITEM_TYPE_RAW;
389  item->spec = raw;
390  item->mask = raw + 1;
391  }
392  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
393  {
394  vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
395  gtp[0].teid = clib_host_to_net_u32 (gc->teid);
396  gtp[1].teid = ~0;
397 
398  vec_add2 (items, item, 1);
399  item->type = RTE_FLOW_ITEM_TYPE_GTPC;
400  item->spec = gtp;
401  item->mask = gtp + 1;
402  }
403  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
404  {
405  vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
406  gtp[0].teid = clib_host_to_net_u32 (gu->teid);
407  gtp[1].teid = ~0;
408 
409  vec_add2 (items, item, 1);
410  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
411  item->spec = gtp;
412  item->mask = gtp + 1;
413  }
414  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
415  (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
416  {
417  vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
418  gtp[0].teid = clib_host_to_net_u32 (gu->teid);
419  gtp[1].teid = ~0;
420 
421  vec_add2 (items, item, 1);
422  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
423  item->spec = gtp;
424  item->mask = gtp + 1;
425 
426  /* inner IP4 header */
427  if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
428  {
429  vec_add2 (items, item, 1);
430  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
431 
432  vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
433  if (!gu4->inner_src_addr.mask.as_u32 &&
434  !gu4->inner_dst_addr.mask.as_u32)
435  {
436  item->spec = NULL;
437  item->mask = NULL;
438  }
439  else
440  {
441  inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
442  inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
443  inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
444  inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
445  item->spec = inner_ip4;
446  item->mask = inner_ip4 + 1;
447  }
448  }
449  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
450  {
451  ip6_address_t zero_addr;
452  vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;
453 
454  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
455 
456  vec_add2 (items, item, 1);
457  item->type = RTE_FLOW_ITEM_TYPE_IPV6;
458 
459  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
460  !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
461  {
462  item->spec = NULL;
463  item->mask = NULL;
464  }
465  else
466  {
467  clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
468  &gu6->inner_src_addr.addr, 16);
469  clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
470  &gu6->inner_src_addr.mask, 16);
471  clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
472  &gu6->inner_dst_addr.addr, 16);
473  clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
474  &gu6->inner_dst_addr.mask, 16);
475  item->spec = inner_ip6;
476  item->mask = inner_ip6 + 1;
477  }
478  }
479  }
480  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
481  {
482  vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
483  gtp[0].teid = clib_host_to_net_u32 (gc->teid);
484  gtp[1].teid = ~0;
485 
486  vec_add2 (items, item, 1);
487  item->type = RTE_FLOW_ITEM_TYPE_GTPC;
488  item->spec = gtp;
489  item->mask = gtp + 1;
490  }
491  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
492  {
493  vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
494  gtp[0].teid = clib_host_to_net_u32 (gu->teid);
495  gtp[1].teid = ~0;
496 
497  vec_add2 (items, item, 1);
498  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
499  item->spec = gtp;
500  item->mask = gtp + 1;
501  }
502  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
503  (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
504  {
505  vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
506  gtp[0].teid = clib_host_to_net_u32 (gu->teid);
507  gtp[1].teid = ~0;
508 
509  vec_add2 (items, item, 1);
510  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
511  item->spec = gtp;
512  item->mask = gtp + 1;
513 
514  /* inner IP4 header */
515  if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
516  {
517  vec_add2 (items, item, 1);
518  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
519 
520  vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;
521 
522  if (!gu4->inner_src_addr.mask.as_u32 &&
523  !gu4->inner_dst_addr.mask.as_u32)
524  {
525  item->spec = NULL;
526  item->mask = NULL;
527  }
528  else
529  {
530  inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
531  inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
532  inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
533  inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
534  item->spec = inner_ip4;
535  item->mask = inner_ip4 + 1;
536  }
537  }
538 
539  if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
540  {
541  ip6_address_t zero_addr;
542  vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;
543 
544  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
545 
546  vec_add2 (items, item, 1);
547  item->type = RTE_FLOW_ITEM_TYPE_IPV6;
548 
549  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
550  !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
551  {
552  item->spec = NULL;
553  item->mask = NULL;
554  }
555  else
556  {
557  clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
558  &gu6->inner_src_addr.addr, 16);
559  clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
560  &gu6->inner_src_addr.mask, 16);
561  clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
562  &gu6->inner_dst_addr.addr, 16);
563  clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
564  &gu6->inner_dst_addr.mask, 16);
565  item->spec = inner_ip6;
566  item->mask = inner_ip6 + 1;
567  }
568 
569  }
570  }
571 
572 pattern_end:
573  vec_add2 (items, item, 1);
574  item->type = RTE_FLOW_ITEM_TYPE_END;
575 
576  /* Actions */
577  /* Only one 'fate' can be assigned */
578  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
579  {
580  vec_add2 (actions, action, 1);
581  queue.index = f->redirect_queue;
582  action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
583  action->conf = &queue;
584  fate = true;
585  }
586  if (f->actions & VNET_FLOW_ACTION_DROP)
587  {
588  vec_add2 (actions, action, 1);
589  action->type = RTE_FLOW_ACTION_TYPE_DROP;
590  if (fate == true)
591  {
592  rv = VNET_FLOW_ERROR_INTERNAL;
593  goto done;
594  }
595  else
596  fate = true;
597  }
598  if (f->actions & VNET_FLOW_ACTION_RSS)
599  {
600  u64 rss_type = 0;
601 
602  vec_add2 (actions, action, 1);
603  action->type = RTE_FLOW_ACTION_TYPE_RSS;
604  action->conf = &rss;
605 
606  /* convert types to DPDK rss bitmask */
607  dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
608 
609  rss.types = rss_type;
610  if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
611  RTE_ETH_HASH_FUNCTION_MAX)
612  {
613  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
614  goto done;
615  }
616 
617  if (fate == true)
618  {
619  rv = VNET_FLOW_ERROR_INTERNAL;
620  goto done;
621  }
622  else
623  fate = true;
624  }
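 /* no fate action (queue, drop, rss) was requested - e.g. the flow only
    asks for MARK - so add PASSTHRU to keep the packet on its normal
    RX path */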
625  if (fate == false)
626  {
627  vec_add2 (actions, action, 1);
628  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
629  }
630 
631  if (f->actions & VNET_FLOW_ACTION_MARK)
632  {
633  vec_add2 (actions, action, 1);
634  mark.id = fe->mark;
635  action->type = RTE_FLOW_ACTION_TYPE_MARK;
636  action->conf = &mark;
637  }
638 
639  vec_add2 (actions, action, 1);
640  action->type = RTE_FLOW_ACTION_TYPE_END;
641 
642  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
643  &xd->last_flow_error);
644 
645  if (rv)
646  {
647  if (rv == -EINVAL)
648  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
649  else if (rv == -EEXIST)
650  rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
651  else
652  rv = VNET_FLOW_ERROR_INTERNAL;
653  goto done;
654  }
655 
656  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
657  &xd->last_flow_error);
658 
659  if (!fe->handle)
660  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
661 
662 done:
663  vec_free (items);
664  vec_free (actions);
665  return rv;
666 }
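/*
 * For orientation, a sketch (not literal driver output) of what
 * dpdk_flow_add () builds for a VNET_FLOW_TYPE_IP4_N_TUPLE flow with
 * protocol UDP and a redirect-to-queue action:
 *
 *   pattern: ETH -> IPV4 (spec/mask from src/dst address) ->
 *            UDP (spec/mask from src/dst port) -> END
 *   actions: QUEUE (f->redirect_queue) -> MARK (fe->mark, if requested)
 *            -> END
 *
 * The item and action vectors are passed to rte_flow_validate () and, on
 * success, the rule is installed with rte_flow_create () on
 * xd->device_index.
 */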
667 
668 int
669 dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
670  u32 flow_index, uword * private_data)
671 {
672  dpdk_main_t *dm = &dpdk_main;
673  vnet_flow_t *flow = vnet_get_flow (flow_index);
674  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
675  dpdk_flow_entry_t *fe;
676  dpdk_flow_lookup_entry_t *fle = 0;
677  int rv;
678 
679  /* recycle old flow lookup entries only after the main loop counter
680  increases - i.e. previously DMA'ed packets were handled */
681  if (vec_len (xd->parked_lookup_indexes) > 0 &&
682  xd->parked_loop_count != dm->vlib_main->main_loop_count)
683  {
684  u32 *fl_index;
685 
686  vec_foreach (fl_index, xd->parked_lookup_indexes)
687  pool_put_index (xd->flow_lookup_entries, *fl_index);
688  vec_reset_length (xd->parked_lookup_indexes);
689  }
690 
691  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
692  {
693  fe = vec_elt_at_index (xd->flow_entries, *private_data);
694 
695  if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
696  &xd->last_flow_error)))
697  return VNET_FLOW_ERROR_INTERNAL;
698 
699  if (fe->mark)
700  {
701  /* make sure no action is taken for in-flight (marked) packets */
702  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
703  clib_memset (fle, -1, sizeof (*fle));
704  vec_add1 (xd->parked_lookup_indexes, fe->mark);
705  xd->parked_loop_count = dm->vlib_main->main_loop_count;
706  }
707 
708  clib_memset (fe, 0, sizeof (*fe));
709  pool_put (xd->flow_entries, fe);
710 
711  goto disable_rx_offload;
712  }
713 
714  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
715  return VNET_FLOW_ERROR_NOT_SUPPORTED;
716 
717  pool_get (xd->flow_entries, fe);
718  fe->flow_index = flow->index;
719 
720  if (flow->actions == 0)
721  {
722  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
723  goto done;
724  }
725 
726  /* if we need to mark packets, assign one mark */
727  if (flow->actions & (VNET_FLOW_ACTION_MARK |
728  VNET_FLOW_ACTION_REDIRECT_TO_NODE |
729  VNET_FLOW_ACTION_BUFFER_ADVANCE))
730  {
731  /* reserve slot 0 */
732  if (xd->flow_lookup_entries == 0)
733  pool_get_aligned (xd->flow_lookup_entries, fle,
734  CLIB_CACHE_LINE_BYTES);
735  pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
736  fe->mark = fle - xd->flow_lookup_entries;
737 
738  /* install entry in the lookup table */
739  clib_memset (fle, -1, sizeof (*fle));
740  if (flow->actions & VNET_FLOW_ACTION_MARK)
741  fle->flow_id = flow->mark_flow_id;
742  if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
743  fle->next_index = flow->redirect_device_input_next_index;
744  if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
745  fle->buffer_advance = flow->buffer_advance;
746  }
747  else
748  fe->mark = 0;
749 
750  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
751  {
752  xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
753  dpdk_device_setup (xd);
754  }
755 
756  switch (flow->type)
757  {
758  case VNET_FLOW_TYPE_ETHERNET:
759  case VNET_FLOW_TYPE_IP4_N_TUPLE:
760  case VNET_FLOW_TYPE_IP6_N_TUPLE:
761  case VNET_FLOW_TYPE_IP4_VXLAN:
762  case VNET_FLOW_TYPE_IP4_GTPC:
763  case VNET_FLOW_TYPE_IP4_GTPU:
764  case VNET_FLOW_TYPE_IP4_GTPU_IP4:
765  case VNET_FLOW_TYPE_IP4_GTPU_IP6:
766  case VNET_FLOW_TYPE_IP6_GTPC:
767  case VNET_FLOW_TYPE_IP6_GTPU:
768  case VNET_FLOW_TYPE_IP6_GTPU_IP4:
769  case VNET_FLOW_TYPE_IP6_GTPU_IP6:
770  case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
771  if ((rv = dpdk_flow_add (xd, flow, fe)))
772  goto done;
773  break;
774  default:
775  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
776  goto done;
777  }
778 
779  *private_data = fe - xd->flow_entries;
780 
781 done:
782  if (rv)
783  {
784  clib_memset (fe, 0, sizeof (*fe));
785  pool_put (xd->flow_entries, fe);
786  if (fle)
787  {
788  clib_memset (fle, -1, sizeof (*fle));
789  pool_put (xd->flow_lookup_entries, fle);
790  }
791  }
792 disable_rx_offload:
793  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
794  && pool_elts (xd->flow_entries) == 0)
795  {
796  xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
797  dpdk_device_setup (xd);
798  }
799 
800  return rv;
801 }
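/*
 * dpdk_flow_ops_fn () is not called directly; it is reached through the
 * vnet flow layer.  A minimal sketch, assuming an existing DPDK interface
 * (hw_if_index, the port value and the queue index are placeholders,
 * error handling omitted):
 *
 *   vnet_flow_t flow = {
 *     .type = VNET_FLOW_TYPE_IP4_N_TUPLE,
 *     .actions = VNET_FLOW_ACTION_REDIRECT_TO_QUEUE,
 *     .redirect_queue = 1,
 *   };
 *   flow.ip4_n_tuple.protocol = IP_PROTOCOL_UDP;
 *   flow.ip4_n_tuple.dst_port.port = 4789;
 *   flow.ip4_n_tuple.dst_port.mask = 0xffff;
 *
 *   u32 flow_index;
 *   vnet_flow_add (vnm, &flow, &flow_index);
 *   vnet_flow_enable (vnm, flow_index, hw_if_index);
 *
 * vnet_flow_enable () looks up the device class for hw_if_index and ends
 * up invoking this function with VNET_FLOW_DEV_OP_ADD_FLOW.
 */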
802 
803 u8 *
804 format_dpdk_flow (u8 * s, va_list * args)
805 {
806  u32 dev_instance = va_arg (*args, u32);
807  u32 flow_index = va_arg (*args, u32);
808  uword private_data = va_arg (*args, uword);
809  dpdk_main_t *dm = &dpdk_main;
810  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
811  dpdk_flow_entry_t *fe;
812 
813  if (flow_index == ~0)
814  {
815  s = format (s, "%-25s: %U\n", "supported flow actions",
816  format_flow_actions, xd->supported_flow_actions);
817  s = format (s, "%-25s: %d\n", "last DPDK error type",
818  xd->last_flow_error.type);
819  s = format (s, "%-25s: %s\n", "last DPDK error message",
820  xd->last_flow_error.message ? xd->last_flow_error.message :
821  "n/a");
822  return s;
823  }
824 
825  if (private_data >= vec_len (xd->flow_entries))
826  return format (s, "unknown flow");
827 
828  fe = vec_elt_at_index (xd->flow_entries, private_data);
829  s = format (s, "mark %u", fe->mark);
830  return s;
831 }
832 
833 /*
834  * fd.io coding-style-patch-verification: ON
835  *
836  * Local Variables:
837  * eval: (c-set-style "gnu")
838  * End:
839  */