FD.io VPP  v20.09-64-g4f7b92f0a
Vector Packet Processing
flow.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2019 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <assert.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
28 
29 #define FLOW_IS_ETHERNET_CLASS(f) \
30  (f->type == VNET_FLOW_TYPE_ETHERNET)
31 
32 #define FLOW_IS_IPV4_CLASS(f) \
33  ((f->type == VNET_FLOW_TYPE_IP4) || \
34  (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
35  (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
36  (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
37  (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
38  (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
39  (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
40  (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
41  (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
42 
43 #define FLOW_IS_IPV6_CLASS(f) \
44  ((f->type == VNET_FLOW_TYPE_IP6) || \
45  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
46  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
47  (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
48 
49 /* check if flow is VLAN sensitive */
50 #define FLOW_HAS_VLAN_TAG(f) \
51  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
52  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
53 
54 /* check if flow is L3 type */
55 #define FLOW_IS_L3_TYPE(f) \
56  ((f->type == VNET_FLOW_TYPE_IP4) || \
57  (f->type == VNET_FLOW_TYPE_IP6))
58 
59 /* check if flow is L4 type */
60 #define FLOW_IS_L4_TYPE(f) \
61  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
62  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
63  (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
64  (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
65 
66 /* check if flow is L4 tunnel type */
67 #define FLOW_IS_L4_TUNNEL_TYPE(f) \
68  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
69  (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
70  (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
71  (f->type == VNET_FLOW_TYPE_IP4_GTPU))
72 
73 /* constant structs */
74 static const struct rte_flow_attr ingress = {.ingress = 1 };
75 
76 static inline bool
78 {
79  int i = 0;
80 
81  for (i = 0; i < 6; i++)
82  if (addr[i] != 0)
83  return false;
84 
85  return true;
86 }
87 
88 static inline void
90 {
91 #define BIT_IS_SET(v, b) \
92  ((v) & (u64)1<<(b))
93 
94  *dpdk_rss_type = 0;
95 
96 #undef _
97 #define _(n, f, s) \
98  if (n != -1 && BIT_IS_SET(type, n)) \
99  *dpdk_rss_type |= f;
100 
102 #undef _
103  return;
104 }
105 
106 static inline enum rte_eth_hash_function
108 {
109  enum rte_eth_hash_function rss_func;
110 
111  switch (func)
112  {
113  case VNET_RSS_FUNC_DEFAULT:
114  rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
115  break;
116  case VNET_RSS_FUNC_TOEPLITZ:
117  rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
118  break;
119  case VNET_RSS_FUNC_SIMPLE_XOR:
120  rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
121  break;
122  case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
123  rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
124  break;
125  default:
126  rss_func = RTE_ETH_HASH_FUNCTION_MAX;
127  break;
128  }
129 
130  return rss_func;
131 }
132 
133 static int
135 {
136  struct rte_flow_item_eth eth[2] = { };
137  struct rte_flow_item_ipv4 ip4[2] = { };
138  struct rte_flow_item_ipv6 ip6[2] = { };
139  struct rte_flow_item_udp udp[2] = { };
140  struct rte_flow_item_tcp tcp[2] = { };
141  struct rte_flow_item_gtp gtp[2] = { };
142  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
143  struct rte_flow_item_esp esp[2] = { };
144  struct rte_flow_item_ah ah[2] = { };
145  struct rte_flow_action_mark mark = { 0 };
146  struct rte_flow_action_queue queue = { 0 };
147  struct rte_flow_action_rss rss = { 0 };
148  struct rte_flow_item *item, *items = 0;
149  struct rte_flow_action *action, *actions = 0;
150  bool fate = false;
151 
152  enum
153  {
154  vxlan_hdr_sz = sizeof (vxlan_header_t),
155  raw_sz = sizeof (struct rte_flow_item_raw)
156  };
157 
158  union
159  {
160  struct rte_flow_item_raw item;
161  u8 val[raw_sz + vxlan_hdr_sz];
162  } raw[2];
163 
164  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
165  u8 protocol = IP_PROTOCOL_RESERVED;
166  int rv = 0;
167 
168  enum
169  {
170  FLOW_UNKNOWN_CLASS,
171  FLOW_ETHERNET_CLASS,
172  FLOW_IPV4_CLASS,
173  FLOW_IPV6_CLASS,
174  } flow_class = FLOW_UNKNOWN_CLASS;
175 
176  if (FLOW_IS_ETHERNET_CLASS (f))
177  flow_class = FLOW_ETHERNET_CLASS;
178  else if (FLOW_IS_IPV4_CLASS (f))
179  flow_class = FLOW_IPV4_CLASS;
180  else if (FLOW_IS_IPV6_CLASS (f))
181  flow_class = FLOW_IPV6_CLASS;
182  else
183  return VNET_FLOW_ERROR_NOT_SUPPORTED;
184 
185  if (f->actions & (~xd->supported_flow_actions))
186  return VNET_FLOW_ERROR_NOT_SUPPORTED;
187 
188  /* Match items */
189  /* Layer 2, Ethernet */
190  vec_add2 (items, item, 1);
191  item->type = RTE_FLOW_ITEM_TYPE_ETH;
192 
193  if (flow_class == FLOW_ETHERNET_CLASS)
194  {
195  vnet_flow_ethernet_t *te = &f->ethernet;
196 
197  clib_memset (&eth[0], 0, sizeof (eth[0]));
198  clib_memset (&eth[1], 0, sizeof (eth[1]));
199 
200  /* check if SMAC/DMAC/Ether_type assigned */
201  if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
202  {
203  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
204  sizeof (eth[0].dst));
205  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
206  }
207 
208  if (!mac_address_is_all_zero (te->eth_hdr.src_address))
209  {
210  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
211  sizeof (eth[0].src));
212  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
213  }
214 
215  if (te->eth_hdr.type)
216  {
217  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
218  eth[1].type = clib_host_to_net_u16 (0xFFFF);
219  }
220 
221  item->spec = eth;
222  item->mask = eth + 1;
223  }
224  else
225  {
226  item->spec = NULL;
227  item->mask = NULL;
228  }
229 
230  /* currently only single empty vlan tag is supported */
231  if (FLOW_HAS_VLAN_TAG (f))
232  {
233  vec_add2 (items, item, 1);
234  item->type = RTE_FLOW_ITEM_TYPE_VLAN;
235  item->spec = NULL;
236  item->mask = NULL;
237  }
238 
239  if (FLOW_IS_ETHERNET_CLASS (f))
240  goto pattern_end;
241 
242  /* Layer 3, IP */
243  vec_add2 (items, item, 1);
244  if (flow_class == FLOW_IPV4_CLASS)
245  {
246  vnet_flow_ip4_t *ip4_ptr = &f->ip4;
247 
248  item->type = RTE_FLOW_ITEM_TYPE_IPV4;
249  if ((!ip4_ptr->src_addr.mask.as_u32) &&
250  (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
251  {
252  item->spec = NULL;
253  item->mask = NULL;
254  }
255  else
256  {
257  ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
258  ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
259  ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
260  ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
261  ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
262  ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;
263 
264  item->spec = ip4;
265  item->mask = ip4 + 1;
266  }
267 
269  {
270  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;
271 
272  src_port = ip4_n_ptr->src_port.port;
273  dst_port = ip4_n_ptr->dst_port.port;
274  src_port_mask = ip4_n_ptr->src_port.mask;
275  dst_port_mask = ip4_n_ptr->dst_port.mask;
276  }
277 
278  protocol = ip4_ptr->protocol.prot;
279  }
280  else if (flow_class == FLOW_IPV6_CLASS)
281  {
282  vnet_flow_ip6_t *ip6_ptr = &f->ip6;
283 
284  item->type = RTE_FLOW_ITEM_TYPE_IPV6;
285 
286  if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
287  (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
288  (!ip6_ptr->protocol.mask))
289  {
290  item->spec = NULL;
291  item->mask = NULL;
292  }
293  else
294  {
295  clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
296  ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
297  clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
298  ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
299  clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
300  ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
301  clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
302  ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
303  ip6[0].hdr.proto = ip6_ptr->protocol.prot;
304  ip6[1].hdr.proto = ip6_ptr->protocol.mask;
305 
306  item->spec = ip6;
307  item->mask = ip6 + 1;
308  }
309 
311  {
312  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;
313 
314  src_port = ip6_n_ptr->src_port.port;
315  dst_port = ip6_n_ptr->dst_port.port;
316  src_port_mask = ip6_n_ptr->src_port.mask;
317  dst_port_mask = ip6_n_ptr->dst_port.mask;
318  }
319 
320  protocol = ip6_ptr->protocol.prot;
321  }
322 
323  if (FLOW_IS_L3_TYPE (f))
324  goto pattern_end;
325 
326  /* Layer 3, IP */
327  vec_add2 (items, item, 1);
328  switch (protocol)
329  {
330  case IP_PROTOCOL_L2TP:
331  item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
332  l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
333  l2tp[1].session_id = ~0;
334 
335  item->spec = l2tp;
336  item->mask = l2tp + 1;
337  break;
338 
339  case IP_PROTOCOL_IPSEC_ESP:
340  item->type = RTE_FLOW_ITEM_TYPE_ESP;
341  esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
342  esp[1].hdr.spi = ~0;
343 
344  item->spec = esp;
345  item->mask = esp + 1;
346  break;
347 
348  case IP_PROTOCOL_IPSEC_AH:
349  item->type = RTE_FLOW_ITEM_TYPE_AH;
350  ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
351  ah[1].spi = ~0;
352 
353  item->spec = ah;
354  item->mask = ah + 1;
355  break;
356  case IP_PROTOCOL_TCP:
357  item->type = RTE_FLOW_ITEM_TYPE_TCP;
358  if ((src_port_mask == 0) && (dst_port_mask == 0))
359  {
360  item->spec = NULL;
361  item->mask = NULL;
362  }
363  else
364  {
365  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
366  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
367  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
368  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
369  item->spec = tcp;
370  item->mask = tcp + 1;
371  }
372  break;
373 
374  case IP_PROTOCOL_UDP:
375  item->type = RTE_FLOW_ITEM_TYPE_UDP;
376  if ((src_port_mask == 0) && (dst_port_mask == 0))
377  {
378  item->spec = NULL;
379  item->mask = NULL;
380  }
381  else
382  {
383  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
384  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
385  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
386  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
387  item->spec = udp;
388  item->mask = udp + 1;
389  }
390 
391  /* handle the UDP tunnels */
392  if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
393  {
394  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid);
395  gtp[1].teid = ~0;
396 
397  vec_add2 (items, item, 1);
398  item->type = RTE_FLOW_ITEM_TYPE_GTPC;
399  item->spec = gtp;
400  item->mask = gtp + 1;
401  }
402  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
403  {
404  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid);
405  gtp[1].teid = ~0;
406 
407  vec_add2 (items, item, 1);
408  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
409  item->spec = gtp;
410  item->mask = gtp + 1;
411  }
412  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
413  {
414  u32 vni = f->ip4_vxlan.vni;
415 
416  vxlan_header_t spec_hdr = {
417  .flags = VXLAN_FLAGS_I,
418  .vni_reserved = clib_host_to_net_u32 (vni << 8)
419  };
420  vxlan_header_t mask_hdr = {
421  .flags = 0xff,
422  .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
423  };
424 
425  clib_memset (raw, 0, sizeof raw);
426  raw[0].item.relative = 1;
427  raw[0].item.length = vxlan_hdr_sz;
428 
429  clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
430  raw[0].item.pattern = raw[0].val + raw_sz;
431  clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
432  raw[1].item.pattern = raw[1].val + raw_sz;
433 
434  vec_add2 (items, item, 1);
435  item->type = RTE_FLOW_ITEM_TYPE_RAW;
436  item->spec = raw;
437  item->mask = raw + 1;
438  }
439  break;
440 
441  default:
442  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
443  goto done;
444  }
445 
446 pattern_end:
447  vec_add2 (items, item, 1);
448  item->type = RTE_FLOW_ITEM_TYPE_END;
449 
450  /* Actions */
451  /* Only one 'fate' can be assigned */
452  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
453  {
454  vec_add2 (actions, action, 1);
455  queue.index = f->redirect_queue;
456  action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
457  action->conf = &queue;
458  fate = true;
459  }
460 
461  if (f->actions & VNET_FLOW_ACTION_DROP)
462  {
463  vec_add2 (actions, action, 1);
464  action->type = RTE_FLOW_ACTION_TYPE_DROP;
465  if (fate == true)
466  {
467  rv = VNET_FLOW_ERROR_INTERNAL;
468  goto done;
469  }
470  else
471  fate = true;
472  }
473 
474  if (f->actions & VNET_FLOW_ACTION_RSS)
475  {
476  u64 rss_type = 0;
477 
478  vec_add2 (actions, action, 1);
479  action->type = RTE_FLOW_ACTION_TYPE_RSS;
480  action->conf = &rss;
481 
482  /* convert types to DPDK rss bitmask */
483  dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
484 
485  rss.types = rss_type;
486  if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
487  RTE_ETH_HASH_FUNCTION_MAX)
488  {
489  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
490  goto done;
491  }
492 
493  if (fate == true)
494  {
495  rv = VNET_FLOW_ERROR_INTERNAL;
496  goto done;
497  }
498  else
499  fate = true;
500  }
501 
502  if (fate == false)
503  {
504  vec_add2 (actions, action, 1);
505  action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
506  }
507 
508  if (f->actions & VNET_FLOW_ACTION_MARK)
509  {
510  vec_add2 (actions, action, 1);
511  mark.id = fe->mark;
512  action->type = RTE_FLOW_ACTION_TYPE_MARK;
513  action->conf = &mark;
514  }
515 
516  vec_add2 (actions, action, 1);
517  action->type = RTE_FLOW_ACTION_TYPE_END;
518 
519  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
520  &xd->last_flow_error);
521 
522  if (rv)
523  {
524  if (rv == -EINVAL)
525  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
526  else if (rv == -EEXIST)
527  rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
528  else
529  rv = VNET_FLOW_ERROR_INTERNAL;
530 
531  goto done;
532  }
533 
534  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
535  &xd->last_flow_error);
536 
537  if (!fe->handle)
538  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
539 
540 done:
541  vec_free (items);
542  vec_free (actions);
543  return rv;
544 }
545 
546 int
548  u32 flow_index, uword * private_data)
549 {
550  dpdk_main_t *dm = &dpdk_main;
551  vnet_flow_t *flow = vnet_get_flow (flow_index);
552  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
553  dpdk_flow_entry_t *fe;
554  dpdk_flow_lookup_entry_t *fle = 0;
555  int rv;
556 
557  /* recycle old flow lookup entries only after the main loop counter
558  increases - i.e. previously DMA'ed packets were handled */
559  if (vec_len (xd->parked_lookup_indexes) > 0 &&
561  {
562  u32 *fl_index;
563 
564  vec_foreach (fl_index, xd->parked_lookup_indexes)
565  pool_put_index (xd->flow_lookup_entries, *fl_index);
567  }
568 
569  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
570  {
571  fe = vec_elt_at_index (xd->flow_entries, *private_data);
572 
573  if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
574  &xd->last_flow_error)))
575  return VNET_FLOW_ERROR_INTERNAL;
576 
577  if (fe->mark)
578  {
579  /* make sure no action is taken for in-flight (marked) packets */
580  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
581  clib_memset (fle, -1, sizeof (*fle));
584  }
585 
586  clib_memset (fe, 0, sizeof (*fe));
587  pool_put (xd->flow_entries, fe);
588 
589  goto disable_rx_offload;
590  }
591 
592  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
593  return VNET_FLOW_ERROR_NOT_SUPPORTED;
594 
595  pool_get (xd->flow_entries, fe);
596  fe->flow_index = flow->index;
597 
598  if (flow->actions == 0)
599  {
600  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
601  goto done;
602  }
603 
604  /* if we need to mark packets, assign one mark */
605  if (flow->actions & (VNET_FLOW_ACTION_MARK |
606  VNET_FLOW_ACTION_REDIRECT_TO_NODE |
607  VNET_FLOW_ACTION_BUFFER_ADVANCE))
608  {
609  /* reserve slot 0 */
610  if (xd->flow_lookup_entries == 0)
614  fe->mark = fle - xd->flow_lookup_entries;
615 
616  /* install entry in the lookup table */
617  clib_memset (fle, -1, sizeof (*fle));
618  if (flow->actions & VNET_FLOW_ACTION_MARK)
619  fle->flow_id = flow->mark_flow_id;
620  if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
622  if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
623  fle->buffer_advance = flow->buffer_advance;
624  }
625  else
626  fe->mark = 0;
627 
628  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
629  {
630  xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
631  dpdk_device_setup (xd);
632  }
633 
634  switch (flow->type)
635  {
636  case VNET_FLOW_TYPE_ETHERNET:
637  case VNET_FLOW_TYPE_IP4:
638  case VNET_FLOW_TYPE_IP6:
639  case VNET_FLOW_TYPE_IP4_N_TUPLE:
640  case VNET_FLOW_TYPE_IP6_N_TUPLE:
641  case VNET_FLOW_TYPE_IP4_VXLAN:
642  case VNET_FLOW_TYPE_IP4_GTPC:
643  case VNET_FLOW_TYPE_IP4_GTPU:
644  case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
645  case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
646  case VNET_FLOW_TYPE_IP4_IPSEC_AH:
647  if ((rv = dpdk_flow_add (xd, flow, fe)))
648  goto done;
649  break;
650  default:
651  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
652  goto done;
653  }
654 
655  *private_data = fe - xd->flow_entries;
656 
657 done:
658  if (rv)
659  {
660  clib_memset (fe, 0, sizeof (*fe));
661  pool_put (xd->flow_entries, fe);
662  if (fle)
663  {
664  clib_memset (fle, -1, sizeof (*fle));
665  pool_put (xd->flow_lookup_entries, fle);
666  }
667  }
668 disable_rx_offload:
669  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
670  && pool_elts (xd->flow_entries) == 0)
671  {
672  xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
673  dpdk_device_setup (xd);
674  }
675 
676  return rv;
677 }
678 
679 u8 *
680 format_dpdk_flow (u8 * s, va_list * args)
681 {
682  u32 dev_instance = va_arg (*args, u32);
683  u32 flow_index = va_arg (*args, u32);
684  uword private_data = va_arg (*args, uword);
685  dpdk_main_t *dm = &dpdk_main;
686  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
687  dpdk_flow_entry_t *fe;
688 
689  if (flow_index == ~0)
690  {
691  s = format (s, "%-25s: %U\n", "supported flow actions",
693  s = format (s, "%-25s: %d\n", "last DPDK error type",
694  xd->last_flow_error.type);
695  s = format (s, "%-25s: %s\n", "last DPDK error message",
696  xd->last_flow_error.message ? xd->last_flow_error.message :
697  "n/a");
698  return s;
699  }
700 
701  if (private_data >= vec_len (xd->flow_entries))
702  return format (s, "unknown flow");
703 
704  fe = vec_elt_at_index (xd->flow_entries, private_data);
705  s = format (s, "mark %u", fe->mark);
706  return s;
707 }
708 
709 /*
710  * fd.io coding-style-patch-verification: ON
711  *
712  * Local Variables:
713  * eval: (c-set-style "gnu")
714  * End:
715  */
volatile u32 main_loop_count
Definition: main.h:136
#define FLOW_IS_L4_TUNNEL_TYPE(f)
Definition: flow.c:67
vnet_rss_function_t rss_fun
Definition: flow.h:245
u16 vni
Definition: flow_types.api:160
u64 rss_types
Definition: flow.h:242
vnet_flow_type_t type
Definition: flow.h:220
dpdk_main_t dpdk_main
Definition: init.c:46
unsigned long u64
Definition: types.h:89
vl_api_ip_port_and_mask_t dst_port
Definition: flow_types.api:92
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
#define EINVAL
Definition: string.h:93
u16 flags
Definition: dpdk.h:186
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:592
vl_api_address_t src
Definition: gre.api:54
#define vec_add2(V, P, N)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:630
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u32 supported_flow_actions
Definition: dpdk.h:208
struct rte_flow * handle
Definition: dpdk.h:146
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:252
vhost_vring_addr_t addr
Definition: vhost_user.h:111
unsigned char u8
Definition: types.h:56
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
#define clib_memcpy(d, s, n)
Definition: string.h:180
#define VXLAN_FLAGS_I
Definition: vxlan_packet.h:53
vl_api_ip_proto_t protocol
Definition: lb_types.api:71
vl_api_ip6_address_t ip6
Definition: one.api:424
u32 mark_flow_id
Definition: flow.h:229
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
unsigned int u32
Definition: types.h:88
void dpdk_device_setup(dpdk_device_t *xd)
Definition: common.c:39
i16 buffer_advance
Definition: dpdk.h:153
u32 parked_loop_count
Definition: dpdk.h:212
u32 device_index
Definition: dpdk.h:176
static const struct rte_flow_attr ingress
Definition: flow.c:74
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
u32 redirect_queue
Definition: flow.h:236
#define FLOW_IS_IPV4_CLASS(f)
Definition: flow.c:32
#define FLOW_IS_L4_TYPE(f)
Definition: flow.c:60
static int dpdk_flow_add(dpdk_device_t *xd, vnet_flow_t *f, dpdk_flow_entry_t *fe)
Definition: flow.c:134
unsigned short u16
Definition: types.h:57
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:302
static void dpdk_flow_convert_rss_types(u64 type, u64 *dpdk_rss_type)
Definition: flow.c:89
vl_api_ip4_address_t ip4
Definition: one.api:376
vnet_flow_t * vnet_get_flow(u32 flow_index)
Definition: flow.c:57
u32 index
Definition: flow.h:223
vl_api_address_t dst
Definition: gre.api:55
static bool mac_address_is_all_zero(const u8 addr[6])
Definition: flow.c:77
i32 buffer_advance
Definition: flow.h:239
#define pool_get_aligned(P, E, A)
Allocate an object E from a pool P with alignment A.
Definition: pool.h:246
u32 actions
Definition: flow.h:226
vl_api_ip_port_and_mask_t src_port
Definition: flow_types.api:91
vnet_flow_dev_op_t
Definition: interface.h:96
#define FLOW_IS_IPV6_CLASS(f)
Definition: flow.c:43
format_function_t format_flow_actions
Definition: flow.h:288
Definition: dpdk.h:142
#define FLOW_IS_L3_TYPE(f)
Definition: flow.c:55
dpdk_device_t * devices
Definition: dpdk.h:323
dpdk_flow_lookup_entry_t * flow_lookup_entries
Definition: dpdk.h:210
u32 * parked_lookup_indexes
Definition: dpdk.h:211
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
u8 * format_dpdk_flow(u8 *s, va_list *args)
Definition: flow.c:680
vnet_rss_function_t
Definition: flow.h:195
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:380
int dpdk_flow_ops_fn(vnet_main_t *vnm, vnet_flow_dev_op_t op, u32 dev_instance, u32 flow_index, uword *private_data)
Definition: flow.c:547
u32 flow_index
Definition: dpdk.h:144
#define ARRAY_LEN(x)
Definition: clib.h:67
#define pool_put_index(p, i)
Free pool element with given index.
Definition: pool.h:331
#define FLOW_HAS_VLAN_TAG(f)
Definition: flow.c:50
Definition: dpdk.h:149
#define foreach_dpdk_rss_hf
Definition: dpdk.h:409
vl_api_mac_event_action_t action
Definition: l2.api:181
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vl_api_flow_action_t actions
Definition: flow_types.api:224
u64 uword
Definition: types.h:112
u16 next_index
Definition: dpdk.h:152
u32 redirect_device_input_next_index
Definition: flow.h:233
#define FLOW_IS_ETHERNET_CLASS(f)
Definition: flow.c:29
#define vec_foreach(var, vec)
Vector iterator.
dpdk_flow_entry_t * flow_entries
Definition: dpdk.h:209
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 flow_id
Definition: dpdk.h:151
struct rte_flow_error last_flow_error
Definition: dpdk.h:213
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
u32 mark
Definition: dpdk.h:145
static enum rte_eth_hash_function dpdk_flow_convert_rss_func(vnet_rss_function_t func)
Definition: flow.c:107
vlib_main_t * vlib_main
Definition: dpdk.h:340
static uword pool_elts(void *v)
Number of active elements in a pool.
Definition: pool.h:128