FD.io VPP v20.09-64-g4f7b92f0a — Vector Packet Processing
decap.c — source listing extracted from the generated documentation
(this page corresponds to the "Go to the documentation of this file" view).
1 /*
2  * decap.c: vxlan tunnel decap packet processing
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/vxlan/vxlan.h>
21 
22 #ifndef CLIB_MARCH_VARIANT
25 #endif
26 
/* Per-packet trace record for the vxlan input nodes.
   NOTE(review): the member lines (next_index, error, tunnel_index, vni -- per
   the uses in format_vxlan_rx_trace below) and the closing
   "} vxlan_rx_trace_t;" were lost in extraction; restore from upstream. */
27 typedef struct
28 {
34 
35 static u8 *
36 format_vxlan_rx_trace (u8 * s, va_list * args)
37 {
38  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
39  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
40  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);
41 
42  if (t->tunnel_index == ~0)
43  return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
44  t->vni);
45  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
46  t->tunnel_index, t->vni, t->next_index, t->error);
47 }
48 
50 
/* Canned decap result: no tunnel matched the packet -- drop with
   VXLAN_ERROR_NO_SUCH_TUNNEL. sw_if_index ~0 means "no interface". */
51 static const vxlan_decap_info_t decap_not_found = {
52  .sw_if_index = ~0,
53  .next_index = VXLAN_INPUT_NEXT_DROP,
54  .error = VXLAN_ERROR_NO_SUCH_TUNNEL
55 };
56 
/* Canned decap result: VXLAN header flags were not exactly the I bit --
   drop with VXLAN_ERROR_BAD_FLAGS. */
57 static const vxlan_decap_info_t decap_bad_flags = {
58  .sw_if_index = ~0,
59  .next_index = VXLAN_INPUT_NEXT_DROP,
60  .error = VXLAN_ERROR_BAD_FLAGS
61 };
62 
/* Look up the vxlan tunnel for an IPv4-encapsulated packet.
   Key is (dst ip, src ip, rx fib index, vni); a one-entry cache is consulted
   first, then the bihash; on unicast miss a multicast lookup is attempted.
   *stats_sw_if_index is set to the interface to count the packet against.
   NOTE(review): the function signature lines (doxygen lines 63-64,
   presumably "always_inline vxlan_decap_info_t vxlan4_find_tunnel
   (vxlan_main_t * vxm, last_tunnel_cache4 * cache, ...") were lost in
   extraction -- restore from upstream. */
65  u32 fib_index, ip4_header_t * ip4_0,
66  vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
67 {
68  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
69  return decap_bad_flags;
70 
71  /* Make sure VXLAN tunnel exist according to packet S/D IP, VRF, and VNI */
72  u32 dst = ip4_0->dst_address.as_u32;
73  u32 src = ip4_0->src_address.as_u32;
74  vxlan4_tunnel_key_t key4 = {
75  .key[0] = ((u64) dst << 32) | src,
76  .key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved,
77  };
78 
79  if (PREDICT_TRUE
80  (key4.key[0] == cache->key[0] && key4.key[1] == cache->key[1]))
81  {
82  /* cache hit */
83  vxlan_decap_info_t di = {.as_u64 = cache->value };
84  *stats_sw_if_index = di.sw_if_index;
85  return di;
86  }
87 
88  int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
89  if (PREDICT_TRUE (rv == 0))
90  {
91  *cache = key4;
92  vxlan_decap_info_t di = {.as_u64 = key4.value };
93  *stats_sw_if_index = di.sw_if_index;
94  return di;
95  }
96 
97  /* try multicast */
/* NOTE(review): the guarding condition (doxygen line 98, presumably
   "if (PREDICT_TRUE (!ip4_address_is_multicast (&ip4_0->dst_address)))")
   was lost in extraction; as shown the return below reads unconditional. */
99  return decap_not_found;
100 
101  /* search for mcast decap info by mcast address */
102  key4.key[0] = dst;
103  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
104  if (rv != 0)
105  return decap_not_found;
106 
107  /* search for unicast tunnel using the mcast tunnel local(src) ip */
108  vxlan_decap_info_t mdi = {.as_u64 = key4.value };
109  key4.key[0] = ((u64) mdi.local_ip.as_u32 << 32) | src;
110  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
111  if (PREDICT_FALSE (rv != 0))
112  return decap_not_found;
113 
114  /* mcast traffic does not update the cache */
115  *stats_sw_if_index = mdi.sw_if_index;
116  vxlan_decap_info_t di = {.as_u64 = key4.value };
117  return di;
118 }
119 
121 
/* Look up the vxlan tunnel for an IPv6-encapsulated packet.
   Key is (src ip, rx fib index, vni); a one-entry cache fronts the bihash,
   then the tunnel's source address is validated against the packet DIP,
   falling back to a multicast lookup on mismatch.
   NOTE(review): the signature lines (doxygen lines 122-123, presumably
   "always_inline vxlan_decap_info_t vxlan6_find_tunnel (vxlan_main_t * vxm,
   last_tunnel_cache6 * cache, ...") were lost in extraction. */
124  u32 fib_index, ip6_header_t * ip6_0,
125  vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
126 {
127  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
128  return decap_bad_flags;
129 
130  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
131  vxlan6_tunnel_key_t key6 = {
132  .key[0] = ip6_0->src_address.as_u64[0],
133  .key[1] = ip6_0->src_address.as_u64[1],
134  .key[2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved,
135  };
136 
137  if (PREDICT_FALSE
138  (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
139  {
140  int rv =
141  clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
142  if (PREDICT_FALSE (rv != 0))
143  return decap_not_found;
144 
145  *cache = key6;
146  }
147  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);
148 
149  /* Validate VXLAN tunnel SIP against packet DIP */
150  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
151  *stats_sw_if_index = t0->sw_if_index;
152  else
153  {
154  /* try multicast */
/* NOTE(review): the guarding condition (doxygen line 155, presumably
   "if (PREDICT_TRUE (!ip6_address_is_multicast (&ip6_0->dst_address)))")
   was lost in extraction. */
156  return decap_not_found;
157 
158  /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
159  key6.key[0] = ip6_0->dst_address.as_u64[0];
160  key6.key[1] = ip6_0->dst_address.as_u64[1];
161  int rv =
162  clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
163  if (PREDICT_FALSE (rv != 0))
164  return decap_not_found;
165 
166  vxlan_tunnel_t *mcast_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
167  *stats_sw_if_index = mcast_t0->sw_if_index;
168  }
169 
/* NOTE(review): the initializer opener (doxygen line 170, presumably
   "vxlan_decap_info_t di = {") was lost in extraction. */
171  .sw_if_index = t0->sw_if_index,
172  .next_index = t0->decap_next_index,
173  };
174  return di;
175 }
176 
/* Shared fast path for the vxlan4-input / vxlan6-input graph nodes.
   For each buffer: current_data points at the VXLAN header (udp-local has
   already consumed ip+udp); the tunnel is resolved via vxlan4/6_find_tunnel,
   the VXLAN header is popped, RX sw_if_index is rewritten to the tunnel
   interface, per-tunnel combined counters are bumped, and errors are dropped.
   Dual-loop (2 packets) plus single-loop tail.
   NOTE(review): the signature lines (doxygen lines 178-179, presumably
   "always_inline uword vxlan_input (vlib_main_t * vm,
   vlib_node_runtime_t * node, ...") were lost in extraction. */
180  vlib_frame_t * from_frame, u32 is_ip4)
181 {
182  vxlan_main_t *vxm = &vxlan_main;
183  vnet_main_t *vnm = vxm->vnet_main;
/* NOTE(review): doxygen lines 184 and 186 (the interface-main lookup and the
   rx_counter initializer's right-hand side, presumably
   "im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX") were lost. */
185  vlib_combined_counter_main_t *rx_counter =
187  last_tunnel_cache4 last4;
188  last_tunnel_cache6 last6;
189  u32 pkts_dropped = 0;
190  u32 thread_index = vlib_get_thread_index ();
191 
192  if (is_ip4)
193  clib_memset (&last4, 0xff, sizeof last4);
194  else
195  clib_memset (&last6, 0xff, sizeof last6);
196 
197  u32 *from = vlib_frame_vector_args (from_frame);
198  u32 n_left_from = from_frame->n_vectors;
199 
200  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
201  vlib_get_buffers (vm, from, bufs, n_left_from);
202 
203  u32 stats_if0 = ~0, stats_if1 = ~0;
204  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
205  while (n_left_from >= 4)
206  {
207  /* Prefetch next iteration. */
208  vlib_prefetch_buffer_header (b[2], LOAD);
209  vlib_prefetch_buffer_header (b[3], LOAD);
210 
211  /* udp leaves current_data pointing at the vxlan header */
212  void *cur0 = vlib_buffer_get_current (b[0]);
213  void *cur1 = vlib_buffer_get_current (b[1]);
214  vxlan_header_t *vxlan0 = cur0;
215  vxlan_header_t *vxlan1 = cur1;
216 
217 
218  ip4_header_t *ip4_0, *ip4_1;
219  ip6_header_t *ip6_0, *ip6_1;
220  if (is_ip4)
221  {
/* ip/udp headers sit immediately before the vxlan header */
222  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
223  ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
224  }
225  else
226  {
227  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
228  ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
229  }
230 
231  /* pop vxlan */
232  vlib_buffer_advance (b[0], sizeof *vxlan0);
233  vlib_buffer_advance (b[1], sizeof *vxlan1);
234 
235  u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);
236  u32 fi1 = vlib_buffer_get_ip_fib_index (b[1], is_ip4);
237 
238  vxlan_decap_info_t di0 = is_ip4 ?
239  vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
240  vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
241  vxlan_decap_info_t di1 = is_ip4 ?
242  vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_if1) :
243  vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_if1);
244 
245  /* Prefetch next iteration. */
/* NOTE(review): doxygen lines 246-247 (the CLIB_PREFETCH calls for b[2]/b[3]
   data) were lost in extraction. */
248 
249  u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
250  u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
251 
252  next[0] = di0.next_index;
253  next[1] = di1.next_index;
254 
255  u8 any_error = di0.error | di1.error;
256  if (PREDICT_TRUE (any_error == 0))
257  {
258  /* Required to make the l2 tag push / pop code work on l2 subifs */
259  vnet_update_l2_len (b[0]);
260  vnet_update_l2_len (b[1]);
261  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
262  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
263  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
264  vlib_increment_combined_counter (rx_counter, thread_index,
265  stats_if0, 1, len0);
266  vlib_increment_combined_counter (rx_counter, thread_index,
267  stats_if1, 1, len1);
268  }
269  else
270  {
/* slow path: handle each of the two packets independently */
271  if (di0.error == 0)
272  {
273  vnet_update_l2_len (b[0]);
274  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
275  vlib_increment_combined_counter (rx_counter, thread_index,
276  stats_if0, 1, len0);
277  }
278  else
279  {
280  b[0]->error = node->errors[di0.error];
281  pkts_dropped++;
282  }
283 
284  if (di1.error == 0)
285  {
286  vnet_update_l2_len (b[1]);
287  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
288  vlib_increment_combined_counter (rx_counter, thread_index,
289  stats_if1, 1, len1);
290  }
291  else
292  {
293  b[1]->error = node->errors[di1.error];
294  pkts_dropped++;
295  }
296  }
297 
298  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
299  {
300  vxlan_rx_trace_t *tr =
301  vlib_add_trace (vm, node, b[0], sizeof (*tr));
302  tr->next_index = next[0];
303  tr->error = di0.error;
/* NOTE(review): the ternary's result operands (doxygen line 305) were lost
   in extraction; the statement below is truncated. */
304  tr->tunnel_index = di0.sw_if_index == ~0 ?
306  tr->vni = vnet_get_vni (vxlan0);
307  }
308  if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
309  {
310  vxlan_rx_trace_t *tr =
311  vlib_add_trace (vm, node, b[1], sizeof (*tr));
312  tr->next_index = next[1];
313  tr->error = di1.error;
/* NOTE(review): doxygen line 315 (ternary result operands) lost in
   extraction. */
314  tr->tunnel_index = di1.sw_if_index == ~0 ?
316  tr->vni = vnet_get_vni (vxlan1);
317  }
318  b += 2;
319  next += 2;
320  n_left_from -= 2;
321  }
322 
323  while (n_left_from > 0)
324  {
325  /* udp leaves current_data pointing at the vxlan header */
326  void *cur0 = vlib_buffer_get_current (b[0]);
327  vxlan_header_t *vxlan0 = cur0;
328  ip4_header_t *ip4_0;
329  ip6_header_t *ip6_0;
330  if (is_ip4)
331  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
332  else
333  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
334 
335  /* pop (ip, udp, vxlan) */
336  vlib_buffer_advance (b[0], sizeof (*vxlan0));
337 
338  u32 fi0 = vlib_buffer_get_ip_fib_index (b[0], is_ip4);
339 
340  vxlan_decap_info_t di0 = is_ip4 ?
341  vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
342  vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
343 
344  uword len0 = vlib_buffer_length_in_chain (vm, b[0]);
345 
346  next[0] = di0.next_index;
347 
348  /* Validate VXLAN tunnel encap-fib index against packet */
349  if (di0.error == 0)
350  {
351  /* Required to make the l2 tag push / pop code work on l2 subifs */
352  vnet_update_l2_len (b[0]);
353 
354  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
355  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
356 
357  vlib_increment_combined_counter (rx_counter, thread_index,
358  stats_if0, 1, len0);
359  }
360  else
361  {
362  b[0]->error = node->errors[di0.error];
363  pkts_dropped++;
364  }
365 
366  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
367  {
368  vxlan_rx_trace_t *tr
369  = vlib_add_trace (vm, node, b[0], sizeof (*tr));
370  tr->next_index = next[0];
371  tr->error = di0.error;
/* NOTE(review): doxygen line 373 (ternary result operands) lost in
   extraction. */
372  tr->tunnel_index = di0.sw_if_index == ~0 ?
374  tr->vni = vnet_get_vni (vxlan0);
375  }
376  b += 1;
377  next += 1;
378  n_left_from -= 1;
379  }
380  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
381  /* Do we still need this now that tunnel tx stats is kept? */
382  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
383  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
384  from_frame->n_vectors - pkts_dropped);
385 
386  return from_frame->n_vectors;
387 }
388 
/* IPv4 vxlan input node entry point: thin wrapper over vxlan_input.
   NOTE(review): the node-function header lines (doxygen lines 389-390,
   presumably "VLIB_NODE_FN (vxlan4_input_node) (vlib_main_t * vm,
   vlib_node_runtime_t * node,") were lost in extraction. */
391  vlib_frame_t * from_frame)
392 {
393  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
394 }
395 
/* IPv6 vxlan input node entry point: thin wrapper over vxlan_input.
   NOTE(review): the node-function header lines (doxygen lines 396-397,
   presumably "VLIB_NODE_FN (vxlan6_input_node) (...)") were lost in
   extraction. */
398  vlib_frame_t * from_frame)
399 {
400  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
401 }
402 
/* Error-counter strings for the vxlan input nodes, generated from the
   foreach_vxlan_error list. */
403 static char *vxlan_error_strings[] = {
404 #define vxlan_error(n,s) s,
/* NOTE(review): the expansion line (doxygen line 405, presumably
   "foreach_vxlan_error") was lost in extraction. */
406 #undef vxlan_error
407 };
408 
409 /* *INDENT-OFF* */
/* Graph-node registration for "vxlan4-input".
   NOTE(review): the registration header (doxygen line 410, presumably
   "VLIB_REGISTER_NODE (vxlan4_input_node) =") was lost in extraction. */
411 {
412  .name = "vxlan4-input",
413  .vector_size = sizeof (u32),
414  .n_errors = VXLAN_N_ERROR,
415  .error_strings = vxlan_error_strings,
416  .n_next_nodes = VXLAN_INPUT_N_NEXT,
417  .format_trace = format_vxlan_rx_trace,
418  .next_nodes = {
419 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
/* NOTE(review): doxygen line 420 (presumably "foreach_vxlan_input_next")
   was lost in extraction. */
421 #undef _
422  },
423 };
424 
/* Graph-node registration for "vxlan6-input".
   NOTE(review): doxygen line 425 (presumably
   "VLIB_REGISTER_NODE (vxlan6_input_node) =") was lost in extraction. */
426 {
427  .name = "vxlan6-input",
428  .vector_size = sizeof (u32),
429  .n_errors = VXLAN_N_ERROR,
430  .error_strings = vxlan_error_strings,
431  .n_next_nodes = VXLAN_INPUT_N_NEXT,
432  .next_nodes = {
433 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
/* NOTE(review): doxygen line 434 (presumably "foreach_vxlan_input_next")
   was lost in extraction. */
435 #undef _
436  },
437  .format_trace = format_vxlan_rx_trace,
438 };
439 /* *INDENT-ON* */
440 
/* Next-node indices for the ip4/ip6 vxlan-bypass nodes.
   NOTE(review): the enumerator lines (doxygen lines 443-446, presumably
   IP_VXLAN_BYPASS_NEXT_DROP, IP_VXLAN_BYPASS_NEXT_VXLAN,
   IP_VXLAN_BYPASS_N_NEXT and the closing "} ip_vxlan_bypass_next_t;" --
   the names used by the node registrations below) were lost in extraction. */
441 typedef enum
442 {
447 
/* Shared worker for the ip4-vxlan-bypass / ip6-vxlan-bypass feature nodes.
   Inspects IP packets before the full IP lookup: packets that are UDP to the
   VXLAN port, addressed to a local VTEP, and pass UDP length/checksum
   validation are steered directly to the vxlan input node (current advanced
   past ip+udp); everything else continues to the next IP feature.
   Classic dual-loop with goto-based "not ours" early exits.
   NOTE(review): the signature lines (doxygen lines 449-450, presumably
   "always_inline uword ip_vxlan_bypass_inline (vlib_main_t * vm,
   vlib_node_runtime_t * node,") were lost in extraction. */
451  vlib_frame_t * frame, u32 is_ip4)
452 {
453  vxlan_main_t *vxm = &vxlan_main;
454  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
/* NOTE(review): the initializer's right-hand side (doxygen line 456,
   presumably "vlib_node_get_runtime (vm, ip4_input_node.index)" or the
   ip6 equivalent) was lost in extraction. */
455  vlib_node_runtime_t *error_node =
457  vtep4_key_t last_vtep4; /* last IPv4 address / fib index
458  matching a local VTEP address */
459  vtep6_key_t last_vtep6; /* last IPv6 address / fib index
460  matching a local VTEP address */
461  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
462 
463 #ifdef CLIB_HAVE_VEC512
464  vtep4_cache_t vtep4_u512;
465  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
466 #endif
467 
468  from = vlib_frame_vector_args (frame);
469  n_left_from = frame->n_vectors;
470  next_index = node->cached_next_index;
471 
472  vlib_get_buffers (vm, from, bufs, n_left_from);
473 
474  if (node->flags & VLIB_NODE_FLAG_TRACE)
475  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
476 
477  if (is_ip4)
478  vtep4_key_init (&last_vtep4);
479  else
480  vtep6_key_init (&last_vtep6);
481 
482  while (n_left_from > 0)
483  {
484  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
485 
486  while (n_left_from >= 4 && n_left_to_next >= 2)
487  {
488  vlib_buffer_t *b0, *b1;
489  ip4_header_t *ip40, *ip41;
490  ip6_header_t *ip60, *ip61;
491  udp_header_t *udp0, *udp1;
492  u32 bi0, ip_len0, udp_len0, flags0, next0;
493  u32 bi1, ip_len1, udp_len1, flags1, next1;
494  i32 len_diff0, len_diff1;
495  u8 error0, good_udp0, proto0;
496  u8 error1, good_udp1, proto1;
497 
498  /* Prefetch next iteration. */
499  {
500  vlib_prefetch_buffer_header (b[2], LOAD);
501  vlib_prefetch_buffer_header (b[3], LOAD);
502 
503  CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
504  CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
505  }
506 
507  bi0 = to_next[0] = from[0];
508  bi1 = to_next[1] = from[1];
509  from += 2;
510  n_left_from -= 2;
511  to_next += 2;
512  n_left_to_next -= 2;
513 
514  b0 = b[0];
515  b1 = b[1];
516  b += 2;
517  if (is_ip4)
518  {
519  ip40 = vlib_buffer_get_current (b0);
520  ip41 = vlib_buffer_get_current (b1);
521  }
522  else
523  {
524  ip60 = vlib_buffer_get_current (b0);
525  ip61 = vlib_buffer_get_current (b1);
526  }
527 
528  /* Setup packet for next IP feature */
529  vnet_feature_next (&next0, b0);
530  vnet_feature_next (&next1, b1);
531 
532  if (is_ip4)
533  {
534  /* Treat IP frag packets as "experimental" protocol for now
535  until support of IP frag reassembly is implemented */
536  proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
537  proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
538  }
539  else
540  {
541  proto0 = ip60->protocol;
542  proto1 = ip61->protocol;
543  }
544 
545  /* Process packet 0 */
546  if (proto0 != IP_PROTOCOL_UDP)
547  goto exit0; /* not UDP packet */
548 
549  if (is_ip4)
550  udp0 = ip4_next_header (ip40);
551  else
552  udp0 = ip6_next_header (ip60);
553 
554  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
555  goto exit0; /* not VXLAN packet */
556 
557  /* Validate DIP against VTEPs */
558  if (is_ip4)
559  {
560 #ifdef CLIB_HAVE_VEC512
561  if (!vtep4_check_vector
562  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
563 #else
564  if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
565 #endif
566  goto exit0; /* no local VTEP for VXLAN packet */
567  }
568  else
569  {
570  if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
571  goto exit0; /* no local VTEP for VXLAN packet */
572  }
573 
574  flags0 = b0->flags;
575  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
576 
577  /* Don't verify UDP checksum for packets with explicit zero checksum. */
578  good_udp0 |= udp0->checksum == 0;
579 
580  /* Verify UDP length */
581  if (is_ip4)
582  ip_len0 = clib_net_to_host_u16 (ip40->length);
583  else
584  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
585  udp_len0 = clib_net_to_host_u16 (udp0->length);
586  len_diff0 = ip_len0 - udp_len0;
587 
588  /* Verify UDP checksum */
589  if (PREDICT_FALSE (!good_udp0))
590  {
591  if (is_ip4)
592  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
593  else
594  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
595  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
596  }
597 
598  if (is_ip4)
599  {
600  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
601  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
602  }
603  else
604  {
605  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
606  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
607  }
608 
/* NOTE(review): the ternary result operands (doxygen line 610, presumably
   "IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;") were lost. */
609  next0 = error0 ?
611  b0->error = error0 ? error_node->errors[error0] : 0;
612 
613  /* vxlan-input node expect current at VXLAN header */
614  if (is_ip4)
/* NOTE(review): the call opener (doxygen line 615, presumably
   "vlib_buffer_advance (b0,") was lost in extraction. */
616  sizeof (ip4_header_t) +
617  sizeof (udp_header_t));
618  else
/* NOTE(review): doxygen line 619 (presumably "vlib_buffer_advance (b0,")
   was lost in extraction. */
620  sizeof (ip6_header_t) +
621  sizeof (udp_header_t));
622 
623  exit0:
624  /* Process packet 1 */
625  if (proto1 != IP_PROTOCOL_UDP)
626  goto exit1; /* not UDP packet */
627 
628  if (is_ip4)
629  udp1 = ip4_next_header (ip41);
630  else
631  udp1 = ip6_next_header (ip61);
632 
633  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
634  goto exit1; /* not VXLAN packet */
635 
636  /* Validate DIP against VTEPs */
637  if (is_ip4)
638  {
639 #ifdef CLIB_HAVE_VEC512
640  if (!vtep4_check_vector
641  (&vxm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
642 #else
643  if (!vtep4_check (&vxm->vtep_table, b1, ip41, &last_vtep4))
644 #endif
645  goto exit1; /* no local VTEP for VXLAN packet */
646  }
647  else
648  {
649  if (!vtep6_check (&vxm->vtep_table, b1, ip61, &last_vtep6))
650  goto exit1; /* no local VTEP for VXLAN packet */
651  }
652 
653  flags1 = b1->flags;
654  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
655 
656  /* Don't verify UDP checksum for packets with explicit zero checksum. */
657  good_udp1 |= udp1->checksum == 0;
658 
659  /* Verify UDP length */
660  if (is_ip4)
661  ip_len1 = clib_net_to_host_u16 (ip41->length);
662  else
663  ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
664  udp_len1 = clib_net_to_host_u16 (udp1->length);
665  len_diff1 = ip_len1 - udp_len1;
666 
667  /* Verify UDP checksum */
668  if (PREDICT_FALSE (!good_udp1))
669  {
670  if (is_ip4)
671  flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
672  else
673  flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
674  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
675  }
676 
677  if (is_ip4)
678  {
679  error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
680  error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
681  }
682  else
683  {
684  error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
685  error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
686  }
687 
/* NOTE(review): doxygen line 689 (ternary result operands) was lost in
   extraction. */
688  next1 = error1 ?
690  b1->error = error1 ? error_node->errors[error1] : 0;
691 
692  /* vxlan-input node expect current at VXLAN header */
693  if (is_ip4)
/* NOTE(review): doxygen line 694 (presumably "vlib_buffer_advance (b1,")
   was lost in extraction. */
695  sizeof (ip4_header_t) +
696  sizeof (udp_header_t));
697  else
/* NOTE(review): doxygen line 698 (presumably "vlib_buffer_advance (b1,")
   was lost in extraction. */
699  sizeof (ip6_header_t) +
700  sizeof (udp_header_t));
701 
702  exit1:
703  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
704  to_next, n_left_to_next,
705  bi0, bi1, next0, next1);
706  }
707 
708  while (n_left_from > 0 && n_left_to_next > 0)
709  {
710  vlib_buffer_t *b0;
711  ip4_header_t *ip40;
712  ip6_header_t *ip60;
713  udp_header_t *udp0;
714  u32 bi0, ip_len0, udp_len0, flags0, next0;
715  i32 len_diff0;
716  u8 error0, good_udp0, proto0;
717 
718  bi0 = to_next[0] = from[0];
719  from += 1;
720  n_left_from -= 1;
721  to_next += 1;
722  n_left_to_next -= 1;
723 
724  b0 = b[0];
725  b++;
726  if (is_ip4)
727  ip40 = vlib_buffer_get_current (b0);
728  else
729  ip60 = vlib_buffer_get_current (b0);
730 
731  /* Setup packet for next IP feature */
732  vnet_feature_next (&next0, b0);
733 
734  if (is_ip4)
735  /* Treat IP4 frag packets as "experimental" protocol for now
736  until support of IP frag reassembly is implemented */
737  proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
738  else
739  proto0 = ip60->protocol;
740 
741  if (proto0 != IP_PROTOCOL_UDP)
742  goto exit; /* not UDP packet */
743 
744  if (is_ip4)
745  udp0 = ip4_next_header (ip40);
746  else
747  udp0 = ip6_next_header (ip60);
748 
749  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
750  goto exit; /* not VXLAN packet */
751 
752  /* Validate DIP against VTEPs */
753  if (is_ip4)
754  {
755 #ifdef CLIB_HAVE_VEC512
756  if (!vtep4_check_vector
757  (&vxm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
758 #else
759  if (!vtep4_check (&vxm->vtep_table, b0, ip40, &last_vtep4))
760 #endif
761  goto exit; /* no local VTEP for VXLAN packet */
762  }
763  else
764  {
765  if (!vtep6_check (&vxm->vtep_table, b0, ip60, &last_vtep6))
766  goto exit; /* no local VTEP for VXLAN packet */
767  }
768 
769  flags0 = b0->flags;
770  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
771 
772  /* Don't verify UDP checksum for packets with explicit zero checksum. */
773  good_udp0 |= udp0->checksum == 0;
774 
775  /* Verify UDP length */
776  if (is_ip4)
777  ip_len0 = clib_net_to_host_u16 (ip40->length);
778  else
779  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
780  udp_len0 = clib_net_to_host_u16 (udp0->length);
781  len_diff0 = ip_len0 - udp_len0;
782 
783  /* Verify UDP checksum */
784  if (PREDICT_FALSE (!good_udp0))
785  {
786  if (is_ip4)
787  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
788  else
789  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
790  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
791  }
792 
793  if (is_ip4)
794  {
795  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
796  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
797  }
798  else
799  {
800  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
801  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
802  }
803 
/* NOTE(review): doxygen line 805 (ternary result operands) was lost in
   extraction. */
804  next0 = error0 ?
806  b0->error = error0 ? error_node->errors[error0] : 0;
807 
808  /* vxlan-input node expect current at VXLAN header */
809  if (is_ip4)
/* NOTE(review): doxygen line 810 (presumably "vlib_buffer_advance (b0,")
   was lost in extraction. */
811  sizeof (ip4_header_t) +
812  sizeof (udp_header_t));
813  else
/* NOTE(review): doxygen line 814 (presumably "vlib_buffer_advance (b0,")
   was lost in extraction. */
815  sizeof (ip6_header_t) +
816  sizeof (udp_header_t));
817 
818  exit:
819  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
820  to_next, n_left_to_next,
821  bi0, next0);
822  }
823 
824  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
825  }
826 
827  return frame->n_vectors;
828 }
829 
/* ip4-vxlan-bypass node entry point: thin wrapper over the shared inline.
   NOTE(review): the node-function header lines (doxygen lines 830-832,
   presumably "VLIB_NODE_FN (ip4_vxlan_bypass_node) (vlib_main_t * vm, ...)")
   were lost in extraction. */
833 {
834  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
835 }
836 
837 /* *INDENT-OFF* */
/* Graph-node registration for "ip4-vxlan-bypass".
   NOTE(review): the registration header (doxygen line 838, presumably
   "VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) =") was lost in extraction. */
839 {
840  .name = "ip4-vxlan-bypass",
841  .vector_size = sizeof (u32),
842  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
843  .next_nodes = {
844  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
845  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
846  },
847  .format_buffer = format_ip4_header,
848  .format_trace = format_ip4_forward_next_trace,
849 };
850 
851 /* *INDENT-ON* */
852 
853 /* Dummy init function to get us linked in. */
854 static clib_error_t *
/* NOTE(review): the function name line (doxygen line 855, presumably
   "ip4_vxlan_bypass_init (vlib_main_t * vm)") was lost in extraction. */
856 {
857  return 0;
858 }
859 
/* NOTE(review): doxygen line 860 (presumably
   "VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);") was lost in extraction. */
861 
/* ip6-vxlan-bypass node entry point: thin wrapper over the shared inline.
   NOTE(review): the node-function header lines (doxygen lines 862-864,
   presumably "VLIB_NODE_FN (ip6_vxlan_bypass_node) (vlib_main_t * vm, ...)")
   were lost in extraction. */
865 {
866  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
867 }
868 
869 /* *INDENT-OFF* */
/* Graph-node registration for "ip6-vxlan-bypass".
   NOTE(review): the registration header (doxygen line 870, presumably
   "VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) =") was lost in extraction. */
871 {
872  .name = "ip6-vxlan-bypass",
873  .vector_size = sizeof (u32),
874  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
875  .next_nodes = {
876  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
877  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
878  },
879  .format_buffer = format_ip6_header,
880  .format_trace = format_ip6_forward_next_trace,
881 };
882 
883 /* *INDENT-ON* */
884 
885 /* Dummy init function to get us linked in. */
886 static clib_error_t *
/* NOTE(review): the function name line (doxygen line 887, presumably
   "ip6_vxlan_bypass_init (vlib_main_t * vm)") was lost in extraction. */
888 {
889  return 0;
890 }
891 
/* NOTE(review): doxygen line 892 (presumably
   "VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);") was lost in extraction. */
893 
/* Next nodes reachable from the vxlan-flow-input path: drop on error,
   otherwise hand the decapsulated frame to l2-input. */
894 #define foreach_vxlan_flow_input_next \
895 _(DROP, "error-drop") \
896 _(L2_INPUT, "l2-input")
897 
/* Next-node index enum generated from foreach_vxlan_flow_input_next.
   NOTE(review): doxygen lines 901 and 903-904 (the
   "foreach_vxlan_flow_input_next" expansion, the N_NEXT count, and the
   closing "} vxlan_flow_next_t;") were lost in extraction. */
898 typedef enum
899 {
900 #define _(s,n) VXLAN_FLOW_NEXT_##s,
902 #undef _
905 
/* Error counters for the hardware-flow-offloaded vxlan input path. */
906 #define foreach_vxlan_flow_error \
907  _(NONE, "no error") \
908  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
909  _(IP_HEADER_ERROR, "Rx ip header errors") \
910  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
911  _(UDP_LENGTH_ERROR, "Rx udp length errors")
912 
/* Error-code enum generated from foreach_vxlan_flow_error.
   NOTE(review): doxygen lines 916 and 918-919 (the "foreach_vxlan_flow_error"
   expansion, the N_ERROR count, and the closing "} vxlan_flow_error_t;")
   were lost in extraction. */
913 typedef enum
914 {
915 #define _(f,s) VXLAN_FLOW_ERROR_##f,
917 #undef _
920 
/* Error-counter strings matching vxlan_flow_error_t, generated from the
   foreach_vxlan_flow_error list. */
921 static char *vxlan_flow_error_strings[] = {
922 #define _(n,s) s,
/* NOTE(review): the expansion line (doxygen line 923, presumably
   "foreach_vxlan_flow_error") was lost in extraction. */
924 #undef _
925 };
926 
927 
/* Software UDP checksum validation fallback for flow-offloaded packets.
   Returns non-zero when the L4 checksum is known correct after validation.
   NOTE(review): the signature lines (doxygen lines 928-929, presumably
   "static bool vxlan_validate_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)")
   were lost in extraction. */
930 {
931  u32 flags = b->flags;
/* distance from current (inner L2) back to the outer IPv4 header */
932  enum
933  { offset =
934  sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
935  };
936 
937  /* Verify UDP checksum */
938  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
939  {
/* NOTE(review): doxygen lines 940 and 942 (presumably the
   vlib_buffer_advance calls rewinding to the outer IP header and back)
   were lost in extraction. */
941  flags = ip4_tcp_udp_validate_checksum (vm, b);
943  }
944 
945  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
946 }
947 
/* Quick UDP checksum check on the already-consumed outer ip4+udp+vxlan
   header. Returns non-zero (bad) unless hardware marked the checksum correct
   or the checksum field is the explicit zero "no checksum" value.
   NOTE(review): the signature lines (doxygen lines 948-949, presumably
   "static bool vxlan_check_udp_csum (vlib_main_t * vm, vlib_buffer_t * b)")
   were lost in extraction. */
950 {
951  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
952  udp_header_t *udp = &hdr->udp;
953  /* Don't verify UDP checksum for packets with explicit zero checksum. */
954  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
955  udp->checksum == 0;
956 
957  return !good_csum;
958 }
959 
961 vxlan_check_ip (vlib_buffer_t * b, u16 payload_len)
962 {
963  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
964  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
965  u16 expected = payload_len + sizeof *hdr;
966  return ip_len > expected || hdr->ip4.ttl == 0
967  || hdr->ip4.ip_version_and_header_length != 0x45;
968 }
969 
/* Consistency check between the outer IPv4 total length and the UDP length:
   returns non-zero when the UDP header claims more bytes than IP carries.
   NOTE(review): the signature lines (doxygen lines 970-971, presumably
   "static bool vxlan_check_ip_udp_len (vlib_buffer_t * b)") were lost in
   extraction. */
972 {
973  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
974  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
975  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
976  return udp_len > ip_len;
977 }
978 
980 vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
981 {
982  u8 error0 = VXLAN_FLOW_ERROR_NONE;
983  if (ip_err0)
984  error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
985  if (udp_err0)
986  error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
987  if (csum_err0)
988  error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
989  return error0;
990 }
991 
994  vlib_frame_t * f)
995 {
996  enum
997  { payload_offset = sizeof (ip4_vxlan_header_t) };
998 
999  vxlan_main_t *vxm = &vxlan_main;
1002  [VXLAN_FLOW_NEXT_DROP] =
1004  [VXLAN_FLOW_NEXT_L2_INPUT] =
1006  };
1007  u32 thread_index = vlib_get_thread_index ();
1008 
1009  u32 *from = vlib_frame_vector_args (f);
1010  u32 n_left_from = f->n_vectors;
1011  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;
1012 
1013  while (n_left_from > 0)
1014  {
1015  u32 n_left_to_next, *to_next;
1016 
1017  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1018 
1019  while (n_left_from > 3 && n_left_to_next > 3)
1020  {
1021  u32 bi0 = to_next[0] = from[0];
1022  u32 bi1 = to_next[1] = from[1];
1023  u32 bi2 = to_next[2] = from[2];
1024  u32 bi3 = to_next[3] = from[3];
1025  from += 4;
1026  n_left_from -= 4;
1027  to_next += 4;
1028  n_left_to_next -= 4;
1029 
1030  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1031  vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
1032  vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
1033  vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);
1034 
1035  vlib_buffer_advance (b0, payload_offset);
1036  vlib_buffer_advance (b1, payload_offset);
1037  vlib_buffer_advance (b2, payload_offset);
1038  vlib_buffer_advance (b3, payload_offset);
1039 
1040  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1041  u16 len1 = vlib_buffer_length_in_chain (vm, b1);
1042  u16 len2 = vlib_buffer_length_in_chain (vm, b2);
1043  u16 len3 = vlib_buffer_length_in_chain (vm, b3);
1044 
1045  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 =
1046  VXLAN_FLOW_NEXT_L2_INPUT, next2 =
1047  VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;
1048 
1049  u8 ip_err0 = vxlan_check_ip (b0, len0);
1050  u8 ip_err1 = vxlan_check_ip (b1, len1);
1051  u8 ip_err2 = vxlan_check_ip (b2, len2);
1052  u8 ip_err3 = vxlan_check_ip (b3, len3);
1053  u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;
1054 
1055  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1056  u8 udp_err1 = vxlan_check_ip_udp_len (b1);
1057  u8 udp_err2 = vxlan_check_ip_udp_len (b2);
1058  u8 udp_err3 = vxlan_check_ip_udp_len (b3);
1059  u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;
1060 
1061  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1062  u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
1063  u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
1064  u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
1065  u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1066 
1067  if (PREDICT_FALSE (csum_err))
1068  {
1069  if (csum_err0)
1070  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1071  if (csum_err1)
1072  csum_err1 = !vxlan_validate_udp_csum (vm, b1);
1073  if (csum_err2)
1074  csum_err2 = !vxlan_validate_udp_csum (vm, b2);
1075  if (csum_err3)
1076  csum_err3 = !vxlan_validate_udp_csum (vm, b3);
1077  csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1078  }
1079 
1080  if (PREDICT_FALSE (ip_err || udp_err || csum_err))
1081  {
1082  if (ip_err0 || udp_err0 || csum_err0)
1083  {
1084  next0 = VXLAN_FLOW_NEXT_DROP;
1085  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1086  b0->error = node->errors[error0];
1087  }
1088  if (ip_err1 || udp_err1 || csum_err1)
1089  {
1090  next1 = VXLAN_FLOW_NEXT_DROP;
1091  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1092  b1->error = node->errors[error1];
1093  }
1094  if (ip_err2 || udp_err2 || csum_err2)
1095  {
1096  next2 = VXLAN_FLOW_NEXT_DROP;
1097  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1098  b2->error = node->errors[error2];
1099  }
1100  if (ip_err3 || udp_err3 || csum_err3)
1101  {
1102  next3 = VXLAN_FLOW_NEXT_DROP;
1103  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1104  b3->error = node->errors[error3];
1105  }
1106  }
1107 
1108  vnet_update_l2_len (b0);
1109  vnet_update_l2_len (b1);
1110  vnet_update_l2_len (b2);
1111  vnet_update_l2_len (b3);
1112 
1113  ASSERT (b0->flow_id != 0);
1114  ASSERT (b1->flow_id != 0);
1115  ASSERT (b2->flow_id != 0);
1116  ASSERT (b3->flow_id != 0);
1117 
1118  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1119  u32 t_index1 = b1->flow_id - vxm->flow_id_start;
1120  u32 t_index2 = b2->flow_id - vxm->flow_id_start;
1121  u32 t_index3 = b3->flow_id - vxm->flow_id_start;
1122 
1123  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1124  vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
1125  vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
1126  vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];
1127 
1128  /* flow id consumed */
1129  b0->flow_id = 0;
1130  b1->flow_id = 0;
1131  b2->flow_id = 0;
1132  b3->flow_id = 0;
1133 
1134  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1135  t0->sw_if_index;
1136  u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1137  t1->sw_if_index;
1138  u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
1139  t2->sw_if_index;
1140  u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
1141  t3->sw_if_index;
1142 
1143  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1144  sw_if_index0, 1, len0);
1145  vlib_increment_combined_counter (rx_counter[next1], thread_index,
1146  sw_if_index1, 1, len1);
1147  vlib_increment_combined_counter (rx_counter[next2], thread_index,
1148  sw_if_index2, 1, len2);
1149  vlib_increment_combined_counter (rx_counter[next3], thread_index,
1150  sw_if_index3, 1, len3);
1151 
1152  u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;
1153 
1154  if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
1155  {
1156  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1157  {
1158  vxlan_rx_trace_t *tr =
1159  vlib_add_trace (vm, node, b0, sizeof *tr);
1160  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1161  tr->next_index = next0;
1162  tr->error = error0;
1163  tr->tunnel_index = t_index0;
1164  tr->vni = t0->vni;
1165  }
1166  if (b1->flags & VLIB_BUFFER_IS_TRACED)
1167  {
1168  vxlan_rx_trace_t *tr =
1169  vlib_add_trace (vm, node, b1, sizeof *tr);
1170  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1171  tr->next_index = next1;
1172  tr->error = error1;
1173  tr->tunnel_index = t_index1;
1174  tr->vni = t1->vni;
1175  }
1176  if (b2->flags & VLIB_BUFFER_IS_TRACED)
1177  {
1178  vxlan_rx_trace_t *tr =
1179  vlib_add_trace (vm, node, b2, sizeof *tr);
1180  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1181  tr->next_index = next2;
1182  tr->error = error2;
1183  tr->tunnel_index = t_index2;
1184  tr->vni = t2->vni;
1185  }
1186  if (b3->flags & VLIB_BUFFER_IS_TRACED)
1187  {
1188  vxlan_rx_trace_t *tr =
1189  vlib_add_trace (vm, node, b3, sizeof *tr);
1190  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1191  tr->next_index = next3;
1192  tr->error = error3;
1193  tr->tunnel_index = t_index3;
1194  tr->vni = t3->vni;
1195  }
1196  }
1198  (vm, node, next_index, to_next, n_left_to_next,
1199  bi0, bi1, bi2, bi3, next0, next1, next2, next3);
1200  }
1201  while (n_left_from > 0 && n_left_to_next > 0)
1202  {
1203  u32 bi0 = to_next[0] = from[0];
1204  from++;
1205  n_left_from--;
1206  to_next++;
1207  n_left_to_next--;
1208 
1209  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1210  vlib_buffer_advance (b0, payload_offset);
1211 
1212  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1213  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;
1214 
1215  u8 ip_err0 = vxlan_check_ip (b0, len0);
1216  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1217  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1218 
1219  if (csum_err0)
1220  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1221  if (ip_err0 || udp_err0 || csum_err0)
1222  {
1223  next0 = VXLAN_FLOW_NEXT_DROP;
1224  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1225  b0->error = node->errors[error0];
1226  }
1227 
1228  vnet_update_l2_len (b0);
1229 
1230  ASSERT (b0->flow_id != 0);
1231  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1232  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1233  b0->flow_id = 0;
1234 
1235  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1236  t0->sw_if_index;
1237  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1238  sw_if_index0, 1, len0);
1239 
1240  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1241  {
1242  vxlan_rx_trace_t *tr =
1243  vlib_add_trace (vm, node, b0, sizeof *tr);
1244  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1245  tr->next_index = next0;
1246  tr->error = error0;
1247  tr->tunnel_index = t_index0;
1248  tr->vni = t0->vni;
1249  }
1250  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1251  to_next, n_left_to_next,
1252  bi0, next0);
1253  }
1254 
1255  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1256  }
1257 
1258  return f->n_vectors;
1259 }
1260 
1261 /* *INDENT-OFF* */
1262 #ifndef CLIB_MULTIARCH_VARIANT
1264  .name = "vxlan-flow-input",
1265  .type = VLIB_NODE_TYPE_INTERNAL,
1266  .vector_size = sizeof (u32),
1267 
1268  .format_trace = format_vxlan_rx_trace,
1269 
1270  .n_errors = VXLAN_FLOW_N_ERROR,
1271  .error_strings = vxlan_flow_error_strings,
1272 
1273  .n_next_nodes = VXLAN_FLOW_N_NEXT,
1274  .next_nodes = {
1275 #define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
1277 #undef _
1278  },
1279 };
1280 #endif
1281 /* *INDENT-ON* */
1282 
1283 /*
1284  * fd.io coding-style-patch-verification: ON
1285  *
1286  * Local Variables:
1287  * eval: (c-set-style "gnu")
1288  * End:
1289  */
static_always_inline u8 vxlan_validate_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:929
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
#define foreach_vxlan_flow_error
Definition: decap.c:906
#define CLIB_UNUSED(x)
Definition: clib.h:87
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:103
clib_bihash_24_8_t vxlan6_tunnel_by_key
Definition: vxlan.h:163
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
ip4_address_t src_address
Definition: ip4_packet.h:125
static char * vxlan_flow_error_strings[]
Definition: decap.c:921
vnet_interface_main_t interface_main
Definition: vnet.h:59
format_function_t format_ip4_header
Definition: format.h:81
vlib_node_registration_t vxlan4_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_input_node)
Definition: decap.c:23
#define foreach_vxlan_input_next
Definition: vxlan.h:136
#define PREDICT_TRUE(x)
Definition: clib.h:121
unsigned long u64
Definition: types.h:89
u32 flow_id_start
Definition: vxlan.h:185
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static const vxlan_decap_info_t decap_not_found
Definition: decap.c:51
static const vxlan_decap_info_t decap_bad_flags
Definition: decap.c:57
ip_vxlan_bypass_next_t
Definition: decap.c:441
static uword vxlan_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u32 is_ip4)
Definition: decap.c:178
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:140
vl_api_address_t src
Definition: gre.api:54
vlib_node_registration_t vxlan4_flow_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_flow_input_node)
Definition: decap.c:1263
vlib_main_t * vm
Definition: in2out_ed.c:1582
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
vlib_node_registration_t ip4_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip4_vxlan_bypass_node)
Definition: decap.c:838
#define VLIB_NODE_FN(node)
Definition: node.h:202
static uword ip4_address_is_multicast(const ip4_address_t *a)
Definition: ip4_packet.h:434
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:469
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:402
ip6_address_t src_address
Definition: ip6_packet.h:310
static u32 vlib_buffer_get_ip_fib_index(vlib_buffer_t *b, u8 is_ip4)
Definition: ip.h:284
unsigned char u8
Definition: types.h:56
u8 data[128]
Definition: ipsec_types.api:89
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:168
static int clib_bihash_key_compare_24_8(u64 *a, u64 *b)
Definition: bihash_24_8.h:75
#define VXLAN_FLAGS_I
Definition: vxlan_packet.h:53
vnet_main_t * vnet_main
Definition: vxlan.h:181
#define static_always_inline
Definition: clib.h:108
u32 tunnel_index
Definition: decap.c:30
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
ip4_address_t dst_address
Definition: ip4_packet.h:125
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:881
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:196
unsigned int u32
Definition: types.h:88
static void vtep4_key_init(vtep4_key_t *k4)
Definition: vtep.h:81
#define VLIB_FRAME_SIZE
Definition: node.h:377
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:534
static_always_inline u8 vxlan_check_ip_udp_len(vlib_buffer_t *b)
Definition: decap.c:971
vlib_node_registration_t ip4_input_node
Global ip4 input node.
Definition: ip4_input.c:385
ip4_address_t local_ip
Definition: vxlan.h:75
unsigned short u16
Definition: types.h:57
vlib_node_registration_t ip6_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip6_vxlan_bypass_node)
Definition: decap.c:870
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
void di(unformat_input_t *i)
Definition: unformat.c:163
vxlan_main_t vxlan_main
Definition: vxlan.c:44
#define PREDICT_FALSE(x)
Definition: clib.h:120
#define always_inline
Definition: ipsec.h:28
vnet_main_t vnet_main
Definition: misc.c:43
u32 ip4_tcp_udp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip4_forward.c:1400
vlib_node_registration_t vxlan6_input_node
(constructor) VLIB_REGISTER_NODE (vxlan6_input_node)
Definition: decap.c:24
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
vxlan4_tunnel_key_t last_tunnel_cache4
Definition: decap.c:49
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
vl_api_address_t dst
Definition: gre.api:55
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:391
static u8 vtep6_check(vtep_table_t *t, vlib_buffer_t *b0, ip6_header_t *ip60, vtep6_key_t *last_k6)
Definition: vtep.h:158
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1231
static u8 vtep4_check(vtep_table_t *t, vlib_buffer_t *b0, ip4_header_t *ip40, vtep4_key_t *last_k4)
Definition: vtep.h:101
vtep_table_t vtep_table
Definition: vxlan.h:167
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static u32 vnet_get_vni(vxlan_header_t *h)
Definition: vxlan_packet.h:56
u16 n_vectors
Definition: node.h:396
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:219
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:339
u32 flow_id
Generic flow identifier.
Definition: buffer.h:127
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
static_always_inline u8 vxlan_check_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:949
static char * vxlan_error_strings[]
Definition: decap.c:403
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:115
vxlan6_tunnel_key_t last_tunnel_cache6
Definition: decap.c:120
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
static_always_inline u8 vxlan_check_ip(vlib_buffer_t *b, u16 payload_len)
Definition: decap.c:961
static clib_error_t * ip6_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:887
vxlan_flow_input_next_t
Definition: decap.c:898
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:371
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1582
signed int i32
Definition: types.h:77
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:510
vxlan_flow_error_t
Definition: decap.c:913
#define ASSERT(truth)
#define foreach_vxlan_flow_input_next
Definition: decap.c:894
static vxlan_decap_info_t vxlan6_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache6 *cache, u32 fib_index, ip6_header_t *ip6_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:123
static void vtep6_key_init(vtep6_key_t *k6)
Definition: vtep.h:87
u32 sw_if_index
Definition: vxlan.h:106
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
format_function_t format_ip6_header
Definition: format.h:95
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:167
static clib_error_t * ip4_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:855
static uword ip6_address_is_multicast(const ip6_address_t *a)
Definition: ip6_packet.h:121
struct _vlib_node_registration vlib_node_registration_t
template key/value backing page structure
Definition: bihash_doc.h:44
Definition: defs.h:47
u16 payload_length
Definition: ip6_packet.h:301
u32 ip6_tcp_udp_icmp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip6_forward.c:1160
static void vnet_update_l2_len(vlib_buffer_t *b)
Definition: l2_input.h:236
static vxlan_decap_info_t vxlan4_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache4 *cache, u32 fib_index, ip4_header_t *ip4_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:64
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1279
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1583
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
static_always_inline u8 vxlan_err_code(u8 ip_err0, u8 udp_err0, u8 csum_err0)
Definition: decap.c:980
u8 * format_ip4_forward_next_trace(u8 *s, va_list *args)
Definition: ip4_forward.c:1229
A collection of combined counters.
Definition: counter.h:188
u16 decap_next_index
Definition: vxlan.h:100
u32 * tunnel_index_by_sw_if_index
Definition: vxlan.h:173
#define vnet_buffer(b)
Definition: buffer.h:417
static u8 * format_vxlan_rx_trace(u8 *s, va_list *args)
Definition: decap.c:36
clib_bihash_16_8_t vxlan4_tunnel_by_key
Definition: vxlan.h:162
u16 flags
Copy of main node flags.
Definition: node.h:500
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:577
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:280
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:301
u32 next_index
Definition: decap.c:29
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
vxlan_tunnel_t * tunnels
Definition: vxlan.h:159
static uword ip_vxlan_bypass_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u32 is_ip4)
Definition: decap.c:449
ip46_address_t src
Definition: vxlan.h:93
Definition: defs.h:46
ip6_address_t dst_address
Definition: ip6_packet.h:310
static u8 vtep4_check_vector(vtep_table_t *t, vlib_buffer_t *b0, ip4_header_t *ip40, vtep4_key_t *last_k4, vtep4_cache_t *vtep4_u512)
Definition: vtep.h:122
u8 * format_ip6_forward_next_trace(u8 *s, va_list *args)
Definition: ip6_forward.c:951