FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
decap.c
Go to the documentation of this file.
1 /*
2  * decap.c: vxlan tunnel decap packet processing
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/vxlan/vxlan.h>
21 
22 #ifndef CLIB_MARCH_VARIANT
25 #endif
26 
27 typedef struct
28 {
34 
35 static u8 *
36 format_vxlan_rx_trace (u8 * s, va_list * args)
37 {
38  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
39  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
40  vxlan_rx_trace_t *t = va_arg (*args, vxlan_rx_trace_t *);
41 
42  if (t->tunnel_index == ~0)
43  return format (s, "VXLAN decap error - tunnel for vni %d does not exist",
44  t->vni);
45  return format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
46  t->tunnel_index, t->vni, t->next_index, t->error);
47 }
48 
51 {
52  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
53  if (sw_if_index != (u32) ~ 0)
54  return sw_if_index;
55 
56  u32 *fib_index_by_sw_if_index = is_ip4 ?
58  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
59 
60  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
61 }
62 
64 
65 static const vxlan_decap_info_t decap_not_found = {
66  .sw_if_index = ~0,
67  .next_index = VXLAN_INPUT_NEXT_DROP,
68  .error = VXLAN_ERROR_NO_SUCH_TUNNEL
69 };
70 
71 static const vxlan_decap_info_t decap_bad_flags = {
72  .sw_if_index = ~0,
73  .next_index = VXLAN_INPUT_NEXT_DROP,
74  .error = VXLAN_ERROR_BAD_FLAGS
75 };
76 
79  u32 fib_index, ip4_header_t * ip4_0,
80  vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
81 {
82  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
83  return decap_bad_flags;
84 
85  /* Make sure VXLAN tunnel exist according to packet S/D IP, VRF, and VNI */
86  u32 dst = ip4_0->dst_address.as_u32;
87  u32 src = ip4_0->src_address.as_u32;
88  vxlan4_tunnel_key_t key4 = {
89  .key[0] = ((u64) dst << 32) | src,
90  .key[1] = ((u64) fib_index << 32) | vxlan0->vni_reserved,
91  };
92 
93  if (PREDICT_TRUE
94  (key4.key[0] == cache->key[0] && key4.key[1] == cache->key[1]))
95  {
96  /* cache hit */
97  vxlan_decap_info_t di = {.as_u64 = cache->value };
98  *stats_sw_if_index = di.sw_if_index;
99  return di;
100  }
101 
102  int rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
103  if (PREDICT_TRUE (rv == 0))
104  {
105  *cache = key4;
106  vxlan_decap_info_t di = {.as_u64 = key4.value };
107  *stats_sw_if_index = di.sw_if_index;
108  return di;
109  }
110 
111  /* try multicast */
113  return decap_not_found;
114 
115  /* search for mcast decap info by mcast address */
116  key4.key[0] = dst;
117  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
118  if (rv != 0)
119  return decap_not_found;
120 
121  /* search for unicast tunnel using the mcast tunnel local(src) ip */
122  vxlan_decap_info_t mdi = {.as_u64 = key4.value };
123  key4.key[0] = ((u64) mdi.local_ip.as_u32 << 32) | src;
124  rv = clib_bihash_search_inline_16_8 (&vxm->vxlan4_tunnel_by_key, &key4);
125  if (PREDICT_FALSE (rv != 0))
126  return decap_not_found;
127 
128  /* mcast traffic does not update the cache */
129  *stats_sw_if_index = mdi.sw_if_index;
130  vxlan_decap_info_t di = {.as_u64 = key4.value };
131  return di;
132 }
133 
135 
138  u32 fib_index, ip6_header_t * ip6_0,
139  vxlan_header_t * vxlan0, u32 * stats_sw_if_index)
140 {
141  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
142  return decap_bad_flags;
143 
144  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
145  vxlan6_tunnel_key_t key6 = {
146  .key[0] = ip6_0->src_address.as_u64[0],
147  .key[1] = ip6_0->src_address.as_u64[1],
148  .key[2] = (((u64) fib_index) << 32) | vxlan0->vni_reserved,
149  };
150 
151  if (PREDICT_FALSE
152  (clib_bihash_key_compare_24_8 (key6.key, cache->key) == 0))
153  {
154  int rv =
155  clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
156  if (PREDICT_FALSE (rv != 0))
157  return decap_not_found;
158 
159  *cache = key6;
160  }
161  vxlan_tunnel_t *t0 = pool_elt_at_index (vxm->tunnels, cache->value);
162 
163  /* Validate VXLAN tunnel SIP against packet DIP */
164  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address, &t0->src.ip6)))
165  *stats_sw_if_index = t0->sw_if_index;
166  else
167  {
168  /* try multicast */
170  return decap_not_found;
171 
172  /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
173  key6.key[0] = ip6_0->dst_address.as_u64[0];
174  key6.key[1] = ip6_0->dst_address.as_u64[1];
175  int rv =
176  clib_bihash_search_inline_24_8 (&vxm->vxlan6_tunnel_by_key, &key6);
177  if (PREDICT_FALSE (rv != 0))
178  return decap_not_found;
179 
180  vxlan_tunnel_t *mcast_t0 = pool_elt_at_index (vxm->tunnels, key6.value);
181  *stats_sw_if_index = mcast_t0->sw_if_index;
182  }
183 
185  .sw_if_index = t0->sw_if_index,
186  .next_index = t0->decap_next_index,
187  };
188  return di;
189 }
190 
193  vlib_node_runtime_t * node,
194  vlib_frame_t * from_frame, u32 is_ip4)
195 {
196  vxlan_main_t *vxm = &vxlan_main;
197  vnet_main_t *vnm = vxm->vnet_main;
199  vlib_combined_counter_main_t *rx_counter =
201  last_tunnel_cache4 last4;
202  last_tunnel_cache6 last6;
203  u32 pkts_dropped = 0;
204  u32 thread_index = vlib_get_thread_index ();
205 
206  if (is_ip4)
207  clib_memset (&last4, 0xff, sizeof last4);
208  else
209  clib_memset (&last6, 0xff, sizeof last6);
210 
211  u32 *from = vlib_frame_vector_args (from_frame);
212  u32 n_left_from = from_frame->n_vectors;
213 
214  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
215  vlib_get_buffers (vm, from, bufs, n_left_from);
216 
217  u32 stats_if0 = ~0, stats_if1 = ~0;
218  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
219  while (n_left_from >= 4)
220  {
221  /* Prefetch next iteration. */
222  vlib_prefetch_buffer_header (b[2], LOAD);
223  vlib_prefetch_buffer_header (b[3], LOAD);
224 
225  /* udp leaves current_data pointing at the vxlan header */
226  void *cur0 = vlib_buffer_get_current (b[0]);
227  void *cur1 = vlib_buffer_get_current (b[1]);
228  vxlan_header_t *vxlan0 = cur0;
229  vxlan_header_t *vxlan1 = cur1;
230 
231 
232  ip4_header_t *ip4_0, *ip4_1;
233  ip6_header_t *ip6_0, *ip6_1;
234  if (is_ip4)
235  {
236  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
237  ip4_1 = cur1 - sizeof (udp_header_t) - sizeof (ip4_header_t);
238  }
239  else
240  {
241  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
242  ip6_1 = cur1 - sizeof (udp_header_t) - sizeof (ip6_header_t);
243  }
244 
245  /* pop vxlan */
246  vlib_buffer_advance (b[0], sizeof *vxlan0);
247  vlib_buffer_advance (b[1], sizeof *vxlan1);
248 
249  u32 fi0 = buf_fib_index (b[0], is_ip4);
250  u32 fi1 = buf_fib_index (b[1], is_ip4);
251 
252  vxlan_decap_info_t di0 = is_ip4 ?
253  vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
254  vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
255  vxlan_decap_info_t di1 = is_ip4 ?
256  vxlan4_find_tunnel (vxm, &last4, fi1, ip4_1, vxlan1, &stats_if1) :
257  vxlan6_find_tunnel (vxm, &last6, fi1, ip6_1, vxlan1, &stats_if1);
258 
259  /* Prefetch next iteration. */
262 
263  u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
264  u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
265 
266  next[0] = di0.next_index;
267  next[1] = di1.next_index;
268 
269  u8 any_error = di0.error | di1.error;
270  if (PREDICT_TRUE (any_error == 0))
271  {
272  /* Required to make the l2 tag push / pop code work on l2 subifs */
273  vnet_update_l2_len (b[0]);
274  vnet_update_l2_len (b[1]);
275  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
276  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
277  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
278  vlib_increment_combined_counter (rx_counter, thread_index,
279  stats_if0, 1, len0);
280  vlib_increment_combined_counter (rx_counter, thread_index,
281  stats_if1, 1, len1);
282  }
283  else
284  {
285  if (di0.error == 0)
286  {
287  vnet_update_l2_len (b[0]);
288  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
289  vlib_increment_combined_counter (rx_counter, thread_index,
290  stats_if0, 1, len0);
291  }
292  else
293  {
294  b[0]->error = node->errors[di0.error];
295  pkts_dropped++;
296  }
297 
298  if (di1.error == 0)
299  {
300  vnet_update_l2_len (b[1]);
301  vnet_buffer (b[1])->sw_if_index[VLIB_RX] = di1.sw_if_index;
302  vlib_increment_combined_counter (rx_counter, thread_index,
303  stats_if1, 1, len1);
304  }
305  else
306  {
307  b[1]->error = node->errors[di1.error];
308  pkts_dropped++;
309  }
310  }
311 
312  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
313  {
314  vxlan_rx_trace_t *tr =
315  vlib_add_trace (vm, node, b[0], sizeof (*tr));
316  tr->next_index = next[0];
317  tr->error = di0.error;
318  tr->tunnel_index = di0.sw_if_index == ~0 ?
320  tr->vni = vnet_get_vni (vxlan0);
321  }
322  if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
323  {
324  vxlan_rx_trace_t *tr =
325  vlib_add_trace (vm, node, b[1], sizeof (*tr));
326  tr->next_index = next[1];
327  tr->error = di1.error;
328  tr->tunnel_index = di1.sw_if_index == ~0 ?
330  tr->vni = vnet_get_vni (vxlan1);
331  }
332  b += 2;
333  next += 2;
334  n_left_from -= 2;
335  }
336 
337  while (n_left_from > 0)
338  {
339  /* udp leaves current_data pointing at the vxlan header */
340  void *cur0 = vlib_buffer_get_current (b[0]);
341  vxlan_header_t *vxlan0 = cur0;
342  ip4_header_t *ip4_0;
343  ip6_header_t *ip6_0;
344  if (is_ip4)
345  ip4_0 = cur0 - sizeof (udp_header_t) - sizeof (ip4_header_t);
346  else
347  ip6_0 = cur0 - sizeof (udp_header_t) - sizeof (ip6_header_t);
348 
349  /* pop (ip, udp, vxlan) */
350  vlib_buffer_advance (b[0], sizeof (*vxlan0));
351 
352  u32 fi0 = buf_fib_index (b[0], is_ip4);
353 
354  vxlan_decap_info_t di0 = is_ip4 ?
355  vxlan4_find_tunnel (vxm, &last4, fi0, ip4_0, vxlan0, &stats_if0) :
356  vxlan6_find_tunnel (vxm, &last6, fi0, ip6_0, vxlan0, &stats_if0);
357 
358  uword len0 = vlib_buffer_length_in_chain (vm, b[0]);
359 
360  next[0] = di0.next_index;
361 
362  /* Validate VXLAN tunnel encap-fib index against packet */
363  if (di0.error == 0)
364  {
365  /* Required to make the l2 tag push / pop code work on l2 subifs */
366  vnet_update_l2_len (b[0]);
367 
368  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
369  vnet_buffer (b[0])->sw_if_index[VLIB_RX] = di0.sw_if_index;
370 
371  vlib_increment_combined_counter (rx_counter, thread_index,
372  stats_if0, 1, len0);
373  }
374  else
375  {
376  b[0]->error = node->errors[di0.error];
377  pkts_dropped++;
378  }
379 
380  if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
381  {
382  vxlan_rx_trace_t *tr
383  = vlib_add_trace (vm, node, b[0], sizeof (*tr));
384  tr->next_index = next[0];
385  tr->error = di0.error;
386  tr->tunnel_index = di0.sw_if_index == ~0 ?
388  tr->vni = vnet_get_vni (vxlan0);
389  }
390  b += 1;
391  next += 1;
392  n_left_from -= 1;
393  }
394  vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
395  /* Do we still need this now that tunnel tx stats is kept? */
396  u32 node_idx = is_ip4 ? vxlan4_input_node.index : vxlan6_input_node.index;
397  vlib_node_increment_counter (vm, node_idx, VXLAN_ERROR_DECAPSULATED,
398  from_frame->n_vectors - pkts_dropped);
399 
400  return from_frame->n_vectors;
401 }
402 
404  vlib_node_runtime_t * node,
405  vlib_frame_t * from_frame)
406 {
407  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 1);
408 }
409 
411  vlib_node_runtime_t * node,
412  vlib_frame_t * from_frame)
413 {
414  return vxlan_input (vm, node, from_frame, /* is_ip4 */ 0);
415 }
416 
417 static char *vxlan_error_strings[] = {
418 #define vxlan_error(n,s) s,
420 #undef vxlan_error
421 };
422 
423 /* *INDENT-OFF* */
425 {
426  .name = "vxlan4-input",
427  .vector_size = sizeof (u32),
428  .n_errors = VXLAN_N_ERROR,
429  .error_strings = vxlan_error_strings,
430  .n_next_nodes = VXLAN_INPUT_N_NEXT,
431  .format_trace = format_vxlan_rx_trace,
432  .next_nodes = {
433 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
435 #undef _
436  },
437 };
438 
440 {
441  .name = "vxlan6-input",
442  .vector_size = sizeof (u32),
443  .n_errors = VXLAN_N_ERROR,
444  .error_strings = vxlan_error_strings,
445  .n_next_nodes = VXLAN_INPUT_N_NEXT,
446  .next_nodes = {
447 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
449 #undef _
450  },
451  .format_trace = format_vxlan_rx_trace,
452 };
453 /* *INDENT-ON* */
454 
/* Next-node indices for the ip{4,6}-vxlan-bypass nodes (must match the
   .next_nodes tables in the registrations below). */
typedef enum
{
  IP_VXLAN_BYPASS_NEXT_DROP,
  IP_VXLAN_BYPASS_NEXT_VXLAN,
  IP_VXLAN_BYPASS_N_NEXT,
} ip_vxlan_bypass_next_t;
464  vlib_node_runtime_t * node,
465  vlib_frame_t * frame, u32 is_ip4)
466 {
467  vxlan_main_t *vxm = &vxlan_main;
468  u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
469  vlib_node_runtime_t *error_node =
471  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
472  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
473 
474  from = vlib_frame_vector_args (frame);
475  n_left_from = frame->n_vectors;
476  next_index = node->cached_next_index;
477 
478  if (node->flags & VLIB_NODE_FLAG_TRACE)
479  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
480 
481  if (is_ip4)
482  addr4.data_u32 = ~0;
483  else
484  ip6_address_set_zero (&addr6);
485 
486  while (n_left_from > 0)
487  {
488  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
489 
490  while (n_left_from >= 4 && n_left_to_next >= 2)
491  {
492  vlib_buffer_t *b0, *b1;
493  ip4_header_t *ip40, *ip41;
494  ip6_header_t *ip60, *ip61;
495  udp_header_t *udp0, *udp1;
496  u32 bi0, ip_len0, udp_len0, flags0, next0;
497  u32 bi1, ip_len1, udp_len1, flags1, next1;
498  i32 len_diff0, len_diff1;
499  u8 error0, good_udp0, proto0;
500  u8 error1, good_udp1, proto1;
501 
502  /* Prefetch next iteration. */
503  {
504  vlib_buffer_t *p2, *p3;
505 
506  p2 = vlib_get_buffer (vm, from[2]);
507  p3 = vlib_get_buffer (vm, from[3]);
508 
509  vlib_prefetch_buffer_header (p2, LOAD);
510  vlib_prefetch_buffer_header (p3, LOAD);
511 
512  CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
513  CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
514  }
515 
516  bi0 = to_next[0] = from[0];
517  bi1 = to_next[1] = from[1];
518  from += 2;
519  n_left_from -= 2;
520  to_next += 2;
521  n_left_to_next -= 2;
522 
523  b0 = vlib_get_buffer (vm, bi0);
524  b1 = vlib_get_buffer (vm, bi1);
525  if (is_ip4)
526  {
527  ip40 = vlib_buffer_get_current (b0);
528  ip41 = vlib_buffer_get_current (b1);
529  }
530  else
531  {
532  ip60 = vlib_buffer_get_current (b0);
533  ip61 = vlib_buffer_get_current (b1);
534  }
535 
536  /* Setup packet for next IP feature */
537  vnet_feature_next (&next0, b0);
538  vnet_feature_next (&next1, b1);
539 
540  if (is_ip4)
541  {
542  /* Treat IP frag packets as "experimental" protocol for now
543  until support of IP frag reassembly is implemented */
544  proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
545  proto1 = ip4_is_fragment (ip41) ? 0xfe : ip41->protocol;
546  }
547  else
548  {
549  proto0 = ip60->protocol;
550  proto1 = ip61->protocol;
551  }
552 
553  /* Process packet 0 */
554  if (proto0 != IP_PROTOCOL_UDP)
555  goto exit0; /* not UDP packet */
556 
557  if (is_ip4)
558  udp0 = ip4_next_header (ip40);
559  else
560  udp0 = ip6_next_header (ip60);
561 
562  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
563  goto exit0; /* not VXLAN packet */
564 
565  /* Validate DIP against VTEPs */
566  if (is_ip4)
567  {
568  if (addr4.as_u32 != ip40->dst_address.as_u32)
569  {
570  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
571  goto exit0; /* no local VTEP for VXLAN packet */
572  addr4 = ip40->dst_address;
573  }
574  }
575  else
576  {
577  if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
578  {
579  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
580  goto exit0; /* no local VTEP for VXLAN packet */
581  addr6 = ip60->dst_address;
582  }
583  }
584 
585  flags0 = b0->flags;
586  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
587 
588  /* Don't verify UDP checksum for packets with explicit zero checksum. */
589  good_udp0 |= udp0->checksum == 0;
590 
591  /* Verify UDP length */
592  if (is_ip4)
593  ip_len0 = clib_net_to_host_u16 (ip40->length);
594  else
595  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
596  udp_len0 = clib_net_to_host_u16 (udp0->length);
597  len_diff0 = ip_len0 - udp_len0;
598 
599  /* Verify UDP checksum */
600  if (PREDICT_FALSE (!good_udp0))
601  {
602  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
603  {
604  if (is_ip4)
605  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
606  else
607  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
608  good_udp0 =
609  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
610  }
611  }
612 
613  if (is_ip4)
614  {
615  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
616  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
617  }
618  else
619  {
620  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
621  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
622  }
623 
624  next0 = error0 ?
626  b0->error = error0 ? error_node->errors[error0] : 0;
627 
628  /* vxlan-input node expect current at VXLAN header */
629  if (is_ip4)
631  sizeof (ip4_header_t) +
632  sizeof (udp_header_t));
633  else
635  sizeof (ip6_header_t) +
636  sizeof (udp_header_t));
637 
638  exit0:
639  /* Process packet 1 */
640  if (proto1 != IP_PROTOCOL_UDP)
641  goto exit1; /* not UDP packet */
642 
643  if (is_ip4)
644  udp1 = ip4_next_header (ip41);
645  else
646  udp1 = ip6_next_header (ip61);
647 
648  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
649  goto exit1; /* not VXLAN packet */
650 
651  /* Validate DIP against VTEPs */
652  if (is_ip4)
653  {
654  if (addr4.as_u32 != ip41->dst_address.as_u32)
655  {
656  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
657  goto exit1; /* no local VTEP for VXLAN packet */
658  addr4 = ip41->dst_address;
659  }
660  }
661  else
662  {
663  if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
664  {
665  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
666  goto exit1; /* no local VTEP for VXLAN packet */
667  addr6 = ip61->dst_address;
668  }
669  }
670 
671  flags1 = b1->flags;
672  good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
673 
674  /* Don't verify UDP checksum for packets with explicit zero checksum. */
675  good_udp1 |= udp1->checksum == 0;
676 
677  /* Verify UDP length */
678  if (is_ip4)
679  ip_len1 = clib_net_to_host_u16 (ip41->length);
680  else
681  ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
682  udp_len1 = clib_net_to_host_u16 (udp1->length);
683  len_diff1 = ip_len1 - udp_len1;
684 
685  /* Verify UDP checksum */
686  if (PREDICT_FALSE (!good_udp1))
687  {
688  if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
689  {
690  if (is_ip4)
691  flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
692  else
693  flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
694  good_udp1 =
695  (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
696  }
697  }
698 
699  if (is_ip4)
700  {
701  error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
702  error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
703  }
704  else
705  {
706  error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
707  error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
708  }
709 
710  next1 = error1 ?
712  b1->error = error1 ? error_node->errors[error1] : 0;
713 
714  /* vxlan-input node expect current at VXLAN header */
715  if (is_ip4)
717  sizeof (ip4_header_t) +
718  sizeof (udp_header_t));
719  else
721  sizeof (ip6_header_t) +
722  sizeof (udp_header_t));
723 
724  exit1:
725  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
726  to_next, n_left_to_next,
727  bi0, bi1, next0, next1);
728  }
729 
730  while (n_left_from > 0 && n_left_to_next > 0)
731  {
732  vlib_buffer_t *b0;
733  ip4_header_t *ip40;
734  ip6_header_t *ip60;
735  udp_header_t *udp0;
736  u32 bi0, ip_len0, udp_len0, flags0, next0;
737  i32 len_diff0;
738  u8 error0, good_udp0, proto0;
739 
740  bi0 = to_next[0] = from[0];
741  from += 1;
742  n_left_from -= 1;
743  to_next += 1;
744  n_left_to_next -= 1;
745 
746  b0 = vlib_get_buffer (vm, bi0);
747  if (is_ip4)
748  ip40 = vlib_buffer_get_current (b0);
749  else
750  ip60 = vlib_buffer_get_current (b0);
751 
752  /* Setup packet for next IP feature */
753  vnet_feature_next (&next0, b0);
754 
755  if (is_ip4)
756  /* Treat IP4 frag packets as "experimental" protocol for now
757  until support of IP frag reassembly is implemented */
758  proto0 = ip4_is_fragment (ip40) ? 0xfe : ip40->protocol;
759  else
760  proto0 = ip60->protocol;
761 
762  if (proto0 != IP_PROTOCOL_UDP)
763  goto exit; /* not UDP packet */
764 
765  if (is_ip4)
766  udp0 = ip4_next_header (ip40);
767  else
768  udp0 = ip6_next_header (ip60);
769 
770  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
771  goto exit; /* not VXLAN packet */
772 
773  /* Validate DIP against VTEPs */
774  if (is_ip4)
775  {
776  if (addr4.as_u32 != ip40->dst_address.as_u32)
777  {
778  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
779  goto exit; /* no local VTEP for VXLAN packet */
780  addr4 = ip40->dst_address;
781  }
782  }
783  else
784  {
785  if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
786  {
787  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
788  goto exit; /* no local VTEP for VXLAN packet */
789  addr6 = ip60->dst_address;
790  }
791  }
792 
793  flags0 = b0->flags;
794  good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
795 
796  /* Don't verify UDP checksum for packets with explicit zero checksum. */
797  good_udp0 |= udp0->checksum == 0;
798 
799  /* Verify UDP length */
800  if (is_ip4)
801  ip_len0 = clib_net_to_host_u16 (ip40->length);
802  else
803  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
804  udp_len0 = clib_net_to_host_u16 (udp0->length);
805  len_diff0 = ip_len0 - udp_len0;
806 
807  /* Verify UDP checksum */
808  if (PREDICT_FALSE (!good_udp0))
809  {
810  if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
811  {
812  if (is_ip4)
813  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
814  else
815  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
816  good_udp0 =
817  (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
818  }
819  }
820 
821  if (is_ip4)
822  {
823  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
824  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
825  }
826  else
827  {
828  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
829  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
830  }
831 
832  next0 = error0 ?
834  b0->error = error0 ? error_node->errors[error0] : 0;
835 
836  /* vxlan-input node expect current at VXLAN header */
837  if (is_ip4)
839  sizeof (ip4_header_t) +
840  sizeof (udp_header_t));
841  else
843  sizeof (ip6_header_t) +
844  sizeof (udp_header_t));
845 
846  exit:
847  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
848  to_next, n_left_to_next,
849  bi0, next0);
850  }
851 
852  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
853  }
854 
855  return frame->n_vectors;
856 }
857 
859  vlib_node_runtime_t * node,
860  vlib_frame_t * frame)
861 {
862  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
863 }
864 
865 /* *INDENT-OFF* */
867 {
868  .name = "ip4-vxlan-bypass",
869  .vector_size = sizeof (u32),
870  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
871  .next_nodes = {
872  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
873  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
874  },
875  .format_buffer = format_ip4_header,
876  .format_trace = format_ip4_forward_next_trace,
877 };
878 
879 /* *INDENT-ON* */
880 
881 /* Dummy init function to get us linked in. */
882 static clib_error_t *
884 {
885  return 0;
886 }
887 
889 
891  vlib_node_runtime_t * node,
892  vlib_frame_t * frame)
893 {
894  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
895 }
896 
897 /* *INDENT-OFF* */
899 {
900  .name = "ip6-vxlan-bypass",
901  .vector_size = sizeof (u32),
902  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
903  .next_nodes = {
904  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
905  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
906  },
907  .format_buffer = format_ip6_header,
908  .format_trace = format_ip6_forward_next_trace,
909 };
910 
911 /* *INDENT-ON* */
912 
913 /* Dummy init function to get us linked in. */
914 static clib_error_t *
916 {
917  return 0;
918 }
919 
921 
922 #define foreach_vxlan_flow_input_next \
923 _(DROP, "error-drop") \
924 _(L2_INPUT, "l2-input")
925 
926 typedef enum
927 {
928 #define _(s,n) VXLAN_FLOW_NEXT_##s,
930 #undef _
933 
934 #define foreach_vxlan_flow_error \
935  _(NONE, "no error") \
936  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
937  _(IP_HEADER_ERROR, "Rx ip header errors") \
938  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
939  _(UDP_LENGTH_ERROR, "Rx udp length errors")
940 
941 typedef enum
942 {
943 #define _(f,s) VXLAN_FLOW_ERROR_##f,
945 #undef _
948 
949 static char *vxlan_flow_error_strings[] = {
950 #define _(n,s) s,
952 #undef _
953 };
954 
955 
958 {
959  u32 flags = b->flags;
960  enum
961  { offset =
962  sizeof (ip4_header_t) + sizeof (udp_header_t) + sizeof (vxlan_header_t),
963  };
964 
965  /* Verify UDP checksum */
966  if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
967  {
969  flags = ip4_tcp_udp_validate_checksum (vm, b);
971  }
972 
973  return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
974 }
975 
978 {
979  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
980  udp_header_t *udp = &hdr->udp;
981  /* Don't verify UDP checksum for packets with explicit zero checksum. */
982  u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
983  udp->checksum == 0;
984 
985  return !good_csum;
986 }
987 
989 vxlan_check_ip (vlib_buffer_t * b, u16 payload_len)
990 {
991  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
992  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
993  u16 expected = payload_len + sizeof *hdr;
994  return ip_len > expected || hdr->ip4.ttl == 0
995  || hdr->ip4.ip_version_and_header_length != 0x45;
996 }
997 
1000 {
1001  ip4_vxlan_header_t *hdr = vlib_buffer_get_current (b) - sizeof *hdr;
1002  u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
1003  u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
1004  return udp_len > ip_len;
1005 }
1006 
1008 vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
1009 {
1010  u8 error0 = VXLAN_FLOW_ERROR_NONE;
1011  if (ip_err0)
1012  error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
1013  if (udp_err0)
1014  error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
1015  if (csum_err0)
1016  error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
1017  return error0;
1018 }
1019 
1021  vlib_node_runtime_t * node,
1022  vlib_frame_t * f)
1023 {
1024  enum
1025  { payload_offset = sizeof (ip4_vxlan_header_t) };
1026 
1027  vxlan_main_t *vxm = &vxlan_main;
1030  [VXLAN_FLOW_NEXT_DROP] =
1032  [VXLAN_FLOW_NEXT_L2_INPUT] =
1034  };
1035  u32 thread_index = vlib_get_thread_index ();
1036 
1037  u32 *from = vlib_frame_vector_args (f);
1038  u32 n_left_from = f->n_vectors;
1039  u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;
1040 
1041  while (n_left_from > 0)
1042  {
1043  u32 n_left_to_next, *to_next;
1044 
1045  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1046 
1047  while (n_left_from > 3 && n_left_to_next > 3)
1048  {
1049  u32 bi0 = to_next[0] = from[0];
1050  u32 bi1 = to_next[1] = from[1];
1051  u32 bi2 = to_next[2] = from[2];
1052  u32 bi3 = to_next[3] = from[3];
1053  from += 4;
1054  n_left_from -= 4;
1055  to_next += 4;
1056  n_left_to_next -= 4;
1057 
1058  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1059  vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
1060  vlib_buffer_t *b2 = vlib_get_buffer (vm, bi2);
1061  vlib_buffer_t *b3 = vlib_get_buffer (vm, bi3);
1062 
1063  vlib_buffer_advance (b0, payload_offset);
1064  vlib_buffer_advance (b1, payload_offset);
1065  vlib_buffer_advance (b2, payload_offset);
1066  vlib_buffer_advance (b3, payload_offset);
1067 
1068  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1069  u16 len1 = vlib_buffer_length_in_chain (vm, b1);
1070  u16 len2 = vlib_buffer_length_in_chain (vm, b2);
1071  u16 len3 = vlib_buffer_length_in_chain (vm, b3);
1072 
1073  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 =
1074  VXLAN_FLOW_NEXT_L2_INPUT, next2 =
1075  VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;
1076 
1077  u8 ip_err0 = vxlan_check_ip (b0, len0);
1078  u8 ip_err1 = vxlan_check_ip (b1, len1);
1079  u8 ip_err2 = vxlan_check_ip (b2, len2);
1080  u8 ip_err3 = vxlan_check_ip (b3, len3);
1081  u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;
1082 
1083  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1084  u8 udp_err1 = vxlan_check_ip_udp_len (b1);
1085  u8 udp_err2 = vxlan_check_ip_udp_len (b2);
1086  u8 udp_err3 = vxlan_check_ip_udp_len (b3);
1087  u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;
1088 
1089  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1090  u8 csum_err1 = vxlan_check_udp_csum (vm, b1);
1091  u8 csum_err2 = vxlan_check_udp_csum (vm, b2);
1092  u8 csum_err3 = vxlan_check_udp_csum (vm, b3);
1093  u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1094 
1095  if (PREDICT_FALSE (csum_err))
1096  {
1097  if (csum_err0)
1098  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1099  if (csum_err1)
1100  csum_err1 = !vxlan_validate_udp_csum (vm, b1);
1101  if (csum_err2)
1102  csum_err2 = !vxlan_validate_udp_csum (vm, b2);
1103  if (csum_err3)
1104  csum_err3 = !vxlan_validate_udp_csum (vm, b3);
1105  csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
1106  }
1107 
1108  if (PREDICT_FALSE (ip_err || udp_err || csum_err))
1109  {
1110  if (ip_err0 || udp_err0 || csum_err0)
1111  {
1112  next0 = VXLAN_FLOW_NEXT_DROP;
1113  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1114  b0->error = node->errors[error0];
1115  }
1116  if (ip_err1 || udp_err1 || csum_err1)
1117  {
1118  next1 = VXLAN_FLOW_NEXT_DROP;
1119  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1120  b1->error = node->errors[error1];
1121  }
1122  if (ip_err2 || udp_err2 || csum_err2)
1123  {
1124  next2 = VXLAN_FLOW_NEXT_DROP;
1125  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1126  b2->error = node->errors[error2];
1127  }
1128  if (ip_err3 || udp_err3 || csum_err3)
1129  {
1130  next3 = VXLAN_FLOW_NEXT_DROP;
1131  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1132  b3->error = node->errors[error3];
1133  }
1134  }
1135 
1136  vnet_update_l2_len (b0);
1137  vnet_update_l2_len (b1);
1138  vnet_update_l2_len (b2);
1139  vnet_update_l2_len (b3);
1140 
1141  ASSERT (b0->flow_id != 0);
1142  ASSERT (b1->flow_id != 0);
1143  ASSERT (b2->flow_id != 0);
1144  ASSERT (b3->flow_id != 0);
1145 
1146  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1147  u32 t_index1 = b1->flow_id - vxm->flow_id_start;
1148  u32 t_index2 = b2->flow_id - vxm->flow_id_start;
1149  u32 t_index3 = b3->flow_id - vxm->flow_id_start;
1150 
1151  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1152  vxlan_tunnel_t *t1 = &vxm->tunnels[t_index1];
1153  vxlan_tunnel_t *t2 = &vxm->tunnels[t_index2];
1154  vxlan_tunnel_t *t3 = &vxm->tunnels[t_index3];
1155 
1156  /* flow id consumed */
1157  b0->flow_id = 0;
1158  b1->flow_id = 0;
1159  b2->flow_id = 0;
1160  b3->flow_id = 0;
1161 
1162  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1163  t0->sw_if_index;
1164  u32 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1165  t1->sw_if_index;
1166  u32 sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX] =
1167  t2->sw_if_index;
1168  u32 sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX] =
1169  t3->sw_if_index;
1170 
1171  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1172  sw_if_index0, 1, len0);
1173  vlib_increment_combined_counter (rx_counter[next1], thread_index,
1174  sw_if_index1, 1, len1);
1175  vlib_increment_combined_counter (rx_counter[next2], thread_index,
1176  sw_if_index2, 1, len2);
1177  vlib_increment_combined_counter (rx_counter[next3], thread_index,
1178  sw_if_index3, 1, len3);
1179 
1180  u32 flags = b0->flags | b1->flags | b2->flags | b3->flags;
1181 
1182  if (PREDICT_FALSE (flags & VLIB_BUFFER_IS_TRACED))
1183  {
1184  if (b0->flags & VLIB_BUFFER_IS_TRACED)
1185  {
1186  vxlan_rx_trace_t *tr =
1187  vlib_add_trace (vm, node, b0, sizeof *tr);
1188  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1189  tr->next_index = next0;
1190  tr->error = error0;
1191  tr->tunnel_index = t_index0;
1192  tr->vni = t0->vni;
1193  }
1194  if (b1->flags & VLIB_BUFFER_IS_TRACED)
1195  {
1196  vxlan_rx_trace_t *tr =
1197  vlib_add_trace (vm, node, b1, sizeof *tr);
1198  u8 error1 = vxlan_err_code (ip_err1, udp_err1, csum_err1);
1199  tr->next_index = next1;
1200  tr->error = error1;
1201  tr->tunnel_index = t_index1;
1202  tr->vni = t1->vni;
1203  }
1204  if (b2->flags & VLIB_BUFFER_IS_TRACED)
1205  {
1206  vxlan_rx_trace_t *tr =
1207  vlib_add_trace (vm, node, b2, sizeof *tr);
1208  u8 error2 = vxlan_err_code (ip_err2, udp_err2, csum_err2);
1209  tr->next_index = next2;
1210  tr->error = error2;
1211  tr->tunnel_index = t_index2;
1212  tr->vni = t2->vni;
1213  }
1214  if (b3->flags & VLIB_BUFFER_IS_TRACED)
1215  {
1216  vxlan_rx_trace_t *tr =
1217  vlib_add_trace (vm, node, b3, sizeof *tr);
1218  u8 error3 = vxlan_err_code (ip_err3, udp_err3, csum_err3);
1219  tr->next_index = next3;
1220  tr->error = error3;
1221  tr->tunnel_index = t_index3;
1222  tr->vni = t3->vni;
1223  }
1224  }
1226  (vm, node, next_index, to_next, n_left_to_next,
1227  bi0, bi1, bi2, bi3, next0, next1, next2, next3);
1228  }
1229  while (n_left_from > 0 && n_left_to_next > 0)
1230  {
1231  u32 bi0 = to_next[0] = from[0];
1232  from++;
1233  n_left_from--;
1234  to_next++;
1235  n_left_to_next--;
1236 
1237  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
1238  vlib_buffer_advance (b0, payload_offset);
1239 
1240  u16 len0 = vlib_buffer_length_in_chain (vm, b0);
1241  u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;
1242 
1243  u8 ip_err0 = vxlan_check_ip (b0, len0);
1244  u8 udp_err0 = vxlan_check_ip_udp_len (b0);
1245  u8 csum_err0 = vxlan_check_udp_csum (vm, b0);
1246 
1247  if (csum_err0)
1248  csum_err0 = !vxlan_validate_udp_csum (vm, b0);
1249  if (ip_err0 || udp_err0 || csum_err0)
1250  {
1251  next0 = VXLAN_FLOW_NEXT_DROP;
1252  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1253  b0->error = node->errors[error0];
1254  }
1255 
1256  vnet_update_l2_len (b0);
1257 
1258  ASSERT (b0->flow_id != 0);
1259  u32 t_index0 = b0->flow_id - vxm->flow_id_start;
1260  vxlan_tunnel_t *t0 = &vxm->tunnels[t_index0];
1261  b0->flow_id = 0;
1262 
1263  u32 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1264  t0->sw_if_index;
1265  vlib_increment_combined_counter (rx_counter[next0], thread_index,
1266  sw_if_index0, 1, len0);
1267 
1268  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1269  {
1270  vxlan_rx_trace_t *tr =
1271  vlib_add_trace (vm, node, b0, sizeof *tr);
1272  u8 error0 = vxlan_err_code (ip_err0, udp_err0, csum_err0);
1273  tr->next_index = next0;
1274  tr->error = error0;
1275  tr->tunnel_index = t_index0;
1276  tr->vni = t0->vni;
1277  }
1278  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1279  to_next, n_left_to_next,
1280  bi0, next0);
1281  }
1282 
1283  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1284  }
1285 
1286  return f->n_vectors;
1287 }
1288 
1289 /* *INDENT-OFF* */
1290 #ifndef CLIB_MULTIARCH_VARIANT
1292  .name = "vxlan-flow-input",
1293  .type = VLIB_NODE_TYPE_INTERNAL,
1294  .vector_size = sizeof (u32),
1295 
1296  .format_trace = format_vxlan_rx_trace,
1297 
1298  .n_errors = VXLAN_FLOW_N_ERROR,
1299  .error_strings = vxlan_flow_error_strings,
1300 
1301  .n_next_nodes = VXLAN_FLOW_N_NEXT,
1302  .next_nodes = {
1303 #define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,
1305 #undef _
1306  },
1307 };
1308 #endif
1309 /* *INDENT-ON* */
1310 
1311 /*
1312  * fd.io coding-style-patch-verification: ON
1313  *
1314  * Local Variables:
1315  * eval: (c-set-style "gnu")
1316  * End:
1317  */
static_always_inline u8 vxlan_validate_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:957
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
#define foreach_vxlan_flow_error
Definition: decap.c:934
u32 flags
Definition: vhost_user.h:141
#define CLIB_UNUSED(x)
Definition: clib.h:83
clib_bihash_24_8_t vxlan6_tunnel_by_key
Definition: vxlan.h:162
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:220
ip4_address_t src_address
Definition: ip4_packet.h:170
uword * vtep6
Definition: vxlan.h:167
static char * vxlan_flow_error_strings[]
Definition: decap.c:949
vnet_interface_main_t interface_main
Definition: vnet.h:56
format_function_t format_ip4_header
Definition: format.h:83
vlib_node_registration_t vxlan4_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_input_node)
Definition: decap.c:23
#define foreach_vxlan_input_next
Definition: vxlan.h:135
#define PREDICT_TRUE(x)
Definition: clib.h:113
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
u32 flow_id_start
Definition: vxlan.h:185
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static const vxlan_decap_info_t decap_not_found
Definition: decap.c:65
static const vxlan_decap_info_t decap_bad_flags
Definition: decap.c:71
ip_vxlan_bypass_next_t
Definition: decap.c:455
static uword vxlan_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u32 is_ip4)
Definition: decap.c:192
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:138
vl_api_address_t src
Definition: gre.api:51
vlib_node_registration_t vxlan4_flow_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_flow_input_node)
Definition: decap.c:1291
u32 * fib_index_by_sw_if_index
Table index indexed by software interface.
Definition: ip4.h:121
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
u8 data[128]
Definition: ipsec.api:251
vlib_node_registration_t ip4_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip4_vxlan_bypass_node)
Definition: decap.c:866
#define VLIB_NODE_FN(node)
Definition: node.h:202
static uword ip4_address_is_multicast(const ip4_address_t *a)
Definition: ip4_packet.h:318
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
ip6_address_t src_address
Definition: ip6_packet.h:383
unsigned char u8
Definition: types.h:56
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:213
static int clib_bihash_key_compare_24_8(u64 *a, u64 *b)
Definition: bihash_24_8.h:69
#define VXLAN_FLAGS_I
Definition: vxlan_packet.h:52
vnet_main_t * vnet_main
Definition: vxlan.h:181
#define static_always_inline
Definition: clib.h:100
u32 tunnel_index
Definition: decap.c:30
vl_api_interface_index_t sw_if_index
Definition: gre.api:50
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
#define always_inline
Definition: clib.h:99
ip4_address_t dst_address
Definition: ip4_packet.h:170
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:846
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:203
void di(unformat_input_t *i)
Definition: unformat.c:163
static void * ip4_next_header(ip4_header_t *i)
Definition: ip4_packet.h:241
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:378
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
#define hash_get(h, key)
Definition: hash.h:249
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
static_always_inline u8 vxlan_check_ip_udp_len(vlib_buffer_t *b)
Definition: decap.c:999
vlib_node_registration_t ip4_input_node
Global ip4 input node.
Definition: ip4_input.c:317
ip4_address_t local_ip
Definition: vxlan.h:74
unsigned short u16
Definition: types.h:57
vlib_node_registration_t ip6_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip6_vxlan_bypass_node)
Definition: decap.c:898
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
vxlan_main_t vxlan_main
Definition: vxlan.c:44
#define PREDICT_FALSE(x)
Definition: clib.h:112
vnet_main_t vnet_main
Definition: misc.c:43
u32 ip4_tcp_udp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip4_forward.c:1332
vlib_node_registration_t vxlan6_input_node
(constructor) VLIB_REGISTER_NODE (vxlan6_input_node)
Definition: decap.c:24
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
vxlan4_tunnel_key_t last_tunnel_cache4
Definition: decap.c:63
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
vl_api_address_t dst
Definition: gre.api:52
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
static u32 vnet_get_vni(vxlan_header_t *h)
Definition: vxlan_packet.h:54
u16 n_vectors
Definition: node.h:397
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:218
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:80
vlib_main_t * vm
Definition: buffer.c:323
static_always_inline void vlib_buffer_enqueue_to_next(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts, uword count)
Definition: buffer_node.h:332
static void ip6_address_set_zero(ip6_address_t *a)
Definition: ip6_packet.h:276
u32 flow_id
Generic flow identifier.
Definition: buffer.h:127
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:302
static_always_inline u8 vxlan_check_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
Definition: decap.c:977
static char * vxlan_error_strings[]
Definition: decap.c:417
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:89
u8 data[]
Packet data.
Definition: buffer.h:181
vxlan6_tunnel_key_t last_tunnel_cache6
Definition: decap.c:134
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
static_always_inline u8 vxlan_check_ip(vlib_buffer_t *b, u16 payload_len)
Definition: decap.c:989
static clib_error_t * ip6_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:915
vxlan_flow_input_next_t
Definition: decap.c:926
static void * ip6_next_header(ip6_header_t *i)
Definition: ip6_packet.h:410
signed int i32
Definition: types.h:77
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:515
vxlan_flow_error_t
Definition: decap.c:941
#define ASSERT(truth)
#define foreach_vxlan_flow_input_next
Definition: decap.c:922
ip6_main_t ip6_main
Definition: ip6_forward.c:2805
static vxlan_decap_info_t vxlan6_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache6 *cache, u32 fib_index, ip6_header_t *ip6_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:137
uword * vtep4
Definition: vxlan.h:166
u32 sw_if_index
Definition: vxlan.h:105
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
format_function_t format_ip6_header
Definition: format.h:97
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
Definition: ip6_packet.h:240
static clib_error_t * ip4_vxlan_bypass_init(vlib_main_t *vm)
Definition: decap.c:883
static uword ip6_address_is_multicast(const ip6_address_t *a)
Definition: ip6_packet.h:187
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
#define vec_elt(v, i)
Get vector value at index i.
struct _vlib_node_registration vlib_node_registration_t
template key/value backing page structure
Definition: bihash_doc.h:44
Definition: defs.h:47
u16 payload_length
Definition: ip6_packet.h:374
static u32 buf_fib_index(vlib_buffer_t *b, u32 is_ip4)
Definition: decap.c:50
u32 ip6_tcp_udp_icmp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
Definition: ip6_forward.c:1075
static void vnet_update_l2_len(vlib_buffer_t *b)
Definition: l2_input.h:236
static vxlan_decap_info_t vxlan4_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache4 *cache, u32 fib_index, ip4_header_t *ip4_0, vxlan_header_t *vxlan0, u32 *stats_sw_if_index)
Definition: decap.c:78
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
Definition: ip4_forward.c:1211
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
static_always_inline u8 vxlan_err_code(u8 ip_err0, u8 udp_err0, u8 csum_err0)
Definition: decap.c:1008
u8 * format_ip4_forward_next_trace(u8 *s, va_list *args)
Definition: ip4_forward.c:1161
A collection of combined counters.
Definition: counter.h:188
u16 decap_next_index
Definition: vxlan.h:99
u32 * tunnel_index_by_sw_if_index
Definition: vxlan.h:173
#define hash_get_mem(h, key)
Definition: hash.h:269
#define vnet_buffer(b)
Definition: buffer.h:365
static u8 * format_vxlan_rx_trace(u8 *s, va_list *args)
Definition: decap.c:36
ip4_main_t ip4_main
Global ip4 main structure.
Definition: ip4_forward.c:1076
clib_bihash_16_8_t vxlan4_tunnel_by_key
Definition: vxlan.h:161
u16 flags
Copy of main node flags.
Definition: node.h:509
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
u32 next_index
Definition: decap.c:29
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:59
u32 * fib_index_by_sw_if_index
Definition: ip6.h:194
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
vxlan_tunnel_t * tunnels
Definition: vxlan.h:158
static uword ip_vxlan_bypass_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u32 is_ip4)
Definition: decap.c:463
ip46_address_t src
Definition: vxlan.h:92
Definition: defs.h:46
ip6_address_t dst_address
Definition: ip6_packet.h:383
u8 * format_ip6_forward_next_trace(u8 *s, va_list *args)
Definition: ip6_forward.c:866