FD.io VPP  v17.04.2-2-ga8f93f8
Vector Packet Processing
decap.c
1 /*
2  * decap.c: vxlan tunnel decap packet processing
3  *
4  * Copyright (c) 2013 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  * http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17 
18 #include <vlib/vlib.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/vxlan/vxlan.h>
21 
22 vlib_node_registration_t vxlan4_input_node;
23 vlib_node_registration_t vxlan6_input_node;
24 
25 typedef struct {
26  u32 next_index;
27  u32 tunnel_index;
28  u32 error;
29  u32 vni;
30 } vxlan_rx_trace_t;
31 
32 static u8 * format_vxlan_rx_trace (u8 * s, va_list * args)
33 {
34  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
35  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
36  vxlan_rx_trace_t * t = va_arg (*args, vxlan_rx_trace_t *);
37 
38  if (t->tunnel_index != ~0)
39  {
40  s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
41  t->tunnel_index, t->vni, t->next_index, t->error);
42  }
43  else
44  {
45  s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
46  t->vni);
47  }
48  return s;
49 }
50 
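/* Helper used by the decap loops below: accept decap only when the packet's
 * FIB matches the tunnel's encap FIB.  The FIB index is looked up from the
 * RX sw_if_index, unless the sw_if_index[VLIB_TX] metadata already carries a
 * FIB index override. */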
51 always_inline u32
52 validate_vxlan_fib (vlib_buffer_t * b, vxlan_tunnel_t * t, u32 is_ip4)
53 {
54  u32 fib_index, sw_if_index;
55 
56  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
57 
58  if (is_ip4)
59  fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
60   vec_elt (ip4_main.fib_index_by_sw_if_index, sw_if_index) :
61   vnet_buffer (b)->sw_if_index[VLIB_TX];
62  else
63  fib_index = (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
64   vec_elt (ip6_main.fib_index_by_sw_if_index, sw_if_index) :
65   vnet_buffer (b)->sw_if_index[VLIB_TX];
66 
67  return (fib_index == t->encap_fib_index);
68 }
69 
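/* Shared IPv4/IPv6 decap worker.  The thin vxlan4_input/vxlan6_input wrappers
 * below pass is_ip4 as a constant so each variant can be specialized.  The
 * frame is handled in a dual-packet loop with a single-packet tail; the most
 * recent (SIP, VNI) -> tunnel lookup is cached, and per-tunnel RX counters
 * are batched instead of being bumped once per packet. */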
70 always_inline uword
71 vxlan_input (vlib_main_t * vm,
72  vlib_node_runtime_t * node,
73  vlib_frame_t * from_frame,
74  u32 is_ip4)
75 {
76  u32 n_left_from, next_index, * from, * to_next;
77  vxlan_main_t * vxm = &vxlan_main;
78  vnet_main_t * vnm = vxm->vnet_main;
79  vnet_interface_main_t * im = &vnm->interface_main;
80  u32 last_tunnel_index = ~0;
81  vxlan4_tunnel_key_t last_key4;
82  vxlan6_tunnel_key_t last_key6;
83  u32 pkts_decapsulated = 0;
84  u32 cpu_index = os_get_cpu_number();
85  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
86 
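/* Seed the lookup cache with an impossible key so the first packet always
 * takes the hash-lookup path. */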
87  if (is_ip4)
88  last_key4.as_u64 = ~0;
89  else
90  memset (&last_key6, 0xff, sizeof (last_key6));
91 
92  from = vlib_frame_vector_args (from_frame);
93  n_left_from = from_frame->n_vectors;
94 
95  next_index = node->cached_next_index;
96  stats_sw_if_index = node->runtime_data[0];
97  stats_n_packets = stats_n_bytes = 0;
98 
99  while (n_left_from > 0)
100  {
101  u32 n_left_to_next;
102 
103  vlib_get_next_frame (vm, node, next_index,
104  to_next, n_left_to_next);
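/* Dual-packet loop: process two buffers per iteration while prefetching the
 * next two to hide memory latency. */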
105  while (n_left_from >= 4 && n_left_to_next >= 2)
106  {
107  u32 bi0, bi1;
108  vlib_buffer_t * b0, * b1;
109  u32 next0, next1;
110  ip4_header_t * ip4_0, * ip4_1;
111  ip6_header_t * ip6_0, * ip6_1;
112  vxlan_header_t * vxlan0, * vxlan1;
113  uword * p0, * p1;
114  u32 tunnel_index0, tunnel_index1;
115  vxlan_tunnel_t * t0, * t1, * mt0 = NULL, * mt1 = NULL;
116  vxlan4_tunnel_key_t key4_0, key4_1;
117  vxlan6_tunnel_key_t key6_0, key6_1;
118  u32 error0, error1;
119  u32 sw_if_index0, sw_if_index1, len0, len1;
120 
121  /* Prefetch next iteration. */
122  {
123  vlib_buffer_t * p2, * p3;
124 
125  p2 = vlib_get_buffer (vm, from[2]);
126  p3 = vlib_get_buffer (vm, from[3]);
127 
128  vlib_prefetch_buffer_header (p2, LOAD);
129  vlib_prefetch_buffer_header (p3, LOAD);
130 
131  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
132  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
133  }
134 
135  bi0 = from[0];
136  bi1 = from[1];
137  to_next[0] = bi0;
138  to_next[1] = bi1;
139  from += 2;
140  to_next += 2;
141  n_left_to_next -= 2;
142  n_left_from -= 2;
143 
144  b0 = vlib_get_buffer (vm, bi0);
145  b1 = vlib_get_buffer (vm, bi1);
146 
147  /* udp leaves current_data pointing at the vxlan header */
148  vxlan0 = vlib_buffer_get_current (b0);
149  vxlan1 = vlib_buffer_get_current (b1);
150  if (is_ip4) {
151  vlib_buffer_advance
152  (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
153  vlib_buffer_advance
154  (b1, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
155  ip4_0 = vlib_buffer_get_current (b0);
156  ip4_1 = vlib_buffer_get_current (b1);
157  } else {
158  vlib_buffer_advance
159  (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
160  vlib_buffer_advance
161  (b1, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
162  ip6_0 = vlib_buffer_get_current (b0);
163  ip6_1 = vlib_buffer_get_current (b1);
164  }
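/* The negative advances above expose the outer IP header (ip4_0/ip6_0 keep
 * pointing at it for the tunnel checks further down); the positive advances
 * below then strip ip+udp+vxlan so downstream nodes see the inner frame. */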
165 
166  /* pop (ip, udp, vxlan) */
167  if (is_ip4) {
168  vlib_buffer_advance
169  (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
170  vlib_buffer_advance
171  (b1, sizeof(*ip4_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
172  } else {
173  vlib_buffer_advance
174  (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
175  vlib_buffer_advance
176  (b1, sizeof(*ip6_1)+sizeof(udp_header_t)+sizeof(*vxlan1));
177  }
178 
179  tunnel_index0 = ~0;
180  error0 = 0;
181 
182  tunnel_index1 = ~0;
183  error1 = 0;
184 
185  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
186  {
187  error0 = VXLAN_ERROR_BAD_FLAGS;
188  next0 = VXLAN_INPUT_NEXT_DROP;
189  goto trace0;
190  }
191 
192  if (is_ip4) {
193  key4_0.src = ip4_0->src_address.as_u32;
194  key4_0.vni = vxlan0->vni_reserved;
195 
196  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
197  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
198  {
199  p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
200  if (PREDICT_FALSE (p0 == NULL))
201  {
202  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
203  next0 = VXLAN_INPUT_NEXT_DROP;
204  goto trace0;
205  }
206  last_key4.as_u64 = key4_0.as_u64;
207  tunnel_index0 = last_tunnel_index = p0[0];
208  }
209  else
210  tunnel_index0 = last_tunnel_index;
211  t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
212 
213  /* Validate VXLAN tunnel encap-fib index against packet */
214  if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
215  {
216  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
217  next0 = VXLAN_INPUT_NEXT_DROP;
218  goto trace0;
219  }
220 
221  /* Validate VXLAN tunnel SIP against packet DIP */
222  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
223  goto next0; /* valid packet */
224  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
225  {
226  key4_0.src = ip4_0->dst_address.as_u32;
227  key4_0.vni = vxlan0->vni_reserved;
228  /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
229  p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
230  if (PREDICT_TRUE (p0 != NULL))
231  {
232  mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
233  goto next0; /* valid packet */
234  }
235  }
236  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
237  next0 = VXLAN_INPUT_NEXT_DROP;
238  goto trace0;
239 
240  } else /* !is_ip4 */ {
241  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
242  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
243  key6_0.vni = vxlan0->vni_reserved;
244 
245  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
246  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
247  {
248  p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
249  if (PREDICT_FALSE (p0 == NULL))
250  {
251  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
252  next0 = VXLAN_INPUT_NEXT_DROP;
253  goto trace0;
254  }
255  clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
256  tunnel_index0 = last_tunnel_index = p0[0];
257  }
258  else
259  tunnel_index0 = last_tunnel_index;
260  t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
261 
262  /* Validate VXLAN tunnel encap-fib index against packet */
263  if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
264  {
265  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
266  next0 = VXLAN_INPUT_NEXT_DROP;
267  goto trace0;
268  }
269 
270  /* Validate VXLAN tunnel SIP against packet DIP */
271  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
272  &t0->src.ip6)))
273  goto next0; /* valid packet */
274  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
275  {
276  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
277  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
278  key6_0.vni = vxlan0->vni_reserved;
279  p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
280  if (PREDICT_TRUE (p0 != NULL))
281  {
282  mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
283  goto next0; /* valid packet */
284  }
285  }
286  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
287  next0 = VXLAN_INPUT_NEXT_DROP;
288  goto trace0;
289  }
290 
291  next0:
292  next0 = t0->decap_next_index;
293  sw_if_index0 = t0->sw_if_index;
294  len0 = vlib_buffer_length_in_chain (vm, b0);
295 
296  /* Required to make the l2 tag push / pop code work on l2 subifs */
297  if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
298  vnet_update_l2_len (b0);
299 
300  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
301  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
302  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
303 
304  pkts_decapsulated ++;
305  stats_n_packets += 1;
306  stats_n_bytes += len0;
307 
308  /* Batch stats increment on the same vxlan tunnel so counter
309  is not incremented per packet */
310  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
311  {
312  stats_n_packets -= 1;
313  stats_n_bytes -= len0;
314  if (stats_n_packets)
315  vlib_increment_combined_counter
316  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
317  cpu_index, stats_sw_if_index,
318  stats_n_packets, stats_n_bytes);
319  stats_n_packets = 1;
320  stats_n_bytes = len0;
321  stats_sw_if_index = sw_if_index0;
322  }
323 
324  trace0:
325  b0->error = error0 ? node->errors[error0] : 0;
326 
327  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
328  {
329  vxlan_rx_trace_t *tr
330  = vlib_add_trace (vm, node, b0, sizeof (*tr));
331  tr->next_index = next0;
332  tr->error = error0;
333  tr->tunnel_index = tunnel_index0;
334  tr->vni = vnet_get_vni (vxlan0);
335  }
336 
337  if (PREDICT_FALSE (vxlan1->flags != VXLAN_FLAGS_I))
338  {
339  error1 = VXLAN_ERROR_BAD_FLAGS;
340  next1 = VXLAN_INPUT_NEXT_DROP;
341  goto trace1;
342  }
343 
344  if (is_ip4) {
345  key4_1.src = ip4_1->src_address.as_u32;
346  key4_1.vni = vxlan1->vni_reserved;
347 
348  /* Make sure unicast VXLAN tunnel exist by packet SIP and VNI */
349  if (PREDICT_FALSE (key4_1.as_u64 != last_key4.as_u64))
350  {
351  p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
352  if (PREDICT_FALSE (p1 == NULL))
353  {
354  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
355  next1 = VXLAN_INPUT_NEXT_DROP;
356  goto trace1;
357  }
358  last_key4.as_u64 = key4_1.as_u64;
359  tunnel_index1 = last_tunnel_index = p1[0];
360  }
361  else
362  tunnel_index1 = last_tunnel_index;
363  t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
364 
365  /* Validate VXLAN tunnel encap-fib index against packet */
366  if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
367  {
368  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
369  next1 = VXLAN_INPUT_NEXT_DROP;
370  goto trace1;
371  }
372 
373  /* Validate VXLAN tunnel SIP against packet DIP */
374  if (PREDICT_TRUE (ip4_1->dst_address.as_u32 == t1->src.ip4.as_u32))
375  goto next1; /* valid packet */
376  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_1->dst_address)))
377  {
378  key4_1.src = ip4_1->dst_address.as_u32;
379  key4_1.vni = vxlan1->vni_reserved;
380  /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
381  p1 = hash_get (vxm->vxlan4_tunnel_by_key, key4_1.as_u64);
382  if (PREDICT_TRUE (p1 != NULL))
383  {
384  mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
385  goto next1; /* valid packet */
386  }
387  }
388  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
389  next1 = VXLAN_INPUT_NEXT_DROP;
390  goto trace1;
391 
392  } else /* !is_ip4 */ {
393  key6_1.src.as_u64[0] = ip6_1->src_address.as_u64[0];
394  key6_1.src.as_u64[1] = ip6_1->src_address.as_u64[1];
395  key6_1.vni = vxlan1->vni_reserved;
396 
397  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
398  if (PREDICT_FALSE (memcmp(&key6_1, &last_key6, sizeof(last_key6)) != 0))
399  {
400  p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
401 
402  if (PREDICT_FALSE (p1 == NULL))
403  {
404  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
405  next1 = VXLAN_INPUT_NEXT_DROP;
406  goto trace1;
407  }
408 
409  clib_memcpy (&last_key6, &key6_1, sizeof(key6_1));
410  tunnel_index1 = last_tunnel_index = p1[0];
411  }
412  else
413  tunnel_index1 = last_tunnel_index;
414  t1 = pool_elt_at_index (vxm->tunnels, tunnel_index1);
415 
416  /* Validate VXLAN tunnel encap-fib index against packet */
417  if (PREDICT_FALSE (validate_vxlan_fib (b1, t1, is_ip4) == 0))
418  {
419  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
420  next1 = VXLAN_INPUT_NEXT_DROP;
421  goto trace1;
422  }
423 
424  /* Validate VXLAN tunnel SIP against packet DIP */
425  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_1->dst_address,
426  &t1->src.ip6)))
427  goto next1; /* valid packet */
428  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_1->dst_address)))
429  {
430  key6_1.src.as_u64[0] = ip6_1->dst_address.as_u64[0];
431  key6_1.src.as_u64[1] = ip6_1->dst_address.as_u64[1];
432  key6_1.vni = vxlan1->vni_reserved;
433  p1 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_1);
434  if (PREDICT_TRUE (p1 != NULL))
435  {
436  mt1 = pool_elt_at_index (vxm->tunnels, p1[0]);
437  goto next1; /* valid packet */
438  }
439  }
440  error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
441  next1 = VXLAN_INPUT_NEXT_DROP;
442  goto trace1;
443  }
444 
445  next1:
446  next1 = t1->decap_next_index;
447  sw_if_index1 = t1->sw_if_index;
448  len1 = vlib_buffer_length_in_chain (vm, b1);
449 
450  /* Required to make the l2 tag push / pop code work on l2 subifs */
451  if (PREDICT_TRUE(next1 == VXLAN_INPUT_NEXT_L2_INPUT))
452  vnet_update_l2_len (b1);
453 
454  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
455  vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
456  sw_if_index1 = (mt1) ? mt1->sw_if_index : sw_if_index1;
457 
458  pkts_decapsulated ++;
459  stats_n_packets += 1;
460  stats_n_bytes += len1;
461 
462  /* Batch stats increment on the same vxlan tunnel so counter
463  is not incremented per packet */
464  if (PREDICT_FALSE (sw_if_index1 != stats_sw_if_index))
465  {
466  stats_n_packets -= 1;
467  stats_n_bytes -= len1;
468  if (stats_n_packets)
469  vlib_increment_combined_counter
470  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
471  cpu_index, stats_sw_if_index,
472  stats_n_packets, stats_n_bytes);
473  stats_n_packets = 1;
474  stats_n_bytes = len1;
475  stats_sw_if_index = sw_if_index1;
476  }
477 
478  trace1:
479  b1->error = error1 ? node->errors[error1] : 0;
480 
481  if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
482  {
483  vxlan_rx_trace_t *tr
484  = vlib_add_trace (vm, node, b1, sizeof (*tr));
485  tr->next_index = next1;
486  tr->error = error1;
487  tr->tunnel_index = tunnel_index1;
488  tr->vni = vnet_get_vni (vxlan1);
489  }
490 
491  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
492  to_next, n_left_to_next,
493  bi0, bi1, next0, next1);
494  }
495 
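/* Single-packet tail loop: identical decap logic to the dual loop above,
 * applied to whatever remains of the frame. */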
496  while (n_left_from > 0 && n_left_to_next > 0)
497  {
498  u32 bi0;
499  vlib_buffer_t * b0;
500  u32 next0;
501  ip4_header_t * ip4_0;
502  ip6_header_t * ip6_0;
503  vxlan_header_t * vxlan0;
504  uword * p0;
505  u32 tunnel_index0;
506  vxlan_tunnel_t * t0, * mt0 = NULL;
507  vxlan4_tunnel_key_t key4_0;
508  vxlan6_tunnel_key_t key6_0;
509  u32 error0;
510  u32 sw_if_index0, len0;
511 
512  bi0 = from[0];
513  to_next[0] = bi0;
514  from += 1;
515  to_next += 1;
516  n_left_from -= 1;
517  n_left_to_next -= 1;
518 
519  b0 = vlib_get_buffer (vm, bi0);
520 
521  /* udp leaves current_data pointing at the vxlan header */
522  vxlan0 = vlib_buffer_get_current (b0);
523  if (is_ip4) {
524  vlib_buffer_advance
525  (b0, -(word)(sizeof(udp_header_t)+sizeof(ip4_header_t)));
526  ip4_0 = vlib_buffer_get_current (b0);
527  } else {
528  vlib_buffer_advance
529  (b0, -(word)(sizeof(udp_header_t)+sizeof(ip6_header_t)));
530  ip6_0 = vlib_buffer_get_current (b0);
531  }
532 
533  /* pop (ip, udp, vxlan) */
534  if (is_ip4) {
535  vlib_buffer_advance
536  (b0, sizeof(*ip4_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
537  } else {
538  vlib_buffer_advance
539  (b0, sizeof(*ip6_0)+sizeof(udp_header_t)+sizeof(*vxlan0));
540  }
541 
542  tunnel_index0 = ~0;
543  error0 = 0;
544 
545  if (PREDICT_FALSE (vxlan0->flags != VXLAN_FLAGS_I))
546  {
547  error0 = VXLAN_ERROR_BAD_FLAGS;
548  next0 = VXLAN_INPUT_NEXT_DROP;
549  goto trace00;
550  }
551 
552  if (is_ip4) {
553  key4_0.src = ip4_0->src_address.as_u32;
554  key4_0.vni = vxlan0->vni_reserved;
555 
556  /* Make sure unicast VXLAN tunnel exist by packet SIP and VNI */
557  if (PREDICT_FALSE (key4_0.as_u64 != last_key4.as_u64))
558  {
559  p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
560  if (PREDICT_FALSE (p0 == NULL))
561  {
562  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
563  next0 = VXLAN_INPUT_NEXT_DROP;
564  goto trace00;
565  }
566  last_key4.as_u64 = key4_0.as_u64;
567  tunnel_index0 = last_tunnel_index = p0[0];
568  }
569  else
570  tunnel_index0 = last_tunnel_index;
571  t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
572 
573  /* Validate VXLAN tunnel encap-fib index against packet */
574  if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
575  {
576  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
577  next0 = VXLAN_INPUT_NEXT_DROP;
578  goto trace00;
579  }
580 
581  /* Validate VXLAN tunnel SIP against packet DIP */
582  if (PREDICT_TRUE (ip4_0->dst_address.as_u32 == t0->src.ip4.as_u32))
583  goto next00; /* valid packet */
584  if (PREDICT_FALSE (ip4_address_is_multicast (&ip4_0->dst_address)))
585  {
586  key4_0.src = ip4_0->dst_address.as_u32;
587  key4_0.vni = vxlan0->vni_reserved;
588  /* Make sure mcast VXLAN tunnel exist by packet DIP and VNI */
589  p0 = hash_get (vxm->vxlan4_tunnel_by_key, key4_0.as_u64);
590  if (PREDICT_TRUE (p0 != NULL))
591  {
592  mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
593  goto next00; /* valid packet */
594  }
595  }
596  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
597  next0 = VXLAN_INPUT_NEXT_DROP;
598  goto trace00;
599 
600  } else /* !is_ip4 */ {
601  key6_0.src.as_u64[0] = ip6_0->src_address.as_u64[0];
602  key6_0.src.as_u64[1] = ip6_0->src_address.as_u64[1];
603  key6_0.vni = vxlan0->vni_reserved;
604 
605  /* Make sure VXLAN tunnel exist according to packet SIP and VNI */
606  if (PREDICT_FALSE (memcmp(&key6_0, &last_key6, sizeof(last_key6)) != 0))
607  {
608  p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
609  if (PREDICT_FALSE (p0 == NULL))
610  {
611  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
612  next0 = VXLAN_INPUT_NEXT_DROP;
613  goto trace00;
614  }
615  clib_memcpy (&last_key6, &key6_0, sizeof(key6_0));
616  tunnel_index0 = last_tunnel_index = p0[0];
617  }
618  else
619  tunnel_index0 = last_tunnel_index;
620  t0 = pool_elt_at_index (vxm->tunnels, tunnel_index0);
621 
622  /* Validate VXLAN tunnel encap-fib index against packet */
623  if (PREDICT_FALSE (validate_vxlan_fib (b0, t0, is_ip4) == 0))
624  {
625  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
626  next0 = VXLAN_INPUT_NEXT_DROP;
627  goto trace00;
628  }
629 
630  /* Validate VXLAN tunnel SIP against packet DIP */
631  if (PREDICT_TRUE (ip6_address_is_equal (&ip6_0->dst_address,
632  &t0->src.ip6)))
633  goto next00; /* valid packet */
634  if (PREDICT_FALSE (ip6_address_is_multicast (&ip6_0->dst_address)))
635  {
636  key6_0.src.as_u64[0] = ip6_0->dst_address.as_u64[0];
637  key6_0.src.as_u64[1] = ip6_0->dst_address.as_u64[1];
638  key6_0.vni = vxlan0->vni_reserved;
639  p0 = hash_get_mem (vxm->vxlan6_tunnel_by_key, &key6_0);
640  if (PREDICT_TRUE (p0 != NULL))
641  {
642  mt0 = pool_elt_at_index (vxm->tunnels, p0[0]);
643  goto next00; /* valid packet */
644  }
645  }
646  error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
647  next0 = VXLAN_INPUT_NEXT_DROP;
648  goto trace00;
649  }
650 
651  next00:
652  next0 = t0->decap_next_index;
653  sw_if_index0 = t0->sw_if_index;
654  len0 = vlib_buffer_length_in_chain (vm, b0);
655 
656  /* Required to make the l2 tag push / pop code work on l2 subifs */
657  if (PREDICT_TRUE(next0 == VXLAN_INPUT_NEXT_L2_INPUT))
658  vnet_update_l2_len (b0);
659 
660  /* Set packet input sw_if_index to unicast VXLAN tunnel for learning */
661  vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
662  sw_if_index0 = (mt0) ? mt0->sw_if_index : sw_if_index0;
663 
664  pkts_decapsulated ++;
665  stats_n_packets += 1;
666  stats_n_bytes += len0;
667 
668  /* Batch stats increment on the same vxlan tunnel so counter
669  is not incremented per packet */
670  if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
671  {
672  stats_n_packets -= 1;
673  stats_n_bytes -= len0;
674  if (stats_n_packets)
675  vlib_increment_combined_counter
676  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
677  cpu_index, stats_sw_if_index,
678  stats_n_packets, stats_n_bytes);
679  stats_n_packets = 1;
680  stats_n_bytes = len0;
681  stats_sw_if_index = sw_if_index0;
682  }
683 
684  trace00:
685  b0->error = error0 ? node->errors[error0] : 0;
686 
687  if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
688  {
689  vxlan_rx_trace_t *tr
690  = vlib_add_trace (vm, node, b0, sizeof (*tr));
691  tr->next_index = next0;
692  tr->error = error0;
693  tr->tunnel_index = tunnel_index0;
694  tr->vni = vnet_get_vni (vxlan0);
695  }
696  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
697  to_next, n_left_to_next,
698  bi0, next0);
699  }
700 
701  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
702  }
703  /* Do we still need this now that tunnel tx stats is kept? */
704  vlib_node_increment_counter (vm, is_ip4?
705  vxlan4_input_node.index : vxlan6_input_node.index,
706  VXLAN_ERROR_DECAPSULATED,
707  pkts_decapsulated);
708 
709  /* Increment any remaining batch stats */
710  if (stats_n_packets)
711  {
712  vlib_increment_combined_counter
713  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
714  cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
715  node->runtime_data[0] = stats_sw_if_index;
716  }
717 
718  return from_frame->n_vectors;
719 }
720 
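/* Per-address-family entry points; each simply instantiates the inlined
 * worker with a constant is_ip4. */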
721 static uword
722 vxlan4_input (vlib_main_t * vm,
723  vlib_node_runtime_t * node,
724  vlib_frame_t * from_frame)
725 {
726  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 1);
727 }
728 
729 static uword
730 vxlan6_input (vlib_main_t * vm,
731  vlib_node_runtime_t * node,
732  vlib_frame_t * from_frame)
733 {
734  return vxlan_input(vm, node, from_frame, /* is_ip4 */ 0);
735 }
736 
737 static char * vxlan_error_strings[] = {
738 #define vxlan_error(n,s) s,
739 #include <vnet/vxlan/vxlan_error.def>
740 #undef vxlan_error
741 #undef _
742 };
743 
744 VLIB_REGISTER_NODE (vxlan4_input_node) = {
745  .function = vxlan4_input,
746  .name = "vxlan4-input",
747  /* Takes a vector of packets. */
748  .vector_size = sizeof (u32),
749 
750  .n_errors = VXLAN_N_ERROR,
751  .error_strings = vxlan_error_strings,
752 
753  .n_next_nodes = VXLAN_INPUT_N_NEXT,
754  .next_nodes = {
755 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
756  foreach_vxlan_input_next
757 #undef _
758  },
759 
760 //temp .format_buffer = format_vxlan_header,
761  .format_trace = format_vxlan_rx_trace,
762  // $$$$ .unformat_buffer = unformat_vxlan_header,
763 };
764 
765 VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_input_node, vxlan4_input)
766 
767 VLIB_REGISTER_NODE (vxlan6_input_node) = {
768  .function = vxlan6_input,
769  .name = "vxlan6-input",
770  /* Takes a vector of packets. */
771  .vector_size = sizeof (u32),
772 
773  .n_errors = VXLAN_N_ERROR,
774  .error_strings = vxlan_error_strings,
775 
776  .n_next_nodes = VXLAN_INPUT_N_NEXT,
777  .next_nodes = {
778 #define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
779  foreach_vxlan_input_next
780 #undef _
781  },
782 
783 //temp .format_buffer = format_vxlan_header,
784  .format_trace = format_vxlan_rx_trace,
785  // $$$$ .unformat_buffer = unformat_vxlan_header,
786 };
787 
788 VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_input_node, vxlan6_input)
789 
790 
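/* ip4/ip6-vxlan-bypass: IP feature nodes that peek at locally terminated
 * UDP/4789 packets addressed to a configured VTEP and hand them straight to
 * vxlan4-input/vxlan6-input instead of the normal local/UDP dispatch path.
 * All other traffic continues along the feature arc via vnet_feature_next(). */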
791 typedef enum {
792  IP_VXLAN_BYPASS_NEXT_DROP,
793  IP_VXLAN_BYPASS_NEXT_VXLAN,
794  IP_VXLAN_BYPASS_N_NEXT,
795 } ip_vxan_bypass_next_t;
796 
797 always_inline uword
798 ip_vxlan_bypass_inline (vlib_main_t * vm,
799  vlib_node_runtime_t * node,
800  vlib_frame_t * frame,
801  u32 is_ip4)
802 {
803  vxlan_main_t * vxm = &vxlan_main;
804  u32 * from, * to_next, n_left_from, n_left_to_next, next_index;
805  vlib_node_runtime_t * error_node = vlib_node_get_runtime (vm, ip4_input_node.index);
806  ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
807  ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
808 
809  from = vlib_frame_vector_args (frame);
810  n_left_from = frame->n_vectors;
811  next_index = node->cached_next_index;
812 
813  if (node->flags & VLIB_NODE_FLAG_TRACE)
814  ip4_forward_next_trace (vm, node, frame, VLIB_TX);
815 
816  if (is_ip4) addr4.data_u32 = ~0;
817  else ip6_address_set_zero (&addr6);
818 
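/* addr4/addr6 cache the last destination address that matched a local VTEP,
 * so back-to-back packets to the same VTEP skip the hash lookup. */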
819  while (n_left_from > 0)
820  {
821  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
822 
823  while (n_left_from >= 4 && n_left_to_next >= 2)
824  {
825  vlib_buffer_t * b0, * b1;
826  ip4_header_t * ip40, * ip41;
827  ip6_header_t * ip60, * ip61;
828  udp_header_t * udp0, * udp1;
829  u32 bi0, ip_len0, udp_len0, flags0, next0;
830  u32 bi1, ip_len1, udp_len1, flags1, next1;
831  i32 len_diff0, len_diff1;
832  u8 error0, good_udp0, proto0;
833  u8 error1, good_udp1, proto1;
834 
835  /* Prefetch next iteration. */
836  {
837  vlib_buffer_t * p2, * p3;
838 
839  p2 = vlib_get_buffer (vm, from[2]);
840  p3 = vlib_get_buffer (vm, from[3]);
841 
842  vlib_prefetch_buffer_header (p2, LOAD);
843  vlib_prefetch_buffer_header (p3, LOAD);
844 
845  CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
846  CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
847  }
848 
849  bi0 = to_next[0] = from[0];
850  bi1 = to_next[1] = from[1];
851  from += 2;
852  n_left_from -= 2;
853  to_next += 2;
854  n_left_to_next -= 2;
855 
856  b0 = vlib_get_buffer (vm, bi0);
857  b1 = vlib_get_buffer (vm, bi1);
858  if (is_ip4)
859  {
860  ip40 = vlib_buffer_get_current (b0);
861  ip41 = vlib_buffer_get_current (b1);
862  }
863  else
864  {
865  ip60 = vlib_buffer_get_current (b0);
866  ip61 = vlib_buffer_get_current (b1);
867  }
868 
869  /* Setup packet for next IP feature */
870  vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
871  vnet_feature_next(vnet_buffer(b1)->sw_if_index[VLIB_RX], &next1, b1);
872 
873  if (is_ip4)
874  {
875  /* Treat IP frag packets as "experimental" protocol for now
876  until support of IP frag reassembly is implemented */
877  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
878  proto1 = ip4_is_fragment(ip41) ? 0xfe : ip41->protocol;
879  }
880  else
881  {
882  proto0 = ip60->protocol;
883  proto1 = ip61->protocol;
884  }
885 
886  /* Process packet 0 */
887  if (proto0 != IP_PROTOCOL_UDP)
888  goto exit0; /* not UDP packet */
889 
890  if (is_ip4)
891  udp0 = ip4_next_header (ip40);
892  else
893  udp0 = ip6_next_header (ip60);
894 
895  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
896  goto exit0; /* not VXLAN packet */
897 
898  /* Validate DIP against VTEPs*/
899  if (is_ip4)
900  {
901  if (addr4.as_u32 != ip40->dst_address.as_u32)
902  {
903  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
904  goto exit0; /* no local VTEP for VXLAN packet */
905  addr4 = ip40->dst_address;
906  }
907  }
908  else
909  {
910  if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
911  {
912  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
913  goto exit0; /* no local VTEP for VXLAN packet */
914  addr6 = ip60->dst_address;
915  }
916  }
917 
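/* UDP checks: a zero checksum is accepted as-is; otherwise the checksum is
 * validated lazily (only if the driver has not already computed it), and the
 * UDP length must fit inside the IP payload. */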
918  flags0 = b0->flags;
919  good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
920 
921  /* Don't verify UDP checksum for packets with explicit zero checksum. */
922  good_udp0 |= udp0->checksum == 0;
923 
924  /* Verify UDP length */
925  if (is_ip4)
926  ip_len0 = clib_net_to_host_u16 (ip40->length);
927  else
928  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
929  udp_len0 = clib_net_to_host_u16 (udp0->length);
930  len_diff0 = ip_len0 - udp_len0;
931 
932  /* Verify UDP checksum */
933  if (PREDICT_FALSE (!good_udp0))
934  {
935  if ((flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
936  {
937  if (is_ip4)
938  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
939  else
940  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
941  good_udp0 =
942  (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
943  }
944  }
945 
946  if (is_ip4)
947  {
948  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
949  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
950  }
951  else
952  {
953  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
954  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
955  }
956 
957  next0 = error0 ?
958  IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
959  b0->error = error0 ? error_node->errors[error0] : 0;
960 
961  /* vxlan-input node expect current at VXLAN header */
962  if (is_ip4)
963  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
964  else
965  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
966 
967  exit0:
968  /* Process packet 1 */
969  if (proto1 != IP_PROTOCOL_UDP)
970  goto exit1; /* not UDP packet */
971 
972  if (is_ip4)
973  udp1 = ip4_next_header (ip41);
974  else
975  udp1 = ip6_next_header (ip61);
976 
977  if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
978  goto exit1; /* not VXLAN packet */
979 
980  /* Validate DIP against VTEPs*/
981  if (is_ip4)
982  {
983  if (addr4.as_u32 != ip41->dst_address.as_u32)
984  {
985  if (!hash_get (vxm->vtep4, ip41->dst_address.as_u32))
986  goto exit1; /* no local VTEP for VXLAN packet */
987  addr4 = ip41->dst_address;
988  }
989  }
990  else
991  {
992  if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
993  {
994  if (!hash_get_mem (vxm->vtep6, &ip61->dst_address))
995  goto exit1; /* no local VTEP for VXLAN packet */
996  addr6 = ip61->dst_address;
997  }
998  }
999 
1000  flags1 = b1->flags;
1001  good_udp1 = (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
1002 
1003  /* Don't verify UDP checksum for packets with explicit zero checksum. */
1004  good_udp1 |= udp1->checksum == 0;
1005 
1006  /* Verify UDP length */
1007  if (is_ip4)
1008  ip_len1 = clib_net_to_host_u16 (ip41->length);
1009  else
1010  ip_len1 = clib_net_to_host_u16 (ip61->payload_length);
1011  udp_len1 = clib_net_to_host_u16 (udp1->length);
1012  len_diff1 = ip_len1 - udp_len1;
1013 
1014  /* Verify UDP checksum */
1015  if (PREDICT_FALSE (!good_udp1))
1016  {
1017  if ((flags1 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
1018  {
1019  if (is_ip4)
1020  flags1 = ip4_tcp_udp_validate_checksum (vm, b1);
1021  else
1022  flags1 = ip6_tcp_udp_icmp_validate_checksum (vm, b1);
1023  good_udp1 =
1024  (flags1 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
1025  }
1026  }
1027 
1028  if (is_ip4)
1029  {
1030  error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1031  error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
1032  }
1033  else
1034  {
1035  error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1036  error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
1037  }
1038 
1039  next1 = error1 ?
1040  IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1041  b1->error = error1 ? error_node->errors[error1] : 0;
1042 
1043  /* vxlan-input node expect current at VXLAN header */
1044  if (is_ip4)
1045  vlib_buffer_advance (b1, sizeof(ip4_header_t)+sizeof(udp_header_t));
1046  else
1047  vlib_buffer_advance (b1, sizeof(ip6_header_t)+sizeof(udp_header_t));
1048 
1049  exit1:
1050  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
1051  to_next, n_left_to_next,
1052  bi0, bi1, next0, next1);
1053  }
1054 
1055  while (n_left_from > 0 && n_left_to_next > 0)
1056  {
1057  vlib_buffer_t * b0;
1058  ip4_header_t * ip40;
1059  ip6_header_t * ip60;
1060  udp_header_t * udp0;
1061  u32 bi0, ip_len0, udp_len0, flags0, next0;
1062  i32 len_diff0;
1063  u8 error0, good_udp0, proto0;
1064 
1065  bi0 = to_next[0] = from[0];
1066  from += 1;
1067  n_left_from -= 1;
1068  to_next += 1;
1069  n_left_to_next -= 1;
1070 
1071  b0 = vlib_get_buffer (vm, bi0);
1072  if (is_ip4)
1073  ip40 = vlib_buffer_get_current (b0);
1074  else
1075  ip60 = vlib_buffer_get_current (b0);
1076 
1077  /* Setup packet for next IP feature */
1078  vnet_feature_next(vnet_buffer(b0)->sw_if_index[VLIB_RX], &next0, b0);
1079 
1080  if (is_ip4)
1081  /* Treat IP4 frag packets as "experimental" protocol for now
1082  until support of IP frag reassembly is implemented */
1083  proto0 = ip4_is_fragment(ip40) ? 0xfe : ip40->protocol;
1084  else
1085  proto0 = ip60->protocol;
1086 
1087  if (proto0 != IP_PROTOCOL_UDP)
1088  goto exit; /* not UDP packet */
1089 
1090  if (is_ip4)
1091  udp0 = ip4_next_header (ip40);
1092  else
1093  udp0 = ip6_next_header (ip60);
1094 
1095  if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
1096  goto exit; /* not VXLAN packet */
1097 
1098  /* Validate DIP against VTEPs*/
1099  if (is_ip4)
1100  {
1101  if (addr4.as_u32 != ip40->dst_address.as_u32)
1102  {
1103  if (!hash_get (vxm->vtep4, ip40->dst_address.as_u32))
1104  goto exit; /* no local VTEP for VXLAN packet */
1105  addr4 = ip40->dst_address;
1106  }
1107  }
1108  else
1109  {
1110  if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
1111  {
1112  if (!hash_get_mem (vxm->vtep6, &ip60->dst_address))
1113  goto exit; /* no local VTEP for VXLAN packet */
1114  addr6 = ip60->dst_address;
1115  }
1116  }
1117 
1118  flags0 = b0->flags;
1119  good_udp0 = (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
1120 
1121  /* Don't verify UDP checksum for packets with explicit zero checksum. */
1122  good_udp0 |= udp0->checksum == 0;
1123 
1124  /* Verify UDP length */
1125  if (is_ip4)
1126  ip_len0 = clib_net_to_host_u16 (ip40->length);
1127  else
1128  ip_len0 = clib_net_to_host_u16 (ip60->payload_length);
1129  udp_len0 = clib_net_to_host_u16 (udp0->length);
1130  len_diff0 = ip_len0 - udp_len0;
1131 
1132  /* Verify UDP checksum */
1133  if (PREDICT_FALSE (!good_udp0))
1134  {
1135  if ((flags0 & IP_BUFFER_L4_CHECKSUM_COMPUTED) == 0)
1136  {
1137  if (is_ip4)
1138  flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
1139  else
1140  flags0 = ip6_tcp_udp_icmp_validate_checksum (vm, b0);
1141  good_udp0 =
1142  (flags0 & IP_BUFFER_L4_CHECKSUM_CORRECT) != 0;
1143  }
1144  }
1145 
1146  if (is_ip4)
1147  {
1148  error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
1149  error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
1150  }
1151  else
1152  {
1153  error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
1154  error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
1155  }
1156 
1157  next0 = error0 ?
1158  IP_VXLAN_BYPASS_NEXT_DROP : IP_VXLAN_BYPASS_NEXT_VXLAN;
1159  b0->error = error0 ? error_node->errors[error0] : 0;
1160 
1161  /* vxlan-input node expect current at VXLAN header */
1162  if (is_ip4)
1163  vlib_buffer_advance (b0, sizeof(ip4_header_t)+sizeof(udp_header_t));
1164  else
1165  vlib_buffer_advance (b0, sizeof(ip6_header_t)+sizeof(udp_header_t));
1166 
1167  exit:
1168  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1169  to_next, n_left_to_next,
1170  bi0, next0);
1171  }
1172 
1173  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1174  }
1175 
1176  return frame->n_vectors;
1177 }
1178 
1179 static uword
1180 ip4_vxlan_bypass (vlib_main_t * vm,
1181  vlib_node_runtime_t * node,
1182  vlib_frame_t * frame)
1183 {
1184  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
1185 }
1186 
1187 VLIB_REGISTER_NODE (ip4_vxlan_bypass_node) = {
1188  .function = ip4_vxlan_bypass,
1189  .name = "ip4-vxlan-bypass",
1190  .vector_size = sizeof (u32),
1191 
1192  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
1193  .next_nodes = {
1194  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
1195  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan4-input",
1196  },
1197 
1198  .format_buffer = format_ip4_header,
1199  .format_trace = format_ip4_forward_next_trace,
1200 };
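/* This node is expected to be enabled per RX interface as a feature on the
 * ip4-unicast arc (the enable/disable plumbing lives outside this file, in
 * vxlan.c); it is not wired into the graph statically. */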
1201 
1202 VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_bypass_node, ip4_vxlan_bypass)
1203 
1204 /* Dummy init function to get us linked in. */
1205 clib_error_t * ip4_vxlan_bypass_init (vlib_main_t * vm)
1206 { return 0; }
1207 
1208 VLIB_INIT_FUNCTION (ip4_vxlan_bypass_init);
1209 
1210 static uword
1211 ip6_vxlan_bypass (vlib_main_t * vm,
1212  vlib_node_runtime_t * node,
1213  vlib_frame_t * frame)
1214 {
1215  return ip_vxlan_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
1216 }
1217 
1218 VLIB_REGISTER_NODE (ip6_vxlan_bypass_node) = {
1219  .function = ip6_vxlan_bypass,
1220  .name = "ip6-vxlan-bypass",
1221  .vector_size = sizeof (u32),
1222 
1223  .n_next_nodes = IP_VXLAN_BYPASS_N_NEXT,
1224  .next_nodes = {
1225  [IP_VXLAN_BYPASS_NEXT_DROP] = "error-drop",
1226  [IP_VXLAN_BYPASS_NEXT_VXLAN] = "vxlan6-input",
1227  },
1228 
1229  .format_buffer = format_ip6_header,
1230  .format_trace = format_ip6_forward_next_trace,
1231 };
1232 
1233 VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_bypass_node, ip6_vxlan_bypass)
1234 
1235 /* Dummy init function to get us linked in. */
1236 clib_error_t * ip6_vxlan_bypass_init (vlib_main_t * vm)
1237 { return 0; }
1238 
1239 VLIB_INIT_FUNCTION (ip6_vxlan_bypass_init);