FD.io VPP v16.06
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated") \
_(DEL_TUNNEL, "deleted tunnel packets")

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};
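/* For reference, the X-macro above expands to:
 *   static char * vxlan_encap_error_strings[] = {
 *     "good packets encapsulated",
 *     "deleted tunnel packets",
 *   };
 * The same list generates the matching VXLAN_ENCAP_ERROR_* enum values
 * below, so the counters and their display strings stay in sync. */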

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_IP4_LOOKUP,
  VXLAN_ENCAP_NEXT_IP6_LOOKUP,
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;
u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN-ENCAP: tunnel %d vni %d", t->tunnel_index, t->vni);
  return s;
}


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
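/* Offsets are in u64 units: an IPv4 rewrite is 36 octets (ip4 20 +
 * udp 8 + vxlan 8), so 4 x 8 = 32 octets are copied via these offsets
 * and the last 4 octets separately as a u32; an IPv6 rewrite is
 * 56 octets (ip6 40 + udp 8 + vxlan 8), exactly 7 x 8. */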

static uword
vxlan_encap (vlib_main_t * vm,
             vlib_node_runtime_t * node,
             vlib_frame_t * from_frame)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 cpu_index = os_get_cpu_number();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 flow_hash0, flow_hash1;
          u32 next0 = VXLAN_ENCAP_NEXT_IP4_LOOKUP;
          u32 next1 = VXLAN_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          vnet_hw_interface_t * hi0, * hi1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          vxlan_tunnel_t * t0, * t1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;
          u8 is_ip4_0, is_ip4_1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
          hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);

          t0 = &vxm->tunnels[hi0->dev_instance];
          t1 = &vxm->tunnels[hi1->dev_instance];

          is_ip4_0 = (t0->flags & VXLAN_TUNNEL_IS_IPV4);
          is_ip4_1 = (t1->flags & VXLAN_TUNNEL_IS_IPV4);

          if (PREDICT_FALSE(!is_ip4_0)) next0 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;
          if (PREDICT_FALSE(!is_ip4_1)) next1 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;

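          /* No hash lookup needed here: the L2 path already picked the
           * vxlan interface, so sw_if_index[VLIB_TX] names it and the hw
           * dev_instance indexes straight into vxm->tunnels. */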
          /* Check rewrite string and drop packet if tunnel is deleted */
          if (PREDICT_FALSE(t0->rewrite == vxlan4_dummy_rewrite ||
                            t0->rewrite == vxlan6_dummy_rewrite))
            {
              next0 = VXLAN_ENCAP_NEXT_DROP;
              b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
              pkts_encapsulated --;
            } /* Still go through normal encap with dummy rewrite */
          if (PREDICT_FALSE(t1->rewrite == vxlan4_dummy_rewrite ||
                            t1->rewrite == vxlan6_dummy_rewrite))
            {
              next1 = VXLAN_ENCAP_NEXT_DROP;
              b1->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
              pkts_encapsulated --;
            } /* Still go through normal encap with dummy rewrite */

          /* IP4 VXLAN header sizeof(ip4_vxlan_header_t) should be 36 octets */
          /* IP6 VXLAN header sizeof(ip6_vxlan_header_t) should be 56 octets */
          if (PREDICT_TRUE(is_ip4_0))
            ASSERT(vec_len(t0->rewrite) == 36);
          else
            ASSERT(vec_len(t0->rewrite) == 56);
          if (PREDICT_TRUE(is_ip4_1))
            ASSERT(vec_len(t1->rewrite) == 36);
          else
            ASSERT(vec_len(t1->rewrite) == 56);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));

          /* assign both v4 and v6; avoid a branch, optimizer will help us */
          ip4_0 = vlib_buffer_get_current(b0);
          ip6_0 = (void *)ip4_0;
          ip4_1 = vlib_buffer_get_current(b1);
          ip6_1 = (void *)ip4_1;

          /* Copy the fixed header (v4 and v6 variables point to the same
           * place at this point)
           */
          copy_dst0 = (u64 *) ip4_0;
          copy_src0 = (u64 *) t0->rewrite;

          copy_dst1 = (u64 *) ip4_1;
          copy_src1 = (u64 *) t1->rewrite;

          /* Copy first 32 (ip4)/56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          if (PREDICT_TRUE(is_ip4_0)) {
            foreach_fixed_header4_offset;
          } else {
            foreach_fixed_header6_offset;
          }
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
          if (PREDICT_TRUE(is_ip4_1)) {
            foreach_fixed_header4_offset;
          } else {
            foreach_fixed_header6_offset;
          }
#undef _
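          /* With _ defined as above, each foreach_fixed_header*_offset
           * expands into unrolled assignments, e.g.
           *   copy_dst0[0] = copy_src0[0]; ... copy_dst0[3] = copy_src0[3];
           * i.e. a branch-free header copy with no loop or call. */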
          /* Last 4 octets. Hopefully gcc will be our friend */
          if (PREDICT_TRUE(is_ip4_0)) {
            copy_dst_last0 = (u32 *)(&copy_dst0[4]);
            copy_src_last0 = (u32 *)(&copy_src0[4]);
            copy_dst_last0[0] = copy_src_last0[0];
          }
          if (PREDICT_TRUE(is_ip4_1)) {
            copy_dst_last1 = (u32 *)(&copy_dst1[4]);
            copy_src_last1 = (u32 *)(&copy_src1[4]);
            copy_dst_last1[0] = copy_src_last1[0];
          }

          if (PREDICT_TRUE(is_ip4_0)) {
            /* fix the <bleep>ing outer-IP checksum */
            sum0 = ip4_0->checksum;

            /* old_l0 always 0, see the rewrite setup */
            new_l0 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
            sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                   length /* changed member */);
            ip4_0->checksum = ip_csum_fold (sum0);
            ip4_0->length = new_l0;
          } else {
            new_l0 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                    - sizeof(*ip6_0));
            ip6_0->payload_length = new_l0;
          }

          if (PREDICT_TRUE(is_ip4_1)) {
            /* fix the <bleep>ing outer-IP checksum */
            sum1 = ip4_1->checksum;

            /* old_l1 always 0, see the rewrite setup */
            new_l1 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
            sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                   length /* changed member */);
            ip4_1->checksum = ip_csum_fold (sum1);
            ip4_1->length = new_l1;
          } else {
            new_l1 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                    - sizeof(*ip6_1));
            ip6_1->payload_length = new_l1;
          }
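          /* ip_csum_update adjusts the existing checksum incrementally
           * (RFC 1624 style): only the 16-bit length field changed, and
           * old_l0/old_l1 are 0 because the precomputed rewrite leaves the
           * length field zeroed, so no full header re-checksum is needed. */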

          /* Fix UDP length */
          if (PREDICT_TRUE(is_ip4_0)) {
            udp0 = (udp_header_t *)(ip4_0+1);
            new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                           - sizeof (*ip4_0));
          } else {
            udp0 = (udp_header_t *)(ip6_0+1);
            new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                           - sizeof (*ip6_0));
          }
          if (PREDICT_TRUE(is_ip4_1)) {
            udp1 = (udp_header_t *)(ip4_1+1);
            new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                           - sizeof (*ip4_1));
          } else {
            udp1 = (udp_header_t *)(ip6_1+1);
            new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                           - sizeof (*ip6_1));
          }

          udp0->length = new_l0;
          udp0->src_port = flow_hash0;

          udp1->length = new_l1;
          udp1->src_port = flow_hash1;
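          /* Using the inner flow hash as the outer UDP source port gives
           * underlay routers per-flow entropy for ECMP/LAG hashing while
           * keeping each inner flow on a single path. */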

          if (PREDICT_FALSE(!is_ip4_0)) {
            int bogus = 0;
            /* IPv6 UDP checksum is mandatory */
            udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                               ip6_0, &bogus);
            ASSERT(bogus == 0);
            if (udp0->checksum == 0)
              udp0->checksum = 0xffff;
          }

          if (PREDICT_FALSE(!is_ip4_1)) {
            int bogus = 0;
            /* IPv6 UDP checksum is mandatory */
            udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
                                                               ip6_1, &bogus);
            ASSERT(bogus == 0);
            if (udp1->checksum == 0)
              udp1->checksum = 0xffff;
          }
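          /* A computed checksum of 0 is transmitted as 0xffff: in UDP a
           * zero checksum means "none", which IPv6 forbids (RFC 768 /
           * RFC 2460). */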

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b1)->sw_if_index[VLIB_TX] = t1->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          vnet_buffer(b1)->sw_if_index[VLIB_RX] = sw_if_index1;
          pkts_encapsulated += 2;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       cpu_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index1, 1, len1);
                }
            }
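          /* Net effect: one combined-counter update per run of packets on
           * the same tunnel, plus a final flush after the frame, instead
           * of one update per packet. */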

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

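      /* vlib_validate_buffer_enqueue_x2/_x1 validate the speculative
       * enqueue: buffers were written to the cached next frame up front
       * and are re-queued only if next0/next1 ended up different. */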
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 next0 = VXLAN_ENCAP_NEXT_IP4_LOOKUP;
          u32 sw_if_index0, len0;
          vnet_hw_interface_t * hi0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          vxlan_tunnel_t * t0;
          u16 new_l0;
          ip_csum_t sum0;
          u8 is_ip4_0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* 1-wide cache? */
          sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
          hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);

          t0 = &vxm->tunnels[hi0->dev_instance];

          is_ip4_0 = (t0->flags & VXLAN_TUNNEL_IS_IPV4);

          if (PREDICT_FALSE(!is_ip4_0)) next0 = VXLAN_ENCAP_NEXT_IP6_LOOKUP;

          /* Check rewrite string and drop packet if tunnel is deleted */
          if (PREDICT_FALSE(t0->rewrite == vxlan4_dummy_rewrite ||
                            t0->rewrite == vxlan6_dummy_rewrite))
            {
              next0 = VXLAN_ENCAP_NEXT_DROP;
              b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
              pkts_encapsulated --;
            } /* Still go through normal encap with dummy rewrite */

          /* IP4 VXLAN header sizeof(ip4_vxlan_header_t) should be 36 octets */
          /* IP6 VXLAN header sizeof(ip6_vxlan_header_t) should be 56 octets */
          if (PREDICT_TRUE(is_ip4_0))
            ASSERT(vec_len(t0->rewrite) == 36);
          else
            ASSERT(vec_len(t0->rewrite) == 56);

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          /* assign both v4 and v6; avoid a branch, optimizer will help us */
          ip4_0 = vlib_buffer_get_current(b0);
          ip6_0 = (void *)ip4_0;

          /* Copy the fixed header (v4 and v6 variables point to the same
           * place at this point)
           */
          copy_dst0 = (u64 *) ip4_0;
          copy_src0 = (u64 *) t0->rewrite;

          /* Copy first 32 (ip4)/56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          if (PREDICT_TRUE(is_ip4_0)) {
            foreach_fixed_header4_offset;
          } else {
            foreach_fixed_header6_offset;
          }
#undef _
          if (PREDICT_TRUE(is_ip4_0)) {
            /* Last 4 octets. Hopefully gcc will be our friend */
            copy_dst_last0 = (u32 *)(&copy_dst0[4]);
            copy_src_last0 = (u32 *)(&copy_src0[4]);

            copy_dst_last0[0] = copy_src_last0[0];
          }

          if (PREDICT_TRUE(is_ip4_0)) {
            /* fix the <bleep>ing outer-IP checksum */
            sum0 = ip4_0->checksum;

            /* old_l0 always 0, see the rewrite setup */
            new_l0 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
            sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                   length /* changed member */);
            ip4_0->checksum = ip_csum_fold (sum0);
            ip4_0->length = new_l0;
          } else {
            new_l0 =
              clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                    - sizeof(*ip6_0));
            ip6_0->payload_length = new_l0;
          }

          /* Fix UDP length */
          if (PREDICT_TRUE(is_ip4_0)) {
            udp0 = (udp_header_t *)(ip4_0+1);
            new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                           - sizeof (*ip4_0));
          } else {
            udp0 = (udp_header_t *)(ip6_0+1);
            new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                           - sizeof (*ip6_0));
          }

          udp0->length = new_l0;
          udp0->src_port = flow_hash0;

          if (PREDICT_FALSE(!is_ip4_0)) {
            int bogus = 0;
            /* IPv6 UDP checksum is mandatory */
            udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                               ip6_0, &bogus);
            ASSERT(bogus == 0);
            if (udp0->checksum == 0)
              udp0->checksum = 0xffff;
          }


          /* vnet_update_l2_len (b0); do we need this? cluke */

          /* Reset to look up tunnel partner in the configured FIB */
          vnet_buffer(b0)->sw_if_index[VLIB_TX] = t0->encap_fib_index;
          vnet_buffer(b0)->sw_if_index[VLIB_RX] = sw_if_index0;
          pkts_encapsulated ++;

          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   cpu_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}
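/* For orientation, the 36-octet ip4 rewrite applied above corresponds to
 * the ip4_vxlan_header_t defined in vxlan.h; a sketch of its shape (see
 * vxlan.h for the real definition):
 *
 *   typedef CLIB_PACKED (struct {
 *     ip4_header_t ip4;      // 20 octets; length/checksum patched per packet
 *     udp_header_t udp;      //  8 octets; src_port patched from flow hash
 *     vxlan_header_t vxlan;  //  8 octets; flags + 24-bit vni
 *   }) ip4_vxlan_header_t;   // 36 octets, matching the ASSERTs above
 */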

VLIB_REGISTER_NODE (vxlan_encap_node) = {
  .function = vxlan_encap,
  .name = "vxlan-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,

  .n_next_nodes = VXLAN_ENCAP_N_NEXT,

  .next_nodes = {
        [VXLAN_ENCAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
        [VXLAN_ENCAP_NEXT_IP6_LOOKUP] = "ip6-lookup",
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};
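/* Encapsulated buffers leave this node to ip4-lookup / ip6-lookup with
 * sw_if_index[VLIB_TX] set to the tunnel's encap FIB, so the new outer
 * header is routed like any locally generated packet; error-drop absorbs
 * packets whose tunnel has been deleted. */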