FD.io VPP  v17.07.01-10-g3be13f0
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>

/* Statistics (not all errors) */
#define foreach_vxlan_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")
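/* This x-macro expands once into the string table below and once into
   the error enum, keeping the two definitions in sync. */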

static char * vxlan_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_vxlan_encap_error
#undef _
};

typedef enum {
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,
  foreach_vxlan_encap_error
#undef _
  VXLAN_ENCAP_N_ERROR,
} vxlan_encap_error_t;

typedef enum {
  VXLAN_ENCAP_NEXT_DROP,
  VXLAN_ENCAP_N_NEXT,
} vxlan_encap_next_t;

typedef struct {
  u32 tunnel_index;
  u32 vni;
} vxlan_encap_trace_t;

u8 * format_vxlan_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  vxlan_encap_trace_t * t
      = va_arg (*args, vxlan_encap_trace_t *);

  s = format (s, "VXLAN encap to vxlan_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}


#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
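/* The precomputed rewrite is 36 octets for IPv4 (4 x u64 + 1 x u32) and
   56 octets for IPv6 (7 x u64), so the header copies below are unrolled
   via these offset lists rather than using a generic memcpy per packet. */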

always_inline uword
vxlan_encap_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
                    vlib_frame_t * from_frame,
                    u32 is_ip4)
{
  u32 n_left_from, next_index, * from, * to_next;
  vxlan_main_t * vxm = &vxlan_main;
  vnet_main_t * vnm = vxm->vnet_main;
  vnet_interface_main_t * im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vlib_get_thread_index();
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t * hi0, * hi1;
  vxlan_tunnel_t * t0 = NULL, * t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t * b0, * b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t * ip4_0, * ip4_1;
          ip6_header_t * ip6_0, * ip6_1;
          udp_header_t * udp0, * udp1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t * p2, * p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
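            /* Prefetching two packets ahead gives the header and data
               loads time to complete before those buffers are processed
               on subsequent iterations. */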

            CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
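          /* The L2 flow hash ends up in the outer UDP source port below,
             giving transit routers per-flow entropy for ECMP/LAG hashing
             (as recommended by RFC 7348). */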

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer(b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer(b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));
          vlib_buffer_advance (b1, -(word)_vec_len(t1->rewrite));

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip4_vxlan_header_t));

              ip4_0 = vlib_buffer_get_current(b0);
              ip4_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *)(&copy_dst1[4]);
              copy_src_last1 = (u32 *)(&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

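              /* The rewrite was built with a length field of 0, so the IP
                 checksum can be updated incrementally from old_l0/old_l1
                 (both 0) instead of being recomputed over the header. */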
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */);
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip4_1+1);
              new_l1 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b1)
                                             - sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ASSERT(vec_len(t1->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              ip6_1 = vlib_buffer_get_current(b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof(*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *)(ip6_1+1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b1,
                                                                 ip6_1, &bogus);
              ASSERT(bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
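              /* Per RFC 768 a zero UDP checksum means "no checksum", which
                 IPv6 forbids, so a computed 0 is sent as 0xffff instead. */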
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                       thread_index, stats_sw_if_index,
                       stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE(b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

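      /* Single-buffer loop: handles the tail of the frame and any
         packets the dual loop above could not pair with a free slot. */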
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t * b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t * ip4_0;
          ip6_header_t * ip6_0;
          udp_header_t * udp0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash(b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer(b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word)_vec_len(t0->rewrite));

          if (is_ip4)
            {
              /* IP4 VXLAN header should be 36 octets */
              ASSERT(sizeof(ip4_vxlan_header_t) == 36);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip4_vxlan_header_t));
              ip4_0 = vlib_buffer_get_current(b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *)(&copy_dst0[4]);
              copy_src_last0 = (u32 *)(&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */);
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip4_0+1);
              new_l0 = clib_host_to_net_u16 (vlib_buffer_length_in_chain(vm, b0)
                                             - sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              /* IP6 VXLAN header should be 56 octets */
              ASSERT(sizeof(ip6_vxlan_header_t) == 56);
              ASSERT(vec_len(t0->rewrite) == sizeof(ip6_vxlan_header_t));
              ip6_0 = vlib_buffer_get_current(b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof(*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *)(ip6_0+1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum(vm, b0,
                                                                 ip6_0, &bogus);
              ASSERT(bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated ++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same vxlan tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
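  /* stats_sw_if_index is saved in node runtime data below so batching
     can resume across frames without an extra counter flush. */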
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
vxlan4_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
vxlan6_encap (vlib_main_t * vm,
              vlib_node_runtime_t * node,
              vlib_frame_t * from_frame)
{
  return vxlan_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

VLIB_REGISTER_NODE (vxlan4_encap_node) = {
  .function = vxlan4_encap,
  .name = "vxlan4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_encap_node, vxlan4_encap)

VLIB_REGISTER_NODE (vxlan6_encap_node) = {
  .function = vxlan6_encap,
  .name = "vxlan6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_vxlan_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(vxlan_encap_error_strings),
  .error_strings = vxlan_encap_error_strings,
  .n_next_nodes = VXLAN_ENCAP_N_NEXT,
  .next_nodes = {
        [VXLAN_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_encap_node, vxlan6_encap)