FD.io VPP v18.10-34-gcce845e
Vector Packet Processing
encap.c
/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
    GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;

typedef struct
{
  u32 tunnel_index;
  u32 vni;
} geneve_encap_trace_t;

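/*
 * Per-packet trace record: filled in below for buffers carrying
 * VLIB_BUFFER_IS_TRACED and rendered by format_geneve_encap_trace.
 */
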
u8 *
format_geneve_encap_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  geneve_encap_trace_t *t = va_arg (*args, geneve_encap_trace_t *);

  s = format (s, "GENEVE encap to geneve_tunnel%d vni %d",
              t->tunnel_index, t->vni);
  return s;
}


#define foreach_fixed_header4_offset \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)
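
/*
 * These macros unroll the bulk copy of each tunnel's precomputed rewrite:
 * four u64 stores cover the first 32 octets of the 36-octet
 * IP4 + UDP + GENEVE header (the last 4 octets are copied as a u32), and
 * seven u64 stores cover the 56-octet IP6 + UDP + GENEVE header.
 */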

always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

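  /*
   * Standard VPP dispatch loop: take the frame's buffer indices, process
   * packets two at a time while at least four remain (so the next pair's
   * headers can be prefetched), then drain the remainder one at a time.
   */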
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          vlib_buffer_t *b0, *b1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_buffer_t *p2, *p3;

            p2 = vlib_get_buffer (vm, from[2]);
            p3 = vlib_get_buffer (vm, from[3]);

            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);

            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);
          flow_hash1 = vnet_l2_compute_flow_hash (b1);
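          /* The L2 flow hash computed above seeds the outer UDP source
             port below, giving per-inner-flow entropy for ECMP/RSS in
             the underlay. */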

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ASSERT (t0 != NULL);

          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }

          ASSERT (t1 != NULL);

          vnet_buffer (b1)->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b1, -(word) _vec_len (t1->rewrite));
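          /* The negative advance moves current_data back into the buffer
             headroom, opening exactly vec_len (rewrite) bytes in front of
             the inner packet for the outer headers written below. */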

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b0);
              ip4_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

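              /* Only the IP total length differs from the precomputed
                 rewrite, whose length field was left as 0, so the header
                 checksum is updated incrementally (RFC 1624) rather than
                 recomputed from scratch. */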
              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b0);
              ip6_1 = vlib_buffer_get_current (b1);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b1)
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

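              /* A zero UDP checksum means "no checksum" and is not
                 permitted over IPv6 (RFC 8200), so one is always computed;
                 a result of 0 is sent as its one's-complement equivalent
                 0xffff. */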
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b1,
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          len1 = vlib_buffer_length_in_chain (vm, b1);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b1, sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }

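          /* Both buffers were speculatively enqueued to the cached
             next_index; the validate step below re-homes them if
             next0/next1 disagree with the current frame. */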
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          vlib_buffer_t *b0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);

          flow_hash0 = vnet_l2_compute_flow_hash (b0);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }
          vnet_buffer (b0)->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b0, -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b0);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b0);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0)
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b0,
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b0);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

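  /* stats_sw_if_index was seeded from node->runtime_data[0] on entry;
     after the final flush below, the current interface is written back so
     the batch can continue across frames. */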
  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

static uword
geneve4_encap (vlib_main_t * vm,
               vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

static uword
geneve6_encap (vlib_main_t * vm,
               vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}
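
/*
 * geneve_encap_inline is expanded into each wrapper above with a constant
 * is_ip4 argument, so the compiler can drop the untaken address-family
 * branch from each node function.
 */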

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .function = geneve4_encap,
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (geneve4_encap_node, geneve4_encap);

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .function = geneve6_encap,
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (geneve6_encap_node, geneve6_encap);
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */