FD.io VPP v19.08.3-2-gbabecb413
encap.c
/*
 * Copyright (c) 2017 SUSE LLC.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/geneve/geneve.h>

/* Statistics (not all errors) */
#define foreach_geneve_encap_error    \
_(ENCAPSULATED, "good packets encapsulated")

static char *geneve_encap_error_strings[] = {
#define _(sym,string) string,
  foreach_geneve_encap_error
#undef _
};

typedef enum
{
#define _(sym,str) GENEVE_ENCAP_ERROR_##sym,
  foreach_geneve_encap_error
#undef _
    GENEVE_ENCAP_N_ERROR,
} geneve_encap_error_t;

typedef enum
{
  GENEVE_ENCAP_NEXT_DROP,
  GENEVE_ENCAP_N_NEXT,
} geneve_encap_next_t;

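/*
 * Word offsets for the fixed-header copy loops below: 4 x 8 bytes covers the
 * first 32 octets of the IPv4 + UDP + GENEVE rewrite (the remaining 4 octets
 * are copied separately), 7 x 8 bytes covers the 56-octet IPv6 + UDP + GENEVE
 * fixed header.
 */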
#define foreach_fixed_header4_offset            \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset            \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)

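/*
 * Shared worker for the geneve4-encap and geneve6-encap nodes: prepends the
 * per-tunnel rewrite, fixes the IP and UDP lengths and checksums, and hands
 * packets to the tunnel's next DPO.
 */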
always_inline uword
geneve_encap_inline (vlib_main_t * vm,
                     vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, u32 is_ip4)
{
  u32 n_left_from, next_index, *from, *to_next;
  geneve_main_t *vxm = &geneve_main;
  vnet_main_t *vnm = vxm->vnet_main;
  vnet_interface_main_t *im = &vnm->interface_main;
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 thread_index = vm->thread_index;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
  u32 sw_if_index0 = ~0, sw_if_index1 = ~0;
  u32 next0 = 0, next1 = 0;
  vnet_hw_interface_t *hi0, *hi1;
  geneve_tunnel_t *t0 = NULL, *t1 = NULL;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  vlib_get_buffers (vm, from, bufs, n_left_from);

  next_index = node->cached_next_index;
  stats_sw_if_index = node->runtime_data[0];
  stats_n_packets = stats_n_bytes = 0;

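  /*
   * Dual-loop pattern: encapsulate two packets per iteration while at least
   * four remain, so the following pair's buffer metadata and header space can
   * be prefetched; any remainder falls through to the single-buffer loop.
   */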
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 bi0, bi1;
          u32 flow_hash0, flow_hash1;
          u32 len0, len1;
          ip4_header_t *ip4_0, *ip4_1;
          ip6_header_t *ip6_0, *ip6_1;
          udp_header_t *udp0, *udp1;
          u64 *copy_src0, *copy_dst0;
          u64 *copy_src1, *copy_dst1;
          u32 *copy_src_last0, *copy_dst_last0;
          u32 *copy_src_last1, *copy_dst_last1;
          u16 new_l0, new_l1;
          ip_csum_t sum0, sum1;

          /* Prefetch next iteration. */
          {
            vlib_prefetch_buffer_header (b[2], LOAD);
            vlib_prefetch_buffer_header (b[3], LOAD);

            CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
            CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                           2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }

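          /* Buffers are speculatively enqueued to the cached next frame;
             vlib_validate_buffer_enqueue_x2 below moves them if next0/next1
             end up differing from the cached next index. */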
          bi0 = from[0];
          bi1 = from[1];
          to_next[0] = bi0;
          to_next[1] = bi1;
          from += 2;
          to_next += 2;
          n_left_to_next -= 2;
          n_left_from -= 2;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
          flow_hash1 = vnet_l2_compute_flow_hash (b[1]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ALWAYS_ASSERT (t0 != NULL);

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
              hi1 = vnet_get_sup_hw_interface (vnm, sw_if_index1);
              t1 = &vxm->tunnels[hi1->dev_instance];
              /* Note: change to always set next1 if it may be set to drop */
              next1 = t1->next_dpo.dpoi_next_node;
            }

          ALWAYS_ASSERT (t1 != NULL);

          vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = t1->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));
          vlib_buffer_advance (b[1], -(word) _vec_len (t1->rewrite));

          if (is_ip4)
            {
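              /* vlib_buffer_advance above opened room in front of the inner
                 packet; copy in the precomputed IPv4 + UDP + GENEVE rewrite,
                 then patch the per-packet length, checksum and source port
                 fields. */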
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
              u8 ip4_geneve_header_total_len1 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
              ip4_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip4_geneve_header_total_len1);

              ip4_0 = vlib_buffer_get_current (b[0]);
              ip4_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip4_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];
              copy_dst_last1 = (u32 *) (&copy_dst1[4]);
              copy_src_last1 = (u32 *) (&copy_src1[4]);
              copy_dst_last1[0] = copy_src_last1[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;
              sum1 = ip4_1->checksum;
              new_l1 = /* old_l1 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]));
              sum1 = ip_csum_update (sum1, old_l1, new_l1, ip4_header_t,
                                     length /* changed member */ );
              ip4_1->checksum = ip_csum_fold (sum1);
              ip4_1->length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip4_1 + 1);
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1]) -
                                      sizeof (*ip4_1));
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;
            }
          else /* ipv6 */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
              u8 ip6_geneve_header_total_len1 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
              ip6_geneve_header_total_len1 += t1->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);
              ASSERT (vec_len (t1->rewrite) == ip6_geneve_header_total_len1);

              ip6_0 = vlib_buffer_get_current (b[0]);
              ip6_1 = vlib_buffer_get_current (b[1]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              copy_dst1 = (u64 *) ip6_1;
              copy_src1 = (u64 *) t1->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
#define _(offs) copy_dst1[offs] = copy_src1[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;
              new_l1 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[1])
                                      - sizeof (*ip6_1));
              ip6_1->payload_length = new_l1;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
              udp1 = (udp_header_t *) (ip6_1 + 1);
              udp1->length = new_l1;
              udp1->src_port = flow_hash1;

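              /* A computed UDP checksum of zero is transmitted as 0xffff
                 (RFC 768): zero would mean "no checksum", which is not
                 allowed for UDP over IPv6. */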
              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[1],
                                                                  ip6_1,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
            }

          pkts_encapsulated += 2;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          len1 = vlib_buffer_length_in_chain (vm, b[1]);
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;
          vnet_buffer (b[1])->ip.flow_hash = flow_hash1;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  if (stats_n_packets)
                    vlib_increment_combined_counter
                      (im->combined_sw_if_counters +
                       VNET_INTERFACE_COUNTER_TX, thread_index,
                       stats_sw_if_index, stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     thread_index, sw_if_index1, 1, len1);
                }
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }

          if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
            }
          b += 2;

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
        }

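      /* Single-buffer loop: the same encapsulation logic as the dual loop
         above, applied to whatever is left of the frame. */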
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0;
          u32 flow_hash0;
          u32 len0;
          ip4_header_t *ip4_0;
          ip6_header_t *ip6_0;
          udp_header_t *udp0;
          u64 *copy_src0, *copy_dst0;
          u32 *copy_src_last0, *copy_dst_last0;
          u16 new_l0;
          ip_csum_t sum0;

          bi0 = from[0];
          to_next[0] = bi0;
          from += 1;
          to_next += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          flow_hash0 = vnet_l2_compute_flow_hash (b[0]);

          /* Get next node index and adj index from tunnel next_dpo */
          if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
              /* Note: change to always set next0 if it may be set to drop */
              next0 = t0->next_dpo.dpoi_next_node;
            }

          ALWAYS_ASSERT (t0 != NULL);

          vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = t0->next_dpo.dpoi_index;

          /* Apply the rewrite string. $$$$ vnet_rewrite? */
          vlib_buffer_advance (b[0], -(word) _vec_len (t0->rewrite));

          if (is_ip4)
            {
              u8 ip4_geneve_base_header_len =
                sizeof (ip4_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip4_geneve_header_total_len0 = ip4_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip4_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip4_geneve_header_total_len0);

              ip4_0 = vlib_buffer_get_current (b[0]);

              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip4_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 32 octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header4_offset;
#undef _
              /* Last 4 octets. Hopefully gcc will be our friend */
              copy_dst_last0 = (u32 *) (&copy_dst0[4]);
              copy_src_last0 = (u32 *) (&copy_src0[4]);
              copy_dst_last0[0] = copy_src_last0[0];

              /* Fix the IP4 checksum and length */
              sum0 = ip4_0->checksum;
              new_l0 = /* old_l0 always 0, see the rewrite setup */
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
              sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                                     length /* changed member */ );
              ip4_0->checksum = ip_csum_fold (sum0);
              ip4_0->length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip4_0 + 1);
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
                                      sizeof (*ip4_0));
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;
            }

          else /* ip6 path */
            {
              int bogus = 0;

              u8 ip6_geneve_base_header_len =
                sizeof (ip6_header_t) + sizeof (udp_header_t) +
                GENEVE_BASE_HEADER_LENGTH;
              u8 ip6_geneve_header_total_len0 = ip6_geneve_base_header_len;
#if SUPPORT_OPTIONS_HEADER==1
              ip6_geneve_header_total_len0 += t0->options_len;
#endif
              ASSERT (vec_len (t0->rewrite) == ip6_geneve_header_total_len0);

              ip6_0 = vlib_buffer_get_current (b[0]);
              /* Copy the fixed header */
              copy_dst0 = (u64 *) ip6_0;
              copy_src0 = (u64 *) t0->rewrite;
              /* Copy first 56 (ip6) octets 8-bytes at a time */
#define _(offs) copy_dst0[offs] = copy_src0[offs];
              foreach_fixed_header6_offset;
#undef _
              /* Fix IP6 payload length */
              new_l0 =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0])
                                      - sizeof (*ip6_0));
              ip6_0->payload_length = new_l0;

              /* Fix UDP length and set source port */
              udp0 = (udp_header_t *) (ip6_0 + 1);
              udp0->length = new_l0;
              udp0->src_port = flow_hash0;

              /* IPv6 UDP checksum is mandatory */
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (vm, b[0],
                                                                  ip6_0,
                                                                  &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
            }

          pkts_encapsulated++;
          len0 = vlib_buffer_length_in_chain (vm, b[0]);
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* save inner packet flow_hash for load-balance node */
          vnet_buffer (b[0])->ip.flow_hash = flow_hash0;

          /* Batch stats increment on the same geneve tunnel so counter is not
             incremented per packet. Note stats are still incremented for deleted
             and admin-down tunnel where packets are dropped. It is not worthwhile
             to check for this rare case and affect normal path performance. */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              if (stats_n_packets)
                vlib_increment_combined_counter
                  (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                   thread_index, stats_sw_if_index,
                   stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              geneve_encap_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
            }
          b += 1;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* Do we still need this now that tunnel tx stats is kept? */
  vlib_node_increment_counter (vm, node->node_index,
                               GENEVE_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Increment any remaining batch stats */
  if (stats_n_packets)
    {
      vlib_increment_combined_counter
        (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
         thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
      node->runtime_data[0] = stats_sw_if_index;
    }

  return from_frame->n_vectors;
}

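/*
 * Graph node entry points: thin wrappers that select the IPv4 or IPv6 flavour
 * of the shared inline above.
 */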
VLIB_NODE_FN (geneve4_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 1);
}

VLIB_NODE_FN (geneve6_encap_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * from_frame)
{
  return geneve_encap_inline (vm, node, from_frame, /* is_ip4 */ 0);
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (geneve4_encap_node) = {
  .name = "geneve4-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE (geneve6_encap_node) = {
  .name = "geneve6-encap",
  .vector_size = sizeof (u32),
  .format_trace = format_geneve_encap_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN (geneve_encap_error_strings),
  .error_strings = geneve_encap_error_strings,
  .n_next_nodes = GENEVE_ENCAP_N_NEXT,
  .next_nodes = {
    [GENEVE_ENCAP_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */