FD.io VPP  v21.10.1-2-g0a485f517
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2018 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 #include <vlib/vlib.h>
17 #include <vnet/vnet.h>
18 #include <vppinfra/error.h>
19 #include <vnet/ethernet/ethernet.h>
20 #include <vnet/feature/feature.h>
21 #include <vnet/gso/gso.h>
23 #include <vnet/ip/icmp46_packet.h>
24 #include <vnet/ip/ip4.h>
25 #include <vnet/ip/ip6.h>
26 #include <vnet/udp/udp_packet.h>
27 
/* Error table for the GSO nodes: each _() entry becomes both an error-counter
   string and a GSO_ERROR_<sym> enum member.
   NOTE(review): the `foreach_gso_error` expansion lines inside the string
   array and the enum (and the gso_trace_t members) are missing from this
   extracted copy — confirm against upstream vnet/gso/node.c. */
28 #define foreach_gso_error \
29  _ (NO_BUFFERS, "no buffers to segment GSO") \
30  _ (UNHANDLED_TYPE, "unhandled gso type")
31 
/* Human-readable error strings, indexed by gso_error_t. */
32 static char *gso_error_strings[] = {
33 #define _(sym, string) string,
35 #undef _
36 };
37 
/* Error codes matching gso_error_strings[] one-to-one. */
38 typedef enum
39 {
40 #define _(sym, str) GSO_ERROR_##sym,
42 #undef _
44 } gso_error_t;
45 
/* Next-node indices for the GSO graph nodes (GSO_NEXT_DROP, GSO_N_NEXT). */
46 typedef enum
47 {
50 } gso_next_t;
51 
/* Per-packet trace record: buffer flags, gso_size, gso_l4_hdr_sz and the
   parsed generic header offsets (see format_gso_trace below). */
52 typedef struct
53 {
58 } gso_trace_t;
59 
/* vlib trace formatter for gso_trace_t: prints gso_size / gso_l4_hdr_sz plus
   the generic header offsets for GSO buffers, or a "non-gso buffer" line
   otherwise.
   NOTE(review): the argument list of the first format() call (line 70 of the
   original) is missing from this extracted copy. */
60 static u8 *
61 format_gso_trace (u8 * s, va_list * args)
62 {
63  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
64  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
65  gso_trace_t *t = va_arg (*args, gso_trace_t *);
66 
67  if (t->flags & VNET_BUFFER_F_GSO)
68  {
69  s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
71  &t->gho);
72  }
73  else
74  {
75  s =
76  format (s, "non-gso buffer\n%U", format_generic_header_offset,
77  &t->gho);
78  }
79 
80  return s;
81 }
82 
/* After TSO segmentation of an IPIP-tunnelled packet, walk every buffer in
   ptd->split_buffers and rewrite the *outer* IP header: outer IPv4 gets a new
   total length and header checksum; outer IPv6 gets a new payload_length.
   Returns the total outer-header bytes added across all segments.
   NOTE(review): the function signature line and the pointer-cast expressions
   for ip4/ip6 (lines 85-87, 94-97, 100, 103 of the original) are missing
   from this extracted copy. */
86  vlib_buffer_t * sb0,
88 {
89  u16 n_tx_bufs = vec_len (ptd->split_buffers);
90  u16 i = 0, n_tx_bytes = 0;
91 
92  while (i < n_tx_bufs)
93  {
95  vnet_get_outer_header (b0, gho);
98 
99  ip4_header_t *ip4 =
101  gho->outer_l3_hdr_offset);
102  ip6_header_t *ip6 =
104  gho->outer_l3_hdr_offset);
105 
106  if (gho->gho_flags & GHO_F_OUTER_IP4)
107  {
/* Outer IPv4: length covers everything after the outer L3 offset. */
108  ip4->length =
109  clib_host_to_net_u16 (b0->current_length -
110  gho->outer_l3_hdr_offset);
111  ip4->checksum = ip4_header_checksum (ip4);
112  }
113  else if (gho->gho_flags & GHO_F_OUTER_IP6)
114  {
/* Outer IPv6: payload_length excludes the IPv6 header itself, hence
   the subtraction of the outer L4 offset. */
115  ip6->payload_length =
116  clib_host_to_net_u16 (b0->current_length -
117  gho->outer_l4_hdr_offset);
118  }
119 
120  n_tx_bytes += gho->outer_hdr_sz;
121  i++;
122  }
123  return n_tx_bytes;
124 }
125 
/* Fix the outer IP and UDP headers of a single VXLAN-encapsulated segment:
   refresh outer IPv4 length+checksum (or IPv6 payload_length), then the outer
   UDP length and checksum. For outer IPv6 the UDP checksum is mandatory and
   recomputed; for outer IPv4 it is presumably left at 0 (allowed by RFC 768)
   — the IPv4 branch body is missing here, confirm upstream.
   NOTE(review): the signature line and the ip4/ip6/udp pointer-cast
   expressions (lines 126-128, 136, 138, 140, 164, 168 of the original) are
   missing from this extracted copy. */
129 {
130  u8 proto = 0;
131  ip4_header_t *ip4 = 0;
132  ip6_header_t *ip6 = 0;
133  udp_header_t *udp = 0;
134 
135  ip4 =
137  ip6 =
139  udp =
141 
142  if (gho->gho_flags & GHO_F_OUTER_IP4)
143  {
144  proto = ip4->protocol;
145  ip4->length =
146  clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
147  ip4->checksum = ip4_header_checksum (ip4);
148  }
149  else if (gho->gho_flags & GHO_F_OUTER_IP6)
150  {
151  proto = ip6->protocol;
152  ip6->payload_length =
153  clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
154  }
155  if (proto == IP_PROTOCOL_UDP)
156  {
157  int bogus;
158  udp->length =
159  clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
160  udp->checksum = 0;
161  if (gho->gho_flags & GHO_F_OUTER_IP6)
162  {
163  udp->checksum =
165  }
166  else if (gho->gho_flags & GHO_F_OUTER_IP4)
167  {
169  }
170  /* FIXME: it should be OUTER_UDP_CKSUM */
171  vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
172  }
173 }
174 
/* After TSO segmentation of a VXLAN-tunnelled packet, walk every buffer in
   ptd->split_buffers, rewind to the outer header and apply
   tso_segment_vxlan_tunnel_headers_fixup (presumably — the call line 191 is
   missing here; confirm upstream). Returns total outer-header bytes added.
   NOTE(review): signature and buffer-fetch lines (177-179, 186, 188-189, 191
   of the original) are missing from this extracted copy. */
178  vlib_buffer_t * sb0,
180 {
181  u16 n_tx_bufs = vec_len (ptd->split_buffers);
182  u16 i = 0, n_tx_bytes = 0;
183 
184  while (i < n_tx_bufs)
185  {
187  vnet_get_outer_header (b0, gho);
190 
192  n_tx_bytes += gho->outer_hdr_sz;
193  i++;
194  }
195  return n_tx_bytes;
196 }
197 
/* Compute how many buffers are needed to segment a GSO buffer of n_bytes_b0
   payload bytes (headers of l234_sz, first segment carrying first_data_size,
   remaining segments carrying up to gso_size each, capped by the default
   buffer data size) and allocate them into ptd->split_buffers.
   Returns the number of buffers allocated, or 0 on allocation failure (any
   partial allocation is freed).
   NOTE(review): the first signature line (199-200 of the original) is missing
   from this extracted copy. */
201  vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
202  u16 gso_size, u16 first_data_size,
204 {
205  u16 n_alloc, size;
206  u16 first_packet_length = l234_sz + first_data_size;
207 
208  /*
209  * size is the amount of data per segmented buffer except the 1st
210  * segmented buffer.
211  * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
212  * l234_sz is hdr_sz from l2_hdr_offset.
213  */
214  size =
215  clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
216  - gho->l2_hdr_offset);
217 
218  /*
219  * First segmented buffer length is calculated separately.
220  * As it may contain less data than gso_size (when gso_size is
221  * greater than current_length of 1st buffer from GSO chained
222  * buffers) and/or size calculated above.
223  */
224  u16 n_bufs = 1;
225 
226  /*
227  * Total packet length minus first packet length including l234 header.
228  * rounded-up division
229  */
230  ASSERT (n_bytes_b0 > first_packet_length);
231  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);
232 
233  vec_validate (ptd->split_buffers, n_bufs - 1);
234 
235  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
236  if (n_alloc < n_bufs)
237  {
/* Partial allocation is useless for segmentation: free what we got
   and report failure. */
238  vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
239  return 0;
240  }
241  return n_alloc;
242 }
243 
/* Initialise a freshly-allocated segment buffer nb0 from the template buffer
   b0: copy the relevant metadata fields and set current_length to `length`
   (the header template size plus the first segment's data).
   NOTE(review): lines 244-245, 254, 259 and the actual data-copy statement
   (262-263) of the original are missing from this extracted copy. */
246  u32 flags, u16 length)
247 {
248  /* copying objects from cacheline 0 */
249  nb0->current_data = b0->current_data;
250  nb0->current_length = length;
251  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
252  nb0->flow_id = b0->flow_id;
253  nb0->error = b0->error;
255  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
256 
257  /* copying objects from cacheline 1 */
258  nb0->trace_handle = b0->trace_handle;
260 
261  /* copying data */
264 }
265 
/* Prepare the next segment buffer nb0 from template b0: copy headers via
   tso_init_buf_from_template_base, compute how much payload fits in this
   segment (*p_dst_left, bounded by gso_size and the buffer data size), point
   *p_dst_ptr just past the copied headers, and stamp the TCP sequence number.
   NOTE(review): the first signature line (266-267) and the tcp pointer-cast
   expression (line 282) of the original are missing from this extracted
   copy. */
268  vlib_buffer_t * b0, u16 template_data_sz,
269  u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
270  u32 next_tcp_seq, u32 flags,
272 {
273  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);
274 
275  *p_dst_left =
276  clib_min (gso_size,
277  vlib_buffer_get_default_data_size (vm) - (template_data_sz +
278  nb0->current_data));
279  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;
280 
281  tcp_header_t *tcp =
283  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
284 }
285 
288  int is_l2, int is_ip6, generic_header_offset_t * gho)
289 {
290  ip4_header_t *ip4 =
292  ip6_header_t *ip6 =
294  tcp_header_t *tcp =
296 
297  tcp->flags = tcp_flags;
298 
299  if (is_ip6)
300  {
301  ip6->payload_length =
302  clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
303  if (gho->gho_flags & GHO_F_TCP)
304  {
305  int bogus = 0;
306  tcp->checksum = 0;
307  tcp->checksum =
310  VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
311  }
312  }
313  else
314  {
315  ip4->length =
316  clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
317  if (gho->gho_flags & GHO_F_IP4)
318  ip4->checksum = ip4_header_checksum (ip4);
319  if (gho->gho_flags & GHO_F_TCP)
320  {
321  tcp->checksum = 0;
322  tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
323  }
324  vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
325  VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
326  }
327 
328  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
329  {
330  u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
331 
332  ip_adjacency_t *adj0 = adj_get (adj_index0);
333 
335  adj0->sub_type.midchain.fixup_func)
336  /* calls e.g. ipip44_fixup */
337  adj0->sub_type.midchain.fixup_func
338  (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
339  }
340 }
341 
342 /**
343  * Allocate the necessary number of ptd->split_buffers,
344  * and segment the possibly chained buffer(s) from b0 into
345  * there.
346  *
347  * Return the cumulative number of bytes sent or zero
348  * if allocation failed.
349  */
350 
/* NOTE(review): the signature lines (351-352), the tcp pointer cast (365),
   the tso_alloc_tx_bufs call line (379) and the first-segment fetch (383)
   of the original are missing from this extracted copy. */
353  u32 sbi0, vlib_buffer_t * sb0,
354  generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
355  int is_ip6)
356 {
357  u32 n_tx_bytes = 0;
358  u16 gso_size = vnet_buffer2 (sb0)->gso_size;
359 
360  u8 save_tcp_flags = 0;
361  u8 tcp_flags_no_fin_psh = 0;
362  u32 next_tcp_seq = 0;
363 
364  tcp_header_t *tcp =
366  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
367  /* store original flags for last packet and reset FIN and PSH */
368  save_tcp_flags = tcp->flags;
369  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
370  tcp->checksum = 0;
371 
372  u32 default_bflags =
373  sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
374  u16 l234_sz = gho->hdr_sz;
375  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
376  next_tcp_seq += first_data_size;
377 
378  if (PREDICT_FALSE
380  (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
381  return 0;
382 
/* First segment reuses the original headers plus first_data_size bytes. */
384  tso_init_buf_from_template_base (b0, sb0, default_bflags,
385  l234_sz + first_data_size);
386 
387  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
388  if (total_src_left)
389  {
390  /* Need to copy more segments */
391  u8 *src_ptr, *dst_ptr;
392  u16 src_left, dst_left;
393  /* current source buffer */
394  vlib_buffer_t *csb0 = sb0;
395  u32 csbi0 = sbi0;
396  /* current dest buffer */
397  vlib_buffer_t *cdb0;
398  u16 dbi = 1; /* the buffer [0] is b0 */
399 
400  src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
401  src_left = sb0->current_length - l234_sz - first_data_size;
402 
403  tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
404  gho);
405 
406  /* grab a second buffer and prepare the loop */
407  ASSERT (dbi < vec_len (ptd->split_buffers));
408  cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
409  tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
410  &dst_left, next_tcp_seq, default_bflags,
411  gho);
412 
413  /* an arbitrary large number to catch the runaway loops */
414  int nloops = 2000;
415  while (total_src_left)
416  {
417  if (nloops-- <= 0)
418  clib_panic ("infinite loop detected");
419  u16 bytes_to_copy = clib_min (src_left, dst_left);
420 
421  clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);
422 
423  src_left -= bytes_to_copy;
424  src_ptr += bytes_to_copy;
425  total_src_left -= bytes_to_copy;
426  dst_left -= bytes_to_copy;
427  dst_ptr += bytes_to_copy;
428  next_tcp_seq += bytes_to_copy;
429  cdb0->current_length += bytes_to_copy;
430 
431  if (0 == src_left)
432  {
433  int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
434  u32 next_bi = csb0->next_buffer;
435 
436  /* init src to the next buffer in chain */
437  if (has_next)
438  {
439  csbi0 = next_bi;
440  csb0 = vlib_get_buffer (vm, csbi0);
441  src_left = csb0->current_length;
442  src_ptr = vlib_buffer_get_current (csb0);
443  }
444  else
445  {
446  ASSERT (total_src_left == 0);
447  break;
448  }
449  }
450  if (0 == dst_left && total_src_left)
451  {
/* Segment full: finalise it (no FIN/PSH) and start the next one. */
452  n_tx_bytes += cdb0->current_length;
453  tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
454  is_ip6, gho);
455  ASSERT (dbi < vec_len (ptd->split_buffers));
456  cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
457  tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
458  gso_size, &dst_ptr, &dst_left,
459  next_tcp_seq, default_bflags, gho);
460  }
461  }
462 
/* Last segment keeps the original TCP flags (FIN/PSH restored). */
463  tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);
464 
465  n_tx_bytes += cdb0->current_length;
466  }
467  n_tx_bytes += b0->current_length;
468  return n_tx_bytes;
469 }
470 
/* Drop a single buffer with the given GSO error code (presumably also bumping
   the per-interface TX-error simple counter — the counter lines 476-482 and
   the vlib_error_drop_buffers call head (484) are missing from this extracted
   copy; confirm upstream). */
473  vlib_node_runtime_t * node, u32 * pbi0,
474  u32 sw_if_index, u32 drop_error_code)
475 {
477 
479  cm =
483 
485  /* buffer stride */ 1,
486  /* n_buffers */ 1, GSO_NEXT_DROP, node->node_index,
487  drop_error_code);
488 }
489 
/* Main GSO worker, specialised at compile time on is_l2/is_ip4/is_ip6 and
   do_segmentation. Fast quad-loop passes non-GSO traffic straight through
   (falling back to the scalar loop when a buffer targets an interface without
   TCP-GSO capability); the scalar loop performs software segmentation via
   tso_segment_buffer, applies VXLAN/IPIP outer-header fixups, enqueues the
   split buffers and frees the original.
   NOTE(review): many lines are missing from this extracted copy — parts of
   the signature (490-495), locals (500, 503-507), vlib_get_buffers (509-510),
   the hi0..hi3 capability tests (549, 557, 564, 572), enqueue macros
   (625, 755, 761, 764, 777), the tunnel checks (687, 694, 713) — confirm all
   against upstream before relying on this listing. */
494  vnet_main_t * vnm,
496  int is_l2, int is_ip4, int is_ip6, int do_segmentation)
497 {
498  u32 *to_next;
499  u32 next_index = node->cached_next_index;
501  u32 n_left_from = frame->n_vectors;
502  u32 *from_end = from + n_left_from;
508 
510 
511  while (n_left_from > 0)
512  {
513  u32 n_left_to_next;
514 
515  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
516 
/* Quad loop: only taken on the non-segmenting specialisation; bails out
   to the scalar loop when a GSO buffer needs software segmentation. */
517  if (!do_segmentation)
518  while (from + 8 <= from_end && n_left_to_next >= 4)
519  {
520  u32 bi0, bi1, bi2, bi3;
521  u32 next0, next1, next2, next3;
522  u32 swif0, swif1, swif2, swif3;
523  gso_trace_t *t0, *t1, *t2, *t3;
524  vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;
525 
526  /* Prefetch next iteration. */
527  vlib_prefetch_buffer_header (b[4], LOAD);
528  vlib_prefetch_buffer_header (b[5], LOAD);
529  vlib_prefetch_buffer_header (b[6], LOAD);
530  vlib_prefetch_buffer_header (b[7], LOAD);
531 
532  bi0 = from[0];
533  bi1 = from[1];
534  bi2 = from[2];
535  bi3 = from[3];
536  to_next[0] = bi0;
537  to_next[1] = bi1;
538  to_next[2] = bi2;
539  to_next[3] = bi3;
540 
541  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
542  swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
543  swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
544  swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];
545 
546  if (PREDICT_FALSE (hi->sw_if_index != swif0))
547  {
548  hi0 = vnet_get_sup_hw_interface (vnm, swif0);
550  0 &&
551  (b[0]->flags & VNET_BUFFER_F_GSO))
552  break;
553  }
554  if (PREDICT_FALSE (hi->sw_if_index != swif1))
555  {
556  hi1 = vnet_get_sup_hw_interface (vnm, swif1);
558  (b[1]->flags & VNET_BUFFER_F_GSO))
559  break;
560  }
561  if (PREDICT_FALSE (hi->sw_if_index != swif2))
562  {
563  hi2 = vnet_get_sup_hw_interface (vnm, swif2);
565  0 &&
566  (b[2]->flags & VNET_BUFFER_F_GSO))
567  break;
568  }
569  if (PREDICT_FALSE (hi->sw_if_index != swif3))
570  {
571  hi3 = vnet_get_sup_hw_interface (vnm, swif3);
573  (b[3]->flags & VNET_BUFFER_F_GSO))
574  break;
575  }
576 
577  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
578  {
579  t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
580  t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
581  t0->gso_size = vnet_buffer2 (b[0])->gso_size;
582  t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
583  vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
584  is_ip4, is_ip6);
585  }
586  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
587  {
588  t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
589  t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
590  t1->gso_size = vnet_buffer2 (b[1])->gso_size;
591  t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
592  vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
593  is_ip4, is_ip6);
594  }
595  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
596  {
597  t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
598  t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
599  t2->gso_size = vnet_buffer2 (b[2])->gso_size;
600  t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
601  vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
602  is_ip4, is_ip6);
603  }
604  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
605  {
606  t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
607  t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
608  t3->gso_size = vnet_buffer2 (b[3])->gso_size;
609  t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
610  vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
611  is_ip4, is_ip6);
612  }
613 
614  from += 4;
615  to_next += 4;
616  n_left_to_next -= 4;
617  n_left_from -= 4;
618 
619  next0 = next1 = 0;
620  next2 = next3 = 0;
621  vnet_feature_next (&next0, b[0]);
622  vnet_feature_next (&next1, b[1]);
623  vnet_feature_next (&next2, b[2]);
624  vnet_feature_next (&next3, b[3]);
626  n_left_to_next, bi0, bi1, bi2,
627  bi3, next0, next1, next2, next3);
628  b += 4;
629  }
630 
/* Scalar loop: handles remaining buffers and all software segmentation. */
631  while (from + 1 <= from_end && n_left_to_next > 0)
632  {
633  u32 bi0, swif0;
634  gso_trace_t *t0;
635  vnet_hw_interface_t *hi0;
636  u32 next0 = 0;
637  u32 do_segmentation0 = 0;
638 
639  swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
640  if (PREDICT_FALSE (hi->sw_if_index != swif0))
641  {
642  hi0 = vnet_get_sup_hw_interface (vnm, swif0);
643  if ((hi0->caps & VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) == 0 &&
644  (b[0]->flags & VNET_BUFFER_F_GSO))
645  do_segmentation0 = 1;
646  }
647  else
648  do_segmentation0 = do_segmentation;
649 
650  /* speculatively enqueue b0 to the current next frame */
651  to_next[0] = bi0 = from[0];
652  to_next += 1;
653  n_left_to_next -= 1;
654  from += 1;
655  n_left_from -= 1;
656 
657  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
658  {
659  t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
660  t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
661  t0->gso_size = vnet_buffer2 (b[0])->gso_size;
662  t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
663  vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
664  is_ip4, is_ip6);
665  }
666 
667  if (do_segmentation0)
668  {
669  if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
670  {
671  /*
672  * Undo the enqueue of the b0 - it is not going anywhere,
673  * and will be freed either after it's segmented or
674  * when dropped, if there is no buffers to segment into.
675  */
676  to_next -= 1;
677  n_left_to_next += 1;
678  /* undo the counting. */
679  generic_header_offset_t gho = { 0 };
680  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
681  u32 n_tx_bytes = 0;
682  u32 inner_is_ip6 = is_ip6;
683 
684  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
685  is_ip4, is_ip6);
686 
688  {
689  if (PREDICT_FALSE
690  (gho.gho_flags & (GHO_F_GRE_TUNNEL |
691  GHO_F_GENEVE_TUNNEL)))
692  {
693  /* not supported yet */
695  hi->sw_if_index,
696  GSO_ERROR_UNHANDLED_TYPE);
697  b += 1;
698  continue;
699  }
700 
701  vnet_get_inner_header (b[0], &gho);
702 
703  n_bytes_b0 -= gho.outer_hdr_sz;
704  inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
705  }
706 
707  n_tx_bytes =
708  tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
709  is_l2, inner_is_ip6);
710 
711  if (PREDICT_FALSE (n_tx_bytes == 0))
712  {
714  hi->sw_if_index,
715  GSO_ERROR_NO_BUFFERS);
716  b += 1;
717  continue;
718  }
719 
720 
721  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
722  {
723  vnet_get_outer_header (b[0], &gho);
724  n_tx_bytes +=
725  tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
726  }
727  else
728  if (PREDICT_FALSE
729  (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
730  GHO_F_IPIP6_TUNNEL)))
731  {
732  vnet_get_outer_header (b[0], &gho);
733  n_tx_bytes +=
734  tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
735  }
736 
737  u16 n_tx_bufs = vec_len (ptd->split_buffers);
738  u32 *from_seg = ptd->split_buffers;
739 
740  while (n_tx_bufs > 0)
741  {
742  u32 sbi0;
743  vlib_buffer_t *sb0;
744  while (n_tx_bufs > 0 && n_left_to_next > 0)
745  {
746  sbi0 = to_next[0] = from_seg[0];
747  sb0 = vlib_get_buffer (vm, sbi0);
748  ASSERT (sb0->current_length > 0);
749  to_next += 1;
750  from_seg += 1;
751  n_left_to_next -= 1;
752  n_tx_bufs -= 1;
753  next0 = 0;
754  vnet_feature_next (&next0, sb0);
756  next_index,
757  to_next,
758  n_left_to_next,
759  sbi0, next0);
760  }
762  n_left_to_next);
763  if (n_tx_bufs > 0)
765  to_next, n_left_to_next);
766  }
767  /* The buffers were enqueued. Reset the length */
768  _vec_len (ptd->split_buffers) = 0;
769  /* Free the now segmented buffer */
770  vlib_buffer_free_one (vm, bi0);
771  b += 1;
772  continue;
773  }
774  }
775 
776  vnet_feature_next (&next0, b[0]);
778  n_left_to_next, bi0, next0);
779  b += 1;
780  }
781  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
782  }
783 
784  return frame->n_vectors;
785 }
786 
/* Entry shim shared by the four node functions: looks up the hw interface of
   the first buffer's TX sw_if_index (lines 797-801 missing here) and calls
   vnet_gso_node_inline with do_segmentation chosen by whether that interface
   supports TCP GSO (the capability test on line 802-803 is missing from this
   extracted copy — confirm upstream). Returns 0 for an empty frame. */
789  vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
790  int is_ip4, int is_ip6)
791 {
792  vnet_main_t *vnm = vnet_get_main ();
794 
795  if (frame->n_vectors > 0)
796  {
801 
804  return vnet_gso_node_inline (vm, node, frame, vnm, hi,
805  is_l2, is_ip4, is_ip6,
806  /* do_segmentation */ 0);
807  else
808  return vnet_gso_node_inline (vm, node, frame, vnm, hi,
809  is_l2, is_ip4, is_ip6,
810  /* do_segmentation */ 1);
811  }
812  return 0;
813 }
814 
/* The four VLIB_NODE_FN entry points, one per (l2, ip4/ip6) combination,
   each delegating to vnet_gso_inline with the matching compile-time flags.
   NOTE(review): the VLIB_NODE_FN macro lines (815-816, 822-823, 829-830,
   836-837 of the original) are missing from this extracted copy. */
817 {
818  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
819  0 /* ip6 */ );
820 }
821 
824 {
825  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
826  1 /* ip6 */ );
827 }
828 
831 {
832  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
833  0 /* ip6 */ );
834 }
835 
838 {
839  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
840  1 /* ip6 */ );
841 }
842 
843 /* *INDENT-OFF* */
844 
/* Node registrations: one graph node per specialisation, all sharing the
   error strings and a single error-drop next node. Feature registrations
   below hook each node into its output feature arc.
   NOTE(review): the VLIB_REGISTER_NODE / VNET_FEATURE_INIT opening lines
   (845, 858, 871, 884, 897, 903) and the .vector_size-adjacent fields
   (848, 861, 874, 887) are missing from this extracted copy. */
846  .vector_size = sizeof (u32),
847  .format_trace = format_gso_trace,
849  .n_errors = ARRAY_LEN(gso_error_strings),
850  .error_strings = gso_error_strings,
851  .n_next_nodes = GSO_N_NEXT,
852  .next_nodes = {
853  [GSO_NEXT_DROP] = "error-drop",
854  },
855  .name = "gso-l2-ip4",
856 };
857 
859  .vector_size = sizeof (u32),
860  .format_trace = format_gso_trace,
862  .n_errors = ARRAY_LEN(gso_error_strings),
863  .error_strings = gso_error_strings,
864  .n_next_nodes = GSO_N_NEXT,
865  .next_nodes = {
866  [GSO_NEXT_DROP] = "error-drop",
867  },
868  .name = "gso-l2-ip6",
869 };
870 
872  .vector_size = sizeof (u32),
873  .format_trace = format_gso_trace,
875  .n_errors = ARRAY_LEN(gso_error_strings),
876  .error_strings = gso_error_strings,
877  .n_next_nodes = GSO_N_NEXT,
878  .next_nodes = {
879  [GSO_NEXT_DROP] = "error-drop",
880  },
881  .name = "gso-ip4",
882 };
883 
885  .vector_size = sizeof (u32),
886  .format_trace = format_gso_trace,
888  .n_errors = ARRAY_LEN(gso_error_strings),
889  .error_strings = gso_error_strings,
890  .n_next_nodes = GSO_N_NEXT,
891  .next_nodes = {
892  [GSO_NEXT_DROP] = "error-drop",
893  },
894  .name = "gso-ip6",
895 };
896 
898  .arc_name = "l2-output-ip4",
899  .node_name = "gso-l2-ip4",
900  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
901 };
902 
904  .arc_name = "l2-output-ip6",
905  .node_name = "gso-l2-ip6",
906  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
907 };
908 
909 VNET_FEATURE_INIT (gso_ip4_node, static) = {
910  .arc_name = "ip4-output",
911  .node_name = "gso-ip4",
912  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
913 };
914 
915 VNET_FEATURE_INIT (gso_ip6_node, static) = {
916  .arc_name = "ip6-output",
917  .node_name = "gso-ip6",
918  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
919 };
920 
921 /*
922  * fd.io coding-style-patch-verification: ON
923  *
924  * Local Variables:
925  * eval: (c-set-style "gnu")
926  * End:
927  */
vlib.h
tso_init_buf_from_template_base
static_always_inline void tso_init_buf_from_template_base(vlib_buffer_t *nb0, vlib_buffer_t *b0, u32 flags, u16 length)
Definition: node.c:245
gso_trace_t::flags
u32 flags
Definition: node.c:54
vlib_buffer_t::next_buffer
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
im
vnet_interface_main_t * im
Definition: interface_output.c:415
vlib_buffer_free
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:979
tso_segment_ipip_tunnel_fixup
static_always_inline u16 tso_segment_ipip_tunnel_fixup(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *sb0, generic_header_offset_t *gho)
Definition: node.c:84
udp_header_t::length
u16 length
Definition: udp_packet.h:51
thread_index
u32 thread_index
Definition: nat44_ei_hairpinning.c:495
bufs
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
Definition: nat44_ei_out2in.c:717
tso_alloc_tx_bufs
static_always_inline u16 tso_alloc_tx_bufs(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *b0, u32 n_bytes_b0, u16 l234_sz, u16 gso_size, u16 first_data_size, generic_header_offset_t *gho)
Definition: node.c:199
TCP_FLAG_FIN
#define TCP_FLAG_FIN
Definition: fa_node.h:12
vlib_prefetch_buffer_header
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
frame
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
format_generic_header_offset
static_always_inline u8 * format_generic_header_offset(u8 *s, va_list *args)
Definition: hdr_offset_parser.h:81
generic_header_offset_t::outer_hdr_sz
u16 outer_hdr_sz
Definition: hdr_offset_parser.h:71
vnet_hw_interface_t::caps
vnet_hw_interface_capabilities_t caps
Definition: interface.h:645
ip6_tcp_udp_icmp_compute_checksum
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
Definition: ip6_forward.c:1096
ip4
vl_api_ip4_address_t ip4
Definition: one.api:376
next_index
nat44_ei_hairpin_src_next_t next_index
Definition: nat44_ei_hairpinning.c:412
vlib_get_buffer
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
generic_header_offset_t::l4_hdr_offset
i16 l4_hdr_offset
Definition: hdr_offset_parser.h:74
tcp_header_t
struct _tcp_header tcp_header_t
vlib_get_buffers
vlib_get_buffers(vm, from, b, n_left_from)
VLIB_NODE_TYPE_INTERNAL
@ VLIB_NODE_TYPE_INTERNAL
Definition: node.h:72
GSO_N_ERROR
@ GSO_N_ERROR
Definition: node.c:43
VLIB_FRAME_SIZE
#define VLIB_FRAME_SIZE
Definition: node.h:368
node
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
gso_trace_t::gho
generic_header_offset_t gho
Definition: node.c:57
vnet_interface_per_thread_data_t
Definition: interface.h:981
vnet_interface_main_t
Definition: interface.h:990
vlib_validate_buffer_enqueue_x4
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
Definition: buffer_node.h:140
u16
unsigned short u16
Definition: types.h:57
gso_l2_ip4_node
vlib_node_registration_t gso_l2_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip4_node)
Definition: node.c:845
gso_l2_ip6_node
vlib_node_registration_t gso_l2_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip6_node)
Definition: node.c:858
IP_LOOKUP_NEXT_MIDCHAIN
@ IP_LOOKUP_NEXT_MIDCHAIN
This packets follow a mid-chain adjacency.
Definition: adj.h:76
GSO_NEXT_DROP
@ GSO_NEXT_DROP
Definition: node.c:48
vm
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
foreach_gso_error
#define foreach_gso_error
Definition: node.c:28
gso_trace_t
Definition: node.c:52
gso_trace_t::gso_size
u16 gso_size
Definition: node.c:55
generic_header_offset_t::gho_flags
gho_flag_t gho_flags
Definition: hdr_offset_parser.h:77
generic_header_offset_t::outer_l3_hdr_offset
i16 outer_l3_hdr_offset
Definition: hdr_offset_parser.h:68
vnet_interface_main_t::sw_if_counters
vlib_simple_counter_main_t * sw_if_counters
Definition: interface.h:1023
vlib_buffer_t::trace_handle
u32 trace_handle
Specifies trace buffer handle if VLIB_PACKET_IS_TRACED flag is set.
Definition: buffer.h:172
hi
vl_api_ip4_address_t hi
Definition: arp.api:37
generic_header_offset_t::outer_l4_hdr_offset
i16 outer_l4_hdr_offset
Definition: hdr_offset_parser.h:69
vnet_buffer2
#define vnet_buffer2(b)
Definition: buffer.h:505
vlib_frame_t
Definition: node.h:372
vlib_buffer_length_in_chain
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:433
clib_memcpy_fast
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
udp_header_t
Definition: udp_packet.h:45
ip4_header_t
Definition: ip4_packet.h:87
ethernet.h
gso_ip4_node
vlib_node_registration_t gso_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_ip4_node)
Definition: node.c:871
vlib_buffer_t::current_data
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
vlib_increment_simple_counter
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
Definition: counter.h:74
tso_segment_buffer
static_always_inline u32 tso_segment_buffer(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, u32 sbi0, vlib_buffer_t *sb0, generic_header_offset_t *gho, u32 n_bytes_b0, int is_l2, int is_ip6)
Allocate the necessary number of ptd->split_buffers, and segment the possibly chained buffer(s) from ...
Definition: node.c:352
gso_trace_t::gso_l4_hdr_sz
u8 gso_l4_hdr_sz
Definition: node.c:56
vec_len
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
Definition: vec_bootstrap.h:142
tso_segment_vxlan_tunnel_headers_fixup
static_always_inline void tso_segment_vxlan_tunnel_headers_fixup(vlib_main_t *vm, vlib_buffer_t *b, generic_header_offset_t *gho)
Definition: node.c:127
vlib_buffer_t::error
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:145
error.h
GHO_F_TUNNEL
#define GHO_F_TUNNEL
Definition: hdr_offset_parser.h:49
VLIB_NODE_FN
#define VLIB_NODE_FN(node)
Definition: node.h:202
feature.h
ip_adjacency_t_::lookup_next_index
ip_lookup_next_t lookup_next_index
Next hop after ip4-lookup.
Definition: adj.h:337
VNET_FEATURE_INIT
VNET_FEATURE_INIT(lb_nat4_in2out_node_fn, static)
vlib_buffer_alloc
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:702
CLIB_UNUSED
#define CLIB_UNUSED(x)
Definition: clib.h:90
vnet_buffer
#define vnet_buffer(b)
Definition: buffer.h:441
vec_elt_at_index
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
Definition: vec_bootstrap.h:203
vnet_get_main
vnet_main_t * vnet_get_main(void)
Definition: pnat_test_stubs.h:56
vnet_buffer_offload_flags_clear
static_always_inline void vnet_buffer_offload_flags_clear(vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
Definition: buffer.h:544
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO
@ VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO
Definition: interface.h:537
vnet_feature_next
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:322
ARRAY_LEN
#define ARRAY_LEN(x)
Definition: clib.h:70
vlib_frame_vector_args
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
ip4_tcp_udp_compute_checksum
u16 ip4_tcp_udp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip4_header_t *ip0)
Definition: pnat_test_stubs.h:59
gso_error_strings
static char * gso_error_strings[]
Definition: node.c:32
static_always_inline
#define static_always_inline
Definition: clib.h:112
VNET_HW_INTERFACE_CAP_SUPPORTS_VXLAN_TNL_GSO
@ VNET_HW_INTERFACE_CAP_SUPPORTS_VXLAN_TNL_GSO
Definition: interface.h:539
udp_packet.h
uword
u64 uword
Definition: types.h:112
vlib_error_drop_buffers
uword vlib_error_drop_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 next_buffer_stride, u32 n_buffers, u32 next_index, u32 drop_error_node, u32 drop_error_code)
Definition: error.c:45
vlib_main_t::thread_index
u32 thread_index
Definition: main.h:215
vnet_gso_inline
static_always_inline uword vnet_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_l2, int is_ip4, int is_ip6)
Definition: node.c:788
GSO_N_NEXT
@ GSO_N_NEXT
Definition: node.c:49
gso_ip6_node
vlib_node_registration_t gso_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_ip6_node)
Definition: node.c:884
format_gso_trace
static u8 * format_gso_trace(u8 *s, va_list *args)
Definition: node.c:61
gso.h
cm
vnet_feature_config_main_t * cm
Definition: nat44_ei_hairpinning.c:594
vec_validate
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment)
Definition: vec.h:523
vnet_gso_node_inline
static_always_inline uword vnet_gso_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int is_l2, int is_ip4, int is_ip6, int do_segmentation)
Definition: node.c:491
vlib_buffer_t::flow_id
u32 flow_id
Generic flow identifier.
Definition: buffer.h:136
ip_adjacency_t_
IP unicast adjacency.
Definition: adj.h:235
clib_min
#define clib_min(x, y)
Definition: clib.h:342
tso_init_buf_from_template
static_always_inline void tso_init_buf_from_template(vlib_main_t *vm, vlib_buffer_t *nb0, vlib_buffer_t *b0, u16 template_data_sz, u16 gso_size, u8 **p_dst_ptr, u16 *p_dst_left, u32 next_tcp_seq, u32 flags, generic_header_offset_t *gho)
Definition: node.c:267
vnet_get_outer_header
static_always_inline void vnet_get_outer_header(vlib_buffer_t *b0, generic_header_offset_t *gho)
Definition: hdr_offset_parser.h:143
vlib_node_registration_t
struct _vlib_node_registration vlib_node_registration_t
vlib_buffer_t::current_length
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
generic_header_offset_t::l3_hdr_offset
i16 l3_hdr_offset
Definition: hdr_offset_parser.h:73
udp_header_t::checksum
u16 checksum
Definition: udp_packet.h:55
vlib_buffer_t::current_config_index
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:156
is_ip6
bool is_ip6
Definition: ip.api:43
vnet_hw_interface_t
Definition: interface.h:638
vnet_main_t
Definition: vnet.h:76
ip_adjacency_t_::sub_type
union ip_adjacency_t_::@144 sub_type
vlib_validate_buffer_enqueue_x1
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
size
u32 size
Definition: vhost_user.h:125
vnet_interface_per_thread_data_t::split_buffers
u32 * split_buffers
Definition: interface.h:984
VNET_INTERFACE_COUNTER_TX_ERROR
@ VNET_INTERFACE_COUNTER_TX_ERROR
Definition: interface.h:911
format
description fragment has unexpected format
Definition: map.api:433
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
gso_next_t
gso_next_t
Definition: node.c:46
vlib_put_next_frame
vlib_put_next_frame(vm, node, next_index, 0)
vlib_buffer_get_default_data_size
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:122
u32
unsigned int u32
Definition: types.h:88
vnet_get_sup_hw_interface
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
Definition: interface_funcs.h:92
vnet_interface_main_t::per_thread_data
vnet_interface_per_thread_data_t * per_thread_data
Definition: interface.h:1040
ip6
vl_api_ip6_address_t ip6
Definition: one.api:424
generic_header_offset_t
Definition: hdr_offset_parser.h:65
tso_segment_vxlan_tunnel_fixup
static_always_inline u16 tso_segment_vxlan_tunnel_fixup(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *sb0, generic_header_offset_t *gho)
Definition: node.c:176
drop_one_buffer_and_count
static_always_inline void drop_one_buffer_and_count(vlib_main_t *vm, vnet_main_t *vnm, vlib_node_runtime_t *node, u32 *pbi0, u32 sw_if_index, u32 drop_error_code)
Definition: node.c:472
generic_header_offset_t::l2_hdr_offset
i16 l2_hdr_offset
Definition: hdr_offset_parser.h:72
ip6_header_t
Definition: ip6_packet.h:294
ip4.h
vnet_get_inner_header
static_always_inline void vnet_get_inner_header(vlib_buffer_t *b0, generic_header_offset_t *gho)
Definition: hdr_offset_parser.h:134
length
char const int length
Definition: cJSON.h:163
vlib_main_t
Definition: main.h:102
vlib_node_t
Definition: node.h:247
vlib_simple_counter_main_t
A collection of simple counters.
Definition: counter.h:57
vlib_add_trace
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:628
b
vlib_buffer_t ** b
Definition: nat44_ei_out2in.c:717
VNET_FEATURES
#define VNET_FEATURES(...)
Definition: feature.h:470
u8
unsigned char u8
Definition: types.h:56
vnet_generic_header_offset_parser
static_always_inline void vnet_generic_header_offset_parser(vlib_buffer_t *b0, generic_header_offset_t *gho, int is_l2, int is_ip4, int is_ip6)
Definition: hdr_offset_parser.h:471
vlib_buffer_get_current
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:257
ip4_header_checksum
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:314
icmp46_packet.h
i
int i
Definition: flowhash_template.h:376
generic_header_offset_t::hdr_sz
u16 hdr_sz
Definition: hdr_offset_parser.h:76
vlib_buffer_free_one
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer Shorthand to free a single buffer chain.
Definition: buffer_funcs.h:1012
vnet.h
gso_error_t
gso_error_t
Definition: node.c:38
vlib_node_runtime_t
Definition: node.h:454
clib_panic
#define clib_panic(format, args...)
Definition: error.h:72
proto
vl_api_ip_proto_t proto
Definition: acl_types.api:51
from
from
Definition: nat44_ei_hairpinning.c:415
tso_fixup_segmented_buf
static_always_inline void tso_fixup_segmented_buf(vlib_main_t *vm, vlib_buffer_t *b0, u8 tcp_flags, int is_l2, int is_ip6, generic_header_offset_t *gho)
Definition: node.c:287
vlib_buffer_t::total_length_not_including_first_buffer
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:176
sw_if_index
vl_api_interface_index_t sw_if_index
Definition: wireguard.api:34
vlib_get_next_frame
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:395
ip6.h
VLIB_TX
@ VLIB_TX
Definition: defs.h:47
hdr_offset_parser.h
adj_get
static ip_adjacency_t * adj_get(adj_index_t adj_index)
Get a pointer to an adjacency object from its index.
Definition: adj.h:470
n_left_from
n_left_from
Definition: nat44_ei_hairpinning.c:416
vlib_buffer_t::opaque
u32 opaque[10]
Opaque data used by sub-graphs for their own purposes.
Definition: buffer.h:162
type
vl_api_fib_path_type_t type
Definition: fib_types.api:123
vnet_main_t::interface_main
vnet_interface_main_t interface_main
Definition: vnet.h:81
vlib_buffer_t::flags
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
Definition: buffer.h:133
vlib_buffer_t
VLIB buffer representation.
Definition: buffer.h:111
VLIB_REGISTER_NODE
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
ip_adjacency_t_::midchain
struct ip_adjacency_t_::@144::@146 midchain
IP_LOOKUP_NEXT_MIDCHAIN.
flags
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105