FD.io VPP v21.01.1
Vector Packet Processing
node.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
                  t->gso_size, t->gso_l4_hdr_sz,
                  format_generic_header_offset, &t->gho);
    }
  else
    {
      s = format (s, "non-gso buffer\n%U", format_generic_header_offset,
                  &t->gho);
    }

  return s;
}

static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
                               vnet_interface_per_thread_data_t * ptd,
                               vlib_buffer_t * sb0,
                               generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          ip4->length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l3_hdr_offset);
          ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          ip6->payload_length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l4_hdr_offset);
        }

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
                                        generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }
  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          udp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }
}

static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
                                vnet_interface_per_thread_data_t * ptd,
                                vlib_buffer_t * sb0,
                                generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size,
                   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer, except for the 1st
   * segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is hdr_sz from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /*
   * The first segmented buffer's length is calculated separately, as it
   * may contain less data than gso_size (when gso_size is greater than
   * the current_length of the 1st buffer in the GSO chain) and/or less
   * than the size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * Total packet length minus the first packet length (including the
   * l234 header), rounded-up division.
   */
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}
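
/*
 * Illustrative sketch, not part of the original file: the rounded-up
 * division above, isolated into a hypothetical helper.  Assuming a
 * 2048-byte default buffer data size so that size == gso_size == 1448,
 * with l234_sz = 66, a 10066-byte GSO chain gives first_packet_length =
 * 66 + 1448 = 1514 and n_bufs = 1 + (10066 - 1514 + 1447) / 1448 = 7
 * split buffers.
 */
static_always_inline u16
tso_sketch_n_split_bufs (u32 n_bytes_b0, u16 first_packet_length, u16 size)
{
  /* one buffer for the first segment, plus a rounded-up division of
     the remaining payload by the per-buffer data size */
  return 1 + (n_bytes_b0 - first_packet_length + (size - 1)) / size;
}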

static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
}

static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
                         int is_l2, int is_ip6, generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
        clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
        {
          int bogus = 0;
          tcp->checksum = 0;
          tcp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
          b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
        }
    }
  else
    {
      ip4->length =
        clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
        ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
        {
          tcp->checksum = 0;
          tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
        }
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
          adj0->sub_type.midchain.fixup_func)
        /* calls e.g. ipip44_fixup */
        adj0->sub_type.midchain.fixup_func
          (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * them.
 *
 * Return the cumulative number of bytes sent, or zero
 * if allocation failed.
 */

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0,
                    generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
                    int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
                               gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

      /* an arbitrarily large number to catch runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
                                       is_ip6, gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}
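
/*
 * Usage sketch, not part of the original file: the calling sequence
 * around tso_segment_buffer, mirroring the non-tunnel path in
 * vnet_gso_node_inline below.  The helper name is hypothetical; a zero
 * return means the split buffers could not be allocated and the caller
 * drops the original buffer.
 */
static_always_inline u32
tso_segment_buffer_usage_sketch (vlib_main_t * vm,
                                 vnet_interface_per_thread_data_t * ptd,
                                 u32 bi0, vlib_buffer_t * b0,
                                 int is_l2, int is_ip4, int is_ip6)
{
  generic_header_offset_t gho = { 0 };
  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b0);

  /* fill in the l2/l3/l4 header offsets for this buffer */
  vnet_generic_header_offset_parser (b0, &gho, is_l2, is_ip4, is_ip6);

  /* segment into ptd->split_buffers; returns 0 on allocation failure */
  return tso_segment_buffer (vm, ptd, bi0, b0, &gho, n_bytes_b0,
                             is_l2, is_ip6);
}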

static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if ((hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if ((hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
                                                   is_ip4, is_ip6);
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                 is_ip4, is_ip6);
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there are no buffers to segment into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  generic_header_offset_t gho = { 0 };
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;
                  u32 inner_is_ip6 = is_ip6;

                  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
                                                     is_ip4, is_ip6);

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
                    {
                      if (PREDICT_FALSE
                          (gho.gho_flags & (GHO_F_GRE_TUNNEL |
                                            GHO_F_GENEVE_TUNNEL)))
                        {
                          /* not supported yet */
                          drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                     hi->sw_if_index,
                                                     VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
                          b += 1;
                          continue;
                        }

                      vnet_get_inner_header (b[0], &gho);

                      n_bytes_b0 -= gho.outer_hdr_sz;
                      inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
                    }

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_l2, inner_is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
                    }
                  else
                    if (PREDICT_FALSE
                        (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
                                          GHO_F_IPIP6_TUNNEL)))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      while (n_tx_bufs > 0 && n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          ASSERT (sb0->current_length > 0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          next0 = 0;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                      vlib_put_next_frame (vm, node, next_index,
                                           n_left_to_next);
                      if (n_tx_bufs > 0)
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now-segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}
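
/*
 * Illustrative sketch, not part of the original file: the speculative
 * enqueue / undo idiom used in the node above.  A buffer index is
 * written into the next frame up front; if the buffer turns out to
 * need segmentation, the enqueue is rolled back and the split buffers
 * are enqueued instead.  Both helper names are hypothetical.
 */
static_always_inline void
gso_sketch_speculative_enqueue (u32 bi0, u32 ** to_next,
                                u32 * n_left_to_next)
{
  /* speculatively enqueue bi0 to the current next frame */
  (*to_next)[0] = bi0;
  *to_next += 1;
  *n_left_to_next -= 1;
}

static_always_inline void
gso_sketch_undo_enqueue (u32 ** to_next, u32 * n_left_to_next)
{
  /* roll back the speculative enqueue */
  *to_next -= 1;
  *n_left_to_next += 1;
}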

static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
                 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */