FD.io VPP v20.05.1-6-gf53edbc3b
Vector Packet Processing
node.c
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>

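/* Per-packet trace record for the gso nodes */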
typedef struct
{
  u32 flags;
  u16 gso_size;
  u8 gso_l4_hdr_sz;
  generic_header_offset_t gho;
} gso_trace_t;

static u8 *
format_gso_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  gso_trace_t *t = va_arg (*args, gso_trace_t *);

  if (t->flags & VNET_BUFFER_F_GSO)
    {
      s = format (s, "gso_sz %d gso_l4_hdr_sz %d %U",
                  t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
                  &t->gho);
    }
  else
    {
      s =
        format (s, "non-gso buffer %U", format_generic_header_offset,
                &t->gho);
    }

  return s;
}

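/*
 * After segmentation of an IP-IP encapsulated packet, re-copy the
 * outer headers from the original buffer into every segment and patch
 * the outer IPv4 length/checksum or the outer IPv6 payload length.
 * Returns the total number of outer-header bytes added across all
 * segments.
 */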
static_always_inline u16
tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
                               vnet_interface_per_thread_data_t * ptd,
                               vlib_buffer_t * sb0,
                               generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      ip4_header_t *ip4 =
        (ip4_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);
      ip6_header_t *ip6 =
        (ip6_header_t *) (vlib_buffer_get_current (b0) +
                          gho->outer_l3_hdr_offset);

      if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          ip4->length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l3_hdr_offset);
          ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          ip6->payload_length =
            clib_host_to_net_u16 (b0->current_length -
                                  gho->outer_l4_hdr_offset);
        }

      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

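/*
 * Patch the outer headers of a single VXLAN-encapsulated segment:
 * fix up the outer IPv4/IPv6 length fields and recompute the outer
 * IP and UDP checksums, clearing the UDP checksum-offload flag.
 */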
static_always_inline void
tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
                                        generic_header_offset_t * gho)
{
  u8 proto = 0;
  ip4_header_t *ip4 = 0;
  ip6_header_t *ip6 = 0;
  udp_header_t *udp = 0;

  ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
  udp =
    (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);

  if (gho->gho_flags & GHO_F_OUTER_IP4)
    {
      proto = ip4->protocol;
      ip4->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (gho->gho_flags & GHO_F_OUTER_IP6)
    {
      proto = ip6->protocol;
      ip6->payload_length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
    }
  if (proto == IP_PROTOCOL_UDP)
    {
      int bogus;
      udp->length =
        clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
      udp->checksum = 0;
      if (gho->gho_flags & GHO_F_OUTER_IP6)
        {
          udp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
        }
      else if (gho->gho_flags & GHO_F_OUTER_IP4)
        {
          udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
        }
      b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
    }
}

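/*
 * After segmentation of a VXLAN-encapsulated packet, re-copy the
 * outer headers from the original buffer into every segment and fix
 * them up. Returns the total number of outer-header bytes added.
 */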
static_always_inline u16
tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
                                vnet_interface_per_thread_data_t * ptd,
                                vlib_buffer_t * sb0,
                                generic_header_offset_t * gho)
{
  u16 n_tx_bufs = vec_len (ptd->split_buffers);
  u16 i = 0, n_tx_bytes = 0;

  while (i < n_tx_bufs)
    {
      vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
      vnet_get_outer_header (b0, gho);
      clib_memcpy_fast (vlib_buffer_get_current (b0),
                        vlib_buffer_get_current (sb0), gho->outer_hdr_sz);

      tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
      n_tx_bytes += gho->outer_hdr_sz;
      i++;
    }
  return n_tx_bytes;
}

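/*
 * Compute the number of buffers the segmentation of b0 requires and
 * allocate them into ptd->split_buffers. Returns the number of
 * buffers allocated, or 0 on failure (a partial allocation is freed).
 */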
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
                   vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size,
                   generic_header_offset_t * gho)
{
  u16 n_alloc, size;
  u16 first_packet_length = l234_sz + first_data_size;

  /*
   * size is the amount of data per segmented buffer except the 1st
   * segmented buffer.
   * l2_hdr_offset is an offset == current_data of vlib_buffer_t.
   * l234_sz is hdr_sz from l2_hdr_offset.
   */
  size =
    clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
              - gho->l2_hdr_offset);

  /*
   * First segmented buffer length is calculated separately.
   * It may contain less data than gso_size (when gso_size is
   * greater than current_length of the 1st buffer from the GSO
   * buffer chain) and/or less than the size calculated above.
   */
  u16 n_bufs = 1;

  /*
   * Total packet length minus first packet length including l234 header.
   * rounded-up division
   */
  ASSERT (n_bytes_b0 > first_packet_length);
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

  vec_validate (ptd->split_buffers, n_bufs - 1);

  n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
  if (n_alloc < n_bufs)
    {
      vlib_buffer_free (vm, ptd->split_buffers, n_alloc);
      return 0;
    }
  return n_alloc;
}

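/*
 * Initialize a freshly allocated segment buffer from the original
 * buffer: copy the relevant metadata and the first 'length' bytes of
 * packet data (headers, plus payload for the first segment).
 */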
static_always_inline void
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
                                 u32 flags, u16 length)
{
  /* copying objects from cacheline 0 */
  nb0->current_data = b0->current_data;
  nb0->current_length = length;
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
  nb0->flow_id = b0->flow_id;
  nb0->error = b0->error;
  nb0->current_config_index = b0->current_config_index;
  clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));

  /* copying objects from cacheline 1 */
  nb0->trace_handle = b0->trace_handle;
  nb0->total_length_not_including_first_buffer = 0;

  /* copying data */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
                    vlib_buffer_get_current (b0), length);
}

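/*
 * Initialize a subsequent segment from the header template, compute
 * the destination pointer and the payload space left in the segment,
 * and stamp the TCP sequence number for this segment.
 */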
static_always_inline void
tso_init_buf_from_template (vlib_main_t * vm, vlib_buffer_t * nb0,
                            vlib_buffer_t * b0, u16 template_data_sz,
                            u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
                            u32 next_tcp_seq, u32 flags,
                            generic_header_offset_t * gho)
{
  tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);

  *p_dst_left =
    clib_min (gso_size,
              vlib_buffer_get_default_data_size (vm) - (template_data_sz +
                                                        nb0->current_data));
  *p_dst_ptr = vlib_buffer_get_current (nb0) + template_data_sz;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (nb0) + gho->l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
}

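/*
 * Finalize one segment: set the TCP flags, rewrite the IP length
 * fields, recompute the checksums that were expected to be offloaded,
 * and, for routed non-tunneled packets, run the midchain adjacency
 * fixup if one is attached.
 */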
static_always_inline void
tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
                         int is_l2, int is_ip6, generic_header_offset_t * gho)
{
  ip4_header_t *ip4 =
    (ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  ip6_header_t *ip6 =
    (ip6_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (b0) + gho->l4_hdr_offset);

  tcp->flags = tcp_flags;

  if (is_ip6)
    {
      ip6->payload_length =
        clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
      if (gho->gho_flags & GHO_F_TCP)
        {
          int bogus = 0;
          tcp->checksum = 0;
          tcp->checksum =
            ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
          b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
        }
    }
  else
    {
      ip4->length =
        clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
      if (gho->gho_flags & GHO_F_IP4)
        ip4->checksum = ip4_header_checksum (ip4);
      if (gho->gho_flags & GHO_F_TCP)
        {
          tcp->checksum = 0;
          tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
        }
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
      b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
    }

  if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
    {
      u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];

      ip_adjacency_t *adj0 = adj_get (adj_index0);

      if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
          adj0->sub_type.midchain.fixup_func)
        /* calls e.g. ipip44_fixup */
        adj0->sub_type.midchain.fixup_func
          (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
    }
}

/**
 * Allocate the necessary number of ptd->split_buffers,
 * and segment the possibly chained buffer(s) from b0 into
 * them.
 *
 * Return the cumulative number of bytes sent, or zero
 * if allocation failed.
 */

static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                    u32 sbi0, vlib_buffer_t * sb0,
                    generic_header_offset_t * gho, u32 n_bytes_b0, int is_l2,
                    int is_ip6)
{
  u32 n_tx_bytes = 0;
  u16 gso_size = vnet_buffer2 (sb0)->gso_size;

  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;

  tcp_header_t *tcp =
    (tcp_header_t *) (vlib_buffer_get_current (sb0) + gho->l4_hdr_offset);
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* store original flags for last packet and reset FIN and PSH */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  tcp->checksum = 0;

  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  u16 l234_sz = gho->hdr_sz;
  int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE
      (!tso_alloc_tx_bufs
       (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
    return 0;

  vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  if (total_src_left)
    {
      /* Need to copy more segments */
      u8 *src_ptr, *dst_ptr;
      u16 src_left, dst_left;
      /* current source buffer */
      vlib_buffer_t *csb0 = sb0;
      u32 csbi0 = sbi0;
      /* current dest buffer */
      vlib_buffer_t *cdb0;
      u16 dbi = 1;              /* the buffer [0] is b0 */

      src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
      src_left = sb0->current_length - l234_sz - first_data_size;

      tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_l2, is_ip6,
                               gho);

      /* grab a second buffer and prepare the loop */
      ASSERT (dbi < vec_len (ptd->split_buffers));
      cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
      tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                                  &dst_left, next_tcp_seq, default_bflags,
                                  gho);

      /* an arbitrary large number to catch the runaway loops */
      int nloops = 2000;
      while (total_src_left)
        {
          if (nloops-- <= 0)
            clib_panic ("infinite loop detected");
          u16 bytes_to_copy = clib_min (src_left, dst_left);

          clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

          src_left -= bytes_to_copy;
          src_ptr += bytes_to_copy;
          total_src_left -= bytes_to_copy;
          dst_left -= bytes_to_copy;
          dst_ptr += bytes_to_copy;
          next_tcp_seq += bytes_to_copy;
          cdb0->current_length += bytes_to_copy;

          if (0 == src_left)
            {
              int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
              u32 next_bi = csb0->next_buffer;

              /* init src to the next buffer in chain */
              if (has_next)
                {
                  csbi0 = next_bi;
                  csb0 = vlib_get_buffer (vm, csbi0);
                  src_left = csb0->current_length;
                  src_ptr = vlib_buffer_get_current (csb0);
                }
              else
                {
                  ASSERT (total_src_left == 0);
                  break;
                }
            }
          if (0 == dst_left && total_src_left)
            {
              n_tx_bytes += cdb0->current_length;
              tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_l2,
                                       is_ip6, gho);
              ASSERT (dbi < vec_len (ptd->split_buffers));
              cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
              tso_init_buf_from_template (vm, cdb0, b0, l234_sz,
                                          gso_size, &dst_ptr, &dst_left,
                                          next_tcp_seq, default_bflags, gho);
            }
        }

      tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_l2, is_ip6, gho);

      n_tx_bytes += cdb0->current_length;
    }
  n_tx_bytes += b0->current_length;
  return n_tx_bytes;
}

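/*
 * Drop a single buffer, incrementing the interface TX-error counter
 * and attributing the error to the calling node.
 */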
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
                           vlib_node_runtime_t * node, u32 * pbi0,
                           u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;

  vlib_simple_counter_main_t *cm;
  cm =
    vec_elt_at_index (vnm->interface_main.sw_if_counters,
                      VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  vlib_error_drop_buffers (vm, node, pbi0,
                           /* buffer stride */ 1,
                           /* n_buffers */ 1,
                           VNET_INTERFACE_OUTPUT_NEXT_DROP,
                           node->node_index, drop_error_code);
}

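/*
 * Main GSO loop. When do_segmentation is 0 the quad-loop passes
 * buffers straight through; it bails out to the single-buffer loop as
 * soon as a buffer needs software segmentation (GSO flag set and the
 * egress interface lacks GSO support). Segmented packets are built in
 * ptd->split_buffers and enqueued in place of the original buffer.
 */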
static_always_inline uword
vnet_gso_node_inline (vlib_main_t * vm,
                      vlib_node_runtime_t * node,
                      vlib_frame_t * frame,
                      vnet_main_t * vnm,
                      vnet_hw_interface_t * hi,
                      int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
  u32 *to_next;
  u32 next_index = node->cached_next_index;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 *from_end = from + n_left_from;
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;
  vnet_interface_per_thread_data_t *ptd =
    vec_elt_at_index (im->per_thread_data, thread_index);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

  vlib_get_buffers (vm, from, b, n_left_from);

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            gso_trace_t *t0, *t1, *t2, *t3;
            vnet_hw_interface_t *hi0, *hi1, *hi2, *hi3;

            /* Prefetch next iteration. */
            vlib_prefetch_buffer_header (b[4], LOAD);
            vlib_prefetch_buffer_header (b[5], LOAD);
            vlib_prefetch_buffer_header (b[6], LOAD);
            vlib_prefetch_buffer_header (b[7], LOAD);

            bi0 = from[0];
            bi1 = from[1];
            bi2 = from[2];
            bi3 = from[3];
            to_next[0] = bi0;
            to_next[1] = bi1;
            to_next[2] = bi2;
            to_next[3] = bi3;

            swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
            swif1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
            swif2 = vnet_buffer (b[2])->sw_if_index[VLIB_TX];
            swif3 = vnet_buffer (b[3])->sw_if_index[VLIB_TX];

            if (PREDICT_FALSE (hi->sw_if_index != swif0))
              {
                hi0 = vnet_get_sup_hw_interface (vnm, swif0);
                if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[0]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif1))
              {
                hi1 = vnet_get_sup_hw_interface (vnm, swif1);
                if ((hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[1]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif2))
              {
                hi2 = vnet_get_sup_hw_interface (vnm, swif2);
                if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[2]->flags & VNET_BUFFER_F_GSO))
                  break;
              }
            if (PREDICT_FALSE (hi->sw_if_index != swif3))
              {
                hi3 = vnet_get_sup_hw_interface (vnm, swif3);
                if ((hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                    (b[3]->flags & VNET_BUFFER_F_GSO))
                  break;
              }

            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
                t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
                t0->gso_size = vnet_buffer2 (b[0])->gso_size;
                t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t1 = vlib_add_trace (vm, node, b[1], sizeof (t1[0]));
                t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
                t1->gso_size = vnet_buffer2 (b[1])->gso_size;
                t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t2 = vlib_add_trace (vm, node, b[2], sizeof (t2[0]));
                t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
                t2->gso_size = vnet_buffer2 (b[2])->gso_size;
                t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
                                                   is_ip4, is_ip6);
              }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              {
                t3 = vlib_add_trace (vm, node, b[3], sizeof (t3[0]));
                t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
                t3->gso_size = vnet_buffer2 (b[3])->gso_size;
                t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
                vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
                                                   is_ip4, is_ip6);
              }

            from += 4;
            to_next += 4;
            n_left_to_next -= 4;
            n_left_from -= 4;

            next0 = next1 = 0;
            next2 = next3 = 0;
            vnet_feature_next (&next0, b[0]);
            vnet_feature_next (&next1, b[1]);
            vnet_feature_next (&next2, b[2]);
            vnet_feature_next (&next3, b[3]);
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
            b += 4;
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          u32 bi0, swif0;
          gso_trace_t *t0;
          vnet_hw_interface_t *hi0;
          u32 next0 = 0;
          u32 do_segmentation0 = 0;

          swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
          if (PREDICT_FALSE (hi->sw_if_index != swif0))
            {
              hi0 = vnet_get_sup_hw_interface (vnm, swif0);
              if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
                  (b[0]->flags & VNET_BUFFER_F_GSO))
                do_segmentation0 = 1;
            }
          else
            do_segmentation0 = do_segmentation;

          /* speculatively enqueue b0 to the current next frame */
          to_next[0] = bi0 = from[0];
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              t0 = vlib_add_trace (vm, node, b[0], sizeof (t0[0]));
              t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
              t0->gso_size = vnet_buffer2 (b[0])->gso_size;
              t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
              vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
                                                 is_ip4, is_ip6);
            }

          if (do_segmentation0)
            {
              if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_GSO))
                {
                  /*
                   * Undo the enqueue of the b0 - it is not going anywhere,
                   * and will be freed either after it's segmented or
                   * when dropped, if there are no buffers to segment into.
                   */
                  to_next -= 1;
                  n_left_to_next += 1;
                  /* undo the counting. */
                  generic_header_offset_t gho = { 0 };
                  u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
                  u32 n_tx_bytes = 0;
                  u32 inner_is_ip6 = is_ip6;

                  vnet_generic_header_offset_parser (b[0], &gho, is_l2,
                                                     is_ip4, is_ip6);

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
                    {
                      if (PREDICT_FALSE
                          (gho.gho_flags & (GHO_F_GRE_TUNNEL |
                                            GHO_F_GENEVE_TUNNEL)))
                        {
                          /* not supported yet */
                          drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                     hi->sw_if_index,
                                                     VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
                          b += 1;
                          continue;
                        }

                      vnet_get_inner_header (b[0], &gho);

                      n_bytes_b0 -= gho.outer_hdr_sz;
                      inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
                    }

                  n_tx_bytes =
                    tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
                                        is_l2, inner_is_ip6);

                  if (PREDICT_FALSE (n_tx_bytes == 0))
                    {
                      drop_one_buffer_and_count (vm, vnm, node, from - 1,
                                                 hi->sw_if_index,
                                                 VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
                      b += 1;
                      continue;
                    }

                  if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
                    }
                  else
                    if (PREDICT_FALSE
                        (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
                                          GHO_F_IPIP6_TUNNEL)))
                    {
                      vnet_get_outer_header (b[0], &gho);
                      n_tx_bytes +=
                        tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
                    }

                  u16 n_tx_bufs = vec_len (ptd->split_buffers);
                  u32 *from_seg = ptd->split_buffers;

                  while (n_tx_bufs > 0)
                    {
                      u32 sbi0;
                      vlib_buffer_t *sb0;
                      while (n_tx_bufs > 0 && n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          sb0 = vlib_get_buffer (vm, sbi0);
                          ASSERT (sb0->current_length > 0);
                          to_next += 1;
                          from_seg += 1;
                          n_left_to_next -= 1;
                          n_tx_bufs -= 1;
                          next0 = 0;
                          vnet_feature_next (&next0, sb0);
                          vlib_validate_buffer_enqueue_x1 (vm, node,
                                                           next_index,
                                                           to_next,
                                                           n_left_to_next,
                                                           sbi0, next0);
                        }
                      vlib_put_next_frame (vm, node, next_index,
                                           n_left_to_next);
                      if (n_tx_bufs > 0)
                        vlib_get_next_frame (vm, node, next_index,
                                             to_next, n_left_to_next);
                    }
                  /* The buffers were enqueued. Reset the length */
                  _vec_len (ptd->split_buffers) = 0;
                  /* Free the now segmented buffer */
                  vlib_buffer_free_one (vm, bi0);
                  b += 1;
                  continue;
                }
            }

          vnet_feature_next (&next0, b[0]);
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          b += 1;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  return frame->n_vectors;
}

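/*
 * Shared entry point of the four GSO nodes: look at the first
 * buffer's TX interface to decide whether this frame can be passed
 * through or must go down the software-segmentation path.
 */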
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
                 vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
                 int is_ip4, int is_ip6)
{
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi;

  if (frame->n_vectors > 0)
    {
      u32 *from = vlib_frame_vector_args (frame);
      vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
      hi = vnet_get_sup_hw_interface (vnm,
                                      vnet_buffer (b)->sw_if_index[VLIB_TX]);

      if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 0);
      else
        return vnet_gso_node_inline (vm, node, frame, vnm, hi,
                                     is_l2, is_ip4, is_ip6,
                                     /* do_segmentation */ 1);
    }
  return 0;
}

VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
                          0 /* ip6 */ );
}

VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
                             vlib_frame_t * frame)
{
  return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
                          1 /* ip6 */ );
}

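/*
 * Node registrations and feature-arc placement: the l2 variants run
 * on the l2-output arcs, the ip variants on the ip4-output/ip6-output
 * arcs just before the IPsec output features.
 */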
/* *INDENT-OFF* */

VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  .format_trace = format_gso_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = 0,
  .n_next_nodes = 0,
  .name = "gso-ip6",
};

VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
  .runs_before = VNET_FEATURES ("l2-output-feat-arc-end"),
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
  .runs_before = VNET_FEATURES ("ipsec6-output-feature"),
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */