FD.io VPP v21.01.1 - Vector Packet Processing
ip6_full_reass.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv6 Full Reassembly.
19  *
20  * This file contains the source code for IPv6 full reassembly.
21  */
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vppinfra/bihash_48_8.h>
28 
29 #define MSEC_PER_SEC 1000
30 #define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS 100
31 #define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
32 #define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT 1024
33 #define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
34 #define IP6_FULL_REASS_HT_LOAD_FACTOR (0.75)
35 
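/*
 * With these defaults a reassembly context is abandoned 100 ms after the
 * last fragment was heard (IP6_FULL_REASS_TIMEOUT_DEFAULT_MS / MSEC_PER_SEC
 * = 0.1 s), and the expire walk that reclaims such contexts runs every
 * 10 s.  The values can be overridden at runtime through
 * ip6_full_reass_set () defined further down in this file.
 */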
36 typedef enum
37 {
38  IP6_FULL_REASS_RC_OK,
39  IP6_FULL_REASS_RC_INTERNAL_ERROR,
40  IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS,
41  IP6_FULL_REASS_RC_NO_BUF,
42  IP6_FULL_REASS_RC_HANDOFF,
43 } ip6_full_reass_rc_t;
44 
45 typedef struct
46 {
47  union
48  {
49  struct
50  {
51  ip6_address_t src;
52  ip6_address_t dst;
53  u32 xx_id;
54  u32 frag_id;
55  u8 unused[7];
56  u8 proto;
57  };
58  u64 as_u64[6];
59  };
60 } ip6_full_reass_key_t;
61 
62 typedef union
63 {
64  struct
65  {
66  u32 reass_index;
67  u32 memory_owner_thread_index;
68  };
69  u64 as_u64;
70 } ip6_full_reass_val_t;
71 
72 typedef union
73 {
74  struct
75  {
76  ip6_full_reass_key_t k;
77  ip6_full_reass_val_t v;
78  };
79  clib_bihash_kv_48_8_t kv;
80 } ip6_full_reass_kv_t;
81 
82 
83 always_inline u32
84 ip6_full_reass_buffer_get_data_offset (vlib_buffer_t * b)
85 {
86  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
87  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
88 }
89 
90 always_inline u16
91 ip6_full_reass_buffer_get_data_len (vlib_buffer_t * b)
92 {
93  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
94  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
95  (vnb->ip.reass.fragment_first +
96  ip6_full_reass_buffer_get_data_offset (b)) + 1;
97 }
98 
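/*
 * Worked example for the two helpers above (hypothetical numbers): in the
 * common case the accounted range equals the fragment, so data_offset is 0
 * and data_len is fragment_last - fragment_first + 1.  For a fragment
 * carrying bytes [1280, 2559] whose accounted range is [1400, 2559]:
 *   data_offset = range_first - fragment_first = 1400 - 1280 = 120
 *   data_len    = min (range_last, fragment_last)
 *                 - (fragment_first + data_offset) + 1
 *               = 2559 - (1280 + 120) + 1 = 1160
 */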
99 typedef struct
100 {
101  // hash table key
102  ip6_full_reass_key_t key;
103  // time when last packet was received
104  f64 last_heard;
105  // internal id of this reassembly
106  u64 id;
107  // buffer index of first buffer in this reassembly context
108  u32 first_bi;
109  // last octet of packet, ~0 until fragment without more_fragments arrives
110  u32 last_packet_octet;
111  // length of data collected so far
112  u32 data_len;
113  // trace operation counter
114  u32 trace_op_counter;
115  // next index - used by custom apps (~0 if not set)
116  u32 next_index;
117  // error next index - used by custom apps (~0 if not set)
118  u32 error_next_index;
119  // minimum fragment length for this reassembly - used to estimate MTU
120  u16 min_fragment_length;
121  // number of fragments for this reassembly
122  u32 fragments_n;
123  // thread owning memory for this context (whose pool contains this ctx)
124  u32 memory_owner_thread_index;
125  // thread which received fragment with offset 0 and which sends out the
126  // completed reassembly
127  u32 sendout_thread_index;
128 } ip6_full_reass_t;
129 
130 typedef struct
131 {
132  ip6_full_reass_t *pool;
133  u32 reass_n;
134  u32 id_counter;
135  clib_spinlock_t lock;
136 } ip6_full_reass_per_thread_t;
137 
138 typedef struct
139 {
140  // IPv6 config
141  u32 timeout_ms;
142  f64 timeout;
143  u32 expire_walk_interval_ms;
144  // maximum number of fragments in one reassembly
145  u32 max_reass_len;
146  // maximum number of reassemblies
147  u32 max_reass_n;
148 
149  // IPv6 runtime
150  clib_bihash_48_8_t hash;
151 
152  // per-thread data
153  ip6_full_reass_per_thread_t *per_thread_data;
154 
155  // convenience
156  vlib_main_t *vlib_main;
157 
158  // node index of ip6-drop node
159  u32 ip6_drop_idx;
160  u32 ip6_icmp_error_idx;
161  u32 ip6_full_reass_expire_node_idx;
162 
163  /** Worker handoff */
164  u32 fq_index;
165  u32 fq_feature_index;
166 
167  // reference count for enabling/disabling feature - per interface
168  u32 *feature_use_refcount_per_intf;
169 } ip6_full_reass_main_t;
170 
171 extern ip6_full_reass_main_t ip6_full_reass_main;
172 
173 #ifndef CLIB_MARCH_VARIANT
174 ip6_full_reass_main_t ip6_full_reass_main;
175 #endif /* CLIB_MARCH_VARIANT */
176 
177 typedef enum
178 {
179  IP6_FULL_REASSEMBLY_NEXT_INPUT,
180  IP6_FULL_REASSEMBLY_NEXT_DROP,
181  IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR,
182  IP6_FULL_REASSEMBLY_NEXT_HANDOFF,
183  IP6_FULL_REASSEMBLY_N_NEXT,
184 } ip6_full_reass_next_t;
185 
186 typedef enum
187 {
188  RANGE_NEW,
189  RANGE_OVERLAP,
190  ICMP_ERROR_RT_EXCEEDED,
191  ICMP_ERROR_FL_TOO_BIG,
192  ICMP_ERROR_FL_NOT_MULT_8,
193  FINALIZE,
194  HANDOFF,
195 } ip6_full_reass_trace_operation_e;
196 
197 typedef struct
198 {
199  u16 range_first;
200  u16 range_last;
201  u32 range_bi;
202  i32 data_offset;
203  u32 data_len;
204  u32 first_bi;
205 } ip6_full_reass_range_trace_t;
206 
207 typedef struct
208 {
209  ip6_full_reass_trace_operation_e action;
210  u32 reass_id;
211  ip6_full_reass_range_trace_t trace_range;
212  u32 op_id;
213  u32 fragment_first;
214  u32 fragment_last;
215  u32 total_data_len;
216  u32 thread_id;
217  u32 thread_id_to;
218  bool is_after_handoff;
219  ip6_header_t ip6_header;
220  ip6_frag_hdr_t ip6_frag_header;
221 } ip6_full_reass_trace_t;
222 
223 static void
224 ip6_full_reass_trace_details (vlib_main_t * vm, u32 bi,
225  ip6_full_reass_range_trace_t * trace)
226 {
227  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
228  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
229  trace->range_first = vnb->ip.reass.range_first;
230  trace->range_last = vnb->ip.reass.range_last;
231  trace->data_offset = ip6_full_reass_buffer_get_data_offset (b);
232  trace->data_len = ip6_full_reass_buffer_get_data_len (b);
233  trace->range_bi = bi;
234 }
235 
236 static u8 *
237 format_ip6_full_reass_range_trace (u8 * s, va_list * args)
238 {
239  ip6_full_reass_range_trace_t *trace =
240  va_arg (*args, ip6_full_reass_range_trace_t *);
241  s =
242  format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
243  trace->range_last, trace->data_offset, trace->data_len,
244  trace->range_bi);
245  return s;
246 }
247 
248 static u8 *
249 format_ip6_full_reass_trace (u8 * s, va_list * args)
250 {
251  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
252  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
253  ip6_full_reass_trace_t *t = va_arg (*args, ip6_full_reass_trace_t *);
254  u32 indent = 0;
255  if (~0 != t->reass_id)
256  {
257  if (t->is_after_handoff)
258  {
259  s =
260  format (s, "%U\n", format_ip6_header, &t->ip6_header,
261  sizeof (t->ip6_header));
262  s =
263  format (s, " %U\n", format_ip6_frag_hdr, &t->ip6_frag_header,
264  sizeof (t->ip6_frag_header));
265  indent = 2;
266  }
267  s =
268  format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
269  t->reass_id, t->op_id);
270  indent = format_get_indent (s);
271  s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
274  }
275  switch (t->action)
276  {
277  case RANGE_NEW:
278  s = format (s, "\n%Unew %U", format_white_space, indent,
280  break;
281  case RANGE_OVERLAP:
282  s = format (s, "\n%Uoverlap %U", format_white_space, indent,
284  break;
286  s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
287  format_white_space, indent,
289  break;
291  s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
292  format_white_space, indent,
294  break;
296  s = format (s, "\n%Uicmp-error - reassembly time exceeded",
297  format_white_space, indent);
298  break;
299  case FINALIZE:
300  s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
301  break;
302  case HANDOFF:
303  s =
304  format (s, "handoff from thread #%u to thread #%u", t->thread_id,
305  t->thread_id_to);
306  break;
307  }
308  return s;
309 }
310 
311 static void
312 ip6_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
313  ip6_full_reass_main_t * rm,
314  ip6_full_reass_t * reass, u32 bi,
315  ip6_frag_hdr_t * ip6_frag_header,
316  ip6_full_reass_trace_operation_e action,
317  u32 thread_id_to)
318 {
319  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
321  bool is_after_handoff = false;
324  {
325  // this buffer's trace is gone
326  b->flags &= ~VLIB_BUFFER_IS_TRACED;
327  return;
328  }
330  {
331  is_after_handoff = true;
332  }
333  ip6_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
334  t->is_after_handoff = is_after_handoff;
335  if (t->is_after_handoff)
336  {
338  clib_min (sizeof (t->ip6_header), b->current_length));
339  if (ip6_frag_header)
340  {
341  clib_memcpy (&t->ip6_frag_header, ip6_frag_header,
342  sizeof (t->ip6_frag_header));
343  }
344  else
345  {
346  clib_memset (&t->ip6_frag_header, 0, sizeof (t->ip6_frag_header));
347  }
348  }
349  if (reass)
350  {
351  t->reass_id = reass->id;
352  t->op_id = reass->trace_op_counter;
353  t->trace_range.first_bi = reass->first_bi;
354  t->total_data_len = reass->data_len;
355  ++reass->trace_op_counter;
356  }
357  else
358  {
359  t->reass_id = ~0;
360  }
361  t->action = action;
362  t->thread_id = vm->thread_index;
363  t->thread_id_to = thread_id_to;
365  t->fragment_first = vnb->ip.reass.fragment_first;
366  t->fragment_last = vnb->ip.reass.fragment_last;
367 #if 0
368  static u8 *s = NULL;
369  s = format (s, "%U", format_ip6_full_reass_trace, NULL, NULL, t);
370  printf ("%.*s\n", vec_len (s), s);
371  fflush (stdout);
372  vec_reset_length (s);
373 #endif
374 }
375 
376 always_inline void
377 ip6_full_reass_free_ctx (ip6_full_reass_per_thread_t * rt,
378  ip6_full_reass_t * reass)
379 {
380  pool_put (rt->pool, reass);
381  --rt->reass_n;
382 }
383 
384 always_inline void
385 ip6_full_reass_free (ip6_full_reass_main_t * rm,
386  ip6_full_reass_per_thread_t * rt,
387  ip6_full_reass_t * reass)
388 {
389  clib_bihash_kv_48_8_t kv;
390  kv.key[0] = reass->key.as_u64[0];
391  kv.key[1] = reass->key.as_u64[1];
392  kv.key[2] = reass->key.as_u64[2];
393  kv.key[3] = reass->key.as_u64[3];
394  kv.key[4] = reass->key.as_u64[4];
395  kv.key[5] = reass->key.as_u64[5];
396  clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
397  ip6_full_reass_free_ctx (rt, reass);
398 }
399 
400 always_inline void
401 ip6_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
402  ip6_full_reass_main_t * rm, ip6_full_reass_t * reass)
403 {
404  u32 range_bi = reass->first_bi;
405  vlib_buffer_t *range_b;
406  vnet_buffer_opaque_t *range_vnb;
407  u32 *to_free = NULL;
408  while (~0 != range_bi)
409  {
410  range_b = vlib_get_buffer (vm, range_bi);
411  range_vnb = vnet_buffer (range_b);
412  u32 bi = range_bi;
413  while (~0 != bi)
414  {
415  vec_add1 (to_free, bi);
416  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
417  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
418  {
419  bi = b->next_buffer;
420  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
421  }
422  else
423  {
424  bi = ~0;
425  }
426  }
427  range_bi = range_vnb->ip.reass.next_range_bi;
428  }
429  /* send to next_error_index */
430  if (~0 != reass->error_next_index)
431  {
432  u32 n_left_to_next, *to_next, next_index;
433 
434  next_index = reass->error_next_index;
435  u32 bi = ~0;
436 
437  while (vec_len (to_free) > 0)
438  {
439  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
440 
441  while (vec_len (to_free) > 0 && n_left_to_next > 0)
442  {
443  bi = vec_pop (to_free);
444 
445  if (~0 != bi)
446  {
447  to_next[0] = bi;
448  to_next += 1;
449  n_left_to_next -= 1;
450  }
451  }
452  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
453  }
454  }
455  else
456  {
457  vlib_buffer_free (vm, to_free, vec_len (to_free));
458  }
459  vec_free (to_free);
460 }
461 
462 always_inline void
463 ip6_full_reass_on_timeout (vlib_main_t * vm, vlib_node_runtime_t * node,
464  ip6_full_reass_main_t * rm,
465  ip6_full_reass_t * reass, u32 * icmp_bi)
466 {
467  if (~0 == reass->first_bi)
468  {
469  return;
470  }
471  if (~0 == reass->next_index) // custom apps don't want icmp
472  {
473  vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
474  if (0 == vnet_buffer (b)->ip.reass.fragment_first)
475  {
476  *icmp_bi = reass->first_bi;
477  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
478  {
479  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
480  NULL, ICMP_ERROR_RT_EXCEEDED, ~0);
481  }
482  // fragment with offset zero received - send icmp message back
483  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
484  {
485  // separate first buffer from chain and steer it towards icmp node
486  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
487  reass->first_bi = b->next_buffer;
488  }
489  else
490  {
491  reass->first_bi = vnet_buffer (b)->ip.reass.next_range_bi;
492  }
493  icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
494  ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
495  0);
496  }
497  }
498  ip6_full_reass_drop_all (vm, node, rm, reass);
499 }
500 
501 always_inline ip6_full_reass_t *
502 ip6_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
503  ip6_full_reass_main_t * rm,
504  ip6_full_reass_per_thread_t * rt,
505  ip6_full_reass_kv_t * kv, u32 * icmp_bi,
506  u8 * do_handoff)
507 {
508  ip6_full_reass_t *reass;
509  f64 now;
510 
511 again:
512 
513  reass = NULL;
514  now = vlib_time_now (vm);
515 
516  if (!clib_bihash_search_48_8 (&rm->hash, &kv->kv, &kv->kv))
517  {
519  {
520  *do_handoff = 1;
521  return NULL;
522  }
523 
524  reass =
527  kv->v.reass_index);
528 
529  if (now > reass->last_heard + rm->timeout)
530  {
531  ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
532  ip6_full_reass_free (rm, rt, reass);
533  reass = NULL;
534  }
535  }
536 
537  if (reass)
538  {
539  reass->last_heard = now;
540  return reass;
541  }
542 
543  if (rt->reass_n >= rm->max_reass_n)
544  {
545  reass = NULL;
546  return reass;
547  }
548  else
549  {
550  pool_get (rt->pool, reass);
551  clib_memset (reass, 0, sizeof (*reass));
552  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
553  ++rt->id_counter;
554  reass->first_bi = ~0;
555  reass->last_packet_octet = ~0;
556  reass->data_len = 0;
557  reass->next_index = ~0;
558  reass->error_next_index = ~0;
559  ++rt->reass_n;
560  }
561 
562  reass->key.as_u64[0] = kv->kv.key[0];
563  reass->key.as_u64[1] = kv->kv.key[1];
564  reass->key.as_u64[2] = kv->kv.key[2];
565  reass->key.as_u64[3] = kv->kv.key[3];
566  reass->key.as_u64[4] = kv->kv.key[4];
567  reass->key.as_u64[5] = kv->kv.key[5];
568  kv->v.reass_index = (reass - rt->pool);
570  reass->last_heard = now;
571 
572  int rv = clib_bihash_add_del_48_8 (&rm->hash, &kv->kv, 2);
573  if (rv)
574  {
575  ip6_full_reass_free (rm, rt, reass);
576  reass = NULL;
577  // if other worker created a context already work with the other copy
578  if (-2 == rv)
579  goto again;
580  }
581 
582  return reass;
583 }
584 
585 always_inline ip6_full_reass_rc_t
586 ip6_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
587  ip6_full_reass_main_t * rm,
588  ip6_full_reass_per_thread_t * rt,
589  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
590  u32 * error0, bool is_custom_app)
591 {
592  *bi0 = reass->first_bi;
593  *error0 = IP6_ERROR_NONE;
594  ip6_frag_hdr_t *frag_hdr;
595  vlib_buffer_t *last_b = NULL;
596  u32 sub_chain_bi = reass->first_bi;
597  u32 total_length = 0;
598  u32 buf_cnt = 0;
599  u32 dropped_cnt = 0;
600  u32 *vec_drop_compress = NULL;
601  ip6_full_reass_rc_t rv = IP6_FULL_REASS_RC_OK;
602  do
603  {
604  u32 tmp_bi = sub_chain_bi;
605  vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
606  vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
607  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
608  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
609  {
611  goto free_buffers_and_return;
612  }
613 
615  u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
616  sizeof (*frag_hdr) + ip6_full_reass_buffer_get_data_offset (tmp);
617  u32 trim_end =
618  vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
619  if (tmp_bi == reass->first_bi)
620  {
621  /* first buffer - keep ip6 header */
622  if (0 != ip6_full_reass_buffer_get_data_offset (tmp))
623  {
625  goto free_buffers_and_return;
626  }
627  trim_front = 0;
628  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
629  (vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
630  sizeof (*frag_hdr));
631  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
632  {
634  goto free_buffers_and_return;
635  }
636  }
637  u32 keep_data =
638  vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
639  while (1)
640  {
641  ++buf_cnt;
642  if (trim_front)
643  {
644  if (trim_front > tmp->current_length)
645  {
646  /* drop whole buffer */
647  vec_add1 (vec_drop_compress, tmp_bi);
648  trim_front -= tmp->current_length;
649  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
650  {
652  goto free_buffers_and_return;
653  }
654  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
655  tmp_bi = tmp->next_buffer;
656  tmp = vlib_get_buffer (vm, tmp_bi);
657  continue;
658  }
659  else
660  {
661  vlib_buffer_advance (tmp, trim_front);
662  trim_front = 0;
663  }
664  }
665  if (keep_data)
666  {
667  if (last_b)
668  {
669  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
670  last_b->next_buffer = tmp_bi;
671  }
672  last_b = tmp;
673  if (keep_data <= tmp->current_length)
674  {
675  tmp->current_length = keep_data;
676  keep_data = 0;
677  }
678  else
679  {
680  keep_data -= tmp->current_length;
681  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
682  {
684  goto free_buffers_and_return;
685  }
686  }
687  total_length += tmp->current_length;
688  }
689  else
690  {
691  vec_add1 (vec_drop_compress, tmp_bi);
692  if (reass->first_bi == tmp_bi)
693  {
695  goto free_buffers_and_return;
696  }
697  ++dropped_cnt;
698  }
699  if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
700  {
701  tmp_bi = tmp->next_buffer;
702  tmp = vlib_get_buffer (vm, tmp->next_buffer);
703  }
704  else
705  {
706  break;
707  }
708  }
709  sub_chain_bi =
710  vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
711  reass.next_range_bi;
712  }
713  while (~0 != sub_chain_bi);
714 
715  if (!last_b)
716  {
718  goto free_buffers_and_return;
719  }
720  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
721  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
722  if (total_length < first_b->current_length)
723  {
725  goto free_buffers_and_return;
726  }
727  total_length -= first_b->current_length;
728  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
729  first_b->total_length_not_including_first_buffer = total_length;
730  // drop fragment header
731  vnet_buffer_opaque_t *first_b_vnb = vnet_buffer (first_b);
733  u16 ip6_frag_hdr_offset = first_b_vnb->ip.reass.ip6_frag_hdr_offset;
734  ip6_ext_header_t *prev_hdr;
735  frag_hdr =
736  ip6_ext_header_find (vm, first_b, ip, IP_PROTOCOL_IPV6_FRAGMENTATION,
737  &prev_hdr);
738  if (prev_hdr)
739  {
740  prev_hdr->next_hdr = frag_hdr->next_hdr;
741  }
742  else
743  {
744  ip->protocol = frag_hdr->next_hdr;
745  }
746  if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
747  {
749  goto free_buffers_and_return;
750  }
751  memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
752  first_b->current_length - ip6_frag_hdr_offset -
753  sizeof (ip6_frag_hdr_t));
754  first_b->current_length -= sizeof (*frag_hdr);
755  ip->payload_length =
756  clib_host_to_net_u16 (total_length + first_b->current_length -
757  sizeof (*ip));
758  if (!vlib_buffer_chain_linearize (vm, first_b))
759  {
761  goto free_buffers_and_return;
762  }
763  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
764  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
765  {
766  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi, NULL,
767  FINALIZE, ~0);
768 #if 0
769  // following code does a hexdump of packet fragments to stdout ...
770  do
771  {
772  u32 bi = reass->first_bi;
773  u8 *s = NULL;
774  while (~0 != bi)
775  {
776  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
777  s = format (s, "%u: %U\n", bi, format_hexdump,
779  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
780  {
781  bi = b->next_buffer;
782  }
783  else
784  {
785  break;
786  }
787  }
788  printf ("%.*s\n", vec_len (s), s);
789  fflush (stdout);
790  vec_free (s);
791  }
792  while (0);
793 #endif
794  }
795  if (!is_custom_app)
796  {
798  }
799  else
800  {
801  *next0 = reass->next_index;
802  }
803  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
804  ip6_full_reass_free (rm, rt, reass);
805  reass = NULL;
806 free_buffers_and_return:
807  vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
808  vec_free (vec_drop_compress);
809  return rv;
810 }
811 
812 always_inline void
813 ip6_full_reass_insert_range_in_chain (vlib_main_t * vm,
814  ip6_full_reass_main_t * rm,
815  ip6_full_reass_per_thread_t * rt,
816  ip6_full_reass_t * reass,
817  u32 prev_range_bi, u32 new_next_bi)
818 {
819 
820  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
821  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
822  if (~0 != prev_range_bi)
823  {
824  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
825  vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
826  new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
827  prev_vnb->ip.reass.next_range_bi = new_next_bi;
828  }
829  else
830  {
831  if (~0 != reass->first_bi)
832  {
833  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
834  }
835  reass->first_bi = new_next_bi;
836  }
837  reass->data_len += ip6_full_reass_buffer_get_data_len (new_next_b);
838 }
839 
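/*
 * The ranges of a reassembly form a singly linked list ordered by fragment
 * offset, threaded through vnet_buffer (b)->ip.reass.next_range_bi and
 * anchored at reass->first_bi.  E.g. after inserting fragments with offsets
 * 0, 1232 and 2464 (hypothetical buffer indices):
 *
 *   reass->first_bi -> bi 7 [0, 1231] -> bi 12 [1232, 2463]
 *                   -> bi 31 [2464, 3695] -> ~0
 *
 * reass->data_len grows by the new range's data length on every insert, so
 * reassembly is complete once data_len == last_packet_octet + 1.
 */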
840 always_inline ip6_full_reass_rc_t
841 ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
842  ip6_full_reass_main_t * rm,
843  ip6_full_reass_per_thread_t * rt,
844  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
845  u32 * error0, ip6_frag_hdr_t * frag_hdr,
846  bool is_custom_app, u32 * handoff_thread_idx)
847 {
848  int consumed = 0;
849  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
850  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
851  if (is_custom_app)
852  {
853  reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
854  reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
855  }
856 
857  fvnb->ip.reass.ip6_frag_hdr_offset =
858  (u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
860  if (fb->current_length < sizeof (*fip) ||
861  fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||
862  fvnb->ip.reass.ip6_frag_hdr_offset >= fb->current_length)
863  {
865  }
866 
867  u32 fragment_first = fvnb->ip.reass.fragment_first =
868  ip6_frag_hdr_offset_bytes (frag_hdr);
869  u32 fragment_length =
870  vlib_buffer_length_in_chain (vm, fb) -
871  (fvnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
872  u32 fragment_last = fvnb->ip.reass.fragment_last =
873  fragment_first + fragment_length - 1;
874  int more_fragments = ip6_frag_hdr_more (frag_hdr);
875  u32 candidate_range_bi = reass->first_bi;
876  u32 prev_range_bi = ~0;
877  fvnb->ip.reass.range_first = fragment_first;
878  fvnb->ip.reass.range_last = fragment_last;
879  fvnb->ip.reass.next_range_bi = ~0;
880  if (!more_fragments)
881  {
882  reass->last_packet_octet = fragment_last;
883  }
884  if (~0 == reass->first_bi)
885  {
886  // starting a new reassembly
887  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
888  *bi0);
889  reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
890  consumed = 1;
891  reass->fragments_n = 1;
892  goto check_if_done_maybe;
893  }
894  reass->min_fragment_length =
895  clib_min (clib_net_to_host_u16 (fip->payload_length),
896  fvnb->ip.reass.estimated_mtu);
897  while (~0 != candidate_range_bi)
898  {
899  vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
900  vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
901  if (fragment_first > candidate_vnb->ip.reass.range_last)
902  {
903  // this fragment starts after candidate range
904  prev_range_bi = candidate_range_bi;
905  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
906  if (candidate_vnb->ip.reass.range_last < fragment_last &&
907  ~0 == candidate_range_bi)
908  {
909  // special case - this fragment falls beyond all known ranges
910  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
911  prev_range_bi, *bi0);
912  consumed = 1;
913  break;
914  }
915  continue;
916  }
917  if (fragment_last < candidate_vnb->ip.reass.range_first)
918  {
919  // this fragment ends before candidate range without any overlap
920  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
921  prev_range_bi, *bi0);
922  consumed = 1;
923  }
924  else if (fragment_first == candidate_vnb->ip.reass.range_first &&
925  fragment_last == candidate_vnb->ip.reass.range_last)
926  {
927  // duplicate fragment - ignore
928  }
929  else
930  {
931  // overlapping fragment - not allowed by RFC 8200
932  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
933  {
934  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
935  RANGE_OVERLAP, ~0);
936  }
937  ip6_full_reass_drop_all (vm, node, rm, reass);
938  ip6_full_reass_free (rm, rt, reass);
940  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
941  return IP6_FULL_REASS_RC_OK;
942  }
943  break;
944  }
945  ++reass->fragments_n;
946 check_if_done_maybe:
947  if (consumed)
948  {
949  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
950  {
951  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, frag_hdr,
952  RANGE_NEW, ~0);
953  }
954  }
955  if (~0 != reass->last_packet_octet &&
956  reass->data_len == reass->last_packet_octet + 1)
957  {
958  *handoff_thread_idx = reass->sendout_thread_index;
959  int handoff =
961  ip6_full_reass_rc_t rc =
962  ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
963  is_custom_app);
964  if (IP6_FULL_REASS_RC_OK == rc && handoff)
965  {
967  }
968  return rc;
969  }
970  else
971  {
972  if (consumed)
973  {
974  *bi0 = ~0;
975  if (reass->fragments_n > rm->max_reass_len)
976  {
978  }
979  }
980  else
981  {
983  *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
984  }
985  }
986  return IP6_FULL_REASS_RC_OK;
987 }
988 
989 always_inline bool
990 ip6_full_reass_verify_upper_layer_present (vlib_node_runtime_t * node,
991  vlib_buffer_t * b,
992  ip6_frag_hdr_t * frag_hdr)
993 {
994  ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;
995  while (ip6_ext_hdr (tmp->next_hdr))
996  {
997  tmp = ip6_ext_next_header (tmp);
998  }
999  if (IP_PROTOCOL_IP6_NONXT == tmp->next_hdr)
1000  {
1001  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
1002  ICMP6_parameter_problem_first_fragment_has_incomplete_header_chain,
1003  0);
1004  b->error = node->errors[IP6_ERROR_REASS_MISSING_UPPER];
1005 
1006  return false;
1007  }
1008  return true;
1009 }
1010 
1011 always_inline bool
1012 ip6_full_reass_verify_fragment_multiple_8 (vlib_main_t * vm,
1013  vlib_node_runtime_t * node,
1014  vlib_buffer_t * b,
1015  ip6_frag_hdr_t * frag_hdr)
1016 {
1017  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1018  ip6_header_t *ip = vlib_buffer_get_current (b);
1019  int more_fragments = ip6_frag_hdr_more (frag_hdr);
1020  u32 fragment_length =
1021  vlib_buffer_length_in_chain (vm, b) -
1022  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
1023  if (more_fragments && 0 != fragment_length % 8)
1024  {
1025  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
1026  ICMP6_parameter_problem_erroneous_header_field,
1027  (u8 *) & ip->payload_length - (u8 *) ip);
1028  return false;
1029  }
1030  return true;
1031 }
1032 
1033 always_inline bool
1034 ip6_full_reass_verify_packet_size_lt_64k (vlib_main_t * vm,
1035  vlib_node_runtime_t * node,
1036  vlib_buffer_t * b,
1037  ip6_frag_hdr_t * frag_hdr)
1038 {
1039  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1040  u32 fragment_first = ip6_frag_hdr_offset_bytes (frag_hdr);
1041  u32 fragment_length =
1042  vlib_buffer_length_in_chain (vm, b) -
1043  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
1044  if (fragment_first + fragment_length > 65535)
1045  {
1046  ip6_header_t *ip0 = vlib_buffer_get_current (b);
1047  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
1048  ICMP6_parameter_problem_erroneous_header_field,
1049  (u8 *) & frag_hdr->fragment_offset_and_more
1050  - (u8 *) ip0);
1051  return false;
1052  }
1053  return true;
1054 }
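/*
 * Worked example for the check above: the fragment offset field counts
 * 8-octet units, so an offset of 8189 puts fragment_first at 65512; any
 * fragment payload longer than 23 bytes at that offset makes
 * fragment_first + fragment_length exceed 65535 and triggers the
 * parameter-problem ICMP error.
 */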
1055 
1056 always_inline uword
1057 ip6_full_reassembly_inline (vlib_main_t * vm,
1058  vlib_node_runtime_t * node,
1059  vlib_frame_t * frame, bool is_feature,
1060  bool is_custom_app)
1061 {
1062  u32 *from = vlib_frame_vector_args (frame);
1063  u32 n_left_from, n_left_to_next, *to_next, next_index;
1064  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1065  ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
1066  clib_spinlock_lock (&rt->lock);
1067 
1068  n_left_from = frame->n_vectors;
1069  next_index = node->cached_next_index;
1070  while (n_left_from > 0)
1071  {
1072  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1073 
1074  while (n_left_from > 0 && n_left_to_next > 0)
1075  {
1076  u32 bi0;
1077  vlib_buffer_t *b0;
1078  u32 next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
1079  u32 error0 = IP6_ERROR_NONE;
1080  u32 icmp_bi = ~0;
1081 
1082  bi0 = from[0];
1083  b0 = vlib_get_buffer (vm, bi0);
1084 
1085  ip6_header_t *ip0 = vlib_buffer_get_current (b0);
1086  ip6_frag_hdr_t *frag_hdr = NULL;
1087  ip6_ext_header_t *prev_hdr;
1088  if (ip6_ext_hdr (ip0->protocol))
1089  {
1090  frag_hdr =
1091  ip6_ext_header_find (vm, b0, ip0,
1092  IP_PROTOCOL_IPV6_FRAGMENTATION,
1093  &prev_hdr);
1094  }
1095  if (!frag_hdr)
1096  {
1097  // this is a regular packet - no fragmentation
1099  goto skip_reass;
1100  }
1101  vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
1102  (u8 *) frag_hdr - (u8 *) ip0;
1103 
1104  if (0 == ip6_frag_hdr_offset (frag_hdr))
1105  {
1106  // first fragment - verify upper-layer is present
1108  (node, b0, frag_hdr))
1109  {
1111  goto skip_reass;
1112  }
1113  }
1115  (vm, node, b0, frag_hdr)
1116  || !ip6_full_reass_verify_packet_size_lt_64k (vm, node, b0,
1117  frag_hdr))
1118  {
1120  goto skip_reass;
1121  }
1123  u8 do_handoff = 0;
1124 
1125  kv.k.as_u64[0] = ip0->src_address.as_u64[0];
1126  kv.k.as_u64[1] = ip0->src_address.as_u64[1];
1127  kv.k.as_u64[2] = ip0->dst_address.as_u64[0];
1128  kv.k.as_u64[3] = ip0->dst_address.as_u64[1];
1129  kv.k.as_u64[4] =
1131  vnet_buffer (b0)->sw_if_index[VLIB_RX])) << 32 |
1132  (u64) frag_hdr->identification;
1133  kv.k.as_u64[5] = ip0->protocol;
1134 
1135  ip6_full_reass_t *reass =
1136  ip6_full_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
1137  &do_handoff);
1138 
1139  if (reass)
1140  {
1141  const u32 fragment_first = ip6_frag_hdr_offset (frag_hdr);
1142  if (0 == fragment_first)
1143  {
1144  reass->sendout_thread_index = vm->thread_index;
1145  }
1146  }
1147  if (PREDICT_FALSE (do_handoff))
1148  {
1149  next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
1150  vnet_buffer (b0)->ip.reass.owner_thread_index =
1151  kv.v.memory_owner_thread_index;
1152  }
1153  else if (reass)
1154  {
1155  u32 handoff_thread_idx;
1156  switch (ip6_full_reass_update
1157  (vm, node, rm, rt, reass, &bi0, &next0, &error0,
1158  frag_hdr, is_custom_app, &handoff_thread_idx))
1159  {
1160  case IP6_FULL_REASS_RC_OK:
1161  /* nothing to do here */
1162  break;
1163  case IP6_FULL_REASS_RC_HANDOFF:
1164  next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
1165  b0 = vlib_get_buffer (vm, bi0);
1166  vnet_buffer (b0)->ip.reass.owner_thread_index =
1167  handoff_thread_idx;
1168  break;
1169  case IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS:
1170  vlib_node_increment_counter (vm, node->node_index,
1171  IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1172  1);
1173  ip6_full_reass_drop_all (vm, node, rm, reass);
1174  ip6_full_reass_free (rm, rt, reass);
1175  goto next_packet;
1176  break;
1177  case IP6_FULL_REASS_RC_NO_BUF:
1178  vlib_node_increment_counter (vm, node->node_index,
1179  IP6_ERROR_REASS_NO_BUF, 1);
1180  ip6_full_reass_drop_all (vm, node, rm, reass);
1181  ip6_full_reass_free (rm, rt, reass);
1182  goto next_packet;
1183  break;
1184  case IP6_FULL_REASS_RC_INTERNAL_ERROR:
1185  vlib_node_increment_counter (vm, node->node_index,
1186  IP6_ERROR_REASS_INTERNAL_ERROR,
1187  1);
1188  ip6_full_reass_drop_all (vm, node, rm, reass);
1189  ip6_full_reass_free (rm, rt, reass);
1190  goto next_packet;
1191  break;
1192  }
1193  }
1194  else
1195  {
1196  if (is_feature)
1197  {
1199  }
1200  else
1201  {
1202  vnet_buffer_opaque_t *fvnb = vnet_buffer (b0);
1203  next0 = fvnb->ip.reass.error_next_index;
1204  }
1205  error0 = IP6_ERROR_REASS_LIMIT_REACHED;
1206  }
1207 
1208  if (~0 != bi0)
1209  {
1210  skip_reass:
1211  to_next[0] = bi0;
1212  to_next += 1;
1213  n_left_to_next -= 1;
1214 
1215  /* bi0 might have been updated by reass_finalize, reload */
1216  b0 = vlib_get_buffer (vm, bi0);
1217  if (IP6_ERROR_NONE != error0)
1218  {
1219  b0->error = node->errors[error0];
1220  }
1221 
1222  if (next0 == IP6_FULL_REASSEMBLY_NEXT_HANDOFF)
1223  {
1224  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1225  {
1226  ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
1227  frag_hdr, HANDOFF,
1228  vnet_buffer (b0)->ip.
1229  reass.owner_thread_index);
1230  }
1231  }
1232  else if (is_feature && IP6_ERROR_NONE == error0)
1233  {
1234  vnet_feature_next (&next0, b0);
1235  }
1236  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1237  n_left_to_next, bi0, next0);
1238  }
1239 
1240  if (~0 != icmp_bi)
1241  {
1243  to_next[0] = icmp_bi;
1244  to_next += 1;
1245  n_left_to_next -= 1;
1246  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1247  n_left_to_next, icmp_bi,
1248  next0);
1249  }
1250  next_packet:
1251  from += 1;
1252  n_left_from -= 1;
1253  }
1254 
1255  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1256  }
1257 
1258  clib_spinlock_unlock (&rt->lock);
1259  return frame->n_vectors;
1260 }
1261 
1262 static char *ip6_full_reassembly_error_strings[] = {
1263 #define _(sym, string) string,
1264  foreach_ip6_error
1265 #undef _
1266 };
1267 
1270  vlib_frame_t * frame)
1271 {
1272  return ip6_full_reassembly_inline (vm, node, frame, false /* is_feature */ ,
1273  false /* is_custom_app */ );
1274 }
1275 
1276 /* *INDENT-OFF* */
1278  .name = "ip6-full-reassembly",
1279  .vector_size = sizeof (u32),
1280  .format_trace = format_ip6_full_reass_trace,
1281  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1282  .error_strings = ip6_full_reassembly_error_strings,
1283  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1284  .next_nodes =
1285  {
1286  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1287  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1288  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1289  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reassembly-handoff",
1290  },
1291 };
1292 /* *INDENT-ON* */
1293 
1296  vlib_frame_t * frame)
1297 {
1298  return ip6_full_reassembly_inline (vm, node, frame, true /* is_feature */ ,
1299  false /* is_custom_app */ );
1300 }
1301 
1302 /* *INDENT-OFF* */
1304  .name = "ip6-full-reassembly-feature",
1305  .vector_size = sizeof (u32),
1306  .format_trace = format_ip6_full_reass_trace,
1307  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1308  .error_strings = ip6_full_reassembly_error_strings,
1309  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1310  .next_nodes =
1311  {
1312  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1313  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1314  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1315  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reass-feature-hoff",
1316  },
1317 };
1318 /* *INDENT-ON* */
1319 
1320 /* *INDENT-OFF* */
1321 VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
1322  .arc_name = "ip6-unicast",
1323  .node_name = "ip6-full-reassembly-feature",
1324  .runs_before = VNET_FEATURES ("ip6-lookup",
1325  "ipsec6-input-feature"),
1326  .runs_after = 0,
1327 };
1328 /* *INDENT-ON* */
1329 
1330 #ifndef CLIB_MARCH_VARIANT
1331 static u32
1333 {
1335  u32 nbuckets;
1336  u8 i;
1337 
1338  nbuckets = (u32) (rm->max_reass_n / IP6_FULL_REASS_HT_LOAD_FACTOR);
1339 
1340  for (i = 0; i < 31; i++)
1341  if ((1 << i) >= nbuckets)
1342  break;
1343  nbuckets = 1 << i;
1344 
1345  return nbuckets;
1346 }
1347 #endif /* CLIB_MARCH_VARIANT */
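/*
 * Worked example: with the default of 1024 concurrent reassemblies and
 * IP6_FULL_REASS_HT_LOAD_FACTOR of 0.75, max_reass_n / load factor is
 * roughly 1365, which ip6_full_reass_get_nbuckets () rounds up to the next
 * power of two, i.e. 2048 hash buckets.
 */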
1348 
1349 typedef enum
1350 {
1351  IP6_EVENT_CONFIG_CHANGED = 1,
1352 } ip6_full_reass_event_t;
1353 
1354 #ifndef CLIB_MARCH_VARIANT
1355 typedef struct
1356 {
1357  int failure;
1358  clib_bihash_48_8_t *new_hash;
1359 } ip6_rehash_cb_ctx;
1360 
1361 static int
1362 ip6_rehash_cb (clib_bihash_kv_48_8_t * kv, void *_ctx)
1363 {
1364  ip6_rehash_cb_ctx *ctx = _ctx;
1365  if (clib_bihash_add_del_48_8 (ctx->new_hash, kv, 1))
1366  {
1367  ctx->failure = 1;
1368  }
1369  return (BIHASH_WALK_CONTINUE);
1370 }
1371 
1372 static void
1373 ip6_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1374  u32 max_reassembly_length,
1375  u32 expire_walk_interval_ms)
1376 {
1377  ip6_full_reass_main.timeout_ms = timeout_ms;
1378  ip6_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1379  ip6_full_reass_main.max_reass_n = max_reassemblies;
1380  ip6_full_reass_main.max_reass_len = max_reassembly_length;
1381  ip6_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1382 }
1383 
1385 ip6_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1386  u32 max_reassembly_length, u32 expire_walk_interval_ms)
1387 {
1388  u32 old_nbuckets = ip6_full_reass_get_nbuckets ();
1389  ip6_full_reass_set_params (timeout_ms, max_reassemblies,
1390  max_reassembly_length, expire_walk_interval_ms);
1391  vlib_process_signal_event (ip6_full_reass_main.vlib_main,
1392  ip6_full_reass_main.ip6_full_reass_expire_node_idx,
1394  u32 new_nbuckets = ip6_full_reass_get_nbuckets ();
1395  if (ip6_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
1396  {
1397  clib_bihash_48_8_t new_hash;
1398  clib_memset (&new_hash, 0, sizeof (new_hash));
1400  ctx.failure = 0;
1401  ctx.new_hash = &new_hash;
1402  clib_bihash_init_48_8 (&new_hash, "ip6-full-reass", new_nbuckets,
1403  new_nbuckets * 1024);
1404  clib_bihash_foreach_key_value_pair_48_8 (&ip6_full_reass_main.hash,
1405  ip6_rehash_cb, &ctx);
1406  if (ctx.failure)
1407  {
1408  clib_bihash_free_48_8 (&new_hash);
1409  return -1;
1410  }
1411  else
1412  {
1413  clib_bihash_free_48_8 (&ip6_full_reass_main.hash);
1414  clib_memcpy_fast (&ip6_full_reass_main.hash, &new_hash,
1415  sizeof (ip6_full_reass_main.hash));
1416  clib_bihash_copied (&ip6_full_reass_main.hash, &new_hash);
1417  }
1418  }
1419  return 0;
1420 }
1421 
1423 ip6_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1424  u32 * max_reassembly_length,
1425  u32 * expire_walk_interval_ms)
1426 {
1427  *timeout_ms = ip6_full_reass_main.timeout_ms;
1428  *max_reassemblies = ip6_full_reass_main.max_reass_n;
1429  *max_reassembly_length = ip6_full_reass_main.max_reass_len;
1430  *expire_walk_interval_ms = ip6_full_reass_main.expire_walk_interval_ms;
1431  return 0;
1432 }
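/*
 * A minimal configuration sketch built on the two functions above, assuming
 * their prototypes are exported through the companion header:
 *
 *   u32 timeout_ms, max_reass, max_len, walk_ms;
 *   ip6_full_reass_get (&timeout_ms, &max_reass, &max_len, &walk_ms);
 *   ip6_full_reass_set (timeout_ms, 2 * max_reass, max_len, walk_ms);
 *
 * ip6_full_reass_set () returns a negative value when rehashing the bihash
 * to the larger bucket count fails; the old hash table is kept in that case.
 */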
1433 
1434 static clib_error_t *
1435 ip6_full_reass_init_function (vlib_main_t * vm)
1436 {
1437  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1438  clib_error_t *error = 0;
1439  u32 nbuckets;
1440  vlib_node_t *node;
1441 
1442  rm->vlib_main = vm;
1443 
1446  vec_foreach (rt, rm->per_thread_data)
1447  {
1448  clib_spinlock_init (&rt->lock);
1449  pool_alloc (rt->pool, rm->max_reass_n);
1450  }
1451 
1452  node = vlib_get_node_by_name (vm, (u8 *) "ip6-full-reassembly-expire-walk");
1453  ASSERT (node);
1455 
1460 
1461  nbuckets = ip6_full_reass_get_nbuckets ();
1462  clib_bihash_init_48_8 (&rm->hash, "ip6-full-reass", nbuckets,
1463  nbuckets * 1024);
1464 
1465  node = vlib_get_node_by_name (vm, (u8 *) "ip6-drop");
1466  ASSERT (node);
1467  rm->ip6_drop_idx = node->index;
1468  node = vlib_get_node_by_name (vm, (u8 *) "ip6-icmp-error");
1469  ASSERT (node);
1470  rm->ip6_icmp_error_idx = node->index;
1471 
1472  if ((error = vlib_call_init_function (vm, ip_main_init)))
1473  return error;
1474  ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
1475  ip6_full_reass_node.index);
1476 
1478  rm->fq_feature_index =
1480 
1481  rm->feature_use_refcount_per_intf = NULL;
1482  return error;
1483 }
1484 
1485 VLIB_INIT_FUNCTION (ip6_full_reass_init_function);
1486 #endif /* CLIB_MARCH_VARIANT */
1487 
1488 static uword
1489 ip6_full_reass_walk_expired (vlib_main_t * vm,
1490  vlib_node_runtime_t * node, vlib_frame_t * f)
1491 {
1492  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1493  uword event_type, *event_data = 0;
1494 
1495  while (true)
1496  {
1499  / (f64) MSEC_PER_SEC);
1500  event_type = vlib_process_get_events (vm, &event_data);
1501 
1502  switch (event_type)
1503  {
1504  case ~0: /* no events => timeout */
1505  /* nothing to do here */
1506  break;
1508  break;
1509  default:
1510  clib_warning ("BUG: event type 0x%wx", event_type);
1511  break;
1512  }
1513  f64 now = vlib_time_now (vm);
1514 
1515  ip6_full_reass_t *reass;
1516  int *pool_indexes_to_free = NULL;
1517 
1518  uword thread_index = 0;
1519  int index;
1520  const uword nthreads = vlib_num_workers () + 1;
1521  u32 *vec_icmp_bi = NULL;
1522  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1523  {
1525  &rm->per_thread_data[thread_index];
1526  clib_spinlock_lock (&rt->lock);
1527 
1528  vec_reset_length (pool_indexes_to_free);
1529  /* *INDENT-OFF* */
1530  pool_foreach_index (index, rt->pool) {
1531  reass = pool_elt_at_index (rt->pool, index);
1532  if (now > reass->last_heard + rm->timeout)
1533  {
1534  vec_add1 (pool_indexes_to_free, index);
1535  }
1536  }
1537  /* *INDENT-ON* */
1538  int *i;
1539  /* *INDENT-OFF* */
1540  vec_foreach (i, pool_indexes_to_free)
1541  {
1542  ip6_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1543  u32 icmp_bi = ~0;
1544  ip6_full_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
1545  if (~0 != icmp_bi)
1546  vec_add1 (vec_icmp_bi, icmp_bi);
1547 
1548  ip6_full_reass_free (rm, rt, reass);
1549  }
1550  /* *INDENT-ON* */
1551 
1552  clib_spinlock_unlock (&rt->lock);
1553  }
1554 
1555  while (vec_len (vec_icmp_bi) > 0)
1556  {
1557  vlib_frame_t *f =
1559  u32 *to_next = vlib_frame_vector_args (f);
1560  u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
1561  int trace_frame = 0;
1562  while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)
1563  {
1564  u32 bi = vec_pop (vec_icmp_bi);
1565  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1566  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
1567  trace_frame = 1;
1568  b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
1569  to_next[0] = bi;
1570  ++f->n_vectors;
1571  to_next += 1;
1572  n_left_to_next -= 1;
1573  }
1574  f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
1576  }
1577 
1578  vec_free (pool_indexes_to_free);
1579  vec_free (vec_icmp_bi);
1580  if (event_data)
1581  {
1582  _vec_len (event_data) = 0;
1583  }
1584  }
1585 
1586  return 0;
1587 }
1588 
1589 /* *INDENT-OFF* */
1591  .function = ip6_full_reass_walk_expired,
1592  .format_trace = format_ip6_full_reass_trace,
1593  .type = VLIB_NODE_TYPE_PROCESS,
1594  .name = "ip6-full-reassembly-expire-walk",
1595 
1596  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1597  .error_strings = ip6_full_reassembly_error_strings,
1598 
1599 };
1600 /* *INDENT-ON* */
1601 
1602 static u8 *
1603 format_ip6_full_reass_key (u8 * s, va_list * args)
1604 {
1605  ip6_full_reass_key_t *key = va_arg (*args, ip6_full_reass_key_t *);
1606  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1608  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1609  return s;
1610 }
1611 
1612 static u8 *
1613 format_ip6_full_reass (u8 * s, va_list * args)
1614 {
1615  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1616  ip6_full_reass_t *reass = va_arg (*args, ip6_full_reass_t *);
1617 
1618  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
1619  "last_packet_octet: %u, trace_op_counter: %u\n",
1620  reass->id, format_ip6_full_reass_key, &reass->key,
1621  reass->first_bi, reass->data_len, reass->last_packet_octet,
1622  reass->trace_op_counter);
1623  u32 bi = reass->first_bi;
1624  u32 counter = 0;
1625  while (~0 != bi)
1626  {
1627  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1628  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1629  s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1630  "fragment[%u, %u]\n",
1631  counter, vnb->ip.reass.range_first,
1632  vnb->ip.reass.range_last, bi,
1635  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1636  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1637  {
1638  bi = b->next_buffer;
1639  }
1640  else
1641  {
1642  bi = ~0;
1643  }
1644  }
1645  return s;
1646 }
1647 
1648 static clib_error_t *
1649 show_ip6_full_reass (vlib_main_t * vm, unformat_input_t * input,
1650  CLIB_UNUSED (vlib_cli_command_t * lmd))
1651 {
1652  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1653 
1654  vlib_cli_output (vm, "---------------------");
1655  vlib_cli_output (vm, "IP6 reassembly status");
1656  vlib_cli_output (vm, "---------------------");
1657  bool details = false;
1658  if (unformat (input, "details"))
1659  {
1660  details = true;
1661  }
1662 
1663  u32 sum_reass_n = 0;
1664  u64 sum_buffers_n = 0;
1665  ip6_full_reass_t *reass;
1666  uword thread_index;
1667  const uword nthreads = vlib_num_workers () + 1;
1668  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1669  {
1670  ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1671  clib_spinlock_lock (&rt->lock);
1672  if (details)
1673  {
1674  /* *INDENT-OFF* */
1675  pool_foreach (reass, rt->pool) {
1676  vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
1677  }
1678  /* *INDENT-ON* */
1679  }
1680  sum_reass_n += rt->reass_n;
1681  clib_spinlock_unlock (&rt->lock);
1682  }
1683  vlib_cli_output (vm, "---------------------");
1684  vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
1685  (long unsigned) sum_reass_n);
1686  vlib_cli_output (vm,
1687  "Maximum configured concurrent full IP6 reassemblies per worker-thread: %lu\n",
1688  (long unsigned) rm->max_reass_n);
1689  vlib_cli_output (vm,
1690  "Maximum configured full IP6 reassembly timeout: %lums\n",
1691  (long unsigned) rm->timeout_ms);
1692  vlib_cli_output (vm,
1693  "Maximum configured full IP6 reassembly expire walk interval: %lums\n",
1694  (long unsigned) rm->expire_walk_interval_ms);
1695  vlib_cli_output (vm, "Buffers in use: %lu\n",
1696  (long unsigned) sum_buffers_n);
1697  return 0;
1698 }
1699 
1700 /* *INDENT-OFF* */
1701 VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd, static) = {
1702  .path = "show ip6-full-reassembly",
1703  .short_help = "show ip6-full-reassembly [details]",
1704  .function = show_ip6_full_reass,
1705 };
1706 /* *INDENT-ON* */
1707 
1708 #ifndef CLIB_MARCH_VARIANT
1709 vnet_api_error_t
1710 ip6_full_reass_enable_disable (u32 sw_if_index, u8 enable_disable)
1711 {
1712  return vnet_feature_enable_disable ("ip6-unicast",
1713  "ip6-full-reassembly-feature",
1714  sw_if_index, enable_disable, 0, 0);
1715 }
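/*
 * Enabling reassembly on an interface is a one-liner from application code,
 * assuming the prototype above is exported through the companion header:
 *
 *   vnet_api_error_t rv = ip6_full_reass_enable_disable (sw_if_index, 1);
 *
 * where 1 enables and 0 disables; this simply arms or disarms the
 * "ip6-full-reassembly-feature" node on the "ip6-unicast" arc for that
 * interface.
 */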
1716 #endif /* CLIB_MARCH_VARIANT */
1717 
1718 #define foreach_ip6_full_reassembly_handoff_error \
1719 _(CONGESTION_DROP, "congestion drop")
1720 
1721 
1722 typedef enum
1723 {
1724 #define _(sym,str) IP6_FULL_REASSEMBLY_HANDOFF_ERROR_##sym,
1725  foreach_ip6_full_reassembly_handoff_error
1726 #undef _
1727  IP6_FULL_REASSEMBLY_HANDOFF_N_ERROR,
1728 } ip6_full_reassembly_handoff_error_t;
1729 
1730 static char *ip6_full_reassembly_handoff_error_strings[] = {
1731 #define _(sym,string) string,
1732  foreach_ip6_full_reassembly_handoff_error
1733 #undef _
1734 };
1735 
1736 typedef struct
1737 {
1738  u32 next_worker_index;
1739 } ip6_full_reassembly_handoff_trace_t;
1740 
1741 static u8 *
1743 {
1744  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1745  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1747  va_arg (*args, ip6_full_reassembly_handoff_trace_t *);
1748 
1749  s =
1750  format (s, "ip6-full-reassembly-handoff: next-worker %d",
1751  t->next_worker_index);
1752 
1753  return s;
1754 }
1755 
1756 always_inline uword
1757 ip6_full_reassembly_handoff_inline (vlib_main_t * vm,
1758  vlib_node_runtime_t * node,
1759  vlib_frame_t * frame, bool is_feature)
1760 {
1761  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1762 
1763  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1764  u32 n_enq, n_left_from, *from;
1765  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1766  u32 fq_index;
1767 
1768  from = vlib_frame_vector_args (frame);
1769  n_left_from = frame->n_vectors;
1770  vlib_get_buffers (vm, from, bufs, n_left_from);
1771 
1772  b = bufs;
1773  ti = thread_indices;
1774 
1775  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1776 
1777  while (n_left_from > 0)
1778  {
1779  ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
1780 
1781  if (PREDICT_FALSE
1782  ((node->flags & VLIB_NODE_FLAG_TRACE)
1783  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1784  {
1786  vlib_add_trace (vm, node, b[0], sizeof (*t));
1787  t->next_worker_index = ti[0];
1788  }
1789 
1790  n_left_from -= 1;
1791  ti += 1;
1792  b += 1;
1793  }
1794  n_enq =
1795  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1796  frame->n_vectors, 1);
1797 
1798  if (n_enq < frame->n_vectors)
1800  IP6_FULL_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1801  frame->n_vectors - n_enq);
1802  return frame->n_vectors;
1803 }
1804 
1805 VLIB_NODE_FN (ip6_full_reassembly_handoff_node) (vlib_main_t * vm,
1807  vlib_frame_t * frame)
1808 {
1809  return ip6_full_reassembly_handoff_inline (vm, node, frame,
1810  false /* is_feature */ );
1811 }
1812 
1813 /* *INDENT-OFF* */
1814 VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node) = {
1815  .name = "ip6-full-reassembly-handoff",
1816  .vector_size = sizeof (u32),
1817  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1820 
1821  .n_next_nodes = 1,
1822 
1823  .next_nodes = {
1824  [0] = "error-drop",
1825  },
1826 };
1827 
1828 
1829 VLIB_NODE_FN (ip6_full_reassembly_feature_handoff_node) (vlib_main_t * vm,
1831 {
1832  return ip6_full_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
1833 }
1834 
1835 
1836 /* *INDENT-OFF* */
1837 VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node) = {
1838  .name = "ip6-full-reass-feature-hoff",
1839  .vector_size = sizeof (u32),
1840  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1843 
1844  .n_next_nodes = 1,
1845 
1846  .next_nodes = {
1847  [0] = "error-drop",
1848  },
1849 };
1850 /* *INDENT-ON* */
1851 
1852 #ifndef CLIB_MARCH_VARIANT
1853 int
1854 ip6_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
1855 {
1856  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
1857  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
1858  if (is_enable)
1859  {
1860  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1861  {
1863  return vnet_feature_enable_disable ("ip6-unicast",
1864  "ip6-full-reassembly-feature",
1865  sw_if_index, 1, 0, 0);
1866  }
1868  }
1869  else
1870  {
1872  if (!rm->feature_use_refcount_per_intf[sw_if_index])
1873  return vnet_feature_enable_disable ("ip6-unicast",
1874  "ip6-full-reassembly-feature",
1875  sw_if_index, 0, 0, 0);
1876  }
1877  return -1;
1878 }
1879 #endif
1880 
1881 /*
1882  * fd.io coding-style-patch-verification: ON
1883  *
1884  * Local Variables:
1885  * eval: (c-set-style "gnu")
1886  * End:
1887  */