FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
ip6_full_reass.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 
16 /**
17  * @file
18  * @brief IPv6 Full Reassembly.
19  *
20  * This file contains the source code for IPv6 full reassembly.
21  */
22 
23 #include <vppinfra/vec.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ip/ip.h>
26 #include <vppinfra/bihash_48_8.h>
28 
29 #define MSEC_PER_SEC 1000
30 #define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS 100
31 #define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000 // 10 seconds default
32 #define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT 1024
33 #define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
34 #define IP6_FULL_REASS_HT_LOAD_FACTOR (0.75)
35 
36 typedef enum
37 {
44 
45 typedef struct
46 {
47  union
48  {
49  struct
50  {
55  u8 unused[7];
57  };
58  u64 as_u64[6];
59  };
61 
62 typedef union
63 {
64  struct
65  {
68  };
71 
72 typedef union
73 {
74  struct
75  {
78  };
81 
82 
/* Offset of usable data within this fragment buffer, i.e. how far
 * range_first lies past fragment_first.
 * NOTE(review): doxygen scrape — the signature lines (83-84) and the
 * vnb declaration (86) are missing; presumably this is
 * ip6_full_reass_buffer_get_data_offset — confirm against the original. */
85 {
87  return vnb->ip.reass.range_first - vnb->ip.reass.fragment_first;
88 }
89 
/* Number of usable data bytes in this fragment buffer, clamped so
 * overlapping tail bytes beyond range_last are not counted.
 * NOTE(review): doxygen scrape — the signature (90-91) and part of the
 * expression (line 96) are missing; presumably this is
 * ip6_full_reass_buffer_get_data_len — recover before editing. */
92 {
94  return clib_min (vnb->ip.reass.range_last, vnb->ip.reass.fragment_last) -
95  (vnb->ip.reass.fragment_first +
97 }
98 
99 typedef struct
100 {
101  // hash table key
103  // time when last packet was received
105  // internal id of this reassembly
107  // buffer index of first buffer in this reassembly context
109  // last octet of packet, ~0 until fragment without more_fragments arrives
111  // length of data collected so far
113  // trace operation counter
115  // next index - used by custom apps (~0 if not set)
117  // error next index - used by custom apps (~0 if not set)
119  // minimum fragment length for this reassembly - used to estimate MTU
121  // number of fragments for this reassembly
123  // thread owning memory for this context (whose pool contains this ctx)
125  // thread which received fragment with offset 0 and which sends out the
126  // completed reassembly
129 
130 typedef struct
131 {
137 
138 typedef struct
139 {
140  // IPv6 config
144  // maximum number of fragments in one reassembly
146  // maximum number of reassemblies
148 
149  // IPv6 runtime
150  clib_bihash_48_8_t hash;
151 
152  // per-thread data
154 
155  // convenience
157 
158  // node index of ip6-drop node
162 
163  /** Worker handoff */
166 
168 
170 
171 #ifndef CLIB_MARCH_VARIANT
173 #endif /* CLIB_MARCH_VARIANT */
174 
175 typedef enum
176 {
183 
184 typedef enum
185 {
194 
195 typedef struct
196 {
204 
205 typedef struct
206 {
207  ip6_full_reass_trace_operation_e action;
217 
/* Copy per-buffer range bookkeeping (range_first/range_last/bi) from the
 * buffer's vnet opaque into a range-trace record.
 * NOTE(review): doxygen scrape — the function name line (219-220) and the
 * vnb/trace field lines (223, 226-227) are missing from this listing;
 * recover from the original ip6_full_reass.c before editing. */
218 static void
221 {
222  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
224  trace->range_first = vnb->ip.reass.range_first;
225  trace->range_last = vnb->ip.reass.range_last;
228  trace->range_bi = bi;
229 }
230 
/* format() callback printing one buffer-range entry of a reassembly trace:
 * "[first, last], off, len, bi".
 * NOTE(review): doxygen scrape — the function name line (232) and the
 * 'trace' declaration (234) are missing; recover before editing. */
231 static u8 *
233 {
235  va_arg (*args, ip6_full_reass_range_trace_t *);
236  s =
237  format (s, "range: [%u, %u], off %d, len %u, bi %u", trace->range_first,
238  trace->range_last, trace->data_offset, trace->data_len,
239  trace->range_bi);
240  return s;
241 }
242 
/* format() callback for an ip6 full reassembly packet trace record:
 * prints the reassembly id/op id header (when reass_id != ~0), then a
 * per-action line (new range, overlap, icmp errors, finalize, handoff).
 * NOTE(review): doxygen scrape — continuation lines 255-256, 262, 266,
 * 268, 271, 273, 276, 278 (format arguments and the ICMP_ERROR_* case
 * labels) are missing from this listing; recover before editing. */
243 static u8 *
244 format_ip6_full_reass_trace (u8 * s, va_list * args)
245 {
246  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
247  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
248  ip6_full_reass_trace_t *t = va_arg (*args, ip6_full_reass_trace_t *);
249  u32 indent = 0;
250  if (~0 != t->reass_id)
251  {
252  s = format (s, "reass id: %u, op id: %u ", t->reass_id, t->op_id);
253  indent = format_get_indent (s);
254  s = format (s, "first bi: %u, data len: %u, ip/fragment[%u, %u]",
257  }
258  switch (t->action)
259  {
260  case RANGE_NEW:
261  s = format (s, "\n%Unew %U", format_white_space, indent,
263  break;
264  case RANGE_OVERLAP:
265  s = format (s, "\n%Uoverlap %U", format_white_space, indent,
267  break;
269  s = format (s, "\n%Uicmp-error - frag_len > 65535 %U",
270  format_white_space, indent,
272  break;
274  s = format (s, "\n%Uicmp-error - frag_len mod 8 != 0 %U",
275  format_white_space, indent,
277  break;
279  s = format (s, "\n%Uicmp-error - reassembly time exceeded",
280  format_white_space, indent);
281  break;
282  case FINALIZE:
283  s = format (s, "\n%Ufinalize reassembly", format_white_space, indent);
284  break;
285  case HANDOFF:
286  s =
287  format (s, "handoff from thread #%u to thread #%u", t->thread_id,
288  t->thread_id_to);
289  break;
290  }
291  return s;
292 }
293 
/* Record a packet-trace entry for buffer bi describing 'action' on the
 * given reassembly (or a NULL-reass handoff: reass_id is set to ~0).
 * Increments reass->trace_op_counter per recorded op.
 * NOTE(review): doxygen scrape — the name/first-parameter lines (295-296),
 * the vnb declaration (302), and the trace-details/range lines (319) are
 * missing from this listing; recover before editing. */
294 static void
297  ip6_full_reass_t * reass, u32 bi,
298  ip6_full_reass_trace_operation_e action,
299  u32 thread_id_to)
300 {
301  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
303  ip6_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
304  if (reass)
305  {
306  t->reass_id = reass->id;
307  t->op_id = reass->trace_op_counter;
308  t->trace_range.first_bi = reass->first_bi;
309  t->total_data_len = reass->data_len;
310  ++reass->trace_op_counter;
311  }
312  else
313  {
314  t->reass_id = ~0;
315  }
316  t->action = action;
317  t->thread_id = vm->thread_index;
318  t->thread_id_to = thread_id_to;
320  t->fragment_first = vnb->ip.reass.fragment_first;
321  t->fragment_last = vnb->ip.reass.fragment_last;
// debug-only dump of each trace record to stdout, disabled by #if 0
322 #if 0
323  static u8 *s = NULL;
324  s = format (s, "%U", format_ip6_full_reass_trace, NULL, NULL, t);
325  printf ("%.*s\n", vec_len (s), s);
326  fflush (stdout);
327  vec_reset_length (s);
328 #endif
329 }
330 
/* Return a reassembly context to its per-thread pool and decrement the
 * per-thread active-reassembly counter.
 * NOTE(review): doxygen scrape — the name/rt-parameter line (332) is
 * missing; recover before editing. */
331 always_inline void
333  ip6_full_reass_t * reass)
334 {
335  pool_put (rt->pool, reass);
336  --rt->reass_n;
337 }
338 
/* Remove the reassembly's 6-word key from the bihash (add_del with
 * is_add=0) and free its pool context.
 * NOTE(review): doxygen scrape — the name/parameter lines (340-341) and
 * the kv declaration (344) are missing; recover before editing. */
339 always_inline void
342  ip6_full_reass_t * reass)
343 {
345  kv.key[0] = reass->key.as_u64[0];
346  kv.key[1] = reass->key.as_u64[1];
347  kv.key[2] = reass->key.as_u64[2];
348  kv.key[3] = reass->key.as_u64[3];
349  kv.key[4] = reass->key.as_u64[4];
350  kv.key[5] = reass->key.as_u64[5];
351  clib_bihash_add_del_48_8 (&rm->hash, &kv, 0);
352  ip6_full_reass_free_ctx (rt, reass);
353 }
354 
/* Collect every buffer of every range chained off reass->first_bi
 * (walking both the range list via next_range_bi and each range's
 * buffer chain via next_buffer), then either enqueue them all to the
 * custom app's error_next_index or free them outright.
 * NOTE(review): doxygen scrape — the function name/parameter lines
 * (356-357) are missing; recover before editing. */
355 always_inline void
358 {
359  u32 range_bi = reass->first_bi;
360  vlib_buffer_t *range_b;
361  vnet_buffer_opaque_t *range_vnb;
362  u32 *to_free = NULL;
363  while (~0 != range_bi)
364  {
365  range_b = vlib_get_buffer (vm, range_bi);
366  range_vnb = vnet_buffer (range_b);
367  u32 bi = range_bi;
// inner walk: unlink and gather each buffer of this range's chain
368  while (~0 != bi)
369  {
370  vec_add1 (to_free, bi);
371  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
372  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
373  {
374  bi = b->next_buffer;
375  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
376  }
377  else
378  {
379  bi = ~0;
380  }
381  }
382  range_bi = range_vnb->ip.reass.next_range_bi;
383  }
384  /* send to next_error_index */
385  if (~0 != reass->error_next_index)
386  {
387  u32 n_left_to_next, *to_next, next_index;
388 
389  next_index = reass->error_next_index;
390  u32 bi = ~0;
391 
392  while (vec_len (to_free) > 0)
393  {
394  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
395 
396  while (vec_len (to_free) > 0 && n_left_to_next > 0)
397  {
398  bi = vec_pop (to_free);
399 
400  if (~0 != bi)
401  {
402  to_next[0] = bi;
403  to_next += 1;
404  n_left_to_next -= 1;
405  }
406  }
407  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
408  }
409  }
410  else
411  {
412  vlib_buffer_free (vm, to_free, vec_len (to_free));
413  }
414  vec_free (to_free);
415 }
416 
/* Timeout handler for a reassembly: when a zero-offset fragment was
 * received and no custom app is attached, detach the first buffer,
 * prime it as an ICMP6 time-exceeded error (per RFC 8200 reassembly
 * timeout behavior) and hand its index back via *icmp_bi; then drop
 * all remaining buffers of the reassembly.
 * NOTE(review): doxygen scrape — the name/parameter lines (418-419) and
 * the trace-action argument line (435) are missing; recover before
 * editing. */
417 always_inline void
420  ip6_full_reass_t * reass, u32 * icmp_bi)
421 {
422  if (~0 == reass->first_bi)
423  {
424  return;
425  }
426  if (~0 == reass->next_index) // custom apps don't want icmp
427  {
428  vlib_buffer_t *b = vlib_get_buffer (vm, reass->first_bi);
429  if (0 == vnet_buffer (b)->ip.reass.fragment_first)
430  {
431  *icmp_bi = reass->first_bi;
432  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
433  {
434  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
436  }
437  // fragment with offset zero received - send icmp message back
438  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
439  {
440  // separate first buffer from chain and steer it towards icmp node
441  b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
442  reass->first_bi = b->next_buffer;
443  }
444  else
445  {
446  reass->first_bi = vnet_buffer (b)->ip.reass.next_range_bi;
447  }
448  icmp6_error_set_vnet_buffer (b, ICMP6_time_exceeded,
449  ICMP6_time_exceeded_fragment_reassembly_time_exceeded,
450  0);
451  }
452  }
453  ip6_full_reass_drop_all (vm, node, rm, reass);
454 }
455 
460  ip6_full_reass_kv_t * kv, u32 * icmp_bi,
461  u8 * do_handoff)
462 {
463  ip6_full_reass_t *reass;
464  f64 now;
465 
466 again:
467 
468  reass = NULL;
469  now = vlib_time_now (vm);
470 
471  if (!clib_bihash_search_48_8
472  (&rm->hash, (clib_bihash_kv_48_8_t *) kv, (clib_bihash_kv_48_8_t *) kv))
473  {
474  if (vm->thread_index != kv->v.memory_owner_thread_index)
475  {
476  *do_handoff = 1;
477  return NULL;
478  }
479 
480  reass =
482  [kv->v.memory_owner_thread_index].pool,
483  kv->v.reass_index);
484 
485  if (now > reass->last_heard + rm->timeout)
486  {
487  ip6_full_reass_on_timeout (vm, node, rm, reass, icmp_bi);
488  ip6_full_reass_free (rm, rt, reass);
489  reass = NULL;
490  }
491  }
492 
493  if (reass)
494  {
495  reass->last_heard = now;
496  return reass;
497  }
498 
499  if (rt->reass_n >= rm->max_reass_n)
500  {
501  reass = NULL;
502  return reass;
503  }
504  else
505  {
506  pool_get (rt->pool, reass);
507  clib_memset (reass, 0, sizeof (*reass));
508  reass->id = ((u64) vm->thread_index * 1000000000) + rt->id_counter;
509  ++rt->id_counter;
510  reass->first_bi = ~0;
511  reass->last_packet_octet = ~0;
512  reass->data_len = 0;
513  reass->next_index = ~0;
514  reass->error_next_index = ~0;
515  ++rt->reass_n;
516  }
517 
518  reass->key.as_u64[0] = ((clib_bihash_kv_48_8_t *) kv)->key[0];
519  reass->key.as_u64[1] = ((clib_bihash_kv_48_8_t *) kv)->key[1];
520  reass->key.as_u64[2] = ((clib_bihash_kv_48_8_t *) kv)->key[2];
521  reass->key.as_u64[3] = ((clib_bihash_kv_48_8_t *) kv)->key[3];
522  reass->key.as_u64[4] = ((clib_bihash_kv_48_8_t *) kv)->key[4];
523  reass->key.as_u64[5] = ((clib_bihash_kv_48_8_t *) kv)->key[5];
524  kv->v.reass_index = (reass - rt->pool);
525  kv->v.memory_owner_thread_index = vm->thread_index;
526  reass->last_heard = now;
527 
528  int rv =
529  clib_bihash_add_del_48_8 (&rm->hash, (clib_bihash_kv_48_8_t *) kv, 2);
530  if (rv)
531  {
532  ip6_full_reass_free (rm, rt, reass);
533  reass = NULL;
534  // if other worker created a context already work with the other copy
535  if (-2 == rv)
536  goto again;
537  }
538 
539  return reass;
540 }
541 
/* Splice the collected fragments into one contiguous IPv6 packet:
 * walk the range list from reass->first_bi, trim per-buffer front/end
 * bytes so only payload remains, re-link the kept buffers into a single
 * chain, strip the fragment header (memmove) and patch next_hdr /
 * payload_length, linearize, then free the reassembly context.
 * On any structural inconsistency jumps to free_buffers_and_return with
 * rv != IP6_FULL_REASS_RC_OK.
 * NOTE(review): doxygen scrape — the name/parameter lines (543-545),
 * every 'rv = IP6_FULL_REASS_RC_*' assignment (567, 581, 590, 608, 640,
 * 651, 674, 681, 705, 717), the ip6_header_t declaration (689), the
 * hexdump length args (735), and the !is_custom_app next0 assignment
 * (754) are missing from this listing; recover before editing. */
542 always_inline ip6_full_reass_rc_t
546  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
547  u32 * error0, bool is_custom_app)
548 {
549  *bi0 = reass->first_bi;
550  *error0 = IP6_ERROR_NONE;
551  ip6_frag_hdr_t *frag_hdr;
552  vlib_buffer_t *last_b = NULL;
553  u32 sub_chain_bi = reass->first_bi;
554  u32 total_length = 0;
555  u32 buf_cnt = 0;
556  u32 dropped_cnt = 0;
557  u32 *vec_drop_compress = NULL;
558  ip6_full_reass_rc_t rv = IP6_FULL_REASS_RC_OK;
// outer loop: one iteration per range in the reassembly's range list
559  do
560  {
561  u32 tmp_bi = sub_chain_bi;
562  vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
563  vnet_buffer_opaque_t *vnb = vnet_buffer (tmp);
564  if (!(vnb->ip.reass.range_first >= vnb->ip.reass.fragment_first) &&
565  !(vnb->ip.reass.range_last > vnb->ip.reass.fragment_first))
566  {
568  goto free_buffers_and_return;
569  }
570 
571  u32 data_len = ip6_full_reass_buffer_get_data_len (tmp);
572  u32 trim_front = vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
573  sizeof (*frag_hdr) + ip6_full_reass_buffer_get_data_offset (tmp);
574  u32 trim_end =
575  vlib_buffer_length_in_chain (vm, tmp) - trim_front - data_len;
576  if (tmp_bi == reass->first_bi)
577  {
578  /* first buffer - keep ip6 header */
579  if (0 != ip6_full_reass_buffer_get_data_offset (tmp))
580  {
582  goto free_buffers_and_return;
583  }
584  trim_front = 0;
585  trim_end = vlib_buffer_length_in_chain (vm, tmp) - data_len -
586  (vnet_buffer (tmp)->ip.reass.ip6_frag_hdr_offset +
587  sizeof (*frag_hdr));
588  if (!(vlib_buffer_length_in_chain (vm, tmp) - trim_end > 0))
589  {
591  goto free_buffers_and_return;
592  }
593  }
594  u32 keep_data =
595  vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
// inner loop: walk this range's buffer chain, trimming and re-linking
596  while (1)
597  {
598  ++buf_cnt;
599  if (trim_front)
600  {
601  if (trim_front > tmp->current_length)
602  {
603  /* drop whole buffer */
604  vec_add1 (vec_drop_compress, tmp_bi);
605  trim_front -= tmp->current_length;
606  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
607  {
609  goto free_buffers_and_return;
610  }
611  tmp->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
612  tmp_bi = tmp->next_buffer;
613  tmp = vlib_get_buffer (vm, tmp_bi);
614  continue;
615  }
616  else
617  {
618  vlib_buffer_advance (tmp, trim_front);
619  trim_front = 0;
620  }
621  }
622  if (keep_data)
623  {
624  if (last_b)
625  {
626  last_b->flags |= VLIB_BUFFER_NEXT_PRESENT;
627  last_b->next_buffer = tmp_bi;
628  }
629  last_b = tmp;
630  if (keep_data <= tmp->current_length)
631  {
632  tmp->current_length = keep_data;
633  keep_data = 0;
634  }
635  else
636  {
637  keep_data -= tmp->current_length;
638  if (!(tmp->flags & VLIB_BUFFER_NEXT_PRESENT))
639  {
641  goto free_buffers_and_return;
642  }
643  }
644  total_length += tmp->current_length;
645  }
646  else
647  {
648  vec_add1 (vec_drop_compress, tmp_bi);
649  if (reass->first_bi == tmp_bi)
650  {
652  goto free_buffers_and_return;
653  }
654  ++dropped_cnt;
655  }
656  if (tmp->flags & VLIB_BUFFER_NEXT_PRESENT)
657  {
658  tmp_bi = tmp->next_buffer;
659  tmp = vlib_get_buffer (vm, tmp->next_buffer);
660  }
661  else
662  {
663  break;
664  }
665  }
666  sub_chain_bi =
667  vnet_buffer (vlib_get_buffer (vm, sub_chain_bi))->ip.
668  reass.next_range_bi;
669  }
670  while (~0 != sub_chain_bi);
671 
672  if (!last_b)
673  {
675  goto free_buffers_and_return;
676  }
677  last_b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
678  vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
679  if (total_length < first_b->current_length)
680  {
682  goto free_buffers_and_return;
683  }
684  total_length -= first_b->current_length;
685  first_b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
686  first_b->total_length_not_including_first_buffer = total_length;
687  // drop fragment header
688  vnet_buffer_opaque_t *first_b_vnb = vnet_buffer (first_b);
690  u16 ip6_frag_hdr_offset = first_b_vnb->ip.reass.ip6_frag_hdr_offset;
691  ip6_ext_header_t *prev_hdr;
692  frag_hdr =
693  ip6_ext_header_find (vm, first_b, ip, IP_PROTOCOL_IPV6_FRAGMENTATION,
694  &prev_hdr);
695  if (prev_hdr)
696  {
697  prev_hdr->next_hdr = frag_hdr->next_hdr;
698  }
699  else
700  {
701  ip->protocol = frag_hdr->next_hdr;
702  }
703  if (!((u8 *) frag_hdr - (u8 *) ip == ip6_frag_hdr_offset))
704  {
706  goto free_buffers_and_return;
707  }
// shift the remainder of the first buffer left over the fragment header
708  memmove (frag_hdr, (u8 *) frag_hdr + sizeof (*frag_hdr),
709  first_b->current_length - ip6_frag_hdr_offset -
710  sizeof (ip6_frag_hdr_t));
711  first_b->current_length -= sizeof (*frag_hdr);
712  ip->payload_length =
713  clib_host_to_net_u16 (total_length + first_b->current_length -
714  sizeof (*ip));
715  if (!vlib_buffer_chain_linearize (vm, first_b))
716  {
718  goto free_buffers_and_return;
719  }
720  first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
721  if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
722  {
723  ip6_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
724  FINALIZE, ~0);
725 #if 0
726  // following code does a hexdump of packet fragments to stdout ...
727  do
728  {
729  u32 bi = reass->first_bi;
730  u8 *s = NULL;
731  while (~0 != bi)
732  {
733  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
734  s = format (s, "%u: %U\n", bi, format_hexdump,
736  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
737  {
738  bi = b->next_buffer;
739  }
740  else
741  {
742  break;
743  }
744  }
745  printf ("%.*s\n", vec_len (s), s);
746  fflush (stdout);
747  vec_free (s);
748  }
749  while (0);
750 #endif
751  }
752  if (!is_custom_app)
753  {
755  }
756  else
757  {
758  *next0 = reass->next_index;
759  }
760  vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
761  ip6_full_reass_free (rm, rt, reass);
762  reass = NULL;
763 free_buffers_and_return:
764  vlib_buffer_free (vm, vec_drop_compress, vec_len (vec_drop_compress));
765  vec_free (vec_drop_compress);
766  return rv;
767 }
768 
/* Insert buffer new_next_bi into the reassembly's sorted range list,
 * either after prev_range_bi or at the head when prev_range_bi == ~0,
 * and account its usable bytes into reass->data_len.
 * NOTE(review): doxygen scrape — the name/first-parameter lines
 * (770-772) are missing; recover before editing. */
769 always_inline void
773  ip6_full_reass_t * reass,
774  u32 prev_range_bi, u32 new_next_bi)
775 {
776 
777  vlib_buffer_t *new_next_b = vlib_get_buffer (vm, new_next_bi);
778  vnet_buffer_opaque_t *new_next_vnb = vnet_buffer (new_next_b);
779  if (~0 != prev_range_bi)
780  {
781  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_range_bi);
782  vnet_buffer_opaque_t *prev_vnb = vnet_buffer (prev_b);
783  new_next_vnb->ip.reass.next_range_bi = prev_vnb->ip.reass.next_range_bi;
784  prev_vnb->ip.reass.next_range_bi = new_next_bi;
785  }
786  else
787  {
// head insertion: link the old head (if any) behind the new range
788  if (~0 != reass->first_bi)
789  {
790  new_next_vnb->ip.reass.next_range_bi = reass->first_bi;
791  }
792  reass->first_bi = new_next_bi;
793  }
794  reass->data_len += ip6_full_reass_buffer_get_data_len (new_next_b);
795 }
796 
/* Process one fragment against an existing reassembly: compute its
 * [fragment_first, fragment_last] span from the fragment header, insert
 * it into the sorted range list (dropping exact duplicates, aborting the
 * whole reassembly on any overlap — RFC 8200 disallows overlaps), and
 * finalize once data_len covers last_packet_octet + 1.
 * NOTE(review): doxygen scrape — the name/parameter lines (798-800), the
 * 'fip' declaration (816), the invalid-frag-hdr return (821), the
 * HANDOFF trace call (896), the handoff computation (916, 922), the
 * TOO_MANY_FRAGMENTS return (933), and the duplicate trace line (938)
 * are missing; recover before editing. */
797 always_inline ip6_full_reass_rc_t
801  ip6_full_reass_t * reass, u32 * bi0, u32 * next0,
802  u32 * error0, ip6_frag_hdr_t * frag_hdr,
803  bool is_custom_app, u32 * handoff_thread_idx)
804 {
805  int consumed = 0;
806  vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
807  vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
808  if (is_custom_app)
809  {
810  reass->next_index = fvnb->ip.reass.next_index; // store next_index before it's overwritten
811  reass->error_next_index = fvnb->ip.reass.error_next_index; // store error_next_index before it is overwritten
812  }
813 
814  fvnb->ip.reass.ip6_frag_hdr_offset =
815  (u8 *) frag_hdr - (u8 *) vlib_buffer_get_current (fb);
817  if (fb->current_length < sizeof (*fip) ||
818  fvnb->ip.reass.ip6_frag_hdr_offset == 0 ||
819  fvnb->ip.reass.ip6_frag_hdr_offset >= fb->current_length)
820  {
822  }
823 
824  u32 fragment_first = fvnb->ip.reass.fragment_first =
825  ip6_frag_hdr_offset_bytes (frag_hdr);
826  u32 fragment_length =
827  vlib_buffer_length_in_chain (vm, fb) -
828  (fvnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
829  u32 fragment_last = fvnb->ip.reass.fragment_last =
830  fragment_first + fragment_length - 1;
831  int more_fragments = ip6_frag_hdr_more (frag_hdr);
832  u32 candidate_range_bi = reass->first_bi;
833  u32 prev_range_bi = ~0;
834  fvnb->ip.reass.range_first = fragment_first;
835  fvnb->ip.reass.range_last = fragment_last;
836  fvnb->ip.reass.next_range_bi = ~0;
837  if (!more_fragments)
838  {
839  reass->last_packet_octet = fragment_last;
840  }
841  if (~0 == reass->first_bi)
842  {
843  // starting a new reassembly
844  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass, prev_range_bi,
845  *bi0);
846  reass->min_fragment_length = clib_net_to_host_u16 (fip->payload_length);
847  consumed = 1;
848  reass->fragments_n = 1;
849  goto check_if_done_maybe;
850  }
851  reass->min_fragment_length =
852  clib_min (clib_net_to_host_u16 (fip->payload_length),
853  fvnb->ip.reass.estimated_mtu);
854  while (~0 != candidate_range_bi)
855  {
856  vlib_buffer_t *candidate_b = vlib_get_buffer (vm, candidate_range_bi);
857  vnet_buffer_opaque_t *candidate_vnb = vnet_buffer (candidate_b);
858  if (fragment_first > candidate_vnb->ip.reass.range_last)
859  {
860  // this fragments starts after candidate range
861  prev_range_bi = candidate_range_bi;
862  candidate_range_bi = candidate_vnb->ip.reass.next_range_bi;
863  if (candidate_vnb->ip.reass.range_last < fragment_last &&
864  ~0 == candidate_range_bi)
865  {
866  // special case - this fragment falls beyond all known ranges
867  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
868  prev_range_bi, *bi0);
869  consumed = 1;
870  break;
871  }
872  continue;
873  }
874  if (fragment_last < candidate_vnb->ip.reass.range_first)
875  {
876  // this fragment ends before candidate range without any overlap
877  ip6_full_reass_insert_range_in_chain (vm, rm, rt, reass,
878  prev_range_bi, *bi0);
879  consumed = 1;
880  }
881  else if (fragment_first == candidate_vnb->ip.reass.range_first &&
882  fragment_last == candidate_vnb->ip.reass.range_last)
883  {
884  // duplicate fragment - ignore
885  }
886  else
887  {
888  // overlapping fragment - not allowed by RFC 8200
889  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
890  {
891  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
892  RANGE_OVERLAP, ~0);
893  }
894  ip6_full_reass_drop_all (vm, node, rm, reass);
895  ip6_full_reass_free (rm, rt, reass);
897  *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
898  return IP6_FULL_REASS_RC_OK;
899  }
900  break;
901  }
902  ++reass->fragments_n;
903 check_if_done_maybe:
904  if (consumed)
905  {
906  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
907  {
908  ip6_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, ~0);
909  }
910  }
// complete when the final octet is known and all bytes are collected
911  if (~0 != reass->last_packet_octet &&
912  reass->data_len == reass->last_packet_octet + 1)
913  {
914  *handoff_thread_idx = reass->sendout_thread_index;
915  int handoff =
917  ip6_full_reass_rc_t rc =
918  ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
919  is_custom_app);
920  if (IP6_FULL_REASS_RC_OK == rc && handoff)
921  {
923  }
924  return rc;
925  }
926  else
927  {
928  if (consumed)
929  {
930  *bi0 = ~0;
931  if (reass->fragments_n > rm->max_reass_len)
932  {
934  }
935  }
936  else
937  {
939  *error0 = IP6_ERROR_REASS_DUPLICATE_FRAGMENT;
940  }
941  }
942  return IP6_FULL_REASS_RC_OK;
943 }
944 
/* For a first fragment, walk the extension-header chain starting at the
 * fragment header; if it terminates in IP6_NONXT (no upper-layer header
 * present), prime the buffer as an ICMP6 parameter-problem error and
 * return false.
 * NOTE(review): doxygen scrape — the function name line (946) is
 * missing; recover before editing. */
945 always_inline bool
947  vlib_buffer_t * b,
948  ip6_frag_hdr_t * frag_hdr)
949 {
950  ip6_ext_header_t *tmp = (ip6_ext_header_t *) frag_hdr;
951  while (ip6_ext_hdr (tmp->next_hdr))
952  {
953  tmp = ip6_ext_next_header (tmp);
954  }
955  if (IP_PROTOCOL_IP6_NONXT == tmp->next_hdr)
956  {
957  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
958  ICMP6_parameter_problem_first_fragment_has_incomplete_header_chain,
959  0);
960  b->error = node->errors[IP6_ERROR_REASS_MISSING_UPPER];
961 
962  return false;
963  }
964  return true;
965 }
966 
/* Non-final fragments must carry a multiple of 8 bytes of payload
 * (RFC 8200); otherwise prime an ICMP6 parameter-problem pointing at
 * payload_length and return false.
 * NOTE(review): doxygen scrape — the function name line (968), the
 * ip0/vnb declarations (973-974) and part of the length expression
 * (977) are missing; recover before editing. */
967 always_inline bool
969  vlib_node_runtime_t * node,
970  vlib_buffer_t * b,
971  ip6_frag_hdr_t * frag_hdr)
972 {
975  int more_fragments = ip6_frag_hdr_more (frag_hdr);
976  u32 fragment_length =
978  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
979  if (more_fragments && 0 != fragment_length % 8)
980  {
981  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
982  ICMP6_parameter_problem_erroneous_header_field,
983  (u8 *) & ip->payload_length - (u8 *) ip);
984  return false;
985  }
986  return true;
987 }
988 
/* Reject fragments whose offset + length would exceed the 65535-byte
 * IPv6 payload limit; primes an ICMP6 parameter-problem pointing at the
 * fragment offset field and returns false in that case.
 * NOTE(review): doxygen scrape — the function name line (990), the
 * ip0/vnb declarations (995), part of the length expression (998) and
 * line 1002 are missing; recover before editing. */
989 always_inline bool
991  vlib_node_runtime_t * node,
992  vlib_buffer_t * b,
993  ip6_frag_hdr_t * frag_hdr)
994 {
996  u32 fragment_first = ip6_frag_hdr_offset_bytes (frag_hdr);
997  u32 fragment_length =
999  (vnb->ip.reass.ip6_frag_hdr_offset + sizeof (*frag_hdr));
1000  if (fragment_first + fragment_length > 65535)
1001  {
1003  icmp6_error_set_vnet_buffer (b, ICMP6_parameter_problem,
1004  ICMP6_parameter_problem_erroneous_header_field,
1005  (u8 *) & frag_hdr->fragment_offset_and_more
1006  - (u8 *) ip0);
1007  return false;
1008  }
1009  return true;
1010 }
1011 
/* Shared packet-processing loop for all ip6 full-reassembly node
 * flavors.  Under the per-thread spinlock, for each buffer: locate the
 * fragment header, sanity-check the fragment, build the 6x u64 bihash
 * key (src addr, dst addr, sw_if_index + identification, protocol),
 * find/create the reassembly context, and either hand the packet off to
 * the owning thread, feed it to ip6_full_reass_update, or drop it when
 * the reassembly limit is hit.  ICMP error buffers produced on timeout
 * are enqueued separately via icmp_bi.
 * NOTE(review): doxygen scrape — the function name/first lines
 * (1012-1013), rm/rt declarations (1020-1021), next0 declaration (1034),
 * ip0 declaration (1041), several next0/counter assignment lines (1054,
 * 1060, 1063, 1067, 1072, 1078, 1086, 1105, 1108, 1111), the non-OK
 * switch case labels (1123-1124, 1133-1134, 1141-1142, 1148-1149),
 * line 1162, and line 1209 are missing; recover before editing. */
1014  vlib_node_runtime_t * node,
1015  vlib_frame_t * frame, bool is_feature,
1016  bool is_custom_app)
1017 {
1018  u32 *from = vlib_frame_vector_args (frame);
1019  u32 n_left_from, n_left_to_next, *to_next, next_index;
1022  clib_spinlock_lock (&rt->lock);
1023 
1024  n_left_from = frame->n_vectors;
1025  next_index = node->cached_next_index;
1026  while (n_left_from > 0)
1027  {
1028  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1029 
1030  while (n_left_from > 0 && n_left_to_next > 0)
1031  {
1032  u32 bi0;
1033  vlib_buffer_t *b0;
1035  u32 error0 = IP6_ERROR_NONE;
1036  u32 icmp_bi = ~0;
1037 
1038  bi0 = from[0];
1039  b0 = vlib_get_buffer (vm, bi0);
1040 
1042  ip6_frag_hdr_t *frag_hdr = NULL;
1043  ip6_ext_header_t *prev_hdr;
1044  if (ip6_ext_hdr (ip0->protocol))
1045  {
1046  frag_hdr =
1047  ip6_ext_header_find (vm, b0, ip0,
1048  IP_PROTOCOL_IPV6_FRAGMENTATION,
1049  &prev_hdr);
1050  }
1051  if (!frag_hdr)
1052  {
1053  // this is a regular packet - no fragmentation
1055  goto skip_reass;
1056  }
1057  if (0 == ip6_frag_hdr_offset (frag_hdr))
1058  {
1059  // first fragment - verify upper-layer is present
1061  (node, b0, frag_hdr))
1062  {
1064  goto skip_reass;
1065  }
1066  }
1068  (vm, node, b0, frag_hdr)
1069  || !ip6_full_reass_verify_packet_size_lt_64k (vm, node, b0,
1070  frag_hdr))
1071  {
1073  goto skip_reass;
1074  }
1075  vnet_buffer (b0)->ip.reass.ip6_frag_hdr_offset =
1076  (u8 *) frag_hdr - (u8 *) ip0;
1077 
1079  u8 do_handoff = 0;
1080 
// reassembly key: addresses + (sw_if_index, identification) + protocol
1081  kv.k.as_u64[0] = ip0->src_address.as_u64[0];
1082  kv.k.as_u64[1] = ip0->src_address.as_u64[1];
1083  kv.k.as_u64[2] = ip0->dst_address.as_u64[0];
1084  kv.k.as_u64[3] = ip0->dst_address.as_u64[1];
1085  kv.k.as_u64[4] =
1087  vnet_buffer (b0)->sw_if_index[VLIB_RX])) << 32 |
1088  (u64) frag_hdr->identification;
1089  kv.k.as_u64[5] = ip0->protocol;
1090 
1091  ip6_full_reass_t *reass =
1092  ip6_full_reass_find_or_create (vm, node, rm, rt, &kv, &icmp_bi,
1093  &do_handoff);
1094 
1095  if (reass)
1096  {
1097  const u32 fragment_first = ip6_frag_hdr_offset (frag_hdr);
1098  if (0 == fragment_first)
1099  {
// the thread that saw offset 0 will send the completed packet
1100  reass->sendout_thread_index = vm->thread_index;
1101  }
1102  }
1103  if (PREDICT_FALSE (do_handoff))
1104  {
1106  if (is_feature)
1107  vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
1109  else
1110  vnet_buffer (b0)->ip.reass.owner_thread_index =
1112  }
1113  else if (reass)
1114  {
1115  u32 handoff_thread_idx;
1116  switch (ip6_full_reass_update
1117  (vm, node, rm, rt, reass, &bi0, &next0, &error0,
1118  frag_hdr, is_custom_app, &handoff_thread_idx))
1119  {
1120  case IP6_FULL_REASS_RC_OK:
1121  /* nothing to do here */
1122  break;
1125  b0 = vlib_get_buffer (vm, bi0);
1126  if (is_feature)
1127  vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
1128  handoff_thread_idx;
1129  else
1130  vnet_buffer (b0)->ip.reass.owner_thread_index =
1131  handoff_thread_idx;
1132  break;
1135  IP6_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
1136  1);
1137  ip6_full_reass_drop_all (vm, node, rm, reass);
1138  ip6_full_reass_free (rm, rt, reass);
1139  goto next_packet;
1140  break;
1143  IP6_ERROR_REASS_NO_BUF, 1);
1144  ip6_full_reass_drop_all (vm, node, rm, reass);
1145  ip6_full_reass_free (rm, rt, reass);
1146  goto next_packet;
1147  break;
1150  IP6_ERROR_REASS_INTERNAL_ERROR,
1151  1);
1152  ip6_full_reass_drop_all (vm, node, rm, reass);
1153  ip6_full_reass_free (rm, rt, reass);
1154  goto next_packet;
1155  break;
1156  }
1157  }
1158  else
1159  {
// no context available: reassembly limit reached
1160  if (is_feature)
1161  {
1163  }
1164  else
1165  {
1166  vnet_buffer_opaque_t *fvnb = vnet_buffer (b0);
1167  next0 = fvnb->ip.reass.error_next_index;
1168  }
1169  error0 = IP6_ERROR_REASS_LIMIT_REACHED;
1170  }
1171 
1172  if (~0 != bi0)
1173  {
1174  skip_reass:
1175  to_next[0] = bi0;
1176  to_next += 1;
1177  n_left_to_next -= 1;
1178 
1179  /* bi0 might have been updated by reass_finalize, reload */
1180  b0 = vlib_get_buffer (vm, bi0);
1181  b0->error = node->errors[error0];
1182 
1183  if (next0 == IP6_FULL_REASSEMBLY_NEXT_HANDOFF)
1184  {
1185  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
1186  {
1187  if (is_feature)
1188  ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
1189  HANDOFF,
1190  vnet_buffer (b0)->ip.
1191  reass.owner_feature_thread_index);
1192  else
1193  ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
1194  HANDOFF,
1195  vnet_buffer (b0)->ip.
1196  reass.owner_thread_index);
1197  }
1198  }
1199  else if (is_feature && IP6_ERROR_NONE == error0)
1200  {
1201  vnet_feature_next (&next0, b0);
1202  }
1203  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1204  n_left_to_next, bi0, next0);
1205  }
1206 
1207  if (~0 != icmp_bi)
1208  {
1210  to_next[0] = icmp_bi;
1211  to_next += 1;
1212  n_left_to_next -= 1;
1213  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1214  n_left_to_next, icmp_bi,
1215  next0);
1216  }
1217  next_packet:
1218  from += 1;
1219  n_left_from -= 1;
1220  }
1221 
1222  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1223  }
1224 
1225  clib_spinlock_unlock (&rt->lock);
1226  return frame->n_vectors;
1227 }
1228 
1230 #define _(sym, string) string,
1232 #undef _
1233 };
1234 
/* Node function for plain "ip6-full-reassembly": delegates to the
 * shared inline with is_feature=false, is_custom_app=false.
 * NOTE(review): doxygen scrape — the VLIB_NODE_FN declaration line
 * (1235) is missing; recover before editing. */
1236  vlib_node_runtime_t * node,
1237  vlib_frame_t * frame)
1238 {
1239  return ip6_full_reassembly_inline (vm, node, frame, false /* is_feature */ ,
1240  false /* is_custom_app */ );
1241 }
1242 
1243 /* *INDENT-OFF* */
/* Registration of the "ip6-full-reassembly" graph node and its four
 * next nodes (input / drop / icmp-error / handoff).
 * NOTE(review): doxygen scrape — the VLIB_REGISTER_NODE declaration
 * line (1244) is missing; recover before editing. */
1245  .name = "ip6-full-reassembly",
1246  .vector_size = sizeof (u32),
1247  .format_trace = format_ip6_full_reass_trace,
1248  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1249  .error_strings = ip6_full_reassembly_error_strings,
1250  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1251  .next_nodes =
1252  {
1253  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1254  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1255  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1256  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reassembly-handoff",
1257  },
1258 };
1259 /* *INDENT-ON* */
1260 
/* Node function for the feature-arc flavor: delegates to the shared
 * inline with is_feature=true, is_custom_app=false.
 * NOTE(review): doxygen scrape — the VLIB_NODE_FN declaration line
 * (1261) is missing; recover before editing. */
1262  vlib_node_runtime_t * node,
1263  vlib_frame_t * frame)
1264 {
1265  return ip6_full_reassembly_inline (vm, node, frame, true /* is_feature */ ,
1266  false /* is_custom_app */ );
1267 }
1268 
1269 /* *INDENT-OFF* */
/* Registration of the "ip6-full-reassembly-feature" node; identical next
 * nodes to the plain node except handoff goes to the feature handoff node.
 * NOTE(review): doxygen scrape — the VLIB_REGISTER_NODE declaration
 * line (1270) is missing; recover before editing. */
1271  .name = "ip6-full-reassembly-feature",
1272  .vector_size = sizeof (u32),
1273  .format_trace = format_ip6_full_reass_trace,
1274  .n_errors = ARRAY_LEN (ip6_full_reassembly_error_strings),
1275  .error_strings = ip6_full_reassembly_error_strings,
1276  .n_next_nodes = IP6_FULL_REASSEMBLY_N_NEXT,
1277  .next_nodes =
1278  {
1279  [IP6_FULL_REASSEMBLY_NEXT_INPUT] = "ip6-input",
1280  [IP6_FULL_REASSEMBLY_NEXT_DROP] = "ip6-drop",
1281  [IP6_FULL_REASSEMBLY_NEXT_ICMP_ERROR] = "ip6-icmp-error",
1282  [IP6_FULL_REASSEMBLY_NEXT_HANDOFF] = "ip6-full-reass-feature-hoff",
1283  },
1284 };
1285 /* *INDENT-ON* */
1286 
1287 /* *INDENT-OFF* */
1288 VNET_FEATURE_INIT (ip6_full_reassembly_feature, static) = {
1289  .arc_name = "ip6-unicast",
1290  .node_name = "ip6-full-reassembly-feature",
1291  .runs_before = VNET_FEATURES ("ip6-lookup",
1292  "ipsec6-input-feature"),
1293  .runs_after = 0,
1294 };
1295 /* *INDENT-ON* */
1296 
1297 #ifndef CLIB_MARCH_VARIANT
/* Compute the bihash bucket count: max_reass_n scaled by the load
 * factor (0.75), rounded up to the next power of two.
 * NOTE(review): doxygen scrape — the function name line (1299) and the
 * 'rm' declaration (1301) are missing; recover before editing. */
1298 static u32
1300 {
1302  u32 nbuckets;
1303  u8 i;
1304 
1305  nbuckets = (u32) (rm->max_reass_n / IP6_FULL_REASS_HT_LOAD_FACTOR);
1306 
// round up to power of two
1307  for (i = 0; i < 31; i++)
1308  if ((1 << i) >= nbuckets)
1309  break;
1310  nbuckets = 1 << i;
1311 
1312  return nbuckets;
1313 }
1314 #endif /* CLIB_MARCH_VARIANT */
1315 
1316 typedef enum
1317 {
1320 
1321 #ifndef CLIB_MARCH_VARIANT
1322 typedef struct
1323 {
1324  int failure;
1325  clib_bihash_48_8_t *new_hash;
1327 
/* Per-entry callback used while rehashing into a larger table: copy the
 * key/value pair into ctx->new_hash, recording failure in ctx->failure.
 * NOTE(review): doxygen scrape — the function name/parameter line
 * (1329) is missing; recover before editing. */
1328 static void
1330 {
1331  ip6_rehash_cb_ctx *ctx = _ctx;
1332  if (clib_bihash_add_del_48_8 (ctx->new_hash, kv, 1))
1333  {
1334  ctx->failure = 1;
1335  }
1336 }
1337 
1338 static void
1339 ip6_full_reass_set_params (u32 timeout_ms, u32 max_reassemblies,
1340  u32 max_reassembly_length,
1341  u32 expire_walk_interval_ms)
1342 {
1343  ip6_full_reass_main.timeout_ms = timeout_ms;
1344  ip6_full_reass_main.timeout = (f64) timeout_ms / (f64) MSEC_PER_SEC;
1345  ip6_full_reass_main.max_reass_n = max_reassemblies;
1346  ip6_full_reass_main.max_reass_len = max_reassembly_length;
1347  ip6_full_reass_main.expire_walk_interval_ms = expire_walk_interval_ms;
1348 }
1349 
/* Public API: apply new reassembly parameters, signal the expiry-walk process
 * so it picks up the new interval, and grow (rehash) the bihash if the new
 * max_reass_n requires more buckets. Returns -1 if rehashing fails.
 * NOTE(review): the vnet_api_error_t return-type line, the event arguments to
 * vlib_process_signal_event and the `ip6_rehash_cb_ctx ctx;` declaration are
 * missing from this listing — confirm against the original source. */
1351 ip6_full_reass_set (u32 timeout_ms, u32 max_reassemblies,
1352  u32 max_reassembly_length, u32 expire_walk_interval_ms)
1353 {
1354  u32 old_nbuckets = ip6_full_reass_get_nbuckets ();
1355  ip6_full_reass_set_params (timeout_ms, max_reassemblies,
1356  max_reassembly_length, expire_walk_interval_ms);
1357  vlib_process_signal_event (ip6_full_reass_main.vlib_main,
1358  ip6_full_reass_main.ip6_full_reass_expire_node_idx,
1360  u32 new_nbuckets = ip6_full_reass_get_nbuckets ();
/* only grow the table — shrinking is never attempted */
1361  if (ip6_full_reass_main.max_reass_n > 0 && new_nbuckets > old_nbuckets)
1362  {
1363  clib_bihash_48_8_t new_hash;
1364  clib_memset (&new_hash, 0, sizeof (new_hash));
1366  ctx.failure = 0;
1367  ctx.new_hash = &new_hash;
1368  clib_bihash_init_48_8 (&new_hash, "ip6-full-reass", new_nbuckets,
1369  new_nbuckets * 1024);
/* copy every existing entry into the bigger table */
1370  clib_bihash_foreach_key_value_pair_48_8 (&ip6_full_reass_main.hash,
1371  ip6_rehash_cb, &ctx);
1372  if (ctx.failure)
1373  {
1374  clib_bihash_free_48_8 (&new_hash);
1375  return -1;
1376  }
1377  else
1378  {
1379  clib_bihash_free_48_8 (&ip6_full_reass_main.hash);
1380  clib_memcpy_fast (&ip6_full_reass_main.hash, &new_hash,
1381  sizeof (ip6_full_reass_main.hash));
1382  clib_bihash_copied (&ip6_full_reass_main.hash, &new_hash);
1383  }
1384  }
1385  return 0;
1386 }
1387 
/* Public API: read back the current reassembly configuration into the
 * caller-supplied out-parameters. Always returns 0.
 * NOTE(review): the vnet_api_error_t return-type line is missing from this
 * listing. */
1389 ip6_full_reass_get (u32 * timeout_ms, u32 * max_reassemblies,
1390  u32 * max_reassembly_length,
1391  u32 * expire_walk_interval_ms)
1392 {
1393  *timeout_ms = ip6_full_reass_main.timeout_ms;
1394  *max_reassemblies = ip6_full_reass_main.max_reass_n;
1395  *max_reassembly_length = ip6_full_reass_main.max_reass_len;
1396  *expire_walk_interval_ms = ip6_full_reass_main.expire_walk_interval_ms;
1397  return 0;
1398 }
1399 
/* Plugin init: set default parameters, allocate per-thread pools and locks,
 * create the bihash, cache drop/icmp-error node indices, register for the
 * IPv6 fragmentation protocol and set up the handoff frame queues.
 * NOTE(review): several lines were dropped by extraction — the function-name
 * line, the `rm`/`rt` declarations, the default-parameter call, the
 * expire-node index assignment and the vlib_frame_queue_main_init calls —
 * confirm against the original source. */
1400 static clib_error_t *
1402 {
1404  clib_error_t *error = 0;
1405  u32 nbuckets;
1406  vlib_node_t *node;
1407 
1408  rm->vlib_main = vm;
1409 
1412  vec_foreach (rt, rm->per_thread_data)
1413  {
1414  clib_spinlock_init (&rt->lock);
1415  pool_alloc (rt->pool, rm->max_reass_n);
1416  }
1417 
1418  node = vlib_get_node_by_name (vm, (u8 *) "ip6-full-reassembly-expire-walk");
1419  ASSERT (node);
1421 
1426 
1427  nbuckets = ip6_full_reass_get_nbuckets ();
1428  clib_bihash_init_48_8 (&rm->hash, "ip6-full-reass", nbuckets,
1429  nbuckets * 1024);
1430 
/* cache node indices used on the error paths */
1431  node = vlib_get_node_by_name (vm, (u8 *) "ip6-drop");
1432  ASSERT (node);
1433  rm->ip6_drop_idx = node->index;
1434  node = vlib_get_node_by_name (vm, (u8 *) "ip6-icmp-error");
1435  ASSERT (node);
1436  rm->ip6_icmp_error_idx = node->index;
1437 
1438  if ((error = vlib_call_init_function (vm, ip_main_init)))
1439  return error;
1440  ip6_register_protocol (IP_PROTOCOL_IPV6_FRAGMENTATION,
1441  ip6_full_reass_node.index);
1442 
1444  rm->fq_feature_index =
1446 
1447  return error;
1448 }
1449 
1451 #endif /* CLIB_MARCH_VARIANT */
1452 
/* Expiry-walk process node: wakes up periodically (or on a config-changed
 * event), frees every reassembly whose last_heard is older than the timeout,
 * and sends any generated ICMP time-exceeded buffers to the icmp-error node.
 * NOTE(review): the function-name line, the `rm` declaration, the
 * vlib_process_wait_for_event_or_clock call, the config-changed case label,
 * the `rt` declaration and the vlib_get_frame_to_node/put calls are missing
 * from this listing — confirm against the original source. */
1453 static uword
1455  vlib_node_runtime_t * node, vlib_frame_t * f)
1456 {
1458  uword event_type, *event_data = 0;
1459 
1460  while (true)
1461  {
1464  / (f64) MSEC_PER_SEC);
1465  event_type = vlib_process_get_events (vm, &event_data);
1466 
1467  switch (event_type)
1468  {
1469  case ~0: /* no events => timeout */
1470  /* nothing to do here */
1471  break;
1473  break;
1474  default:
1475  clib_warning ("BUG: event type 0x%wx", event_type);
1476  break;
1477  }
1478  f64 now = vlib_time_now (vm);
1479 
1480  ip6_full_reass_t *reass;
1481  int *pool_indexes_to_free = NULL;
1482 
1483  uword thread_index = 0;
1484  int index;
1485  const uword nthreads = vlib_num_workers () + 1;
1486  u32 *vec_icmp_bi = NULL;
/* sweep every worker's pool under its spinlock */
1487  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1488  {
1490  &rm->per_thread_data[thread_index];
1491  clib_spinlock_lock (&rt->lock);
1492 
1493  vec_reset_length (pool_indexes_to_free);
/* first collect indices — freeing inside pool_foreach_index is unsafe */
1494  /* *INDENT-OFF* */
1495  pool_foreach_index (index, rt->pool, ({
1496  reass = pool_elt_at_index (rt->pool, index);
1497  if (now > reass->last_heard + rm->timeout)
1498  {
1499  vec_add1 (pool_indexes_to_free, index);
1500  }
1501  }));
1502  /* *INDENT-ON* */
1503  int *i;
1504  /* *INDENT-OFF* */
1505  vec_foreach (i, pool_indexes_to_free)
1506  {
1507  ip6_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
1508  u32 icmp_bi = ~0;
1509  ip6_full_reass_on_timeout (vm, node, rm, reass, &icmp_bi);
1510  if (~0 != icmp_bi)
1511  vec_add1 (vec_icmp_bi, icmp_bi);
1512 
1513  ip6_full_reass_free (rm, rt, reass);
1514  }
1515  /* *INDENT-ON* */
1516 
1517  clib_spinlock_unlock (&rt->lock);
1518  }
1519 
/* flush collected ICMP buffers, one frame at a time */
1520  while (vec_len (vec_icmp_bi) > 0)
1521  {
1522  vlib_frame_t *f =
1524  u32 *to_next = vlib_frame_vector_args (f);
1525  u32 n_left_to_next = VLIB_FRAME_SIZE - f->n_vectors;
1526  int trace_frame = 0;
1527  while (vec_len (vec_icmp_bi) > 0 && n_left_to_next > 0)
1528  {
1529  u32 bi = vec_pop (vec_icmp_bi);
1530  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1531  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
1532  trace_frame = 1;
1533  b->error = node->errors[IP6_ERROR_REASS_TIMEOUT];
1534  to_next[0] = bi;
1535  ++f->n_vectors;
1536  to_next += 1;
1537  n_left_to_next -= 1;
1538  }
1539  f->frame_flags |= (trace_frame * VLIB_FRAME_TRACE);
1541  }
1542 
1543  vec_free (pool_indexes_to_free);
1544  vec_free (vec_icmp_bi);
1545  if (event_data)
1546  {
/* keep the event vector allocated, just reset its length for reuse */
1547  _vec_len (event_data) = 0;
1548  }
1549  }
1550 
1551  return 0;
1552 }
1553 
/* Registration of the expiry-walk process node.
 * NOTE(review): the VLIB_REGISTER_NODE (ip6_full_reass_expire_node) header
 * line and the .n_errors line are missing from this listing. */
1554 /* *INDENT-OFF* */
1556  .function = ip6_full_reass_walk_expired,
1557  .format_trace = format_ip6_full_reass_trace,
1558  .type = VLIB_NODE_TYPE_PROCESS,
1559  .name = "ip6-full-reassembly-expire-walk",
1560 
1562  .error_strings = ip6_full_reassembly_error_strings,
1563 
1564 };
1565 /* *INDENT-ON* */
1566 
/* Pretty-print a reassembly hash key (fib/xx id, addresses, frag id, proto).
 * NOTE(review): the line carrying key->xx_id and the two format_ip6_address
 * arguments is missing from this listing. */
1567 static u8 *
1568 format_ip6_full_reass_key (u8 * s, va_list * args)
1569 {
1570  ip6_full_reass_key_t *key = va_arg (*args, ip6_full_reass_key_t *);
1571  s = format (s, "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
1573  &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
1574  return s;
1575 }
1576 
/* Pretty-print one reassembly context followed by every buffer range in its
 * chain (walks next_buffer links until VLIB_BUFFER_NEXT_PRESENT is clear).
 * NOTE(review): the lines passing ip6_full_reass_buffer_get_data_offset/len
 * to format are missing from this listing. */
1577 static u8 *
1578 format_ip6_full_reass (u8 * s, va_list * args)
1579 {
1580  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
1581  ip6_full_reass_t *reass = va_arg (*args, ip6_full_reass_t *);
1582 
1583  s = format (s, "ID: %lu, key: %U\n first_bi: %u, data_len: %u, "
1584  "last_packet_octet: %u, trace_op_counter: %u\n",
1585  reass->id, format_ip6_full_reass_key, &reass->key,
1586  reass->first_bi, reass->data_len, reass->last_packet_octet,
1587  reass->trace_op_counter);
1588  u32 bi = reass->first_bi;
1589  u32 counter = 0;
1590  while (~0 != bi)
1591  {
1592  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
1593  vnet_buffer_opaque_t *vnb = vnet_buffer (b);
1594  s = format (s, " #%03u: range: [%u, %u], bi: %u, off: %d, len: %u, "
1595  "fragment[%u, %u]\n",
1596  counter, vnb->ip.reass.range_first,
1597  vnb->ip.reass.range_last, bi,
1600  vnb->ip.reass.fragment_first, vnb->ip.reass.fragment_last);
1601  if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
1602  {
1603  bi = b->next_buffer;
1604  }
1605  else
1606  {
1607  bi = ~0;
1608  }
1609  }
1610  return s;
1611 }
1612 
/* CLI handler for "show ip6-full-reassembly [details]": prints per-thread
 * reassembly counts and, with "details", every active context.
 * NOTE(review): the signature line and the `rm` declaration are missing from
 * this listing. Also note sum_buffers_n is never incremented anywhere in
 * view, so "Buffers in use" appears to always print 0 — verify upstream. */
1613 static clib_error_t *
1616 {
1618 
1619  vlib_cli_output (vm, "---------------------");
1620  vlib_cli_output (vm, "IP6 reassembly status");
1621  vlib_cli_output (vm, "---------------------");
1622  bool details = false;
1623  if (unformat (input, "details"))
1624  {
1625  details = true;
1626  }
1627 
1628  u32 sum_reass_n = 0;
1629  u64 sum_buffers_n = 0;
1630  ip6_full_reass_t *reass;
1631  uword thread_index;
1632  const uword nthreads = vlib_num_workers () + 1;
1633  for (thread_index = 0; thread_index < nthreads; ++thread_index)
1634  {
1635  ip6_full_reass_per_thread_t *rt = &rm->per_thread_data[thread_index];
1636  clib_spinlock_lock (&rt->lock);
1637  if (details)
1638  {
1639  /* *INDENT-OFF* */
1640  pool_foreach (reass, rt->pool, {
1641  vlib_cli_output (vm, "%U", format_ip6_full_reass, vm, reass);
1642  });
1643  /* *INDENT-ON* */
1644  }
1645  sum_reass_n += rt->reass_n;
1646  clib_spinlock_unlock (&rt->lock);
1647  }
1648  vlib_cli_output (vm, "---------------------");
1649  vlib_cli_output (vm, "Current IP6 reassemblies count: %lu\n",
1650  (long unsigned) sum_reass_n);
1651  vlib_cli_output (vm, "Maximum configured concurrent IP6 reassemblies per "
1652  "worker-thread: %lu\n", (long unsigned) rm->max_reass_n);
1653  vlib_cli_output (vm, "Buffers in use: %lu\n",
1654  (long unsigned) sum_buffers_n);
1655  return 0;
1656 }
1657 
/* CLI command registration for the show handler above.
 * NOTE(review): the VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd, ...)
 * header line is missing from this listing. */
1658 /* *INDENT-OFF* */
1660  .path = "show ip6-full-reassembly",
1661  .short_help = "show ip6-full-reassembly [details]",
1662  .function = show_ip6_full_reass,
1663 };
1664 /* *INDENT-ON* */
1665 
1666 #ifndef CLIB_MARCH_VARIANT
/* Public API: toggle the reassembly feature on the ip6-unicast arc for one
 * interface. NOTE(review): the vnet_api_error_t return type and the
 * (u32 sw_if_index, u8 enable_disable) signature lines are missing from this
 * listing — see the index entry below. */
1669 {
1670  return vnet_feature_enable_disable ("ip6-unicast",
1671  "ip6-full-reassembly-feature",
1672  sw_if_index, enable_disable, 0, 0);
1673 }
1674 #endif /* CLIB_MARCH_VARIANT */
1675 
/* Handoff error table: the X-macro below expands once into enum constants
 * and once into the parallel error-string array.
 * NOTE(review): the enum terminator/typedef name, the macro-invocation lines
 * and the string-array declaration line are missing from this listing. */
1676 #define foreach_ip6_full_reassembly_handoff_error \
1677 _(CONGESTION_DROP, "congestion drop")
1678 
1679 
1680 typedef enum
1681 {
1682 #define _(sym,str) IP6_FULL_REASSEMBLY_HANDOFF_ERROR_##sym,
1684 #undef _
1687 
1689 #define _(sym,string) string,
1691 #undef _
1692 };
1693 
/* Trace record for the handoff nodes (holds next_worker_index) and its
 * formatter. NOTE(review): the struct member line, the typedef name and the
 * declaration of `t` are missing from this listing. */
1694 typedef struct
1695 {
1698 
1699 static u8 *
1701 {
1702  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1703  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1705  va_arg (*args, ip6_full_reassembly_handoff_trace_t *);
1706 
1707  s =
1708  format (s, "ip6-full-reassembly-handoff: next-worker %d",
1709  t->next_worker_index);
1710 
1711  return s;
1712 }
1713 
/* Hand each buffer off to the worker thread that owns its reassembly context
 * (owner index was stashed in the buffer opaque by the reassembly node),
 * counting congestion drops when the frame queue is full.
 * NOTE(review): the static_always_inline/name lines, the `rm` declaration,
 * the trace-struct declaration and the vlib_node_increment_counter call line
 * are missing from this listing. */
1716  vlib_node_runtime_t * node,
1717  vlib_frame_t * frame, bool is_feature)
1718 {
1720 
1721  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1722  u32 n_enq, n_left_from, *from;
1723  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1724  u32 fq_index;
1725 
1726  from = vlib_frame_vector_args (frame);
1727  n_left_from = frame->n_vectors;
1728  vlib_get_buffers (vm, from, bufs, n_left_from);
1729 
1730  b = bufs;
1731  ti = thread_indices;
1732 
/* feature path and regular path use distinct frame queues */
1733  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
1734 
1735  while (n_left_from > 0)
1736  {
1737  ti[0] =
1738  (is_feature) ? vnet_buffer (b[0])->ip.
1739  reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
1740  reass.owner_thread_index;
1741 
1742  if (PREDICT_FALSE
1743  ((node->flags & VLIB_NODE_FLAG_TRACE)
1744  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1745  {
1747  vlib_add_trace (vm, node, b[0], sizeof (*t));
1748  t->next_worker_index = ti[0];
1749  }
1750 
1751  n_left_from -= 1;
1752  ti += 1;
1753  b += 1;
1754  }
1755  n_enq =
1756  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1757  frame->n_vectors, 1);
1758 
/* anything not enqueued was dropped due to congestion */
1759  if (n_enq < frame->n_vectors)
1761  IP6_FULL_REASSEMBLY_HANDOFF_ERROR_CONGESTION_DROP,
1762  frame->n_vectors - n_enq);
1763  return frame->n_vectors;
1764 }
1765 
/* Non-feature handoff node: thin wrapper around the inline above, plus its
 * node registration. NOTE(review): the VLIB_NODE_FN header line and the
 * VLIB_REGISTER_NODE / .error_strings / .format_trace lines are missing
 * from this listing. */
1767  vlib_node_runtime_t * node,
1768  vlib_frame_t * frame)
1769 {
1770  return ip6_full_reassembly_handoff_inline (vm, node, frame,
1771  false /* is_feature */ );
1772 }
1773 
1774 /* *INDENT-OFF* */
1776  .name = "ip6-full-reassembly-handoff",
1777  .vector_size = sizeof (u32),
1778  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1781 
1782  .n_next_nodes = 1,
1783 
1784  .next_nodes = {
1785  [0] = "error-drop",
1786  },
1787 };
1788 
1789 
/* Feature-arc handoff node ("ip6-full-reass-feature-hoff"): same inline with
 * is_feature = true, plus its registration. NOTE(review): the VLIB_NODE_FN
 * header line and the VLIB_REGISTER_NODE / .error_strings / .format_trace
 * lines are missing from this listing. */
1791  vlib_node_runtime_t * node, vlib_frame_t * frame)
1792 {
1793  return ip6_full_reassembly_handoff_inline (vm, node, frame, true /* is_feature */ );
1794 }
1795 
1796 
1797 /* *INDENT-OFF* */
1799  .name = "ip6-full-reass-feature-hoff",
1800  .vector_size = sizeof (u32),
1801  .n_errors = ARRAY_LEN(ip6_full_reassembly_handoff_error_strings),
1804 
1805  .n_next_nodes = 1,
1806 
1807  .next_nodes = {
1808  [0] = "error-drop",
1809  },
1810 };
1811 /* *INDENT-ON* */
1812 
1813 /*
1814  * fd.io coding-style-patch-verification: ON
1815  *
1816  * Local Variables:
1817  * eval: (c-set-style "gnu")
1818  * End:
1819  */
#define IP6_FULL_REASS_MAX_REASSEMBLIES_DEFAULT
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:439
vnet_api_error_t ip6_full_reass_get(u32 *timeout_ms, u32 *max_reassemblies, u32 *max_reassembly_length, u32 *expire_walk_interval_ms)
get ip6 reassembly configuration
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
clib_bihash_48_8_t hash
static bool ip6_full_reass_verify_fragment_multiple_8(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
vnet_api_error_t
Definition: api_errno.h:154
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
Definition: vlib_api_cli.c:889
#define clib_min(x, y)
Definition: clib.h:302
vlib_node_registration_t ip6_full_reassembly_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_handoff_node)
static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
Definition: lock.h:102
static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
Definition: lock.h:80
#define CLIB_UNUSED(x)
Definition: clib.h:83
static u32 ip6_full_reass_buffer_get_data_offset(vlib_buffer_t *b)
ip6_full_reass_trace_operation_e action
static f64 vlib_process_wait_for_event_or_clock(vlib_main_t *vm, f64 dt)
Suspend a cooperative multi-tasking thread Waits for an event, or for the indicated number of seconds...
Definition: node_funcs.h:673
void ip6_register_protocol(u32 protocol, u32 node_index)
Definition: ip6_forward.c:1581
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:865
ip6_full_reass_main_t ip6_full_reass_main
static void ip6_full_reass_insert_range_in_chain(vlib_main_t *vm, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 prev_range_bi, u32 new_next_bi)
#define pool_alloc(P, N)
Allocate N more free elements to pool (unspecified alignment).
Definition: pool.h:341
u64 as_u64
Definition: bihash_doc.h:63
static ip6_full_reass_rc_t ip6_full_reass_update(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, ip6_frag_hdr_t *frag_hdr, bool is_custom_app, u32 *handoff_thread_idx)
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
u32 index
Definition: node.h:280
static f64 vlib_time_now(vlib_main_t *vm)
Definition: main.h:279
u32 vlib_frame_queue_main_init(u32 node_index, u32 frame_queue_nelts)
Definition: threads.c:1834
u32 thread_index
Definition: main.h:218
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:522
static ip6_full_reass_t * ip6_full_reass_find_or_create(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_kv_t *kv, u32 *icmp_bi, u8 *do_handoff)
static void * ip6_ext_next_header(ip6_ext_header_t *ext_hdr)
Definition: ip6_packet.h:552
ip6_full_reass_rc_t
int i
static uword ip6_full_reassembly_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature, bool is_custom_app)
#define MSEC_PER_SEC
static u32 format_get_indent(u8 *s)
Definition: format.h:72
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:424
static void ip6_full_reass_free_ctx(ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define VLIB_NODE_FN(node)
Definition: node.h:202
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:470
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
Definition: buffer_funcs.h:366
#define pool_get(P, E)
Allocate an object E from a pool P (unspecified alignment).
Definition: pool.h:236
ip6_address_t src_address
Definition: ip6_packet.h:383
ip6_full_reass_next_t
unsigned char u8
Definition: types.h:56
#define vec_pop(V)
Returns last element of a vector and decrements its length.
Definition: vec.h:615
vlib_node_registration_t ip6_full_reass_expire_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_expire_node)
static bool ip6_full_reass_verify_upper_layer_present(vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
double f64
Definition: types.h:142
static u8 * format_ip6_full_reass_trace(u8 *s, va_list *args)
#define pool_foreach(VAR, POOL, BODY)
Iterate through pool.
Definition: pool.h:493
vl_api_interface_index_t sw_if_index
Definition: gre.api:50
#define VLIB_INIT_FUNCTION(x)
Definition: init.h:173
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
Definition: node_funcs.h:516
#define always_inline
Definition: clib.h:99
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
struct vnet_buffer_opaque_t::@60::@62 ip
static char * ip6_full_reassembly_error_strings[]
vlib_frame_t * vlib_get_frame_to_node(vlib_main_t *vm, u32 to_node_index)
Definition: main.c:185
vlib_node_registration_t ip6_full_reass_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node)
unsigned int u32
Definition: types.h:88
static void ip6_full_reass_free(ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass)
#define vlib_call_init_function(vm, x)
Definition: init.h:270
#define VLIB_FRAME_SIZE
Definition: node.h:378
static void * ip6_ext_header_find(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6_header, u8 header_type, ip6_ext_header_t **prev_ext_header)
Definition: ip6_packet.h:578
static u32 ip6_full_reass_get_nbuckets()
void icmp6_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
Definition: icmp6.c:446
static void clib_spinlock_init(clib_spinlock_t *p)
Definition: lock.h:63
#define ip6_frag_hdr_more(hdr)
Definition: ip6_packet.h:648
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:136
static vlib_cli_command_t show_ip6_full_reassembly_cmd
(constructor) VLIB_CLI_COMMAND (show_ip6_full_reassembly_cmd)
#define IP6_FULL_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT
static u32 vlib_buffer_chain_linearize(vlib_main_t *vm, vlib_buffer_t *b)
static ip6_full_reass_rc_t ip6_full_reass_finalize(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_per_thread_t *rt, ip6_full_reass_t *reass, u32 *bi0, u32 *next0, u32 *error0, bool is_custom_app)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:514
static clib_error_t * show_ip6_full_reass(vlib_main_t *vm, unformat_input_t *input, CLIB_UNUSED(vlib_cli_command_t *lmd))
ip6_full_reassembly_handoff_error_t
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:934
static u8 * format_ip6_full_reassembly_handoff_trace(u8 *s, va_list *args)
u16 frame_flags
Definition: node.h:385
long ctx[MAX_CONNS]
Definition: main.c:144
struct _unformat_input_t unformat_input_t
unsigned short u16
Definition: types.h:57
void vlib_put_frame_to_node(vlib_main_t *vm, u32 to_node_index, vlib_frame_t *f)
Definition: main.c:194
ip6_full_reass_trace_operation_e
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:229
#define pool_put(P, E)
Free an object E in pool P.
Definition: pool.h:286
#define PREDICT_FALSE(x)
Definition: clib.h:112
static void ip6_full_reass_on_timeout(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 *icmp_bi)
static char * ip6_full_reassembly_handoff_error_strings[]
u32 node_index
Node index.
Definition: node.h:496
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:338
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
Definition: node_funcs.h:1150
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:169
clib_error_t * ip_main_init(vlib_main_t *vm)
Definition: ip_init.c:45
VNET_FEATURE_INIT(ip6_full_reassembly_feature, static)
u16 n_vectors
Definition: node.h:397
format_function_t format_ip6_address
Definition: format.h:93
vlib_main_t * vm
Definition: buffer.c:323
#define IP6_FULL_REASS_HT_LOAD_FACTOR
ip6_full_reass_range_trace_t trace_range
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:341
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:302
#define clib_warning(format, args...)
Definition: error.h:59
ip6_full_reass_per_thread_t * per_thread_data
u8 * format_hexdump(u8 *s, va_list *va)
Definition: std-formats.c:297
static void ip6_full_reass_add_trace(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass, u32 bi, ip6_full_reass_trace_operation_e action, u32 thread_id_to)
#define ip6_frag_hdr_offset_bytes(hdr)
Definition: ip6_packet.h:645
vlib_main_t * vlib_main
#define ARRAY_LEN(x)
Definition: clib.h:63
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:456
vlib_node_t * vlib_get_node_by_name(vlib_main_t *vm, u8 *name)
Definition: node.c:45
static u8 ip6_ext_hdr(u8 nexthdr)
Definition: ip6_packet.h:527
vlib_node_registration_t ip6_full_reass_node_feature
(constructor) VLIB_REGISTER_NODE (ip6_full_reass_node_feature)
clib_bihash_kv_48_8_t kv
#define VLIB_CLI_COMMAND(x,...)
Definition: cli.h:161
u32 fq_index
Worker handoff.
signed int i32
Definition: types.h:77
#define ip6_frag_hdr_offset(hdr)
Definition: ip6_packet.h:642
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:515
#define IP6_FULL_REASS_TIMEOUT_DEFAULT_MS
#define ASSERT(truth)
ip6_main_t ip6_main
Definition: ip6_forward.c:2805
ip6_full_reass_key_t k
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:248
static u16 ip6_full_reass_buffer_get_data_len(vlib_buffer_t *b)
static uword ip6_full_reassembly_handoff_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, bool is_feature)
#define VNET_FEATURES(...)
Definition: feature.h:442
static u8 * format_ip6_full_reass(u8 *s, va_list *args)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
#define vec_elt(v, i)
Get vector value at index i.
void clib_bihash_copied(void *dst, void *src)
static void ip6_full_reass_drop_all(vlib_main_t *vm, vlib_node_runtime_t *node, ip6_full_reass_main_t *rm, ip6_full_reass_t *reass)
static void ip6_full_reass_trace_details(vlib_main_t *vm, u32 bi, ip6_full_reass_range_trace_t *trace)
u16 payload_length
Definition: ip6_packet.h:374
vl_api_address_t ip
Definition: l2.api:489
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
#define foreach_ip6_full_reassembly_handoff_error
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:244
typedef key
Definition: ipsec.api:247
#define VLIB_FRAME_TRACE
Definition: node.h:435
vnet_api_error_t ip6_full_reass_set(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
set ip6 reassembly configuration
#define IP6_FULL_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS
#define foreach_ip6_error
Definition: ip6_error.h:43
vlib_node_registration_t ip6_full_reassembly_feature_handoff_node
(constructor) VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node)
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
Definition: buffer_node.h:487
static void ip6_full_reass_set_params(u32 timeout_ms, u32 max_reassemblies, u32 max_reassembly_length, u32 expire_walk_interval_ms)
#define vnet_buffer(b)
Definition: buffer.h:365
static bool ip6_full_reass_verify_packet_size_lt_64k(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, ip6_frag_hdr_t *frag_hdr)
clib_bihash_48_8_t * new_hash
static u32 vlib_num_workers()
Definition: threads.h:372
ip6_full_reass_key_t key
#define vec_foreach(var, vec)
Vector iterator.
vnet_api_error_t ip6_full_reass_enable_disable(u32 sw_if_index, u8 enable_disable)
ip6_full_reass_event_t
u16 flags
Copy of main node flags.
Definition: node.h:509
static u8 * format_ip6_full_reass_key(u8 *s, va_list *args)
#define pool_foreach_index(i, v, body)
Iterate pool by index.
Definition: pool.h:538
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
Definition: buffer_funcs.h:244
#define VLIB_NODE_FLAG_TRACE
Definition: node.h:302
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
static clib_error_t * ip6_full_reass_init_function(vlib_main_t *vm)
static uword ip6_full_reass_walk_expired(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
ip6_full_reass_val_t v
void vlib_cli_output(vlib_main_t *vm, char *fmt,...)
Definition: cli.c:772
u32 * fib_index_by_sw_if_index
Definition: ip6.h:194
static void ip6_rehash_cb(clib_bihash_kv_48_8_t *kv, void *_ctx)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
uword unformat(unformat_input_t *i, const char *fmt,...)
Definition: unformat.c:978
Definition: defs.h:46
int vnet_feature_enable_disable(const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes)
Definition: feature.c:275
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
ip6_address_t dst_address
Definition: ip6_packet.h:383
static u8 * format_ip6_full_reass_range_trace(u8 *s, va_list *args)
IPv6 Reassembly.