FD.io VPP  v19.04.4-rc0-5-ge88582fac
Vector Packet Processing
ip6_map.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 #include <vnet/ip/ip6_to_ip4.h>
20 
21 enum ip6_map_next_e
22 {
23  IP6_MAP_NEXT_IP4_LOOKUP,
24 #ifdef MAP_SKIP_IP6_LOOKUP
25  IP6_MAP_NEXT_IP4_REWRITE,
26 #endif
27  IP6_MAP_NEXT_IP6_REASS,
28  IP6_MAP_NEXT_IP4_REASS,
29  IP6_MAP_NEXT_IP4_FRAGMENT,
30  IP6_MAP_NEXT_IP6_ICMP_RELAY,
31  IP6_MAP_NEXT_IP6_LOCAL,
32  IP6_MAP_NEXT_DROP,
33  IP6_MAP_NEXT_ICMP,
34  IP6_MAP_N_NEXT,
35 };
36 
37 enum ip6_map_ip6_reass_next_e
38 {
39  IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
40  IP6_MAP_IP6_REASS_NEXT_DROP,
41  IP6_MAP_IP6_REASS_N_NEXT,
42 };
43 
44 enum ip6_map_ip4_reass_next_e
45 {
46  IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
47  IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
48  IP6_MAP_IP4_REASS_NEXT_DROP,
49  IP6_MAP_IP4_REASS_N_NEXT,
50 };
51 
52 enum ip6_icmp_relay_next_e
53 {
54  IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
55  IP6_ICMP_RELAY_NEXT_DROP,
56  IP6_ICMP_RELAY_N_NEXT,
57 };
58 
59 vlib_node_registration_t ip6_map_ip4_reass_node;
60 vlib_node_registration_t ip6_map_ip6_reass_node;
61 static vlib_node_registration_t ip6_map_icmp_relay_node;
62 
63 typedef struct
64 {
65  u32 map_domain_index;
66  u16 port;
67  u8 cached;
68 } map_ip6_map_ip4_reass_trace_t;
69 
70 u8 *
71 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
72 {
73  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
75  map_ip6_map_ip4_reass_trace_t *t =
76  va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
77  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
78  t->map_domain_index, t->port,
79  t->cached ? "cached" : "forwarded");
80 }
81 
82 typedef struct
83 {
84  u16 offset;
85  u16 frag_len;
86  u8 out;
87 } map_ip6_map_ip6_reass_trace_t;
88 
89 u8 *
90 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
91 {
92  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
93  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
94  map_ip6_map_ip6_reass_trace_t *t =
95  va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
96  return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
97  t->frag_len, t->out ? "out" : "in");
98 }
99 
100 /*
101  * ip6_map_sec_check
102  */
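/*
 * MAP-E inbound source check: map_get_pfx()/map_get_sfx() compute the IPv6
 * source address a CE in domain d should be using for the given embedded
 * IPv4 source address and L4 port, and the result is compared against the
 * actual outer IPv6 source. On mismatch the caller records
 * MAP_ERROR_DECAP_SEC_CHECK.
 */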
103 static_always_inline bool
104 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
105  ip6_header_t * ip6)
106 {
107  u16 sp4 = clib_net_to_host_u16 (port);
108  u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
109  u64 sal6 = map_get_pfx (d, sa4, sp4);
110  u64 sar6 = map_get_sfx (d, sa4, sp4);
111 
112  if (PREDICT_FALSE
113  (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
114  || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
115  return (false);
116  return (true);
117 }
118 
119 static_always_inline void
120 ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
121  ip6_header_t * ip6, u32 * next, u8 * error)
122 {
123  map_main_t *mm = &map_main;
124  if (d->ea_bits_len || d->rules)
125  {
126  if (d->psid_length > 0)
127  {
128  if (!ip4_is_fragment (ip4))
129  {
130  u16 port = ip4_get_port (ip4, 1);
131  if (port)
132  {
133  if (mm->sec_check)
134  *error =
135  ip6_map_sec_check (d, port, ip4,
136  ip6) ? MAP_ERROR_NONE :
137  MAP_ERROR_DECAP_SEC_CHECK;
138  }
139  else
140  {
141  *error = MAP_ERROR_BAD_PROTOCOL;
142  }
143  }
144  else
145  {
146  *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
147  }
148  }
149  }
150 }
151 
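/*
 * With MAP_SKIP_IP6_LOOKUP compiled in, a pre-resolved next hop (see
 * pre_resolved[FIB_PROTOCOL_IP4]) lets the caller set the buffer's TX
 * adjacency directly and send the packet to IP6_MAP_NEXT_IP4_REWRITE
 * ("ip4-load-balance"), bypassing the regular ip4-lookup.
 */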
152 static_always_inline bool
153 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
154 {
155 #ifdef MAP_SKIP_IP6_LOOKUP
156  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
157  {
158  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
159  pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
160  return (true);
161  }
162 #endif
163  return (false);
164 }
165 
166 /*
167  * ip6_map
168  */
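/*
 * Main MAP-E decapsulation node, placed on the "ip6-unicast" feature arc
 * (see VNET_FEATURE_INIT at the bottom of this file). Each buffer is
 * advanced past the outer IPv6 header and classified as described in the
 * comment inside the loops below. Note that the single loop, unlike the
 * dual loop, hands unrecognized protocols back to the feature arc via
 * vnet_feature_next() instead of flagging MAP_ERROR_BAD_PROTOCOL.
 */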
169 static uword
170 ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
171 {
172  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
173  vlib_node_runtime_t *error_node =
174  vlib_node_get_runtime (vm, ip6_map_node.index);
175  map_main_t *mm = &map_main;
176  vlib_combined_counter_main_t *cm = mm->domain_counters;
177  u32 thread_index = vm->thread_index;
178 
179  from = vlib_frame_vector_args (frame);
180  n_left_from = frame->n_vectors;
181  next_index = node->cached_next_index;
182  while (n_left_from > 0)
183  {
184  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
185 
186  /* Dual loop */
187  while (n_left_from >= 4 && n_left_to_next >= 2)
188  {
189  u32 pi0, pi1;
190  vlib_buffer_t *p0, *p1;
191  u8 error0 = MAP_ERROR_NONE;
192  u8 error1 = MAP_ERROR_NONE;
193  map_domain_t *d0 = 0, *d1 = 0;
194  ip4_header_t *ip40, *ip41;
195  ip6_header_t *ip60, *ip61;
196  u16 port0 = 0, port1 = 0;
197  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
198  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
199  u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
200 
201  /* Prefetch next iteration. */
202  {
203  vlib_buffer_t *p2, *p3;
204 
205  p2 = vlib_get_buffer (vm, from[2]);
206  p3 = vlib_get_buffer (vm, from[3]);
207 
208  vlib_prefetch_buffer_header (p2, LOAD);
209  vlib_prefetch_buffer_header (p3, LOAD);
210 
211  /* IPv6 + IPv4 header + 8 bytes of ULP */
212  CLIB_PREFETCH (p2->data, 68, LOAD);
213  CLIB_PREFETCH (p3->data, 68, LOAD);
214  }
215 
216  pi0 = to_next[0] = from[0];
217  pi1 = to_next[1] = from[1];
218  from += 2;
219  n_left_from -= 2;
220  to_next += 2;
221  n_left_to_next -= 2;
222 
223  p0 = vlib_get_buffer (vm, pi0);
224  p1 = vlib_get_buffer (vm, pi1);
225  ip60 = vlib_buffer_get_current (p0);
226  ip61 = vlib_buffer_get_current (p1);
227  vlib_buffer_advance (p0, sizeof (ip6_header_t));
228  vlib_buffer_advance (p1, sizeof (ip6_header_t));
229  ip40 = vlib_buffer_get_current (p0);
230  ip41 = vlib_buffer_get_current (p1);
231 
232  /*
233  * Encapsulated IPv4 packet
234  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
235  * - Lookup/Rewrite or Fragment node in case of packet > MTU
236  * Fragmented IPv6 packet
237  * ICMP IPv6 packet
238  * - Error -> Pass to ICMPv6/ICMPv4 relay
239  * - Info -> Pass to IPv6 local
240  * Anything else -> drop
241  */
242  if (PREDICT_TRUE
243  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
244  && clib_net_to_host_u16 (ip60->payload_length) > 20))
245  {
246  d0 =
247  ip4_map_get_domain ((ip4_address_t *) & ip40->
248  src_address.as_u32, &map_domain_index0,
249  &error0);
250  }
251  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
252  clib_net_to_host_u16 (ip60->payload_length) >
253  sizeof (icmp46_header_t))
254  {
255  icmp46_header_t *icmp = (void *) (ip60 + 1);
256  next0 = (icmp->type == ICMP6_echo_request
257  || icmp->type ==
258  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
259  IP6_MAP_NEXT_IP6_ICMP_RELAY;
260  }
261  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
262  {
263  next0 = IP6_MAP_NEXT_IP6_REASS;
264  }
265  else
266  {
267  error0 = MAP_ERROR_BAD_PROTOCOL;
268  }
269  if (PREDICT_TRUE
270  (ip61->protocol == IP_PROTOCOL_IP_IN_IP
271  && clib_net_to_host_u16 (ip61->payload_length) > 20))
272  {
273  d1 =
274  ip4_map_get_domain ((ip4_address_t *) & ip41->
275  src_address.as_u32, &map_domain_index1,
276  &error1);
277  }
278  else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
279  clib_net_to_host_u16 (ip61->payload_length) >
280  sizeof (icmp46_header_t))
281  {
282  icmp46_header_t *icmp = (void *) (ip61 + 1);
283  next1 = (icmp->type == ICMP6_echo_request
284  || icmp->type ==
285  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
286  IP6_MAP_NEXT_IP6_ICMP_RELAY;
287  }
288  else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
289  {
290  next1 = IP6_MAP_NEXT_IP6_REASS;
291  }
292  else
293  {
294  error1 = MAP_ERROR_BAD_PROTOCOL;
295  }
296 
297  if (d0)
298  {
299  /* MAP inbound security check */
300  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
301 
302  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
303  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
304  {
305  if (PREDICT_FALSE
306  (d0->mtu
307  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
308  {
309  vnet_buffer (p0)->ip_frag.flags = 0;
310  vnet_buffer (p0)->ip_frag.next_index =
311  IP4_FRAG_NEXT_IP4_LOOKUP;
312  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
313  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
314  }
315  else
316  {
317  next0 =
318  ip6_map_ip4_lookup_bypass (p0,
319  ip40) ?
320  IP6_MAP_NEXT_IP4_REWRITE : next0;
321  }
322  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
323  thread_index,
324  map_domain_index0, 1,
325  clib_net_to_host_u16
326  (ip40->length));
327  }
328  }
329  if (d1)
330  {
331  /* MAP inbound security check */
332  ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
333 
334  if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
335  next1 == IP6_MAP_NEXT_IP4_LOOKUP))
336  {
337  if (PREDICT_FALSE
338  (d1->mtu
339  && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
340  {
341  vnet_buffer (p1)->ip_frag.flags = 0;
342  vnet_buffer (p1)->ip_frag.next_index =
343  IP4_FRAG_NEXT_IP4_LOOKUP;
344  vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
345  next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
346  }
347  else
348  {
349  next1 =
350  ip6_map_ip4_lookup_bypass (p1,
351  ip41) ?
352  IP6_MAP_NEXT_IP4_REWRITE : next1;
353  }
354  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
355  thread_index,
356  map_domain_index1, 1,
357  clib_net_to_host_u16
358  (ip41->length));
359  }
360  }
361 
362  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
363  {
364  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
365  tr->map_domain_index = map_domain_index0;
366  tr->port = port0;
367  }
368 
369  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
370  {
371  map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
372  tr->map_domain_index = map_domain_index1;
373  tr->port = port1;
374  }
375 
376  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
377  {
378  /* Set ICMP parameters */
379  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
380  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
381  ICMP6_destination_unreachable_source_address_failed_policy,
382  0);
383  next0 = IP6_MAP_NEXT_ICMP;
384  }
385  else
386  {
387  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
388  }
389 
390  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
391  {
392  /* Set ICMP parameters */
393  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
394  icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
395  ICMP6_destination_unreachable_source_address_failed_policy,
396  0);
397  next1 = IP6_MAP_NEXT_ICMP;
398  }
399  else
400  {
401  next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
402  }
403 
404  /* Reset packet */
405  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
406  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
407  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
408  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
409 
410  p0->error = error_node->errors[error0];
411  p1->error = error_node->errors[error1];
412  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
413  n_left_to_next, pi0, pi1, next0,
414  next1);
415  }
416 
417  /* Single loop */
418  while (n_left_from > 0 && n_left_to_next > 0)
419  {
420  u32 pi0;
421  vlib_buffer_t *p0;
422  u8 error0 = MAP_ERROR_NONE;
423  map_domain_t *d0 = 0;
424  ip4_header_t *ip40;
425  ip6_header_t *ip60;
426  i32 port0 = 0;
427  u32 map_domain_index0 = ~0;
428  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
429 
430  pi0 = to_next[0] = from[0];
431  from += 1;
432  n_left_from -= 1;
433  to_next += 1;
434  n_left_to_next -= 1;
435 
436  p0 = vlib_get_buffer (vm, pi0);
437  ip60 = vlib_buffer_get_current (p0);
438  vlib_buffer_advance (p0, sizeof (ip6_header_t));
439  ip40 = vlib_buffer_get_current (p0);
440 
441  /*
442  * Encapsulated IPv4 packet
443  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
444  * - Lookup/Rewrite or Fragment node in case of packet > MTU
445  * Fragmented IPv6 packet
446  * ICMP IPv6 packet
447  * - Error -> Pass to ICMPv6/ICMPv4 relay
448  * - Info -> Pass to IPv6 local
449  * Anything else -> drop
450  */
451  if (PREDICT_TRUE
452  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
453  && clib_net_to_host_u16 (ip60->payload_length) > 20))
454  {
455  d0 =
456  ip4_map_get_domain ((ip4_address_t *) & ip40->
457  src_address.as_u32, &map_domain_index0,
458  &error0);
459  }
460  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
461  clib_net_to_host_u16 (ip60->payload_length) >
462  sizeof (icmp46_header_t))
463  {
464  icmp46_header_t *icmp = (void *) (ip60 + 1);
465  next0 = (icmp->type == ICMP6_echo_request
466  || icmp->type ==
467  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
468  IP6_MAP_NEXT_IP6_ICMP_RELAY;
469  }
470  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
471  (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
472  IP_PROTOCOL_IP_IN_IP))
473  {
474  next0 = IP6_MAP_NEXT_IP6_REASS;
475  }
476  else
477  {
478  /* XXX: Move get_domain to ip6_get_domain lookup on source */
479  //error0 = MAP_ERROR_BAD_PROTOCOL;
480  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
481  vnet_feature_next (&next0, p0);
482  }
483 
484  if (d0)
485  {
486  /* MAP inbound security check */
487  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
488 
489  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
490  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
491  {
492  if (PREDICT_FALSE
493  (d0->mtu
494  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
495  {
496  vnet_buffer (p0)->ip_frag.flags = 0;
497  vnet_buffer (p0)->ip_frag.next_index =
498  IP4_FRAG_NEXT_IP4_LOOKUP;
499  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
500  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
501  }
502  else
503  {
504  next0 =
505  ip6_map_ip4_lookup_bypass (p0,
506  ip40) ?
507  IP6_MAP_NEXT_IP4_REWRITE : next0;
508  }
509  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
510  thread_index,
511  map_domain_index0, 1,
512  clib_net_to_host_u16
513  (ip40->length));
514  }
515  }
516 
517  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
518  {
519  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
520  tr->map_domain_index = map_domain_index0;
521  tr->port = (u16) port0;
522  }
523 
524  if (mm->icmp6_enabled &&
525  (error0 == MAP_ERROR_DECAP_SEC_CHECK
526  || error0 == MAP_ERROR_NO_DOMAIN))
527  {
528  /* Set ICMP parameters */
529  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
530  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
531  ICMP6_destination_unreachable_source_address_failed_policy,
532  0);
533  next0 = IP6_MAP_NEXT_ICMP;
534  }
535  else
536  {
537  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
538  }
539 
540  /* Reset packet */
541  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
542  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
543 
544  p0->error = error_node->errors[error0];
545  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
546  n_left_to_next, pi0, next0);
547  }
548  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
549  }
550 
551  return frame->n_vectors;
552 }
553 
554 
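/*
 * Walks the fragment cache of one IPv6 reassembly context and emits every
 * fragment that can already be forwarded as an IPv4 fragment: the cached
 * IPv4 header is prepended to non-first fragments, the IPv6 fragment
 * header fields are folded into the IPv4 fragment_id/offset/MF bits, the
 * IPv4 length and checksum are rebuilt, and the buffer is queued on
 * *fragments_ready.
 */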
555 static_always_inline void
556 ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
557  map_ip6_reass_t * r, u32 ** fragments_ready,
558  u32 ** fragments_to_drop)
559 {
560  ip4_header_t *ip40;
561  ip6_header_t *ip60;
562  ip6_frag_hdr_t *frag0;
563  vlib_buffer_t *p0;
564 
565  if (!r->ip4_header.ip_version_and_header_length)
566  return;
567 
568  //The IP header is here, we need to check for packets
569  //that can be forwarded
570  int i;
571  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
572  {
573  if (r->fragments[i].pi == ~0 ||
574  ((!r->fragments[i].next_data_len)
575  && (r->fragments[i].next_data_offset != (0xffff))))
576  continue;
577 
578  p0 = vlib_get_buffer (vm, r->fragments[i].pi);
579  ip60 = vlib_buffer_get_current (p0);
580  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
581  ip40 = (ip4_header_t *) (frag0 + 1);
582 
583  if (ip6_frag_hdr_offset (frag0))
584  {
585  //Not first fragment, add the IPv4 header
586  clib_memcpy_fast (ip40, &r->ip4_header, 20);
587  }
588 
589 #ifdef MAP_IP6_REASS_COUNT_BYTES
590  r->forwarded +=
591  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
592 #endif
593 
594  if (ip6_frag_hdr_more (frag0))
595  {
596  //Not last fragment, we copy end of next
597  clib_memcpy_fast (u8_ptr_add (ip60, p0->current_length),
598  r->fragments[i].next_data, 20);
599  p0->current_length += 20;
600  ip60->payload_length = u16_net_add (ip60->payload_length, 20);
601  }
602 
603  if (!ip4_is_fragment (ip40))
604  {
605  ip40->fragment_id = frag_id_6to4 (frag0->identification);
606  ip40->flags_and_fragment_offset =
607  clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
608  }
609  else
610  {
611  ip40->flags_and_fragment_offset =
612  clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
613  ip6_frag_hdr_offset (frag0));
614  }
615 
616  if (ip6_frag_hdr_more (frag0))
617  ip40->flags_and_fragment_offset |=
618  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
619 
620  ip40->length =
621  clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
622  sizeof (*frag0));
623  ip40->checksum = ip4_header_checksum (ip40);
624 
625  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
626  {
627  map_ip6_map_ip6_reass_trace_t *tr =
628  vlib_add_trace (vm, node, p0, sizeof (*tr));
629  tr->offset = ip4_get_fragment_offset (ip40);
630  tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
631  tr->out = 1;
632  }
633 
634  vec_add1 (*fragments_ready, r->fragments[i].pi);
635  r->fragments[i].pi = ~0;
636  r->fragments[i].next_data_len = 0;
637  r->fragments[i].next_data_offset = 0;
638  map_main.ip6_reass_buffered_counter--;
639 
640  //TODO: Best solution would be that ip6_map handles extension headers
641  // and ignores atomic fragment. But in the meantime, let's just copy the header.
642 
643  u8 protocol = frag0->next_hdr;
644  memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
645  ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
646  protocol;
647  vlib_buffer_advance (p0, sizeof (*frag0));
648  }
649 }
650 
651 void
652 map_ip6_drop_pi (u32 pi)
653 {
654  vlib_main_t *vm = vlib_get_main ();
655  vlib_node_runtime_t *n =
656  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
657  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
658 }
659 
660 void
661 map_ip4_drop_pi (u32 pi)
662 {
663  vlib_main_t *vm = vlib_get_main ();
664  vlib_node_runtime_t *n =
665  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
666  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
667 }
668 
669 /*
670  * ip6_reass
671  * TODO: We should count the number of successfully
672  * transmitted fragment bytes and compare that to the last fragment
673  * offset such that we can free the reassembly structure when all fragments
674  * have been forwarded.
675  */
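/*
 * "Reassembly" here is virtual: fragments are never coalesced into one
 * buffer. A context caches the IPv4 header seen in the first fragment and,
 * for every non-last fragment, the first 20 bytes of the following
 * fragment's payload, which is enough for ip6_map_ip6_reass_prepare()
 * above to emit each IPv6 fragment as a self-contained IPv4 fragment.
 */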
676 static uword
677 ip6_map_ip6_reass (vlib_main_t * vm,
678  vlib_node_runtime_t * node, vlib_frame_t * frame)
679 {
680  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
681  vlib_node_runtime_t *error_node =
682  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
683  u32 *fragments_to_drop = NULL;
684  u32 *fragments_ready = NULL;
685 
686  from = vlib_frame_vector_args (frame);
687  n_left_from = frame->n_vectors;
688  next_index = node->cached_next_index;
689  while (n_left_from > 0)
690  {
691  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
692 
693  /* Single loop */
694  while (n_left_from > 0 && n_left_to_next > 0)
695  {
696  u32 pi0;
697  vlib_buffer_t *p0;
698  u8 error0 = MAP_ERROR_NONE;
699  ip6_header_t *ip60;
700  ip6_frag_hdr_t *frag0;
701  u16 offset;
702  u16 next_offset;
703  u16 frag_len;
704 
705  pi0 = to_next[0] = from[0];
706  from += 1;
707  n_left_from -= 1;
708  to_next += 1;
709  n_left_to_next -= 1;
710 
711  p0 = vlib_get_buffer (vm, pi0);
712  ip60 = vlib_buffer_get_current (p0);
713  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
714  offset =
715  clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
716  frag_len =
717  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
718  next_offset =
719  ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
720 
721  //FIXME: Support other extension headers, maybe
722 
723  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
724  {
725  map_ip6_map_ip6_reass_trace_t *tr =
726  vlib_add_trace (vm, node, p0, sizeof (*tr));
727  tr->offset = offset;
728  tr->frag_len = frag_len;
729  tr->out = 0;
730  }
731 
732  map_ip6_reass_lock ();
733  map_ip6_reass_t *r =
734  map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
735  frag0->identification, frag0->next_hdr,
736  &fragments_to_drop);
737  //FIXME: Use better error codes
738  if (PREDICT_FALSE (!r))
739  {
740  // Could not create a caching entry
741  error0 = MAP_ERROR_FRAGMENT_MEMORY;
742  }
743  else if (PREDICT_FALSE ((frag_len <= 20 &&
744  (ip6_frag_hdr_more (frag0) || (!offset)))))
745  {
746  //Very small fragments are restricted to the last one and
747  //can't be the first one
748  error0 = MAP_ERROR_FRAGMENT_MALFORMED;
749  }
750  else
751  if (map_ip6_reass_add_fragment
752  (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
753  {
754  map_ip6_reass_free (r, &fragments_to_drop);
755  error0 = MAP_ERROR_FRAGMENT_MEMORY;
756  }
757  else
758  {
759 #ifdef MAP_IP6_REASS_COUNT_BYTES
760  if (!ip6_frag_hdr_more (frag0))
761  r->expected_total = offset + frag_len;
762 #endif
763  ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
764  &fragments_to_drop);
765 #ifdef MAP_IP6_REASS_COUNT_BYTES
766  if (r->forwarded >= r->expected_total)
767  map_ip6_reass_free (r, &fragments_to_drop);
768 #endif
769  }
770  map_ip6_reass_unlock ();
771 
772  if (error0 == MAP_ERROR_NONE)
773  {
774  if (frag_len > 20)
775  {
776  //Dequeue the packet
777  n_left_to_next++;
778  to_next--;
779  }
780  else
781  {
782  //All data from that packet was copied, no need to keep it, but this is not an error
783  p0->error = error_node->errors[MAP_ERROR_NONE];
784  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
785  to_next, n_left_to_next,
786  pi0,
787  IP6_MAP_IP6_REASS_NEXT_DROP);
788  }
789  }
790  else
791  {
792  p0->error = error_node->errors[error0];
793  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
794  n_left_to_next, pi0,
795  IP6_MAP_IP6_REASS_NEXT_DROP);
796  }
797  }
798  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
799  }
800 
801  map_send_all_to_node (vm, fragments_ready, node,
802  &error_node->errors[MAP_ERROR_NONE],
803  IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
804  map_send_all_to_node (vm, fragments_to_drop, node,
805  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
806  IP6_MAP_IP6_REASS_NEXT_DROP);
807 
808  vec_free (fragments_to_drop);
809  vec_free (fragments_ready);
810  return frame->n_vectors;
811 }
812 
813 /*
814  * ip6_map_ip4_reass
815  */
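/*
 * Inner IPv4 fragments only carry the L4 port (needed for the MAP security
 * check) in the first fragment. Non-first fragments arriving before the
 * port is known are cached; once the first fragment reveals the port, the
 * cached fragments are looped back through this node via
 * fragments_to_loopback and forwarded normally.
 */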
816 static uword
817 ip6_map_ip4_reass (vlib_main_t * vm,
818  vlib_node_runtime_t * node, vlib_frame_t * frame)
819 {
820  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
821  vlib_node_runtime_t *error_node =
822  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
823  map_main_t *mm = &map_main;
824  vlib_combined_counter_main_t *cm = mm->domain_counters;
825  u32 thread_index = vm->thread_index;
826  u32 *fragments_to_drop = NULL;
827  u32 *fragments_to_loopback = NULL;
828 
829  from = vlib_frame_vector_args (frame);
830  n_left_from = frame->n_vectors;
831  next_index = node->cached_next_index;
832  while (n_left_from > 0)
833  {
834  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
835 
836  /* Single loop */
837  while (n_left_from > 0 && n_left_to_next > 0)
838  {
839  u32 pi0;
840  vlib_buffer_t *p0;
841  u8 error0 = MAP_ERROR_NONE;
842  map_domain_t *d0;
843  ip4_header_t *ip40;
844  ip6_header_t *ip60;
845  i32 port0 = 0;
846  u32 map_domain_index0 = ~0;
847  u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
848  u8 cached = 0;
849 
850  pi0 = to_next[0] = from[0];
851  from += 1;
852  n_left_from -= 1;
853  to_next += 1;
854  n_left_to_next -= 1;
855 
856  p0 = vlib_get_buffer (vm, pi0);
857  ip40 = vlib_buffer_get_current (p0);
858  ip60 = ((ip6_header_t *) ip40) - 1;
859 
860  d0 =
861  ip4_map_get_domain ((ip4_address_t *) & ip40->src_address.as_u32,
862  &map_domain_index0, &error0);
863 
864  map_ip4_reass_lock ();
865  //This node only deals with fragmented ip4
866  map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
867  ip40->dst_address.as_u32,
868  ip40->fragment_id,
869  ip40->protocol,
870  &fragments_to_drop);
871  if (PREDICT_FALSE (!r))
872  {
873  // Could not create a caching entry
874  error0 = MAP_ERROR_FRAGMENT_MEMORY;
875  }
876  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
877  {
878  // This is a fragment
879  if (r->port >= 0)
880  {
881  // We know the port already
882  port0 = r->port;
883  }
884  else if (map_ip4_reass_add_fragment (r, pi0))
885  {
886  // Not enough space for caching
887  error0 = MAP_ERROR_FRAGMENT_MEMORY;
888  map_ip4_reass_free (r, &fragments_to_drop);
889  }
890  else
891  {
892  cached = 1;
893  }
894  }
895  else if ((port0 = ip4_get_port (ip40, 1)) == 0)
896  {
897  // Could not find port from first fragment. Stop reassembling.
898  error0 = MAP_ERROR_BAD_PROTOCOL;
899  port0 = 0;
900  map_ip4_reass_free (r, &fragments_to_drop);
901  }
902  else
903  {
904  // Found port. Remember it and loopback saved fragments
905  r->port = port0;
906  map_ip4_reass_get_fragments (r, &fragments_to_loopback);
907  }
908 
909 #ifdef MAP_IP4_REASS_COUNT_BYTES
910  if (!cached && r)
911  {
912  r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
913  if (!ip4_get_fragment_more (ip40))
914  r->expected_total =
915  ip4_get_fragment_offset (ip40) * 8 +
916  clib_host_to_net_u16 (ip40->length) - 20;
917  if (r->forwarded >= r->expected_total)
918  map_ip4_reass_free (r, &fragments_to_drop);
919  }
920 #endif
921 
922  map_ip4_reass_unlock ();
923 
924  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
925  error0 =
926  ip6_map_sec_check (d0, port0, ip40,
927  ip60) ? MAP_ERROR_NONE :
928  MAP_ERROR_DECAP_SEC_CHECK;
929 
930  if (PREDICT_FALSE
931  (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
932  && error0 == MAP_ERROR_NONE && !cached))
933  {
934  vnet_buffer (p0)->ip_frag.flags = 0;
935  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
936  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
937  next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
938  }
939 
940  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
941  {
942  map_ip6_map_ip4_reass_trace_t *tr =
943  vlib_add_trace (vm, node, p0, sizeof (*tr));
944  tr->map_domain_index = map_domain_index0;
945  tr->port = port0;
946  tr->cached = cached;
947  }
948 
949  if (cached)
950  {
951  //Dequeue the packet
952  n_left_to_next++;
953  to_next--;
954  }
955  else
956  {
957  if (error0 == MAP_ERROR_NONE)
958  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
959  thread_index,
960  map_domain_index0, 1,
961  clib_net_to_host_u16
962  (ip40->length));
963  next0 =
964  (error0 ==
965  MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
966  p0->error = error_node->errors[error0];
967  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
968  n_left_to_next, pi0, next0);
969  }
970 
971  //Loopback when we reach the end of the input vector
972  if (n_left_from == 0 && vec_len (fragments_to_loopback))
973  {
974  from = vlib_frame_vector_args (frame);
975  u32 len = vec_len (fragments_to_loopback);
976  if (len <= VLIB_FRAME_SIZE)
977  {
978  clib_memcpy_fast (from, fragments_to_loopback,
979  sizeof (u32) * len);
980  n_left_from = len;
981  vec_reset_length (fragments_to_loopback);
982  }
983  else
984  {
985  clib_memcpy_fast (from, fragments_to_loopback +
986  (len - VLIB_FRAME_SIZE),
987  sizeof (u32) * VLIB_FRAME_SIZE);
988  n_left_from = VLIB_FRAME_SIZE;
989  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
990  }
991  }
992  }
993  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
994  }
995  map_send_all_to_node (vm, fragments_to_drop, node,
996  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
997  IP6_MAP_IP4_REASS_NEXT_DROP);
998 
999  vec_free (fragments_to_drop);
1000  vec_free (fragments_to_loopback);
1001  return frame->n_vectors;
1002 }
1003 
1004 /*
1005  * ip6_icmp_relay
1006  */
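/*
 * Relays ICMPv6 errors generated inside the IPv6-only MAP domain (e.g.
 * packet-too-big) back to the original IPv4 source as ICMPv4 errors, per
 * RFC 2473 section 8.3. Only errors whose inner packet is an encapsulated
 * IPv4 packet (IP_PROTOCOL_IP_IN_IP) are relayed; everything else is
 * counted as MAP_ERROR_ICMP_RELAY and dropped.
 */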
1007 static uword
1008 ip6_map_icmp_relay (vlib_main_t * vm,
1009  vlib_node_runtime_t * node, vlib_frame_t * frame)
1010 {
1011  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1012  vlib_node_runtime_t *error_node =
1013  vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
1014  map_main_t *mm = &map_main;
1015  u32 thread_index = vm->thread_index;
1016  u16 *fragment_ids, *fid;
1017 
1018  from = vlib_frame_vector_args (frame);
1019  n_left_from = frame->n_vectors;
1020  next_index = node->cached_next_index;
1021 
1022  /* Get random fragment IDs for replies. */
1023  fid = fragment_ids =
1024  clib_random_buffer_get_data (&vm->random_buffer,
1025  n_left_from * sizeof (fragment_ids[0]));
1026 
1027  while (n_left_from > 0)
1028  {
1029  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1030 
1031  /* Single loop */
1032  while (n_left_from > 0 && n_left_to_next > 0)
1033  {
1034  u32 pi0;
1035  vlib_buffer_t *p0;
1036  u8 error0 = MAP_ERROR_NONE;
1037  ip6_header_t *ip60;
1038  u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
1039  u32 mtu;
1040 
1041  pi0 = to_next[0] = from[0];
1042  from += 1;
1043  n_left_from -= 1;
1044  to_next += 1;
1045  n_left_to_next -= 1;
1046 
1047  p0 = vlib_get_buffer (vm, pi0);
1048  ip60 = vlib_buffer_get_current (p0);
1049  u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1050 
1051  /*
1052  * In:
1053  * IPv6 header (40)
1054  * ICMPv6 header (8)
1055  * IPv6 header (40)
1056  * Original IPv4 header / packet
1057  * Out:
1058  * New IPv4 header
1059  * New ICMP header
1060  * Original IPv4 header / packet
1061  */
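/*
 * Header size arithmetic for the vlib_buffer_advance (p0, 60) below:
 * incoming headers preceding the original IPv4 packet are
 * 40 (IPv6) + 8 (ICMPv6) + 40 (inner IPv6) = 88 bytes, outgoing headers
 * are 20 (IPv4) + 8 (ICMPv4) = 28 bytes, so the rewrite starts
 * 88 - 28 = 60 bytes into the buffer.
 */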
1062 
1063  /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1064  if (tlen < 76)
1065  {
1066  error0 = MAP_ERROR_ICMP_RELAY;
1067  goto error;
1068  }
1069 
1070  icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
1071  ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1072 
1073  if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1074  {
1075  error0 = MAP_ERROR_ICMP_RELAY;
1076  goto error;
1077  }
1078 
1079  ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1080  vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1081  ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1082  icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1083 
1084  /*
1085  * Relay according to RFC2473, section 8.3
1086  */
1087  switch (icmp60->type)
1088  {
1089  case ICMP6_destination_unreachable:
1090  case ICMP6_time_exceeded:
1091  case ICMP6_parameter_problem:
1092  /* Type 3 - destination unreachable, Code 1 - host unreachable */
1093  new_icmp40->type = ICMP4_destination_unreachable;
1094  new_icmp40->code =
1095  ICMP4_destination_unreachable_destination_unreachable_host;
1096  break;
1097 
1098  case ICMP6_packet_too_big:
1099  /* Type 3 - destination unreachable, Code 4 - packet too big */
1100  /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1101  mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
1102 
1103  /* Check DF flag */
1104  if (!
1105  (inner_ip40->flags_and_fragment_offset &
1106  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1107  {
1108  error0 = MAP_ERROR_ICMP_RELAY;
1109  goto error;
1110  }
1111 
1112  new_icmp40->type = ICMP4_destination_unreachable;
1113  new_icmp40->code =
1114  ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1115  *((u32 *) (new_icmp40 + 1)) =
1116  clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1117  break;
1118 
1119  default:
1120  error0 = MAP_ERROR_ICMP_RELAY;
1121  break;
1122  }
1123 
1124  /*
1125  * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1126  */
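/*
 * tlen is the outer IPv6 payload length, i.e. 8 (ICMPv6) + 40 (inner IPv6)
 * plus the original IPv4 packet. The relayed IPv4 packet replaces those 48
 * bytes with 20 (IPv4) + 8 (ICMPv4) = 28 bytes, so its total length is
 * tlen - 20, clamped to 576 bytes below.
 */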
1127  new_ip40->ip_version_and_header_length = 0x45;
1128  new_ip40->tos = 0;
1129  u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1130  new_ip40->length = clib_host_to_net_u16 (nlen);
1131  new_ip40->fragment_id = fid[0];
1132  fid++;
1133  new_ip40->ttl = 64;
1134  new_ip40->protocol = IP_PROTOCOL_ICMP;
1135  new_ip40->src_address = mm->icmp4_src_address;
1136  new_ip40->dst_address = inner_ip40->src_address;
1137  new_ip40->checksum = ip4_header_checksum (new_ip40);
1138 
1139  new_icmp40->checksum = 0;
1140  ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1141  new_icmp40->checksum = ~ip_csum_fold (sum);
1142 
1143  vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0,
1144  1);
1145 
1146  error:
1147  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1148  {
1149  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1150  tr->map_domain_index = 0;
1151  tr->port = 0;
1152  }
1153 
1154  next0 =
1155  (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1156  p0->error = error_node->errors[error0];
1157  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1158  n_left_to_next, pi0, next0);
1159  }
1160  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1161  }
1162 
1163  return frame->n_vectors;
1164 
1165 }
1166 
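/*
 * Per-node error strings: foreach_map_error (map.h) lists every
 * MAP_ERROR_* code together with its description, and the X-macro below
 * turns that list into the string table used by the node registrations
 * that follow.
 */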
1167 static char *map_error_strings[] = {
1168 #define _(sym,string) string,
1169  foreach_map_error
1170 #undef _
1171 };
1172 
1173 /* *INDENT-OFF* */
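/*
 * Register "ip6-map" on the "ip6-unicast" feature arc ahead of
 * "ip6-flow-classify"; interfaces with MAP enabled run their IPv6 input
 * through the ip6_map node above.
 */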
1174 VNET_FEATURE_INIT (ip6_map_feature, static) =
1175 {
1176  .arc_name = "ip6-unicast",
1177  .node_name = "ip6-map",
1178  .runs_before = VNET_FEATURES ("ip6-flow-classify"),
1179 };
1180 
1181 VLIB_REGISTER_NODE(ip6_map_node) = {
1182  .function = ip6_map,
1183  .name = "ip6-map",
1184  .vector_size = sizeof(u32),
1185  .format_trace = format_map_trace,
1186  .type = VLIB_NODE_TYPE_INTERNAL,
1187 
1188  .n_errors = MAP_N_ERROR,
1189  .error_strings = map_error_strings,
1190 
1191  .n_next_nodes = IP6_MAP_N_NEXT,
1192  .next_nodes = {
1193  [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1194 #ifdef MAP_SKIP_IP6_LOOKUP
1195  [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
1196 #endif
1197  [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1198  [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1199  [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1200  [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1201  [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1202  [IP6_MAP_NEXT_DROP] = "error-drop",
1203  [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
1204  },
1205 };
1206 /* *INDENT-ON* */
1207 
1208 /* *INDENT-OFF* */
1209 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
1210  .function = ip6_map_ip6_reass,
1211  .name = "ip6-map-ip6-reass",
1212  .vector_size = sizeof(u32),
1213  .format_trace = format_ip6_map_ip6_reass_trace,
1214  .type = VLIB_NODE_TYPE_INTERNAL,
1215  .n_errors = MAP_N_ERROR,
1216  .error_strings = map_error_strings,
1217  .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1218  .next_nodes = {
1219  [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1220  [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
1221  },
1222 };
1223 /* *INDENT-ON* */
1224 
1225 /* *INDENT-OFF* */
1226 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
1227  .function = ip6_map_ip4_reass,
1228  .name = "ip6-map-ip4-reass",
1229  .vector_size = sizeof(u32),
1230  .format_trace = format_ip6_map_ip4_reass_trace,
1231  .type = VLIB_NODE_TYPE_INTERNAL,
1232  .n_errors = MAP_N_ERROR,
1233  .error_strings = map_error_strings,
1234  .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1235  .next_nodes = {
1236  [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1237  [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1238  [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
1239  },
1240 };
1241 /* *INDENT-ON* */
1242 
1243 /* *INDENT-OFF* */
1244 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node) = {
1245  .function = ip6_map_icmp_relay,
1246  .name = "ip6-map-icmp-relay",
1247  .vector_size = sizeof(u32),
1248  .format_trace = format_map_trace, //FIXME
1249  .type = VLIB_NODE_TYPE_INTERNAL,
1250  .n_errors = MAP_N_ERROR,
1251  .error_strings = map_error_strings,
1252  .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1253  .next_nodes = {
1254  [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1255  [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1256  },
1257 };
1258 /* *INDENT-ON* */
1259 
1260 /*
1261  * fd.io coding-style-patch-verification: ON
1262  *
1263  * Local Variables:
1264  * eval: (c-set-style "gnu")
1265  * End:
1266  */
Definition: ip6_packet.h:385