FD.io VPP  v18.10-34-gcce845e
Vector Packet Processing
ip6_map.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include <vnet/ip/ip_frag.h>
18 #include <vnet/ip/ip4_to_ip6.h>
19 #include <vnet/ip/ip6_to_ip4.h>
20 
22 {
24 #ifdef MAP_SKIP_IP6_LOOKUP
26 #endif
35 };
36 
38 {
42 };
43 
45 {
50 };
51 
53 {
57 };
58 
62 
63 typedef struct
64 {
69 
/* Trace formatter for the "ip6-map-ip4-reass" node: prints the MAP domain
 * index, the L4 port, and whether the fragment was cached or forwarded.
 * NOTE(review): the Doxygen extraction dropped source line 75 (the
 * declaration "map_ip6_map_ip4_reass_trace_t *t ="); the remaining lines
 * are kept byte-for-byte. */
70 u8 *
71 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
72 {
73  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
74  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
76  va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
77  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
78  t->map_domain_index, t->port,
79  t->cached ? "cached" : "forwarded");
80 }
81 
82 typedef struct
83 {
88 
/* Trace formatter for the "ip6-map-ip6-reass" node: prints the fragment
 * offset, its length, and direction ("out" = re-emitted, "in" = cached).
 * NOTE(review): the Doxygen extraction dropped source line 94 (the
 * declaration "map_ip6_map_ip6_reass_trace_t *t ="); the remaining lines
 * are kept byte-for-byte. */
89 u8 *
90 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
91 {
92  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
93  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
95  va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
96  return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
97  t->frag_len, t->out ? "out" : "in");
98 }
99 
100 /*
101  * ip6_map_sec_check
102  */
/* MAP inbound anti-spoofing check: derives the expected IPv6 source
 * prefix (map_get_pfx) and suffix (map_get_sfx) for the domain from the
 * inner IPv4 source address and L4 port, then compares both 64-bit halves
 * against the actual outer IPv6 source address. Returns true on match,
 * false on mismatch (spoofed / misconfigured sender).
 * NOTE(review): the signature lines 103-104 ("static_always_inline bool
 * ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,")
 * are missing from this extraction - TODO confirm against the repository. */
105  ip6_header_t * ip6)
106 {
107  u16 sp4 = clib_net_to_host_u16 (port);
108  u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
109  u64 sal6 = map_get_pfx (d, sa4, sp4);
110  u64 sar6 = map_get_sfx (d, sa4, sp4);
111
112  if (PREDICT_FALSE
113  (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
114  || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
115  return (false);
116  return (true);
117 }
118 
/* Decides whether/how to run the inbound security check on a decapsulated
 * packet. Only applies to shared-address domains (ea_bits_len || rules,
 * psid_length > 0):
 *  - non-fragmented inner IPv4: extract the L4 port; if global sec_check
 *    is enabled, run ip6_map_sec_check and set *error accordingly; a
 *    port of 0 means the protocol carries no usable port -> BAD_PROTOCOL.
 *  - fragmented inner IPv4: the port is only in the first fragment, so if
 *    sec_check_frag is enabled, steer *next to virtual reassembly
 *    (IP6_MAP_NEXT_IP4_REASS) instead of checking here.
 * NOTE(review): the signature lines 119-120 ("static_always_inline void
 * ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,") are
 * missing from this extraction - TODO confirm against the repository. */
121  ip6_header_t * ip6, u32 * next, u8 * error)
122 {
123  map_main_t *mm = &map_main;
124  if (d->ea_bits_len || d->rules)
125  {
126  if (d->psid_length > 0)
127  {
128  if (!ip4_is_fragment (ip4))
129  {
130  u16 port = ip4_get_port (ip4, 1);
131  if (port)
132  {
133  if (mm->sec_check)
134  *error =
135  ip6_map_sec_check (d, port, ip4,
136  ip6) ? MAP_ERROR_NONE :
137  MAP_ERROR_DECAP_SEC_CHECK;
138  }
139  else
140  {
141  *error = MAP_ERROR_BAD_PROTOCOL;
142  }
143  }
144  else
145  {
146  *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
147  }
148  }
149  }
150 }
151 
/* Optional fast path (compiled under MAP_SKIP_IP6_LOOKUP): when a
 * pre-resolved next hop exists, stamp its DPO index directly into the
 * buffer's VLIB_TX adjacency and return true so the caller can bypass the
 * full FIB lookup; otherwise return false.
 * NOTE(review): this extraction dropped the function signature (lines
 * 152-153), the #ifdef'd condition (line 156) and the right-hand side of
 * the adj_index assignment (line 159) - presumably this is
 * ip6_map_ip4_lookup_bypass reading pre_resolved[].dpo.dpoi_index; verify
 * against the repository before relying on this description. */
154 {
155 #ifdef MAP_SKIP_IP6_LOOKUP
157  {
158  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
160  return (true);
161  }
162 #endif
163  return (false);
164 }
165 
166 /*
167  * ip6_map
168  */
/* Main "ip6-map" decapsulation node (dual-loop + single-loop pattern).
 * For each IPv6 packet: if it is IPv4-in-IPv6 (protocol 4) with a payload
 * big enough to hold an IPv4 header, look up the MAP domain from the
 * inner IPv4 source; ICMPv6 echo goes to ip6-local, other ICMPv6 to the
 * ICMP relay; IPv6 fragments go to ip6-map-ip6-reass; anything else is
 * BAD_PROTOCOL. Domain hits then run the inbound security check,
 * fragment if over the domain MTU, and fall through to ip4-lookup (or
 * the rewrite bypass). DECAP_SEC_CHECK failures may generate an ICMPv6
 * "source address failed policy" error when icmp6_enabled.
 * NOTE(review): the Doxygen extraction dropped many lines from this
 * function (the signature at 169-170, error_node/next0/next1 initializers,
 * the IP6_MAP_NEXT_IP6_ICMP_RELAY arms at 260/288, the
 * ip_frag.next_index right-hand sides and the
 * vlib_increment_combined_counter call heads, among others). All
 * surviving lines are kept byte-for-byte; do not treat this block as
 * compilable source. */
169 static uword
171 {
172  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
173  vlib_node_runtime_t *error_node =
175  map_main_t *mm = &map_main;
177  u32 thread_index = vm->thread_index;
178
179  from = vlib_frame_vector_args (frame);
180  n_left_from = frame->n_vectors;
181  next_index = node->cached_next_index;
182  while (n_left_from > 0)
183  {
184  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
185
186  /* Dual loop */
187  while (n_left_from >= 4 && n_left_to_next >= 2)
188  {
189  u32 pi0, pi1;
190  vlib_buffer_t *p0, *p1;
191  u8 error0 = MAP_ERROR_NONE;
192  u8 error1 = MAP_ERROR_NONE;
193  map_domain_t *d0 = 0, *d1 = 0;
194  ip4_header_t *ip40, *ip41;
195  ip6_header_t *ip60, *ip61;
196  u16 port0 = 0, port1 = 0;
197  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
200
201  /* Prefetch next iteration. */
202  {
203  vlib_buffer_t *p2, *p3;
204
205  p2 = vlib_get_buffer (vm, from[2]);
206  p3 = vlib_get_buffer (vm, from[3]);
207
208  vlib_prefetch_buffer_header (p2, LOAD);
209  vlib_prefetch_buffer_header (p3, LOAD);
210
211  /* IPv6 + IPv4 header + 8 bytes of ULP */
212  CLIB_PREFETCH (p2->data, 68, LOAD);
213  CLIB_PREFETCH (p3->data, 68, LOAD);
214  }
215
216  pi0 = to_next[0] = from[0];
217  pi1 = to_next[1] = from[1];
218  from += 2;
219  n_left_from -= 2;
220  to_next += 2;
221  n_left_to_next -= 2;
222
223  p0 = vlib_get_buffer (vm, pi0);
224  p1 = vlib_get_buffer (vm, pi1);
225  ip60 = vlib_buffer_get_current (p0);
226  ip61 = vlib_buffer_get_current (p1);
227  vlib_buffer_advance (p0, sizeof (ip6_header_t));
228  vlib_buffer_advance (p1, sizeof (ip6_header_t));
229  ip40 = vlib_buffer_get_current (p0);
230  ip41 = vlib_buffer_get_current (p1);
231
232  /*
233  * Encapsulated IPv4 packet
234  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
235  * - Lookup/Rewrite or Fragment node in case of packet > MTU
236  * Fragmented IPv6 packet
237  * ICMP IPv6 packet
238  * - Error -> Pass to ICMPv6/ICMPv4 relay
239  * - Info -> Pass to IPv6 local
240  * Anything else -> drop
241  */
242  if (PREDICT_TRUE
243  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
244  && clib_net_to_host_u16 (ip60->payload_length) > 20))
245  {
246  d0 =
247  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
248  (ip4_address_t *) & ip40->
249  src_address.as_u32, &map_domain_index0,
250  &error0);
251  }
252  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
253  clib_net_to_host_u16 (ip60->payload_length) >
254  sizeof (icmp46_header_t))
255  {
256  icmp46_header_t *icmp = (void *) (ip60 + 1);
257  next0 = (icmp->type == ICMP6_echo_request
258  || icmp->type ==
259  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
261  }
262  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
263  {
264  next0 = IP6_MAP_NEXT_IP6_REASS;
265  }
266  else
267  {
268  error0 = MAP_ERROR_BAD_PROTOCOL;
269  }
270  if (PREDICT_TRUE
271  (ip61->protocol == IP_PROTOCOL_IP_IN_IP
272  && clib_net_to_host_u16 (ip61->payload_length) > 20))
273  {
274  d1 =
275  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
276  (ip4_address_t *) & ip41->
277  src_address.as_u32, &map_domain_index1,
278  &error1);
279  }
280  else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
281  clib_net_to_host_u16 (ip61->payload_length) >
282  sizeof (icmp46_header_t))
283  {
284  icmp46_header_t *icmp = (void *) (ip61 + 1);
285  next1 = (icmp->type == ICMP6_echo_request
286  || icmp->type ==
287  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
289  }
290  else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
291  {
292  next1 = IP6_MAP_NEXT_IP6_REASS;
293  }
294  else
295  {
296  error1 = MAP_ERROR_BAD_PROTOCOL;
297  }
298
299  if (d0)
300  {
301  /* MAP inbound security check */
302  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
303
304  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
305  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
306  {
307  if (PREDICT_FALSE
308  (d0->mtu
309  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
310  {
311  vnet_buffer (p0)->ip_frag.flags = 0;
312  vnet_buffer (p0)->ip_frag.next_index =
314  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
316  }
317  else
318  {
319  next0 =
321  ip40) ?
322  IP6_MAP_NEXT_IP4_REWRITE : next0;
323  }
325  thread_index,
326  map_domain_index0, 1,
327  clib_net_to_host_u16
328  (ip40->length));
329  }
330  }
331  if (d1)
332  {
333  /* MAP inbound security check */
334  ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
335
336  if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
337  next1 == IP6_MAP_NEXT_IP4_LOOKUP))
338  {
339  if (PREDICT_FALSE
340  (d1->mtu
341  && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
342  {
343  vnet_buffer (p1)->ip_frag.flags = 0;
344  vnet_buffer (p1)->ip_frag.next_index =
346  vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
348  }
349  else
350  {
351  next1 =
353  ip41) ?
354  IP6_MAP_NEXT_IP4_REWRITE : next1;
355  }
357  thread_index,
358  map_domain_index1, 1,
359  clib_net_to_host_u16
360  (ip41->length));
361  }
362  }
363
364  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
365  {
366  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
367  tr->map_domain_index = map_domain_index0;
368  tr->port = port0;
369  }
370
371  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
372  {
373  map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
374  tr->map_domain_index = map_domain_index1;
375  tr->port = port1;
376  }
377
378  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
379  {
380  /* Set ICMP parameters */
381  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
382  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
383  ICMP6_destination_unreachable_source_address_failed_policy,
384  0);
385  next0 = IP6_MAP_NEXT_ICMP;
386  }
387  else
388  {
389  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
390  }
391
392  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
393  {
394  /* Set ICMP parameters */
395  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
396  icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
397  ICMP6_destination_unreachable_source_address_failed_policy,
398  0);
399  next1 = IP6_MAP_NEXT_ICMP;
400  }
401  else
402  {
403  next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
404  }
405
406  /* Reset packet */
407  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
408  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
409  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
410  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
411
412  p0->error = error_node->errors[error0];
413  p1->error = error_node->errors[error1];
414  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
415  n_left_to_next, pi0, pi1, next0,
416  next1);
417  }
418
419  /* Single loop */
420  while (n_left_from > 0 && n_left_to_next > 0)
421  {
422  u32 pi0;
423  vlib_buffer_t *p0;
424  u8 error0 = MAP_ERROR_NONE;
425  map_domain_t *d0 = 0;
426  ip4_header_t *ip40;
427  ip6_header_t *ip60;
428  i32 port0 = 0;
429  u32 map_domain_index0 = ~0;
431
432  pi0 = to_next[0] = from[0];
433  from += 1;
434  n_left_from -= 1;
435  to_next += 1;
436  n_left_to_next -= 1;
437
438  p0 = vlib_get_buffer (vm, pi0);
439  ip60 = vlib_buffer_get_current (p0);
440  vlib_buffer_advance (p0, sizeof (ip6_header_t));
441  ip40 = vlib_buffer_get_current (p0);
442
443  /*
444  * Encapsulated IPv4 packet
445  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
446  * - Lookup/Rewrite or Fragment node in case of packet > MTU
447  * Fragmented IPv6 packet
448  * ICMP IPv6 packet
449  * - Error -> Pass to ICMPv6/ICMPv4 relay
450  * - Info -> Pass to IPv6 local
451  * Anything else -> drop
452  */
453  if (PREDICT_TRUE
454  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
455  && clib_net_to_host_u16 (ip60->payload_length) > 20))
456  {
457  d0 =
458  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
459  (ip4_address_t *) & ip40->
460  src_address.as_u32, &map_domain_index0,
461  &error0);
462  }
463  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
464  clib_net_to_host_u16 (ip60->payload_length) >
465  sizeof (icmp46_header_t))
466  {
467  icmp46_header_t *icmp = (void *) (ip60 + 1);
468  next0 = (icmp->type == ICMP6_echo_request
469  || icmp->type ==
470  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
472  }
473  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
474  (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
475  IP_PROTOCOL_IP_IN_IP))
476  {
477  next0 = IP6_MAP_NEXT_IP6_REASS;
478  }
479  else
480  {
481  error0 = MAP_ERROR_BAD_PROTOCOL;
482  }
483
484  if (d0)
485  {
486  /* MAP inbound security check */
487  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
488
489  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
490  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
491  {
492  if (PREDICT_FALSE
493  (d0->mtu
494  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
495  {
496  vnet_buffer (p0)->ip_frag.flags = 0;
497  vnet_buffer (p0)->ip_frag.next_index =
499  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
501  }
502  else
503  {
504  next0 =
506  ip40) ?
507  IP6_MAP_NEXT_IP4_REWRITE : next0;
508  }
510  thread_index,
511  map_domain_index0, 1,
512  clib_net_to_host_u16
513  (ip40->length));
514  }
515  }
516
517  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
518  {
519  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
520  tr->map_domain_index = map_domain_index0;
521  tr->port = (u16) port0;
522  }
523
524  if (mm->icmp6_enabled &&
525  (error0 == MAP_ERROR_DECAP_SEC_CHECK
526  || error0 == MAP_ERROR_NO_DOMAIN))
527  {
528  /* Set ICMP parameters */
529  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
530  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
531  ICMP6_destination_unreachable_source_address_failed_policy,
532  0);
533  next0 = IP6_MAP_NEXT_ICMP;
534  }
535  else
536  {
537  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
538  }
539
540  /* Reset packet */
541  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
542  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
543
544  p0->error = error_node->errors[error0];
545  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
546  n_left_to_next, pi0, next0);
547  }
548  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
549  }
550
551  return frame->n_vectors;
552 }
553 
554 
/* Walks the per-reassembly fragment cache and turns every IPv6 fragment
 * whose continuation data is known into a standalone IPv4 fragment ready
 * to forward: restores the saved inner IPv4 header on non-first
 * fragments, appends the cached 20 bytes of the next fragment when more
 * fragments follow, translates the IPv6 fragment id/offset/MF bit into
 * the IPv4 header, recomputes length and checksum, adds a trace entry,
 * queues the buffer on *fragments_ready and releases the cache slot.
 * Finally the IPv6 header is moved over the fragment header (copying
 * next_hdr into ip6->protocol) so downstream sees IPv4-in-IPv6.
 * NOTE(review): this extraction dropped the function signature (lines
 * 555-556), the early-return condition (line 565), the clib_memcpy head
 * at 597, the flags_and_fragment_offset left-hand sides at 606/611/617,
 * the trace-declaration line 627 and line 638; surviving lines are kept
 * byte-for-byte. */
557  map_ip6_reass_t * r, u32 ** fragments_ready,
558  u32 ** fragments_to_drop)
559 {
560  ip4_header_t *ip40;
561  ip6_header_t *ip60;
562  ip6_frag_hdr_t *frag0;
563  vlib_buffer_t *p0;
564
566  return;
567
568  //The IP header is here, we need to check for packets
569  //that can be forwarded
570  int i;
571  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
572  {
573  if (r->fragments[i].pi == ~0 ||
574  ((!r->fragments[i].next_data_len)
575  && (r->fragments[i].next_data_offset != (0xffff))))
576  continue;
577
578  p0 = vlib_get_buffer (vm, r->fragments[i].pi);
579  ip60 = vlib_buffer_get_current (p0);
580  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
581  ip40 = (ip4_header_t *) (frag0 + 1);
582
583  if (ip6_frag_hdr_offset (frag0))
584  {
585  //Not first fragment, add the IPv4 header
586  clib_memcpy (ip40, &r->ip4_header, 20);
587  }
588
589 #ifdef MAP_IP6_REASS_COUNT_BYTES
590  r->forwarded +=
591  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
592 #endif
593
594  if (ip6_frag_hdr_more (frag0))
595  {
596  //Not last fragment, we copy end of next
598  r->fragments[i].next_data, 20);
599  p0->current_length += 20;
600  ip60->payload_length = u16_net_add (ip60->payload_length, 20);
601  }
602
603  if (!ip4_is_fragment (ip40))
604  {
605  ip40->fragment_id = frag_id_6to4 (frag0->identification);
607  clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
608  }
609  else
610  {
612  clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
613  ip6_frag_hdr_offset (frag0));
614  }
615
616  if (ip6_frag_hdr_more (frag0))
618  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
619
620  ip40->length =
621  clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
622  sizeof (*frag0));
623  ip40->checksum = ip4_header_checksum (ip40);
624
625  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
626  {
628  vlib_add_trace (vm, node, p0, sizeof (*tr));
629  tr->offset = ip4_get_fragment_offset (ip40);
630  tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
631  tr->out = 1;
632  }
633
634  vec_add1 (*fragments_ready, r->fragments[i].pi);
635  r->fragments[i].pi = ~0;
636  r->fragments[i].next_data_len = 0;
637  r->fragments[i].next_data_offset = 0;
639
640  //TODO: Best solution would be that ip6_map handles extension headers
641  // and ignores atomic fragment. But in the meantime, let's just copy the header.
642
643  u8 protocol = frag0->next_hdr;
644  memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
645  ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
646  protocol;
647  vlib_buffer_advance (p0, sizeof (*frag0));
648  }
649 }
650 
651 void
653 {
658 }
659 
660 void
662 {
667 }
668 
669 /*
670  * ip6_reass
671  * TODO: We should count the number of successfully
672  * transmitted fragment bytes and compare that to the last fragment
673  * offset such that we can free the reassembly structure when all fragments
674  * have been forwarded.
675  */
/* "ip6-map-ip6-reass" node: virtual reassembly for IPv6-fragmented MAP
 * packets. For each buffer it parses the fragment header, computes
 * offset / fragment length / expected next offset, records the fragment
 * in the reassembly state (map_ip6_reass_get + setter dropped by the
 * extraction), then calls ip6_map_ip6_reass_prepare to release any
 * fragments that became forwardable. Fragments whose data was fully
 * consumed (frag_len <= 20) are enqueued with MAP_ERROR_NONE; cached
 * fragments are dequeued (n_left_to_next++ / to_next--); failures drop.
 * Ready and dropped fragment vectors are flushed via
 * map_send_all_to_node at the end.
 * NOTE(review): extraction dropped the signature start (line 677), the
 * error-node lookup (682), trace declaration (725), the reass-lock /
 * set-fragment call heads (732, 751), the unlock (770) and the next-index
 * arguments of the enqueue and send-all calls (787, 795, 803, 806);
 * surviving lines are kept byte-for-byte. */
676 static uword
678  vlib_node_runtime_t * node, vlib_frame_t * frame)
679 {
680  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
681  vlib_node_runtime_t *error_node =
683  u32 *fragments_to_drop = NULL;
684  u32 *fragments_ready = NULL;
685
686  from = vlib_frame_vector_args (frame);
687  n_left_from = frame->n_vectors;
688  next_index = node->cached_next_index;
689  while (n_left_from > 0)
690  {
691  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
692
693  /* Single loop */
694  while (n_left_from > 0 && n_left_to_next > 0)
695  {
696  u32 pi0;
697  vlib_buffer_t *p0;
698  u8 error0 = MAP_ERROR_NONE;
699  ip6_header_t *ip60;
700  ip6_frag_hdr_t *frag0;
701  u16 offset;
702  u16 next_offset;
703  u16 frag_len;
704
705  pi0 = to_next[0] = from[0];
706  from += 1;
707  n_left_from -= 1;
708  to_next += 1;
709  n_left_to_next -= 1;
710
711  p0 = vlib_get_buffer (vm, pi0);
712  ip60 = vlib_buffer_get_current (p0);
713  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
714  offset =
715  clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
716  frag_len =
717  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
718  next_offset =
719  ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
720
721  //FIXME: Support other extension headers, maybe
722
723  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
724  {
726  vlib_add_trace (vm, node, p0, sizeof (*tr));
727  tr->offset = offset;
728  tr->frag_len = frag_len;
729  tr->out = 0;
730  }
731
733  map_ip6_reass_t *r =
734  map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
735  frag0->identification, frag0->next_hdr,
736  &fragments_to_drop);
737  //FIXME: Use better error codes
738  if (PREDICT_FALSE (!r))
739  {
740  // Could not create a caching entry
741  error0 = MAP_ERROR_FRAGMENT_MEMORY;
742  }
743  else if (PREDICT_FALSE ((frag_len <= 20 &&
744  (ip6_frag_hdr_more (frag0) || (!offset)))))
745  {
746  //Very small fragment are restricted to the last one and
747  //can't be the first one
748  error0 = MAP_ERROR_FRAGMENT_MALFORMED;
749  }
750  else
752  (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
753  {
754  map_ip6_reass_free (r, &fragments_to_drop);
755  error0 = MAP_ERROR_FRAGMENT_MEMORY;
756  }
757  else
758  {
759 #ifdef MAP_IP6_REASS_COUNT_BYTES
760  if (!ip6_frag_hdr_more (frag0))
761  r->expected_total = offset + frag_len;
762 #endif
763  ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
764  &fragments_to_drop);
765 #ifdef MAP_IP6_REASS_COUNT_BYTES
766  if (r->forwarded >= r->expected_total)
767  map_ip6_reass_free (r, &fragments_to_drop);
768 #endif
769  }
771
772  if (error0 == MAP_ERROR_NONE)
773  {
774  if (frag_len > 20)
775  {
776  //Dequeue the packet
777  n_left_to_next++;
778  to_next--;
779  }
780  else
781  {
782  //All data from that packet was copied no need to keep it, but this is not an error
783  p0->error = error_node->errors[MAP_ERROR_NONE];
784  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
785  to_next, n_left_to_next,
786  pi0,
788  }
789  }
790  else
791  {
792  p0->error = error_node->errors[error0];
793  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
794  n_left_to_next, pi0,
796  }
797  }
798  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
799  }
800
801  map_send_all_to_node (vm, fragments_ready, node,
802  &error_node->errors[MAP_ERROR_NONE],
804  map_send_all_to_node (vm, fragments_to_drop, node,
805  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
807
808  vec_free (fragments_to_drop);
809  vec_free (fragments_ready);
810  return frame->n_vectors;
811 }
812 
813 /*
814  * ip6_ip4_virt_reass
815  */
/* "ip6-map-ip4-reass" node: virtual reassembly over the *inner* IPv4
 * fragments, used to recover the L4 port needed by the MAP security
 * check. First fragment: extract the port, remember it on the reassembly
 * entry and loop back any cached fragments. Later fragments: reuse the
 * remembered port or cache the buffer until the port is known. Packets
 * with a known port are security-checked (ip6_map_sec_check), counted,
 * possibly sent to ip4-frag when over the domain MTU, and enqueued;
 * cached buffers are dequeued in place. When the input vector is
 * exhausted, cached fragments are looped back into the frame (at most
 * VLIB_FRAME_SIZE at a time).
 * NOTE(review): extraction dropped the signature start (817), error-node
 * lookup (822), next0 initializer (847), the reass lock / get call heads
 * (865-867), unlock (923), the ip_frag-path next0 assignment (938),
 * trace declaration (943), the counter call head (959) and parts of the
 * loopback memcpy (988); surviving lines are kept byte-for-byte. */
816 static uword
818  vlib_node_runtime_t * node, vlib_frame_t * frame)
819 {
820  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
821  vlib_node_runtime_t *error_node =
823  map_main_t *mm = &map_main;
825  u32 thread_index = vm->thread_index;
826  u32 *fragments_to_drop = NULL;
827  u32 *fragments_to_loopback = NULL;
828
829  from = vlib_frame_vector_args (frame);
830  n_left_from = frame->n_vectors;
831  next_index = node->cached_next_index;
832  while (n_left_from > 0)
833  {
834  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
835
836  /* Single loop */
837  while (n_left_from > 0 && n_left_to_next > 0)
838  {
839  u32 pi0;
840  vlib_buffer_t *p0;
841  u8 error0 = MAP_ERROR_NONE;
842  map_domain_t *d0;
843  ip4_header_t *ip40;
844  ip6_header_t *ip60;
845  i32 port0 = 0;
846  u32 map_domain_index0 = ~0;
848  u8 cached = 0;
849
850  pi0 = to_next[0] = from[0];
851  from += 1;
852  n_left_from -= 1;
853  to_next += 1;
854  n_left_to_next -= 1;
855
856  p0 = vlib_get_buffer (vm, pi0);
857  ip40 = vlib_buffer_get_current (p0);
858  ip60 = ((ip6_header_t *) ip40) - 1;
859
860  d0 =
861  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
862  (ip4_address_t *) & ip40->src_address.as_u32,
863  &map_domain_index0, &error0);
864
866  //This node only deals with fragmented ip4
868  ip40->dst_address.as_u32,
869  ip40->fragment_id,
870  ip40->protocol,
871  &fragments_to_drop);
872  if (PREDICT_FALSE (!r))
873  {
874  // Could not create a caching entry
875  error0 = MAP_ERROR_FRAGMENT_MEMORY;
876  }
877  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
878  {
879  // This is a fragment
880  if (r->port >= 0)
881  {
882  // We know the port already
883  port0 = r->port;
884  }
885  else if (map_ip4_reass_add_fragment (r, pi0))
886  {
887  // Not enough space for caching
888  error0 = MAP_ERROR_FRAGMENT_MEMORY;
889  map_ip4_reass_free (r, &fragments_to_drop);
890  }
891  else
892  {
893  cached = 1;
894  }
895  }
896  else if ((port0 = ip4_get_port (ip40, 1)) == 0)
897  {
898  // Could not find port from first fragment. Stop reassembling.
899  error0 = MAP_ERROR_BAD_PROTOCOL;
900  port0 = 0;
901  map_ip4_reass_free (r, &fragments_to_drop);
902  }
903  else
904  {
905  // Found port. Remember it and loopback saved fragments
906  r->port = port0;
907  map_ip4_reass_get_fragments (r, &fragments_to_loopback);
908  }
909
910 #ifdef MAP_IP4_REASS_COUNT_BYTES
911  if (!cached && r)
912  {
913  r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
914  if (!ip4_get_fragment_more (ip40))
915  r->expected_total =
916  ip4_get_fragment_offset (ip40) * 8 +
917  clib_host_to_net_u16 (ip40->length) - 20;
918  if (r->forwarded >= r->expected_total)
919  map_ip4_reass_free (r, &fragments_to_drop);
920  }
921 #endif
922
924
925  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
926  error0 =
927  ip6_map_sec_check (d0, port0, ip40,
928  ip60) ? MAP_ERROR_NONE :
929  MAP_ERROR_DECAP_SEC_CHECK;
930
931  if (PREDICT_FALSE
932  (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
933  && error0 == MAP_ERROR_NONE && !cached))
934  {
935  vnet_buffer (p0)->ip_frag.flags = 0;
936  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
937  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
939  }
940
941  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
942  {
944  vlib_add_trace (vm, node, p0, sizeof (*tr));
945  tr->map_domain_index = map_domain_index0;
946  tr->port = port0;
947  tr->cached = cached;
948  }
949
950  if (cached)
951  {
952  //Dequeue the packet
953  n_left_to_next++;
954  to_next--;
955  }
956  else
957  {
958  if (error0 == MAP_ERROR_NONE)
960  thread_index,
961  map_domain_index0, 1,
962  clib_net_to_host_u16
963  (ip40->length));
964  next0 =
965  (error0 ==
966  MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
967  p0->error = error_node->errors[error0];
968  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
969  n_left_to_next, pi0, next0);
970  }
971
972  //Loopback when we reach the end of the inpu vector
973  if (n_left_from == 0 && vec_len (fragments_to_loopback))
974  {
975  from = vlib_frame_vector_args (frame);
976  u32 len = vec_len (fragments_to_loopback);
977  if (len <= VLIB_FRAME_SIZE)
978  {
979  clib_memcpy (from, fragments_to_loopback,
980  sizeof (u32) * len);
981  n_left_from = len;
982  vec_reset_length (fragments_to_loopback);
983  }
984  else
985  {
986  clib_memcpy (from,
987  fragments_to_loopback + (len -
989  sizeof (u32) * VLIB_FRAME_SIZE);
990  n_left_from = VLIB_FRAME_SIZE;
991  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
992  }
993  }
994  }
995  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
996  }
997  map_send_all_to_node (vm, fragments_to_drop, node,
998  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
1000
1001  vec_free (fragments_to_drop);
1002  vec_free (fragments_to_loopback);
1003  return frame->n_vectors;
1004 }
1005 
1006 /*
1007  * ip6_icmp_relay
1008  */
/* "ip6-map-icmp-relay" node: relays an ICMPv6 error generated against a
 * MAP-encapsulated packet back to the inner IPv4 sender, per the RFC 2473
 * section 8.3 scheme noted inline. The incoming layout
 * IPv6|ICMPv6|IPv6|IPv4... is rewritten in place into IPv4|ICMPv4|IPv4...
 * (advance by 60 bytes). dest-unreachable/time-exceeded/param-problem map
 * to ICMPv4 host-unreachable; packet-too-big maps to frag-needed with the
 * MTU clamped to >= 1280 (and requires DF set on the inner IPv4 header,
 * otherwise the relay is abandoned). The new IPv4 header gets a random
 * fragment id from the pre-fetched fragment_ids pool, TTL 64, the
 * configured icmp4_src_address, and the total ICMP payload is capped at
 * 576 bytes (RFC 1812) before the ICMP checksum is recomputed.
 * NOTE(review): extraction dropped the signature start (1010), the
 * error-node lookup (1015), the clib_random_buffer_get_data call head
 * (1026) and the next0 declaration (1040); surviving lines are kept
 * byte-for-byte. */
1009 static uword
1011  vlib_node_runtime_t * node, vlib_frame_t * frame)
1012 {
1013  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1014  vlib_node_runtime_t *error_node =
1016  map_main_t *mm = &map_main;
1017  u32 thread_index = vm->thread_index;
1018  u16 *fragment_ids, *fid;
1019
1020  from = vlib_frame_vector_args (frame);
1021  n_left_from = frame->n_vectors;
1022  next_index = node->cached_next_index;
1023
1024  /* Get random fragment IDs for replies. */
1025  fid = fragment_ids =
1027  n_left_from * sizeof (fragment_ids[0]));
1028
1029  while (n_left_from > 0)
1030  {
1031  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1032
1033  /* Single loop */
1034  while (n_left_from > 0 && n_left_to_next > 0)
1035  {
1036  u32 pi0;
1037  vlib_buffer_t *p0;
1038  u8 error0 = MAP_ERROR_NONE;
1039  ip6_header_t *ip60;
1041  u32 mtu;
1042
1043  pi0 = to_next[0] = from[0];
1044  from += 1;
1045  n_left_from -= 1;
1046  to_next += 1;
1047  n_left_to_next -= 1;
1048
1049  p0 = vlib_get_buffer (vm, pi0);
1050  ip60 = vlib_buffer_get_current (p0);
1051  u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1052
1053  /*
1054  * In:
1055  * IPv6 header (40)
1056  * ICMPv6 header (8)
1057  * IPv6 header (40)
1058  * Original IPv4 header / packet
1059  * Out:
1060  * New IPv4 header
1061  * New ICMP header
1062  * Original IPv4 header / packet
1063  */
1064
1065  /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1066  if (tlen < 76)
1067  {
1068  error0 = MAP_ERROR_ICMP_RELAY;
1069  goto error;
1070  }
1071
1072  icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
1073  ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1074
1075  if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1076  {
1077  error0 = MAP_ERROR_ICMP_RELAY;
1078  goto error;
1079  }
1080
1081  ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1082  vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1083  ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1084  icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1085
1086  /*
1087  * Relay according to RFC2473, section 8.3
1088  */
1089  switch (icmp60->type)
1090  {
1091  case ICMP6_destination_unreachable:
1092  case ICMP6_time_exceeded:
1093  case ICMP6_parameter_problem:
1094  /* Type 3 - destination unreachable, Code 1 - host unreachable */
1095  new_icmp40->type = ICMP4_destination_unreachable;
1096  new_icmp40->code =
1097  ICMP4_destination_unreachable_destination_unreachable_host;
1098  break;
1099
1100  case ICMP6_packet_too_big:
1101  /* Type 3 - destination unreachable, Code 4 - packet too big */
1102  /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1103  mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
1104
1105  /* Check DF flag */
1106  if (!
1107  (inner_ip40->flags_and_fragment_offset &
1108  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1109  {
1110  error0 = MAP_ERROR_ICMP_RELAY;
1111  goto error;
1112  }
1113
1114  new_icmp40->type = ICMP4_destination_unreachable;
1115  new_icmp40->code =
1116  ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1117  *((u32 *) (new_icmp40 + 1)) =
1118  clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1119  break;
1120
1121  default:
1122  error0 = MAP_ERROR_ICMP_RELAY;
1123  break;
1124  }
1125
1126  /*
1127  * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1128  */
1129  new_ip40->ip_version_and_header_length = 0x45;
1130  new_ip40->tos = 0;
1131  u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1132  new_ip40->length = clib_host_to_net_u16 (nlen);
1133  new_ip40->fragment_id = fid[0];
1134  fid++;
1135  new_ip40->ttl = 64;
1136  new_ip40->protocol = IP_PROTOCOL_ICMP;
1137  new_ip40->src_address = mm->icmp4_src_address;
1138  new_ip40->dst_address = inner_ip40->src_address;
1139  new_ip40->checksum = ip4_header_checksum (new_ip40);
1140
1141  new_icmp40->checksum = 0;
1142  ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1143  new_icmp40->checksum = ~ip_csum_fold (sum);
1144
1145  vlib_increment_simple_counter (&mm->icmp_relayed, thread_index, 0,
1146  1);
1147
1148  error:
1149  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1150  {
1151  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1152  tr->map_domain_index = 0;
1153  tr->port = 0;
1154  }
1155
1156  next0 =
1157  (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1158  p0->error = error_node->errors[error0];
1159  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1160  n_left_to_next, pi0, next0);
1161  }
1162  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1163  }
1164
1165  return frame->n_vectors;
1166
1167 }
1168 
/* Error-string table shared by all MAP nodes, generated from the error
 * list macro. NOTE(review): the extraction dropped line 1171, which is
 * the "foreach_map_error" macro invocation that expands _(sym,string). */
1169 static char *map_error_strings[] = {
1170 #define _(sym,string) string,
1172 #undef _
1173 };
1174 
/* Registration of the "ip6-map" graph node and its next-node arcs.
 * NOTE(review): the extraction dropped line 1176, the opening
 * "VLIB_REGISTER_NODE (ip6_map_node) = {" (per the cross-reference
 * index appended to this dump). */
1175 /* *INDENT-OFF* */
1177  .function = ip6_map,
1178  .name = "ip6-map",
1179  .vector_size = sizeof(u32),
1180  .format_trace = format_map_trace,
1181  .type = VLIB_NODE_TYPE_INTERNAL,
1182
1183  .n_errors = MAP_N_ERROR,
1184  .error_strings = map_error_strings,
1185
1186  .n_next_nodes = IP6_MAP_N_NEXT,
1187  .next_nodes = {
1188  [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1189 #ifdef MAP_SKIP_IP6_LOOKUP
1190  [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
1191 #endif
1192  [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1193  [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1194  [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1195  [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1196  [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1197  [IP6_MAP_NEXT_DROP] = "error-drop",
1198  [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
1199  },
1200 };
1201 /* *INDENT-ON* */
1202 
/* Registration of the "ip6-map-ip6-reass" node; re-injects reassembled
 * packets into "ip6-map". NOTE(review): the extraction dropped the
 * opening "VLIB_REGISTER_NODE (ip6_map_ip6_reass_node) = {" line (1204). */
1203 /* *INDENT-OFF* */
1205  .function = ip6_map_ip6_reass,
1206  .name = "ip6-map-ip6-reass",
1207  .vector_size = sizeof(u32),
1208  .format_trace = format_ip6_map_ip6_reass_trace,
1209  .type = VLIB_NODE_TYPE_INTERNAL,
1210  .n_errors = MAP_N_ERROR,
1211  .error_strings = map_error_strings,
1212  .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1213  .next_nodes = {
1214  [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1215  [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
1216  },
1217 };
1218 /* *INDENT-ON* */
1219 
/* Registration of the "ip6-map-ip4-reass" node; forwards to ip4-lookup,
 * ip4-frag or drop. NOTE(review): the extraction dropped the opening
 * "VLIB_REGISTER_NODE (ip6_map_ip4_reass_node) = {" line (1221); the
 * appended cross-reference index confirms this registration exists. */
1220 /* *INDENT-OFF* */
1222  .function = ip6_map_ip4_reass,
1223  .name = "ip6-map-ip4-reass",
1224  .vector_size = sizeof(u32),
1225  .format_trace = format_ip6_map_ip4_reass_trace,
1226  .type = VLIB_NODE_TYPE_INTERNAL,
1227  .n_errors = MAP_N_ERROR,
1228  .error_strings = map_error_strings,
1229  .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1230  .next_nodes = {
1231  [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1232  [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1233  [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
1234  },
1235 };
1236 /* *INDENT-ON* */
1237 
/* Registration of the "ip6-map-icmp-relay" node. The trace formatter is
 * the generic map one (original author flagged it with FIXME).
 * NOTE(review): the extraction dropped the opening
 * "VLIB_REGISTER_NODE (ip6_map_icmp_relay_node) = {" line (1239). */
1238 /* *INDENT-OFF* */
1240  .function = ip6_map_icmp_relay,
1241  .name = "ip6-map-icmp-relay",
1242  .vector_size = sizeof(u32),
1243  .format_trace = format_map_trace, //FIXME
1244  .type = VLIB_NODE_TYPE_INTERNAL,
1245  .n_errors = MAP_N_ERROR,
1246  .error_strings = map_error_strings,
1247  .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1248  .next_nodes = {
1249  [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1250  [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1251  },
1252 };
1253 /* *INDENT-ON* */
1254 
1255 /*
1256  * fd.io coding-style-patch-verification: ON
1257  *
1258  * Local Variables:
1259  * eval: (c-set-style "gnu")
1260  * End:
1261  */
u16 forwarded
Definition: map.h:186
#define map_ip4_reass_lock()
Definition: map.h:477
u8 psid_length
Definition: map.h:97
#define CLIB_UNUSED(x)
Definition: clib.h:81
map_main_t map_main
Definition: map.c:27
static_always_inline bool ip6_map_sec_check(map_domain_t *d, u16 port, ip4_header_t *ip4, ip6_header_t *ip6)
Definition: ip6_map.c:104
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:204
static_always_inline u64 map_get_pfx(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:360
ip4_address_t src_address
Definition: ip4_packet.h:169
static void vlib_set_next_frame_buffer(vlib_main_t *vm, vlib_node_runtime_t *node, u32 next_index, u32 buffer_index)
Definition: node_funcs.h:397
ip6_map_ip4_reass_next_e
Definition: ip6_map.c:44
#define PREDICT_TRUE(x)
Definition: clib.h:108
u64 as_u64[2]
Definition: ip6_packet.h:51
unsigned long u64
Definition: types.h:89
dpo_id_t dpo
The Load-balance object index to use to forward.
Definition: map.h:220
#define NULL
Definition: clib.h:57
static void * clib_random_buffer_get_data(clib_random_buffer_t *b, uword n_bytes)
Definition: random_buffer.h:78
u32 thread_index
Definition: main.h:179
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
Definition: vec.h:523
ip6_icmp_relay_next_e
Definition: ip6_map.c:52
int i
bool sec_check_frag
Definition: map.h:247
uword ip_csum_t
Definition: ip_packet.h:181
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
Definition: counter.h:78
u16 flags_and_fragment_offset
Definition: ip4_packet.h:150
static char * map_error_strings[]
Definition: ip6_map.c:1169
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:472
vlib_node_registration_t ip6_map_ip4_reass_node
(constructor) VLIB_REGISTER_NODE (ip6_map_ip4_reass_node)
Definition: ip6_map.c:59
static_always_inline void ip6_map_ip6_reass_prepare(vlib_main_t *vm, vlib_node_runtime_t *node, map_ip6_reass_t *r, u32 **fragments_ready, u32 **fragments_to_drop)
Definition: ip6_map.c:556
ip6_address_t src_address
Definition: ip6_packet.h:378
unsigned char u8
Definition: types.h:56
vlib_node_registration_t ip6_map_node
(constructor) VLIB_REGISTER_NODE (ip6_map_node)
Definition: ip6_map.c:1176
IPv4 to IPv6 translation.
map_ip6_fragment_t fragments[MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY]
Definition: map.h:193
u16 port
Definition: map.h:338
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
static int ip4_is_fragment(const ip4_header_t *i)
Definition: ip4_packet.h:212
#define map_ip6_reass_unlock()
Definition: map.h:503
#define static_always_inline
Definition: clib.h:95
static_always_inline void ip6_map_security_check(map_domain_t *d, ip4_header_t *ip4, ip6_header_t *ip6, u32 *next, u8 *error)
Definition: ip6_map.c:120
ip4_address_t dst_address
Definition: ip4_packet.h:169
vlib_combined_counter_main_t * domain_counters
Definition: map.h:236
ip4_address_t icmp4_src_address
Definition: map.h:253
vlib_node_registration_t ip6_map_ip6_reass_node
(constructor) VLIB_REGISTER_NODE (ip6_map_ip6_reass_node)
Definition: ip6_map.c:60
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:187
static_always_inline void map_send_all_to_node(vlib_main_t *vm, u32 *pi_vector, vlib_node_runtime_t *node, vlib_error_t *error, u32 next)
Definition: map.h:570
vlib_simple_counter_main_t icmp_relayed
Definition: map.h:254
int map_ip4_reass_add_fragment(map_ip4_reass_t *r, u32 pi)
Definition: map.c:1591
ip6_address_t * rules
Definition: map.h:87
u8 ea_bits_len
Definition: map.h:95
unsigned int u32
Definition: types.h:88
#define VLIB_FRAME_SIZE
Definition: node.h:382
#define frag_id_6to4(id)
Definition: ip6_to_ip4.h:45
u8 * format_ip6_map_ip4_reass_trace(u8 *s, va_list *args)
Definition: ip6_map.c:71
void icmp6_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
Definition: icmp6.c:446
static u16 ip4_get_port(ip4_header_t *ip, u8 sender)
Get TCP/UDP port number or ICMP id from IPv4 packet.
Definition: ip4_to_ip6.h:51
#define ip6_frag_hdr_more(hdr)
Definition: ip6_packet.h:579
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static_always_inline map_domain_t * ip6_map_get_domain(u32 mdi, ip4_address_t *addr, u32 *map_domain_index, u8 *error)
Definition: map.h:438
unsigned short u16
Definition: types.h:57
static int ip4_get_fragment_offset(const ip4_header_t *i)
Definition: ip4_packet.h:199
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:205
map_ip4_reass_t * map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, u8 protocol, u32 **pi_to_drop)
Definition: map.c:1510
#define PREDICT_FALSE(x)
Definition: clib.h:107
vl_api_address_union_t src_address
Definition: ip_types.api:49
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:218
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
u16 expected_total
Definition: map.h:134
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:138
void map_ip4_drop_pi(u32 pi)
Definition: ip6_map.c:661
map_main_pre_resolved_t pre_resolved[FIB_PROTOCOL_MAX]
Pre-resolvd per-protocol global next-hops.
Definition: map.c:344
void map_ip4_reass_free(map_ip4_reass_t *r, u32 **pi_to_drop)
Definition: map.c:1467
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
Definition: ip4_packet.h:151
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:155
u8 next_data_len
Definition: map.h:177
u16 n_vectors
Definition: node.h:401
u16 next_data_offset
Definition: map.h:176
static_always_inline void map_ip4_reass_get_fragments(map_ip4_reass_t *r, u32 **pi)
Definition: map.h:481
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:79
vlib_main_t * vm
Definition: buffer.c:294
map_ip6_reass_t * map_ip6_reass_get(ip6_address_t *src, ip6_address_t *dst, u32 fragment_id, u8 protocol, u32 **pi_to_drop)
Definition: map.c:1682
u8 next_data[20]
Definition: map.h:178
#define vec_free(V)
Free vector&#39;s memory (no header).
Definition: vec.h:339
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
Definition: ip_packet.h:254
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:89
u16 forwarded
Definition: map.h:135
static uword ip6_map_icmp_relay(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip6_map.c:1010
#define clib_memcpy(a, b, c)
Definition: string.h:75
bool icmp6_enabled
Definition: map.h:248
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:455
static uword ip6_map_ip4_reass(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip6_map.c:817
#define foreach_map_error
Definition: map.h:308
static int ip4_get_fragment_more(const ip4_header_t *i)
Definition: ip4_packet.h:205
signed int i32
Definition: types.h:77
static uword ip6_map(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip6_map.c:170
#define ip6_frag_hdr_offset(hdr)
Definition: ip6_packet.h:573
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:513
bool sec_check
Definition: map.h:246
ip6_map_next_e
Definition: ip6_map.c:21
static_always_inline bool ip6_map_ip4_lookup_bypass(vlib_buffer_t *p0, ip4_header_t *ip)
Definition: ip6_map.c:153
ip4_header_t ip4_header
Definition: map.h:192
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:218
#define map_ip4_reass_unlock()
Definition: map.h:478
static vlib_node_registration_t ip6_map_icmp_relay_node
(constructor) VLIB_REGISTER_NODE (ip6_map_icmp_relay_node)
Definition: ip6_map.c:61
u8 * format_map_trace(u8 *s, va_list *args)
Definition: map.c:1430
IPv6 to IPv4 translation.
u8 * format_ip6_map_ip6_reass_trace(u8 *s, va_list *args)
Definition: ip6_map.c:90
#define u8_ptr_add(ptr, index)
Definition: ip.h:68
ip6_map_ip6_reass_next_e
Definition: ip6_map.c:37
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:57
struct _vlib_node_registration vlib_node_registration_t
Definition: defs.h:47
static uword ip6_map_ip6_reass(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip6_map.c:677
u16 mtu
Definition: map.h:91
u16 payload_length
Definition: ip6_packet.h:369
index_t dpoi_index
the index of objects of that type
Definition: dpo.h:184
#define FIB_NODE_INDEX_INVALID
Definition: fib_types.h:31
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define map_ip6_reass_lock()
Definition: map.h:502
i32 port
Definition: map.h:137
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:267
A collection of combined counters.
Definition: counter.h:172
struct clib_bihash_value offset
template key/value backing page structure
void map_ip6_reass_free(map_ip6_reass_t *r, u32 **pi_to_drop)
Definition: map.c:1630
#define vnet_buffer(b)
Definition: buffer.h:344
#define MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY
Definition: map.h:70
u8 data[0]
Packet data.
Definition: buffer.h:175
#define u16_net_add(u, val)
Definition: ip.h:69
#define IP4_HEADER_FLAG_DONT_FRAGMENT
Definition: ip4_packet.h:152
static_always_inline u64 map_get_sfx(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:384
u32 map_domain_index
Definition: map.h:337
u8 ip_version_and_header_length
Definition: ip4_packet.h:137
u32 ip6_reass_buffered_counter
Definition: map.h:301
int map_ip6_reass_add_fragment(map_ip6_reass_t *r, u32 pi, u16 data_offset, u16 next_data_offset, u8 *data_start, u16 data_len)
Definition: map.c:1770
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:116
u16 expected_total
Definition: map.h:185
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:58
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:246
clib_random_buffer_t random_buffer
Definition: main.h:173
static u16 ip_csum_fold(ip_csum_t c)
Definition: ip_packet.h:237
void map_ip6_drop_pi(u32 pi)
Definition: ip6_map.c:652
ip6_address_t dst_address
Definition: ip6_packet.h:378