FD.io VPP v17.04.2-2-ga8f93f8
Vector Packet Processing
ip6_map.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 
19 enum ip6_map_next_e
20 {
21  IP6_MAP_NEXT_IP4_LOOKUP,
22 #ifdef MAP_SKIP_IP6_LOOKUP
23  IP6_MAP_NEXT_IP4_REWRITE,
24 #endif
25  IP6_MAP_NEXT_IP6_REASS,
26  IP6_MAP_NEXT_IP4_REASS,
27  IP6_MAP_NEXT_IP4_FRAGMENT,
28  IP6_MAP_NEXT_IP6_ICMP_RELAY,
29  IP6_MAP_NEXT_IP6_LOCAL,
30  IP6_MAP_NEXT_DROP,
31  IP6_MAP_NEXT_ICMP,
32  IP6_MAP_N_NEXT,
33 };
34 
35 enum ip6_map_ip6_reass_next_e
36 {
37  IP6_MAP_IP6_REASS_NEXT_IP6_MAP,
38  IP6_MAP_IP6_REASS_NEXT_DROP,
39  IP6_MAP_IP6_REASS_N_NEXT,
40 };
41 
42 enum ip6_map_ip4_reass_next_e
43 {
44  IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP,
45  IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT,
46  IP6_MAP_IP4_REASS_NEXT_DROP,
47  IP6_MAP_IP4_REASS_N_NEXT,
48 };
49 
50 enum ip6_icmp_relay_next_e
51 {
52  IP6_ICMP_RELAY_NEXT_IP4_LOOKUP,
53  IP6_ICMP_RELAY_NEXT_DROP,
54  IP6_ICMP_RELAY_N_NEXT,
55 };
56 
57 vlib_node_registration_t ip6_map_ip4_reass_node;
58 vlib_node_registration_t ip6_map_ip6_reass_node;
59 static vlib_node_registration_t ip6_map_icmp_relay_node;
60 
61 typedef struct
62 {
63  u32 map_domain_index;
64  u16 port;
65  u8 cached;
66 } map_ip6_map_ip4_reass_trace_t;
67 
68 u8 *
69 format_ip6_map_ip4_reass_trace (u8 * s, va_list * args)
70 {
71  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
72  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
73  map_ip6_map_ip4_reass_trace_t *t =
74  va_arg (*args, map_ip6_map_ip4_reass_trace_t *);
75  return format (s, "MAP domain index: %d L4 port: %u Status: %s",
76  t->map_domain_index, t->port,
77  t->cached ? "cached" : "forwarded");
78 }
79 
80 typedef struct
81 {
82  u16 offset;
83  u16 frag_len;
84  u8 out;
85 } map_ip6_map_ip6_reass_trace_t;
86 
87 u8 *
88 format_ip6_map_ip6_reass_trace (u8 * s, va_list * args)
89 {
90  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
91  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
92  map_ip6_map_ip6_reass_trace_t *t =
93  va_arg (*args, map_ip6_map_ip6_reass_trace_t *);
94  return format (s, "Offset: %d Fragment length: %d Status: %s", t->offset,
95  t->frag_len, t->out ? "out" : "in");
96 }
97 
98 /*
99  * ip6_map_sec_check
100  */
101 static_always_inline bool
102 ip6_map_sec_check (map_domain_t * d, u16 port, ip4_header_t * ip4,
103  ip6_header_t * ip6)
104 {
105  u16 sp4 = clib_net_to_host_u16 (port);
106  u32 sa4 = clib_net_to_host_u32 (ip4->src_address.as_u32);
107  u64 sal6 = map_get_pfx (d, sa4, sp4);
108  u64 sar6 = map_get_sfx (d, sa4, sp4);
109 
110  if (PREDICT_FALSE
111  (sal6 != clib_net_to_host_u64 (ip6->src_address.as_u64[0])
112  || sar6 != clib_net_to_host_u64 (ip6->src_address.as_u64[1])))
113  return (false);
114  return (true);
115 }
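/*
 * Note: ip6_map_sec_check() is the MAP inbound anti-spoofing check.
 * map_get_pfx()/map_get_sfx() compute the IPv6 source address a
 * conforming CE would use for this inner IPv4 source address and port
 * (the port carries the PSID); the packet passes only if both 64-bit
 * halves of the actual IPv6 source address match that derivation.
 */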
116 
117 static_always_inline void
118 ip6_map_security_check (map_domain_t * d, ip4_header_t * ip4,
119  ip6_header_t * ip6, u32 * next, u8 * error)
120 {
121  map_main_t *mm = &map_main;
122  if (d->ea_bits_len || d->rules)
123  {
124  if (d->psid_length > 0)
125  {
126  if (!ip4_is_fragment (ip4))
127  {
128  u16 port = ip4_map_get_port (ip4, MAP_SENDER);
129  if (port)
130  {
131  if (mm->sec_check)
132  *error =
133  ip6_map_sec_check (d, port, ip4,
134  ip6) ? MAP_ERROR_NONE :
135  MAP_ERROR_DECAP_SEC_CHECK;
136  }
137  else
138  {
139  *error = MAP_ERROR_BAD_PROTOCOL;
140  }
141  }
142  else
143  {
144  *next = mm->sec_check_frag ? IP6_MAP_NEXT_IP4_REASS : *next;
145  }
146  }
147  }
148 }
149 
150 static_always_inline bool
151 ip6_map_ip4_lookup_bypass (vlib_buffer_t * p0, ip4_header_t * ip)
152 {
153 #ifdef MAP_SKIP_IP6_LOOKUP
154  if (FIB_NODE_INDEX_INVALID != pre_resolved[FIB_PROTOCOL_IP4].fei)
155  {
156  vnet_buffer (p0)->ip.adj_index[VLIB_TX] =
157  pre_resolved[FIB_PROTOCOL_IP4].dpo.dpoi_index;
158  return (true);
159  }
160 #endif
161  return (false);
162 }
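/*
 * Note: with MAP_SKIP_IP6_LOOKUP compiled in and a pre-resolved IPv4
 * next-hop configured, this bypass stores the pre-resolved load-balance
 * index in the buffer's adj_index[VLIB_TX], letting the caller send the
 * packet to IP6_MAP_NEXT_IP4_REWRITE (ip4-load-balance) instead of a
 * full ip4-lookup.
 */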
163 
164 /*
165  * ip6_map
166  */
167 static uword
168 ip6_map (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
169 {
170  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
171  vlib_node_runtime_t *error_node =
172  vlib_node_get_runtime (vm, ip6_map_node.index);
173  map_main_t *mm = &map_main;
174  vlib_combined_counter_main_t *cm = mm->domain_counters;
175  u32 cpu_index = os_get_cpu_number ();
176 
177  from = vlib_frame_vector_args (frame);
178  n_left_from = frame->n_vectors;
179  next_index = node->cached_next_index;
180  while (n_left_from > 0)
181  {
182  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
183 
184  /* Dual loop */
185  while (n_left_from >= 4 && n_left_to_next >= 2)
186  {
187  u32 pi0, pi1;
188  vlib_buffer_t *p0, *p1;
189  u8 error0 = MAP_ERROR_NONE;
190  u8 error1 = MAP_ERROR_NONE;
191  map_domain_t *d0 = 0, *d1 = 0;
192  ip4_header_t *ip40, *ip41;
193  ip6_header_t *ip60, *ip61;
194  u16 port0 = 0, port1 = 0;
195  u32 map_domain_index0 = ~0, map_domain_index1 = ~0;
196  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
197  u32 next1 = IP6_MAP_NEXT_IP4_LOOKUP;
198 
199  /* Prefetch next iteration. */
200  {
201  vlib_buffer_t *p2, *p3;
202 
203  p2 = vlib_get_buffer (vm, from[2]);
204  p3 = vlib_get_buffer (vm, from[3]);
205 
206  vlib_prefetch_buffer_header (p2, LOAD);
207  vlib_prefetch_buffer_header (p3, LOAD);
208 
209  /* IPv6 + IPv4 header + 8 bytes of ULP */
210  CLIB_PREFETCH (p2->data, 68, LOAD);
211  CLIB_PREFETCH (p3->data, 68, LOAD);
212  }
213 
214  pi0 = to_next[0] = from[0];
215  pi1 = to_next[1] = from[1];
216  from += 2;
217  n_left_from -= 2;
218  to_next += 2;
219  n_left_to_next -= 2;
220 
221  p0 = vlib_get_buffer (vm, pi0);
222  p1 = vlib_get_buffer (vm, pi1);
223  ip60 = vlib_buffer_get_current (p0);
224  ip61 = vlib_buffer_get_current (p1);
225  vlib_buffer_advance (p0, sizeof (ip6_header_t));
226  vlib_buffer_advance (p1, sizeof (ip6_header_t));
227  ip40 = vlib_buffer_get_current (p0);
228  ip41 = vlib_buffer_get_current (p1);
229 
230  /*
231  * Encapsulated IPv4 packet
232  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
233  * - Lookup/Rewrite or Fragment node in case of packet > MTU
234  * Fragmented IPv6 packet
235  * ICMP IPv6 packet
236  * - Error -> Pass to ICMPv6/ICMPv4 relay
237  * - Info -> Pass to IPv6 local
238  * Anything else -> drop
239  */
240  if (PREDICT_TRUE
241  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
242  && clib_net_to_host_u16 (ip60->payload_length) > 20))
243  {
244  d0 =
245  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
246  (ip4_address_t *) & ip40->src_address.
247  as_u32, &map_domain_index0, &error0);
248  }
249  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
250  clib_net_to_host_u16 (ip60->payload_length) >
251  sizeof (icmp46_header_t))
252  {
253  icmp46_header_t *icmp = (void *) (ip60 + 1);
254  next0 = (icmp->type == ICMP6_echo_request
255  || icmp->type ==
256  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
257  IP6_MAP_NEXT_IP6_ICMP_RELAY;
258  }
259  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
260  {
261  next0 = IP6_MAP_NEXT_IP6_REASS;
262  }
263  else
264  {
265  error0 = MAP_ERROR_BAD_PROTOCOL;
266  }
267  if (PREDICT_TRUE
268  (ip61->protocol == IP_PROTOCOL_IP_IN_IP
269  && clib_net_to_host_u16 (ip61->payload_length) > 20))
270  {
271  d1 =
272  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
273  (ip4_address_t *) & ip41->src_address.
274  as_u32, &map_domain_index1, &error1);
275  }
276  else if (ip61->protocol == IP_PROTOCOL_ICMP6 &&
277  clib_net_to_host_u16 (ip61->payload_length) >
278  sizeof (icmp46_header_t))
279  {
280  icmp46_header_t *icmp = (void *) (ip61 + 1);
281  next1 = (icmp->type == ICMP6_echo_request
282  || icmp->type ==
283  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
284  IP6_MAP_NEXT_IP6_ICMP_RELAY;
285  }
286  else if (ip61->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION)
287  {
288  next1 = IP6_MAP_NEXT_IP6_REASS;
289  }
290  else
291  {
292  error1 = MAP_ERROR_BAD_PROTOCOL;
293  }
294 
295  if (d0)
296  {
297  /* MAP inbound security check */
298  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
299 
300  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
301  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
302  {
303  if (PREDICT_FALSE
304  (d0->mtu
305  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
306  {
307  vnet_buffer (p0)->ip_frag.header_offset = 0;
308  vnet_buffer (p0)->ip_frag.flags = 0;
309  vnet_buffer (p0)->ip_frag.next_index =
310  IP4_FRAG_NEXT_IP4_LOOKUP;
311  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
312  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
313  }
314  else
315  {
316  next0 =
317  ip6_map_ip4_lookup_bypass (p0,
318  ip40) ?
319  IP6_MAP_NEXT_IP4_REWRITE : next0;
320  }
321  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
322  cpu_index,
323  map_domain_index0, 1,
324  clib_net_to_host_u16
325  (ip40->length));
326  }
327  }
328  if (d1)
329  {
330  /* MAP inbound security check */
331  ip6_map_security_check (d1, ip41, ip61, &next1, &error1);
332 
333  if (PREDICT_TRUE (error1 == MAP_ERROR_NONE &&
334  next1 == IP6_MAP_NEXT_IP4_LOOKUP))
335  {
336  if (PREDICT_FALSE
337  (d1->mtu
338  && (clib_host_to_net_u16 (ip41->length) > d1->mtu)))
339  {
340  vnet_buffer (p1)->ip_frag.header_offset = 0;
341  vnet_buffer (p1)->ip_frag.flags = 0;
342  vnet_buffer (p1)->ip_frag.next_index =
343  IP4_FRAG_NEXT_IP4_LOOKUP;
344  vnet_buffer (p1)->ip_frag.mtu = d1->mtu;
345  next1 = IP6_MAP_NEXT_IP4_FRAGMENT;
346  }
347  else
348  {
349  next1 =
350  ip6_map_ip4_lookup_bypass (p1,
351  ip41) ?
352  IP6_MAP_NEXT_IP4_REWRITE : next1;
353  }
354  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
355  cpu_index,
356  map_domain_index1, 1,
357  clib_net_to_host_u16
358  (ip41->length));
359  }
360  }
361 
362  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
363  {
364  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
365  tr->map_domain_index = map_domain_index0;
366  tr->port = port0;
367  }
368 
369  if (PREDICT_FALSE (p1->flags & VLIB_BUFFER_IS_TRACED))
370  {
371  map_trace_t *tr = vlib_add_trace (vm, node, p1, sizeof (*tr));
372  tr->map_domain_index = map_domain_index1;
373  tr->port = port1;
374  }
375 
376  if (error0 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
377  {
378  /* Set ICMP parameters */
379  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
380  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
381  ICMP6_destination_unreachable_source_address_failed_policy,
382  0);
383  next0 = IP6_MAP_NEXT_ICMP;
384  }
385  else
386  {
387  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
388  }
389 
390  if (error1 == MAP_ERROR_DECAP_SEC_CHECK && mm->icmp6_enabled)
391  {
392  /* Set ICMP parameters */
393  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
394  icmp6_error_set_vnet_buffer (p1, ICMP6_destination_unreachable,
395  ICMP6_destination_unreachable_source_address_failed_policy,
396  0);
397  next1 = IP6_MAP_NEXT_ICMP;
398  }
399  else
400  {
401  next1 = (error1 == MAP_ERROR_NONE) ? next1 : IP6_MAP_NEXT_DROP;
402  }
403 
404  /* Reset packet */
405  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
406  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
407  if (next1 == IP6_MAP_NEXT_IP6_LOCAL)
408  vlib_buffer_advance (p1, -sizeof (ip6_header_t));
409 
410  p0->error = error_node->errors[error0];
411  p1->error = error_node->errors[error1];
412  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
413  n_left_to_next, pi0, pi1, next0,
414  next1);
415  }
416 
417  /* Single loop */
418  while (n_left_from > 0 && n_left_to_next > 0)
419  {
420  u32 pi0;
421  vlib_buffer_t *p0;
422  u8 error0 = MAP_ERROR_NONE;
423  map_domain_t *d0 = 0;
424  ip4_header_t *ip40;
425  ip6_header_t *ip60;
426  i32 port0 = 0;
427  u32 map_domain_index0 = ~0;
428  u32 next0 = IP6_MAP_NEXT_IP4_LOOKUP;
429 
430  pi0 = to_next[0] = from[0];
431  from += 1;
432  n_left_from -= 1;
433  to_next += 1;
434  n_left_to_next -= 1;
435 
436  p0 = vlib_get_buffer (vm, pi0);
437  ip60 = vlib_buffer_get_current (p0);
438  vlib_buffer_advance (p0, sizeof (ip6_header_t));
439  ip40 = vlib_buffer_get_current (p0);
440 
441  /*
442  * Encapsulated IPv4 packet
443  * - IPv4 fragmented -> Pass to virtual reassembly unless security check disabled
444  * - Lookup/Rewrite or Fragment node in case of packet > MTU
445  * Fragmented IPv6 packet
446  * ICMP IPv6 packet
447  * - Error -> Pass to ICMPv6/ICMPv4 relay
448  * - Info -> Pass to IPv6 local
449  * Anything else -> drop
450  */
451  if (PREDICT_TRUE
452  (ip60->protocol == IP_PROTOCOL_IP_IN_IP
453  && clib_net_to_host_u16 (ip60->payload_length) > 20))
454  {
455  d0 =
456  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
457  (ip4_address_t *) & ip40->src_address.
458  as_u32, &map_domain_index0, &error0);
459  }
460  else if (ip60->protocol == IP_PROTOCOL_ICMP6 &&
461  clib_net_to_host_u16 (ip60->payload_length) >
462  sizeof (icmp46_header_t))
463  {
464  icmp46_header_t *icmp = (void *) (ip60 + 1);
465  next0 = (icmp->type == ICMP6_echo_request
466  || icmp->type ==
467  ICMP6_echo_reply) ? IP6_MAP_NEXT_IP6_LOCAL :
468  IP6_MAP_NEXT_IP6_ICMP_RELAY;
469  }
470  else if (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION &&
471  (((ip6_frag_hdr_t *) (ip60 + 1))->next_hdr ==
472  IP_PROTOCOL_IP_IN_IP))
473  {
474  next0 = IP6_MAP_NEXT_IP6_REASS;
475  }
476  else
477  {
478  error0 = MAP_ERROR_BAD_PROTOCOL;
479  }
480 
481  if (d0)
482  {
483  /* MAP inbound security check */
484  ip6_map_security_check (d0, ip40, ip60, &next0, &error0);
485 
486  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE &&
487  next0 == IP6_MAP_NEXT_IP4_LOOKUP))
488  {
489  if (PREDICT_FALSE
490  (d0->mtu
491  && (clib_host_to_net_u16 (ip40->length) > d0->mtu)))
492  {
493  vnet_buffer (p0)->ip_frag.header_offset = 0;
494  vnet_buffer (p0)->ip_frag.flags = 0;
495  vnet_buffer (p0)->ip_frag.next_index =
496  IP4_FRAG_NEXT_IP4_LOOKUP;
497  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
498  next0 = IP6_MAP_NEXT_IP4_FRAGMENT;
499  }
500  else
501  {
502  next0 =
503  ip6_map_ip4_lookup_bypass (p0,
504  ip40) ?
505  IP6_MAP_NEXT_IP4_REWRITE : next0;
506  }
507  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
508  cpu_index,
509  map_domain_index0, 1,
510  clib_net_to_host_u16
511  (ip40->length));
512  }
513  }
514 
515  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
516  {
517  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
518  tr->map_domain_index = map_domain_index0;
519  tr->port = (u16) port0;
520  }
521 
522  if (mm->icmp6_enabled &&
523  (error0 == MAP_ERROR_DECAP_SEC_CHECK
524  || error0 == MAP_ERROR_NO_DOMAIN))
525  {
526  /* Set ICMP parameters */
527  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
528  icmp6_error_set_vnet_buffer (p0, ICMP6_destination_unreachable,
529  ICMP6_destination_unreachable_source_address_failed_policy,
530  0);
531  next0 = IP6_MAP_NEXT_ICMP;
532  }
533  else
534  {
535  next0 = (error0 == MAP_ERROR_NONE) ? next0 : IP6_MAP_NEXT_DROP;
536  }
537 
538  /* Reset packet */
539  if (next0 == IP6_MAP_NEXT_IP6_LOCAL)
540  vlib_buffer_advance (p0, -sizeof (ip6_header_t));
541 
542  p0->error = error_node->errors[error0];
543  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
544  n_left_to_next, pi0, next0);
545  }
546  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
547  }
548 
549  return frame->n_vectors;
550 }
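/*
 * Note: ip6_map() is the MAP-E decapsulation node. As the in-line comment
 * above describes, each IPv6 packet is classified by protocol: IP-in-IP
 * payloads are security-checked and handed to ip4-lookup (or ip4-frag when
 * larger than the domain MTU), IPv6 fragments go to ip6-map-ip6-reass,
 * ICMPv6 errors to the ICMP relay, echo request/reply to ip6-local, and
 * anything else is dropped.
 */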
551 
552 
553 static_always_inline void
554 ip6_map_ip6_reass_prepare (vlib_main_t * vm, vlib_node_runtime_t * node,
555  map_ip6_reass_t * r, u32 ** fragments_ready,
556  u32 ** fragments_to_drop)
557 {
558  ip4_header_t *ip40;
559  ip6_header_t *ip60;
560  ip6_frag_hdr_t *frag0;
561  vlib_buffer_t *p0;
562 
563  if (!r->ip4_header.ip_version_and_header_length)
564  return;
565 
566  //The IP header is here, we need to check for packets
567  //that can be forwarded
568  int i;
569  for (i = 0; i < MAP_IP6_REASS_MAX_FRAGMENTS_PER_REASSEMBLY; i++)
570  {
571  if (r->fragments[i].pi == ~0 ||
572  ((!r->fragments[i].next_data_len)
573  && (r->fragments[i].next_data_offset != (0xffff))))
574  continue;
575 
576  p0 = vlib_get_buffer (vm, r->fragments[i].pi);
577  ip60 = vlib_buffer_get_current (p0);
578  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
579  ip40 = (ip4_header_t *) (frag0 + 1);
580 
581  if (ip6_frag_hdr_offset (frag0))
582  {
583  //Not first fragment, add the IPv4 header
584  clib_memcpy (ip40, &r->ip4_header, 20);
585  }
586 
587 #ifdef MAP_IP6_REASS_COUNT_BYTES
588  r->forwarded +=
589  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
590 #endif
591 
592  if (ip6_frag_hdr_more (frag0))
593  {
594  //Not last fragment, we copy end of next
595  clib_memcpy (u8_ptr_add (ip60, p0->current_length),
596  r->fragments[i].next_data, 20);
597  p0->current_length += 20;
598  ip60->payload_length = u16_net_add (ip60->payload_length, 20);
599  }
600 
601  if (!ip4_is_fragment (ip40))
602  {
603  ip40->fragment_id = frag_id_6to4 (frag0->identification);
604  ip40->flags_and_fragment_offset =
605  clib_host_to_net_u16 (ip6_frag_hdr_offset (frag0));
606  }
607  else
608  {
609  ip40->flags_and_fragment_offset =
610  clib_host_to_net_u16 (ip4_get_fragment_offset (ip40) +
611  ip6_frag_hdr_offset (frag0));
612  }
613 
614  if (ip6_frag_hdr_more (frag0))
615  ip40->flags_and_fragment_offset |=
616  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
617 
618  ip40->length =
619  clib_host_to_net_u16 (p0->current_length - sizeof (*ip60) -
620  sizeof (*frag0));
621  ip40->checksum = ip4_header_checksum (ip40);
622 
623  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
624  {
625  map_ip6_map_ip6_reass_trace_t *tr =
626  vlib_add_trace (vm, node, p0, sizeof (*tr));
627  tr->offset = ip4_get_fragment_offset (ip40);
628  tr->frag_len = clib_net_to_host_u16 (ip40->length) - sizeof (*ip40);
629  tr->out = 1;
630  }
631 
632  vec_add1 (*fragments_ready, r->fragments[i].pi);
633  r->fragments[i].pi = ~0;
634  r->fragments[i].next_data_len = 0;
635  r->fragments[i].next_data_offset = 0;
636  map_main.ip6_reass_buffered_counter--;
637 
638  //TODO: Best solution would be that ip6_map handles extension headers
639  // and ignores atomic fragments. But in the meantime, let's just copy the header.
640 
641  u8 protocol = frag0->next_hdr;
642  memmove (u8_ptr_add (ip40, -sizeof (*ip60)), ip60, sizeof (*ip60));
643  ((ip6_header_t *) u8_ptr_add (ip40, -sizeof (*ip60)))->protocol =
644  protocol;
645  vlib_buffer_advance (p0, sizeof (*frag0));
646  }
647 }
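/*
 * Note: instead of reassembling, this helper turns each cached IPv6
 * fragment into a self-contained IPv4 fragment: it restores the saved
 * inner IPv4 header on non-first fragments, appends the 20 bytes of
 * next_data saved from the following fragment when more fragments exist,
 * converts the IPv6 fragment offset/MF bits into their IPv4 equivalents,
 * then recomputes length and checksum before queueing on fragments_ready.
 */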
648 
649 void
650 map_ip6_drop_pi (u32 pi)
651 {
652  vlib_main_t *vm = vlib_get_main ();
653  vlib_node_runtime_t *n =
654  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
655  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP6_REASS_NEXT_DROP, pi);
656 }
657 
658 void
659 map_ip4_drop_pi (u32 pi)
660 {
661  vlib_main_t *vm = vlib_get_main ();
662  vlib_node_runtime_t *n =
663  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
664  vlib_set_next_frame_buffer (vm, n, IP6_MAP_IP4_REASS_NEXT_DROP, pi);
665 }
666 
667 /*
668  * ip6_reass
669  * TODO: We should count the number of successfully
670  * transmitted fragment bytes and compare that to the last fragment
671  * offset such that we can free the reassembly structure when all fragments
672  * have been forwarded.
673  */
674 static uword
675 ip6_map_ip6_reass (vlib_main_t * vm,
676  vlib_node_runtime_t * node, vlib_frame_t * frame)
677 {
678  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
679  vlib_node_runtime_t *error_node =
680  vlib_node_get_runtime (vm, ip6_map_ip6_reass_node.index);
681  u32 *fragments_to_drop = NULL;
682  u32 *fragments_ready = NULL;
683 
684  from = vlib_frame_vector_args (frame);
685  n_left_from = frame->n_vectors;
686  next_index = node->cached_next_index;
687  while (n_left_from > 0)
688  {
689  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
690 
691  /* Single loop */
692  while (n_left_from > 0 && n_left_to_next > 0)
693  {
694  u32 pi0;
695  vlib_buffer_t *p0;
696  u8 error0 = MAP_ERROR_NONE;
697  ip6_header_t *ip60;
698  ip6_frag_hdr_t *frag0;
699  u16 offset;
700  u16 next_offset;
701  u16 frag_len;
702 
703  pi0 = to_next[0] = from[0];
704  from += 1;
705  n_left_from -= 1;
706  to_next += 1;
707  n_left_to_next -= 1;
708 
709  p0 = vlib_get_buffer (vm, pi0);
710  ip60 = vlib_buffer_get_current (p0);
711  frag0 = (ip6_frag_hdr_t *) (ip60 + 1);
712  offset =
713  clib_host_to_net_u16 (frag0->fragment_offset_and_more) & (~7);
714  frag_len =
715  clib_net_to_host_u16 (ip60->payload_length) - sizeof (*frag0);
716  next_offset =
717  ip6_frag_hdr_more (frag0) ? (offset + frag_len) : (0xffff);
718 
719  //FIXME: Support other extension headers, maybe
720 
721  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
722  {
723  map_ip6_map_ip6_reass_trace_t *tr =
724  vlib_add_trace (vm, node, p0, sizeof (*tr));
725  tr->offset = offset;
726  tr->frag_len = frag_len;
727  tr->out = 0;
728  }
729 
730  map_ip6_reass_lock ();
731  map_ip6_reass_t *r =
732  map_ip6_reass_get (&ip60->src_address, &ip60->dst_address,
733  frag0->identification, frag0->next_hdr,
734  &fragments_to_drop);
735  //FIXME: Use better error codes
736  if (PREDICT_FALSE (!r))
737  {
738  // Could not create a caching entry
739  error0 = MAP_ERROR_FRAGMENT_MEMORY;
740  }
741  else if (PREDICT_FALSE ((frag_len <= 20 &&
742  (ip6_frag_hdr_more (frag0) || (!offset)))))
743  {
744  //Very small fragments are restricted to the last one and
745  //can't be the first one
746  error0 = MAP_ERROR_FRAGMENT_MALFORMED;
747  }
748  else
749  if (map_ip6_reass_add_fragment
750  (r, pi0, offset, next_offset, (u8 *) (frag0 + 1), frag_len))
751  {
752  map_ip6_reass_free (r, &fragments_to_drop);
753  error0 = MAP_ERROR_FRAGMENT_MEMORY;
754  }
755  else
756  {
757 #ifdef MAP_IP6_REASS_COUNT_BYTES
758  if (!ip6_frag_hdr_more (frag0))
759  r->expected_total = offset + frag_len;
760 #endif
761  ip6_map_ip6_reass_prepare (vm, node, r, &fragments_ready,
762  &fragments_to_drop);
763 #ifdef MAP_IP6_REASS_COUNT_BYTES
764  if (r->forwarded >= r->expected_total)
765  map_ip6_reass_free (r, &fragments_to_drop);
766 #endif
767  }
768  map_ip6_reass_unlock ();
769 
770  if (error0 == MAP_ERROR_NONE)
771  {
772  if (frag_len > 20)
773  {
774  //Dequeue the packet
775  n_left_to_next++;
776  to_next--;
777  }
778  else
779  {
780  //All data from that packet was copied, no need to keep it, but this is not an error
781  p0->error = error_node->errors[MAP_ERROR_NONE];
782  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
783  to_next, n_left_to_next,
784  pi0,
785  IP6_MAP_IP6_REASS_NEXT_DROP);
786  }
787  }
788  else
789  {
790  p0->error = error_node->errors[error0];
791  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
792  n_left_to_next, pi0,
793  IP6_MAP_IP6_REASS_NEXT_DROP);
794  }
795  }
796  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
797  }
798 
799  map_send_all_to_node (vm, fragments_ready, node,
800  &error_node->errors[MAP_ERROR_NONE],
801  IP6_MAP_IP6_REASS_NEXT_IP6_MAP);
802  map_send_all_to_node (vm, fragments_to_drop, node,
803  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
804  IP6_MAP_IP6_REASS_NEXT_DROP);
805 
806  vec_free (fragments_to_drop);
807  vec_free (fragments_ready);
808  return frame->n_vectors;
809 }
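/*
 * Note: this node never rebuilds the full datagram. Fragments longer than
 * 20 bytes are dequeued from the frame and held in the map_ip6_reass_t
 * entry until ip6_map_ip6_reass_prepare() re-emits them via
 * fragments_ready; fragments of 20 bytes or less, whose data has already
 * been copied into next_data, are enqueued to the drop next with
 * MAP_ERROR_NONE since no data is lost.
 */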
810 
811 /*
812  * ip6_ip4_virt_reass
813  */
814 static uword
815 ip6_map_ip4_reass (vlib_main_t * vm,
816  vlib_node_runtime_t * node, vlib_frame_t * frame)
817 {
818  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
819  vlib_node_runtime_t *error_node =
820  vlib_node_get_runtime (vm, ip6_map_ip4_reass_node.index);
821  map_main_t *mm = &map_main;
822  vlib_combined_counter_main_t *cm = mm->domain_counters;
823  u32 cpu_index = os_get_cpu_number ();
824  u32 *fragments_to_drop = NULL;
825  u32 *fragments_to_loopback = NULL;
826 
827  from = vlib_frame_vector_args (frame);
828  n_left_from = frame->n_vectors;
829  next_index = node->cached_next_index;
830  while (n_left_from > 0)
831  {
832  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
833 
834  /* Single loop */
835  while (n_left_from > 0 && n_left_to_next > 0)
836  {
837  u32 pi0;
838  vlib_buffer_t *p0;
839  u8 error0 = MAP_ERROR_NONE;
840  map_domain_t *d0;
841  ip4_header_t *ip40;
842  ip6_header_t *ip60;
843  i32 port0 = 0;
844  u32 map_domain_index0 = ~0;
845  u32 next0 = IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP;
846  u8 cached = 0;
847 
848  pi0 = to_next[0] = from[0];
849  from += 1;
850  n_left_from -= 1;
851  to_next += 1;
852  n_left_to_next -= 1;
853 
854  p0 = vlib_get_buffer (vm, pi0);
855  ip40 = vlib_buffer_get_current (p0);
856  ip60 = ((ip6_header_t *) ip40) - 1;
857 
858  d0 =
859  ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
860  (ip4_address_t *) & ip40->src_address.as_u32,
861  &map_domain_index0, &error0);
862 
863  map_ip4_reass_lock ();
864  //This node only deals with fragmented ip4
865  map_ip4_reass_t *r = map_ip4_reass_get (ip40->src_address.as_u32,
866  ip40->dst_address.as_u32,
867  ip40->fragment_id,
868  ip40->protocol,
869  &fragments_to_drop);
870  if (PREDICT_FALSE (!r))
871  {
872  // Could not create a caching entry
873  error0 = MAP_ERROR_FRAGMENT_MEMORY;
874  }
875  else if (PREDICT_TRUE (ip4_get_fragment_offset (ip40)))
876  {
877  // This is a fragment
878  if (r->port >= 0)
879  {
880  // We know the port already
881  port0 = r->port;
882  }
883  else if (map_ip4_reass_add_fragment (r, pi0))
884  {
885  // Not enough space for caching
886  error0 = MAP_ERROR_FRAGMENT_MEMORY;
887  map_ip4_reass_free (r, &fragments_to_drop);
888  }
889  else
890  {
891  cached = 1;
892  }
893  }
894  else
895  if ((port0 =
896  ip4_get_port (ip40, MAP_SENDER, p0->current_length)) < 0)
897  {
898  // Could not find port from first fragment. Stop reassembling.
899  error0 = MAP_ERROR_BAD_PROTOCOL;
900  port0 = 0;
901  map_ip4_reass_free (r, &fragments_to_drop);
902  }
903  else
904  {
905  // Found port. Remember it and loopback saved fragments
906  r->port = port0;
907  map_ip4_reass_get_fragments (r, &fragments_to_loopback);
908  }
909 
910 #ifdef MAP_IP4_REASS_COUNT_BYTES
911  if (!cached && r)
912  {
913  r->forwarded += clib_host_to_net_u16 (ip40->length) - 20;
914  if (!ip4_get_fragment_more (ip40))
915  r->expected_total =
916  ip4_get_fragment_offset (ip40) * 8 +
917  clib_host_to_net_u16 (ip40->length) - 20;
918  if (r->forwarded >= r->expected_total)
919  map_ip4_reass_free (r, &fragments_to_drop);
920  }
921 #endif
922 
923  map_ip4_reass_unlock ();
924 
925  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
926  error0 =
927  ip6_map_sec_check (d0, port0, ip40,
928  ip60) ? MAP_ERROR_NONE :
929  MAP_ERROR_DECAP_SEC_CHECK;
930 
931  if (PREDICT_FALSE
932  (d0->mtu && (clib_host_to_net_u16 (ip40->length) > d0->mtu)
933  && error0 == MAP_ERROR_NONE && !cached))
934  {
935  vnet_buffer (p0)->ip_frag.header_offset = 0;
936  vnet_buffer (p0)->ip_frag.flags = 0;
937  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
938  vnet_buffer (p0)->ip_frag.mtu = d0->mtu;
939  next0 = IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT;
940  }
941 
942  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
943  {
944  map_ip6_map_ip4_reass_trace_t *tr =
945  vlib_add_trace (vm, node, p0, sizeof (*tr));
946  tr->map_domain_index = map_domain_index0;
947  tr->port = port0;
948  tr->cached = cached;
949  }
950 
951  if (cached)
952  {
953  //Dequeue the packet
954  n_left_to_next++;
955  to_next--;
956  }
957  else
958  {
959  if (error0 == MAP_ERROR_NONE)
960  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
961  cpu_index, map_domain_index0,
962  1,
963  clib_net_to_host_u16
964  (ip40->length));
965  next0 =
966  (error0 ==
967  MAP_ERROR_NONE) ? next0 : IP6_MAP_IP4_REASS_NEXT_DROP;
968  p0->error = error_node->errors[error0];
969  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
970  n_left_to_next, pi0, next0);
971  }
972 
973  //Loopback when we reach the end of the input vector
974  if (n_left_from == 0 && vec_len (fragments_to_loopback))
975  {
976  from = vlib_frame_vector_args (frame);
977  u32 len = vec_len (fragments_to_loopback);
978  if (len <= VLIB_FRAME_SIZE)
979  {
980  clib_memcpy (from, fragments_to_loopback,
981  sizeof (u32) * len);
982  n_left_from = len;
983  vec_reset_length (fragments_to_loopback);
984  }
985  else
986  {
987  clib_memcpy (from,
988  fragments_to_loopback + (len -
989  VLIB_FRAME_SIZE),
990  sizeof (u32) * VLIB_FRAME_SIZE);
991  n_left_from = VLIB_FRAME_SIZE;
992  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
993  }
994  }
995  }
996  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
997  }
998  map_send_all_to_node (vm, fragments_to_drop, node,
999  &error_node->errors[MAP_ERROR_FRAGMENT_DROPPED],
1000  IP6_MAP_IP4_REASS_NEXT_DROP);
1001 
1002  vec_free (fragments_to_drop);
1003  vec_free (fragments_to_loopback);
1004  return frame->n_vectors;
1005 }
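/*
 * Note: virtual reassembly of the inner IPv4 packet. The L4 port needed
 * for the MAP security check only exists in the first fragment, so later
 * fragments are cached until the port is known; once learned, the cached
 * buffer indices are fed back into the input frame via
 * fragments_to_loopback so they pass through this node again and are
 * forwarded without reconstructing the original datagram.
 */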
1006 
1007 /*
1008  * ip6_icmp_relay
1009  */
1010 static uword
1011 ip6_map_icmp_relay (vlib_main_t * vm,
1012  vlib_node_runtime_t * node, vlib_frame_t * frame)
1013 {
1014  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1015  vlib_node_runtime_t *error_node =
1016  vlib_node_get_runtime (vm, ip6_map_icmp_relay_node.index);
1017  map_main_t *mm = &map_main;
1018  u32 cpu_index = os_get_cpu_number ();
1019  u16 *fragment_ids, *fid;
1020 
1021  from = vlib_frame_vector_args (frame);
1022  n_left_from = frame->n_vectors;
1023  next_index = node->cached_next_index;
1024 
1025  /* Get random fragment IDs for replies. */
1026  fid = fragment_ids =
1027  clib_random_buffer_get_data (&vm->random_buffer,
1028  n_left_from * sizeof (fragment_ids[0]));
1029 
1030  while (n_left_from > 0)
1031  {
1032  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1033 
1034  /* Single loop */
1035  while (n_left_from > 0 && n_left_to_next > 0)
1036  {
1037  u32 pi0;
1038  vlib_buffer_t *p0;
1039  u8 error0 = MAP_ERROR_NONE;
1040  ip6_header_t *ip60;
1041  u32 next0 = IP6_ICMP_RELAY_NEXT_IP4_LOOKUP;
1042  u32 mtu;
1043 
1044  pi0 = to_next[0] = from[0];
1045  from += 1;
1046  n_left_from -= 1;
1047  to_next += 1;
1048  n_left_to_next -= 1;
1049 
1050  p0 = vlib_get_buffer (vm, pi0);
1051  ip60 = vlib_buffer_get_current (p0);
1052  u16 tlen = clib_net_to_host_u16 (ip60->payload_length);
1053 
1054  /*
1055  * In:
1056  * IPv6 header (40)
1057  * ICMPv6 header (8)
1058  * IPv6 header (40)
1059  * Original IPv4 header / packet
1060  * Out:
1061  * New IPv4 header
1062  * New ICMP header
1063  * Original IPv4 header / packet
1064  */
1065 
1066  /* Need at least ICMP(8) + IPv6(40) + IPv4(20) + L4 header(8) */
1067  if (tlen < 76)
1068  {
1069  error0 = MAP_ERROR_ICMP_RELAY;
1070  goto error;
1071  }
1072 
1073  icmp46_header_t *icmp60 = (icmp46_header_t *) (ip60 + 1);
1074  ip6_header_t *inner_ip60 = (ip6_header_t *) (icmp60 + 2);
1075 
1076  if (inner_ip60->protocol != IP_PROTOCOL_IP_IN_IP)
1077  {
1078  error0 = MAP_ERROR_ICMP_RELAY;
1079  goto error;
1080  }
1081 
1082  ip4_header_t *inner_ip40 = (ip4_header_t *) (inner_ip60 + 1);
1083  vlib_buffer_advance (p0, 60); /* sizeof ( IPv6 + ICMP + IPv6 - IPv4 - ICMP ) */
1084  ip4_header_t *new_ip40 = vlib_buffer_get_current (p0);
1085  icmp46_header_t *new_icmp40 = (icmp46_header_t *) (new_ip40 + 1);
1086 
1087  /*
1088  * Relay according to RFC2473, section 8.3
1089  */
1090  switch (icmp60->type)
1091  {
1092  case ICMP6_destination_unreachable:
1093  case ICMP6_time_exceeded:
1094  case ICMP6_parameter_problem:
1095  /* Type 3 - destination unreachable, Code 1 - host unreachable */
1096  new_icmp40->type = ICMP4_destination_unreachable;
1097  new_icmp40->code =
1098  ICMP4_destination_unreachable_destination_unreachable_host;
1099  break;
1100 
1101  case ICMP6_packet_too_big:
1102  /* Type 3 - destination unreachable, Code 4 - packet too big */
1103  /* Potential TODO: Adjust domain tunnel MTU based on the value received here */
1104  mtu = clib_net_to_host_u32 (*((u32 *) (icmp60 + 1)));
1105 
1106  /* Check DF flag */
1107  if (!
1108  (inner_ip40->flags_and_fragment_offset &
1109  clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT)))
1110  {
1111  error0 = MAP_ERROR_ICMP_RELAY;
1112  goto error;
1113  }
1114 
1115  new_icmp40->type = ICMP4_destination_unreachable;
1116  new_icmp40->code =
1117  ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set;
1118  *((u32 *) (new_icmp40 + 1)) =
1119  clib_host_to_net_u32 (mtu < 1280 ? 1280 : mtu);
1120  break;
1121 
1122  default:
1123  error0 = MAP_ERROR_ICMP_RELAY;
1124  break;
1125  }
1126 
1127  /*
1128  * Ensure the total ICMP packet is no longer than 576 bytes (RFC1812)
1129  */
1130  new_ip40->ip_version_and_header_length = 0x45;
1131  new_ip40->tos = 0;
1132  u16 nlen = (tlen - 20) > 576 ? 576 : tlen - 20;
1133  new_ip40->length = clib_host_to_net_u16 (nlen);
1134  new_ip40->fragment_id = fid[0];
1135  fid++;
1136  new_ip40->ttl = 64;
1137  new_ip40->protocol = IP_PROTOCOL_ICMP;
1138  new_ip40->src_address = mm->icmp4_src_address;
1139  new_ip40->dst_address = inner_ip40->src_address;
1140  new_ip40->checksum = ip4_header_checksum (new_ip40);
1141 
1142  new_icmp40->checksum = 0;
1143  ip_csum_t sum = ip_incremental_checksum (0, new_icmp40, nlen - 20);
1144  new_icmp40->checksum = ~ip_csum_fold (sum);
1145 
1146  vlib_increment_simple_counter (&mm->icmp_relayed, cpu_index, 0, 1);
1147 
1148  error:
1149  if (PREDICT_FALSE (p0->flags & VLIB_BUFFER_IS_TRACED))
1150  {
1151  map_trace_t *tr = vlib_add_trace (vm, node, p0, sizeof (*tr));
1152  tr->map_domain_index = 0;
1153  tr->port = 0;
1154  }
1155 
1156  next0 =
1157  (error0 == MAP_ERROR_NONE) ? next0 : IP6_ICMP_RELAY_NEXT_DROP;
1158  p0->error = error_node->errors[error0];
1159  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1160  n_left_to_next, pi0, next0);
1161  }
1162  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1163  }
1164 
1165  return frame->n_vectors;
1166 
1167 }
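/*
 * Note: the relay turns an ICMPv6 error that wraps an IP-in-IP packet into
 * an ICMPv4 error addressed to the inner IPv4 source, following the RFC
 * 2473 section 8.3 mapping cited above: unreachable/time exceeded/parameter
 * problem become host unreachable, and packet-too-big becomes
 * fragmentation-needed with the reported MTU clamped to at least 1280.
 */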
1168 
1169 static char *map_error_strings[] = {
1170 #define _(sym,string) string,
1171  foreach_map_error
1172 #undef _
1173 };
1174 
1175 /* *INDENT-OFF* */
1176 VLIB_REGISTER_NODE(ip6_map_node) = {
1177  .function = ip6_map,
1178  .name = "ip6-map",
1179  .vector_size = sizeof(u32),
1180  .format_trace = format_map_trace,
1181  .type = VLIB_NODE_TYPE_INTERNAL,
1182 
1183  .n_errors = MAP_N_ERROR,
1184  .error_strings = map_error_strings,
1185 
1186  .n_next_nodes = IP6_MAP_N_NEXT,
1187  .next_nodes = {
1188  [IP6_MAP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1189 #ifdef MAP_SKIP_IP6_LOOKUP
1190  [IP6_MAP_NEXT_IP4_REWRITE] = "ip4-load-balance",
1191 #endif
1192  [IP6_MAP_NEXT_IP6_REASS] = "ip6-map-ip6-reass",
1193  [IP6_MAP_NEXT_IP4_REASS] = "ip6-map-ip4-reass",
1194  [IP6_MAP_NEXT_IP4_FRAGMENT] = "ip4-frag",
1195  [IP6_MAP_NEXT_IP6_ICMP_RELAY] = "ip6-map-icmp-relay",
1196  [IP6_MAP_NEXT_IP6_LOCAL] = "ip6-local",
1197  [IP6_MAP_NEXT_DROP] = "error-drop",
1198  [IP6_MAP_NEXT_ICMP] = "ip6-icmp-error",
1199  },
1200 };
1201 /* *INDENT-ON* */
1202 
1203 /* *INDENT-OFF* */
1204 VLIB_REGISTER_NODE(ip6_map_ip6_reass_node) = {
1205  .function = ip6_map_ip6_reass,
1206  .name = "ip6-map-ip6-reass",
1207  .vector_size = sizeof(u32),
1208  .format_trace = format_ip6_map_ip6_reass_trace,
1209  .type = VLIB_NODE_TYPE_INTERNAL,
1210  .n_errors = MAP_N_ERROR,
1211  .error_strings = map_error_strings,
1212  .n_next_nodes = IP6_MAP_IP6_REASS_N_NEXT,
1213  .next_nodes = {
1214  [IP6_MAP_IP6_REASS_NEXT_IP6_MAP] = "ip6-map",
1215  [IP6_MAP_IP6_REASS_NEXT_DROP] = "error-drop",
1216  },
1217 };
1218 /* *INDENT-ON* */
1219 
1220 /* *INDENT-OFF* */
1221 VLIB_REGISTER_NODE(ip6_map_ip4_reass_node) = {
1222  .function = ip6_map_ip4_reass,
1223  .name = "ip6-map-ip4-reass",
1224  .vector_size = sizeof(u32),
1225  .format_trace = format_ip6_map_ip4_reass_trace,
1226  .type = VLIB_NODE_TYPE_INTERNAL,
1227  .n_errors = MAP_N_ERROR,
1228  .error_strings = map_error_strings,
1229  .n_next_nodes = IP6_MAP_IP4_REASS_N_NEXT,
1230  .next_nodes = {
1231  [IP6_MAP_IP4_REASS_NEXT_IP4_LOOKUP] = "ip4-lookup",
1232  [IP6_MAP_IP4_REASS_NEXT_IP4_FRAGMENT] = "ip4-frag",
1233  [IP6_MAP_IP4_REASS_NEXT_DROP] = "error-drop",
1234  },
1235 };
1236 /* *INDENT-ON* */
1237 
1238 /* *INDENT-OFF* */
1239 VLIB_REGISTER_NODE(ip6_map_icmp_relay_node) = {
1240  .function = ip6_map_icmp_relay,
1241  .name = "ip6-map-icmp-relay",
1242  .vector_size = sizeof(u32),
1243  .format_trace = format_map_trace, //FIXME
1244  .type = VLIB_NODE_TYPE_INTERNAL,
1245  .n_errors = MAP_N_ERROR,
1246  .error_strings = map_error_strings,
1247  .n_next_nodes = IP6_ICMP_RELAY_N_NEXT,
1248  .next_nodes = {
1249  [IP6_ICMP_RELAY_NEXT_IP4_LOOKUP] = "ip4-lookup",
1250  [IP6_ICMP_RELAY_NEXT_DROP] = "error-drop",
1251  },
1252 };
1253 /* *INDENT-ON* */
1254 
1255 /*
1256  * fd.io coding-style-patch-verification: ON
1257  *
1258  * Local Variables:
1259  * eval: (c-set-style "gnu")
1260  * End:
1261  */
Definition: ip6_packet.h:341