FD.io VPP  v16.09
Vector Packet Processing
ip6_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 
19 #define IP6_MAP_T_DUAL_LOOP
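//When IP6_MAP_T_DUAL_LOOP is defined, the node loops below process two
//packets per iteration before falling back to the single-packet tail loop.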
20 
21 typedef enum
22 {
23   IP6_MAPT_NEXT_MAPT_TCP_UDP,
24   IP6_MAPT_NEXT_MAPT_ICMP,
25   IP6_MAPT_NEXT_MAPT_FRAGMENTED,
26   IP6_MAPT_NEXT_DROP,
27   IP6_MAPT_N_NEXT
28 } ip6_mapt_next_t;
29 
30 typedef enum
31 {
32   IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
33   IP6_MAPT_ICMP_NEXT_IP4_FRAG,
34   IP6_MAPT_ICMP_NEXT_DROP,
35   IP6_MAPT_ICMP_N_NEXT
36 } ip6_mapt_icmp_next_t;
37 
38 typedef enum
39 {
40   IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
41   IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
42   IP6_MAPT_TCP_UDP_NEXT_DROP,
43   IP6_MAPT_TCP_UDP_N_NEXT
44 } ip6_mapt_tcp_udp_next_t;
45 
46 typedef enum
47 {
48   IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
49   IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
50   IP6_MAPT_FRAGMENTED_NEXT_DROP,
51   IP6_MAPT_FRAGMENTED_N_NEXT
52 } ip6_mapt_fragmented_next_t;
53 
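//Cache the L4 port seen in the first fragment so that subsequent fragments
//(which carry no L4 header) can be translated with the same port.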
54 static_always_inline int
55 ip6_map_fragment_cache (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
56  map_domain_t * d, u16 port)
57 {
58  u32 *ignore = NULL;
59  map_ip4_reass_lock ();
60  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
61  ip6_map_t_embedded_address (d,
62  &ip6->
63  dst_address),
64  frag_id_6to4 (frag->identification),
65  (ip6->protocol ==
66  IP_PROTOCOL_ICMP6) ?
67  IP_PROTOCOL_ICMP : ip6->protocol,
68  &ignore);
69  if (r)
70  r->port = port;
71 
72  map_ip4_reass_unlock ();
73  return !r;
74 }
75 
76 /* Returns the associated port or -1 */
77 static_always_inline i32
78 ip6_map_fragment_get (ip6_header_t * ip6, ip6_frag_hdr_t * frag,
79  map_domain_t * d)
80 {
81  u32 *ignore = NULL;
82  map_ip4_reass_lock ();
83  map_ip4_reass_t *r = map_ip4_reass_get (map_get_ip4 (&ip6->src_address),
84  ip6_map_t_embedded_address (d,
85  &ip6->
86  dst_address),
87  frag_id_6to4 (frag->identification),
88  (ip6->protocol ==
89  IP_PROTOCOL_ICMP6) ?
90  IP_PROTOCOL_ICMP : ip6->protocol,
91  &ignore);
92  i32 ret = r ? r->port : -1;
93  map_ip4_reass_unlock ();
94  return ret;
95 }
96 
97 static_always_inline u8
98 ip6_translate_tos (const ip6_header_t * ip6)
99 {
100 #ifdef IP6_MAP_T_OVERRIDE_TOS
101  return IP6_MAP_T_OVERRIDE_TOS;
102 #else
103  return (clib_net_to_host_u32 (ip6->ip_version_traffic_class_and_flow_label)
104  & 0x0ff00000) >> 20;
105 #endif
106 }
107 
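//ICMPv6-to-ICMPv4 Parameter Problem pointer mapping (RFC 6145): indexed by
//the IPv6 pointer value, yields the corresponding IPv4 pointer; ~0 marks
//offsets that have no IPv4 equivalent.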
108 //TODO: Find right place in memory for that
109 /* *INDENT-OFF* */
110 static u8 icmp6_to_icmp_updater_pointer_table[] =
111  { 0, 1, ~0, ~0,
112  2, 2, 9, 8,
113  12, 12, 12, 12,
114  12, 12, 12, 12,
115  12, 12, 12, 12,
116  12, 12, 12, 12,
117  24, 24, 24, 24,
118  24, 24, 24, 24,
119  24, 24, 24, 24,
120  24, 24, 24, 24
121  };
122 /* *INDENT-ON* */
123 
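//Translate an ICMPv6 header into an ICMPv4 header in place.
//Returns 0 on success, -1 when the message type/code cannot be translated.
//*sender_port receives the originating port when one can be recovered, and
//*inner_ip6 points to the embedded IPv6 header of error messages (else NULL).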
124 static_always_inline int
125 ip6_icmp_to_icmp6_in_place (icmp46_header_t * icmp, u32 icmp_len,
126  i32 * sender_port, ip6_header_t ** inner_ip6)
127 {
128  *inner_ip6 = NULL;
129  switch (icmp->type)
130  {
131  case ICMP6_echo_request:
132  *sender_port = ((u16 *) icmp)[2];
133  icmp->type = ICMP4_echo_request;
134  break;
135  case ICMP6_echo_reply:
136  *sender_port = ((u16 *) icmp)[2];
137  icmp->type = ICMP4_echo_reply;
138  break;
139  case ICMP6_destination_unreachable:
140  *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
141  *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
142 
143  switch (icmp->code)
144  {
145  case ICMP6_destination_unreachable_no_route_to_destination: //0
146  case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2
147  case ICMP6_destination_unreachable_address_unreachable: //3
148  icmp->type = ICMP4_destination_unreachable;
149  icmp->code =
150  ICMP4_destination_unreachable_destination_unreachable_host;
151  break;
152  case ICMP6_destination_unreachable_destination_administratively_prohibited: //1
153  icmp->type =
154  ICMP4_destination_unreachable;
155  icmp->code =
156  ICMP4_destination_unreachable_communication_administratively_prohibited;
157  break;
158  case ICMP6_destination_unreachable_port_unreachable:
159  icmp->type = ICMP4_destination_unreachable;
160  icmp->code = ICMP4_destination_unreachable_port_unreachable;
161  break;
162  default:
163  return -1;
164  }
165  break;
166  case ICMP6_packet_too_big:
167  *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
168  *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
169 
170  icmp->type = ICMP4_destination_unreachable;
171  icmp->code = 4;
172  {
173  u32 advertised_mtu = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
174  advertised_mtu -= 20;
175  //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
176  ((u16 *) (icmp))[3] = clib_host_to_net_u16 (advertised_mtu);
177  }
178  break;
179 
180  case ICMP6_time_exceeded:
181  *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
182  *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
183 
184  icmp->type = ICMP4_time_exceeded;
185  break;
186 
187  case ICMP6_parameter_problem:
188  *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
189  *sender_port = ip6_get_port (*inner_ip6, MAP_RECEIVER, icmp_len);
190 
191  switch (icmp->code)
192  {
193  case ICMP6_parameter_problem_erroneous_header_field:
194  icmp->type = ICMP4_parameter_problem;
195  icmp->code = ICMP4_parameter_problem_pointer_indicates_error;
196  u32 pointer = clib_net_to_host_u32 (*((u32 *) (icmp + 1)));
197  if (pointer >= 40)
198  return -1;
199 
200  ((u8 *) (icmp + 1))[0] =
201  icmp6_to_icmp_updater_pointer_table[pointer];
202  break;
203  case ICMP6_parameter_problem_unrecognized_next_header:
204  icmp->type = ICMP4_destination_unreachable;
205  icmp->code = ICMP4_destination_unreachable_port_unreachable;
206  break;
207  case ICMP6_parameter_problem_unrecognized_option:
208  default:
209  return -1;
210  }
211  break;
212  default:
213  return -1;
214  break;
215  }
216  return 0;
217 }
218 
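//Translate one ICMPv6 packet to ICMPv4, including the embedded IPv6 header
//carried by error messages, and apply the MAP security check to the
//IPv6 source (and, for embedded packets, destination) address.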
219 static_always_inline void
220 _ip6_map_t_icmp (map_domain_t * d, vlib_buffer_t * p, u8 * error)
221 {
222  ip6_header_t *ip6, *inner_ip6;
223  ip4_header_t *ip4, *inner_ip4;
224  u32 ip6_pay_len;
225  icmp46_header_t *icmp;
226  i32 sender_port;
227  ip_csum_t csum;
228  u32 ip4_sadr, inner_ip4_dadr;
229 
230  ip6 = vlib_buffer_get_current (p);
231  ip6_pay_len = clib_net_to_host_u16 (ip6->payload_length);
232  icmp = (icmp46_header_t *) (ip6 + 1);
233  ASSERT (ip6_pay_len + sizeof (*ip6) <= p->current_length);
234 
235  if (ip6->protocol != IP_PROTOCOL_ICMP6)
236  {
237  //No extensions headers allowed here
238  //TODO: SR header
239  *error = MAP_ERROR_MALFORMED;
240  return;
241  }
242 
243  //There are no fragmented ICMP messages, so no extension header for now
244 
245  if (ip6_icmp_to_icmp6_in_place
246  (icmp, ip6_pay_len, &sender_port, &inner_ip6))
247  {
248  //TODO: In case of 1:1 mapping it is not necessary to have the sender port
249  *error = MAP_ERROR_ICMP;
250  return;
251  }
252 
253  if (sender_port < 0)
254  {
255  // In case of 1:1 mapping, we don't care about the port
256  if (d->ea_bits_len == 0 && d->rules)
257  {
258  sender_port = 0;
259  }
260  else
261  {
262  *error = MAP_ERROR_ICMP;
263  return;
264  }
265  }
266 
267  //Security check
268  //Note that this prevents an intermediate IPv6 router from answering the request
269  ip4_sadr = map_get_ip4 (&ip6->src_address);
270  if (ip6->src_address.as_u64[0] != map_get_pfx_net (d, ip4_sadr, sender_port)
271  || ip6->src_address.as_u64[1] != map_get_sfx_net (d, ip4_sadr,
272  sender_port))
273  {
274  *error = MAP_ERROR_SEC_CHECK;
275  return;
276  }
277 
278  if (inner_ip6)
279  {
280  u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset,
281  inner_frag_id;
282  u8 *inner_l4, inner_protocol;
283 
284  //We have two headers to translate
285  // FROM
286  // [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ...
287  // Handled cases:
288  // [ IPv6 ][IC][ IPv6 ][L4 header ...
289  // [ IPv6 ][IC][ IPv6 ][Fr][L4 header ...
290  // TO
291  // [ IPv4][IC][ IPv4][L4 header ...
292 
293  //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
294  //We shouldn't have to do it again
295  if (ip6_parse (inner_ip6, ip6_pay_len - 8,
296  &inner_protocol, &inner_l4_offset, &inner_frag_offset))
297  {
298  *error = MAP_ERROR_MALFORMED;
299  return;
300  }
301 
302  inner_l4 = u8_ptr_add (inner_ip6, inner_l4_offset);
303  inner_ip4 =
304  (ip4_header_t *) u8_ptr_add (inner_l4, -sizeof (*inner_ip4));
305  if (inner_frag_offset)
306  {
307  ip6_frag_hdr_t *inner_frag =
308  (ip6_frag_hdr_t *) u8_ptr_add (inner_ip6, inner_frag_offset);
309  inner_frag_id = frag_id_6to4 (inner_frag->identification);
310  }
311  else
312  {
313  inner_frag_id = 0;
314  }
315 
316  //Do the translation of the inner packet
317  if (inner_protocol == IP_PROTOCOL_TCP)
318  {
319  inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 16);
320  }
321  else if (inner_protocol == IP_PROTOCOL_UDP)
322  {
323  inner_L4_checksum = (u16 *) u8_ptr_add (inner_l4, 6);
324  }
325  else if (inner_protocol == IP_PROTOCOL_ICMP6)
326  {
327  icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
328  csum = inner_icmp->checksum;
329  csum = ip_csum_sub_even (csum, *((u16 *) inner_icmp));
330  //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded
331  inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
332  ICMP4_echo_request : ICMP4_echo_reply;
333  csum = ip_csum_add_even (csum, *((u16 *) inner_icmp));
334  inner_icmp->checksum = ip_csum_fold (csum);
335  inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later
336  inner_L4_checksum = &inner_icmp->checksum;
337  }
338  else
339  {
340  *error = MAP_ERROR_BAD_PROTOCOL;
341  return;
342  }
343 
344  csum = *inner_L4_checksum;
345  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[0]);
346  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
347  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
348  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
349 
350  //Sanity check of the outer destination address
351  if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] &&
352  ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1])
353  {
354  *error = MAP_ERROR_SEC_CHECK;
355  return;
356  }
357 
358  //Security check of inner packet
359  inner_ip4_dadr = map_get_ip4 (&inner_ip6->dst_address);
360  if (inner_ip6->dst_address.as_u64[0] !=
361  map_get_pfx_net (d, inner_ip4_dadr, sender_port)
362  || inner_ip6->dst_address.as_u64[1] != map_get_sfx_net (d,
363  inner_ip4_dadr,
364  sender_port))
365  {
366  *error = MAP_ERROR_SEC_CHECK;
367  return;
368  }
369 
370  inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
371  inner_ip4->src_address.as_u32 =
372  ip6_map_t_embedded_address (d, &inner_ip6->src_address);
373  inner_ip4->ip_version_and_header_length =
374  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
375  inner_ip4->tos = ip6_translate_tos (inner_ip6);
376  inner_ip4->length =
377  u16_net_add (inner_ip6->payload_length,
378  sizeof (*ip4) + sizeof (*ip6) - inner_l4_offset);
379  inner_ip4->fragment_id = inner_frag_id;
380  inner_ip4->flags_and_fragment_offset =
381  clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
382  inner_ip4->ttl = inner_ip6->hop_limit;
383  inner_ip4->protocol = inner_protocol;
384  inner_ip4->checksum = ip4_header_checksum (inner_ip4);
385 
386  if (inner_ip4->protocol == IP_PROTOCOL_ICMP)
387  {
388  //Remove remainings of the pseudo-header in the csum
389  csum =
390  ip_csum_sub_even (csum, clib_host_to_net_u16 (IP_PROTOCOL_ICMP6));
391  csum =
392  ip_csum_sub_even (csum, inner_ip4->length - sizeof (*inner_ip4));
393  }
394  else
395  {
396  //Update to new pseudo-header
397  csum = ip_csum_add_even (csum, inner_ip4->src_address.as_u32);
398  csum = ip_csum_add_even (csum, inner_ip4->dst_address.as_u32);
399  }
400  *inner_L4_checksum = ip_csum_fold (csum);
401 
402  //Move up icmp header
403  ip4 = (ip4_header_t *) u8_ptr_add (inner_l4, -2 * sizeof (*ip4) - 8);
404  clib_memcpy (u8_ptr_add (inner_l4, -sizeof (*ip4) - 8), icmp, 8);
405  icmp = (icmp46_header_t *) u8_ptr_add (inner_l4, -sizeof (*ip4) - 8);
406  }
407  else
408  {
409  //Only one header to translate
410  ip4 = (ip4_header_t *) u8_ptr_add (ip6, sizeof (*ip6) - sizeof (*ip4));
411  }
412  vlib_buffer_advance (p, (u32) (((u8 *) ip4) - ((u8 *) ip6)));
413 
414  ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
415  ip4->src_address.as_u32 = ip4_sadr;
416  ip4->dst_address.as_u32 =
417  ip6_map_t_embedded_address (d, &ip6->dst_address);
418  ip4->tos = ip6_translate_tos (ip6);
419  ip4->fragment_id = 0;
420  ip4->flags_and_fragment_offset = 0;
421  ip4->ttl = ip6->hop_limit;
422  ip4->protocol = IP_PROTOCOL_ICMP;
423  //TODO fix the length depending on offset length
424  ip4->length = u16_net_add (ip6->payload_length,
425  (inner_ip6 ==
426  NULL) ? sizeof (*ip4) : (2 * sizeof (*ip4) -
427  sizeof (*ip6)));
428  ip4->checksum = ip4_header_checksum (ip4);
429 
430  //TODO: We could do an easy diff-checksum for echo requests/replies
431  //Recompute ICMP checksum
432  icmp->checksum = 0;
433  csum =
434  ip_incremental_checksum (0, icmp,
435  clib_net_to_host_u16 (ip4->length) -
436  sizeof (*ip4));
437  icmp->checksum = ~ip_csum_fold (csum);
438 }
439 
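//Node function: ICMPv6-to-ICMPv4 translation, one packet at a time.
//Translated packets larger than the domain MTU are handed to the IPv4
//fragmentation node.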
440 static uword
441 ip6_map_t_icmp (vlib_main_t * vm,
442  vlib_node_runtime_t * node, vlib_frame_t * frame)
443 {
444  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
445  vlib_node_runtime_t *error_node =
446  vlib_node_get_runtime (vm, ip6_map_t_icmp_node.index);
447  from = vlib_frame_vector_args (frame);
448  n_left_from = frame->n_vectors;
449  next_index = node->cached_next_index;
450  vlib_combined_counter_main_t *cm = map_main.domain_counters;
451  u32 cpu_index = os_get_cpu_number ();
452 
453  while (n_left_from > 0)
454  {
455  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
456 
457  while (n_left_from > 0 && n_left_to_next > 0)
458  {
459  u32 pi0;
460  vlib_buffer_t *p0;
461  u8 error0;
462  ip6_mapt_icmp_next_t next0;
463  map_domain_t *d0;
464  u16 len0;
465 
466  pi0 = to_next[0] = from[0];
467  from += 1;
468  n_left_from -= 1;
469  to_next += 1;
470  n_left_to_next -= 1;
471  error0 = MAP_ERROR_NONE;
472  next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;
473 
474  p0 = vlib_get_buffer (vm, pi0);
475  len0 =
476  clib_net_to_host_u16 (((ip6_header_t *)
477  vlib_buffer_get_current
478  (p0))->payload_length);
479  d0 =
480  pool_elt_at_index (map_main.domains,
481  vnet_buffer (p0)->map_t.map_domain_index);
482  _ip6_map_t_icmp (d0, p0, &error0);
483 
484  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
485  {
486  //Send to fragmentation node if necessary
487  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
488  vnet_buffer (p0)->ip_frag.header_offset = 0;
489  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
490  next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
491  }
492 
493  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
494  {
495  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
496  cpu_index,
497  vnet_buffer (p0)->
498  map_t.map_domain_index, 1,
499  len0);
500  }
501  else
502  {
503  next0 = IP6_MAPT_ICMP_NEXT_DROP;
504  }
505 
506  p0->error = error_node->errors[error0];
507  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
508  to_next, n_left_to_next, pi0,
509  next0);
510  }
511  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
512  }
513  return frame->n_vectors;
514 }
515 
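//Node function: translation of non-first IPv6 fragments. The IPv4 header is
//rebuilt from the metadata computed in ip6-map-t; the port was recovered
//earlier from the fragment cache.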
516 static uword
517 ip6_map_t_fragmented (vlib_main_t * vm,
518  vlib_node_runtime_t * node, vlib_frame_t * frame)
519 {
520  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
521  from = vlib_frame_vector_args (frame);
522  n_left_from = frame->n_vectors;
523  next_index = node->cached_next_index;
524 
525  while (n_left_from > 0)
526  {
527  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
528 
529 #ifdef IP6_MAP_T_DUAL_LOOP
530  while (n_left_from >= 4 && n_left_to_next >= 2)
531  {
532  u32 pi0, pi1;
533  vlib_buffer_t *p0, *p1;
534  ip6_header_t *ip60, *ip61;
535  ip6_frag_hdr_t *frag0, *frag1;
536  ip4_header_t *ip40, *ip41;
537  u16 frag_id0, frag_offset0, frag_id1, frag_offset1;
538  u8 frag_more0, frag_more1;
539  u32 next0, next1;
540 
541  pi0 = to_next[0] = from[0];
542  pi1 = to_next[1] = from[1];
543  from += 2;
544  n_left_from -= 2;
545  to_next += 2;
546  n_left_to_next -= 2;
547 
548  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
549  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
550  p0 = vlib_get_buffer (vm, pi0);
551  p1 = vlib_get_buffer (vm, pi1);
552  ip60 = vlib_buffer_get_current (p0);
553  ip61 = vlib_buffer_get_current (p1);
554  frag0 =
555  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
556  vnet_buffer (p0)->map_t.
557  v6.frag_offset);
558  frag1 =
559  (ip6_frag_hdr_t *) u8_ptr_add (ip61,
560  vnet_buffer (p1)->map_t.
561  v6.frag_offset);
562  ip40 =
563  (ip4_header_t *) u8_ptr_add (ip60,
564  vnet_buffer (p0)->map_t.
565  v6.l4_offset - sizeof (*ip40));
566  ip41 =
567  (ip4_header_t *) u8_ptr_add (ip61,
568  vnet_buffer (p1)->map_t.
569  v6.l4_offset - sizeof (*ip40));
570  vlib_buffer_advance (p0,
571  vnet_buffer (p0)->map_t.v6.l4_offset -
572  sizeof (*ip40));
573  vlib_buffer_advance (p1,
574  vnet_buffer (p1)->map_t.v6.l4_offset -
575  sizeof (*ip40));
576 
577  frag_id0 = frag_id_6to4 (frag0->identification);
578  frag_id1 = frag_id_6to4 (frag1->identification);
579  frag_more0 = ip6_frag_hdr_more (frag0);
580  frag_more1 = ip6_frag_hdr_more (frag1);
581  frag_offset0 = ip6_frag_hdr_offset (frag0);
582  frag_offset1 = ip6_frag_hdr_offset (frag1);
583 
584  ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
585  ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
586  ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
587  ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
588  ip40->ip_version_and_header_length =
589  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
590  ip41->ip_version_and_header_length =
591  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
592  ip40->tos = ip6_translate_tos (ip60);
593  ip41->tos = ip6_translate_tos (ip61);
594  ip40->length = u16_net_add (ip60->payload_length,
595  sizeof (*ip40) -
596  vnet_buffer (p0)->map_t.v6.l4_offset +
597  sizeof (*ip60));
598  ip41->length =
599  u16_net_add (ip61->payload_length,
600  sizeof (*ip40) -
601  vnet_buffer (p1)->map_t.v6.l4_offset +
602  sizeof (*ip60));
603  ip40->fragment_id = frag_id0;
604  ip41->fragment_id = frag_id1;
605  ip40->flags_and_fragment_offset =
606  clib_host_to_net_u16 (frag_offset0 |
607  (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
608  : 0));
609  ip41->flags_and_fragment_offset =
610  clib_host_to_net_u16 (frag_offset1 |
611  (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
612  : 0));
613  ip40->ttl = ip60->hop_limit;
614  ip41->ttl = ip61->hop_limit;
615  ip40->protocol =
616  (vnet_buffer (p0)->map_t.v6.l4_protocol ==
617  IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
618  map_t.v6.l4_protocol;
619  ip41->protocol =
620  (vnet_buffer (p1)->map_t.v6.l4_protocol ==
621  IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p1)->
622  map_t.v6.l4_protocol;
623  ip40->checksum = ip4_header_checksum (ip40);
624  ip41->checksum = ip4_header_checksum (ip41);
625 
626  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
627  {
628  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
629  vnet_buffer (p0)->ip_frag.header_offset = 0;
630  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
631  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
632  }
633 
634  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
635  {
636  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
637  vnet_buffer (p1)->ip_frag.header_offset = 0;
638  vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
639  next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
640  }
641 
642  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
643  to_next, n_left_to_next, pi0, pi1,
644  next0, next1);
645  }
646 #endif
647 
648  while (n_left_from > 0 && n_left_to_next > 0)
649  {
650  u32 pi0;
651  vlib_buffer_t *p0;
652  ip6_header_t *ip60;
653  ip6_frag_hdr_t *frag0;
654  ip4_header_t *ip40;
655  u16 frag_id0;
656  u8 frag_more0;
657  u16 frag_offset0;
658  u32 next0;
659 
660  pi0 = to_next[0] = from[0];
661  from += 1;
662  n_left_from -= 1;
663  to_next += 1;
664  n_left_to_next -= 1;
665 
666  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
667  p0 = vlib_get_buffer (vm, pi0);
668  ip60 = vlib_buffer_get_current (p0);
669  frag0 =
670  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
671  vnet_buffer (p0)->map_t.
672  v6.frag_offset);
673  ip40 =
674  (ip4_header_t *) u8_ptr_add (ip60,
675  vnet_buffer (p0)->map_t.
676  v6.l4_offset - sizeof (*ip40));
677  vlib_buffer_advance (p0,
678  vnet_buffer (p0)->map_t.v6.l4_offset -
679  sizeof (*ip40));
680 
681  frag_id0 = frag_id_6to4 (frag0->identification);
682  frag_more0 = ip6_frag_hdr_more (frag0);
683  frag_offset0 = ip6_frag_hdr_offset (frag0);
684 
685  ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
686  ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
687  ip40->ip_version_and_header_length =
688  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
689  ip40->tos = ip6_translate_tos (ip60);
690  ip40->length = u16_net_add (ip60->payload_length,
691  sizeof (*ip40) -
692  vnet_buffer (p0)->map_t.v6.l4_offset +
693  sizeof (*ip60));
694  ip40->fragment_id = frag_id0;
695  ip40->flags_and_fragment_offset =
696  clib_host_to_net_u16 (frag_offset0 |
697  (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS
698  : 0));
699  ip40->ttl = ip60->hop_limit;
700  ip40->protocol =
701  (vnet_buffer (p0)->map_t.v6.l4_protocol ==
702  IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : vnet_buffer (p0)->
703  map_t.v6.l4_protocol;
704  ip40->checksum = ip4_header_checksum (ip40);
705 
706  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
707  {
708  //Send to fragmentation node if necessary
709  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
710  vnet_buffer (p0)->ip_frag.header_offset = 0;
711  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
712  next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
713  }
714 
715  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
716  to_next, n_left_to_next, pi0,
717  next0);
718  }
719  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
720  }
721  return frame->n_vectors;
722 }
723 
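//Node function: TCP and UDP translation. The L4 checksum is updated
//incrementally from the IPv6 pseudo-header to the IPv4 pseudo-header.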
724 static uword
725 ip6_map_t_tcp_udp (vlib_main_t * vm,
726  vlib_node_runtime_t * node, vlib_frame_t * frame)
727 {
728  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
729  from = vlib_frame_vector_args (frame);
730  n_left_from = frame->n_vectors;
731  next_index = node->cached_next_index;
732  while (n_left_from > 0)
733  {
734  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
735 
736 #ifdef IP6_MAP_T_DUAL_LOOP
737  while (n_left_from >= 4 && n_left_to_next >= 2)
738  {
739  u32 pi0, pi1;
740  vlib_buffer_t *p0, *p1;
741  ip6_header_t *ip60, *ip61;
742  ip_csum_t csum0, csum1;
743  ip4_header_t *ip40, *ip41;
744  u16 fragment_id0, flags0, *checksum0,
745  fragment_id1, flags1, *checksum1;
746  ip6_mapt_tcp_udp_next_t next0, next1;
747 
748  pi0 = to_next[0] = from[0];
749  pi1 = to_next[1] = from[1];
750  from += 2;
751  n_left_from -= 2;
752  to_next += 2;
753  n_left_to_next -= 2;
754  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
755  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
756 
757  p0 = vlib_get_buffer (vm, pi0);
758  p1 = vlib_get_buffer (vm, pi1);
759  ip60 = vlib_buffer_get_current (p0);
760  ip61 = vlib_buffer_get_current (p1);
761  ip40 =
762  (ip4_header_t *) u8_ptr_add (ip60,
763  vnet_buffer (p0)->map_t.
764  v6.l4_offset - sizeof (*ip40));
765  ip41 =
766  (ip4_header_t *) u8_ptr_add (ip61,
767  vnet_buffer (p1)->map_t.
768  v6.l4_offset - sizeof (*ip40));
769  vlib_buffer_advance (p0,
770  vnet_buffer (p0)->map_t.v6.l4_offset -
771  sizeof (*ip40));
772  vlib_buffer_advance (p1,
773  vnet_buffer (p1)->map_t.v6.l4_offset -
774  sizeof (*ip40));
775  checksum0 =
776  (u16 *) u8_ptr_add (ip60,
777  vnet_buffer (p0)->map_t.checksum_offset);
778  checksum1 =
779  (u16 *) u8_ptr_add (ip61,
780  vnet_buffer (p1)->map_t.checksum_offset);
781 
782  csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
783  csum1 = ip_csum_sub_even (*checksum1, ip61->src_address.as_u64[0]);
784  csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
785  csum1 = ip_csum_sub_even (csum1, ip61->src_address.as_u64[1]);
786  csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
787  csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[0]);
788  csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
789  csum1 = ip_csum_sub_even (csum1, ip61->dst_address.as_u64[1]);
790  csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
791  csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.daddr);
792  csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
793  csum1 = ip_csum_add_even (csum1, vnet_buffer (p1)->map_t.v6.saddr);
794  *checksum0 = ip_csum_fold (csum0);
795  *checksum1 = ip_csum_fold (csum1);
796 
797  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
798  {
799  ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
800  vnet_buffer
801  (p0)->
802  map_t.
803  v6.frag_offset);
804  fragment_id0 = frag_id_6to4 (hdr->identification);
805  flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
806  }
807  else
808  {
809  fragment_id0 = 0;
810  flags0 = 0;
811  }
812 
813  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset))
814  {
815  ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip61,
816  vnet_buffer
817  (p1)->
818  map_t.
819  v6.frag_offset);
820  fragment_id1 = frag_id_6to4 (hdr->identification);
821  flags1 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
822  }
823  else
824  {
825  fragment_id1 = 0;
826  flags1 = 0;
827  }
828 
829  ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
830  ip41->dst_address.as_u32 = vnet_buffer (p1)->map_t.v6.daddr;
831  ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
832  ip41->src_address.as_u32 = vnet_buffer (p1)->map_t.v6.saddr;
833  ip40->ip_version_and_header_length =
834  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
835  ip41->ip_version_and_header_length =
836  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
837  ip40->tos = ip6_translate_tos (ip60);
838  ip41->tos = ip6_translate_tos (ip61);
839  ip40->length = u16_net_add (ip60->payload_length,
840  sizeof (*ip40) + sizeof (*ip60) -
841  vnet_buffer (p0)->map_t.v6.l4_offset);
842  ip41->length =
843  u16_net_add (ip61->payload_length,
844  sizeof (*ip40) + sizeof (*ip60) -
845  vnet_buffer (p1)->map_t.v6.l4_offset);
846  ip40->fragment_id = fragment_id0;
847  ip41->fragment_id = fragment_id1;
848  ip40->flags_and_fragment_offset = flags0;
849  ip41->flags_and_fragment_offset = flags1;
850  ip40->ttl = ip60->hop_limit;
851  ip41->ttl = ip61->hop_limit;
852  ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
853  ip41->protocol = vnet_buffer (p1)->map_t.v6.l4_protocol;
854  ip40->checksum = ip4_header_checksum (ip40);
855  ip41->checksum = ip4_header_checksum (ip41);
856 
857  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
858  {
859  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
860  vnet_buffer (p0)->ip_frag.header_offset = 0;
861  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
862  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
863  }
864 
865  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
866  {
867  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
868  vnet_buffer (p1)->ip_frag.header_offset = 0;
869  vnet_buffer (p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
870  next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
871  }
872 
873  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
874  n_left_to_next, pi0, pi1, next0,
875  next1);
876  }
877 #endif
878 
879  while (n_left_from > 0 && n_left_to_next > 0)
880  {
881  u32 pi0;
882  vlib_buffer_t *p0;
883  ip6_header_t *ip60;
884  u16 *checksum0;
885  ip_csum_t csum0;
886  ip4_header_t *ip40;
887  u16 fragment_id0;
888  u16 flags0;
889  ip6_mapt_tcp_udp_next_t next0;
890 
891  pi0 = to_next[0] = from[0];
892  from += 1;
893  n_left_from -= 1;
894  to_next += 1;
895  n_left_to_next -= 1;
896  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
897 
898  p0 = vlib_get_buffer (vm, pi0);
899  ip60 = vlib_buffer_get_current (p0);
900  ip40 =
901  (ip4_header_t *) u8_ptr_add (ip60,
902  vnet_buffer (p0)->map_t.
903  v6.l4_offset - sizeof (*ip40));
904  vlib_buffer_advance (p0,
905  vnet_buffer (p0)->map_t.v6.l4_offset -
906  sizeof (*ip40));
907  checksum0 =
908  (u16 *) u8_ptr_add (ip60,
909  vnet_buffer (p0)->map_t.checksum_offset);
910 
911  //TODO: This can probably be optimized
912  csum0 = ip_csum_sub_even (*checksum0, ip60->src_address.as_u64[0]);
913  csum0 = ip_csum_sub_even (csum0, ip60->src_address.as_u64[1]);
914  csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[0]);
915  csum0 = ip_csum_sub_even (csum0, ip60->dst_address.as_u64[1]);
916  csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.daddr);
917  csum0 = ip_csum_add_even (csum0, vnet_buffer (p0)->map_t.v6.saddr);
918  *checksum0 = ip_csum_fold (csum0);
919 
920  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset))
921  {
922  //Only the first fragment
923  ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add (ip60,
924  vnet_buffer
925  (p0)->
926  map_t.
927  v6.frag_offset);
928  fragment_id0 = frag_id_6to4 (hdr->identification);
929  flags0 = clib_host_to_net_u16 (IP4_HEADER_FLAG_MORE_FRAGMENTS);
930  }
931  else
932  {
933  fragment_id0 = 0;
934  flags0 = 0;
935  }
936 
937  ip40->dst_address.as_u32 = vnet_buffer (p0)->map_t.v6.daddr;
938  ip40->src_address.as_u32 = vnet_buffer (p0)->map_t.v6.saddr;
939  ip40->ip_version_and_header_length =
940  IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
941  ip40->tos = ip6_translate_tos (ip60);
942  ip40->length = u16_net_add (ip60->payload_length,
943  sizeof (*ip40) + sizeof (*ip60) -
944  vnet_buffer (p0)->map_t.v6.l4_offset);
945  ip40->fragment_id = fragment_id0;
946  ip40->flags_and_fragment_offset = flags0;
947  ip40->ttl = ip60->hop_limit;
948  ip40->protocol = vnet_buffer (p0)->map_t.v6.l4_protocol;
949  ip40->checksum = ip4_header_checksum (ip40);
950 
951  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
952  {
953  //Send to fragmentation node if necessary
954  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
955  vnet_buffer (p0)->ip_frag.header_offset = 0;
956  vnet_buffer (p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
957  next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
958  }
959 
960  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
961  to_next, n_left_to_next, pi0,
962  next0);
963  }
964  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
965  }
966  return frame->n_vectors;
967 }
968 
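//Classify a packet against its MAP domain: pick the next node (tcp-udp,
//icmp or fragmented), extract the source port used by the security check,
//and flag malformed or unsupported protocols.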
969 static_always_inline void
970 ip6_map_t_classify (vlib_buffer_t * p0, ip6_header_t * ip60,
971  map_domain_t * d0, i32 * src_port0,
972  u8 * error0, ip6_mapt_next_t * next0,
973  u32 l4_len0, ip6_frag_hdr_t * frag0)
974 {
975  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
976  ip6_frag_hdr_offset (frag0)))
977  {
978  *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
979  if (d0->ea_bits_len == 0 && d0->rules)
980  {
981  *src_port0 = 0;
982  }
983  else
984  {
985  *src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
986  *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
987  }
988  }
989  else
990  if (PREDICT_TRUE
991  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
992  {
993  *error0 =
994  l4_len0 < sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
995  vnet_buffer (p0)->map_t.checksum_offset =
996  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
997  *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
998  *src_port0 =
999  (i32) *
1000  ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
1001  }
1002  else
1003  if (PREDICT_TRUE
1004  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
1005  {
1006  *error0 =
1007  l4_len0 < sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
1008  vnet_buffer (p0)->map_t.checksum_offset =
1009  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
1010  *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1011  *src_port0 =
1012  (i32) *
1013  ((u16 *) u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
1014  }
1015  else if (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6)
1016  {
1017  *error0 =
1018  l4_len0 < sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
1019  *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1020  if (d0->ea_bits_len == 0 && d0->rules)
1021  {
1022  *src_port0 = 0;
1023  }
1024  else
1025  if (((icmp46_header_t *)
1026  u8_ptr_add (ip60,
1027  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1028  ICMP6_echo_reply
1029  || ((icmp46_header_t *)
1030  u8_ptr_add (ip60,
1031  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1032  ICMP6_echo_request)
1033  {
1034  *src_port0 =
1035  (i32) *
1036  ((u16 *)
1037  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1038  }
1039  }
1040  else
1041  {
1042  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1043  *error0 = MAP_ERROR_BAD_PROTOCOL;
1044  }
1045 }
1046 
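//Entry node: parse the IPv6 header, resolve the MAP domain, run the source
//address security check, cache first-fragment ports, and dispatch to the
//per-protocol translation nodes.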
1047 static uword
1048 ip6_map_t (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
1049 {
1050  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
1051  vlib_node_runtime_t *error_node =
1052  vlib_node_get_runtime (vm, ip6_map_t_node.index);
1053  vlib_combined_counter_main_t *cm = map_main.domain_counters;
1054  u32 cpu_index = os_get_cpu_number ();
1055 
1056  from = vlib_frame_vector_args (frame);
1057  n_left_from = frame->n_vectors;
1058  next_index = node->cached_next_index;
1059  while (n_left_from > 0)
1060  {
1061  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1062 
1063 #ifdef IP6_MAP_T_DUAL_LOOP
1064  while (n_left_from >= 4 && n_left_to_next >= 2)
1065  {
1066  u32 pi0, pi1;
1067  vlib_buffer_t *p0, *p1;
1068  ip6_header_t *ip60, *ip61;
1069  u8 error0, error1;
1070  ip6_mapt_next_t next0, next1;
1071  u32 l4_len0, l4_len1;
1072  i32 src_port0, src_port1;
1073  map_domain_t *d0, *d1;
1074  ip6_frag_hdr_t *frag0, *frag1;
1075  u32 saddr0, saddr1;
1076  next0 = next1 = 0; //Because compiler whines
1077 
1078  pi0 = to_next[0] = from[0];
1079  pi1 = to_next[1] = from[1];
1080  from += 2;
1081  n_left_from -= 2;
1082  to_next += 2;
1083  n_left_to_next -= 2;
1084 
1085  error0 = MAP_ERROR_NONE;
1086  error1 = MAP_ERROR_NONE;
1087 
1088  p0 = vlib_get_buffer (vm, pi0);
1089  p1 = vlib_get_buffer (vm, pi1);
1090  ip60 = vlib_buffer_get_current (p0);
1091  ip61 = vlib_buffer_get_current (p1);
1092 
1093  saddr0 = map_get_ip4 (&ip60->src_address);
1094  saddr1 = map_get_ip4 (&ip61->src_address);
1095  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
1096  (ip4_address_t *) & saddr0,
1097  &vnet_buffer (p0)->map_t.map_domain_index,
1098  &error0);
1099  d1 =
1100  ip6_map_get_domain (vnet_buffer (p1)->ip.adj_index[VLIB_TX],
1101  (ip4_address_t *) & saddr1,
1102  &vnet_buffer (p1)->map_t.map_domain_index,
1103  &error1);
1104 
1105  vnet_buffer (p0)->map_t.v6.saddr = saddr0;
1106  vnet_buffer (p1)->map_t.v6.saddr = saddr1;
1107  vnet_buffer (p0)->map_t.v6.daddr =
1108  ip6_map_t_embedded_address (d0, &ip60->dst_address);
1109  vnet_buffer (p1)->map_t.v6.daddr =
1110  ip6_map_t_embedded_address (d1, &ip61->dst_address);
1111  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
1112  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
1113 
1114  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
1115  &(vnet_buffer (p0)->map_t.
1116  v6.l4_protocol),
1117  &(vnet_buffer (p0)->map_t.
1118  v6.l4_offset),
1119  &(vnet_buffer (p0)->map_t.
1120  v6.frag_offset))))
1121  {
1122  error0 = MAP_ERROR_MALFORMED;
1123  next0 = IP6_MAPT_NEXT_DROP;
1124  }
1125 
1126  if (PREDICT_FALSE (ip6_parse (ip61, p1->current_length,
1127  &(vnet_buffer (p1)->map_t.
1128  v6.l4_protocol),
1129  &(vnet_buffer (p1)->map_t.
1130  v6.l4_offset),
1131  &(vnet_buffer (p1)->map_t.
1132  v6.frag_offset))))
1133  {
1134  error1 = MAP_ERROR_MALFORMED;
1135  next1 = IP6_MAPT_NEXT_DROP;
1136  }
1137 
1138  src_port0 = src_port1 = -1;
1139  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
1140  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
1141  l4_len1 = (u32) clib_net_to_host_u16 (ip61->payload_length) +
1142  sizeof (*ip60) - vnet_buffer (p1)->map_t.v6.l4_offset;
1143  frag0 =
1144  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1145  vnet_buffer (p0)->map_t.
1146  v6.frag_offset);
1147  frag1 =
1148  (ip6_frag_hdr_t *) u8_ptr_add (ip61,
1149  vnet_buffer (p1)->map_t.
1150  v6.frag_offset);
1151 
1152  ip6_map_t_classify (p0, ip60, d0, &src_port0, &error0, &next0,
1153  l4_len0, frag0);
1154  ip6_map_t_classify (p1, ip61, d1, &src_port1, &error1, &next1,
1155  l4_len1, frag1);
1156 
1157  if (PREDICT_FALSE
1158  ((src_port0 != -1)
1159  && (ip60->src_address.as_u64[0] !=
1160  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1161  src_port0)
1162  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1163  vnet_buffer
1164  (p0)->map_t.v6.saddr,
1165  src_port0))))
1166  {
1167  error0 = MAP_ERROR_SEC_CHECK;
1168  }
1169 
1170  if (PREDICT_FALSE
1171  ((src_port1 != -1)
1172  && (ip61->src_address.as_u64[0] !=
1173  map_get_pfx_net (d1, vnet_buffer (p1)->map_t.v6.saddr,
1174  src_port1)
1175  || ip61->src_address.as_u64[1] != map_get_sfx_net (d1,
1176  vnet_buffer
1177  (p1)->map_t.v6.saddr,
1178  src_port1))))
1179  {
1180  error1 = MAP_ERROR_SEC_CHECK;
1181  }
1182 
1183  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1184  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1185  u8_ptr_add (ip60,
1186  vnet_buffer
1187  (p0)->map_t.
1188  v6.frag_offset)))
1189  && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1190  && (error0 == MAP_ERROR_NONE))
1191  {
1192  ip6_map_fragment_cache (ip60,
1193  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1194  vnet_buffer
1195  (p0)->map_t.
1196  v6.frag_offset),
1197  d0, src_port0);
1198  }
1199 
1200  if (PREDICT_FALSE (vnet_buffer (p1)->map_t.v6.frag_offset &&
1201  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1202  u8_ptr_add (ip61,
1203  vnet_buffer
1204  (p1)->map_t.
1205  v6.frag_offset)))
1206  && (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules)
1207  && (error1 == MAP_ERROR_NONE))
1208  {
1209  ip6_map_fragment_cache (ip61,
1210  (ip6_frag_hdr_t *) u8_ptr_add (ip61,
1211  vnet_buffer
1212  (p1)->map_t.
1213  v6.frag_offset),
1214  d1, src_port1);
1215  }
1216 
1217  if (PREDICT_TRUE
1218  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1219  {
1220  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1221  cpu_index,
1222  vnet_buffer (p0)->
1223  map_t.map_domain_index, 1,
1224  clib_net_to_host_u16
1225  (ip60->payload_length));
1226  }
1227 
1228  if (PREDICT_TRUE
1229  (error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP))
1230  {
1231  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1232  cpu_index,
1233  vnet_buffer (p1)->
1234  map_t.map_domain_index, 1,
1235  clib_net_to_host_u16
1236  (ip61->payload_length));
1237  }
1238 
1239  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1240  next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
1241  p0->error = error_node->errors[error0];
1242  p1->error = error_node->errors[error1];
1243  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1244  n_left_to_next, pi0, pi1, next0,
1245  next1);
1246  }
1247 #endif
1248 
1249  while (n_left_from > 0 && n_left_to_next > 0)
1250  {
1251  u32 pi0;
1252  vlib_buffer_t *p0;
1253  ip6_header_t *ip60;
1254  u8 error0;
1255  u32 l4_len0;
1256  i32 src_port0;
1257  map_domain_t *d0;
1258  ip6_frag_hdr_t *frag0;
1259  ip6_mapt_next_t next0 = 0;
1260  u32 saddr;
1261 
1262  pi0 = to_next[0] = from[0];
1263  from += 1;
1264  n_left_from -= 1;
1265  to_next += 1;
1266  n_left_to_next -= 1;
1267  error0 = MAP_ERROR_NONE;
1268 
1269  p0 = vlib_get_buffer (vm, pi0);
1270  ip60 = vlib_buffer_get_current (p0);
1271  //Save saddr in a different variable to not overwrite ip.adj_index
1272  saddr = map_get_ip4 (&ip60->src_address);
1273  d0 = ip6_map_get_domain (vnet_buffer (p0)->ip.adj_index[VLIB_TX],
1274  (ip4_address_t *) & saddr,
1275  &vnet_buffer (p0)->map_t.map_domain_index,
1276  &error0);
1277 
1278  //FIXME: What if d0 is null
1279  vnet_buffer (p0)->map_t.v6.saddr = saddr;
1280  vnet_buffer (p0)->map_t.v6.daddr =
1281  ip6_map_t_embedded_address (d0, &ip60->dst_address);
1282  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
1283 
1284  if (PREDICT_FALSE (ip6_parse (ip60, p0->current_length,
1285  &(vnet_buffer (p0)->map_t.
1286  v6.l4_protocol),
1287  &(vnet_buffer (p0)->map_t.
1288  v6.l4_offset),
1289  &(vnet_buffer (p0)->map_t.
1290  v6.frag_offset))))
1291  {
1292  error0 = MAP_ERROR_MALFORMED;
1293  next0 = IP6_MAPT_NEXT_DROP;
1294  }
1295 
1296  src_port0 = -1;
1297  l4_len0 = (u32) clib_net_to_host_u16 (ip60->payload_length) +
1298  sizeof (*ip60) - vnet_buffer (p0)->map_t.v6.l4_offset;
1299  frag0 =
1300  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1301  vnet_buffer (p0)->map_t.
1302  v6.frag_offset);
1303 
1304 
1305  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1306  ip6_frag_hdr_offset (frag0)))
1307  {
1308  src_port0 = ip6_map_fragment_get (ip60, frag0, d0);
1309  error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
1310  next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
1311  }
1312  else
1313  if (PREDICT_TRUE
1314  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP))
1315  {
1316  error0 =
1317  l4_len0 <
1318  sizeof (tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
1319  vnet_buffer (p0)->map_t.checksum_offset =
1320  vnet_buffer (p0)->map_t.v6.l4_offset + 16;
1321  next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1322  src_port0 =
1323  (i32) *
1324  ((u16 *)
1325  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
1326  }
1327  else
1328  if (PREDICT_TRUE
1329  (vnet_buffer (p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP))
1330  {
1331  error0 =
1332  l4_len0 <
1333  sizeof (udp_header_t) ? MAP_ERROR_MALFORMED : error0;
1334  vnet_buffer (p0)->map_t.checksum_offset =
1335  vnet_buffer (p0)->map_t.v6.l4_offset + 6;
1336  next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
1337  src_port0 =
1338  (i32) *
1339  ((u16 *)
1340  u8_ptr_add (ip60, vnet_buffer (p0)->map_t.v6.l4_offset));
1341  }
1342  else if (vnet_buffer (p0)->map_t.v6.l4_protocol ==
1343  IP_PROTOCOL_ICMP6)
1344  {
1345  error0 =
1346  l4_len0 <
1347  sizeof (icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
1348  next0 = IP6_MAPT_NEXT_MAPT_ICMP;
1349  if (((icmp46_header_t *)
1350  u8_ptr_add (ip60,
1351  vnet_buffer (p0)->map_t.v6.l4_offset))->code ==
1352  ICMP6_echo_reply
1353  || ((icmp46_header_t *)
1354  u8_ptr_add (ip60,
1355  vnet_buffer (p0)->map_t.v6.
1356  l4_offset))->code == ICMP6_echo_request)
1357  src_port0 =
1358  (i32) *
1359  ((u16 *)
1360  u8_ptr_add (ip60,
1361  vnet_buffer (p0)->map_t.v6.l4_offset + 6));
1362  }
1363  else
1364  {
1365  //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
1366  error0 = MAP_ERROR_BAD_PROTOCOL;
1367  }
1368 
1369  //Security check
1370  if (PREDICT_FALSE
1371  ((src_port0 != -1)
1372  && (ip60->src_address.as_u64[0] !=
1373  map_get_pfx_net (d0, vnet_buffer (p0)->map_t.v6.saddr,
1374  src_port0)
1375  || ip60->src_address.as_u64[1] != map_get_sfx_net (d0,
1376  vnet_buffer
1377  (p0)->map_t.v6.saddr,
1378  src_port0))))
1379  {
1380  //Security check when src_port0 is not zero (non-first fragment, UDP or TCP)
1381  error0 = MAP_ERROR_SEC_CHECK;
1382  }
1383 
1384  //Fragmented first packet needs to be cached for following packets
1385  if (PREDICT_FALSE (vnet_buffer (p0)->map_t.v6.frag_offset &&
1386  !ip6_frag_hdr_offset ((ip6_frag_hdr_t *)
1387  u8_ptr_add (ip60,
1388  vnet_buffer
1389  (p0)->map_t.
1390  v6.frag_offset)))
1391  && (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules)
1392  && (error0 == MAP_ERROR_NONE))
1393  {
1394  ip6_map_fragment_cache (ip60,
1395  (ip6_frag_hdr_t *) u8_ptr_add (ip60,
1396  vnet_buffer
1397  (p0)->map_t.
1398  v6.frag_offset),
1399  d0, src_port0);
1400  }
1401 
1402  if (PREDICT_TRUE
1403  (error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP))
1404  {
1405  vlib_increment_combined_counter (cm + MAP_DOMAIN_COUNTER_RX,
1406  cpu_index,
1407  vnet_buffer (p0)->
1408  map_t.map_domain_index, 1,
1409  clib_net_to_host_u16
1410  (ip60->payload_length));
1411  }
1412 
1413  next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
1414  p0->error = error_node->errors[error0];
1415  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1416  to_next, n_left_to_next, pi0,
1417  next0);
1418  }
1419  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1420  }
1421  return frame->n_vectors;
1422 }
1423 
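//Error strings are expanded from the foreach_map_error list defined in map.h.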
1424 static char *map_t_error_strings[] = {
1425 #define _(sym,string) string,
1426  foreach_map_error
1427 #undef _
1428 };
1429 
1430 /* *INDENT-OFF* */
1431 VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
1432  .function = ip6_map_t_fragmented,
1433  .name = "ip6-map-t-fragmented",
1434  .vector_size = sizeof (u32),
1435  .format_trace = format_map_trace,
1436  .type = VLIB_NODE_TYPE_INTERNAL,
1437 
1438  .n_errors = MAP_N_ERROR,
1439  .error_strings = map_t_error_strings,
1440 
1441  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
1442  .next_nodes = {
1443  [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
1444  [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1445  [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1446  },
1447 };
1448 /* *INDENT-ON* */
1449 
1450 /* *INDENT-OFF* */
1451 VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
1452  .function = ip6_map_t_icmp,
1453  .name = "ip6-map-t-icmp",
1454  .vector_size = sizeof (u32),
1455  .format_trace = format_map_trace,
1456  .type = VLIB_NODE_TYPE_INTERNAL,
1457 
1458  .n_errors = MAP_N_ERROR,
1459  .error_strings = map_t_error_strings,
1460 
1461  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
1462  .next_nodes = {
1463  [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1464  [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1465  [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
1466  },
1467 };
1468 /* *INDENT-ON* */
1469 
1470 /* *INDENT-OFF* */
1471 VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
1472  .function = ip6_map_t_tcp_udp,
1473  .name = "ip6-map-t-tcp-udp",
1474  .vector_size = sizeof (u32),
1475  .format_trace = format_map_trace,
1476  .type = VLIB_NODE_TYPE_INTERNAL,
1477 
1478  .n_errors = MAP_N_ERROR,
1479  .error_strings = map_t_error_strings,
1480 
1481  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
1482  .next_nodes = {
1483  [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
1484  [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
1485  [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1486  },
1487 };
1488 /* *INDENT-ON* */
1489 
1490 /* *INDENT-OFF* */
1491 VLIB_REGISTER_NODE(ip6_map_t_node) = {
1492  .function = ip6_map_t,
1493  .name = "ip6-map-t",
1494  .vector_size = sizeof(u32),
1495  .format_trace = format_map_trace,
1496  .type = VLIB_NODE_TYPE_INTERNAL,
1497 
1498  .n_errors = MAP_N_ERROR,
1499  .error_strings = map_t_error_strings,
1500 
1501  .n_next_nodes = IP6_MAPT_N_NEXT,
1502  .next_nodes = {
1503  [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
1504  [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
1505  [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
1506  [IP6_MAPT_NEXT_DROP] = "error-drop",
1507  },
1508 };
1509 /* *INDENT-ON* */
1510 
1511 /*
1512  * fd.io coding-style-patch-verification: ON
1513  *
1514  * Local Variables:
1515  * eval: (c-set-style "gnu")
1516  * End:
1517  */