FD.io VPP v16.06
Vector Packet Processing
ip6_map_t.c
/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "map.h"

#include "../ip/ip_frag.h"

#define IP6_MAP_T_DUAL_LOOP
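//When defined, each node below also compiles a dual-packet loop (two
//buffers per iteration) ahead of the single-buffer loop, improving
//pipelining on large vectors.
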
typedef enum {
  IP6_MAPT_NEXT_MAPT_TCP_UDP,
  IP6_MAPT_NEXT_MAPT_ICMP,
  IP6_MAPT_NEXT_MAPT_FRAGMENTED,
  IP6_MAPT_NEXT_DROP,
  IP6_MAPT_N_NEXT
} ip6_mapt_next_t;

typedef enum {
  IP6_MAPT_ICMP_NEXT_IP4_LOOKUP,
  IP6_MAPT_ICMP_NEXT_IP4_FRAG,
  IP6_MAPT_ICMP_NEXT_DROP,
  IP6_MAPT_ICMP_N_NEXT
} ip6_mapt_icmp_next_t;

typedef enum {
  IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP,
  IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG,
  IP6_MAPT_TCP_UDP_NEXT_DROP,
  IP6_MAPT_TCP_UDP_N_NEXT
} ip6_mapt_tcp_udp_next_t;

typedef enum {
  IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP,
  IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG,
  IP6_MAPT_FRAGMENTED_NEXT_DROP,
  IP6_MAPT_FRAGMENTED_N_NEXT
} ip6_mapt_fragmented_next_t;
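
//IPv4 fragments other than the first carry no L4 header, so their MAP
//port cannot be read from the packet itself. The port seen in the first
//fragment is stored in the IPv4 reassembly structure (keyed by the
//translated addresses, fragment id and protocol); non-first fragments
//retrieve it with ip6_map_fragment_get() below.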
static_always_inline int
ip6_map_fragment_cache (ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d, u16 port)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock();
  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address),
                                         ip6_map_t_embedded_address(d, &ip6->dst_address),
                                         frag_id_6to4(frag->identification),
                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
                                         &ignore);
  if (r)
    r->port = port;

  map_ip4_reass_unlock();
  return !r;
}

/* Returns the associated port or -1 */
static_always_inline i32
ip6_map_fragment_get(ip6_header_t *ip6, ip6_frag_hdr_t *frag, map_domain_t *d)
{
  u32 *ignore = NULL;
  map_ip4_reass_lock();
  map_ip4_reass_t *r = map_ip4_reass_get(map_get_ip4(&ip6->src_address),
                                         ip6_map_t_embedded_address(d, &ip6->dst_address),
                                         frag_id_6to4(frag->identification),
                                         (ip6->protocol == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : ip6->protocol,
                                         &ignore);
  i32 ret = r ? r->port : -1;
  map_ip4_reass_unlock();
  return ret;
}

static_always_inline u8
ip6_translate_tos(const ip6_header_t *ip6)
{
#ifdef IP6_MAP_T_OVERRIDE_TOS
  return IP6_MAP_T_OVERRIDE_TOS;
#else
  return (clib_net_to_host_u32(ip6->ip_version_traffic_class_and_flow_label) & 0x0ff00000) >> 20;
#endif
}
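
//The IPv6 traffic class sits in bits 20..27 of the first header word.
//For example, a traffic class of 0xb8 (DSCP EF) makes the first word
//0x6b800000, and (0x6b800000 & 0x0ff00000) >> 20 == 0xb8, which is used
//unchanged as the IPv4 TOS byte.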

//TODO: Find right place in memory for that
static u8 icmp6_to_icmp_updater_pointer_table[] =
  { 0, 1,~0,~0,
    2, 2, 9, 8,
    12,12,12,12,
    12,12,12,12,
    12,12,12,12,
    12,12,12,12,
    24,24,24,24,
    24,24,24,24,
    24,24,24,24,
    24,24,24,24
  };
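
//The table maps an ICMPv6 parameter-problem pointer (an offset 0..39
//into the IPv6 header) to the offset of the corresponding field in the
//translated IPv4 header; ~0 marks fields with no IPv4 counterpart.
//Indices 8..23 cover the IPv6 source address and 24..39 the destination
//address.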

static_always_inline int
ip6_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
                            i32 *sender_port, ip6_header_t **inner_ip6)
{
  *inner_ip6 = NULL;
  switch (icmp->type) {
  case ICMP6_echo_request:
    *sender_port = ((u16 *)icmp)[2];
    icmp->type = ICMP4_echo_request;
    break;
  case ICMP6_echo_reply:
    *sender_port = ((u16 *)icmp)[2];
    icmp->type = ICMP4_echo_reply;
    break;
  case ICMP6_destination_unreachable:
    *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
    *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);

    switch (icmp->code) {
    case ICMP6_destination_unreachable_no_route_to_destination: //0
    case ICMP6_destination_unreachable_beyond_scope_of_source_address: //2
    case ICMP6_destination_unreachable_address_unreachable: //3
      icmp->type = ICMP4_destination_unreachable;
      icmp->code = ICMP4_destination_unreachable_destination_unreachable_host;
      break;
    case ICMP6_destination_unreachable_destination_administratively_prohibited: //1
      icmp->type = ICMP4_destination_unreachable;
      icmp->code = ICMP4_destination_unreachable_communication_administratively_prohibited;
      break;
    case ICMP6_destination_unreachable_port_unreachable:
      icmp->type = ICMP4_destination_unreachable;
      icmp->code = ICMP4_destination_unreachable_port_unreachable;
      break;
    default:
      return -1;
    }
    break;
  case ICMP6_packet_too_big:
    *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
    *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);

    icmp->type = ICMP4_destination_unreachable;
    icmp->code = 4; //Fragmentation needed
    {
      u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
      advertised_mtu -= 20; //The IPv4 header is 20 bytes smaller than the IPv6 header
      //FIXME: = minimum(advertised MTU-20, MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20)
      ((u16 *)(icmp))[3] = clib_host_to_net_u16(advertised_mtu);
    }
    break;

  case ICMP6_time_exceeded:
    *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
    *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);

    icmp->type = ICMP4_time_exceeded;
    break;

  case ICMP6_parameter_problem:
    *inner_ip6 = (ip6_header_t *) u8_ptr_add(icmp, 8);
    *sender_port = ip6_get_port(*inner_ip6, MAP_RECEIVER, icmp_len);

    switch (icmp->code) {
    case ICMP6_parameter_problem_erroneous_header_field:
      icmp->type = ICMP4_parameter_problem;
      icmp->code = ICMP4_parameter_problem_pointer_indicates_error;
      u32 pointer = clib_net_to_host_u32(*((u32*)(icmp + 1)));
      if (pointer >= 40)
        return -1;

      ((u8*)(icmp + 1))[0] = icmp6_to_icmp_updater_pointer_table[pointer];
      break;
    case ICMP6_parameter_problem_unrecognized_next_header:
      icmp->type = ICMP4_destination_unreachable;
      icmp->code = ICMP4_destination_unreachable_port_unreachable;
      break;
    case ICMP6_parameter_problem_unrecognized_option:
    default:
      return -1;
    }
    break;
  default:
    return -1;
  }
  return 0;
}

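//Translate an ICMPv6 message (and any embedded packet) to ICMPv4 in
//place. ICMPv4 checksums, unlike ICMPv6 ones, cover no pseudo-header:
//embedded TCP/UDP checksums are therefore patched incrementally below,
//while the outer ICMP checksum is recomputed from scratch at the end.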
static_always_inline void
_ip6_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
{
  ip6_header_t *ip6, *inner_ip6;
  ip4_header_t *ip4, *inner_ip4;
  u32 ip6_pay_len;
  icmp46_header_t *icmp;
  i32 sender_port;
  ip_csum_t csum;
  u32 ip4_sadr, inner_ip4_dadr;

  ip6 = vlib_buffer_get_current(p);
  ip6_pay_len = clib_net_to_host_u16(ip6->payload_length);
  icmp = (icmp46_header_t *)(ip6 + 1);
  ASSERT(ip6_pay_len + sizeof(*ip6) <= p->current_length);

  if (ip6->protocol != IP_PROTOCOL_ICMP6) {
    //No extension headers allowed here
    //TODO: SR header
    *error = MAP_ERROR_MALFORMED;
    return;
  }

  //There are no fragmented ICMP messages, so no extension header for now

  if (ip6_icmp_to_icmp6_in_place(icmp, ip6_pay_len, &sender_port, &inner_ip6)) {
    //TODO: In case of 1:1 mapping it is not necessary to have the sender port
    *error = MAP_ERROR_ICMP;
    return;
  }

  if (sender_port < 0) {
    //In case of 1:1 mapping, we don't care about the port
    if (d->ea_bits_len == 0 && d->rules) {
      sender_port = 0;
    } else {
      *error = MAP_ERROR_ICMP;
      return;
    }
  }

  //Security check
  //Note that this prevents an intermediate IPv6 router from answering the request
  ip4_sadr = map_get_ip4(&ip6->src_address);
  if (ip6->src_address.as_u64[0] != map_get_pfx_net(d, ip4_sadr, sender_port) ||
      ip6->src_address.as_u64[1] != map_get_sfx_net(d, ip4_sadr, sender_port)) {
    *error = MAP_ERROR_SEC_CHECK;
    return;
  }

  if (inner_ip6) {
    u16 *inner_L4_checksum, inner_l4_offset, inner_frag_offset, inner_frag_id;
    u8 *inner_l4, inner_protocol;

    //We have two headers to translate
    //  FROM
    //  [ IPv6 ]<- ext ->[IC][ IPv6 ]<- ext ->[L4 header ...
    //  Handled cases:
    //  [ IPv6 ][IC][ IPv6 ][L4 header ...
    //  [ IPv6 ][IC][ IPv6 ][Fr][L4 header ...
    //  TO
    //  [ IPv4 ][IC][ IPv4 ][L4 header ...

    //TODO: This was already done deep in ip6_icmp_to_icmp6_in_place
    //We shouldn't have to do it again
    if (ip6_parse(inner_ip6, ip6_pay_len - 8,
                  &inner_protocol, &inner_l4_offset, &inner_frag_offset)) {
      *error = MAP_ERROR_MALFORMED;
      return;
    }

    inner_l4 = u8_ptr_add(inner_ip6, inner_l4_offset);
    inner_ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - sizeof(*inner_ip4));
    if (inner_frag_offset) {
      ip6_frag_hdr_t *inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, inner_frag_offset);
      inner_frag_id = frag_id_6to4(inner_frag->identification);
    } else {
      inner_frag_id = 0;
    }

    //Do the translation of the inner packet
    if (inner_protocol == IP_PROTOCOL_TCP) {
      inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 16);
    } else if (inner_protocol == IP_PROTOCOL_UDP) {
      inner_L4_checksum = (u16 *) u8_ptr_add(inner_l4, 6);
    } else if (inner_protocol == IP_PROTOCOL_ICMP6) {
      icmp46_header_t *inner_icmp = (icmp46_header_t *) inner_l4;
      csum = inner_icmp->checksum;
      csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
      //It cannot be of a different type as ip6_icmp_to_icmp6_in_place succeeded
      inner_icmp->type = (inner_icmp->type == ICMP6_echo_request) ?
        ICMP4_echo_request : ICMP4_echo_reply;
      csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
      inner_icmp->checksum = ip_csum_fold(csum);
      inner_protocol = IP_PROTOCOL_ICMP; //Will be copied to ip6 later
      inner_L4_checksum = &inner_icmp->checksum;
    } else {
      *error = MAP_ERROR_BAD_PROTOCOL;
      return;
    }

    csum = *inner_L4_checksum;
    csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[0]);
    csum = ip_csum_sub_even(csum, inner_ip6->src_address.as_u64[1]);
    csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[0]);
    csum = ip_csum_sub_even(csum, inner_ip6->dst_address.as_u64[1]);

    //Sanity check of the outer destination address
    if (ip6->dst_address.as_u64[0] != inner_ip6->src_address.as_u64[0] ||
        ip6->dst_address.as_u64[1] != inner_ip6->src_address.as_u64[1]) {
      *error = MAP_ERROR_SEC_CHECK;
      return;
    }

    //Security check of inner packet
    inner_ip4_dadr = map_get_ip4(&inner_ip6->dst_address);
    if (inner_ip6->dst_address.as_u64[0] != map_get_pfx_net(d, inner_ip4_dadr, sender_port) ||
        inner_ip6->dst_address.as_u64[1] != map_get_sfx_net(d, inner_ip4_dadr, sender_port)) {
      *error = MAP_ERROR_SEC_CHECK;
      return;
    }

    inner_ip4->dst_address.as_u32 = inner_ip4_dadr;
    inner_ip4->src_address.as_u32 = ip6_map_t_embedded_address(d, &inner_ip6->src_address);
    inner_ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
    inner_ip4->tos = ip6_translate_tos(inner_ip6);
    inner_ip4->length = u16_net_add(inner_ip6->payload_length, sizeof(*ip4) + sizeof(*ip6) -
                                    inner_l4_offset);
    inner_ip4->fragment_id = inner_frag_id;
    inner_ip4->flags_and_fragment_offset = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
    inner_ip4->ttl = inner_ip6->hop_limit;
    inner_ip4->protocol = inner_protocol;
    inner_ip4->checksum = ip4_header_checksum(inner_ip4);

    if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
      //Remove what remains of the pseudo-header from the csum
      csum = ip_csum_sub_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
      csum = ip_csum_sub_even(csum, inner_ip4->length - sizeof(*inner_ip4));
    } else {
      //Update to new pseudo-header
      csum = ip_csum_add_even(csum, inner_ip4->src_address.as_u32);
      csum = ip_csum_add_even(csum, inner_ip4->dst_address.as_u32);
    }
    *inner_L4_checksum = ip_csum_fold(csum);

    //Move up icmp header
    ip4 = (ip4_header_t *) u8_ptr_add(inner_l4, - 2 * sizeof(*ip4) - 8);
    clib_memcpy(u8_ptr_add(inner_l4, - sizeof(*ip4) - 8), icmp, 8);
    icmp = (icmp46_header_t *) u8_ptr_add(inner_l4, - sizeof(*ip4) - 8);
  } else {
    //Only one header to translate
    ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
  }
  vlib_buffer_advance(p, (u32) (((u8 *)ip4) - ((u8 *)ip6)));

  ip4->dst_address.as_u32 = ip6_map_t_embedded_address(d, &ip6->dst_address);
  ip4->src_address.as_u32 = ip4_sadr;
  ip4->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
  ip4->tos = ip6_translate_tos(ip6);
  ip4->fragment_id = 0;
  ip4->flags_and_fragment_offset = 0;
  ip4->ttl = ip6->hop_limit;
  ip4->protocol = IP_PROTOCOL_ICMP;
  //TODO fix the length depending on offset length
  ip4->length = u16_net_add(ip6->payload_length,
                            (inner_ip6 == NULL) ? sizeof(*ip4) : (2 * sizeof(*ip4) - sizeof(*ip6)));
  ip4->checksum = ip4_header_checksum(ip4);

  //TODO: We could do an easy diff-checksum for echo requests/replies
  //Recompute ICMP checksum
  icmp->checksum = 0;
  csum = ip_incremental_checksum(0, icmp, clib_net_to_host_u16(ip4->length) - sizeof(*ip4));
  icmp->checksum = ~ip_csum_fold (csum);
}

static uword
ip6_map_t_icmp (vlib_main_t *vm,
                vlib_node_runtime_t *node,
                vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_t_icmp_node.index);
  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 cpu_index = os_get_cpu_number();

  while (n_left_from > 0) {
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      u8 error0;
      ip6_mapt_icmp_next_t next0;
      map_domain_t *d0;
      u16 len0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;
      error0 = MAP_ERROR_NONE;
      next0 = IP6_MAPT_ICMP_NEXT_IP4_LOOKUP;

      p0 = vlib_get_buffer(vm, pi0);
      len0 = clib_net_to_host_u16(((ip6_header_t *)vlib_buffer_get_current(p0))->payload_length);
      d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
      _ip6_map_t_icmp(d0, p0, &error0);

      if (vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        //Send to fragmentation node if necessary
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_ICMP_NEXT_IP4_FRAG;
      }

      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
                                        len0);
      } else {
        next0 = IP6_MAPT_ICMP_NEXT_DROP;
      }

      p0->error = error_node->errors[error0];
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                       to_next, n_left_to_next, pi0,
                                       next0);
    }
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  }
  return frame->n_vectors;
}

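//Packets larger than the domain MTU are diverted to the IPv4
//fragmentation node; its parameters (mtu, header offset and the node to
//visit after fragmentation) are passed in vnet_buffer(p)->ip_frag.
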
static uword
ip6_map_t_fragmented (vlib_main_t *vm,
                      vlib_node_runtime_t *node,
                      vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
    while (n_left_from >= 4 && n_left_to_next >= 2) {
      u32 pi0, pi1;
      vlib_buffer_t *p0, *p1;
      ip6_header_t *ip60, *ip61;
      ip6_frag_hdr_t *frag0, *frag1;
      ip4_header_t *ip40, *ip41;
      u16 frag_id0, frag_offset0,
          frag_id1, frag_offset1;
      u8 frag_more0, frag_more1;
      u32 next0, next1;

      pi0 = to_next[0] = from[0];
      pi1 = to_next[1] = from[1];
      from += 2;
      n_left_from -= 2;
      to_next += 2;
      n_left_to_next -= 2;

      next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
      next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
      p0 = vlib_get_buffer(vm, pi0);
      p1 = vlib_get_buffer(vm, pi1);
      ip60 = vlib_buffer_get_current(p0);
      ip61 = vlib_buffer_get_current(p1);
      frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
      frag1 = (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
      ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      ip41 = (ip4_header_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));

      frag_id0 = frag_id_6to4(frag0->identification);
      frag_id1 = frag_id_6to4(frag1->identification);
      frag_more0 = ip6_frag_hdr_more(frag0);
      frag_more1 = ip6_frag_hdr_more(frag1);
      frag_offset0 = ip6_frag_hdr_offset(frag0);
      frag_offset1 = ip6_frag_hdr_offset(frag1);

      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
      ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
      ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip40->tos = ip6_translate_tos(ip60);
      ip41->tos = ip6_translate_tos(ip61);
      ip40->length = u16_net_add(ip60->payload_length,
                                 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
      ip41->length = u16_net_add(ip61->payload_length,
                                 sizeof(*ip40) - vnet_buffer(p1)->map_t.v6.l4_offset + sizeof(*ip60));
      ip40->fragment_id = frag_id0;
      ip41->fragment_id = frag_id1;
      ip40->flags_and_fragment_offset =
        clib_host_to_net_u16(frag_offset0 | (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
      ip41->flags_and_fragment_offset =
        clib_host_to_net_u16(frag_offset1 | (frag_more1 ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
      ip40->ttl = ip60->hop_limit;
      ip41->ttl = ip61->hop_limit;
      ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) ?
        IP_PROTOCOL_ICMP : vnet_buffer(p0)->map_t.v6.l4_protocol;
      ip41->protocol = (vnet_buffer(p1)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) ?
        IP_PROTOCOL_ICMP : vnet_buffer(p1)->map_t.v6.l4_protocol;
      ip40->checksum = ip4_header_checksum(ip40);
      ip41->checksum = ip4_header_checksum(ip41);

      if (vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
      }

      if (vnet_buffer(p1)->map_t.mtu < p1->current_length) {
        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
        vnet_buffer(p1)->ip_frag.header_offset = 0;
        vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next1 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
      }

      vlib_validate_buffer_enqueue_x2(vm, node, next_index,
                                      to_next, n_left_to_next, pi0, pi1,
                                      next0, next1);
    }
#endif

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      ip6_header_t *ip60;
      ip6_frag_hdr_t *frag0;
      ip4_header_t *ip40;
      u16 frag_id0;
      u8 frag_more0;
      u16 frag_offset0;
      u32 next0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;

      next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP;
      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      frag0 = (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
      ip40 = (ip4_header_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));

      frag_id0 = frag_id_6to4(frag0->identification);
      frag_more0 = ip6_frag_hdr_more(frag0);
      frag_offset0 = ip6_frag_hdr_offset(frag0);

      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip40->tos = ip6_translate_tos(ip60);
      ip40->length = u16_net_add(ip60->payload_length,
                                 sizeof(*ip40) - vnet_buffer(p0)->map_t.v6.l4_offset + sizeof(*ip60));
      ip40->fragment_id = frag_id0;
      ip40->flags_and_fragment_offset =
        clib_host_to_net_u16(frag_offset0 | (frag_more0 ? IP4_HEADER_FLAG_MORE_FRAGMENTS : 0));
      ip40->ttl = ip60->hop_limit;
      ip40->protocol = (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) ?
        IP_PROTOCOL_ICMP : vnet_buffer(p0)->map_t.v6.l4_protocol;
      ip40->checksum = ip4_header_checksum(ip40);

      if (vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        //Send to fragmentation node if necessary
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG;
      }

      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
                                      next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  }
  return frame->n_vectors;
}

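//TCP and UDP checksums are updated incrementally (RFC 1624): the ones'
//complement sum is commutative and associative, so replacing the IPv6
//pseudo-header addresses with the IPv4 ones only requires subtracting
//the old address words and adding the new ones:
//  csum' = fold(csum - ip6.saddr - ip6.daddr + ip4.saddr + ip4.daddr)
//which is what the ip_csum_sub_even()/ip_csum_add_even() runs in the
//node below compute.
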
static uword
ip6_map_t_tcp_udp (vlib_main_t *vm,
                   vlib_node_runtime_t *node,
                   vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
    while (n_left_from >= 4 && n_left_to_next >= 2) {
      u32 pi0, pi1;
      vlib_buffer_t *p0, *p1;
      ip6_header_t *ip60, *ip61;
      ip_csum_t csum0, csum1;
      ip4_header_t *ip40, *ip41;
      u16 fragment_id0, flags0, *checksum0,
          fragment_id1, flags1, *checksum1;
      ip6_mapt_tcp_udp_next_t next0, next1;

      pi0 = to_next[0] = from[0];
      pi1 = to_next[1] = from[1];
      from += 2;
      n_left_from -= 2;
      to_next += 2;
      n_left_to_next -= 2;
      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;
      next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

      p0 = vlib_get_buffer(vm, pi0);
      p1 = vlib_get_buffer(vm, pi1);
      ip60 = vlib_buffer_get_current(p0);
      ip61 = vlib_buffer_get_current(p1);
      ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      ip41 = (ip4_header_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p1, vnet_buffer(p1)->map_t.v6.l4_offset - sizeof(*ip40));
      checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);
      checksum1 = (u16 *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.checksum_offset);

      csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
      csum1 = ip_csum_sub_even(*checksum1, ip61->src_address.as_u64[0]);
      csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
      csum1 = ip_csum_sub_even(csum1, ip61->src_address.as_u64[1]);
      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
      csum1 = ip_csum_sub_even(csum1, ip61->dst_address.as_u64[0]);
      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
      csum1 = ip_csum_sub_even(csum1, ip61->dst_address.as_u64[1]);
      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
      csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.daddr);
      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
      csum1 = ip_csum_add_even(csum1, vnet_buffer(p1)->map_t.v6.saddr);
      *checksum0 = ip_csum_fold(csum0);
      *checksum1 = ip_csum_fold(csum1);

      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
        fragment_id0 = frag_id_6to4(hdr->identification);
        flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
      } else {
        fragment_id0 = 0;
        flags0 = 0;
      }

      if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset)) {
        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);
        fragment_id1 = frag_id_6to4(hdr->identification);
        flags1 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
      } else {
        fragment_id1 = 0;
        flags1 = 0;
      }

      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
      ip41->dst_address.as_u32 = vnet_buffer(p1)->map_t.v6.daddr;
      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
      ip41->src_address.as_u32 = vnet_buffer(p1)->map_t.v6.saddr;
      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip41->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip40->tos = ip6_translate_tos(ip60);
      ip41->tos = ip6_translate_tos(ip61);
      ip40->length = u16_net_add(ip60->payload_length,
                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
      ip41->length = u16_net_add(ip61->payload_length,
                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset);
      ip40->fragment_id = fragment_id0;
      ip41->fragment_id = fragment_id1;
      ip40->flags_and_fragment_offset = flags0;
      ip41->flags_and_fragment_offset = flags1;
      ip40->ttl = ip60->hop_limit;
      ip41->ttl = ip61->hop_limit;
      ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
      ip41->protocol = vnet_buffer(p1)->map_t.v6.l4_protocol;
      ip40->checksum = ip4_header_checksum(ip40);
      ip41->checksum = ip4_header_checksum(ip41);

      if (vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
      }

      if (vnet_buffer(p1)->map_t.mtu < p1->current_length) {
        vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
        vnet_buffer(p1)->ip_frag.header_offset = 0;
        vnet_buffer(p1)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next1 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
      }

      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
                                      n_left_to_next, pi0, pi1, next0, next1);
    }
#endif

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      ip6_header_t *ip60;
      u16 *checksum0;
      ip_csum_t csum0;
      ip4_header_t *ip40;
      u16 fragment_id0;
      u16 flags0;
      ip6_mapt_tcp_udp_next_t next0;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;
      next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP;

      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      ip40 = (ip4_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      vlib_buffer_advance(p0, vnet_buffer(p0)->map_t.v6.l4_offset - sizeof(*ip40));
      checksum0 = (u16 *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.checksum_offset);

      //TODO: This can probably be optimized
      csum0 = ip_csum_sub_even(*checksum0, ip60->src_address.as_u64[0]);
      csum0 = ip_csum_sub_even(csum0, ip60->src_address.as_u64[1]);
      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[0]);
      csum0 = ip_csum_sub_even(csum0, ip60->dst_address.as_u64[1]);
      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.daddr);
      csum0 = ip_csum_add_even(csum0, vnet_buffer(p0)->map_t.v6.saddr);
      *checksum0 = ip_csum_fold(csum0);

      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset)) {
        //Only the first fragment
        ip6_frag_hdr_t *hdr = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
        fragment_id0 = frag_id_6to4(hdr->identification);
        flags0 = clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS);
      } else {
        fragment_id0 = 0;
        flags0 = 0;
      }

      ip40->dst_address.as_u32 = vnet_buffer(p0)->map_t.v6.daddr;
      ip40->src_address.as_u32 = vnet_buffer(p0)->map_t.v6.saddr;
      ip40->ip_version_and_header_length = IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS;
      ip40->tos = ip6_translate_tos(ip60);
      ip40->length = u16_net_add(ip60->payload_length,
                                 sizeof(*ip40) + sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset);
      ip40->fragment_id = fragment_id0;
      ip40->flags_and_fragment_offset = flags0;
      ip40->ttl = ip60->hop_limit;
      ip40->protocol = vnet_buffer(p0)->map_t.v6.l4_protocol;
      ip40->checksum = ip4_header_checksum(ip40);

      if (vnet_buffer(p0)->map_t.mtu < p0->current_length) {
        //Send to fragmentation node if necessary
        vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
        vnet_buffer(p0)->ip_frag.header_offset = 0;
        vnet_buffer(p0)->ip_frag.next_index = IP4_FRAG_NEXT_IP4_LOOKUP;
        next0 = IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG;
      }

      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
                                      next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  }
  return frame->n_vectors;
}

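//Select the downstream translation node for a packet and extract the
//MAP source port: for TCP and UDP the L4 source port, for ICMP echo
//messages a per-session id read from the echo header, for non-first
//fragments the port cached by the first fragment, and 0 for 1:1
//(ea_bits_len == 0) domains that carry a rule set.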
static_always_inline void
ip6_map_t_classify (vlib_buffer_t *p0, ip6_header_t *ip60,
                    map_domain_t *d0, i32 *src_port0,
                    u8 *error0, ip6_mapt_next_t *next0,
                    u32 l4_len0, ip6_frag_hdr_t *frag0)
{
  if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
                    ip6_frag_hdr_offset(frag0))) {
    *next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
    if (d0->ea_bits_len == 0 && d0->rules) {
      *src_port0 = 0;
    } else {
      *src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
      *error0 = (*src_port0 != -1) ? *error0 : MAP_ERROR_FRAGMENT_DROPPED;
    }
  } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
    *error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : *error0;
    vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
    *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
    *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
  } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
    *error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : *error0;
    vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
    *next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
    *src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
  } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
    *error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : *error0;
    *next0 = IP6_MAPT_NEXT_MAPT_ICMP;
    if (d0->ea_bits_len == 0 && d0->rules) {
      *src_port0 = 0;
    } else if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->type == ICMP6_echo_reply ||
               ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->type == ICMP6_echo_request) {
      *src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
    }
  } else {
    //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
    *error0 = MAP_ERROR_BAD_PROTOCOL;
  }
}

static uword
ip6_map_t (vlib_main_t *vm,
           vlib_node_runtime_t *node,
           vlib_frame_t *frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip6_map_t_node.index);
  vlib_combined_counter_main_t *cm = map_main.domain_counters;
  u32 cpu_index = os_get_cpu_number();

  from = vlib_frame_vector_args(frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;
  while (n_left_from > 0) {
    vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);

#ifdef IP6_MAP_T_DUAL_LOOP
    while (n_left_from >= 4 && n_left_to_next >= 2) {
      u32 pi0, pi1;
      vlib_buffer_t *p0, *p1;
      ip6_header_t *ip60, *ip61;
      u8 error0, error1;
      ip6_mapt_next_t next0, next1;
      u32 l4_len0, l4_len1;
      i32 src_port0, src_port1;
      map_domain_t *d0, *d1;
      ip6_frag_hdr_t *frag0, *frag1;
      u32 saddr0, saddr1;
      next0 = next1 = 0; //Because compiler whines

      pi0 = to_next[0] = from[0];
      pi1 = to_next[1] = from[1];
      from += 2;
      n_left_from -= 2;
      to_next += 2;
      n_left_to_next -= 2;

      error0 = MAP_ERROR_NONE;
      error1 = MAP_ERROR_NONE;

      p0 = vlib_get_buffer(vm, pi0);
      p1 = vlib_get_buffer(vm, pi1);
      ip60 = vlib_buffer_get_current(p0);
      ip61 = vlib_buffer_get_current(p1);

      saddr0 = map_get_ip4(&ip60->src_address);
      saddr1 = map_get_ip4(&ip61->src_address);
      d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
                              (ip4_address_t *)&saddr0,
                              &vnet_buffer(p0)->map_t.map_domain_index, &error0);
      d1 = ip6_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
                              (ip4_address_t *)&saddr1,
                              &vnet_buffer(p1)->map_t.map_domain_index, &error1);

      vnet_buffer(p0)->map_t.v6.saddr = saddr0;
      vnet_buffer(p1)->map_t.v6.saddr = saddr1;
      vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
      vnet_buffer(p1)->map_t.v6.daddr = ip6_map_t_embedded_address(d1, &ip61->dst_address);
      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
      vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;

      if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
                                  &(vnet_buffer(p0)->map_t.v6.l4_protocol),
                                  &(vnet_buffer(p0)->map_t.v6.l4_offset),
                                  &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
        error0 = MAP_ERROR_MALFORMED;
        next0 = IP6_MAPT_NEXT_DROP;
      }

      if (PREDICT_FALSE(ip6_parse(ip61, p1->current_length,
                                  &(vnet_buffer(p1)->map_t.v6.l4_protocol),
                                  &(vnet_buffer(p1)->map_t.v6.l4_offset),
                                  &(vnet_buffer(p1)->map_t.v6.frag_offset)))) {
        error1 = MAP_ERROR_MALFORMED;
        next1 = IP6_MAPT_NEXT_DROP;
      }

      src_port0 = src_port1 = -1;
      l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
        sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
      l4_len1 = (u32)clib_net_to_host_u16(ip61->payload_length) +
        sizeof(*ip60) - vnet_buffer(p1)->map_t.v6.l4_offset;
      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);
      frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset);

      ip6_map_t_classify(p0, ip60, d0, &src_port0, &error0, &next0, l4_len0, frag0);
      ip6_map_t_classify(p1, ip61, d1, &src_port1, &error1, &next1, l4_len1, frag1);

      if (PREDICT_FALSE((src_port0 != -1) && (
          ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
          ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
        error0 = MAP_ERROR_SEC_CHECK;
      }

      if (PREDICT_FALSE((src_port1 != -1) && (
          ip61->src_address.as_u64[0] != map_get_pfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1) ||
          ip61->src_address.as_u64[1] != map_get_sfx_net(d1, vnet_buffer(p1)->map_t.v6.saddr, src_port1)))) {
        error1 = MAP_ERROR_SEC_CHECK;
      }

      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
                                             u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
          (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) {
        ip6_map_fragment_cache(ip60,
                               (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
                               d0, src_port0);
      }

      if (PREDICT_FALSE(vnet_buffer(p1)->map_t.v6.frag_offset &&
                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
                                             u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset))) &&
          (src_port1 != -1) && (d1->ea_bits_len != 0 || !d1->rules) && (error1 == MAP_ERROR_NONE)) {
        ip6_map_fragment_cache(ip61,
                               (ip6_frag_hdr_t *)u8_ptr_add(ip61, vnet_buffer(p1)->map_t.v6.frag_offset),
                               d1, src_port1);
      }

      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
                                        clib_net_to_host_u16(ip60->payload_length));
      }

      if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP6_MAPT_NEXT_MAPT_ICMP)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
                                        vnet_buffer(p1)->map_t.map_domain_index, 1,
                                        clib_net_to_host_u16(ip61->payload_length));
      }

      next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
      next1 = (error1 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next1;
      p0->error = error_node->errors[error0];
      p1->error = error_node->errors[error1];
      vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, pi0, pi1, next0, next1);
    }
#endif

    while (n_left_from > 0 && n_left_to_next > 0) {
      u32 pi0;
      vlib_buffer_t *p0;
      ip6_header_t *ip60;
      u8 error0;
      u32 l4_len0;
      i32 src_port0;
      map_domain_t *d0;
      ip6_frag_hdr_t *frag0;
      ip6_mapt_next_t next0 = 0;
      u32 saddr;

      pi0 = to_next[0] = from[0];
      from += 1;
      n_left_from -= 1;
      to_next += 1;
      n_left_to_next -= 1;
      error0 = MAP_ERROR_NONE;

      p0 = vlib_get_buffer(vm, pi0);
      ip60 = vlib_buffer_get_current(p0);
      //Save saddr in a different variable to not overwrite ip.adj_index
      saddr = map_get_ip4(&ip60->src_address);
      d0 = ip6_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
                              (ip4_address_t *)&saddr,
                              &vnet_buffer(p0)->map_t.map_domain_index, &error0);

      //FIXME: What if d0 is null
      vnet_buffer(p0)->map_t.v6.saddr = saddr;
      vnet_buffer(p0)->map_t.v6.daddr = ip6_map_t_embedded_address(d0, &ip60->dst_address);
      vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;

      if (PREDICT_FALSE(ip6_parse(ip60, p0->current_length,
                                  &(vnet_buffer(p0)->map_t.v6.l4_protocol),
                                  &(vnet_buffer(p0)->map_t.v6.l4_offset),
                                  &(vnet_buffer(p0)->map_t.v6.frag_offset)))) {
        error0 = MAP_ERROR_MALFORMED;
        next0 = IP6_MAPT_NEXT_DROP;
      }

      src_port0 = -1;
      l4_len0 = (u32)clib_net_to_host_u16(ip60->payload_length) +
        sizeof(*ip60) - vnet_buffer(p0)->map_t.v6.l4_offset;
      frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset);

      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
                        ip6_frag_hdr_offset(frag0))) {
        src_port0 = ip6_map_fragment_get(ip60, frag0, d0);
        error0 = (src_port0 != -1) ? error0 : MAP_ERROR_FRAGMENT_MEMORY;
        next0 = IP6_MAPT_NEXT_MAPT_FRAGMENTED;
      } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_TCP)) {
        error0 = l4_len0 < sizeof(tcp_header_t) ? MAP_ERROR_MALFORMED : error0;
        vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 16;
        next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
        src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
      } else if (PREDICT_TRUE(vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_UDP)) {
        error0 = l4_len0 < sizeof(udp_header_t) ? MAP_ERROR_MALFORMED : error0;
        vnet_buffer(p0)->map_t.checksum_offset = vnet_buffer(p0)->map_t.v6.l4_offset + 6;
        next0 = IP6_MAPT_NEXT_MAPT_TCP_UDP;
        src_port0 = (i32) *((u16*)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset));
      } else if (vnet_buffer(p0)->map_t.v6.l4_protocol == IP_PROTOCOL_ICMP6) {
        error0 = l4_len0 < sizeof(icmp46_header_t) ? MAP_ERROR_MALFORMED : error0;
        next0 = IP6_MAPT_NEXT_MAPT_ICMP;
        if (((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->type == ICMP6_echo_reply ||
            ((icmp46_header_t *) u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset))->type == ICMP6_echo_request)
          src_port0 = (i32) *((u16 *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.l4_offset + 6));
      } else {
        //TODO: In case of 1:1 mapping, it might be possible to do something with those packets.
        error0 = MAP_ERROR_BAD_PROTOCOL;
      }

      //Security check
      if (PREDICT_FALSE((src_port0 != -1) && (
          ip60->src_address.as_u64[0] != map_get_pfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0) ||
          ip60->src_address.as_u64[1] != map_get_sfx_net(d0, vnet_buffer(p0)->map_t.v6.saddr, src_port0)))) {
        //Security check when src_port0 is set (non-first fragment, UDP or TCP)
        error0 = MAP_ERROR_SEC_CHECK;
      }

      //Fragmented first packet needs to be cached for following packets
      if (PREDICT_FALSE(vnet_buffer(p0)->map_t.v6.frag_offset &&
                        !ip6_frag_hdr_offset((ip6_frag_hdr_t *)
                                             u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset))) &&
          (src_port0 != -1) && (d0->ea_bits_len != 0 || !d0->rules) && (error0 == MAP_ERROR_NONE)) {
        ip6_map_fragment_cache(ip60,
                               (ip6_frag_hdr_t *)u8_ptr_add(ip60, vnet_buffer(p0)->map_t.v6.frag_offset),
                               d0, src_port0);
      }

      if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP6_MAPT_NEXT_MAPT_ICMP)) {
        vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
                                        vnet_buffer(p0)->map_t.map_domain_index, 1,
                                        clib_net_to_host_u16(ip60->payload_length));
      }

      next0 = (error0 != MAP_ERROR_NONE) ? IP6_MAPT_NEXT_DROP : next0;
      p0->error = error_node->errors[error0];
      vlib_validate_buffer_enqueue_x1(vm, node, next_index,
                                      to_next, n_left_to_next, pi0,
                                      next0);
    }
    vlib_put_next_frame(vm, node, next_index, n_left_to_next);
  }
  return frame->n_vectors;
}
static char *map_t_error_strings[] = {
#define _(sym,string) string,
  foreach_map_error
#undef _
};

VLIB_REGISTER_NODE(ip6_map_t_fragmented_node) = {
  .function = ip6_map_t_fragmented,
  .name = "ip6-map-t-fragmented",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_FRAGMENTED_N_NEXT,
  .next_nodes = {
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_FRAGMENTED_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_icmp_node) = {
  .function = ip6_map_t_icmp,
  .name = "ip6-map-t-icmp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_ICMP_N_NEXT,
  .next_nodes = {
    [IP6_MAPT_ICMP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_ICMP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_ICMP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_tcp_udp_node) = {
  .function = ip6_map_t_tcp_udp,
  .name = "ip6-map-t-tcp-udp",
  .vector_size = sizeof (u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_TCP_UDP_N_NEXT,
  .next_nodes = {
    [IP6_MAPT_TCP_UDP_NEXT_IP4_LOOKUP] = "ip4-lookup",
    [IP6_MAPT_TCP_UDP_NEXT_IP4_FRAG] = IP4_FRAG_NODE_NAME,
    [IP6_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
  },
};

VLIB_REGISTER_NODE(ip6_map_t_node) = {
  .function = ip6_map_t,
  .name = "ip6-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP6_MAPT_N_NEXT,
  .next_nodes = {
    [IP6_MAPT_NEXT_MAPT_TCP_UDP] = "ip6-map-t-tcp-udp",
    [IP6_MAPT_NEXT_MAPT_ICMP] = "ip6-map-t-icmp",
    [IP6_MAPT_NEXT_MAPT_FRAGMENTED] = "ip6-map-t-fragmented",
    [IP6_MAPT_NEXT_DROP] = "error-drop",
  },
};