FD.io VPP  v16.06
Vector Packet Processing
ip4_map_t.c
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 
19 #define IP4_MAP_T_DUAL_LOOP 1
20 
21 typedef enum {
22  IP4_MAPT_NEXT_MAPT_TCP_UDP,
23  IP4_MAPT_NEXT_MAPT_ICMP,
24  IP4_MAPT_NEXT_MAPT_FRAGMENTED,
25  IP4_MAPT_NEXT_DROP,
26  IP4_MAPT_N_NEXT
27 } ip4_mapt_next_t;
28 
29 typedef enum {
30  IP4_MAPT_ICMP_NEXT_IP6_LOOKUP,
31  IP4_MAPT_ICMP_NEXT_IP6_FRAG,
32  IP4_MAPT_ICMP_NEXT_DROP,
33  IP4_MAPT_ICMP_N_NEXT
34 } ip4_mapt_icmp_next_t;
35 
36 typedef enum {
37  IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP,
38  IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG,
39  IP4_MAPT_TCP_UDP_NEXT_DROP,
40  IP4_MAPT_TCP_UDP_N_NEXT
41 } ip4_mapt_tcp_udp_next_t;
42 
43 typedef enum {
44  IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP,
45  IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG,
46  IP4_MAPT_FRAGMENTED_NEXT_DROP,
47  IP4_MAPT_FRAGMENTED_N_NEXT
48 } ip4_mapt_fragmented_next_t;
49 
50 //This is used to pass information within the buffer data,
51 //because the buffer structure is too small to contain structures this large.
52 typedef CLIB_PACKED(struct {
53  ip6_address_t daddr;
54  ip6_address_t saddr;
55  //IPv6 header + Fragmentation header will be here
56  //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
57  u8 unused[28];
58 }) ip4_mapt_pseudo_header_t;
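//Illustrative note, not part of the original source: the 28 spare bytes above match
//sizeof(ip6_header_t) + sizeof(ip6_frag_hdr_t) - sizeof(ip4_header_t) = 40 + 8 - 20 = 28,
//i.e. exactly the extra room needed to grow an IPv4 header into an IPv6 header plus a
//fragmentation header in place, as the comment inside the structure indicates.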
59 
60 #define frag_id_4to6(id) (id)
61 
62 //TODO: Find the right place in memory for this.
63 static u8 icmp_to_icmp6_updater_pointer_table[] =
64  { 0, 1, 4, 4,~0,
65  ~0,~0,~0, 7, 6,
66  ~0,~0, 8, 8, 8,
67  8, 24, 24, 24, 24 };
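//Illustrative note, not part of the original source: this table maps an ICMPv4
//parameter-problem pointer (the byte offset of the offending IPv4 header field) to the
//corresponding ICMPv6 pointer, with ~0 meaning "no IPv6 equivalent, drop the packet".
//For example, an IPv4 pointer of 8 (TTL) maps to 7 (IPv6 Hop Limit), 9 (Protocol) maps
//to 6 (Next Header), and 12..15 (IPv4 source address) all map to 8 (IPv6 source address).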
68 
69 
70 static_always_inline int
71 ip4_map_fragment_cache (ip4_header_t *ip4, u16 port)
72 {
73  u32 *ignore = NULL;
74  map_ip4_reass_lock();
75  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
76  ip4->fragment_id,
77  (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
78  &ignore);
79  if (r)
80  r->port = port;
81 
82  map_ip4_reass_unlock();
83  return !r;
84 }
85 
86 static_always_inline i32
87 ip4_map_fragment_get_port (ip4_header_t *ip4)
88 {
89  u32 *ignore = NULL;
90  map_ip4_reass_lock();
91  map_ip4_reass_t *r = map_ip4_reass_get(ip4->src_address.as_u32, ip4->dst_address.as_u32,
92  ip4->fragment_id,
93  (ip4->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
94  &ignore);
95  i32 ret = r?r->port:-1;
96  map_ip4_reass_unlock();
97  return ret;
98 }
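//Illustrative usage sketch, not part of the original source: these two helpers carry the
//destination port of a first fragment over to its non-first fragments, which have no L4
//header of their own. A caller is expected to do roughly:
//
//  if (ip4_is_first_fragment(ip4))
//    ip4_map_fragment_cache(ip4, dst_port);        //remember the port for this flow
//  else
//    dst_port = ip4_map_fragment_get_port(ip4);    //-1 if no entry has been cached yet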
99 
100 
101 /* Statelessly translates an ICMP packet into ICMPv6.
102  *
103  * Warning: The checksum will need to be recomputed.
104  *
105  */
106 static_always_inline int
107 ip4_icmp_to_icmp6_in_place (icmp46_header_t *icmp, u32 icmp_len,
108  i32 *receiver_port, ip4_header_t **inner_ip4)
109 {
110  *inner_ip4 = NULL;
111  switch (icmp->type) {
112  case ICMP4_echo_reply:
113  *receiver_port = ((u16 *)icmp)[2];
114  icmp->type = ICMP6_echo_reply;
115  break;
116  case ICMP4_echo_request:
117  *receiver_port = ((u16 *)icmp)[2];
118  icmp->type = ICMP6_echo_request;
119  break;
120  case ICMP4_destination_unreachable:
121  *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
122  *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
123 
124  switch (icmp->code) {
125  case ICMP4_destination_unreachable_destination_unreachable_net: //0
126  case ICMP4_destination_unreachable_destination_unreachable_host: //1
127  icmp->type = ICMP6_destination_unreachable;
128  icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
129  break;
130  case ICMP4_destination_unreachable_protocol_unreachable: //2
131  icmp->type = ICMP6_parameter_problem;
132  icmp->code = ICMP6_parameter_problem_unrecognized_next_header;
133  break;
134  case ICMP4_destination_unreachable_port_unreachable: //3
135  icmp->type = ICMP6_destination_unreachable;
136  icmp->code = ICMP6_destination_unreachable_port_unreachable;
137  break;
138  case ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set: //4
139  icmp->type = ICMP6_packet_too_big;
140  icmp->code = 0;
141  {
142  u32 advertised_mtu = clib_net_to_host_u32(*((u32 *)(icmp + 1)));
143  if (advertised_mtu)
144  advertised_mtu += 20;
145  else
146  advertised_mtu = 1000; //FIXME ! (RFC 1191 - plateau value)
147 
148  //FIXME: = minimum(advertised MTU+20, MTU_of_IPv6_nexthop, (MTU_of_IPv4_nexthop)+20)
149  *((u32 *)(icmp + 1)) = clib_host_to_net_u32(advertised_mtu);
150  }
151  break;
152 
153  case ICMP4_destination_unreachable_source_route_failed: //5
154  case ICMP4_destination_unreachable_destination_network_unknown: //6
155  case ICMP4_destination_unreachable_destination_host_unknown: //7
156  case ICMP4_destination_unreachable_source_host_isolated: //8
157  case ICMP4_destination_unreachable_network_unreachable_for_type_of_service: //11
158  case ICMP4_destination_unreachable_host_unreachable_for_type_of_service: //12
159  icmp->type = ICMP6_destination_unreachable;
160  icmp->code = ICMP6_destination_unreachable_no_route_to_destination;
161  break;
162  case ICMP4_destination_unreachable_network_administratively_prohibited: //9
163  case ICMP4_destination_unreachable_host_administratively_prohibited: //10
164  case ICMP4_destination_unreachable_communication_administratively_prohibited: //13
165  case ICMP4_destination_unreachable_precedence_cutoff_in_effect: //15
166  icmp->type = ICMP6_destination_unreachable;
167  icmp->code = ICMP6_destination_unreachable_destination_administratively_prohibited;
168  break;
169  case ICMP4_destination_unreachable_host_precedence_violation: //14
170  default:
171  return -1;
172  }
173  break;
174 
175  case ICMP4_time_exceeded: //11
176  *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
177  *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
178  icmp->type = ICMP6_time_exceeded;
179  //icmp->code = icmp->code //unchanged
180  break;
181 
182  case ICMP4_parameter_problem:
183  *inner_ip4 = (ip4_header_t *)(((u8 *) icmp) + 8);
184  *receiver_port = ip4_get_port(*inner_ip4, MAP_SENDER, icmp_len - 8);
185 
186  switch (icmp->code) {
187  case ICMP4_parameter_problem_pointer_indicates_error:
188  case ICMP4_parameter_problem_bad_length:
189  icmp->type = ICMP6_parameter_problem;
190  icmp->code = ICMP6_parameter_problem_erroneous_header_field;
191  {
192  u8 ptr = icmp_to_icmp6_updater_pointer_table[*((u8 *)(icmp + 1))];
193  if (ptr == 0xff)
194  return -1;
195 
196  *((u32 *)(icmp + 1)) = clib_host_to_net_u32(ptr);
197  }
198  break;
199  default:
200  //All other codes cause dropping the packet
201  return -1;
202  }
203  break;
204 
205  default:
206  //All other types cause dropping the packet
207  return -1;
208  break;
209  }
210  return 0;
211 }
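//Illustrative summary, not part of the original source: echo request/reply are mapped 1:1
//to their ICMPv6 counterparts; destination unreachable, time exceeded and parameter problem
//are remapped per code (with "fragmentation needed" becoming "packet too big"); any other
//type or code makes the function return -1 so that the caller drops the packet. The ICMP
//checksum is deliberately left stale and must be recomputed by the caller over the IPv6
//pseudo-header, as the comment above the function warns.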
212 
213 static_always_inline void
214 _ip4_map_t_icmp (map_domain_t *d, vlib_buffer_t *p, u8 *error)
215 {
216  ip4_header_t *ip4, *inner_ip4;
217  ip6_header_t *ip6, *inner_ip6;
218  u32 ip_len;
219  icmp46_header_t *icmp;
220  i32 recv_port;
221  ip_csum_t csum;
222  u16 *inner_L4_checksum = 0;
223  ip6_frag_hdr_t *inner_frag;
224  u32 inner_frag_id;
225  u32 inner_frag_offset;
226  u8 inner_frag_more;
227 
228  ip4 = vlib_buffer_get_current(p);
229  ip_len = clib_net_to_host_u16(ip4->length);
230  ASSERT(ip_len <= p->current_length);
231 
232  icmp = (icmp46_header_t *)(ip4 + 1);
233  if (ip4_icmp_to_icmp6_in_place(icmp, ip_len - sizeof(*ip4),
234  &recv_port, &inner_ip4)) {
235  *error = MAP_ERROR_ICMP;
236  return;
237  }
238 
239  if (recv_port < 0) {
240  // In case of 1:1 mapping, we don't care about the port
241  if(d->ea_bits_len == 0 && d->rules) {
242  recv_port = 0;
243  } else {
244  *error = MAP_ERROR_ICMP;
245  return;
246  }
247  }
248 
249  if (inner_ip4) {
250  //We have 2 headers to translate.
251  //We need to make some room in the middle of the packet
252 
253  if (PREDICT_FALSE(ip4_is_fragment(inner_ip4))) {
254  //Here it starts getting really tricky
255  //We will add a fragmentation header in the inner packet
256 
257  if (!ip4_is_first_fragment(inner_ip4)) {
258  //For now, we only handle the case where this is the first fragment.
259  //Ideally we should handle the other cases too, since we are already in the slow path.
260  *error = MAP_ERROR_FRAGMENTED;
261  return;
262  }
263 
264  vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)) - sizeof(*inner_frag));
265  ip6 = vlib_buffer_get_current(p);
266  clib_memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
267  ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
268  icmp = (icmp46_header_t *) (ip4 + 1);
269 
270  inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6) - sizeof(*inner_frag));
271  inner_frag = (ip6_frag_hdr_t *) u8_ptr_add(inner_ip6, sizeof(*inner_ip6));
272  ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4) + sizeof(*inner_frag));
273  inner_frag_id = frag_id_4to6(inner_ip4->fragment_id);
274  inner_frag_offset = ip4_get_fragment_offset(inner_ip4);
275  inner_frag_more = !!(inner_ip4->flags_and_fragment_offset & clib_net_to_host_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS));
276  } else {
277  vlib_buffer_advance(p, - 2*(sizeof(*ip6) - sizeof(*ip4)));
278  ip6 = vlib_buffer_get_current(p);
279  clib_memcpy(u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4)), ip4, 20 + 8);
280  ip4 = (ip4_header_t *) u8_ptr_add(ip6, sizeof(*ip6) - sizeof(*ip4));
281  icmp = (icmp46_header_t *) u8_ptr_add(ip4, sizeof(*ip4));
282  inner_ip6 = (ip6_header_t *) u8_ptr_add(inner_ip4, sizeof(*ip4) - sizeof(*ip6));
283  ip6->payload_length = u16_net_add(ip4->length, sizeof(*ip6) - 2*sizeof(*ip4));
284  inner_frag = NULL;
285  }
286 
287  if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_TCP)) {
288  inner_L4_checksum = &((tcp_header_t *) (inner_ip4 + 1))->checksum;
289  *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *) (&inner_ip4->src_address))));
290  } else if (PREDICT_TRUE(inner_ip4->protocol == IP_PROTOCOL_UDP)) {
291  inner_L4_checksum = &((udp_header_t *) (inner_ip4 + 1))->checksum;
292  if (!*inner_L4_checksum) {
293  //The inner packet was translated from IPv6 in the first place.
294  //Since it was an IPv6 packet, its UDP checksum cannot be zero.
295  *error = MAP_ERROR_ICMP;
296  return;
297  }
298  *inner_L4_checksum = ip_csum_fold(ip_csum_sub_even(*inner_L4_checksum, *((u64 *)(&inner_ip4->src_address))));
299  } else if (inner_ip4->protocol == IP_PROTOCOL_ICMP) {
300  //We have an ICMP packet inside an ICMP packet.
301  //It needs to be translated, but only echo messages (not ICMP errors) can appear here.
302  icmp46_header_t *inner_icmp = (icmp46_header_t *) (inner_ip4 + 1);
303  csum = inner_icmp->checksum;
304  //Only types ICMP4_echo_request and ICMP4_echo_reply are handled by ip4_icmp_to_icmp6_in_place
305  csum = ip_csum_sub_even(csum, *((u16 *)inner_icmp));
306  inner_icmp->type = (inner_icmp->type == ICMP4_echo_request)?
307  ICMP6_echo_request:ICMP6_echo_reply;
308  csum = ip_csum_add_even(csum, *((u16 *)inner_icmp));
309  csum = ip_csum_add_even(csum, clib_host_to_net_u16(IP_PROTOCOL_ICMP6));
310  csum = ip_csum_add_even(csum, inner_ip4->length - sizeof(*inner_ip4));
311  inner_icmp->checksum = ip_csum_fold(csum);
312  inner_L4_checksum = &inner_icmp->checksum;
313  inner_ip4->protocol = IP_PROTOCOL_ICMP6;
314  } else {
315  ASSERT(0); // A port was found earlier, so the protocol must be TCP, UDP or ICMP
316  }
317 
318  //FIXME: Security check with the port found in the inner packet
319 
320  csum = *inner_L4_checksum; //Initial checksum of the inner L4 header
321  //FIXME: Shouldn't we remove ip addresses from there ?
322 
323  inner_ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (inner_ip4->tos << 20));
324  inner_ip6->payload_length = u16_net_add(inner_ip4->length, - sizeof(*inner_ip4));
325  inner_ip6->hop_limit = inner_ip4->ttl;
326  inner_ip6->protocol = inner_ip4->protocol;
327 
328  //Note that the source address is within the domain
329  //while the destination address is the one outside the domain
330  ip4_map_t_embedded_address(d, &inner_ip6->dst_address, &inner_ip4->dst_address);
331  inner_ip6->src_address.as_u64[0] = map_get_pfx_net(d, inner_ip4->src_address.as_u32, recv_port);
332  inner_ip6->src_address.as_u64[1] = map_get_sfx_net(d, inner_ip4->src_address.as_u32, recv_port);
333 
334  if (PREDICT_FALSE(inner_frag != NULL)) {
335  inner_frag->next_hdr = inner_ip6->protocol;
336  inner_frag->identification = inner_frag_id;
337  inner_frag->rsv = 0;
338  inner_frag->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(inner_frag_offset, inner_frag_more);
339  inner_ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
340  inner_ip6->payload_length = clib_host_to_net_u16(
341  clib_net_to_host_u16(inner_ip6->payload_length) + sizeof(*inner_frag));
342  }
343 
344  csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[0]);
345  csum = ip_csum_add_even(csum, inner_ip6->src_address.as_u64[1]);
346  csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[0]);
347  csum = ip_csum_add_even(csum, inner_ip6->dst_address.as_u64[1]);
348  *inner_L4_checksum = ip_csum_fold(csum);
349 
350  } else {
351  vlib_buffer_advance(p, sizeof(*ip4) - sizeof(*ip6));
352  ip6 = vlib_buffer_get_current(p);
353  ip6->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip4->length) - sizeof(*ip4));
354  }
355 
356  //Translate outer IPv6
357  ip6->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip4->tos << 20));
358 
359  ip6->hop_limit = ip4->ttl;
360  ip6->protocol = IP_PROTOCOL_ICMP6;
361 
362  ip4_map_t_embedded_address(d, &ip6->src_address, &ip4->src_address);
363  ip6->dst_address.as_u64[0] = map_get_pfx_net(d, ip4->dst_address.as_u32, recv_port);
364  ip6->dst_address.as_u64[1] = map_get_sfx_net(d, ip4->dst_address.as_u32, recv_port);
365 
366  //Truncate when the packet exceeds the minimal IPv6 MTU
367  if (p->current_length > 1280) {
368  ip6->payload_length = clib_host_to_net_u16(1280 - sizeof(*ip6));
369  p->current_length = 1280; //Looks too simple to be correct...
370  }
371 
372  //TODO: We could do an easy diff-checksum for echo requests/replies
373  //Recompute ICMP checksum
374  icmp->checksum = 0;
375  csum = ip_csum_with_carry(0, ip6->payload_length);
376  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(ip6->protocol));
377  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[0]);
378  csum = ip_csum_with_carry(csum, ip6->src_address.as_u64[1]);
379  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[0]);
380  csum = ip_csum_with_carry(csum, ip6->dst_address.as_u64[1]);
381  csum = ip_incremental_checksum(csum, icmp, clib_net_to_host_u16(ip6->payload_length));
382  icmp->checksum = ~ip_csum_fold (csum);
383 }
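//Illustrative note, not part of the original source: the translated packet is clamped to
//1280 bytes above because it must not exceed the minimal IPv6 MTU, and the final ICMPv6
//checksum is computed over the IPv6 pseudo-header (payload length, next header and both
//addresses) followed by the ICMPv6 message, as ICMPv6 requires.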
384 
385 static uword
386 ip4_map_t_icmp (vlib_main_t *vm,
387  vlib_node_runtime_t *node,
388  vlib_frame_t *frame)
389 {
390  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
391  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_icmp_node.index);
392  from = vlib_frame_vector_args(frame);
393  n_left_from = frame->n_vectors;
394  next_index = node->cached_next_index;
395  vlib_combined_counter_main_t *cm = map_main.domain_counters;
396  u32 cpu_index = os_get_cpu_number();
397 
398  while (n_left_from > 0) {
399  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
400 
401  while (n_left_from > 0 && n_left_to_next > 0) {
402  u32 pi0;
403  vlib_buffer_t *p0;
404  ip4_mapt_icmp_next_t next0;
405  u8 error0;
406  map_domain_t *d0;
407  u16 len0;
408 
409  next0 = IP4_MAPT_ICMP_NEXT_IP6_LOOKUP;
410  pi0 = to_next[0] = from[0];
411  from += 1;
412  n_left_from -= 1;
413  to_next +=1;
414  n_left_to_next -= 1;
415  error0 = MAP_ERROR_NONE;
416 
417  p0 = vlib_get_buffer(vm, pi0);
418  vlib_buffer_advance(p0, sizeof(ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
419  len0 = clib_net_to_host_u16(((ip4_header_t *)vlib_buffer_get_current(p0))->length);
420  d0 = pool_elt_at_index(map_main.domains, vnet_buffer(p0)->map_t.map_domain_index);
421  _ip4_map_t_icmp(d0, p0, &error0);
422 
423  if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
424  vnet_buffer(p0)->ip_frag.header_offset = 0;
425  vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
426  vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
427  next0 = IP4_MAPT_ICMP_NEXT_IP6_FRAG;
428  }
429  if (PREDICT_TRUE(error0 == MAP_ERROR_NONE)) {
430  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
431  vnet_buffer(p0)->map_t.map_domain_index, 1,
432  len0);
433  } else {
434  next0 = IP4_MAPT_ICMP_NEXT_DROP;
435  }
436  p0->error = error_node->errors[error0];
437  vlib_validate_buffer_enqueue_x1(vm, node, next_index,
438  to_next, n_left_to_next, pi0,
439  next0);
440  }
441  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
442  }
443  return frame->n_vectors;
444 }
445 
446 static uword
447 ip4_map_t_fragmented (vlib_main_t *vm,
448  vlib_node_runtime_t *node,
449  vlib_frame_t *frame)
450 {
451  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
452  from = vlib_frame_vector_args(frame);
453  n_left_from = frame->n_vectors;
454  next_index = node->cached_next_index;
455 
456  while (n_left_from > 0) {
457  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
458 
459  while (n_left_from > 0 && n_left_to_next > 0) {
460  u32 pi0;
461  vlib_buffer_t *p0;
462  ip4_header_t *ip40;
463  ip6_header_t *ip60;
464  ip6_frag_hdr_t *frag0;
465  ip4_mapt_pseudo_header_t *pheader0;
466  ip4_mapt_fragmented_next_t next0;
467 
468  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP;
469  pi0 = to_next[0] = from[0];
470  from += 1;
471  n_left_from -= 1;
472  to_next +=1;
473  n_left_to_next -= 1;
474 
475  p0 = vlib_get_buffer(vm, pi0);
476 
477  //Accessing pseudo header
478  pheader0 = vlib_buffer_get_current(p0);
479  vlib_buffer_advance(p0, sizeof(*pheader0));
480 
481  //Accessing ip4 header
482  ip40 = vlib_buffer_get_current(p0);
483  frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
484  ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0) - sizeof(*ip60));
485  vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
486 
487  //We know that the protocol was one of ICMP, TCP or UDP
488  //because the first fragment was found and cached
489  frag0->next_hdr = (ip40->protocol == IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip40->protocol;
490  frag0->identification = frag_id_4to6(ip40->fragment_id);
491  frag0->rsv = 0;
492  frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(
493  ip4_get_fragment_offset(ip40),
494  clib_net_to_host_u16(ip40->flags_and_fragment_offset) & IP4_HEADER_FLAG_MORE_FRAGMENTS);
495 
496  ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
497  ip60->payload_length = clib_host_to_net_u16(clib_net_to_host_u16(ip40->length) - sizeof(*ip40) + sizeof(*frag0));
498  ip60->hop_limit = ip40->ttl;
499  ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
500  ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
501  ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
502  ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
503  ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
504 
505  if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
506  vnet_buffer(p0)->ip_frag.header_offset = 0;
507  vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
508  vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
509  next0 = IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG;
510  }
511 
512  vlib_validate_buffer_enqueue_x1(vm, node, next_index,
513  to_next, n_left_to_next, pi0,
514  next0);
515  }
516  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
517  }
518  return frame->n_vectors;
519 }
520 
521 static uword
522 ip4_map_t_tcp_udp (vlib_main_t *vm,
523  vlib_node_runtime_t *node,
524  vlib_frame_t *frame)
525 {
526  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
527  from = vlib_frame_vector_args(frame);
528  n_left_from = frame->n_vectors;
529  next_index = node->cached_next_index;
530 
531  while (n_left_from > 0) {
532  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
533 
534 #ifdef IP4_MAP_T_DUAL_LOOP
535  while (n_left_from >= 4 && n_left_to_next >= 2) {
536  u32 pi0, pi1;
537  vlib_buffer_t *p0, *p1;
538  ip4_header_t *ip40, *ip41;
539  ip6_header_t *ip60, *ip61;
540  ip_csum_t csum0, csum1;
541  u16 *checksum0, *checksum1;
542  ip6_frag_hdr_t *frag0, *frag1;
543  u32 frag_id0, frag_id1;
544  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
545  ip4_mapt_tcp_udp_next_t next0, next1;
546 
547  pi0 = to_next[0] = from[0];
548  pi1 = to_next[1] = from[1];
549  from += 2;
550  n_left_from -= 2;
551  to_next +=2;
552  n_left_to_next -= 2;
553 
554  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
555  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
556  p0 = vlib_get_buffer(vm, pi0);
557  p1 = vlib_get_buffer(vm, pi1);
558 
559  //Accessing pseudo header
560  pheader0 = vlib_buffer_get_current(p0);
561  pheader1 = vlib_buffer_get_current(p1);
562  vlib_buffer_advance(p0, sizeof(*pheader0));
563  vlib_buffer_advance(p1, sizeof(*pheader1));
564 
565  //Accessing ip4 header
566  ip40 = vlib_buffer_get_current(p0);
567  ip41 = vlib_buffer_get_current(p1);
568  checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);
569  checksum1 = (u16 *) u8_ptr_add(ip41, vnet_buffer(p1)->map_t.checksum_offset);
570 
571  //UDP checksum is optional over IPv4 but mandatory for IPv6
572  //We do not check udp->length sanity but use our safe computed value instead
573  if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
574  u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
575  udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
576  ip_csum_t csum;
577  csum = ip_incremental_checksum(0, udp, udp_len);
578  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
579  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
580  csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
581  *checksum0 = ~ip_csum_fold(csum);
582  }
583  if (PREDICT_FALSE(!*checksum1 && ip41->protocol == IP_PROTOCOL_UDP)) {
584  u16 udp_len = clib_host_to_net_u16(ip41->length) - sizeof(*ip40);
585  udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip41, sizeof(*ip40));
586  ip_csum_t csum;
587  csum = ip_incremental_checksum(0, udp, udp_len);
588  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
589  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
590  csum = ip_csum_with_carry(csum, *((u64 *)(&ip41->src_address)));
591  *checksum1 = ~ip_csum_fold(csum);
592  }
593 
594  csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
595  csum1 = ip_csum_sub_even(*checksum1, ip41->src_address.as_u32);
596  csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);
597  csum1 = ip_csum_sub_even(csum1, ip41->dst_address.as_u32);
598 
599  // Deal with fragmented packets
600  if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
601  clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
602  ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
603  frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
604  frag_id0 = frag_id_4to6(ip40->fragment_id);
605  vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
606  } else {
607  ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
608  vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));
609  frag0 = NULL;
610  }
611 
612  if (PREDICT_FALSE(ip41->flags_and_fragment_offset &
613  clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
614  ip61 = (ip6_header_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
615  frag1 = (ip6_frag_hdr_t *) u8_ptr_add(ip41, sizeof(*ip40) - sizeof(*frag0));
616  frag_id1 = frag_id_4to6(ip41->fragment_id);
617  vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
618  } else {
619  ip61 = (ip6_header_t *) (((u8 *)ip41) + sizeof(*ip40) - sizeof(*ip60));
620  vlib_buffer_advance(p1, sizeof(*ip40) - sizeof(*ip60));
621  frag1 = NULL;
622  }
623 
624  ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
625  ip61->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip41->tos << 20));
626  ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
627  ip61->payload_length = u16_net_add(ip41->length, - sizeof(*ip40));
628  ip60->hop_limit = ip40->ttl;
629  ip61->hop_limit = ip41->ttl;
630  ip60->protocol = ip40->protocol;
631  ip61->protocol = ip41->protocol;
632 
633  if (PREDICT_FALSE(frag0 != NULL)) {
634  frag0->next_hdr = ip60->protocol;
635  frag0->identification = frag_id0;
636  frag0->rsv = 0;
637  frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
638  ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
639  ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));
640  }
641 
642  if (PREDICT_FALSE(frag1 != NULL)) {
643  frag1->next_hdr = ip61->protocol;
644  frag1->identification = frag_id1;
645  frag1->rsv = 0;
646  frag1->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
647  ip61->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
648  ip61->payload_length = u16_net_add(ip61->payload_length, sizeof(*frag0));
649  }
650 
651  //Finally copying the address
652  ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
653  ip61->dst_address.as_u64[0] = pheader1->daddr.as_u64[0];
654  ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
655  ip61->dst_address.as_u64[1] = pheader1->daddr.as_u64[1];
656  ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
657  ip61->src_address.as_u64[0] = pheader1->saddr.as_u64[0];
658  ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
659  ip61->src_address.as_u64[1] = pheader1->saddr.as_u64[1];
660 
661  csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
662  csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[0]);
663  csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
664  csum1 = ip_csum_add_even(csum1, ip61->src_address.as_u64[1]);
665  csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
666  csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[0]);
667  csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
668  csum1 = ip_csum_add_even(csum1, ip61->dst_address.as_u64[1]);
669  *checksum0 = ip_csum_fold(csum0);
670  *checksum1 = ip_csum_fold(csum1);
671 
672  if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
673  vnet_buffer(p0)->ip_frag.header_offset = 0;
674  vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
675  vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
676  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
677  }
678 
679  if(vnet_buffer(p1)->map_t.mtu < p1->current_length) {
680  vnet_buffer(p1)->ip_frag.header_offset = 0;
681  vnet_buffer(p1)->ip_frag.mtu = vnet_buffer(p1)->map_t.mtu;
682  vnet_buffer(p1)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
683  next1 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
684  }
685 
686  vlib_validate_buffer_enqueue_x2(vm, node, next_index,
687  to_next, n_left_to_next, pi0, pi1,
688  next0, next1);
689  }
690 #endif
691 
692  while (n_left_from > 0 && n_left_to_next > 0) {
693  u32 pi0;
694  vlib_buffer_t *p0;
695  ip4_header_t *ip40;
696  ip6_header_t *ip60;
697  ip_csum_t csum0;
698  u16 *checksum0;
699  ip6_frag_hdr_t *frag0;
700  u32 frag_id0;
701  ip4_mapt_pseudo_header_t *pheader0;
702  ip4_mapt_tcp_udp_next_t next0;
703 
704  pi0 = to_next[0] = from[0];
705  from += 1;
706  n_left_from -= 1;
707  to_next +=1;
708  n_left_to_next -= 1;
709 
710  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP;
711  p0 = vlib_get_buffer(vm, pi0);
712 
713  //Accessing pseudo header
714  pheader0 = vlib_buffer_get_current(p0);
715  vlib_buffer_advance(p0, sizeof(*pheader0));
716 
717  //Accessing ip4 header
718  ip40 = vlib_buffer_get_current(p0);
719  checksum0 = (u16 *) u8_ptr_add(ip40, vnet_buffer(p0)->map_t.checksum_offset);
720 
721  //UDP checksum is optional over IPv4 but mandatory for IPv6
722  //We do not check udp->length sanity but use our safe computed value instead
723  if (PREDICT_FALSE(!*checksum0 && ip40->protocol == IP_PROTOCOL_UDP)) {
724  u16 udp_len = clib_host_to_net_u16(ip40->length) - sizeof(*ip40);
725  udp_header_t *udp = (udp_header_t *) u8_ptr_add(ip40, sizeof(*ip40));
726  ip_csum_t csum;
727  csum = ip_incremental_checksum(0, udp, udp_len);
728  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(udp_len));
729  csum = ip_csum_with_carry(csum, clib_host_to_net_u16(IP_PROTOCOL_UDP));
730  csum = ip_csum_with_carry(csum, *((u64 *)(&ip40->src_address)));
731  *checksum0 = ~ip_csum_fold(csum);
732  }
733 
734  csum0 = ip_csum_sub_even(*checksum0, ip40->src_address.as_u32);
735  csum0 = ip_csum_sub_even(csum0, ip40->dst_address.as_u32);
736 
737  // Deal with fragmented packets
738  if (PREDICT_FALSE(ip40->flags_and_fragment_offset &
739  clib_host_to_net_u16(IP4_HEADER_FLAG_MORE_FRAGMENTS))) {
740  ip60 = (ip6_header_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
741  frag0 = (ip6_frag_hdr_t *) u8_ptr_add(ip40, sizeof(*ip40) - sizeof(*frag0));
742  frag_id0 = frag_id_4to6(ip40->fragment_id);
743  vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60) - sizeof(*frag0));
744  } else {
745  ip60 = (ip6_header_t *) (((u8 *)ip40) + sizeof(*ip40) - sizeof(*ip60));
746  vlib_buffer_advance(p0, sizeof(*ip40) - sizeof(*ip60));
747  frag0 = NULL;
748  }
749 
750  ip60->ip_version_traffic_class_and_flow_label = clib_host_to_net_u32((6 << 28) + (ip40->tos << 20));
751  ip60->payload_length = u16_net_add(ip40->length, - sizeof(*ip40));
752  ip60->hop_limit = ip40->ttl;
753  ip60->protocol = ip40->protocol;
754 
755  if (PREDICT_FALSE(frag0 != NULL)) {
756  frag0->next_hdr = ip60->protocol;
757  frag0->identification = frag_id0;
758  frag0->rsv = 0;
759  frag0->fragment_offset_and_more = ip6_frag_hdr_offset_and_more(0, 1);
760  ip60->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
761  ip60->payload_length = u16_net_add(ip60->payload_length, sizeof(*frag0));
762  }
763 
764  //Finally copying the address
765  ip60->dst_address.as_u64[0] = pheader0->daddr.as_u64[0];
766  ip60->dst_address.as_u64[1] = pheader0->daddr.as_u64[1];
767  ip60->src_address.as_u64[0] = pheader0->saddr.as_u64[0];
768  ip60->src_address.as_u64[1] = pheader0->saddr.as_u64[1];
769 
770  csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[0]);
771  csum0 = ip_csum_add_even(csum0, ip60->src_address.as_u64[1]);
772  csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[0]);
773  csum0 = ip_csum_add_even(csum0, ip60->dst_address.as_u64[1]);
774  *checksum0 = ip_csum_fold(csum0);
775 
776  if(vnet_buffer(p0)->map_t.mtu < p0->current_length) {
777  //Send to fragmentation node if necessary
778  vnet_buffer(p0)->ip_frag.header_offset = 0;
779  vnet_buffer(p0)->ip_frag.mtu = vnet_buffer(p0)->map_t.mtu;
780  vnet_buffer(p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
781  next0 = IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG;
782  }
783 
784  vlib_validate_buffer_enqueue_x1(vm, node, next_index,
785  to_next, n_left_to_next, pi0,
786  next0);
787  }
788  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
789  }
790 
791  return frame->n_vectors;
792 }
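//Illustrative sketch, not part of the original source: the TCP/UDP checksum is adjusted
//incrementally rather than recomputed. With 'sum' being the existing L4 checksum, the
//pseudo-header change from IPv4 to IPv6 addresses is applied roughly as:
//
//  ip_csum_t c = sum;
//  c = ip_csum_sub_even(c, ip4_src.as_u32);        //remove old IPv4 pseudo-header part
//  c = ip_csum_sub_even(c, ip4_dst.as_u32);
//  c = ip_csum_add_even(c, ip6_src.as_u64[0]);     //add new IPv6 pseudo-header part
//  c = ip_csum_add_even(c, ip6_src.as_u64[1]);
//  c = ip_csum_add_even(c, ip6_dst.as_u64[0]);
//  c = ip_csum_add_even(c, ip6_dst.as_u64[1]);
//  sum = ip_csum_fold(c);                          //fold carries back into 16 bits
//
//Only a zero IPv4 UDP checksum (optional in IPv4, mandatory in IPv6) forces the full
//recomputation seen at the top of the loop.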
793 
794 static_always_inline void
795 ip4_map_t_classify (vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0,
796  i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0)
797 {
798  if (PREDICT_FALSE(ip4_get_fragment_offset(ip40))) {
799  *next0 = IP4_MAPT_NEXT_MAPT_FRAGMENTED;
800  if(d0->ea_bits_len == 0 && d0->rules) {
801  *dst_port0 = 0;
802  } else {
803  *dst_port0 = ip4_map_fragment_get_port(ip40);
804  *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
805  }
806  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_TCP)) {
807  vnet_buffer(p0)->map_t.checksum_offset = 36;
808  *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
809  *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
810  *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
811  } else if (PREDICT_TRUE(ip40->protocol == IP_PROTOCOL_UDP)) {
812  vnet_buffer(p0)->map_t.checksum_offset = 26;
813  *next0 = IP4_MAPT_NEXT_MAPT_TCP_UDP;
814  *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
815  *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 2));
816  } else if (ip40->protocol == IP_PROTOCOL_ICMP) {
817  *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
818  if(d0->ea_bits_len == 0 && d0->rules)
819  *dst_port0 = 0;
820  else if (((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->type == ICMP4_echo_reply ||
821  ((icmp46_header_t *) u8_ptr_add(ip40, sizeof(*ip40)))->type == ICMP4_echo_request)
822  *dst_port0 = (i32) *((u16 *)u8_ptr_add(ip40, sizeof(*ip40) + 6));
823  } else {
824  *error0 = MAP_ERROR_BAD_PROTOCOL;
825  }
826 }
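//Illustrative note, not part of the original source: the checksum_offset values above are
//byte offsets from the start of the IPv4 header: 36 = 20 (IPv4 header) + 16 (offset of the
//checksum field in the TCP header) and 26 = 20 + 6 (offset of the checksum field in the UDP
//header). The destination port is likewise read at offset 20 + 2 for both TCP and UDP.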
827 
828 static uword
829 ip4_map_t (vlib_main_t *vm,
830  vlib_node_runtime_t *node,
831  vlib_frame_t *frame)
832 {
833  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
834  vlib_node_runtime_t *error_node = vlib_node_get_runtime(vm, ip4_map_t_node.index);
835  from = vlib_frame_vector_args(frame);
836  n_left_from = frame->n_vectors;
837  next_index = node->cached_next_index;
838  vlib_combined_counter_main_t *cm = map_main.domain_counters;
839  u32 cpu_index = os_get_cpu_number();
840 
841  while (n_left_from > 0) {
842  vlib_get_next_frame(vm, node, next_index, to_next, n_left_to_next);
843 
844 #ifdef IP4_MAP_T_DUAL_LOOP
845  while (n_left_from >= 4 && n_left_to_next >= 2) {
846  u32 pi0, pi1;
847  vlib_buffer_t *p0, *p1;
848  ip4_header_t *ip40, *ip41;
849  map_domain_t *d0, *d1;
850  ip4_mapt_next_t next0 = 0, next1 = 0;
851  u16 ip4_len0, ip4_len1;
852  u8 error0, error1;
853  i32 dst_port0, dst_port1;
854  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
855 
856  pi0 = to_next[0] = from[0];
857  pi1 = to_next[1] = from[1];
858  from += 2;
859  n_left_from -= 2;
860  to_next +=2;
861  n_left_to_next -= 2;
862  error0 = MAP_ERROR_NONE;
863  error1 = MAP_ERROR_NONE;
864 
865  p0 = vlib_get_buffer(vm, pi0);
866  p1 = vlib_get_buffer(vm, pi1);
867  ip40 = vlib_buffer_get_current(p0);
868  ip41 = vlib_buffer_get_current(p1);
869  ip4_len0 = clib_host_to_net_u16(ip40->length);
870  ip4_len1 = clib_host_to_net_u16(ip41->length);
871 
872  if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
873  ip40->ip_version_and_header_length != 0x45)) {
874  error0 = MAP_ERROR_UNKNOWN;
875  next0 = IP4_MAPT_NEXT_DROP;
876  }
877 
878  if (PREDICT_FALSE(p1->current_length < ip4_len1 ||
879  ip41->ip_version_and_header_length != 0x45)) {
880  error1 = MAP_ERROR_UNKNOWN;
881  next1 = IP4_MAPT_NEXT_DROP;
882  }
883 
884  d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
885  &vnet_buffer(p0)->map_t.map_domain_index);
886  d1 = ip4_map_get_domain(vnet_buffer(p1)->ip.adj_index[VLIB_TX],
887  &vnet_buffer(p1)->map_t.map_domain_index);
888 
889  vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
890  vnet_buffer(p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
891 
892  dst_port0 = -1;
893  dst_port1 = -1;
894 
895  ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
896  ip4_map_t_classify(p1, d1, ip41, ip4_len1, &dst_port1, &error1, &next1);
897 
898  //Add MAP-T pseudo header in front of the packet
899  vlib_buffer_advance(p0, - sizeof(*pheader0));
900  vlib_buffer_advance(p1, - sizeof(*pheader1));
901  pheader0 = vlib_buffer_get_current(p0);
902  pheader1 = vlib_buffer_get_current(p1);
903 
904  //Save addresses within the packet
905  ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
906  ip4_map_t_embedded_address(d1, &pheader1->saddr, &ip41->src_address);
907  pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
908  pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
909  pheader1->daddr.as_u64[0] = map_get_pfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);
910  pheader1->daddr.as_u64[1] = map_get_sfx_net(d1, ip41->dst_address.as_u32, (u16)dst_port1);
911 
912  if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
913  (d0->ea_bits_len != 0 || !d0->rules) &&
914  ip4_map_fragment_cache(ip40, dst_port0))) {
915  error0 = MAP_ERROR_FRAGMENT_MEMORY;
916  }
917 
918  if (PREDICT_FALSE(ip4_is_first_fragment(ip41) && (dst_port1 != -1) &&
919  (d1->ea_bits_len != 0 || !d1->rules) &&
920  ip4_map_fragment_cache(ip41, dst_port1))) {
921  error1 = MAP_ERROR_FRAGMENT_MEMORY;
922  }
923 
924  if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
925  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
926  vnet_buffer(p0)->map_t.map_domain_index, 1,
927  clib_net_to_host_u16(ip40->length));
928  }
929 
930  if (PREDICT_TRUE(error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP)) {
931  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
932  vnet_buffer(p1)->map_t.map_domain_index, 1,
933  clib_net_to_host_u16(ip41->length));
934  }
935 
936  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
937  next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
938  p0->error = error_node->errors[error0];
939  p1->error = error_node->errors[error1];
940  vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next,
941  n_left_to_next, pi0, pi1, next0, next1);
942  }
943 #endif
944 
945  while (n_left_from > 0 && n_left_to_next > 0) {
946  u32 pi0;
947  vlib_buffer_t *p0;
948  ip4_header_t *ip40;
949  map_domain_t *d0;
950  ip4_mapt_next_t next0;
951  u16 ip4_len0;
952  u8 error0;
953  i32 dst_port0;
954  ip4_mapt_pseudo_header_t *pheader0;
955 
956  pi0 = to_next[0] = from[0];
957  from += 1;
958  n_left_from -= 1;
959  to_next +=1;
960  n_left_to_next -= 1;
961  error0 = MAP_ERROR_NONE;
962 
963  p0 = vlib_get_buffer(vm, pi0);
964  ip40 = vlib_buffer_get_current(p0);
965  ip4_len0 = clib_host_to_net_u16(ip40->length);
966  if (PREDICT_FALSE(p0->current_length < ip4_len0 ||
967  ip40->ip_version_and_header_length != 0x45)) {
968  error0 = MAP_ERROR_UNKNOWN;
969  next0 = IP4_MAPT_NEXT_DROP;
970  }
971 
972  d0 = ip4_map_get_domain(vnet_buffer(p0)->ip.adj_index[VLIB_TX],
973  &vnet_buffer(p0)->map_t.map_domain_index);
974 
975  vnet_buffer(p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
976 
977  dst_port0 = -1;
978  ip4_map_t_classify(p0, d0, ip40, ip4_len0, &dst_port0, &error0, &next0);
979 
980  //Add MAP-T pseudo header in front of the packet
981  vlib_buffer_advance(p0, - sizeof(*pheader0));
982  pheader0 = vlib_buffer_get_current(p0);
983 
984  //Save addresses within the packet
985  ip4_map_t_embedded_address(d0, &pheader0->saddr, &ip40->src_address);
986  pheader0->daddr.as_u64[0] = map_get_pfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
987  pheader0->daddr.as_u64[1] = map_get_sfx_net(d0, ip40->dst_address.as_u32, (u16)dst_port0);
988 
989  //It is important to cache at this stage because the result might be necessary
990  //for packets within the same vector.
991  //Actually, this approach even provides some limited support for out-of-order fragments.
992  if (PREDICT_FALSE(ip4_is_first_fragment(ip40) && (dst_port0 != -1) &&
993  (d0->ea_bits_len != 0 || !d0->rules) &&
994  ip4_map_fragment_cache(ip40, dst_port0))) {
995  error0 = MAP_ERROR_FRAGMENT_MEMORY;
996  }
997 
998  if (PREDICT_TRUE(error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP)) {
999  vlib_increment_combined_counter(cm + MAP_DOMAIN_COUNTER_RX, cpu_index,
1000  vnet_buffer(p0)->map_t.map_domain_index, 1,
1001  clib_net_to_host_u16(ip40->length));
1002  }
1003 
1004  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
1005  p0->error = error_node->errors[error0];
1006  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1007  to_next, n_left_to_next, pi0,
1008  next0);
1009  }
1010  vlib_put_next_frame(vm, node, next_index, n_left_to_next);
1011  }
1012  return frame->n_vectors;
1013 }
1014 
1015 static char *map_t_error_strings[] = {
1016 #define _(sym,string) string,
1017  foreach_map_error
1018 #undef _
1019 };
1020 
1021 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
1022  .function = ip4_map_t_fragmented,
1023  .name = "ip4-map-t-fragmented",
1024  .vector_size = sizeof(u32),
1025  .format_trace = format_map_trace,
1026  .type = VLIB_NODE_TYPE_INTERNAL,
1027 
1028  .n_errors = MAP_N_ERROR,
1029  .error_strings = map_t_error_strings,
1030 
1031  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
1032  .next_nodes = {
1033  [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
1034  [IP4_MAPT_FRAGMENTED_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1035  [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
1036  },
1037 };
1038 
1039 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
1040  .function = ip4_map_t_icmp,
1041  .name = "ip4-map-t-icmp",
1042  .vector_size = sizeof(u32),
1043  .format_trace = format_map_trace,
1044  .type = VLIB_NODE_TYPE_INTERNAL,
1045 
1046  .n_errors = MAP_N_ERROR,
1047  .error_strings = map_t_error_strings,
1048 
1049  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
1050  .next_nodes = {
1051  [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1052  [IP4_MAPT_ICMP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1053  [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
1054  },
1055 };
1056 
1057 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
1058  .function = ip4_map_t_tcp_udp,
1059  .name = "ip4-map-t-tcp-udp",
1060  .vector_size = sizeof(u32),
1061  .format_trace = format_map_trace,
1062  .type = VLIB_NODE_TYPE_INTERNAL,
1063 
1064  .n_errors = MAP_N_ERROR,
1065  .error_strings = map_t_error_strings,
1066 
1067  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
1068  .next_nodes = {
1069  [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
1070  [IP4_MAPT_TCP_UDP_NEXT_IP6_FRAG] = IP6_FRAG_NODE_NAME,
1071  [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
1072  },
1073 };
1074 
1075 VLIB_REGISTER_NODE(ip4_map_t_node) = {
1076  .function = ip4_map_t,
1077  .name = "ip4-map-t",
1078  .vector_size = sizeof(u32),
1079  .format_trace = format_map_trace,
1080  .type = VLIB_NODE_TYPE_INTERNAL,
1081 
1082  .n_errors = MAP_N_ERROR,
1083  .error_strings = map_t_error_strings,
1084 
1085  .n_next_nodes = IP4_MAPT_N_NEXT,
1086  .next_nodes = {
1087  [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
1088  [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
1089  [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
1090  [IP4_MAPT_NEXT_DROP] = "error-drop",
1091  },
1092 };
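//Illustrative overview, not part of the original source, derived from the registrations above:
//
//                 +--> ip4-map-t-tcp-udp ----+
//                 |                          |
//  ip4-map-t -----+--> ip4-map-t-icmp -------+--> ip6-lookup, or the IPv6 fragmentation
//                 |                          |    node (IP6_FRAG_NODE_NAME) when the
//                 +--> ip4-map-t-fragmented -+    packet exceeds the domain MTU
//                 |
//                 +--> error-drop
//
//ip4-map-t classifies each IPv4 packet, prepends the pseudo-header carrying the computed
//IPv6 addresses, and hands the packet to the per-protocol translation node, which rewrites
//the headers and forwards the resulting IPv6 packet.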