FD.io VPP  v17.07.01-10-g3be13f0
Vector Packet Processing
ip4_map_t.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2015 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 #include "map.h"
16 
17 #include "../ip/ip_frag.h"
18 #include <vnet/ip/ip4_to_ip6.h>
19 
20 #define IP4_MAP_T_DUAL_LOOP 1
21 
22 typedef enum
23 {
30 
31 typedef enum
32 {
38 
39 typedef enum
40 {
46 
47 typedef enum
48 {
54 
//This is used to pass information within the buffer data.
//Buffer structure being too small to contain big structures like this.
//The pseudo header is prepended in front of the IPv4 packet by the
//classifier node and consumed (via vlib_buffer_advance) by the
//protocol-specific translation nodes.
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
 ip6_address_t daddr;  //Pre-computed IPv6 destination address
 ip6_address_t saddr;  //Pre-computed IPv6 source address
 //IPv6 header + Fragmentation header will be here
 //sizeof(ip6) + sizeof(ip_frag) - sizeof(ip4)
 u8 unused[28];
}) ip4_mapt_pseudo_header_t;
/* *INDENT-ON* */
66 
67 
69 ip4_map_fragment_cache (ip4_header_t * ip4, u16 port)
70 {
71  u32 *ignore = NULL;
73  map_ip4_reass_t *r =
75  ip4->fragment_id,
76  (ip4->protocol ==
77  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
78  &ignore);
79  if (r)
80  r->port = port;
81 
83  return !r;
84 }
85 
88 {
89  u32 *ignore = NULL;
91  map_ip4_reass_t *r =
93  ip4->fragment_id,
94  (ip4->protocol ==
95  IP_PROTOCOL_ICMP) ? IP_PROTOCOL_ICMP6 : ip4->protocol,
96  &ignore);
97  i32 ret = r ? r->port : -1;
99  return ret;
100 }
101 
102 typedef struct
103 {
107 
108 static int
110 {
111  icmp_to_icmp6_ctx_t *ctx = arg;
112 
114  ip6->dst_address.as_u64[0] =
115  map_get_pfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
116  ip6->dst_address.as_u64[1] =
117  map_get_sfx_net (ctx->d, ip4->dst_address.as_u32, ctx->recv_port);
118 
119  return 0;
120 }
121 
122 static int
124  void *arg)
125 {
126  icmp_to_icmp6_ctx_t *ctx = arg;
127 
128  //Note that the source address is within the domain
129  //while the destination address is the one outside the domain
131  ip6->src_address.as_u64[0] =
132  map_get_pfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
133  ip6->src_address.as_u64[1] =
134  map_get_sfx_net (ctx->d, ip4->src_address.as_u32, ctx->recv_port);
135 
136  return 0;
137 }
138 
139 static uword
141  vlib_node_runtime_t * node, vlib_frame_t * frame)
142 {
143  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
144  vlib_node_runtime_t *error_node =
145  vlib_node_get_runtime (vm, ip4_map_t_icmp_node.index);
146  from = vlib_frame_vector_args (frame);
147  n_left_from = frame->n_vectors;
148  next_index = node->cached_next_index;
150  u32 thread_index = vlib_get_thread_index ();
151 
152  while (n_left_from > 0)
153  {
154  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
155 
156  while (n_left_from > 0 && n_left_to_next > 0)
157  {
158  u32 pi0;
159  vlib_buffer_t *p0;
160  ip4_mapt_icmp_next_t next0;
161  u8 error0;
162  map_domain_t *d0;
163  u16 len0;
164  icmp_to_icmp6_ctx_t ctx0;
165  ip4_header_t *ip40;
166 
168  pi0 = to_next[0] = from[0];
169  from += 1;
170  n_left_from -= 1;
171  to_next += 1;
172  n_left_to_next -= 1;
173  error0 = MAP_ERROR_NONE;
174 
175  p0 = vlib_get_buffer (vm, pi0);
176  vlib_buffer_advance (p0, sizeof (ip4_mapt_pseudo_header_t)); //The pseudo-header is not used
177  len0 =
178  clib_net_to_host_u16 (((ip4_header_t *)
179  vlib_buffer_get_current (p0))->length);
180  d0 =
182  vnet_buffer (p0)->map_t.map_domain_index);
183 
184  ip40 = vlib_buffer_get_current (p0);
185  ctx0.recv_port = ip4_get_port (ip40, 1);
186  ctx0.d = d0;
187  if (ctx0.recv_port == 0)
188  {
189  // In case of 1:1 mapping, we don't care about the port
190  if (!(d0->ea_bits_len == 0 && d0->rules))
191  {
192  error0 = MAP_ERROR_ICMP;
193  goto err0;
194  }
195  }
196 
197  if (icmp_to_icmp6
198  (p0, ip4_to_ip6_set_icmp_cb, &ctx0,
200  {
201  error0 = MAP_ERROR_ICMP;
202  goto err0;
203  }
204 
205  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
206  {
207  vnet_buffer (p0)->ip_frag.header_offset = 0;
208  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
209  vnet_buffer (p0)->ip_frag.next_index = IP6_FRAG_NEXT_IP6_LOOKUP;
211  }
212  err0:
213  if (PREDICT_TRUE (error0 == MAP_ERROR_NONE))
214  {
216  thread_index,
217  vnet_buffer (p0)->
218  map_t.map_domain_index, 1,
219  len0);
220  }
221  else
222  {
223  next0 = IP4_MAPT_ICMP_NEXT_DROP;
224  }
225  p0->error = error_node->errors[error0];
226  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
227  to_next, n_left_to_next, pi0,
228  next0);
229  }
230  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
231  }
232  return frame->n_vectors;
233 }
234 
235 static int
236 ip4_to_ip6_set_cb (ip4_header_t * ip4, ip6_header_t * ip6, void *ctx)
237 {
238  ip4_mapt_pseudo_header_t *pheader = ctx;
239 
240  ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
241  ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
242  ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
243  ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
244 
245  return 0;
246 }
247 
248 static uword
250  vlib_node_runtime_t * node, vlib_frame_t * frame)
251 {
252  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
253  from = vlib_frame_vector_args (frame);
254  n_left_from = frame->n_vectors;
255  next_index = node->cached_next_index;
256  vlib_node_runtime_t *error_node =
257  vlib_node_get_runtime (vm, ip4_map_t_fragmented_node.index);
258 
259  while (n_left_from > 0)
260  {
261  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
262 
263  while (n_left_from > 0 && n_left_to_next > 0)
264  {
265  u32 pi0;
266  vlib_buffer_t *p0;
267  ip4_mapt_pseudo_header_t *pheader0;
268  ip4_mapt_fragmented_next_t next0;
269 
271  pi0 = to_next[0] = from[0];
272  from += 1;
273  n_left_from -= 1;
274  to_next += 1;
275  n_left_to_next -= 1;
276 
277  p0 = vlib_get_buffer (vm, pi0);
278 
279  //Accessing pseudo header
280  pheader0 = vlib_buffer_get_current (p0);
281  vlib_buffer_advance (p0, sizeof (*pheader0));
282 
283  if (ip4_to_ip6_fragmented (p0, ip4_to_ip6_set_cb, pheader0))
284  {
285  p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
287  }
288  else
289  {
290  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
291  {
292  vnet_buffer (p0)->ip_frag.header_offset = 0;
293  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
294  vnet_buffer (p0)->ip_frag.next_index =
297  }
298  }
299 
300  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
301  to_next, n_left_to_next, pi0,
302  next0);
303  }
304  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
305  }
306  return frame->n_vectors;
307 }
308 
309 static uword
311  vlib_node_runtime_t * node, vlib_frame_t * frame)
312 {
313  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
314  from = vlib_frame_vector_args (frame);
315  n_left_from = frame->n_vectors;
316  next_index = node->cached_next_index;
317  vlib_node_runtime_t *error_node =
318  vlib_node_get_runtime (vm, ip4_map_t_tcp_udp_node.index);
319 
320 
321  while (n_left_from > 0)
322  {
323  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
324 
325 #ifdef IP4_MAP_T_DUAL_LOOP
326  while (n_left_from >= 4 && n_left_to_next >= 2)
327  {
328  u32 pi0, pi1;
329  vlib_buffer_t *p0, *p1;
330  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
331  ip4_mapt_tcp_udp_next_t next0, next1;
332 
333  pi0 = to_next[0] = from[0];
334  pi1 = to_next[1] = from[1];
335  from += 2;
336  n_left_from -= 2;
337  to_next += 2;
338  n_left_to_next -= 2;
339 
342  p0 = vlib_get_buffer (vm, pi0);
343  p1 = vlib_get_buffer (vm, pi1);
344 
345  //Accessing pseudo header
346  pheader0 = vlib_buffer_get_current (p0);
347  pheader1 = vlib_buffer_get_current (p1);
348  vlib_buffer_advance (p0, sizeof (*pheader0));
349  vlib_buffer_advance (p1, sizeof (*pheader1));
350 
351  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
352  {
353  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
355  }
356  else
357  {
358  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
359  {
360  //Send to fragmentation node if necessary
361  vnet_buffer (p0)->ip_frag.header_offset = 0;
362  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
363  vnet_buffer (p0)->ip_frag.next_index =
366  }
367  }
368 
369  if (ip4_to_ip6_tcp_udp (p1, ip4_to_ip6_set_cb, pheader1))
370  {
371  p1->error = error_node->errors[MAP_ERROR_UNKNOWN];
373  }
374  else
375  {
376  if (vnet_buffer (p1)->map_t.mtu < p1->current_length)
377  {
378  //Send to fragmentation node if necessary
379  vnet_buffer (p1)->ip_frag.header_offset = 0;
380  vnet_buffer (p1)->ip_frag.mtu = vnet_buffer (p1)->map_t.mtu;
381  vnet_buffer (p1)->ip_frag.next_index =
384  }
385  }
386 
387  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
388  to_next, n_left_to_next, pi0, pi1,
389  next0, next1);
390  }
391 #endif
392 
393  while (n_left_from > 0 && n_left_to_next > 0)
394  {
395  u32 pi0;
396  vlib_buffer_t *p0;
397  ip4_mapt_pseudo_header_t *pheader0;
398  ip4_mapt_tcp_udp_next_t next0;
399 
400  pi0 = to_next[0] = from[0];
401  from += 1;
402  n_left_from -= 1;
403  to_next += 1;
404  n_left_to_next -= 1;
405 
407  p0 = vlib_get_buffer (vm, pi0);
408 
409  //Accessing pseudo header
410  pheader0 = vlib_buffer_get_current (p0);
411  vlib_buffer_advance (p0, sizeof (*pheader0));
412 
413  if (ip4_to_ip6_tcp_udp (p0, ip4_to_ip6_set_cb, pheader0))
414  {
415  p0->error = error_node->errors[MAP_ERROR_UNKNOWN];
417  }
418  else
419  {
420  if (vnet_buffer (p0)->map_t.mtu < p0->current_length)
421  {
422  //Send to fragmentation node if necessary
423  vnet_buffer (p0)->ip_frag.header_offset = 0;
424  vnet_buffer (p0)->ip_frag.mtu = vnet_buffer (p0)->map_t.mtu;
425  vnet_buffer (p0)->ip_frag.next_index =
428  }
429  }
430  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
431  to_next, n_left_to_next, pi0,
432  next0);
433  }
434  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
435  }
436 
437  return frame->n_vectors;
438 }
439 
442  ip4_header_t * ip40, u16 ip4_len0, i32 * dst_port0,
443  u8 * error0, ip4_mapt_next_t * next0)
444 {
446  {
448  if (d0->ea_bits_len == 0 && d0->rules)
449  {
450  *dst_port0 = 0;
451  }
452  else
453  {
454  *dst_port0 = ip4_map_fragment_get_port (ip40);
455  *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
456  }
457  }
458  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_TCP))
459  {
460  vnet_buffer (p0)->map_t.checksum_offset = 36;
462  *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
463  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
464  }
465  else if (PREDICT_TRUE (ip40->protocol == IP_PROTOCOL_UDP))
466  {
467  vnet_buffer (p0)->map_t.checksum_offset = 26;
469  *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
470  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 2));
471  }
472  else if (ip40->protocol == IP_PROTOCOL_ICMP)
473  {
474  *next0 = IP4_MAPT_NEXT_MAPT_ICMP;
475  if (d0->ea_bits_len == 0 && d0->rules)
476  *dst_port0 = 0;
477  else if (((icmp46_header_t *) u8_ptr_add (ip40, sizeof (*ip40)))->code
478  == ICMP4_echo_reply
479  || ((icmp46_header_t *)
480  u8_ptr_add (ip40,
481  sizeof (*ip40)))->code == ICMP4_echo_request)
482  *dst_port0 = (i32) * ((u16 *) u8_ptr_add (ip40, sizeof (*ip40) + 6));
483  }
484  else
485  {
486  *error0 = MAP_ERROR_BAD_PROTOCOL;
487  }
488 }
489 
490 static uword
492 {
493  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
494  vlib_node_runtime_t *error_node =
495  vlib_node_get_runtime (vm, ip4_map_t_node.index);
496  from = vlib_frame_vector_args (frame);
497  n_left_from = frame->n_vectors;
498  next_index = node->cached_next_index;
500  u32 thread_index = vlib_get_thread_index ();
501 
502  while (n_left_from > 0)
503  {
504  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
505 
506 #ifdef IP4_MAP_T_DUAL_LOOP
507  while (n_left_from >= 4 && n_left_to_next >= 2)
508  {
509  u32 pi0, pi1;
510  vlib_buffer_t *p0, *p1;
511  ip4_header_t *ip40, *ip41;
512  map_domain_t *d0, *d1;
513  ip4_mapt_next_t next0 = 0, next1 = 0;
514  u16 ip4_len0, ip4_len1;
515  u8 error0, error1;
516  i32 dst_port0, dst_port1;
517  ip4_mapt_pseudo_header_t *pheader0, *pheader1;
518 
519  pi0 = to_next[0] = from[0];
520  pi1 = to_next[1] = from[1];
521  from += 2;
522  n_left_from -= 2;
523  to_next += 2;
524  n_left_to_next -= 2;
525  error0 = MAP_ERROR_NONE;
526  error1 = MAP_ERROR_NONE;
527 
528  p0 = vlib_get_buffer (vm, pi0);
529  p1 = vlib_get_buffer (vm, pi1);
530  ip40 = vlib_buffer_get_current (p0);
531  ip41 = vlib_buffer_get_current (p1);
532  ip4_len0 = clib_host_to_net_u16 (ip40->length);
533  ip4_len1 = clib_host_to_net_u16 (ip41->length);
534 
535  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
536  ip40->ip_version_and_header_length != 0x45))
537  {
538  error0 = MAP_ERROR_UNKNOWN;
539  next0 = IP4_MAPT_NEXT_DROP;
540  }
541 
542  if (PREDICT_FALSE (p1->current_length < ip4_len1 ||
543  ip41->ip_version_and_header_length != 0x45))
544  {
545  error1 = MAP_ERROR_UNKNOWN;
546  next1 = IP4_MAPT_NEXT_DROP;
547  }
548 
549  vnet_buffer (p0)->map_t.map_domain_index =
550  vnet_buffer (p0)->ip.adj_index[VLIB_TX];
551  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
552  vnet_buffer (p1)->map_t.map_domain_index =
553  vnet_buffer (p1)->ip.adj_index[VLIB_TX];
554  d1 = ip4_map_get_domain (vnet_buffer (p1)->map_t.map_domain_index);
555 
556  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
557  vnet_buffer (p1)->map_t.mtu = d1->mtu ? d1->mtu : ~0;
558 
559  dst_port0 = -1;
560  dst_port1 = -1;
561 
562  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
563  &next0);
564  ip4_map_t_classify (p1, d1, ip41, ip4_len1, &dst_port1, &error1,
565  &next1);
566 
567  //Add MAP-T pseudo header in front of the packet
568  vlib_buffer_advance (p0, -sizeof (*pheader0));
569  vlib_buffer_advance (p1, -sizeof (*pheader1));
570  pheader0 = vlib_buffer_get_current (p0);
571  pheader1 = vlib_buffer_get_current (p1);
572 
573  //Save addresses within the packet
574  ip4_map_t_embedded_address (d0, &pheader0->saddr,
575  &ip40->src_address);
576  ip4_map_t_embedded_address (d1, &pheader1->saddr,
577  &ip41->src_address);
578  pheader0->daddr.as_u64[0] =
579  map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
580  pheader0->daddr.as_u64[1] =
581  map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
582  pheader1->daddr.as_u64[0] =
583  map_get_pfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
584  pheader1->daddr.as_u64[1] =
585  map_get_sfx_net (d1, ip41->dst_address.as_u32, (u16) dst_port1);
586 
587  if (PREDICT_FALSE
588  (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
589  && (d0->ea_bits_len != 0 || !d0->rules)
590  && ip4_map_fragment_cache (ip40, dst_port0)))
591  {
592  error0 = MAP_ERROR_FRAGMENT_MEMORY;
593  }
594 
595  if (PREDICT_FALSE
596  (ip4_is_first_fragment (ip41) && (dst_port1 != -1)
597  && (d1->ea_bits_len != 0 || !d1->rules)
598  && ip4_map_fragment_cache (ip41, dst_port1)))
599  {
600  error1 = MAP_ERROR_FRAGMENT_MEMORY;
601  }
602 
603  if (PREDICT_TRUE
604  (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
605  {
607  thread_index,
608  vnet_buffer (p0)->
609  map_t.map_domain_index, 1,
610  clib_net_to_host_u16
611  (ip40->length));
612  }
613 
614  if (PREDICT_TRUE
615  (error1 == MAP_ERROR_NONE && next1 != IP4_MAPT_NEXT_MAPT_ICMP))
616  {
618  thread_index,
619  vnet_buffer (p1)->
620  map_t.map_domain_index, 1,
621  clib_net_to_host_u16
622  (ip41->length));
623  }
624 
625  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
626  next1 = (error1 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next1;
627  p0->error = error_node->errors[error0];
628  p1->error = error_node->errors[error1];
629  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
630  n_left_to_next, pi0, pi1, next0,
631  next1);
632  }
633 #endif
634 
635  while (n_left_from > 0 && n_left_to_next > 0)
636  {
637  u32 pi0;
638  vlib_buffer_t *p0;
639  ip4_header_t *ip40;
640  map_domain_t *d0;
641  ip4_mapt_next_t next0;
642  u16 ip4_len0;
643  u8 error0;
644  i32 dst_port0;
645  ip4_mapt_pseudo_header_t *pheader0;
646 
647  pi0 = to_next[0] = from[0];
648  from += 1;
649  n_left_from -= 1;
650  to_next += 1;
651  n_left_to_next -= 1;
652  error0 = MAP_ERROR_NONE;
653 
654  p0 = vlib_get_buffer (vm, pi0);
655  ip40 = vlib_buffer_get_current (p0);
656  ip4_len0 = clib_host_to_net_u16 (ip40->length);
657  if (PREDICT_FALSE (p0->current_length < ip4_len0 ||
658  ip40->ip_version_and_header_length != 0x45))
659  {
660  error0 = MAP_ERROR_UNKNOWN;
661  next0 = IP4_MAPT_NEXT_DROP;
662  }
663 
664  vnet_buffer (p0)->map_t.map_domain_index =
665  vnet_buffer (p0)->ip.adj_index[VLIB_TX];
666  d0 = ip4_map_get_domain (vnet_buffer (p0)->map_t.map_domain_index);
667 
668  vnet_buffer (p0)->map_t.mtu = d0->mtu ? d0->mtu : ~0;
669 
670  dst_port0 = -1;
671  ip4_map_t_classify (p0, d0, ip40, ip4_len0, &dst_port0, &error0,
672  &next0);
673 
674  //Add MAP-T pseudo header in front of the packet
675  vlib_buffer_advance (p0, -sizeof (*pheader0));
676  pheader0 = vlib_buffer_get_current (p0);
677 
678  //Save addresses within the packet
679  ip4_map_t_embedded_address (d0, &pheader0->saddr,
680  &ip40->src_address);
681  pheader0->daddr.as_u64[0] =
682  map_get_pfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
683  pheader0->daddr.as_u64[1] =
684  map_get_sfx_net (d0, ip40->dst_address.as_u32, (u16) dst_port0);
685 
686  //It is important to cache at this stage because the result might be necessary
687  //for packets within the same vector.
688  //Actually, this approach even provides some limited out-of-order fragments support
689  if (PREDICT_FALSE
690  (ip4_is_first_fragment (ip40) && (dst_port0 != -1)
691  && (d0->ea_bits_len != 0 || !d0->rules)
692  && ip4_map_fragment_cache (ip40, dst_port0)))
693  {
694  error0 = MAP_ERROR_UNKNOWN;
695  }
696 
697  if (PREDICT_TRUE
698  (error0 == MAP_ERROR_NONE && next0 != IP4_MAPT_NEXT_MAPT_ICMP))
699  {
701  thread_index,
702  vnet_buffer (p0)->
703  map_t.map_domain_index, 1,
704  clib_net_to_host_u16
705  (ip40->length));
706  }
707 
708  next0 = (error0 != MAP_ERROR_NONE) ? IP4_MAPT_NEXT_DROP : next0;
709  p0->error = error_node->errors[error0];
710  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
711  to_next, n_left_to_next, pi0,
712  next0);
713  }
714  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
715  }
716  return frame->n_vectors;
717 }
718 
719 static char *map_t_error_strings[] = {
720 #define _(sym,string) string,
722 #undef _
723 };
724 
725 /* *INDENT-OFF* */
726 VLIB_REGISTER_NODE(ip4_map_t_fragmented_node) = {
727  .function = ip4_map_t_fragmented,
728  .name = "ip4-map-t-fragmented",
729  .vector_size = sizeof(u32),
730  .format_trace = format_map_trace,
731  .type = VLIB_NODE_TYPE_INTERNAL,
732 
733  .n_errors = MAP_N_ERROR,
734  .error_strings = map_t_error_strings,
735 
736  .n_next_nodes = IP4_MAPT_FRAGMENTED_N_NEXT,
737  .next_nodes = {
738  [IP4_MAPT_FRAGMENTED_NEXT_IP6_LOOKUP] = "ip6-lookup",
740  [IP4_MAPT_FRAGMENTED_NEXT_DROP] = "error-drop",
741  },
742 };
743 /* *INDENT-ON* */
744 
745 /* *INDENT-OFF* */
746 VLIB_REGISTER_NODE(ip4_map_t_icmp_node) = {
747  .function = ip4_map_t_icmp,
748  .name = "ip4-map-t-icmp",
749  .vector_size = sizeof(u32),
750  .format_trace = format_map_trace,
751  .type = VLIB_NODE_TYPE_INTERNAL,
752 
753  .n_errors = MAP_N_ERROR,
754  .error_strings = map_t_error_strings,
755 
756  .n_next_nodes = IP4_MAPT_ICMP_N_NEXT,
757  .next_nodes = {
758  [IP4_MAPT_ICMP_NEXT_IP6_LOOKUP] = "ip6-lookup",
760  [IP4_MAPT_ICMP_NEXT_DROP] = "error-drop",
761  },
762 };
763 /* *INDENT-ON* */
764 
765 /* *INDENT-OFF* */
766 VLIB_REGISTER_NODE(ip4_map_t_tcp_udp_node) = {
767  .function = ip4_map_t_tcp_udp,
768  .name = "ip4-map-t-tcp-udp",
769  .vector_size = sizeof(u32),
770  .format_trace = format_map_trace,
771  .type = VLIB_NODE_TYPE_INTERNAL,
772 
773  .n_errors = MAP_N_ERROR,
774  .error_strings = map_t_error_strings,
775 
776  .n_next_nodes = IP4_MAPT_TCP_UDP_N_NEXT,
777  .next_nodes = {
778  [IP4_MAPT_TCP_UDP_NEXT_IP6_LOOKUP] = "ip6-lookup",
780  [IP4_MAPT_TCP_UDP_NEXT_DROP] = "error-drop",
781  },
782 };
783 /* *INDENT-ON* */
784 
/* *INDENT-OFF* */
/*
 * Entry node of the IPv4 MAP-T feature: ip4_map_t classifies each
 * IPv4 packet, prepends the pseudo header, and dispatches to the
 * protocol-specific translation nodes listed in next_nodes
 * (tcp/udp, icmp, fragmented) or drops on error.
 */
VLIB_REGISTER_NODE(ip4_map_t_node) = {
  .function = ip4_map_t,
  .name = "ip4-map-t",
  .vector_size = sizeof(u32),
  .format_trace = format_map_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = MAP_N_ERROR,
  .error_strings = map_t_error_strings,

  .n_next_nodes = IP4_MAPT_N_NEXT,
  .next_nodes = {
    [IP4_MAPT_NEXT_MAPT_TCP_UDP] = "ip4-map-t-tcp-udp",
    [IP4_MAPT_NEXT_MAPT_ICMP] = "ip4-map-t-icmp",
    [IP4_MAPT_NEXT_MAPT_FRAGMENTED] = "ip4-map-t-fragmented",
    [IP4_MAPT_NEXT_DROP] = "error-drop",
  },
};
/* *INDENT-ON* */
805 
806 /*
807  * fd.io coding-style-patch-verification: ON
808  *
809  * Local Variables:
810  * eval: (c-set-style "gnu")
811  * End:
812  */
ip4_mapt_tcp_udp_next_t
Definition: ip4_map_t.c:39
#define map_ip4_reass_lock()
Definition: map.h:458
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:211
ip4_address_t src_address
Definition: ip4_packet.h:164
#define IP6_FRAG_NODE_NAME
Definition: ip_frag.h:44
#define PREDICT_TRUE(x)
Definition: clib.h:98
u64 as_u64[2]
Definition: ip6_packet.h:51
#define NULL
Definition: clib.h:55
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:459
static_always_inline void ip4_map_t_classify(vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0, i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0)
Definition: ip4_map_t.c:441
ip4_mapt_next_t
Definition: ip4_map_t.c:22
static_always_inline i32 ip4_map_fragment_get_port(ip4_header_t *ip4)
Definition: ip4_map_t.c:87
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:419
ip6_address_t src_address
Definition: ip6_packet.h:341
IPv4 to IPv6 translation.
static uword ip4_map_t_icmp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:140
static int ip4_to_ip6_set_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *ctx)
Definition: ip4_map_t.c:236
static uword ip4_map_t(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:491
static_always_inline void ip4_map_t_embedded_address(map_domain_t *d, ip6_address_t *ip6, const ip4_address_t *ip4)
Definition: map.h:517
#define static_always_inline
Definition: clib.h:85
ip4_address_t dst_address
Definition: ip4_packet.h:164
vlib_combined_counter_main_t * domain_counters
Definition: map.h:232
ip4_mapt_fragmented_next_t
Definition: ip4_map_t.c:47
static int ip4_get_fragment_offset(ip4_header_t *i)
Definition: ip4_packet.h:192
int i32
Definition: types.h:81
ip6_address_t * rules
Definition: map.h:83
u8 ea_bits_len
Definition: map.h:91
static u16 ip4_get_port(ip4_header_t *ip, u8 sender)
Get TCP/UDP port number or ICMP id from IPv4 packet.
Definition: ip4_to_ip6.h:51
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:397
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:71
static_always_inline u64 map_get_pfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:365
map_domain_t * domains
Definition: map.h:228
static_always_inline u64 map_get_sfx_net(map_domain_t *d, u32 addr, u16 port)
Definition: map.h:394
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:188
map_ip4_reass_t * map_ip4_reass_get(u32 src, u32 dst, u16 fragment_id, u8 protocol, u32 **pi_to_drop)
Definition: map.c:1463
#define PREDICT_FALSE(x)
Definition: clib.h:97
map_main_t map_main
Definition: map.h:332
static_always_inline map_domain_t * ip4_map_get_domain(u32 mdi)
Definition: map.h:410
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:216
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:366
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:113
map_domain_t * d
Definition: ip4_map_t.c:104
u16 n_vectors
Definition: node.h:345
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:185
ip4_mapt_icmp_next_t
Definition: ip4_map_t.c:31
static int ip4_to_ip6_tcp_udp(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx)
Translate IPv4 UDP/TCP packet to IPv6.
Definition: ip4_to_ip6.h:501
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
Definition: node_funcs.h:88
static int ip4_is_first_fragment(ip4_header_t *i)
Definition: ip4_packet.h:212
#define foreach_map_error
Definition: map.h:299
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
Definition: node.h:460
#define u8_ptr_add(ptr, index)
Definition: map.h:513
unsigned int u32
Definition: types.h:88
static int ip4_to_ip6_set_inner_icmp_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
Definition: ip4_map_t.c:123
static int icmp_to_icmp6(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx, ip4_to_ip6_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP4 packet to ICMP6.
Definition: ip4_to_ip6.h:220
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
Definition: buffer.h:201
#define map_ip4_reass_unlock()
Definition: map.h:459
u8 * format_map_trace(u8 *s, va_list *args)
Definition: map.c:1383
u64 uword
Definition: types.h:112
Definition: defs.h:47
unsigned short u16
Definition: types.h:57
u16 mtu
Definition: map.h:87
unsigned char u8
Definition: types.h:56
static int ip4_to_ip6_set_icmp_cb(ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
Definition: ip4_map_t.c:109
static uword ip4_map_t_tcp_udp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:310
i32 port
Definition: map.h:133
static uword ip4_map_t_fragmented(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: ip4_map_t.c:249
static int ip4_to_ip6_fragmented(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx)
Translate IPv4 fragmented packet to IPv6.
Definition: ip4_to_ip6.h:450
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:269
A collection of combined counters.
Definition: counter.h:180
#define vnet_buffer(b)
Definition: buffer.h:304
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:144
typedef CLIB_PACKED(struct{ip6_address_t daddr;ip6_address_t saddr;u8 unused[28];})
Definition: ip4_map_t.c:58
u8 ip_version_and_header_length
Definition: ip4_packet.h:132
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
ip6_address_t dst_address
Definition: ip6_packet.h:341