FD.io VPP  v20.05.1-6-gf53edbc3b
Vector Packet Processing
nat64_out2in.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv4 to IPv6 translation (outside to inside network)
18  */
19 
20 #include <nat/nat64.h>
21 #include <nat/nat_inlines.h>
22 #include <vnet/ip/ip4_to_ip6.h>
23 #include <vnet/fib/ip4_fib.h>
24 #include <vnet/udp/udp.h>
25 
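/* Per-packet trace record captured when tracing is enabled on the
 * nat64-out2in node; rendered by format_nat64_out2in_trace() below. */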
26 typedef struct
27 {
28   u32 sw_if_index;
29   u32 next_index;
30 } nat64_out2in_trace_t;
31 
32 static u8 *
33 format_nat64_out2in_trace (u8 * s, va_list * args)
34 {
35  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
36  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
37  nat64_out2in_trace_t *t = va_arg (*args, nat64_out2in_trace_t *);
38 
39  s =
40  format (s, "NAT64-out2in: sw_if_index %d, next index %d", t->sw_if_index,
41  t->next_index);
42 
43  return s;
44 }
45 
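/* Node counters: each _(SYMBOL, "description") pair below expands into an
 * error enum value and its counter string. */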
46 #define foreach_nat64_out2in_error \
47 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
48 _(OUT2IN_PACKETS, "good out2in packets processed") \
49 _(NO_TRANSLATION, "no translation") \
50 _(UNKNOWN, "unknown") \
51 _(DROP_FRAGMENT, "drop fragment") \
52 _(TCP_PACKETS, "TCP packets") \
53 _(UDP_PACKETS, "UDP packets") \
54 _(ICMP_PACKETS, "ICMP packets") \
55 _(OTHER_PACKETS, "other protocol packets") \
56 _(FRAGMENTS, "fragments") \
57 _(CACHED_FRAGMENTS, "cached fragments") \
58 _(PROCESSED_FRAGMENTS, "processed fragments")
59 
60 
61 typedef enum
62 {
63 #define _(sym,str) NAT64_OUT2IN_ERROR_##sym,
64   foreach_nat64_out2in_error
65 #undef _
66   NAT64_OUT2IN_N_ERROR,
67 } nat64_out2in_error_t;
68 
69 static char *nat64_out2in_error_strings[] = {
70 #define _(sym,string) string,
71   foreach_nat64_out2in_error
72 #undef _
73 };
74 
75 typedef enum
76 {
77   NAT64_OUT2IN_NEXT_IP6_LOOKUP,
78   NAT64_OUT2IN_NEXT_IP4_LOOKUP,
79   NAT64_OUT2IN_NEXT_DROP,
80   NAT64_OUT2IN_N_NEXT,
81 } nat64_out2in_next_t;
82 
83 typedef struct nat64_out2in_set_ctx_t_
84 {
85   vlib_buffer_t *b;
86   vlib_main_t *vm;
87   u32 thread_index;
88 } nat64_out2in_set_ctx_t;
89 
90 static int
91 nat64_out2in_tcp_udp (vlib_main_t * vm, vlib_buffer_t * b,
92   nat64_out2in_set_ctx_t * ctx)
93 {
94   ip6_header_t *ip6;
95   ip4_header_t *ip4;
96  ip_csum_t csum;
97  u16 *checksum = NULL;
98  ip6_frag_hdr_t *frag;
99  u32 frag_id;
100  ip4_address_t old_src, old_dst;
101 
102  nat64_main_t *nm = &nat64_main;
103  nat64_db_bib_entry_t *bibe;
104  nat64_db_st_entry_t *ste;
105  ip46_address_t saddr;
106  ip46_address_t daddr;
107  ip6_address_t ip6_saddr;
108  u8 proto = vnet_buffer (b)->ip.reass.ip_proto;
109  u16 dport = vnet_buffer (b)->ip.reass.l4_dst_port;
110  u16 sport = vnet_buffer (b)->ip.reass.l4_src_port;
111  u32 sw_if_index, fib_index;
112  nat64_db_t *db = &nm->db[ctx->thread_index];
113 
114  ip4 = vlib_buffer_get_current (b);
115 
116  udp_header_t *udp = ip4_next_header (ip4);
117  tcp_header_t *tcp = ip4_next_header (ip4);
118  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
119  {
120  if (ip4->protocol == IP_PROTOCOL_UDP)
121  {
122  checksum = &udp->checksum;
123  //UDP checksum is optional over IPv4 but mandatory for IPv6
124  //We do not check udp->length sanity but use our safe computed value instead
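 //The computed sum covers the UDP datagram plus the IPv4 pseudo-header:
 //length, protocol, and both addresses (src and dst are adjacent in the
 //IPv4 header, so a single u64 read picks up both)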
125  if (PREDICT_FALSE (!*checksum))
126  {
127  u16 udp_len =
128  clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
129  csum = ip_incremental_checksum (0, udp, udp_len);
130  csum =
131  ip_csum_with_carry (csum, clib_host_to_net_u16 (udp_len));
132  csum =
133  ip_csum_with_carry (csum,
134  clib_host_to_net_u16 (IP_PROTOCOL_UDP));
135  csum =
136  ip_csum_with_carry (csum, *((u64 *) (&ip4->src_address)));
137  *checksum = ~ip_csum_fold (csum);
138  }
139  }
140  else
141  {
142  checksum = &tcp->checksum;
143  }
144  }
145 
146  old_src.as_u32 = ip4->src_address.as_u32;
147  old_dst.as_u32 = ip4->dst_address.as_u32;
148 
149  // Deal with fragmented packets
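 // (an IPv6 fragment header is inserted so the translated packet keeps the
 // original fragment id and offset; it is filled in further below)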
150  u16 frag_offset = ip4_get_fragment_offset (ip4);
151  if (PREDICT_FALSE (ip4_get_fragment_more (ip4) || frag_offset))
152  {
153  ip6 =
154  (ip6_header_t *) u8_ptr_add (ip4,
155  sizeof (*ip4) - sizeof (*ip6) -
156  sizeof (*frag));
157  frag =
158  (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
159  frag_id = frag_id_4to6 (ip4->fragment_id);
160  vlib_buffer_advance (b, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
161  }
162  else
163  {
164  ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
165  vlib_buffer_advance (b, sizeof (*ip4) - sizeof (*ip6));
166  frag = NULL;
167  }
168 
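 /* Rebuild the header as IPv6 in place: version 6, traffic class from the
  * IPv4 TOS byte, hop limit from the TTL, payload length shortened by the
  * IPv4 header being replaced.  The IPv6 header is 20 bytes longer, so it
  * starts ahead of the old header and the buffer was advanced to match. */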
169   ip6->ip_version_traffic_class_and_flow_label =
170   clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
171  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
172  ip6->hop_limit = ip4->ttl;
173  ip6->protocol = ip4->protocol;
174 
175  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
176  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
177 
178  clib_memset (&saddr, 0, sizeof (saddr));
179  saddr.ip4.as_u32 = ip4->src_address.as_u32;
180  clib_memset (&daddr, 0, sizeof (daddr));
181  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
182 
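 /* Look up the session by outside tuple: dst = NAT64 pool address/port,
  * src = remote IPv4 host.  On a miss, fall back to the BIB entry for the
  * destination, derive the remote IPv6 address with nat64_compose_ip6()
  * and create a new session. */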
183  ste =
184  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
185  fib_index, 0);
186  if (ste)
187  {
188  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
189  if (!bibe)
190  return -1;
191  }
192  else
193  {
194  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, fib_index, 0);
195 
196  if (!bibe)
197  return -1;
198 
199  nat64_compose_ip6 (&ip6_saddr, &old_src, bibe->fib_index);
200  ste =
201  nat64_db_st_entry_create (ctx->thread_index, db, bibe, &ip6_saddr,
202  &saddr.ip4, sport);
203 
204  if (!ste)
205  return -1;
206 
207   vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
208   db->st.st_entries_num);
209  }
210 
211  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
212  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
213 
214  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
215  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
216 
217  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
218 
219  nat64_session_reset_timeout (ste, ctx->vm);
220 
221  if (PREDICT_FALSE (frag != NULL))
222  {
223  frag->next_hdr = ip6->protocol;
224  frag->identification = frag_id;
225  frag->rsv = 0;
226  frag->fragment_offset_and_more =
227  ip6_frag_hdr_offset_and_more (frag_offset, 1);
228  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
229  ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
230  }
231 
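 /* First (or only) fragments also get the destination port rewritten to
  * the inside port from the BIB, and the L4 checksum is patched
  * incrementally for the changed port and addresses. */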
232  if (!vnet_buffer (b)->ip.reass.is_non_first_fragment)
233  {
234  udp->dst_port = bibe->in_port;
235 
236  if (proto == IP_PROTOCOL_TCP)
237  {
238  nat64_tcp_session_set_state (ste, tcp, 0);
239  }
240 
241  csum = ip_csum_sub_even (*checksum, dport);
242  csum = ip_csum_add_even (csum, udp->dst_port);
243  csum = ip_csum_sub_even (csum, old_src.as_u32);
244  csum = ip_csum_sub_even (csum, old_dst.as_u32);
245  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
246  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
247  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
248  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
249  *checksum = ip_csum_fold (csum);
250  }
251 
252  return 0;
253 }
254 
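/* icmp_to_icmp6() callback for the outer header of an ICMP message.
 * Echo request/reply are matched against ICMP sessions by identifier;
 * for other ICMP types the addresses are taken from the embedded packet. */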
255 static int
256 nat64_out2in_icmp_set_cb (vlib_buffer_t * b, ip4_header_t * ip4,
257   ip6_header_t * ip6, void *arg)
258 {
259  nat64_main_t *nm = &nat64_main;
260   nat64_out2in_set_ctx_t *ctx = arg;
261   nat64_db_bib_entry_t *bibe;
262  nat64_db_st_entry_t *ste;
263  ip46_address_t saddr, daddr;
264  ip6_address_t ip6_saddr;
265  u32 sw_if_index, fib_index;
266  icmp46_header_t *icmp = ip4_next_header (ip4);
267  nat64_db_t *db = &nm->db[ctx->thread_index];
268 
269  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
270  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
271 
272  clib_memset (&saddr, 0, sizeof (saddr));
273  saddr.ip4.as_u32 = ip4->src_address.as_u32;
274  clib_memset (&daddr, 0, sizeof (daddr));
275  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
276 
277  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
278  {
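 /* the echo identifier is the third 16-bit word of the ICMP header */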
279  u16 out_id = ((u16 *) (icmp))[2];
280  ste =
281  nat64_db_st_entry_find (db, &daddr, &saddr, out_id, 0,
282  IP_PROTOCOL_ICMP, fib_index, 0);
283 
284  if (ste)
285  {
286  bibe =
287  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
288  ste->bibe_index);
289  if (!bibe)
290  return -1;
291  }
292  else
293  {
294  bibe =
295  nat64_db_bib_entry_find (db, &daddr, out_id,
296  IP_PROTOCOL_ICMP, fib_index, 0);
297  if (!bibe)
298  return -1;
299 
300  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
301  ste =
302   nat64_db_st_entry_create (ctx->thread_index, db,
303   bibe, &ip6_saddr, &saddr.ip4, 0);
304 
305  if (!ste)
306  return -1;
307 
308   vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
309   db->st.st_entries_num);
310  }
311 
312  nat64_session_reset_timeout (ste, ctx->vm);
313 
314  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
315  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
316 
317  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
318  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
319  ((u16 *) (icmp))[2] = bibe->in_port;
320 
321  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
322  }
323  else
324  {
325  ip6_header_t *inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
326 
327   nat64_compose_ip6 (&ip6->src_address, &ip4->src_address,
328   vnet_buffer (ctx->b)->sw_if_index[VLIB_TX]);
329  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
330  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
331  }
332 
333  return 0;
334 }
335 
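/* icmp_to_icmp6() callback for the packet embedded in an ICMP error.
 * The inner packet travels in the opposite (in2out) direction, so the
 * session lookup uses the source/destination roles swapped. */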
336 static int
337 nat64_out2in_inner_icmp_set_cb (vlib_buffer_t * b, ip4_header_t * ip4,
338   ip6_header_t * ip6, void *arg)
339 {
340  nat64_main_t *nm = &nat64_main;
341   nat64_out2in_set_ctx_t *ctx = arg;
342   nat64_db_bib_entry_t *bibe;
343  nat64_db_st_entry_t *ste;
344  ip46_address_t saddr, daddr;
345  u32 sw_if_index, fib_index;
346  u8 proto = ip4->protocol;
347  nat64_db_t *db = &nm->db[ctx->thread_index];
348 
349  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
350  fib_index =
351   fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
352 
353  clib_memset (&saddr, 0, sizeof (saddr));
354  saddr.ip4.as_u32 = ip4->src_address.as_u32;
355  clib_memset (&daddr, 0, sizeof (daddr));
356  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
357 
358  if (proto == IP_PROTOCOL_ICMP6)
359  {
360  icmp46_header_t *icmp = ip4_next_header (ip4);
361  u16 out_id = ((u16 *) (icmp))[2];
362  proto = IP_PROTOCOL_ICMP;
363 
364  if (!
365  (icmp->type == ICMP6_echo_request
366  || icmp->type == ICMP6_echo_reply))
367  return -1;
368 
369  ste =
370  nat64_db_st_entry_find (db, &saddr, &daddr, out_id, 0, proto,
371  fib_index, 0);
372  if (!ste)
373  return -1;
374 
375  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
376  if (!bibe)
377  return -1;
378 
379  ip6->dst_address.as_u64[0] = ste->in_r_addr.as_u64[0];
380  ip6->dst_address.as_u64[1] = ste->in_r_addr.as_u64[1];
381  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
382  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
383  ((u16 *) (icmp))[2] = bibe->in_port;
384 
385  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
386  }
387  else
388  {
389  udp_header_t *udp = ip4_next_header (ip4);
390  tcp_header_t *tcp = ip4_next_header (ip4);
391  u16 dport = udp->dst_port;
392  u16 sport = udp->src_port;
393  u16 *checksum;
394  ip_csum_t csum;
395 
396  ste =
397  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
398  fib_index, 0);
399  if (!ste)
400  return -1;
401 
402  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
403  if (!bibe)
404  return -1;
405 
406  nat64_compose_ip6 (&ip6->dst_address, &daddr.ip4, bibe->fib_index);
407  ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
408  ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
409  udp->src_port = bibe->in_port;
410 
411  if (proto == IP_PROTOCOL_UDP)
412  checksum = &udp->checksum;
413  else
414  checksum = &tcp->checksum;
415  if (*checksum)
416  {
417  csum = ip_csum_sub_even (*checksum, sport);
418  csum = ip_csum_add_even (csum, udp->src_port);
419  *checksum = ip_csum_fold (csum);
420  }
421 
422  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
423  }
424 
425  return 0;
426 }
427 
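/* Translation for protocols other than TCP/UDP/ICMP: sessions are keyed
 * on addresses only (both ports are zero). */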
428 static int
429 nat64_out2in_unk_proto (vlib_main_t * vm, vlib_buffer_t * p,
430   nat64_out2in_set_ctx_t * ctx)
431 {
432   ip4_header_t *ip4 = vlib_buffer_get_current (p);
433   ip6_header_t *ip6;
434  ip6_frag_hdr_t *frag;
435  u32 frag_id;
436 
437  nat64_main_t *nm = &nat64_main;
438  nat64_db_bib_entry_t *bibe;
439  nat64_db_st_entry_t *ste;
440  ip46_address_t saddr, daddr;
441  ip6_address_t ip6_saddr;
442  u32 sw_if_index, fib_index;
443  u8 proto = ip4->protocol;
444  nat64_db_t *db = &nm->db[ctx->thread_index];
445 
446  // Deal with fragmented packets
447  u16 frag_offset = ip4_get_fragment_offset (ip4);
448  if (PREDICT_FALSE (ip4_get_fragment_more (ip4) || frag_offset))
449  {
450  ip6 =
451  (ip6_header_t *) u8_ptr_add (ip4,
452  sizeof (*ip4) - sizeof (*ip6) -
453  sizeof (*frag));
454  frag =
455  (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
456  frag_id = frag_id_4to6 (ip4->fragment_id);
457  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6) - sizeof (*frag));
458  }
459  else
460  {
461  ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
462  vlib_buffer_advance (p, sizeof (*ip4) - sizeof (*ip6));
463  frag = NULL;
464  }
465 
466   ip6->ip_version_traffic_class_and_flow_label =
467   clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
468  ip6->payload_length = u16_net_add (ip4->length, -sizeof (*ip4));
469  ip6->hop_limit = ip4->ttl;
470  ip6->protocol = ip4->protocol;
471 
472  if (PREDICT_FALSE (frag != NULL))
473  {
474  frag->next_hdr = ip6->protocol;
475  frag->identification = frag_id;
476  frag->rsv = 0;
477  frag->fragment_offset_and_more =
478  ip6_frag_hdr_offset_and_more (frag_offset, 1);
479  ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
480  ip6->payload_length = u16_net_add (ip6->payload_length, sizeof (*frag));
481  }
482 
483  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
484  fib_index = ip4_fib_table_get_index_for_sw_if_index (sw_if_index);
485 
486  clib_memset (&saddr, 0, sizeof (saddr));
487  saddr.ip4.as_u32 = ip4->src_address.as_u32;
488  clib_memset (&daddr, 0, sizeof (daddr));
489  daddr.ip4.as_u32 = ip4->dst_address.as_u32;
490 
491  ste =
492  nat64_db_st_entry_find (db, &daddr, &saddr, 0, 0, proto, fib_index, 0);
493  if (ste)
494  {
495  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
496  if (!bibe)
497  return -1;
498  }
499  else
500  {
501  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, fib_index, 0);
502 
503  if (!bibe)
504  return -1;
505 
506  nat64_compose_ip6 (&ip6_saddr, &ip4->src_address, bibe->fib_index);
507  ste = nat64_db_st_entry_create (ctx->thread_index, db,
508  bibe, &ip6_saddr, &saddr.ip4, 0);
509 
510  if (!ste)
511  return -1;
512 
513   vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
514   db->st.st_entries_num);
515  }
516 
517  nat64_session_reset_timeout (ste, ctx->vm);
518 
519  ip6->src_address.as_u64[0] = ste->in_r_addr.as_u64[0];
520  ip6->src_address.as_u64[1] = ste->in_r_addr.as_u64[1];
521 
522  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
523  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
524 
525  vnet_buffer (ctx->b)->sw_if_index[VLIB_TX] = bibe->fib_index;
526 
527  return 0;
528 }
529 
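/* nat64-out2in node: each buffer is speculatively enqueued to ip6-lookup,
 * then handed to the per-protocol translation routine; on failure it is
 * dropped (or sent to ip4-lookup for DHCP-to-client UDP). */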
530 VLIB_NODE_FN (nat64_out2in_node) (vlib_main_t * vm,
531   vlib_node_runtime_t * node,
532   vlib_frame_t * frame)
533 {
534  u32 n_left_from, *from, *to_next;
535  nat64_out2in_next_t next_index;
536  nat64_main_t *nm = &nat64_main;
537  u32 pkts_processed = 0;
538   u32 thread_index = vm->thread_index;
539   u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
540  0, fragments = 0;
541 
542  from = vlib_frame_vector_args (frame);
543  n_left_from = frame->n_vectors;
544  next_index = node->cached_next_index;
545  while (n_left_from > 0)
546  {
547  u32 n_left_to_next;
548 
549  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
550 
551  while (n_left_from > 0 && n_left_to_next > 0)
552  {
553  u32 bi0;
554  vlib_buffer_t *b0;
555  u32 next0;
556  ip4_header_t *ip40;
557  u32 proto0;
558   nat64_out2in_set_ctx_t ctx0;
559   udp_header_t *udp0;
560 
561  /* speculatively enqueue b0 to the current next frame */
562  bi0 = from[0];
563  to_next[0] = bi0;
564  from += 1;
565  to_next += 1;
566  n_left_from -= 1;
567  n_left_to_next -= 1;
568 
569  b0 = vlib_get_buffer (vm, bi0);
570  ip40 = vlib_buffer_get_current (b0);
571 
572  ctx0.b = b0;
573  ctx0.vm = vm;
574  ctx0.thread_index = thread_index;
575 
576   next0 = NAT64_OUT2IN_NEXT_IP6_LOOKUP;
577 
578  proto0 = ip_proto_to_nat_proto (ip40->protocol);
579 
580  if (PREDICT_FALSE (proto0 == NAT_PROTOCOL_OTHER))
581  {
582  if (nat64_out2in_unk_proto (vm, b0, &ctx0))
583  {
584  next0 = NAT64_OUT2IN_NEXT_DROP;
585  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
586  }
587  other_packets++;
588  goto trace0;
589  }
590 
591  if (proto0 == NAT_PROTOCOL_ICMP)
592  {
593  icmp_packets++;
594  if (icmp_to_icmp6
595  (b0, nat64_out2in_icmp_set_cb, &ctx0,
596   nat64_out2in_inner_icmp_set_cb, &ctx0))
597   {
598  next0 = NAT64_OUT2IN_NEXT_DROP;
599  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
600  goto trace0;
601  }
602  }
603  else
604  {
605  if (proto0 == NAT_PROTOCOL_TCP)
606  tcp_packets++;
607  else
608  udp_packets++;
609 
610  if (nat64_out2in_tcp_udp (vm, b0, &ctx0))
611  {
612  udp0 = ip4_next_header (ip40);
613  /*
614  * Send DHCP packets to the ipv4 stack, or we won't
615  * be able to use dhcp client on the outside interface
616  */
617  if ((proto0 == NAT_PROTOCOL_UDP)
618  && (udp0->dst_port ==
619  clib_host_to_net_u16 (UDP_DST_PORT_dhcp_to_client)))
620  {
621   next0 = NAT64_OUT2IN_NEXT_IP4_LOOKUP;
622   goto trace0;
623  }
624  next0 = NAT64_OUT2IN_NEXT_DROP;
625  b0->error = node->errors[NAT64_OUT2IN_ERROR_NO_TRANSLATION];
626  goto trace0;
627  }
628  }
629 
630  trace0:
631  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
632  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
633  {
634   nat64_out2in_trace_t *t =
635   vlib_add_trace (vm, node, b0, sizeof (*t));
636  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
637  t->next_index = next0;
638  }
639 
640  pkts_processed += next0 == NAT64_OUT2IN_NEXT_IP6_LOOKUP;
641 
642  /* verify speculative enqueue, maybe switch current next frame */
643  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
644  n_left_to_next, bi0, next0);
645  }
646  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
647  }
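 /* publish per-frame totals as node error counters (visible via "show errors") */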
648   vlib_node_increment_counter (vm, nm->out2in_node_index,
649   NAT64_OUT2IN_ERROR_OUT2IN_PACKETS,
650   pkts_processed);
651   vlib_node_increment_counter (vm, nm->out2in_node_index,
652   NAT64_OUT2IN_ERROR_TCP_PACKETS, tcp_packets);
653   vlib_node_increment_counter (vm, nm->out2in_node_index,
654   NAT64_OUT2IN_ERROR_UDP_PACKETS, udp_packets);
655   vlib_node_increment_counter (vm, nm->out2in_node_index,
656   NAT64_OUT2IN_ERROR_ICMP_PACKETS, icmp_packets);
657   vlib_node_increment_counter (vm, nm->out2in_node_index,
658   NAT64_OUT2IN_ERROR_OTHER_PACKETS,
659   other_packets);
660   vlib_node_increment_counter (vm, nm->out2in_node_index,
661   NAT64_OUT2IN_ERROR_FRAGMENTS, fragments);
662 
663  return frame->n_vectors;
664 }
665 
666 /* *INDENT-OFF* */
667 VLIB_REGISTER_NODE (nat64_out2in_node) = {
668   .name = "nat64-out2in",
669   .vector_size = sizeof (u32),
670   .format_trace = format_nat64_out2in_trace,
671   .type = VLIB_NODE_TYPE_INTERNAL,
672   .n_errors = ARRAY_LEN (nat64_out2in_error_strings),
673   .error_strings = nat64_out2in_error_strings,
674  .n_next_nodes = NAT64_OUT2IN_N_NEXT,
675  /* edit / add dispositions here */
676  .next_nodes = {
677  [NAT64_OUT2IN_NEXT_DROP] = "error-drop",
678  [NAT64_OUT2IN_NEXT_IP6_LOOKUP] = "ip6-lookup",
679  [NAT64_OUT2IN_NEXT_IP4_LOOKUP] = "ip4-lookup",
680  },
681 };
682 /* *INDENT-ON* */
683 
684 typedef struct nat64_out2in_frag_set_ctx_t_
685 {
686   vlib_main_t *vm;
687   vlib_buffer_t *b;
688   u32 sess_index;
689   u32 thread_index;
690   u8 proto;
691   u8 first_frag;
692 } nat64_out2in_frag_set_ctx_t;
693 
694 #define foreach_nat64_out2in_handoff_error \
695 _(CONGESTION_DROP, "congestion drop") \
696 _(SAME_WORKER, "same worker") \
697 _(DO_HANDOFF, "do handoff")
698 
699 typedef enum
700 {
701 #define _(sym,str) NAT64_OUT2IN_HANDOFF_ERROR_##sym,
702   foreach_nat64_out2in_handoff_error
703 #undef _
704   NAT64_OUT2IN_HANDOFF_N_ERROR,
705 } nat64_out2in_handoff_error_t;
706 
707 static char *nat64_out2in_handoff_error_strings[] = {
708 #define _(sym,string) string,
709   foreach_nat64_out2in_handoff_error
710 #undef _
711 };
712 
713 typedef struct
714 {
715   u32 next_worker_index;
716 } nat64_out2in_handoff_trace_t;
717 
718 static u8 *
719 format_nat64_out2in_handoff_trace (u8 * s, va_list * args)
720 {
721  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
722  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
723   nat64_out2in_handoff_trace_t *t =
724   va_arg (*args, nat64_out2in_handoff_trace_t *);
725 
726  s =
727  format (s, "NAT64-OUT2IN-HANDOFF: next-worker %d", t->next_worker_index);
728 
729  return s;
730 }
731 
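/* nat64-out2in-handoff node: selects the owning worker for every packet
 * with nat64_get_worker_out2in() and enqueues the buffers to that thread's
 * frame queue; packets already on the right worker are counted separately. */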
732 VLIB_NODE_FN (nat64_out2in_handoff_node) (vlib_main_t * vm,
733   vlib_node_runtime_t * node,
734   vlib_frame_t * frame)
735 {
736  nat64_main_t *nm = &nat64_main;
737  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
738  u32 n_enq, n_left_from, *from;
739  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
740  u32 fq_index;
741   u32 thread_index = vm->thread_index;
742   u32 do_handoff = 0, same_worker = 0;
743 
744  from = vlib_frame_vector_args (frame);
745  n_left_from = frame->n_vectors;
746  vlib_get_buffers (vm, from, bufs, n_left_from);
747 
748  b = bufs;
749  ti = thread_indices;
750 
751  fq_index = nm->fq_out2in_index;
752 
753  while (n_left_from > 0)
754  {
755  ip4_header_t *ip0;
756 
757  ip0 = vlib_buffer_get_current (b[0]);
758  ti[0] = nat64_get_worker_out2in (b[0], ip0);
759 
760  if (ti[0] != thread_index)
761  do_handoff++;
762  else
763  same_worker++;
764 
765  if (PREDICT_FALSE
766  ((node->flags & VLIB_NODE_FLAG_TRACE)
767  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
768  {
769   nat64_out2in_handoff_trace_t *t =
770   vlib_add_trace (vm, node, b[0], sizeof (*t));
771  t->next_worker_index = ti[0];
772  }
773 
774  n_left_from -= 1;
775  ti += 1;
776  b += 1;
777  }
778 
779  n_enq =
780  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
781  frame->n_vectors, 1);
782 
783  if (n_enq < frame->n_vectors)
784  vlib_node_increment_counter (vm, node->node_index,
785  NAT64_OUT2IN_HANDOFF_ERROR_CONGESTION_DROP,
786  frame->n_vectors - n_enq);
787  vlib_node_increment_counter (vm, node->node_index,
788  NAT64_OUT2IN_HANDOFF_ERROR_SAME_WORKER,
789  same_worker);
790  vlib_node_increment_counter (vm, node->node_index,
791  NAT64_OUT2IN_HANDOFF_ERROR_DO_HANDOFF,
792  do_handoff);
793 
794  return frame->n_vectors;
795 }
796 
797 /* *INDENT-OFF* */
798 VLIB_REGISTER_NODE (nat64_out2in_handoff_node) = {
799   .name = "nat64-out2in-handoff",
800   .vector_size = sizeof (u32),
801   .format_trace = format_nat64_out2in_handoff_trace,
802   .type = VLIB_NODE_TYPE_INTERNAL,
803   .n_errors = ARRAY_LEN (nat64_out2in_handoff_error_strings),
804   .error_strings = nat64_out2in_handoff_error_strings,
805 
806  .n_next_nodes = 1,
807 
808  .next_nodes = {
809  [0] = "error-drop",
810  },
811 };
812 /* *INDENT-ON* */
813 
814 /*
815  * fd.io coding-style-patch-verification: ON
816  *
817  * Local Variables:
818  * eval: (c-set-style "gnu")
819  * End:
820  */