FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
nat64_in2out.c
1 /*
2  * Copyright (c) 2017 Cisco and/or its affiliates.
3  * Licensed under the Apache License, Version 2.0 (the "License");
4  * you may not use this file except in compliance with the License.
5  * You may obtain a copy of the License at:
6  *
7  * http://www.apache.org/licenses/LICENSE-2.0
8  *
9  * Unless required by applicable law or agreed to in writing, software
10  * distributed under the License is distributed on an "AS IS" BASIS,
11  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12  * See the License for the specific language governing permissions and
13  * limitations under the License.
14  */
15 /**
16  * @file
17  * @brief NAT64 IPv6 to IPv4 translation (inside to outside network)
18  */
19 
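/*
 * Dispatch overview (a summary of the nodes defined below): the fast-path
 * node "nat64-in2out" translates TCP, UDP and ICMP; packets carrying any
 * other L4 protocol are punted to "nat64-in2out-slowpath", fragmented
 * packets go through "nat64-in2out-reass", and packets whose destination
 * is one of the NAT64 pool addresses are hairpinned back via ip6-lookup.
 * With multiple workers, "nat64-in2out-handoff" first moves each packet
 * to the worker that owns its session.
 */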
20 #include <nat/nat64.h>
21 #include <nat/nat_reass.h>
22 #include <nat/nat_inlines.h>
23 #include <vnet/ip/ip6_to_ip4.h>
24 #include <vnet/fib/fib_table.h>
25 
26 typedef struct
27 {
28  u32 sw_if_index;
29  u32 next_index;
30  u8 is_slow_path;
31 } nat64_in2out_trace_t;
32 
33 static u8 *
34 format_nat64_in2out_trace (u8 * s, va_list * args)
35 {
36  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
37  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
38  nat64_in2out_trace_t *t = va_arg (*args, nat64_in2out_trace_t *);
39  char *tag;
40 
41  tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
42 
43  s =
44  format (s, "%s: sw_if_index %d, next index %d", tag, t->sw_if_index,
45  t->next_index);
46 
47  return s;
48 }
49 
50 typedef struct
51 {
52  u32 sw_if_index;
53  u32 next_index;
54  u8 cached;
55 } nat64_in2out_reass_trace_t;
56 
57 static u8 *
58 format_nat64_in2out_reass_trace (u8 * s, va_list * args)
59 {
60  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62  nat64_in2out_reass_trace_t *t =
63  va_arg (*args, nat64_in2out_reass_trace_t *);
64 
65  s =
66  format (s, "NAT64-in2out-reass: sw_if_index %d, next index %d, status %s",
67  t->sw_if_index, t->next_index,
68  t->cached ? "cached" : "translated");
69 
70  return s;
71 }
72 
73 
74 #define foreach_nat64_in2out_error \
75 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \
76 _(IN2OUT_PACKETS, "good in2out packets processed") \
77 _(NO_TRANSLATION, "no translation") \
78 _(UNKNOWN, "unknown") \
79 _(DROP_FRAGMENT, "drop fragment") \
80 _(MAX_REASS, "maximum reassemblies exceeded") \
81 _(MAX_FRAG, "maximum fragments per reassembly exceeded") \
82 _(TCP_PACKETS, "TCP packets") \
83 _(UDP_PACKETS, "UDP packets") \
84 _(ICMP_PACKETS, "ICMP packets") \
85 _(OTHER_PACKETS, "other protocol packets") \
86 _(FRAGMENTS, "fragments") \
87 _(CACHED_FRAGMENTS, "cached fragments") \
88 _(PROCESSED_FRAGMENTS, "processed fragments")
89 
90 
91 typedef enum
92 {
93 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
94  foreach_nat64_in2out_error
95 #undef _
96  NAT64_IN2OUT_N_ERROR,
97 } nat64_in2out_error_t;
98 
99 static char *nat64_in2out_error_strings[] = {
100 #define _(sym,string) string,
101  foreach_nat64_in2out_error
102 #undef _
103 };
104 
105 typedef enum
106 {
107  NAT64_IN2OUT_NEXT_IP4_LOOKUP,
108  NAT64_IN2OUT_NEXT_IP6_LOOKUP,
109  NAT64_IN2OUT_NEXT_DROP,
110  NAT64_IN2OUT_NEXT_SLOWPATH,
111  NAT64_IN2OUT_NEXT_REASS,
112  NAT64_IN2OUT_N_NEXT,
113 } nat64_in2out_next_t;
114 
115 typedef struct nat64_in2out_set_ctx_t_
116 {
117  vlib_buffer_t *b;
118  vlib_main_t *vm;
119  u32 thread_index;
120 } nat64_in2out_set_ctx_t;
121 
122 static inline u8
123 nat64_not_translate (u32 sw_if_index, ip6_address_t ip6_addr)
124 {
125  ip6_address_t *addr;
126  ip6_main_t *im6 = &ip6_main;
127  ip_lookup_main_t *lm6 = &im6->lookup_main;
128  ip_interface_address_t *ia = 0;
129 
130  /* *INDENT-OFF* */
131  foreach_ip_interface_address (lm6, ia, sw_if_index, 0,
132  ({
133  addr = ip_interface_address_get_address (lm6, ia);
134  if (0 == ip6_address_compare (addr, &ip6_addr))
135  return 1;
136  }));
137  /* *INDENT-ON* */
138 
139  return 0;
140 }
141 
142 /**
143  * @brief Check whether the packet is being hairpinned.
144  *
145  * If the destination IP address of the packet is an IPv4 address assigned to
146  * the NAT64 itself, then the packet is a hairpin packet.
147  *
148  * @param dst_addr Destination address of the packet.
149  *
150  * @returns 1 if hairpinning, otherwise 0.
151  */
152 static_always_inline int
153 is_hairpinning (ip6_address_t * dst_addr)
154 {
155  nat64_main_t *nm = &nat64_main;
156  int i;
157 
158  for (i = 0; i < vec_len (nm->addr_pool); i++)
159  {
160  if (nm->addr_pool[i].addr.as_u32 == dst_addr->as_u32[3])
161  return 1;
162  }
163 
164  return 0;
165 }
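/*
 * Note: the pool comparison above looks only at dst_addr->as_u32[3]; with
 * the usual /96 NAT64 prefix (RFC 6052, e.g. 64:ff9b::/96) the embedded
 * IPv4 address occupies exactly those last 32 bits, so for example
 * 64:ff9b::c000:201 carries 192.0.2.1 there.
 */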
166 
167 static int
168 nat64_in2out_tcp_udp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
169  void *arg)
170 {
171  nat64_main_t *nm = &nat64_main;
172  nat64_in2out_set_ctx_t *ctx = arg;
173  nat64_db_bib_entry_t *bibe;
174  nat64_db_st_entry_t *ste;
175  ip46_address_t saddr, daddr;
176  u32 sw_if_index, fib_index;
177  udp_header_t *udp = ip6_next_header (ip6);
178  u8 proto = ip6->protocol;
179  u16 sport = udp->src_port;
180  u16 dport = udp->dst_port;
181  nat64_db_t *db = &nm->db[ctx->thread_index];
182 
183  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
184  fib_index =
185  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
186 
187  saddr.as_u64[0] = ip6->src_address.as_u64[0];
188  saddr.as_u64[1] = ip6->src_address.as_u64[1];
189  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
190  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
191 
192  ste =
193  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
194  fib_index, 1);
195 
196  if (ste)
197  {
198  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
199  if (!bibe)
200  return -1;
201  }
202  else
203  {
204  bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
205 
206  if (!bibe)
207  {
208  u16 out_port;
209  ip4_address_t out_addr;
210  if (nat64_alloc_out_addr_and_port
211  (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
212  &out_port, ctx->thread_index))
213  return -1;
214 
215  bibe =
216  nat64_db_bib_entry_create (ctx->thread_index, db,
217  &ip6->src_address, &out_addr, sport,
218  out_port, fib_index, proto, 0);
219  if (!bibe)
220  return -1;
221 
222  vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
223  db->bib.bib_entries_num);
224  }
225 
226  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
227  ste =
228  nat64_db_st_entry_create (ctx->thread_index, db, bibe,
229  &ip6->dst_address, &daddr.ip4, dport);
230  if (!ste)
231  return -1;
232 
233  vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
234  db->st.st_entries_num);
235  }
236 
237  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
238  udp->src_port = bibe->out_port;
239 
240  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
241 
242  if (proto == IP_PROTOCOL_TCP)
243  {
244  u16 *checksum;
245  ip_csum_t csum;
246  tcp_header_t *tcp = ip6_next_header (ip6);
247 
248  nat64_tcp_session_set_state (ste, tcp, 1);
249  checksum = &tcp->checksum;
250  csum = ip_csum_sub_even (*checksum, sport);
251  csum = ip_csum_add_even (csum, udp->src_port);
252  mss_clamping (nm->sm, tcp, &csum);
253  *checksum = ip_csum_fold (csum);
254  }
255 
256  nat64_session_reset_timeout (ste, ctx->vm);
257 
258  return 0;
259 }
260 
261 static int
262 nat64_in2out_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
263 {
264  nat64_main_t *nm = &nat64_main;
265  nat64_in2out_set_ctx_t *ctx = arg;
266  nat64_db_bib_entry_t *bibe;
267  nat64_db_st_entry_t *ste;
268  ip46_address_t saddr, daddr;
269  u32 sw_if_index, fib_index;
270  icmp46_header_t *icmp = ip6_next_header (ip6);
271  nat64_db_t *db = &nm->db[ctx->thread_index];
272 
273  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
274  fib_index =
275  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
276 
277  saddr.as_u64[0] = ip6->src_address.as_u64[0];
278  saddr.as_u64[1] = ip6->src_address.as_u64[1];
279  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
280  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
281 
282  if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
283  {
284  u16 in_id = ((u16 *) (icmp))[2];
285  ste =
286  nat64_db_st_entry_find (db, &saddr, &daddr, in_id, 0,
287  IP_PROTOCOL_ICMP, fib_index, 1);
288 
289  if (ste)
290  {
291  bibe =
292  nat64_db_bib_entry_by_index (db, IP_PROTOCOL_ICMP,
293  ste->bibe_index);
294  if (!bibe)
295  return -1;
296  }
297  else
298  {
299  bibe =
300  nat64_db_bib_entry_find (db, &saddr, in_id,
301  IP_PROTOCOL_ICMP, fib_index, 1);
302 
303  if (!bibe)
304  {
305  u16 out_id;
306  ip4_address_t out_addr;
307  if (nat64_alloc_out_addr_and_port
308  (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
309  ctx->thread_index))
310  return -1;
311 
312  bibe =
313  nat64_db_bib_entry_create (ctx->thread_index, db,
314  &ip6->src_address, &out_addr,
315  in_id, out_id, fib_index,
316  IP_PROTOCOL_ICMP, 0);
317  if (!bibe)
318  return -1;
319 
320  vlib_set_simple_counter (&nm->total_bibs, ctx->thread_index, 0,
321  db->bib.bib_entries_num);
322  }
323 
324  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
325  ste =
326  nat64_db_st_entry_create (ctx->thread_index, db, bibe,
327  &ip6->dst_address, &daddr.ip4, 0);
328  if (!ste)
329  return -1;
330 
331  vlib_set_simple_counter (&nm->total_sessions, ctx->thread_index, 0,
332  db->st.st_entries_num);
333  }
334 
335  nat64_session_reset_timeout (ste, ctx->vm);
336 
337  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
338  ((u16 *) (icmp))[2] = bibe->out_port;
339 
340  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
341  }
342  else
343  {
344  if (!vec_len (nm->addr_pool))
345  return -1;
346 
347  ip4->src_address.as_u32 = nm->addr_pool[0].addr.as_u32;
348  nat64_extract_ip4 (&ip6->dst_address, &ip4->dst_address, fib_index);
349  }
350 
351  return 0;
352 }
353 
354 static int
355 nat64_in2out_inner_icmp_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
356  void *arg)
357 {
358  nat64_main_t *nm = &nat64_main;
359  nat64_in2out_set_ctx_t *ctx = arg;
360  nat64_db_st_entry_t *ste;
361  nat64_db_bib_entry_t *bibe;
362  ip46_address_t saddr, daddr;
363  u32 sw_if_index, fib_index;
364  u8 proto = ip6->protocol;
365  nat64_db_t *db = &nm->db[ctx->thread_index];
366 
367  sw_if_index = vnet_buffer (ctx->b)->sw_if_index[VLIB_RX];
368  fib_index =
369  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
370 
371  saddr.as_u64[0] = ip6->src_address.as_u64[0];
372  saddr.as_u64[1] = ip6->src_address.as_u64[1];
373  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
374  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
375 
376  if (proto == IP_PROTOCOL_ICMP6)
377  {
378  icmp46_header_t *icmp = ip6_next_header (ip6);
379  u16 in_id = ((u16 *) (icmp))[2];
380  proto = IP_PROTOCOL_ICMP;
381 
382  if (!
383  (icmp->type == ICMP4_echo_request
384  || icmp->type == ICMP4_echo_reply))
385  return -1;
386 
387  ste =
388  nat64_db_st_entry_find (db, &daddr, &saddr, in_id, 0, proto,
389  fib_index, 1);
390  if (!ste)
391  return -1;
392 
393  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
394  if (!bibe)
395  return -1;
396 
397  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
398  ((u16 *) (icmp))[2] = bibe->out_port;
399  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
400  }
401  else
402  {
403  udp_header_t *udp = ip6_next_header (ip6);
404  tcp_header_t *tcp = ip6_next_header (ip6);
405  u16 *checksum;
406  ip_csum_t csum;
407 
408  u16 sport = udp->src_port;
409  u16 dport = udp->dst_port;
410 
411  ste =
412  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
413  fib_index, 1);
414  if (!ste)
415  return -1;
416 
417  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
418  if (!bibe)
419  return -1;
420 
421  ip4->dst_address.as_u32 = bibe->out_addr.as_u32;
422  udp->dst_port = bibe->out_port;
423  ip4->src_address.as_u32 = ste->out_r_addr.as_u32;
424 
425  if (proto == IP_PROTOCOL_TCP)
426  checksum = &tcp->checksum;
427  else
428  checksum = &udp->checksum;
429  csum = ip_csum_sub_even (*checksum, dport);
430  csum = ip_csum_add_even (csum, udp->dst_port);
431  *checksum = ip_csum_fold (csum);
432  }
433 
434  return 0;
435 }
436 
437 typedef struct unk_proto_st_walk_ctx_t_
438 {
439  ip6_address_t src_addr;
440  ip6_address_t dst_addr;
441  ip4_address_t out_addr;
442  u32 fib_index;
443  u32 thread_index;
444  u8 proto;
445 } unk_proto_st_walk_ctx_t;
446 
447 static int
448 unk_proto_st_walk (nat64_db_st_entry_t * ste, void *arg)
449 {
450  nat64_main_t *nm = &nat64_main;
451  unk_proto_st_walk_ctx_t *ctx = arg;
452  nat64_db_bib_entry_t *bibe;
453  ip46_address_t saddr, daddr;
454  nat64_db_t *db = &nm->db[ctx->thread_index];
455 
456  if (ip46_address_is_equal (&ste->in_r_addr, &ctx->dst_addr))
457  {
458  bibe = nat64_db_bib_entry_by_index (db, ste->proto, ste->bibe_index);
459  if (!bibe)
460  return -1;
461 
462  if (ip46_address_is_equal (&bibe->in_addr, &ctx->src_addr)
463  && bibe->fib_index == ctx->fib_index)
464  {
465  clib_memset (&saddr, 0, sizeof (saddr));
466  saddr.ip4.as_u32 = bibe->out_addr.as_u32;
467  clib_memset (&daddr, 0, sizeof (daddr));
468  nat64_extract_ip4 (&ctx->dst_addr, &daddr.ip4, ctx->fib_index);
469 
470  if (nat64_db_st_entry_find
471  (db, &daddr, &saddr, 0, 0, ctx->proto, ctx->fib_index, 0))
472  return -1;
473 
474  ctx->out_addr.as_u32 = bibe->out_addr.as_u32;
475  return 1;
476  }
477  }
478 
479  return 0;
480 }
481 
482 static int
483 nat64_in2out_unk_proto_set_cb (ip6_header_t * ip6, ip4_header_t * ip4,
484  void *arg)
485 {
486  nat64_main_t *nm = &nat64_main;
487  nat64_in2out_set_ctx_t *s_ctx = arg;
488  nat64_db_bib_entry_t *bibe;
489  nat64_db_st_entry_t *ste;
490  ip46_address_t saddr, daddr, addr;
491  u32 sw_if_index, fib_index;
492  u8 proto = ip6->protocol;
493  int i;
494  nat64_db_t *db = &nm->db[s_ctx->thread_index];
495 
496  sw_if_index = vnet_buffer (s_ctx->b)->sw_if_index[VLIB_RX];
497  fib_index =
498  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
499 
500  saddr.as_u64[0] = ip6->src_address.as_u64[0];
501  saddr.as_u64[1] = ip6->src_address.as_u64[1];
502  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
503  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
504 
505  ste =
506  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
507 
508  if (ste)
509  {
510  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
511  if (!bibe)
512  return -1;
513  }
514  else
515  {
516  bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
517 
518  if (!bibe)
519  {
520  /* Choose same out address as for TCP/UDP session to same dst */
521  unk_proto_st_walk_ctx_t ctx = {
522  .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
523  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
524  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
525  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
526  .out_addr.as_u32 = 0,
527  .fib_index = fib_index,
528  .proto = proto,
529  .thread_index = s_ctx->thread_index,
530  };
531 
532  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
533 
534  if (!ctx.out_addr.as_u32)
535  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
536 
537  /* Verify if out address is not already in use for protocol */
538  clib_memset (&addr, 0, sizeof (addr));
539  addr.ip4.as_u32 = ctx.out_addr.as_u32;
540  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
541  ctx.out_addr.as_u32 = 0;
542 
543  if (!ctx.out_addr.as_u32)
544  {
545  for (i = 0; i < vec_len (nm->addr_pool); i++)
546  {
547  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
548  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
549  break;
550  }
551  }
552 
553  if (!ctx.out_addr.as_u32)
554  return -1;
555 
556  bibe =
557  nat64_db_bib_entry_create (s_ctx->thread_index, db,
558  &ip6->src_address, &ctx.out_addr,
559  0, 0, fib_index, proto, 0);
560  if (!bibe)
561  return -1;
562 
563  vlib_set_simple_counter (&nm->total_bibs, s_ctx->thread_index, 0,
564  db->bib.bib_entries_num);
565  }
566 
567  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
568  ste =
569  nat64_db_st_entry_create (s_ctx->thread_index, db, bibe,
570  &ip6->dst_address, &daddr.ip4, 0);
571  if (!ste)
572  return -1;
573 
574  vlib_set_simple_counter (&nm->total_sessions, s_ctx->thread_index, 0,
575  db->st.st_entries_num);
576  }
577 
578  nat64_session_reset_timeout (ste, s_ctx->vm);
579 
580  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
581  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
582 
583  return 0;
584 }
585 
586 
587 
588 static int
589 nat64_in2out_tcp_udp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
590  ip6_header_t * ip6, u32 thread_index)
591 {
592  nat64_main_t *nm = &nat64_main;
593  nat64_db_bib_entry_t *bibe;
594  nat64_db_st_entry_t *ste;
595  ip46_address_t saddr, daddr;
596  u32 sw_if_index, fib_index;
597  udp_header_t *udp = ip6_next_header (ip6);
598  tcp_header_t *tcp = ip6_next_header (ip6);
599  u8 proto = ip6->protocol;
600  u16 sport = udp->src_port;
601  u16 dport = udp->dst_port;
602  u16 *checksum;
603  ip_csum_t csum;
604  nat64_db_t *db = &nm->db[thread_index];
605 
606  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
607  fib_index =
608  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
609 
610  saddr.as_u64[0] = ip6->src_address.as_u64[0];
611  saddr.as_u64[1] = ip6->src_address.as_u64[1];
612  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
613  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
614 
615  if (proto == IP_PROTOCOL_UDP)
616  checksum = &udp->checksum;
617  else
618  checksum = &tcp->checksum;
619 
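 /*
  * Incremental checksum update: subtract the original IPv6 pseudo-header
  * words and ports here, then add the rewritten address and port values
  * back in further below before folding.
  */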
620  csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
621  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
622  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
623  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
624  csum = ip_csum_sub_even (csum, sport);
625  csum = ip_csum_sub_even (csum, dport);
626 
627  ste =
628  nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
629  fib_index, 1);
630 
631  if (ste)
632  {
633  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
634  if (!bibe)
635  return -1;
636  }
637  else
638  {
639  bibe = nat64_db_bib_entry_find (db, &saddr, sport, proto, fib_index, 1);
640 
641  if (!bibe)
642  {
643  u16 out_port;
644  ip4_address_t out_addr;
645  if (nat64_alloc_out_addr_and_port
646  (fib_index, ip_proto_to_snat_proto (proto), &out_addr,
647  &out_port, thread_index))
648  return -1;
649 
650  bibe =
651  nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
652  &out_addr, sport, out_port, fib_index,
653  proto, 0);
654  if (!bibe)
655  return -1;
656 
657  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
658  db->bib.bib_entries_num);
659  }
660 
661  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
662  ste =
663  nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
664  &daddr.ip4, dport);
665  if (!ste)
666  return -1;
667 
668  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
669  db->st.st_entries_num);
670  }
671 
672  if (proto == IP_PROTOCOL_TCP)
673  nat64_tcp_session_set_state (ste, tcp, 1);
674 
675  nat64_session_reset_timeout (ste, vm);
676 
677  sport = udp->src_port = bibe->out_port;
678  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
679 
680  clib_memset (&daddr, 0, sizeof (daddr));
681  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
682 
683  bibe = 0;
684  /* *INDENT-OFF* */
685  vec_foreach (db, nm->db)
686  {
687  bibe = nat64_db_bib_entry_find (db, &daddr, dport, proto, 0, 0);
688 
689  if (bibe)
690  break;
691  }
692  /* *INDENT-ON* */
693 
694  if (!bibe)
695  return -1;
696 
697  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
698  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
699  udp->dst_port = bibe->in_port;
700 
701  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
702  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
703  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
704  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
705  csum = ip_csum_add_even (csum, udp->src_port);
706  csum = ip_csum_add_even (csum, udp->dst_port);
707  *checksum = ip_csum_fold (csum);
708 
709  return 0;
710 }
711 
712 static int
713 nat64_in2out_icmp_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
714  ip6_header_t * ip6, u32 thread_index)
715 {
716  nat64_main_t *nm = &nat64_main;
717  nat64_db_bib_entry_t *bibe;
718  nat64_db_st_entry_t *ste;
719  icmp46_header_t *icmp = ip6_next_header (ip6);
720  ip6_header_t *inner_ip6;
721  ip46_address_t saddr, daddr;
722  u32 sw_if_index, fib_index;
723  u8 proto;
724  udp_header_t *udp;
725  tcp_header_t *tcp;
726  u16 *checksum, sport, dport;
727  ip_csum_t csum;
728  nat64_db_t *db = &nm->db[thread_index];
729 
730  if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
731  return -1;
732 
733  inner_ip6 = (ip6_header_t *) u8_ptr_add (icmp, 8);
734 
735  proto = inner_ip6->protocol;
736 
737  if (proto == IP_PROTOCOL_ICMP6)
738  return -1;
739 
740  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
741  fib_index =
742  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
743 
744  saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
745  saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
746  daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
747  daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
748 
749  udp = ip6_next_header (inner_ip6);
750  tcp = ip6_next_header (inner_ip6);
751 
752  sport = udp->src_port;
753  dport = udp->dst_port;
754 
755  if (proto == IP_PROTOCOL_UDP)
756  checksum = &udp->checksum;
757  else
758  checksum = &tcp->checksum;
759 
760  csum = ip_csum_sub_even (*checksum, inner_ip6->src_address.as_u64[0]);
761  csum = ip_csum_sub_even (csum, inner_ip6->src_address.as_u64[1]);
762  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[0]);
763  csum = ip_csum_sub_even (csum, inner_ip6->dst_address.as_u64[1]);
764  csum = ip_csum_sub_even (csum, sport);
765  csum = ip_csum_sub_even (csum, dport);
766 
767  ste =
768  nat64_db_st_entry_find (db, &daddr, &saddr, dport, sport, proto,
769  fib_index, 1);
770  if (!ste)
771  return -1;
772 
773  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
774  if (!bibe)
775  return -1;
776 
777  dport = udp->dst_port = bibe->out_port;
778  nat64_compose_ip6 (&inner_ip6->dst_address, &bibe->out_addr, fib_index);
779 
780  clib_memset (&saddr, 0, sizeof (saddr));
781  clib_memset (&daddr, 0, sizeof (daddr));
782  saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
783  daddr.ip4.as_u32 = bibe->out_addr.as_u32;
784 
785  ste = 0;
786  /* *INDENT-OFF* */
787  vec_foreach (db, nm->db)
788  {
789  ste = nat64_db_st_entry_find (db, &saddr, &daddr, sport, dport, proto,
790  0, 0);
791 
792  if (ste)
793  break;
794  }
795  /* *INDENT-ON* */
796 
797  if (!ste)
798  return -1;
799 
800  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
801  if (!bibe)
802  return -1;
803 
804  inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
805  inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
806  udp->src_port = bibe->in_port;
807 
808  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[0]);
809  csum = ip_csum_add_even (csum, inner_ip6->src_address.as_u64[1]);
810  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[0]);
811  csum = ip_csum_add_even (csum, inner_ip6->dst_address.as_u64[1]);
812  csum = ip_csum_add_even (csum, udp->src_port);
813  csum = ip_csum_add_even (csum, udp->dst_port);
814  *checksum = ip_csum_fold (csum);
815 
816  if (!vec_len (nm->addr_pool))
817  return -1;
818 
819  nat64_compose_ip6 (&ip6->src_address, &nm->addr_pool[0].addr, fib_index);
820  ip6->dst_address.as_u64[0] = inner_ip6->src_address.as_u64[0];
821  ip6->dst_address.as_u64[1] = inner_ip6->src_address.as_u64[1];
822 
823  icmp->checksum = 0;
824  csum = ip_csum_with_carry (0, ip6->payload_length);
825  csum = ip_csum_with_carry (csum, clib_host_to_net_u16 (ip6->protocol));
826  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[0]);
827  csum = ip_csum_with_carry (csum, ip6->src_address.as_u64[1]);
828  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[0]);
829  csum = ip_csum_with_carry (csum, ip6->dst_address.as_u64[1]);
830  csum =
831  ip_incremental_checksum (csum, icmp,
832  clib_net_to_host_u16 (ip6->payload_length));
833  icmp->checksum = ~ip_csum_fold (csum);
834 
835  return 0;
836 }
837 
838 static int
839 nat64_in2out_unk_proto_hairpinning (vlib_main_t * vm, vlib_buffer_t * b,
840  ip6_header_t * ip6, u32 thread_index)
841 {
842  nat64_main_t *nm = &nat64_main;
843  nat64_db_bib_entry_t *bibe;
844  nat64_db_st_entry_t *ste;
845  ip46_address_t saddr, daddr, addr;
846  u32 sw_if_index, fib_index;
847  u8 proto = ip6->protocol;
848  int i;
849  nat64_db_t *db = &nm->db[thread_index];
850 
851  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
852  fib_index =
853  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6, sw_if_index);
854 
855  saddr.as_u64[0] = ip6->src_address.as_u64[0];
856  saddr.as_u64[1] = ip6->src_address.as_u64[1];
857  daddr.as_u64[0] = ip6->dst_address.as_u64[0];
858  daddr.as_u64[1] = ip6->dst_address.as_u64[1];
859 
860  ste =
861  nat64_db_st_entry_find (db, &saddr, &daddr, 0, 0, proto, fib_index, 1);
862 
863  if (ste)
864  {
865  bibe = nat64_db_bib_entry_by_index (db, proto, ste->bibe_index);
866  if (!bibe)
867  return -1;
868  }
869  else
870  {
871  bibe = nat64_db_bib_entry_find (db, &saddr, 0, proto, fib_index, 1);
872 
873  if (!bibe)
874  {
875  /* Choose same out address as for TCP/UDP session to same dst */
876  unk_proto_st_walk_ctx_t ctx = {
877  .src_addr.as_u64[0] = ip6->src_address.as_u64[0],
878  .src_addr.as_u64[1] = ip6->src_address.as_u64[1],
879  .dst_addr.as_u64[0] = ip6->dst_address.as_u64[0],
880  .dst_addr.as_u64[1] = ip6->dst_address.as_u64[1],
881  .out_addr.as_u32 = 0,
882  .fib_index = fib_index,
883  .proto = proto,
884  .thread_index = thread_index,
885  };
886 
887  nat64_db_st_walk (db, IP_PROTOCOL_TCP, unk_proto_st_walk, &ctx);
888 
889  if (!ctx.out_addr.as_u32)
890  nat64_db_st_walk (db, IP_PROTOCOL_UDP, unk_proto_st_walk, &ctx);
891 
892  /* Verify if out address is not already in use for protocol */
893  clib_memset (&addr, 0, sizeof (addr));
894  addr.ip4.as_u32 = ctx.out_addr.as_u32;
895  if (nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
896  ctx.out_addr.as_u32 = 0;
897 
898  if (!ctx.out_addr.as_u32)
899  {
900  for (i = 0; i < vec_len (nm->addr_pool); i++)
901  {
902  addr.ip4.as_u32 = nm->addr_pool[i].addr.as_u32;
903  if (!nat64_db_bib_entry_find (db, &addr, 0, proto, 0, 0))
904  break;
905  }
906  }
907 
908  if (!ctx.out_addr.as_u32)
909  return -1;
910 
911  bibe =
912  nat64_db_bib_entry_create (thread_index, db, &ip6->src_address,
913  &ctx.out_addr, 0, 0, fib_index, proto,
914  0);
915  if (!bibe)
916  return -1;
917 
918  vlib_set_simple_counter (&nm->total_bibs, thread_index, 0,
919  db->bib.bib_entries_num);
920  }
921 
922  nat64_extract_ip4 (&ip6->dst_address, &daddr.ip4, fib_index);
923  ste =
924  nat64_db_st_entry_create (thread_index, db, bibe, &ip6->dst_address,
925  &daddr.ip4, 0);
926  if (!ste)
927  return -1;
928 
929  vlib_set_simple_counter (&nm->total_sessions, thread_index, 0,
930  db->st.st_entries_num);
931  }
932 
933  nat64_session_reset_timeout (ste, vm);
934 
935  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, fib_index);
936 
937  clib_memset (&daddr, 0, sizeof (daddr));
938  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
939 
940  bibe = 0;
941  /* *INDENT-OFF* */
942  vec_foreach (db, nm->db)
943  {
944  bibe = nat64_db_bib_entry_find (db, &daddr, 0, proto, 0, 0);
945 
946  if (bibe)
947  break;
948  }
949  /* *INDENT-ON* */
950 
951  if (!bibe)
952  return -1;
953 
954  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
955  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
956 
957  return 0;
958 }
959 
960 static inline uword
961 nat64_in2out_node_fn_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
962  vlib_frame_t * frame, u8 is_slow_path)
963 {
964  u32 n_left_from, *from, *to_next;
965  nat64_in2out_next_t next_index;
966  u32 pkts_processed = 0;
967  u32 stats_node_index;
968  u32 thread_index = vm->thread_index;
969  nat64_main_t *nm = &nat64_main;
970 
971  u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
972  0, fragments = 0;
973 
974  stats_node_index =
975  is_slow_path ? nm->in2out_slowpath_node_index : nm->in2out_node_index;
976 
977  from = vlib_frame_vector_args (frame);
978  n_left_from = frame->n_vectors;
979  next_index = node->cached_next_index;
980 
981  while (n_left_from > 0)
982  {
983  u32 n_left_to_next;
984 
985  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
986 
987  while (n_left_from > 0 && n_left_to_next > 0)
988  {
989  u32 bi0;
990  vlib_buffer_t *b0;
991  u32 next0;
992  ip6_header_t *ip60;
993  u16 l4_offset0, frag_offset0;
994  u8 l4_protocol0;
995  u32 proto0;
996  nat64_in2out_set_ctx_t ctx0;
997  u32 sw_if_index0;
998 
999  /* speculatively enqueue b0 to the current next frame */
1000  bi0 = from[0];
1001  to_next[0] = bi0;
1002  from += 1;
1003  to_next += 1;
1004  n_left_from -= 1;
1005  n_left_to_next -= 1;
1006 
1007  b0 = vlib_get_buffer (vm, bi0);
1008  ip60 = vlib_buffer_get_current (b0);
1009 
1010  ctx0.b = b0;
1011  ctx0.vm = vm;
1012  ctx0.thread_index = thread_index;
1013 
1014  next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1015 
1016  if (PREDICT_FALSE
1017  (ip6_parse
1018  (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1019  &frag_offset0)))
1020  {
1021  next0 = NAT64_IN2OUT_NEXT_DROP;
1022  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1023  goto trace0;
1024  }
1025 
1026  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1027 
1028  if (nat64_not_translate (sw_if_index0, ip60->dst_address))
1029  {
1030  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1031  goto trace0;
1032  }
1033 
1034  proto0 = ip_proto_to_snat_proto (l4_protocol0);
1035 
1036  if (is_slow_path)
1037  {
1038  if (PREDICT_TRUE (proto0 == ~0))
1039  {
1040  other_packets++;
1041  if (is_hairpinning (&ip60->dst_address))
1042  {
1043  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1044  if (nat64_in2out_unk_proto_hairpinning
1045  (vm, b0, ip60, thread_index))
1046  {
1047  next0 = NAT64_IN2OUT_NEXT_DROP;
1048  b0->error =
1049  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1050  }
1051  goto trace0;
1052  }
1053 
1054  if (ip6_to_ip4 (b0, nat64_in2out_unk_proto_set_cb, &ctx0))
1055  {
1056  next0 = NAT64_IN2OUT_NEXT_DROP;
1057  b0->error =
1058  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1059  goto trace0;
1060  }
1061  }
1062  goto trace0;
1063  }
1064  else
1065  {
1066  if (PREDICT_FALSE (proto0 == ~0))
1067  {
1068  next0 = NAT64_IN2OUT_NEXT_SLOWPATH;
1069  goto trace0;
1070  }
1071  }
1072 
1073  if (PREDICT_FALSE
1074  (ip60->protocol == IP_PROTOCOL_IPV6_FRAGMENTATION))
1075  {
1076  next0 = NAT64_IN2OUT_NEXT_REASS;
1077  fragments++;
1078  goto trace0;
1079  }
1080 
1081  if (proto0 == SNAT_PROTOCOL_ICMP)
1082  {
1083  icmp_packets++;
1084  if (is_hairpinning (&ip60->dst_address))
1085  {
1086  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1087  if (nat64_in2out_icmp_hairpinning
1088  (vm, b0, ip60, thread_index))
1089  {
1090  next0 = NAT64_IN2OUT_NEXT_DROP;
1091  b0->error =
1092  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1093  }
1094  goto trace0;
1095  }
1096 
1097  if (icmp6_to_icmp
1098  (b0, nat64_in2out_icmp_set_cb, &ctx0,
1099  nat64_in2out_inner_icmp_set_cb, &ctx0))
1100  {
1101  next0 = NAT64_IN2OUT_NEXT_DROP;
1102  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1103  goto trace0;
1104  }
1105  }
1106  else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1107  {
1108  if (proto0 == SNAT_PROTOCOL_TCP)
1109  tcp_packets++;
1110  else
1111  udp_packets++;
1112 
1113  if (is_hairpinning (&ip60->dst_address))
1114  {
1115  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1116  if (nat64_in2out_tcp_udp_hairpinning
1117  (vm, b0, ip60, thread_index))
1118  {
1119  next0 = NAT64_IN2OUT_NEXT_DROP;
1120  b0->error =
1121  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1122  }
1123  goto trace0;
1124  }
1125 
1126  if (ip6_to_ip4_tcp_udp
1127  (b0, nat64_in2out_tcp_udp_set_cb, &ctx0, 0))
1128  {
1129  next0 = NAT64_IN2OUT_NEXT_DROP;
1130  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1131  goto trace0;
1132  }
1133  }
1134 
1135  trace0:
1136  if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)
1137  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1138  {
1139  nat64_in2out_trace_t *t =
1140  vlib_add_trace (vm, node, b0, sizeof (*t));
1141  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1142  t->next_index = next0;
1143  t->is_slow_path = is_slow_path;
1144  }
1145 
1146  pkts_processed += next0 == NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1147 
1148  /* verify speculative enqueue, maybe switch current next frame */
1149  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
1150  n_left_to_next, bi0, next0);
1151  }
1152  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1153  }
1154  vlib_node_increment_counter (vm, stats_node_index,
1155  NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1156  pkts_processed);
1157  vlib_node_increment_counter (vm, stats_node_index,
1158  NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
1159  vlib_node_increment_counter (vm, stats_node_index,
1160  NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
1161  vlib_node_increment_counter (vm, stats_node_index,
1162  NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
1163  vlib_node_increment_counter (vm, stats_node_index,
1164  NAT64_IN2OUT_ERROR_OTHER_PACKETS,
1165  other_packets);
1166  vlib_node_increment_counter (vm, stats_node_index,
1167  NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
1168 
1169  return frame->n_vectors;
1170 }
1171 
1172 VLIB_NODE_FN (nat64_in2out_node) (vlib_main_t * vm,
1173  vlib_node_runtime_t * node,
1174  vlib_frame_t * frame)
1175 {
1176  return nat64_in2out_node_fn_inline (vm, node, frame, 0);
1177 }
1178 
1179 /* *INDENT-OFF* */
1181  .name = "nat64-in2out",
1182  .vector_size = sizeof (u32),
1183  .format_trace = format_nat64_in2out_trace,
1184  .type = VLIB_NODE_TYPE_INTERNAL,
1185  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1186  .error_strings = nat64_in2out_error_strings,
1187  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1188  /* edit / add dispositions here */
1189  .next_nodes = {
1190  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1191  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1192  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1193  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1194  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1195  },
1196 };
1197 /* *INDENT-ON* */
1198 
1199 VLIB_NODE_FN (nat64_in2out_slowpath_node) (vlib_main_t * vm,
1200  vlib_node_runtime_t * node,
1201  vlib_frame_t * frame)
1202 {
1203  return nat64_in2out_node_fn_inline (vm, node, frame, 1);
1204 }
1205 
1206 /* *INDENT-OFF* */
1208  .name = "nat64-in2out-slowpath",
1209  .vector_size = sizeof (u32),
1210  .format_trace = format_nat64_in2out_trace,
1211  .type = VLIB_NODE_TYPE_INTERNAL,
1212  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1213  .error_strings = nat64_in2out_error_strings,
1214  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1215  /* edit / add dispositions here */
1216  .next_nodes = {
1217  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1218  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1219  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1220  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1221  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1222  },
1223 };
1224 /* *INDENT-ON* */
1225 
1226 typedef struct nat64_in2out_frag_set_ctx_t_
1227 {
1228  vlib_main_t *vm;
1229  u32 sess_index;
1230  u32 thread_index;
1231  u16 l4_offset;
1232  u8 proto;
1233  u8 first_frag;
1234 } nat64_in2out_frag_set_ctx_t;
1235 
1236 static int
1237 nat64_in2out_frag_set_cb (ip6_header_t * ip6, ip4_header_t * ip4, void *arg)
1238 {
1239  nat64_main_t *nm = &nat64_main;
1240  nat64_in2out_frag_set_ctx_t *ctx = arg;
1241  nat64_db_st_entry_t *ste;
1242  nat64_db_bib_entry_t *bibe;
1243  udp_header_t *udp;
1244  nat64_db_t *db = &nm->db[ctx->thread_index];
1245 
1246  ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1247  if (!ste)
1248  return -1;
1249 
1250  bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1251  if (!bibe)
1252  return -1;
1253 
1254  nat64_session_reset_timeout (ste, ctx->vm);
1255 
1256  if (ctx->first_frag)
1257  {
1258  udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1259 
1260  if (ctx->proto == IP_PROTOCOL_TCP)
1261  {
1262  u16 *checksum;
1263  ip_csum_t csum;
1264  tcp_header_t *tcp = (tcp_header_t *) udp;
1265 
1266  nat64_tcp_session_set_state (ste, tcp, 1);
1267  checksum = &tcp->checksum;
1268  csum = ip_csum_sub_even (*checksum, tcp->src_port);
1269  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[0]);
1270  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1271  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1272  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1273  csum = ip_csum_add_even (csum, bibe->out_port);
1274  csum = ip_csum_add_even (csum, bibe->out_addr.as_u32);
1275  csum = ip_csum_add_even (csum, ste->out_r_addr.as_u32);
1276  *checksum = ip_csum_fold (csum);
1277  }
1278 
1279  udp->src_port = bibe->out_port;
1280  }
1281 
1282  ip4->src_address.as_u32 = bibe->out_addr.as_u32;
1283  ip4->dst_address.as_u32 = ste->out_r_addr.as_u32;
1284 
1285  return 0;
1286 }
1287 
1288 static int
1289 nat64_in2out_frag_hairpinning (vlib_buffer_t * b, ip6_header_t * ip6,
1290  nat64_in2out_frag_set_ctx_t * ctx)
1291 {
1292  nat64_main_t *nm = &nat64_main;
1293  nat64_db_st_entry_t *ste;
1294  nat64_db_bib_entry_t *bibe;
1295  udp_header_t *udp = (udp_header_t *) u8_ptr_add (ip6, ctx->l4_offset);
1296  tcp_header_t *tcp = (tcp_header_t *) udp;
1297  u16 sport = udp->src_port;
1298  u16 dport = udp->dst_port;
1299  u16 *checksum;
1300  ip_csum_t csum;
1301  ip46_address_t daddr;
1302  nat64_db_t *db = &nm->db[ctx->thread_index];
1303 
1304  if (ctx->first_frag)
1305  {
1306  if (ctx->proto == IP_PROTOCOL_UDP)
1307  checksum = &udp->checksum;
1308  else
1309  checksum = &tcp->checksum;
1310 
1311  csum = ip_csum_sub_even (*checksum, ip6->src_address.as_u64[0]);
1312  csum = ip_csum_sub_even (csum, ip6->src_address.as_u64[1]);
1313  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[0]);
1314  csum = ip_csum_sub_even (csum, ip6->dst_address.as_u64[1]);
1315  csum = ip_csum_sub_even (csum, sport);
1316  csum = ip_csum_sub_even (csum, dport);
1317  }
1318 
1319  ste = nat64_db_st_entry_by_index (db, ctx->proto, ctx->sess_index);
1320  if (!ste)
1321  return -1;
1322 
1323  bibe = nat64_db_bib_entry_by_index (db, ctx->proto, ste->bibe_index);
1324  if (!bibe)
1325  return -1;
1326 
1327  if (ctx->proto == IP_PROTOCOL_TCP)
1328  nat64_tcp_session_set_state (ste, tcp, 1);
1329 
1330  nat64_session_reset_timeout (ste, ctx->vm);
1331 
1332  sport = bibe->out_port;
1333  dport = ste->r_port;
1334 
1335  nat64_compose_ip6 (&ip6->src_address, &bibe->out_addr, bibe->fib_index);
1336 
1337  clib_memset (&daddr, 0, sizeof (daddr));
1338  daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1339 
1340  bibe = 0;
1341  /* *INDENT-OFF* */
1342  vec_foreach (db, nm->db)
1343  {
1344  bibe = nat64_db_bib_entry_find (db, &daddr, dport, ctx->proto, 0, 0);
1345 
1346  if (bibe)
1347  break;
1348  }
1349  /* *INDENT-ON* */
1350 
1351  if (!bibe)
1352  return -1;
1353 
1354  ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1355  ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1356 
1357  if (ctx->first_frag)
1358  {
1359  udp->dst_port = bibe->in_port;
1360  udp->src_port = sport;
1361  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[0]);
1362  csum = ip_csum_add_even (csum, ip6->src_address.as_u64[1]);
1363  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[0]);
1364  csum = ip_csum_add_even (csum, ip6->dst_address.as_u64[1]);
1365  csum = ip_csum_add_even (csum, udp->src_port);
1366  csum = ip_csum_add_even (csum, udp->dst_port);
1367  *checksum = ip_csum_fold (csum);
1368  }
1369 
1370  return 0;
1371 }
1372 
1373 VLIB_NODE_FN (nat64_in2out_reass_node) (vlib_main_t * vm,
1374  vlib_node_runtime_t * node,
1375  vlib_frame_t * frame)
1376 {
1377  u32 n_left_from, *from, *to_next;
1378  nat64_in2out_next_t next_index;
1379  u32 pkts_processed = 0, cached_fragments = 0;
1380  u32 *fragments_to_drop = 0;
1381  u32 *fragments_to_loopback = 0;
1382  nat64_main_t *nm = &nat64_main;
1383  u32 thread_index = vm->thread_index;
1384 
1385  from = vlib_frame_vector_args (frame);
1386  n_left_from = frame->n_vectors;
1387  next_index = node->cached_next_index;
1388 
1389  while (n_left_from > 0)
1390  {
1391  u32 n_left_to_next;
1392 
1393  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1394 
1395  while (n_left_from > 0 && n_left_to_next > 0)
1396  {
1397  u32 bi0;
1398  vlib_buffer_t *b0;
1399  u32 next0;
1400  u8 cached0 = 0;
1401  ip6_header_t *ip60;
1402  u16 l4_offset0, frag_offset0;
1403  u8 l4_protocol0;
1404  nat_reass_ip6_t *reass0;
1405  ip6_frag_hdr_t *frag0;
1406  nat64_db_bib_entry_t *bibe0;
1407  nat64_db_st_entry_t *ste0;
1408  udp_header_t *udp0;
1409  snat_protocol_t proto0;
1410  u32 sw_if_index0, fib_index0;
1411  ip46_address_t saddr0, daddr0;
1412  nat64_in2out_frag_set_ctx_t ctx0;
1413  nat64_db_t *db = &nm->db[thread_index];
1414 
1415  /* speculatively enqueue b0 to the current next frame */
1416  bi0 = from[0];
1417  to_next[0] = bi0;
1418  from += 1;
1419  to_next += 1;
1420  n_left_from -= 1;
1421  n_left_to_next -= 1;
1422 
1423  b0 = vlib_get_buffer (vm, bi0);
1424  next0 = NAT64_IN2OUT_NEXT_IP4_LOOKUP;
1425 
1426  sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1427  fib_index0 =
1428  fib_table_get_index_for_sw_if_index (FIB_PROTOCOL_IP6,
1429  sw_if_index0);
1430 
1431  ctx0.thread_index = thread_index;
1432 
1433  if (PREDICT_FALSE (nat_reass_is_drop_frag (1)))
1434  {
1435  next0 = NAT64_IN2OUT_NEXT_DROP;
1436  b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1437  goto trace0;
1438  }
1439 
1440  ip60 = (ip6_header_t *) vlib_buffer_get_current (b0);
1441 
1442  if (PREDICT_FALSE
1443  (ip6_parse
1444  (ip60, b0->current_length, &l4_protocol0, &l4_offset0,
1445  &frag_offset0)))
1446  {
1447  next0 = NAT64_IN2OUT_NEXT_DROP;
1448  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1449  goto trace0;
1450  }
1451 
1452  if (PREDICT_FALSE
1453  (!(l4_protocol0 == IP_PROTOCOL_TCP
1454  || l4_protocol0 == IP_PROTOCOL_UDP)))
1455  {
1456  next0 = NAT64_IN2OUT_NEXT_DROP;
1457  b0->error = node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT];
1458  goto trace0;
1459  }
1460 
1461  udp0 = (udp_header_t *) u8_ptr_add (ip60, l4_offset0);
1462  frag0 = (ip6_frag_hdr_t *) u8_ptr_add (ip60, frag_offset0);
1463  proto0 = ip_proto_to_snat_proto (l4_protocol0);
1464 
1465  reass0 = nat_ip6_reass_find_or_create (ip60->src_address,
1466  ip60->dst_address,
1467  frag0->identification,
1468  l4_protocol0,
1469  1, &fragments_to_drop);
1470 
1471  if (PREDICT_FALSE (!reass0))
1472  {
1473  next0 = NAT64_IN2OUT_NEXT_DROP;
1474  b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_REASS];
1475  goto trace0;
1476  }
1477 
1478  if (PREDICT_TRUE (ip6_frag_hdr_offset (frag0)))
1479  {
1480  ctx0.first_frag = 0;
1481  if (PREDICT_FALSE (reass0->sess_index == (u32) ~ 0))
1482  {
1483  if (nat_ip6_reass_add_fragment
1484  (thread_index, reass0, bi0, &fragments_to_drop))
1485  {
1486  b0->error = node->errors[NAT64_IN2OUT_ERROR_MAX_FRAG];
1487  next0 = NAT64_IN2OUT_NEXT_DROP;
1488  goto trace0;
1489  }
1490  cached0 = 1;
1491  goto trace0;
1492  }
1493  }
1494  else
1495  {
1496  ctx0.first_frag = 1;
1497 
1498  saddr0.as_u64[0] = ip60->src_address.as_u64[0];
1499  saddr0.as_u64[1] = ip60->src_address.as_u64[1];
1500  daddr0.as_u64[0] = ip60->dst_address.as_u64[0];
1501  daddr0.as_u64[1] = ip60->dst_address.as_u64[1];
1502 
1503  ste0 =
1504  nat64_db_st_entry_find (db, &saddr0, &daddr0,
1505  udp0->src_port, udp0->dst_port,
1506  l4_protocol0, fib_index0, 1);
1507  if (!ste0)
1508  {
1509  bibe0 =
1510  nat64_db_bib_entry_find (db, &saddr0, udp0->src_port,
1511  l4_protocol0, fib_index0, 1);
1512  if (!bibe0)
1513  {
1514  u16 out_port0;
1515  ip4_address_t out_addr0;
1516  if (nat64_alloc_out_addr_and_port
1517  (fib_index0, proto0, &out_addr0, &out_port0,
1518  thread_index))
1519  {
1520  next0 = NAT64_IN2OUT_NEXT_DROP;
1521  b0->error =
1522  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1523  goto trace0;
1524  }
1525 
1526  bibe0 =
1527  nat64_db_bib_entry_create (thread_index, db,
1528  &ip60->src_address,
1529  &out_addr0, udp0->src_port,
1530  out_port0, fib_index0,
1531  l4_protocol0, 0);
1532  if (!bibe0)
1533  {
1534  next0 = NAT64_IN2OUT_NEXT_DROP;
1535  b0->error =
1536  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1537  goto trace0;
1538  }
1539  vlib_set_simple_counter (&nm->total_bibs, thread_index,
1540  0, db->bib.bib_entries_num);
1541  }
1542  nat64_extract_ip4 (&ip60->dst_address, &daddr0.ip4,
1543  fib_index0);
1544  ste0 =
1545  nat64_db_st_entry_create (thread_index, db, bibe0,
1546  &ip60->dst_address, &daddr0.ip4,
1547  udp0->dst_port);
1548  if (!ste0)
1549  {
1550  next0 = NAT64_IN2OUT_NEXT_DROP;
1551  b0->error =
1552  node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1553  goto trace0;
1554  }
1555 
1556  vlib_set_simple_counter (&nm->total_sessions, thread_index,
1557  0, db->st.st_entries_num);
1558  }
1559  reass0->sess_index = nat64_db_st_entry_get_index (db, ste0);
1560 
1561  nat_ip6_reass_get_frags (reass0, &fragments_to_loopback);
1562  }
1563 
1564  ctx0.sess_index = reass0->sess_index;
1565  ctx0.proto = l4_protocol0;
1566  ctx0.vm = vm;
1567  ctx0.l4_offset = l4_offset0;
1568 
1569  if (PREDICT_FALSE (is_hairpinning (&ip60->dst_address)))
1570  {
1571  next0 = NAT64_IN2OUT_NEXT_IP6_LOOKUP;
1572  if (nat64_in2out_frag_hairpinning (b0, ip60, &ctx0))
1573  {
1574  next0 = NAT64_IN2OUT_NEXT_DROP;
1575  b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1576  }
1577  goto trace0;
1578  }
1579  else
1580  {
1581  if (ip6_to_ip4_fragmented (b0, nat64_in2out_frag_set_cb, &ctx0))
1582  {
1583  next0 = NAT64_IN2OUT_NEXT_DROP;
1584  b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1585  goto trace0;
1586  }
1587  }
1588 
1589  trace0:
1590  if (PREDICT_FALSE
1591  ((node->flags & VLIB_NODE_FLAG_TRACE)
1592  && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1593  {
1594  nat64_in2out_reass_trace_t *t =
1595  vlib_add_trace (vm, node, b0, sizeof (*t));
1596  t->cached = cached0;
1597  t->sw_if_index = sw_if_index0;
1598  t->next_index = next0;
1599  }
1600 
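 /*
  * A cached fragment stays queued in the reassembly context, so undo the
  * speculative enqueue instead of forwarding the buffer now.
  */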
1601  if (cached0)
1602  {
1603  n_left_to_next++;
1604  to_next--;
1605  cached_fragments++;
1606  }
1607  else
1608  {
1609  pkts_processed += next0 != NAT64_IN2OUT_NEXT_DROP;
1610 
1611  /* verify speculative enqueue, maybe switch current next frame */
1612  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1613  to_next, n_left_to_next,
1614  bi0, next0);
1615  }
1616 
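 /*
  * Once the first fragment has created the session, feed any fragments
  * cached earlier back through this node, at most one frame's worth at a
  * time.
  */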
1617  if (n_left_from == 0 && vec_len (fragments_to_loopback))
1618  {
1619  from = vlib_frame_vector_args (frame);
1620  u32 len = vec_len (fragments_to_loopback);
1621  if (len <= VLIB_FRAME_SIZE)
1622  {
1623  clib_memcpy_fast (from, fragments_to_loopback,
1624  sizeof (u32) * len);
1625  n_left_from = len;
1626  vec_reset_length (fragments_to_loopback);
1627  }
1628  else
1629  {
1630  clib_memcpy_fast (from, fragments_to_loopback +
1631  (len - VLIB_FRAME_SIZE),
1632  sizeof (u32) * VLIB_FRAME_SIZE);
1633  n_left_from = VLIB_FRAME_SIZE;
1634  _vec_len (fragments_to_loopback) = len - VLIB_FRAME_SIZE;
1635  }
1636  }
1637  }
1638 
1639  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1640  }
1641 
1642  vlib_node_increment_counter (vm, nm->in2out_reass_node_index,
1643  NAT64_IN2OUT_ERROR_PROCESSED_FRAGMENTS,
1644  pkts_processed);
1645  vlib_node_increment_counter (vm, nm->in2out_reass_node_index,
1646  NAT64_IN2OUT_ERROR_CACHED_FRAGMENTS,
1647  cached_fragments);
1648 
1649  nat_send_all_to_node (vm, fragments_to_drop, node,
1650  &node->errors[NAT64_IN2OUT_ERROR_DROP_FRAGMENT],
1651  NAT64_IN2OUT_NEXT_DROP);
1652 
1653  vec_free (fragments_to_drop);
1654  vec_free (fragments_to_loopback);
1655  return frame->n_vectors;
1656 }
1657 
1658 /* *INDENT-OFF* */
1660  .name = "nat64-in2out-reass",
1661  .vector_size = sizeof (u32),
1662  .format_trace = format_nat64_in2out_reass_trace,
1663  .type = VLIB_NODE_TYPE_INTERNAL,
1664  .n_errors = ARRAY_LEN (nat64_in2out_error_strings),
1665  .error_strings = nat64_in2out_error_strings,
1666  .n_next_nodes = NAT64_IN2OUT_N_NEXT,
1667  /* edit / add dispositions here */
1668  .next_nodes = {
1669  [NAT64_IN2OUT_NEXT_DROP] = "error-drop",
1670  [NAT64_IN2OUT_NEXT_IP4_LOOKUP] = "ip4-lookup",
1671  [NAT64_IN2OUT_NEXT_IP6_LOOKUP] = "ip6-lookup",
1672  [NAT64_IN2OUT_NEXT_SLOWPATH] = "nat64-in2out-slowpath",
1673  [NAT64_IN2OUT_NEXT_REASS] = "nat64-in2out-reass",
1674  },
1675 };
1676 /* *INDENT-ON* */
1677 
1678 #define foreach_nat64_in2out_handoff_error \
1679 _(CONGESTION_DROP, "congestion drop") \
1680 _(SAME_WORKER, "same worker") \
1681 _(DO_HANDOFF, "do handoff")
1682 
1683 typedef enum
1684 {
1685 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1686  foreach_nat64_in2out_handoff_error
1687 #undef _
1688  NAT64_IN2OUT_HANDOFF_N_ERROR,
1689 } nat64_in2out_handoff_error_t;
1690 
1691 static char *nat64_in2out_handoff_error_strings[] = {
1692 #define _(sym,string) string,
1693  foreach_nat64_in2out_handoff_error
1694 #undef _
1695 };
1696 
1697 typedef struct
1698 {
1699  u32 next_worker_index;
1700 } nat64_in2out_handoff_trace_t;
1701 
1702 static u8 *
1703 format_nat64_in2out_handoff_trace (u8 * s, va_list * args)
1704 {
1705  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
1706  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
1707  nat64_in2out_handoff_trace_t *t =
1708  va_arg (*args, nat64_in2out_handoff_trace_t *);
1709 
1710  s =
1711  format (s, "NAT64-IN2OUT-HANDOFF: next-worker %d", t->next_worker_index);
1712 
1713  return s;
1714 }
1715 
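/*
 * Worker handoff: the owning thread is derived from the IPv6 source
 * address (nat64_get_worker_in2out) and buffers are enqueued to that
 * worker's in2out frame queue; packets already on the right worker are
 * counted separately.
 */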
1716 VLIB_NODE_FN (nat64_in2out_handoff_node) (vlib_main_t * vm,
1717  vlib_node_runtime_t * node,
1718  vlib_frame_t * frame)
1719 {
1720  nat64_main_t *nm = &nat64_main;
1721  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
1722  u32 n_enq, n_left_from, *from;
1723  u16 thread_indices[VLIB_FRAME_SIZE], *ti;
1724  u32 fq_index;
1725  u32 thread_index = vm->thread_index;
1726  u32 do_handoff = 0, same_worker = 0;
1727 
1728  from = vlib_frame_vector_args (frame);
1729  n_left_from = frame->n_vectors;
1730  vlib_get_buffers (vm, from, bufs, n_left_from);
1731 
1732  b = bufs;
1733  ti = thread_indices;
1734 
1735  fq_index = nm->fq_in2out_index;
1736 
1737  while (n_left_from > 0)
1738  {
1739  ip6_header_t *ip0;
1740 
1741  ip0 = vlib_buffer_get_current (b[0]);
1742  ti[0] = nat64_get_worker_in2out (&ip0->src_address);
1743 
1744  if (ti[0] != thread_index)
1745  do_handoff++;
1746  else
1747  same_worker++;
1748 
1749  if (PREDICT_FALSE
1750  ((node->flags & VLIB_NODE_FLAG_TRACE)
1751  && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1752  {
1753  nat64_in2out_handoff_trace_t *t =
1754  vlib_add_trace (vm, node, b[0], sizeof (*t));
1755  t->next_worker_index = ti[0];
1756  }
1757 
1758  n_left_from -= 1;
1759  ti += 1;
1760  b += 1;
1761  }
1762 
1763  n_enq =
1764  vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
1765  frame->n_vectors, 1);
1766 
1767  if (n_enq < frame->n_vectors)
1768  vlib_node_increment_counter (vm, node->node_index,
1769  NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1770  frame->n_vectors - n_enq);
1771  vlib_node_increment_counter (vm, node->node_index,
1772  NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1773  same_worker);
1774  vlib_node_increment_counter (vm, node->node_index,
1775  NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1776  do_handoff);
1777 
1778  return frame->n_vectors;
1779 }
1780 
1781 /* *INDENT-OFF* */
1783  .name = "nat64-in2out-handoff",
1784  .vector_size = sizeof (u32),
1785  .format_trace = format_nat64_in2out_handoff_trace,
1786  .type = VLIB_NODE_TYPE_INTERNAL,
1787  .n_errors = ARRAY_LEN (nat64_in2out_handoff_error_strings),
1788  .error_strings = nat64_in2out_handoff_error_strings,
1789 
1790  .n_next_nodes = 1,
1791 
1792  .next_nodes = {
1793  [0] = "error-drop",
1794  },
1795 };
1796 /* *INDENT-ON* */
1797 
1798 /*
1799  * fd.io coding-style-patch-verification: ON
1800  *
1801  * Local Variables:
1802  * eval: (c-set-style "gnu")
1803  * End:
1804  */