40 tag = t->
is_slow_path ?
"NAT64-in2out-slowpath" :
"NAT64-in2out";
49 #define foreach_nat64_in2out_error \ 50 _(UNSUPPORTED_PROTOCOL, "unsupported protocol") \ 51 _(IN2OUT_PACKETS, "good in2out packets processed") \ 52 _(NO_TRANSLATION, "no translation") \ 53 _(UNKNOWN, "unknown") \ 54 _(DROP_FRAGMENT, "drop fragment") \ 55 _(TCP_PACKETS, "TCP packets") \ 56 _(UDP_PACKETS, "UDP packets") \ 57 _(ICMP_PACKETS, "ICMP packets") \ 58 _(OTHER_PACKETS, "other protocol packets") \ 59 _(FRAGMENTS, "fragments") \ 60 _(CACHED_FRAGMENTS, "cached fragments") \ 61 _(PROCESSED_FRAGMENTS, "processed fragments") 66 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym, 73 #define _(sym,string) string, 150 nat64_db_bib_entry_t *bibe;
151 nat64_db_st_entry_t *ste;
152 ip46_address_t old_saddr, old_daddr;
165 u32 ip_version_traffic_class_and_flow_label =
178 ip6_frag_hdr_t *hdr =
179 (ip6_frag_hdr_t *)
u8_ptr_add (ip6, frag_hdr_offset);
195 u16_net_add (payload_length,
sizeof (*ip4) +
sizeof (*ip6) - l4_offset);
198 clib_host_to_net_u16 (frag_offset |
200 ip4->
ttl = hop_limit;
201 ip4->
protocol = (proto == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : proto;
233 &old_saddr.ip6, &out_addr, sport,
234 out_port, fib_index, proto, 0);
245 &old_daddr.ip6, &new_daddr, dport);
264 if (proto == IP_PROTOCOL_UDP)
296 nat64_db_bib_entry_t *bibe;
297 nat64_db_st_entry_t *ste;
298 ip46_address_t saddr, daddr;
312 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
314 u16 in_id = ((
u16 *) (icmp))[2];
317 IP_PROTOCOL_ICMP, fib_index, 1);
331 IP_PROTOCOL_ICMP, fib_index, 1);
338 (fib_index, SNAT_PROTOCOL_ICMP, &out_addr, &out_id,
345 in_id, out_id, fib_index,
346 IP_PROTOCOL_ICMP, 0);
368 ((
u16 *) (icmp))[2] = bibe->out_port;
390 nat64_db_st_entry_t *ste;
391 nat64_db_bib_entry_t *bibe;
392 ip46_address_t saddr, daddr;
406 if (proto == IP_PROTOCOL_ICMP6)
409 u16 in_id = ((
u16 *) (icmp))[2];
410 proto = IP_PROTOCOL_ICMP;
413 (icmp->type == ICMP4_echo_request
414 || icmp->type == ICMP4_echo_reply))
428 ((
u16 *) (icmp))[2] = bibe->out_port;
455 if (proto == IP_PROTOCOL_TCP)
456 checksum = &tcp->checksum;
482 nat64_db_bib_entry_t *bibe;
483 ip46_address_t saddr, daddr;
496 saddr.ip4.as_u32 = bibe->out_addr.as_u32;
514 u16 l4_offset,
u16 frag_hdr_offset,
532 ip6_frag_hdr_t *hdr =
533 (ip6_frag_hdr_t *)
u8_ptr_add (ip6, frag_hdr_offset);
546 nat64_db_bib_entry_t *bibe;
547 nat64_db_st_entry_t *ste;
548 ip46_address_t saddr, daddr,
addr;
585 .out_addr.as_u32 = 0,
586 .fib_index = fib_index,
587 .proto = l4_protocol,
608 (db, &addr, 0, l4_protocol, 0, 0))
619 0, 0, fib_index, l4_protocol, 0);
647 sizeof (*ip4) +
sizeof (*ip6) - l4_offset);
650 clib_host_to_net_u16 (frag_offset |
665 nat64_db_bib_entry_t *bibe;
666 nat64_db_st_entry_t *ste;
667 ip46_address_t saddr, daddr;
689 if (proto == IP_PROTOCOL_UDP)
692 checksum = &tcp->checksum;
719 &out_port, thread_index))
724 &out_addr, sport, out_port, fib_index,
744 if (proto == IP_PROTOCOL_TCP)
757 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
798 nat64_db_bib_entry_t *bibe;
799 nat64_db_st_entry_t *ste;
802 ip46_address_t saddr, daddr;
807 u16 *checksum, sport, dport;
811 if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
818 if (proto == IP_PROTOCOL_ICMP6)
836 if (proto == IP_PROTOCOL_UDP)
839 checksum = &tcp->checksum;
858 dport = udp->
dst_port = bibe->out_port;
863 saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
864 daddr.ip4.as_u32 = bibe->out_addr.as_u32;
924 nat64_db_bib_entry_t *bibe;
925 nat64_db_st_entry_t *ste;
926 ip46_address_t saddr, daddr,
addr;
962 .out_addr.as_u32 = 0,
963 .fib_index = fib_index,
994 &ctx.
out_addr, 0, 0, fib_index, proto,
1019 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1045 u32 n_left_from, *from, *to_next;
1047 u32 pkts_processed = 0;
1048 u32 stats_node_index;
1052 u32 tcp_packets = 0, udp_packets = 0, icmp_packets = 0, other_packets =
1062 while (n_left_from > 0)
1068 while (n_left_from > 0 && n_left_to_next > 0)
1074 u16 l4_offset0, frag_hdr_offset0;
1086 n_left_to_next -= 1;
1100 &frag_hdr_offset0)))
1103 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1126 (vm, b0, ip60, thread_index))
1130 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1136 (vm, b0, l4_protocol0, l4_offset0, frag_hdr_offset0,
1141 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1156 if (proto0 == SNAT_PROTOCOL_ICMP)
1163 (vm, b0, ip60, thread_index))
1167 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1177 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1181 else if (proto0 == SNAT_PROTOCOL_TCP || proto0 == SNAT_PROTOCOL_UDP)
1183 if (proto0 == SNAT_PROTOCOL_TCP)
1192 (vm, b0, ip60, l4_offset0, thread_index))
1196 node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1202 (vm, b0, l4_offset0, frag_hdr_offset0, &ctx0))
1205 b0->
error = node->
errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1212 && (b0->
flags & VLIB_BUFFER_IS_TRACED)))
1225 n_left_to_next, bi0, next0);
1230 NAT64_IN2OUT_ERROR_IN2OUT_PACKETS,
1233 NAT64_IN2OUT_ERROR_TCP_PACKETS, tcp_packets);
1235 NAT64_IN2OUT_ERROR_UDP_PACKETS, udp_packets);
1237 NAT64_IN2OUT_ERROR_ICMP_PACKETS, icmp_packets);
1239 NAT64_IN2OUT_ERROR_OTHER_PACKETS,
1242 NAT64_IN2OUT_ERROR_FRAGMENTS, fragments);
1256 .name =
"nat64-in2out",
1257 .vector_size =
sizeof (
u32),
1282 .name =
"nat64-in2out-slowpath",
1283 .vector_size =
sizeof (
u32),
1310 #define foreach_nat64_in2out_handoff_error \ 1311 _(CONGESTION_DROP, "congestion drop") \ 1312 _(SAME_WORKER, "same worker") \ 1313 _(DO_HANDOFF, "do handoff") 1317 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym, 1324 #define _(sym,string) string, 1354 u32 n_enq, n_left_from, *from;
1358 u32 do_handoff = 0, same_worker = 0;
1361 n_left_from =
frame->n_vectors;
1365 ti = thread_indices;
1369 while (n_left_from > 0)
1376 if (ti[0] != thread_index)
1383 && (b[0]->
flags & VLIB_BUFFER_IS_TRACED)))
1397 frame->n_vectors, 1);
1399 if (n_enq < frame->n_vectors)
1401 NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1402 frame->n_vectors - n_enq);
1404 NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1407 NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1410 return frame->n_vectors;
1415 .name =
"nat64-in2out-handoff",
1416 .vector_size =
sizeof (
u32),
#define foreach_ip_interface_address(lm, a, sw_if_index, loop, body)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
nat64_db_t * db
BIB and session DB per thread.
static int unk_proto_st_walk(nat64_db_st_entry_t *ste, void *arg)
snat_address_t * addr_pool
Address pool vector.
static int nat64_in2out_tcp_udp(vlib_main_t *vm, vlib_buffer_t *p, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *ctx)
void nat64_extract_ip4(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Extract an IPv4 address from an IPv4-embedded IPv6 address.
vlib_node_registration_t nat64_in2out_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_node)
static int nat64_in2out_tcp_udp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 l4_offset, u32 thread_index)
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
u16 current_length
Number of bytes between current data and the end of this buffer.
nat64_db_bib_entry_t * nat64_db_bib_entry_find(nat64_db_t *db, ip46_address_t *addr, u16 port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 BIB entry.
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
#define VLIB_NODE_FN(node)
static u8 * format_nat64_in2out_trace(u8 *s, va_list *args)
vlib_error_t * errors
Vector of errors for this node.
vlib_node_registration_t nat64_in2out_handoff_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_handoff_node)
struct _tcp_header tcp_header_t
#define u8_ptr_add(ptr, index)
u32 nat64_get_worker_in2out(ip6_address_t *addr)
Get worker thread index for NAT64 in2out.
#define static_always_inline
vl_api_interface_index_t sw_if_index
nat64_db_st_entry_t * nat64_db_st_entry_create(u32 thread_index, nat64_db_t *db, nat64_db_bib_entry_t *bibe, ip6_address_t *in_r_addr, ip4_address_t *out_r_addr, u16 r_port)
Create new NAT64 session table entry.
static int nat64_in2out_unk_proto(vlib_main_t *vm, vlib_buffer_t *p, u8 l4_protocol, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *s_ctx)
static_always_inline u8 ip6_translate_tos(u32 ip_version_traffic_class_and_flow_label)
Translate TOS value from IPv6 to IPv4.
struct unk_proto_st_walk_ctx_t_ unk_proto_st_walk_ctx_t
void nat64_tcp_session_set_state(nat64_db_st_entry_t *ste, tcp_header_t *tcp, u8 is_ip6)
Set NAT64 TCP session state.
u32 in2out_slowpath_node_index
vl_api_fib_path_type_t type
#define ip6_frag_hdr_more(hdr)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void mss_clamping(snat_main_t *sm, tcp_header_t *tcp, ip_csum_t *sum)
void nat64_session_reset_timeout(nat64_db_st_entry_t *ste, vlib_main_t *vm)
Reset NAT64 session timeout.
int nat64_alloc_out_addr_and_port(u32 fib_index, snat_protocol_t proto, ip4_address_t *addr, u16 *port, u32 thread_index)
Allocate an IPv4 address and port pair from the NAT64 pool.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
static int nat64_in2out_inner_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
static void vlib_set_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 value)
Set a simple counter.
static char * nat64_in2out_error_strings[]
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
void nat64_compose_ip6(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Compose IPv4-embedded IPv6 addresses.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
vlib_simple_counter_main_t total_sessions
static u8 * format_nat64_in2out_handoff_trace(u8 *s, va_list *args)
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
#define VLIB_REGISTER_NODE(x,...)
static int nat64_in2out_unk_proto_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
static int nat64_in2out_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
vlib_node_registration_t nat64_in2out_slowpath_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_slowpath_node)
#define foreach_nat64_in2out_handoff_error
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_main_t vlib_node_runtime_t * node
static void * ip6_next_header(ip6_header_t *i)
nat64_db_bib_entry_t * nat64_db_bib_entry_create(u32 thread_index, nat64_db_t *db, ip6_address_t *in_addr, ip4_address_t *out_addr, u16 in_port, u16 out_port, u32 fib_index, u8 proto, u8 is_static)
Create new NAT64 BIB entry.
#define ip6_frag_hdr_offset(hdr)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
nat64_db_st_entry_t * nat64_db_st_entry_find(nat64_db_t *db, ip46_address_t *l_addr, ip46_address_t *r_addr, u16 l_port, u16 r_port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 session table entry.
nat64_in2out_handoff_error_t
void nat64_db_st_walk(nat64_db_t *db, u8 proto, nat64_db_st_walk_fn_t fn, void *ctx)
Walk NAT64 session table.
static int nat64_in2out_icmp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
ip_lookup_main_t lookup_main
u32 fq_in2out_index
Worker handoff.
static u8 nat64_not_translate(u32 sw_if_index, ip6_address_t ip6_addr)
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
nat64_db_bib_entry_t * nat64_db_bib_entry_by_index(nat64_db_t *db, u8 proto, u32 bibe_index)
Get BIB entry by index and protocol.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
IPv6 to IPv4 translation.
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
NAT64 global declarations.
static u32 ip_proto_to_snat_proto(u8 ip_proto)
static char * nat64_in2out_handoff_error_strings[]
static int icmp6_to_icmp(vlib_main_t *vm, vlib_buffer_t *p, ip6_to_ip4_icmp_set_fn_t fn, void *ctx, ip6_to_ip4_icmp_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP6 packet to ICMP4.
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
static uword nat64_in2out_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_slow_path)
static_always_inline int is_hairpinning(ip6_address_t *dst_addr)
Check whether is a hairpinning.
#define vec_foreach(var, vec)
Vector iterator.
#define IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
u16 flags
Copy of main node flags.
#define u16_net_add(u, val)
static_always_inline int ip6_parse(vlib_main_t *vm, vlib_buffer_t *b, const ip6_header_t *ip6, u32 buff_len, u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
Parse some useful information from IPv6 header.
static void * ip_interface_address_get_address(ip_lookup_main_t *lm, ip_interface_address_t *a)
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
#define VLIB_NODE_FLAG_TRACE
#define foreach_nat64_in2out_error
struct nat64_in2out_set_ctx_t_ nat64_in2out_set_ctx_t
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
static u16 ip_csum_fold(ip_csum_t c)
struct nat64_in2out_frag_set_ctx_t_ nat64_in2out_frag_set_ctx_t
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
vlib_simple_counter_main_t total_bibs