36 tag = t->is_slow_path ? "NAT64-in2out-slowpath" : "NAT64-in2out";
45 #define foreach_nat64_in2out_error                \
46 _(UNSUPPORTED_PROTOCOL, "unsupported protocol")   \
47 _(NO_TRANSLATION, "no translation")               \
53 #define _(sym,str) NAT64_IN2OUT_ERROR_##sym,
60 #define _(sym,string) string,
137 nat64_db_bib_entry_t *bibe;
138 nat64_db_st_entry_t *ste;
139 ip46_address_t old_saddr, old_daddr;
152 u32 ip_version_traffic_class_and_flow_label = ip6->ip_version_traffic_class_and_flow_label;
165 ip6_frag_hdr_t *hdr =
166   (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
182 u16_net_add (payload_length, sizeof (*ip4) + sizeof (*ip6) - l4_offset);
185 clib_host_to_net_u16 (frag_offset |
187 ip4->ttl = hop_limit;
188 ip4->protocol = (proto == IP_PROTOCOL_ICMP6) ? IP_PROTOCOL_ICMP : proto;
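For context, the assignments at source lines 182-188 above are the fixed-header part of the IPv6-to-IPv4 translation: total length, TTL taken from the IPv6 hop limit, and protocol, with ICMPv6 rewritten to ICMP. A standalone sketch using minimal structures of my own (not VPP's types), assuming l4_offset is measured from the start of the IPv6 header:

#include <stdint.h>
#include <arpa/inet.h>

typedef struct
{
  uint16_t length;              /* IPv4 total length, network order */
  uint8_t ttl;
  uint8_t protocol;
} mini_ip4_t;

static void
example_map_fixed_fields (mini_ip4_t * ip4, uint16_t ip6_payload_length_net,
                          uint8_t hop_limit, uint8_t proto, uint16_t l4_offset)
{
  /* surviving L4 bytes = 40 + payload_length - l4_offset; add the 20-byte
   * IPv4 header, i.e. payload_length + sizeof(ip4) + sizeof(ip6) - l4_offset */
  ip4->length =
    htons ((uint16_t) (ntohs (ip6_payload_length_net) + 20 + 40 - l4_offset));
  ip4->ttl = hop_limit;         /* hop limit maps directly to TTL */
  ip4->protocol = (proto == 58 /* ICMPv6 */ ) ? 1 /* ICMP */ : proto;
}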
220 &old_saddr.ip6, &out_addr, sport,
221 out_port, fib_index, proto, 0);
232 &old_daddr.ip6, &new_daddr, dport);
251 if (proto == IP_PROTOCOL_UDP)
283 nat64_db_bib_entry_t *bibe;
284 nat64_db_st_entry_t *ste;
285 ip46_address_t saddr, daddr;
299 if (icmp->type == ICMP4_echo_request || icmp->type == ICMP4_echo_reply)
301 u16 in_id = ((u16 *) (icmp))[2];
304 IP_PROTOCOL_ICMP, fib_index, 1);
318 IP_PROTOCOL_ICMP, fib_index, 1);
325 (fib_index, NAT_PROTOCOL_ICMP, &out_addr, &out_id,
332 in_id, out_id, fib_index,
333 IP_PROTOCOL_ICMP, 0);
355 ((u16 *) (icmp))[2] = bibe->out_port;
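The ((u16 *) (icmp))[2] reads and writes above pick out the echo identifier: for ICMP and ICMPv6 echo messages the header is type (1 byte), code (1), checksum (2), identifier (2), sequence (2), so the identifier is the third 16-bit word. A standalone illustration with a hypothetical struct:

#include <stdint.h>

struct example_icmp_echo
{
  uint8_t type, code;
  uint16_t checksum;
  uint16_t identifier;
  uint16_t sequence;
};

static uint16_t
example_echo_id (const void *icmp)
{
  /* equivalent to ((const struct example_icmp_echo *) icmp)->identifier */
  return ((const uint16_t *) icmp)[2];
}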
377 nat64_db_st_entry_t *ste;
378 nat64_db_bib_entry_t *bibe;
379 ip46_address_t saddr, daddr;
393 if (proto == IP_PROTOCOL_ICMP6)
396 u16 in_id = ((u16 *) (icmp))[2];
397 proto = IP_PROTOCOL_ICMP;
400 (icmp->type == ICMP4_echo_request
401 || icmp->type == ICMP4_echo_reply))
415 ((u16 *) (icmp))[2] = bibe->out_port;
442 if (proto == IP_PROTOCOL_TCP)
443 checksum = &tcp->checksum;
469 nat64_db_bib_entry_t *bibe;
470 ip46_address_t saddr, daddr;
483 saddr.ip4.as_u32 = bibe->out_addr.as_u32;
501 u16 l4_offset, u16 frag_hdr_offset,
519 ip6_frag_hdr_t *hdr =
520   (ip6_frag_hdr_t *) u8_ptr_add (ip6, frag_hdr_offset);
533 nat64_db_bib_entry_t *bibe;
534 nat64_db_st_entry_t *ste;
535 ip46_address_t saddr, daddr, addr;
572 .out_addr.as_u32 = 0,
573 .fib_index = fib_index,
574 .proto = l4_protocol,
595 (db, &addr, 0, l4_protocol, 0, 0))
606 0, 0, fib_index, l4_protocol, 0);
634 sizeof (*ip4) + sizeof (*ip6) - l4_offset);
637 clib_host_to_net_u16 (frag_offset |
652 nat64_db_bib_entry_t *bibe;
653 nat64_db_st_entry_t *ste;
654 ip46_address_t saddr, daddr;
661 u16 *checksum = NULL;
676 if (proto == IP_PROTOCOL_UDP)
679 checksum = &tcp->checksum;
706 &out_port, thread_index))
711 &out_addr, sport, out_port, fib_index,
731 if (proto == IP_PROTOCOL_TCP)
744 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
760 ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
761 ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
785 nat64_db_bib_entry_t *bibe;
786 nat64_db_st_entry_t *ste;
789 ip46_address_t saddr, daddr;
794 u16 *checksum, sport, dport;
798 if (icmp->type == ICMP6_echo_request || icmp->type == ICMP6_echo_reply)
805 if (proto == IP_PROTOCOL_ICMP6)
812 saddr.as_u64[0] = inner_ip6->src_address.as_u64[0];
813 saddr.as_u64[1] = inner_ip6->src_address.as_u64[1];
814 daddr.as_u64[0] = inner_ip6->dst_address.as_u64[0];
815 daddr.as_u64[1] = inner_ip6->dst_address.as_u64[1];
823 if (proto == IP_PROTOCOL_UDP)
826 checksum = &tcp->checksum;
845 dport = udp->dst_port = bibe->out_port;
850 saddr.ip4.as_u32 = ste->out_r_addr.as_u32;
851 daddr.ip4.as_u32 = bibe->out_addr.as_u32;
872 inner_ip6->src_address.as_u64[0] = bibe->in_addr.as_u64[0];
873 inner_ip6->src_address.as_u64[1] = bibe->in_addr.as_u64[1];
911 nat64_db_bib_entry_t *bibe;
912 nat64_db_st_entry_t *ste;
913 ip46_address_t saddr, daddr, addr;
949 .out_addr.as_u32 = 0,
950 .fib_index = fib_index,
981 &ctx.out_addr, 0, 0, fib_index, proto,
1006 daddr.ip4.as_u32 = ste->out_r_addr.as_u32;
1022 ip6->dst_address.as_u64[0] = bibe->in_addr.as_u64[0];
1023 ip6->dst_address.as_u64[1] = bibe->in_addr.as_u64[1];
1032 u32 n_left_from, *from, *to_next;
1041 while (n_left_from > 0)
1047 while (n_left_from > 0 && n_left_to_next > 0)
1053 u16 l4_offset0, frag_hdr_offset0;
1065 n_left_to_next -= 1;
1081 &frag_hdr_offset0)))
1084 b0->error = node->errors[NAT64_IN2OUT_ERROR_UNKNOWN];
1101 thread_index, sw_if_index0,
1107 (vm, b0, ip60, thread_index))
1111 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1117 (vm, b0, l4_protocol0, l4_offset0, frag_hdr_offset0,
1122 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1137 if (proto0 == NAT_PROTOCOL_ICMP)
1140 thread_index, sw_if_index0, 1);
1145 (vm, b0, ip60, thread_index))
1149 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1159 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1163 else if (proto0 == NAT_PROTOCOL_TCP || proto0 == NAT_PROTOCOL_UDP)
1165 if (proto0 == NAT_PROTOCOL_TCP)
1167 thread_index, sw_if_index0, 1);
1170 thread_index, sw_if_index0, 1);
1176 (vm, b0, ip60, l4_offset0, thread_index))
1180 node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1186 (vm, b0, l4_offset0, frag_hdr_offset0, &ctx0))
1189 b0->error = node->errors[NAT64_IN2OUT_ERROR_NO_TRANSLATION];
1196 && (b0->flags & VLIB_BUFFER_IS_TRACED)))
1208 thread_index, sw_if_index0, 1);
1214 n_left_to_next, bi0, next0);
1231 .name = "nat64-in2out",
1232 .vector_size = sizeof (u32),
1257 .name = "nat64-in2out-slowpath",
1258 .vector_size = sizeof (u32),
1285 #define foreach_nat64_in2out_handoff_error \
1286 _(CONGESTION_DROP, "congestion drop")      \
1287 _(SAME_WORKER, "same worker")              \
1288 _(DO_HANDOFF, "do handoff")
1292 #define _(sym,str) NAT64_IN2OUT_HANDOFF_ERROR_##sym,
1299 #define _(sym,string) string,
1329 u32 n_enq, n_left_from, *from;
1333 u32 do_handoff = 0, same_worker = 0;
1336 n_left_from = frame->n_vectors;
1340 ti = thread_indices;
1344 while (n_left_from > 0)
1351 if (ti[0] != thread_index)
1358 && (b[0]->flags & VLIB_BUFFER_IS_TRACED)))
1372 frame->n_vectors, 1);
1374 if (n_enq < frame->n_vectors)
1376 NAT64_IN2OUT_HANDOFF_ERROR_CONGESTION_DROP,
1377 frame->n_vectors - n_enq);
1379 NAT64_IN2OUT_HANDOFF_ERROR_SAME_WORKER,
1382 NAT64_IN2OUT_HANDOFF_ERROR_DO_HANDOFF,
1385 return frame->n_vectors;
1390 .name = "nat64-in2out-handoff",
1391 .vector_size = sizeof (u32),
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
nat64_db_t * db
BIB and session DB per thread.
static int unk_proto_st_walk(nat64_db_st_entry_t *ste, void *arg)
struct nat64_main_t::@102::@103 in2out
int nat64_alloc_out_addr_and_port(u32 fib_index, nat_protocol_t proto, ip4_address_t *addr, u16 *port, u32 thread_index)
Allocate IPv4 address and port pair from the NAT64 pool.
nat64_address_t * addr_pool
Address pool vector.
static int nat64_in2out_tcp_udp(vlib_main_t *vm, vlib_buffer_t *p, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *ctx)
void nat64_extract_ip4(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Extract IPv4 address from the IPv4-embedded IPv6 addresses.
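nat64_extract_ip4() and its counterpart nat64_compose_ip6() (listed further below) implement RFC 6052 address mapping; the fib_index argument selects the configured prefix. A standalone sketch assuming only the well-known 64:ff9b::/96 prefix, where the IPv4 address simply occupies the last 32 bits:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static const uint8_t example_wkp[12] = { 0x00, 0x64, 0xff, 0x9b };  /* 64:ff9b::/96 */

static void
example_compose_ip6 (struct in6_addr *ip6, const struct in_addr *ip4)
{
  memcpy (ip6->s6_addr, example_wkp, 12);        /* NAT64 prefix */
  memcpy (ip6->s6_addr + 12, &ip4->s_addr, 4);   /* embedded IPv4 address */
}

static void
example_extract_ip4 (const struct in6_addr *ip6, struct in_addr *ip4)
{
  memcpy (&ip4->s_addr, ip6->s6_addr + 12, 4);
}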
vlib_node_registration_t nat64_in2out_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_node)
static void * ip_interface_address_get_address(ip_lookup_main_t *lm, ip_interface_address_t *a)
static int nat64_in2out_tcp_udp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 l4_offset, u32 thread_index)
clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)
u32 fib_table_get_index_for_sw_if_index(fib_protocol_t proto, u32 sw_if_index)
Get the index of the FIB bound to the interface.
u16 current_length
Number of bytes between current data and the end of this buffer.
nat64_db_bib_entry_t * nat64_db_bib_entry_find(nat64_db_t *db, ip46_address_t *addr, u16 port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 BIB entry.
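A BIB (Binding Information Base) entry binds an inside IPv6 (address, port) to an outside IPv4 (address, port) for one protocol; session-table entries additionally record the remote endpoint. A standalone, simplified model (not the VPP structures or hashing) of what nat64_db_bib_entry_find() resolves in the in2out direction:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

typedef struct
{
  struct in6_addr in_addr;   /* inside IPv6 source address */
  uint16_t in_port;          /* inside source port, network order */
  struct in_addr out_addr;   /* outside IPv4 address from the NAT64 pool */
  uint16_t out_port;         /* outside source port */
  uint8_t proto;             /* IP protocol number */
} example_bib_entry_t;

static example_bib_entry_t *
example_bib_find (example_bib_entry_t * tbl, int n,
                  const struct in6_addr *in_addr, uint16_t in_port,
                  uint8_t proto)
{
  int i;
  for (i = 0; i < n; i++)
    if (tbl[i].proto == proto && tbl[i].in_port == in_port
        && memcmp (&tbl[i].in_addr, in_addr, sizeof (*in_addr)) == 0)
      return &tbl[i];
  return 0;
}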
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
#define VLIB_NODE_FN(node)
static u8 * format_nat64_in2out_trace(u8 *s, va_list *args)
vlib_error_t * errors
Vector of errors for this node.
vlib_node_registration_t nat64_in2out_handoff_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_handoff_node)
struct _tcp_header tcp_header_t
#define u8_ptr_add(ptr, index)
u32 nat64_get_worker_in2out(ip6_address_t *addr)
Get worker thread index for NAT64 in2out.
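In the handoff node, nat64_get_worker_in2out() maps the packet's IPv6 source address to a worker thread so that all packets of a flow are processed by the same thread (compare the ti[0] != thread_index test at source line 1351). The real hash differs from this one; the sketch below only shows the shape of the idea:

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static uint32_t
example_worker_for_addr (const struct in6_addr *addr,
                         uint32_t first_worker_index, uint32_t num_workers)
{
  uint32_t w[4], h;

  memcpy (w, addr->s6_addr, sizeof (w));
  h = w[0] ^ w[1] ^ w[2] ^ w[3];   /* toy hash over the 128-bit address */
  return first_worker_index + (h % num_workers);
}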
#define static_always_inline
static nat_protocol_t ip_proto_to_nat_proto(u8 ip_proto)
Common NAT inline functions.
nat64_db_st_entry_t * nat64_db_st_entry_create(u32 thread_index, nat64_db_t *db, nat64_db_bib_entry_t *bibe, ip6_address_t *in_r_addr, ip4_address_t *out_r_addr, u16 r_port)
Create new NAT64 session table entry.
static int nat64_in2out_unk_proto(vlib_main_t *vm, vlib_buffer_t *p, u8 l4_protocol, u16 l4_offset, u16 frag_hdr_offset, nat64_in2out_set_ctx_t *s_ctx)
description fragment has unexpected format
struct nat64_main_t::@102 counters
static_always_inline u8 ip6_translate_tos(u32 ip_version_traffic_class_and_flow_label)
Translate TOS value from IPv6 to IPv4.
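The argument name explains the trick: the first 32-bit word of the IPv6 header packs version (4 bits), traffic class (8 bits) and flow label (20 bits), so the IPv4 TOS is a shift and mask away. A standalone sketch of that extraction (field layout per RFC 8200):

#include <stdint.h>
#include <arpa/inet.h>

static uint8_t
example_ip6_traffic_class (uint32_t ip_version_traffic_class_and_flow_label)
{
  /* bits 27..20 of the host-order word hold the traffic class */
  return (ntohl (ip_version_traffic_class_and_flow_label) & 0x0ff00000) >> 20;
}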
struct unk_proto_st_walk_ctx_t_ unk_proto_st_walk_ctx_t
void nat64_tcp_session_set_state(nat64_db_st_entry_t *ste, tcp_header_t *tcp, u8 is_ip6)
Set NAT64 TCP session state.
vl_api_fib_path_type_t type
#define ip6_frag_hdr_more(hdr)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
void nat64_session_reset_timeout(nat64_db_st_entry_t *ste, vlib_main_t *vm)
Reset NAT64 session timeout.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static void mss_clamping(u16 mss_clamping, tcp_header_t *tcp, ip_csum_t *sum)
int ip6_address_compare(ip6_address_t *a1, ip6_address_t *a2)
static int nat64_in2out_inner_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
static void vlib_set_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 value)
Set a simple counter.
static char * nat64_in2out_error_strings[]
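The foreach_nat64_in2out_error list near the top of the file and the two _() definitions there are the usual VPP X-macro pattern: the same list expands once into the error enum and once into this strings array. A minimal standalone sketch of that expansion, with illustrative names:

#include <stdio.h>

#define foreach_example_error                      \
  _ (UNSUPPORTED_PROTOCOL, "unsupported protocol") \
  _ (NO_TRANSLATION, "no translation")

typedef enum
{
#define _(sym, str) EXAMPLE_ERROR_##sym,
  foreach_example_error
#undef _
    EXAMPLE_N_ERROR,
} example_error_t;

static char *example_error_strings[] = {
#define _(sym, string) string,
  foreach_example_error
#undef _
};

int
main (void)
{
  printf ("%s\n", example_error_strings[EXAMPLE_ERROR_NO_TRANSLATION]);
  return 0;
}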
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
void nat64_compose_ip6(ip6_address_t *ip6, ip4_address_t *ip4, u32 fib_index)
Compose IPv4-embedded IPv6 addresses.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
vlib_simple_counter_main_t total_sessions
static u8 * format_nat64_in2out_handoff_trace(u8 *s, va_list *args)
vl_api_mac_address_t dst_addr
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
#define VLIB_REGISTER_NODE(x,...)
static int nat64_in2out_unk_proto_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
static int nat64_in2out_icmp_set_cb(ip6_header_t *ip6, ip4_header_t *ip4, void *arg)
vlib_node_registration_t nat64_in2out_slowpath_node
(constructor) VLIB_REGISTER_NODE (nat64_in2out_slowpath_node)
#define foreach_nat64_in2out_handoff_error
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
static void * ip6_next_header(ip6_header_t *i)
nat64_db_bib_entry_t * nat64_db_bib_entry_create(u32 thread_index, nat64_db_t *db, ip6_address_t *in_addr, ip4_address_t *out_addr, u16 in_port, u16 out_port, u32 fib_index, u8 proto, u8 is_static)
Create new NAT64 BIB entry.
vlib_main_t vlib_node_runtime_t * node
#define ip6_frag_hdr_offset(hdr)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
nat64_db_st_entry_t * nat64_db_st_entry_find(nat64_db_t *db, ip46_address_t *l_addr, ip46_address_t *r_addr, u16 l_port, u16 r_port, u8 proto, u32 fib_index, u8 is_ip6)
Find NAT64 session table entry.
nat64_in2out_handoff_error_t
void nat64_db_st_walk(nat64_db_t *db, u8 proto, nat64_db_st_walk_fn_t fn, void *ctx)
Walk NAT64 session table.
static int nat64_in2out_icmp_hairpinning(vlib_main_t *vm, vlib_buffer_t *b, ip6_header_t *ip6, u32 thread_index)
ip_lookup_main_t lookup_main
u32 fq_in2out_index
Worker handoff.
static u8 nat64_not_translate(u32 sw_if_index, ip6_address_t ip6_addr)
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
nat64_db_bib_entry_t * nat64_db_bib_entry_by_index(nat64_db_t *db, u8 proto, u32 bibe_index)
Get BIB entry by index and protocol.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
IPv6 to IPv4 translation.
static uword ip6_address_is_equal(const ip6_address_t *a, const ip6_address_t *b)
static char * nat64_in2out_handoff_error_strings[]
static int icmp6_to_icmp(vlib_main_t *vm, vlib_buffer_t *p, ip6_to_ip4_icmp_set_fn_t fn, void *ctx, ip6_to_ip4_icmp_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP6 packet to ICMP4.
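For the echo traffic handled in this node, the type mapping performed as part of the ICMPv6-to-ICMP translation is small: Echo Request 128 becomes 8 and Echo Reply 129 becomes 0 (the full mapping in RFC 7915 also covers error messages and embedded packets). A minimal sketch of just that subset:

#include <stdint.h>

static int
example_icmp6_type_to_icmp4 (uint8_t type6, uint8_t * type4)
{
  switch (type6)
    {
    case 128:                  /* ICMPv6 echo request */
      *type4 = 8;
      return 0;
    case 129:                  /* ICMPv6 echo reply */
      *type4 = 0;
      return 0;
    default:
      return -1;               /* not covered by this sketch */
    }
}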
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
#define foreach_ip_interface_address(lm, a, sw_if_index, loop, body)
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static_always_inline u32 vlib_buffer_enqueue_to_thread(vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices, u16 *thread_indices, u32 n_packets, int drop_on_congestion)
static uword nat64_in2out_node_fn_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u8 is_slow_path)
static_always_inline int is_hairpinning(ip6_address_t *dst_addr)
Check whether the destination address is a hairpinning case.
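Hairpinning means the IPv6 destination is itself a NAT64-mapped (IPv4-embedded) address, so the translated packet would immediately re-enter the same NAT64; the node then takes the *_hairpinning helpers instead. A standalone sketch of the check, assuming only the well-known 64:ff9b::/96 prefix (the VPP check consults the configured prefixes):

#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

static int
example_is_hairpinning (const struct in6_addr *dst_addr)
{
  static const uint8_t wkp[12] = { 0x00, 0x64, 0xff, 0x9b };  /* 64:ff9b::/96 */
  return memcmp (dst_addr->s6_addr, wkp, 12) == 0;
}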
#define vec_foreach(var, vec)
Vector iterator.
#define IP4_VERSION_AND_HEADER_LENGTH_NO_OPTIONS
u16 flags
Copy of main node flags.
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define u16_net_add(u, val)
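As used in the fragments above (for example source lines 165-166 and 182), u8_ptr_add() is byte-offset pointer arithmetic and u16_net_add() adds a host-order delta to a network-order 16-bit field. Illustrative standalone equivalents (assumed semantics, not the VPP definitions verbatim):

#include <stdint.h>
#include <arpa/inet.h>

#define example_u8_ptr_add(ptr, index) (((uint8_t *) (ptr)) + (index))

static inline uint16_t
example_u16_net_add (uint16_t net_u16, int32_t delta)
{
  return htons ((uint16_t) (ntohs (net_u16) + delta));
}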
static_always_inline int ip6_parse(vlib_main_t *vm, vlib_buffer_t *b, const ip6_header_t *ip6, u32 buff_len, u8 *l4_protocol, u16 *l4_offset, u16 *frag_hdr_offset)
Parse some useful information from IPv6 header.
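ip6_parse() also locates a fragment extension header when present (hence frag_hdr_offset in the node), and the ip6_frag_hdr_offset()/ip6_frag_hdr_more() macros decode it: the 16-bit fragment field carries the offset in 8-byte units in its upper 13 bits and the M (more fragments) flag in bit 0. A standalone sketch of that layout:

#include <stdint.h>
#include <arpa/inet.h>

typedef struct
{
  uint8_t next_hdr;
  uint8_t rsv;
  uint16_t fragment_offset_and_more;   /* network byte order */
  uint32_t identification;
} example_ip6_frag_hdr_t;

static uint16_t
example_frag_offset (const example_ip6_frag_hdr_t * h)
{
  return ntohs (h->fragment_offset_and_more) >> 3;   /* in 8-byte units */
}

static int
example_frag_more (const example_ip6_frag_hdr_t * h)
{
  return ntohs (h->fragment_offset_and_more) & 1;    /* M flag */
}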
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
#define VLIB_NODE_FLAG_TRACE
#define foreach_nat64_in2out_error
struct nat64_in2out_set_ctx_t_ nat64_in2out_set_ctx_t
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
static u16 ip_csum_fold(ip_csum_t c)
struct nat64_in2out_frag_set_ctx_t_ nat64_in2out_frag_set_ctx_t
vl_api_interface_index_t sw_if_index
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
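The ip_csum_* helpers above implement incremental (one's-complement) checksum maintenance: when a 16-bit field changes, the L4 checksum is patched rather than recomputed over the whole packet. A standalone sketch of the RFC 1624 update rule HC' = ~(~HC + ~m + m'):

#include <stdint.h>

static uint16_t
example_csum_update (uint16_t csum, uint16_t old_val, uint16_t new_val)
{
  uint32_t sum = (uint16_t) ~csum;

  sum += (uint16_t) ~old_val;   /* remove old field value */
  sum += new_val;               /* add new field value */
  while (sum >> 16)             /* fold carries back in */
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}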
vlib_simple_counter_main_t total_bibs