66 }) ip4_mapt_pseudo_header_t;
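The pseudo header declared above never goes on the wire: the ip4-map-t classifier node fills it with the precomputed IPv6 source and destination in front of the IPv4 header, and the protocol-specific nodes read it and strip it again before translating. A minimal sketch of that hand-off, assuming vlib_buffer_advance()'s signed-advance semantics; p0 is the buffer in flight and ip6_src/ip6_dst stand in for addresses derived from the MAP domain:

  ip4_mapt_pseudo_header_t *ph;

  /* make room in front of the current data and fill in the pseudo header */
  vlib_buffer_advance (p0, - (word) sizeof (*ph));
  ph = vlib_buffer_get_current (p0);
  ph->saddr = ip6_src;   /* assumed: IPv6 source built from the domain rules      */
  ph->daddr = ip6_dst;   /* assumed: IPv6 destination built from the domain rules */

  /* ... later, in ip4-map-t-tcp-udp / -icmp / -fragmented ... */
  ph = vlib_buffer_get_current (p0);
  vlib_buffer_advance (p0, sizeof (*ph));   /* drop the pseudo header before translating */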
115 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
124 while (n_left_from > 0)
128 while (n_left_from > 0 && n_left_to_next > 0)
132 ip4_mapt_icmp_next_t next0;
140 pi0 = to_next[0] = from[0];
145 error0 = MAP_ERROR_NONE;
159 if (ctx0.recv_port == 0)
164 error0 = MAP_ERROR_ICMP;
173 error0 = MAP_ERROR_ICMP;
194 map_t.map_domain_index, 1,
203 to_next, n_left_to_next, pi0,
216 ip4_mapt_pseudo_header_t * pheader)
220 ip6_frag_hdr_t *frag;
223   frag = (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
226                           sizeof (*ip4) - sizeof (*frag) -
236 frag->fragment_offset_and_more =
243     clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
245     clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
246                           sizeof (*ip4) + sizeof (*frag));
248   ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
250   ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
251   ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
252   ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
253   ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
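The two conversions above are easy to verify by hand: the first IPv6 word packs version 6 into the top nibble and reuses the IPv4 TOS as the traffic class (line 243), and the payload length is the IPv4 payload plus the 8-byte fragment header that is kept (lines 245-246). A small standalone check with made-up values:

#include <stdint.h>
#include <stdio.h>

int main (void)
{
  uint8_t tos = 0xb8;                   /* example IPv4 TOS byte       */
  uint16_t ip4_total_length = 120;      /* example IPv4 total length   */

  /* mirrors (6 << 28) + (ip4->tos << 20), before the byte swap */
  uint32_t ver_tc_flow = (6u << 28) + ((uint32_t) tos << 20);

  /* IPv4 payload (total length minus 20-byte header) plus the 8-byte frag header */
  unsigned ip6_payload_length = ip4_total_length - 20 + 8;

  printf ("ver/tc/flow word = 0x%08x\n", (unsigned) ver_tc_flow);   /* 0x6b800000 */
  printf ("ip6 payload len  = %u\n", ip6_payload_length);           /* 108 */
  return 0;
}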
262 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
269 while (n_left_from > 0)
273 while (n_left_from > 0 && n_left_to_next > 0)
277 ip4_mapt_pseudo_header_t *pheader0;
278 ip4_mapt_fragmented_next_t next0;
281 pi0 = to_next[0] = from[0];
295       p0->error = error_node->errors[MAP_ERROR_FRAGMENT_DROPPED];
315 to_next, n_left_to_next, pi0,
334 ip6_frag_hdr_t *frag;
340   if (ip4->protocol == IP_PROTOCOL_UDP)
352       u16 udp_len = clib_host_to_net_u16 (ip4->length) - sizeof (*ip4);
366 csum = tcp->checksum;
370 checksum = &tcp->checksum;
382                        sizeof (*ip4) - sizeof (*ip6) -
385     (ip6_frag_hdr_t *) u8_ptr_add (ip4, sizeof (*ip4) - sizeof (*frag));
391   ip6 = (ip6_header_t *) (((u8 *) ip4) + sizeof (*ip4) - sizeof (*ip6));
397     clib_host_to_net_u32 ((6 << 28) + (ip4->tos << 20));
404 frag->identification = frag_id;
407   ip6->protocol = IP_PROTOCOL_IPV6_FRAGMENTATION;
411   ip6->dst_address.as_u64[0] = pheader->daddr.as_u64[0];
412   ip6->dst_address.as_u64[1] = pheader->daddr.as_u64[1];
413   ip6->src_address.as_u64[0] = pheader->saddr.as_u64[0];
414   ip6->src_address.as_u64[1] = pheader->saddr.as_u64[1];
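Because an option-less IPv4 header is 20 bytes, an IPv6 header 40 bytes, and the optional fragment extension header 8 bytes, the translated headers must begin before the original IPv4 header; that is what the sizeof() arithmetic at lines 382-391 expresses, and vlib_buffer_advance (documented below) accepts the resulting negative amount. A quick standalone check of those offsets, header sizes assumed:

#include <stdio.h>

int main (void)
{
  int ip4_hdr = 20;    /* sizeof (ip4_header_t), no options */
  int ip6_hdr = 40;    /* sizeof (ip6_header_t)             */
  int frag_hdr = 8;    /* sizeof (ip6_frag_hdr_t)           */

  /* IPv6 header alone starts this many bytes relative to the IPv4 header */
  printf ("plain:     %d\n", ip4_hdr - ip6_hdr);              /* -20 */

  /* with an inserted fragment header it starts another 8 bytes earlier */
  printf ("with frag: %d\n", ip4_hdr - ip6_hdr - frag_hdr);   /* -28 */
  return 0;
}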
431 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
439 while (n_left_from > 0)
443 while (n_left_from > 0 && n_left_to_next > 0)
447 ip4_mapt_pseudo_header_t *pheader0;
448 ip4_mapt_tcp_udp_next_t next0;
450 pi0 = to_next[0] = from[0];
485 to_next, n_left_to_next, pi0,
497                      u8 * error0, ip4_mapt_next_t * next0, u16 l4_dst_port)
508 *dst_port0 = l4_dst_port;
509 *error0 = (*dst_port0 == -1) ? MAP_ERROR_FRAGMENT_MEMORY : *error0;
516 *error0 = ip4_len0 < 40 ? MAP_ERROR_MALFORMED : *error0;
517 *dst_port0 = l4_dst_port;
523 *error0 = ip4_len0 < 28 ? MAP_ERROR_MALFORMED : *error0;
524 *dst_port0 = l4_dst_port;
526   else if (ip40->protocol == IP_PROTOCOL_ICMP)
531     else if (((icmp46_header_t *)
                  u8_ptr_add (ip40, sizeof (*ip40)))->type == ICMP4_echo_reply
533              || ((icmp46_header_t *)
535                  u8_ptr_add (ip40, sizeof (*ip40)))->type == ICMP4_echo_request)
536       *dst_port0 = l4_dst_port;
540 *error0 = MAP_ERROR_BAD_PROTOCOL;
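Taken together, the classifier picks the downstream node from the IP protocol and sanity-checks the length: TCP needs at least 40 bytes (IPv4 plus TCP headers, line 516), UDP at least 28 (line 523), ICMP echo and echo-reply reuse the ICMP identifier as the port (lines 531-536), and anything else becomes MAP_ERROR_BAD_PROTOCOL (line 540). A simplified, standalone restatement of those rules; the demo_* names are hypothetical and not part of the plugin:

#include <stdint.h>

enum demo_error { DEMO_OK, DEMO_MALFORMED, DEMO_BAD_PROTOCOL };
enum demo_next  { DEMO_NEXT_TCP_UDP, DEMO_NEXT_ICMP, DEMO_NEXT_DROP };

static enum demo_next
demo_classify (uint8_t protocol, uint16_t ip4_total_len, enum demo_error *error)
{
  if (protocol == 6)                    /* TCP */
    {
      if (ip4_total_len < 40)           /* 20 B IPv4 header + 20 B TCP header */
        *error = DEMO_MALFORMED;
      return DEMO_NEXT_TCP_UDP;
    }
  if (protocol == 17)                   /* UDP */
    {
      if (ip4_total_len < 28)           /* 20 B IPv4 header + 8 B UDP header */
        *error = DEMO_MALFORMED;
      return DEMO_NEXT_TCP_UDP;
    }
  if (protocol == 1)                    /* ICMP; echo/echo-reply carry an id usable as port */
    return DEMO_NEXT_ICMP;

  *error = DEMO_BAD_PROTOCOL;
  return DEMO_NEXT_DROP;
}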
547 u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
556 while (n_left_from > 0)
560 while (n_left_from > 0 && n_left_to_next > 0)
566 ip4_mapt_next_t next0 = 0;
570 ip4_mapt_pseudo_header_t *pheader0;
572 pi0 = to_next[0] = from[0];
577 error0 = MAP_ERROR_NONE;
584       ip4_len0 = clib_host_to_net_u16 (ip40->length);
588 error0 = MAP_ERROR_UNKNOWN;
606 ICMP4_time_exceeded_ttl_exceeded_in_transit,
608       p0->error = error_node->errors[MAP_ERROR_TIME_EXCEEDED];
627 ICMP4_destination_unreachable_fragmentation_needed_and_dont_fragment_set,
637 &next0, l4_dst_port);
641 && (clib_net_to_host_u16 (dst_port0) <
644 error0 = MAP_ERROR_SEC_CHECK;
654 pheader0->daddr.as_u64[0] =
656 pheader0->daddr.as_u64[1] =
665 map_t.map_domain_index, 1,
679 to_next, n_left_to_next, pi0,
689   .arc_name = "ip4-unicast",
690   .node_name = "ip4-map-t",
697   .name = "ip4-map-t-fragmented",
698   .vector_size = sizeof (u32),
702 .n_errors = MAP_N_ERROR,
703 .error_counters = map_error_counters,
718   .name = "ip4-map-t-icmp",
719   .vector_size = sizeof (u32),
723 .n_errors = MAP_N_ERROR,
724 .error_counters = map_error_counters,
739   .name = "ip4-map-t-tcp-udp",
740   .vector_size = sizeof (u32),
744 .n_errors = MAP_N_ERROR,
745 .error_counters = map_error_counters,
761   .vector_size = sizeof (u32),
765 .n_errors = MAP_N_ERROR,
766 .error_counters = map_error_counters,
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static void map_add_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, u32 map_domain_index, u16 port)
static vlib_cli_command_t trace
(constructor) VLIB_CLI_COMMAND (trace)
VNET_FEATURE_INIT(ip4_map_t_feature, static)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
#define IP6_FRAG_NODE_NAME
u16 current_length
Nbytes between current data and the end of this buffer.
static_always_inline map_domain_t * ip4_map_get_domain(ip4_address_t *addr, u32 *map_domain_index, u8 *error)
static ip_csum_t ip_csum_with_carry(ip_csum_t sum, ip_csum_t x)
vlib_error_t * errors
Vector of errors for this node.
struct _tcp_header tcp_header_t
IPv4 to IPv6 translation.
static_always_inline void ip4_map_t_classify(vlib_buffer_t *p0, map_domain_t *d0, ip4_header_t *ip40, u16 ip4_len0, i32 *dst_port0, u8 *error0, ip4_mapt_next_t *next0, u16 l4_dst_port)
static uword ip4_map_t_icmp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static uword ip4_map_t(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static_always_inline void ip4_map_t_embedded_address(map_domain_t *d, ip6_address_t *ip6, const ip4_address_t *ip4)
#define u8_ptr_add(ptr, index)
#define static_always_inline
vlib_combined_counter_main_t * domain_counters
ip4_mapt_fragmented_next_t
static void * ip4_next_header(ip4_header_t *i)
static u16 ip4_get_port(ip4_header_t *ip, u8 sender)
Get TCP/UDP port number or ICMP id from IPv4 packet.
vl_api_fib_path_type_t type
vlib_error_t error
Error code for buffers to be enqueued to error handler.
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline u64 map_get_pfx_net(map_domain_t *d, u32 addr, u16 port)
static_always_inline u64 map_get_sfx_net(map_domain_t *d, u32 addr, u16 port)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static int ip4_to_ip6_set_icmp_cb(vlib_buffer_t *b, ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
#define IP4_HEADER_FLAG_MORE_FRAGMENTS
#define VLIB_REGISTER_NODE(x,...)
static int map_ip4_to_ip6_fragmented(vlib_buffer_t *p, ip4_mapt_pseudo_header_t *pheader)
static u16 ip4_get_fragment_offset(const ip4_header_t *i)
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
typedef CLIB_PACKED (struct { ip6_address_t daddr; ip6_address_t saddr; u8 unused[28]; })
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
static int map_ip4_to_ip6_tcp_udp(vlib_buffer_t *p, ip4_mapt_pseudo_header_t *pheader)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_main_t vlib_node_runtime_t * node
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
static_always_inline bool ip4_map_ip6_lookup_bypass(vlib_buffer_t *p0, ip4_header_t *ip)
static ip_csum_t ip_csum_sub_even(ip_csum_t c, ip_csum_t x)
static int icmp_to_icmp6(vlib_buffer_t *p, ip4_to_ip6_set_fn_t fn, void *ctx, ip4_to_ip6_set_fn_t inner_fn, void *inner_ctx)
Translate ICMP4 packet to ICMP6.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
u8 * format_map_trace(u8 *s, va_list *args)
#define VNET_FEATURES(...)
static_always_inline void map_mss_clamping(tcp_header_t *tcp, ip_csum_t *sum, u16 mss_clamping)
static uword ip4_map_t_tcp_udp(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
static uword ip4_map_t_fragmented(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
A collection of combined counters.
#define IP4_HEADER_FLAG_DONT_FRAGMENT
static int ip4_to_ip6_set_inner_icmp_cb(vlib_buffer_t *b, ip4_header_t *ip4, ip6_header_t *ip6, void *arg)
#define u16_net_add(u, val)
#define ip6_frag_hdr_offset_and_more(offset, more)
static ip_csum_t ip_incremental_checksum(ip_csum_t sum, void *_data, uword n_bytes)
static_always_inline void icmp4_error_set_vnet_buffer(vlib_buffer_t *b, u8 type, u8 code, u32 data)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip_csum_fold(ip_csum_t c)
static ip_csum_t ip_csum_add_even(ip_csum_t c, ip_csum_t x)
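All four graph nodes in this listing share the same vlib dispatch skeleton built from the helpers documented above (vlib_frame_vector_args, vlib_get_next_frame, vlib_validate_buffer_enqueue_x1, vlib_put_next_frame). A condensed sketch of that single-buffer loop, assuming the usual vlib/vnet headers and an existing next-node enum; it shows the common pattern only, not a drop-in body for any of the nodes:

static uword
demo_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
{
  u32 n_left_from, *from, next_index, *to_next, n_left_to_next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 pi0 = to_next[0] = from[0];
          u32 next0 = 0;                /* per-packet next node */
          vlib_buffer_t *p0 = vlib_get_buffer (vm, pi0);

          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          /* per-packet classification / translation would happen here */
          (void) p0;

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           pi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}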