s = format (s, "VXLAN decap from vxlan_tunnel%d vni %d next %d error %d",
s = format (s, "VXLAN decap error - tunnel for vni %d does not exist",
if (sw_if_index != (u32) ~0)
u32 *fib_index_by_sw_if_index = is_ip4 ?
return vec_elt (fib_index_by_sw_if_index, sw_if_index);
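Taken together, these fragments implement the FIB-table selection used by the decap path: prefer a FIB index already pinned in the buffer's TX slot, otherwise map the receive interface through fib_index_by_sw_if_index. A minimal sketch, assuming the usual vnet_buffer opaque layout (not a verbatim copy of the file):

/* context: VPP buffer/ip4/ip6 main structures as listed in the symbol index below */
static u32
buf_fib_index_sketch (vlib_buffer_t * b, u32 is_ip4)
{
  /* A previous feature may already have selected a FIB for this buffer. */
  u32 sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
  if (sw_if_index != (u32) ~0)
    return sw_if_index;

  /* Otherwise, map the receive interface to its FIB table. */
  u32 *fib_index_by_sw_if_index = is_ip4 ?
    ip4_main.fib_index_by_sw_if_index : ip6_main.fib_index_by_sw_if_index;
  sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_RX];
  return vec_elt (fib_index_by_sw_if_index, sw_if_index);
}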
u32 pkts_decapsulated = 0;
memset (&last4, 0xff, sizeof last4);
memset (&last6, 0xff, sizeof last6);
while (n_left_from > 0)
u32 *to_next, n_left_to_next;
while (n_left_from >= 4 && n_left_to_next >= 2)
u32 bi0 = to_next[0] = from[0];
u32 bi1 = to_next[1] = from[1];
u8 error0 = 0, error1 = 0;
next0 = VXLAN_INPUT_NEXT_DROP;
error0 = VXLAN_ERROR_BAD_FLAGS;
vlib_increment_combined_counter (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0);
error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
vlib_increment_combined_counter (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
next1 = VXLAN_INPUT_NEXT_DROP;
error1 = VXLAN_ERROR_BAD_FLAGS;
vlib_increment_combined_counter (drop_counter, thread_index, stats_t1->sw_if_index, 1, len1);
error1 = VXLAN_ERROR_NO_SUCH_TUNNEL;
vlib_increment_combined_counter (rx_counter, thread_index, stats_t1->sw_if_index, 1, len1);
vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next,
                                 bi0, bi1, next0, next1);
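The per-tunnel statistics updates in these fragments follow the standard VPP combined-counter pattern: one packet and its chained byte length are attributed to the tunnel's sw_if_index on the current thread. A sketch of the call, assuming len0 comes from vlib_buffer_length_in_chain and the counter mains from interface_main.combined_sw_if_counters:

u32 len0 = vlib_buffer_length_in_chain (vm, b0);
/* One packet, len0 bytes, charged to the decap tunnel interface on this thread. */
vlib_increment_combined_counter (rx_counter, thread_index,
                                 stats_t0->sw_if_index, 1 /* packets */, len0 /* bytes */);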
while (n_left_from > 0 && n_left_to_next > 0)
u32 bi0 = to_next[0] = from[0];
next0 = VXLAN_INPUT_NEXT_DROP;
error0 = VXLAN_ERROR_BAD_FLAGS;
vlib_increment_combined_counter (drop_counter, thread_index, stats_t0->sw_if_index, 1, len0);
error0 = VXLAN_ERROR_NO_SUCH_TUNNEL;
vlib_increment_combined_counter (rx_counter, thread_index, stats_t0->sw_if_index, 1, len0);
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0);
#define vxlan_error(n,s) s,
.name = "vxlan4-input",
.vector_size = sizeof (u32),
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
.name = "vxlan6-input",
.vector_size = sizeof (u32),
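The _(s,n) x-macro that accompanies these registrations is how the .next_nodes arrays are typically filled; a minimal sketch of the pattern, assuming foreach_vxlan_input_next expands to (symbol, node-name) pairs:

.next_nodes = {
/* each _(s,n) entry becomes [VXLAN_INPUT_NEXT_s] = "node-name" */
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
    foreach_vxlan_input_next
#undef _
},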
#define _(s,n) [VXLAN_INPUT_NEXT_##s] = n,
u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
while (n_left_from > 0)
while (n_left_from >= 4 && n_left_to_next >= 2)
u32 bi0, ip_len0, udp_len0, flags0, next0;
u32 bi1, ip_len1, udp_len1, flags1, next1;
i32 len_diff0, len_diff1;
u8 error0, good_udp0, proto0;
u8 error1, good_udp1, proto1;
bi0 = to_next[0] = from[0];
bi1 = to_next[1] = from[1];
if (proto0 != IP_PROTOCOL_UDP)
if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
ip_len0 = clib_net_to_host_u16 (ip40->length);
udp_len0 = clib_net_to_host_u16 (udp0->length);
len_diff0 = ip_len0 - udp_len0;
if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
b0->error = error0 ? error_node->errors[error0] : 0;
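The bypass path defers UDP checksum verification: when the NIC already computed the L4 checksum it simply trusts the CORRECT flag, and only falls back to software verification when no result is available. A sketch of that fallback for the IPv4 case (ip4_tcp_udp_validate_checksum returns updated buffer flags):

if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
  {
    /* No rx offload result available: verify in software and re-read the flags. */
    flags0 = ip4_tcp_udp_validate_checksum (vm, b0);
    good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
  }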
if (proto1 != IP_PROTOCOL_UDP)
if (udp1->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
ip_len1 = clib_net_to_host_u16 (ip41->length);
udp_len1 = clib_net_to_host_u16 (udp1->length);
len_diff1 = ip_len1 - udp_len1;
if ((flags1 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
good_udp1 = (flags1 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
error1 = good_udp1 ? 0 : IP4_ERROR_UDP_CHECKSUM;
error1 = (len_diff1 >= 0) ? error1 : IP4_ERROR_UDP_LENGTH;
error1 = good_udp1 ? 0 : IP6_ERROR_UDP_CHECKSUM;
error1 = (len_diff1 >= 0) ? error1 : IP6_ERROR_UDP_LENGTH;
b1->error = error1 ? error_node->errors[error1] : 0;
vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next, n_left_to_next,
                                 bi0, bi1, next0, next1);
while (n_left_from > 0 && n_left_to_next > 0)
u32 bi0, ip_len0, udp_len0, flags0, next0;
u8 error0, good_udp0, proto0;
bi0 = to_next[0] = from[0];
if (proto0 != IP_PROTOCOL_UDP)
if (udp0->dst_port != clib_host_to_net_u16 (UDP_DST_PORT_vxlan))
good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
ip_len0 = clib_net_to_host_u16 (ip40->length);
udp_len0 = clib_net_to_host_u16 (udp0->length);
len_diff0 = ip_len0 - udp_len0;
if ((flags0 & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
good_udp0 = (flags0 & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
error0 = good_udp0 ? 0 : IP4_ERROR_UDP_CHECKSUM;
error0 = (len_diff0 >= 0) ? error0 : IP4_ERROR_UDP_LENGTH;
error0 = good_udp0 ? 0 : IP6_ERROR_UDP_CHECKSUM;
error0 = (len_diff0 >= 0) ? error0 : IP6_ERROR_UDP_LENGTH;
b0->error = error0 ? error_node->errors[error0] : 0;
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0);
.name = "ip4-vxlan-bypass",
.vector_size = sizeof (u32),
.name = "ip6-vxlan-bypass",
.vector_size = sizeof (u32),
#define foreach_vxlan_flow_input_next \
  _(DROP, "error-drop") \
  _(L2_INPUT, "l2-input")
#define _(s,n) VXLAN_FLOW_NEXT_##s,
#define foreach_vxlan_flow_error \
  _(NONE, "no error") \
  _(IP_CHECKSUM_ERROR, "Rx ip checksum errors") \
  _(IP_HEADER_ERROR, "Rx ip header errors") \
  _(UDP_CHECKSUM_ERROR, "Rx udp checksum errors") \
  _(UDP_LENGTH_ERROR, "Rx udp length errors")
#define _(f,s) VXLAN_FLOW_ERROR_##f,
if ((flags & VNET_BUFFER_F_L4_CHECKSUM_COMPUTED) == 0)
return (flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0;
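These foreach lists are consumed with the usual VPP x-macro idiom; a minimal sketch of how the next-node enum is typically built from foreach_vxlan_flow_input_next (the typedef name and the trailing N_NEXT sentinel are the conventional pattern, shown here as an illustration):

typedef enum
{
#define _(s,n) VXLAN_FLOW_NEXT_##s,
  foreach_vxlan_flow_input_next
#undef _
  VXLAN_FLOW_N_NEXT,
} vxlan_flow_input_next_t;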
u8 good_csum = (b->flags & VNET_BUFFER_F_L4_CHECKSUM_CORRECT) != 0 ||
u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
u16 expected = payload_len + sizeof *hdr;
return ip_len > expected || hdr->ip4.ttl == 0 || hdr->ip4.ip_version_and_header_length != 0x45;
u16 ip_len = clib_net_to_host_u16 (hdr->ip4.length);
u16 udp_len = clib_net_to_host_u16 (hdr->udp.length);
return udp_len > ip_len;
u8 error0 = VXLAN_FLOW_ERROR_NONE;
error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
enum { payload_offset = sizeof (ip4_vxlan_header_t) };
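Pieced together, the error-code fragments form a small helper that folds the three validation results into a single counter index. A sketch matching the vxlan_err_code declaration in the symbol index below; each check overwrites error0, so with the ordering shown the last matching condition determines the reported error:

static_always_inline u8
vxlan_err_code (u8 ip_err0, u8 udp_err0, u8 csum_err0)
{
  u8 error0 = VXLAN_FLOW_ERROR_NONE;
  if (ip_err0)
    error0 = VXLAN_FLOW_ERROR_IP_HEADER_ERROR;
  if (udp_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_LENGTH_ERROR;
  if (csum_err0)
    error0 = VXLAN_FLOW_ERROR_UDP_CHECKSUM_ERROR;
  return error0;
}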
u32 n_left_from = f->n_vectors;
u32 next_index = VXLAN_FLOW_NEXT_L2_INPUT;
while (n_left_from > 0)
u32 n_left_to_next, *to_next;
while (n_left_from > 3 && n_left_to_next > 3)
u32 bi0 = to_next[0] = from[0];
u32 bi1 = to_next[1] = from[1];
u32 bi2 = to_next[2] = from[2];
u32 bi3 = to_next[3] = from[3];
u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT, next1 = VXLAN_FLOW_NEXT_L2_INPUT,
  next2 = VXLAN_FLOW_NEXT_L2_INPUT, next3 = VXLAN_FLOW_NEXT_L2_INPUT;
u8 ip_err = ip_err0 | ip_err1 | ip_err2 | ip_err3;
u8 udp_err = udp_err0 | udp_err1 | udp_err2 | udp_err3;
u8 csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
csum_err = csum_err0 | csum_err1 | csum_err2 | csum_err3;
if (ip_err0 || udp_err0 || csum_err0)
next0 = VXLAN_FLOW_NEXT_DROP;
b0->error = node->errors[error0];
if (ip_err1 || udp_err1 || csum_err1)
next1 = VXLAN_FLOW_NEXT_DROP;
b1->error = node->errors[error1];
if (ip_err2 || udp_err2 || csum_err2)
next2 = VXLAN_FLOW_NEXT_DROP;
b2->error = node->errors[error2];
if (ip_err3 || udp_err3 || csum_err3)
next3 = VXLAN_FLOW_NEXT_DROP;
b3->error = node->errors[error3];
if (b0->flags & VLIB_BUFFER_IS_TRACED)
.next_index = next0, .error = error0, .tunnel_index = t_index0, .vni = t0->vni };
if (b1->flags & VLIB_BUFFER_IS_TRACED)
.next_index = next1, .error = error1, .tunnel_index = t_index1, .vni = t1->vni };
if (b2->flags & VLIB_BUFFER_IS_TRACED)
.next_index = next2, .error = error2, .tunnel_index = t_index2, .vni = t2->vni };
if (b3->flags & VLIB_BUFFER_IS_TRACED)
.next_index = next3, .error = error3, .tunnel_index = t_index3, .vni = t3->vni };
vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, n_left_to_next,
                                 bi0, bi1, bi2, bi3, next0, next1, next2, next3);
while (n_left_from > 0 && n_left_to_next > 0)
u32 bi0 = to_next[0] = from[0];
u32 next0 = VXLAN_FLOW_NEXT_L2_INPUT;
if (ip_err0 || udp_err0 || csum_err0)
next0 = VXLAN_FLOW_NEXT_DROP;
b0->error = node->errors[error0];
.next_index = next0, .error = error0, .tunnel_index = t_index0, .vni = t0->vni };
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, n_left_to_next, bi0, next0);
return f->n_vectors;
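All three loops in this file (vxlan_input, ip_vxlan_bypass_inline, vxlan_flow_input) follow the same vlib dispatch skeleton built from the frame and enqueue helpers documented below. A condensed sketch of that structure, with the per-packet classification elided:

u32 *from = vlib_frame_vector_args (f);
u32 n_left_from = f->n_vectors;
while (n_left_from > 0)
  {
    u32 *to_next, n_left_to_next;
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    while (n_left_from > 0 && n_left_to_next > 0)
      {
        u32 bi0 = to_next[0] = from[0];
        from += 1; to_next += 1;
        n_left_from -= 1; n_left_to_next -= 1;
        vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
        u32 next0 = next_index; /* ... classify b0; set b0->error and change next0 on failure ... */
        /* Re-enqueues bi0 to the proper frame when next0 differs from next_index. */
        vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                         to_next, n_left_to_next, bi0, next0);
      }
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  }
return f->n_vectors;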
#ifndef CLIB_MULTIARCH_VARIANT
.name = "vxlan-flow-input",
.vector_size = sizeof (u32),
#define _(s,n) [VXLAN_FLOW_NEXT_##s] = n,

static_always_inline u8 vxlan_validate_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
#define foreach_vxlan_flow_error
clib_bihash_24_8_t vxlan6_tunnel_by_key
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
static char * vxlan_flow_error_strings[]
vnet_interface_main_t interface_main
vlib_node_registration_t vxlan4_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_input_node)
#define foreach_vxlan_input_next
static int clib_bihash_key_compare_16_8(u64 *a, u64 *b)
static uword vxlan_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame, u32 is_ip4)
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
vlib_node_registration_t vxlan4_flow_input_node
(constructor) VLIB_REGISTER_NODE (vxlan4_flow_input_node)
u32 * fib_index_by_sw_if_index
Table index indexed by software interface.
vlib_node_registration_t ip4_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip4_vxlan_bypass_node)
#define VLIB_NODE_FN(node)
vlib_error_t * errors
Vector of errors for this node.
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
static uword ip6_vxlan_bypass(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static int clib_bihash_key_compare_24_8(u64 *a, u64 *b)
#define static_always_inline
#define VLIB_INIT_FUNCTION(x)
vlib_combined_counter_main_t * combined_sw_if_counters
static uword ip6_address_is_equal(ip6_address_t *a, ip6_address_t *b)
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
static vxlan_tunnel_t * vxlan6_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache6 *cache, u32 fib_index, ip6_header_t *ip6_0, vxlan_header_t *vxlan0, vxlan_tunnel_t **stats_t0)
static void * ip4_next_header(ip4_header_t *i)
static int ip4_is_fragment(ip4_header_t *i)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline u8 vxlan_check_ip_udp_len(vlib_buffer_t *b)
vlib_node_registration_t ip4_input_node
Global ip4 input node.
vlib_node_registration_t ip6_vxlan_bypass_node
(constructor) VLIB_REGISTER_NODE (ip6_vxlan_bypass_node)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static uword ip4_address_is_multicast(ip4_address_t *a)
static vxlan_tunnel_t * vxlan4_find_tunnel(vxlan_main_t *vxm, last_tunnel_cache4 *cache, u32 fib_index, ip4_header_t *ip4_0, vxlan_header_t *vxlan0, vxlan_tunnel_t **stats_t0)
u32 ip4_tcp_udp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
vlib_node_registration_t vxlan6_input_node
(constructor) VLIB_REGISTER_NODE (vxlan6_input_node)
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
vxlan4_tunnel_key_t last_tunnel_cache4
static_always_inline void vnet_feature_next(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
vlib_error_t error
Error code for buffers to be enqueued to error handler.
static void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
#define VLIB_REGISTER_NODE(x,...)
static u32 vnet_get_vni(vxlan_header_t *h)
static uword vxlan4_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
u32 flow_id
Generic flow identifier.
static_always_inline uword vlib_get_thread_index(void)
#define CLIB_PREFETCH(addr, size, type)
static void ip6_address_set_zero(ip6_address_t *a)
static_always_inline u8 vxlan_check_udp_csum(vlib_main_t *vm, vlib_buffer_t *b)
static char * vxlan_error_strings[]
static vlib_node_runtime_t * vlib_node_get_runtime(vlib_main_t *vm, u32 node_index)
Get node runtime by node index.
vxlan6_tunnel_key_t last_tunnel_cache6
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
static_always_inline u8 vxlan_check_ip(vlib_buffer_t *b, u16 payload_len)
static void * ip6_next_header(ip6_header_t *i)
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
#define foreach_vxlan_flow_input_next
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
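As a usage note for the two buffer accessors above, the decap path typically parses the outer headers in place and then advances past them before handing the inner frame to l2-input. A sketch assuming a 20-byte IPv4 header with no options:

ip4_header_t *ip4_0 = vlib_buffer_get_current (b0);
udp_header_t *udp0 = (udp_header_t *) (ip4_0 + 1);
vxlan_header_t *vxlan0 = (vxlan_header_t *) (udp0 + 1);
u32 vni0 = vnet_get_vni (vxlan0);
/* Strip ip4 + udp + vxlan so b0's current data points at the inner Ethernet frame. */
vlib_buffer_advance (b0, sizeof (*ip4_0) + sizeof (*udp0) + sizeof (*vxlan0));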
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define vec_elt(v, i)
Get vector value at index i.
struct _vlib_node_registration vlib_node_registration_t
static uword ip6_address_is_multicast(ip6_address_t *a)
static u32 buf_fib_index(vlib_buffer_t *b, u32 is_ip4)
u32 ip6_tcp_udp_icmp_validate_checksum(vlib_main_t *vm, vlib_buffer_t *p0)
void ip4_forward_next_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vlib_rx_or_tx_t which_adj_index)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static_always_inline u8 vxlan_err_code(u8 ip_err0, u8 udp_err0, u8 csum_err0)
u8 * format_ip4_forward_next_trace(u8 *s, va_list *args)
vlib_combined_counter_main_t
A collection of combined counters.
#define hash_get_mem(h, key)
VLIB_NODE_FUNCTION_MULTIARCH(l2t_decap_node, l2t_decap_node_fn)
static u8 * format_vxlan_rx_trace(u8 *s, va_list *args)
ip4_main_t ip4_main
Global ip4 main structure.
clib_bihash_16_8_t vxlan4_tunnel_by_key
clib_error_t * ip6_vxlan_bypass_init(vlib_main_t *vm)
u16 flags
Copy of main node flags.
#define VLIB_NODE_FLAG_TRACE
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static uword ip4_vxlan_bypass(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
u32 * fib_index_by_sw_if_index
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword ip_vxlan_bypass_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, u32 is_ip4)
static uword vxlan6_input(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
clib_error_t * ip4_vxlan_bypass_init(vlib_main_t *vm)
u8 * format_ip6_forward_next_trace(u8 *s, va_list *args)