#define foreach_vxlan_encap_error              \
_(ENCAPSULATED, "good packets encapsulated")   \
_(DEL_TUNNEL, "deleted tunnel packets")

#define _(sym,string) string,
#define _(sym,str) VXLAN_ENCAP_ERROR_##sym,

#define foreach_fixed_header4_offset           \
    _(0) _(1) _(2) _(3)

#define foreach_fixed_header6_offset           \
    _(0) _(1) _(2) _(3) _(4) _(5) _(6)

  /* locals of vxlan_encap (vm, node, from_frame) */
  u32 n_left_from, next_index, * from, * to_next;
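/* The two _() helpers defined above expand foreach_vxlan_encap_error into the
 * error-string table and the error enum used further down; in the full source
 * this takes the usual VPP X-macro form, roughly:
 *
 *   static char * vxlan_encap_error_strings[] = {
 *     foreach_vxlan_encap_error            //  _(sym,string) -> string,
 *   };
 *
 *   typedef enum {
 *     foreach_vxlan_encap_error            //  _(sym,str) -> VXLAN_ENCAP_ERROR_##sym,
 *     VXLAN_ENCAP_N_ERROR,
 *   } vxlan_encap_error_t;
 */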
  u32 pkts_encapsulated = 0;
  u16 old_l0 = 0, old_l1 = 0;
  u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;

  stats_n_packets = stats_n_bytes = 0;
  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index,
                           to_next, n_left_to_next);

      while (n_left_from >= 4 && n_left_to_next >= 2)
        {
          u32 flow_hash0, flow_hash1;
          u32 sw_if_index0, sw_if_index1, len0, len1;
          u64 * copy_src0, * copy_dst0;
          u64 * copy_src1, * copy_dst1;
          u32 * copy_src_last0, * copy_dst_last0;
          u32 * copy_src_last1, * copy_dst_last1;
          u8 is_ip4_0, is_ip4_1;
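          /* Most of the per-packet work is elided from this listing: the loop
             prefetches the next buffers, finds the vxlan tunnel for each
             buffer, advances the buffer backwards to make room for the
             precomputed rewrite, copies the rewrite in, patches the ip
             length/checksum and udp length (plus the ip6 udp checksum via
             ip6_tcp_udp_icmp_compute_checksum), and uses flow_hash0/flow_hash1
             to vary the udp source port before choosing next0/next1. */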
          b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
          pkts_encapsulated --;

          b1->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
          pkts_encapsulated --;
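          /* The (elided) guard around the two error assignments checks each
             tunnel's rewrite string against vxlan4_dummy_rewrite /
             vxlan6_dummy_rewrite, i.e. the DEL_TUNNEL error is taken when the
             tunnel was deleted while packets were still in flight; the
             decrements compensate for the unconditional
             "pkts_encapsulated += 2" further down. */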
          ip6_0 = (void *) ip4_0;
          ip6_1 = (void *) ip4_1;

          copy_dst0 = (u64 *) ip4_0;
          copy_dst1 = (u64 *) ip4_1;
#define _(offs) copy_dst0[offs] = copy_src0[offs];

#define _(offs) copy_dst1[offs] = copy_src1[offs];

          copy_dst_last0 = (u32 *) (&copy_dst0[4]);
          copy_src_last0 = (u32 *) (&copy_src0[4]);
          copy_dst_last0[0] = copy_src_last0[0];

          copy_dst_last1 = (u32 *) (&copy_dst1[4]);
          copy_src_last1 = (u32 *) (&copy_src1[4]);
          copy_dst_last1[0] = copy_src_last1[0];
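          /* The stores above lay the precomputed ip+udp+vxlan rewrite in front
             of the payload: foreach_fixed_header6_offset covers the 56-byte
             ip6 rewrite with seven u64 copies, while the ip4 case copies
             4 x 8 bytes and then the trailing 4 bytes (copy_*_last) of its
             36-byte rewrite. */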
          pkts_encapsulated += 2;
          stats_n_packets += 2;
          stats_n_bytes += len0 + len1;

          /* im (vnet_interface_main_t *) is set up in the elided declarations */
          if (PREDICT_FALSE ((sw_if_index0 != stats_sw_if_index) ||
                             (sw_if_index1 != stats_sw_if_index)))
            {
              stats_n_packets -= 2;
              stats_n_bytes -= len0 + len1;
              if (sw_if_index0 == sw_if_index1)
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, stats_sw_if_index,
                     stats_n_packets, stats_n_bytes);
                  stats_sw_if_index = sw_if_index0;
                  stats_n_packets = 2;
                  stats_n_bytes = len0 + len1;
                }
              else
                {
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index0, 1, len0);
                  vlib_increment_combined_counter
                    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                     cpu_index, sw_if_index1, 1, len1);
                }
            }

          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, bi1, next0, next1);
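          /* Note: TX counters are batched -- stats_n_packets / stats_n_bytes
             keep accumulating while successive packets leave on the same
             tunnel sw_if_index and are pushed into the combined interface
             counters only when the interface changes (and once more after the
             loops).  The two-packets-per-iteration loop above is the fast
             path; the single-packet loop below applies the same logic to the
             rest of the frame. */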
      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 sw_if_index0, len0;
          u64 * copy_src0, * copy_dst0;
          u32 * copy_src_last0, * copy_dst_last0;
          b0->error = node->errors[VXLAN_ENCAP_ERROR_DEL_TUNNEL];
          pkts_encapsulated --;
          ip6_0 = (void *) ip4_0;

          copy_dst0 = (u64 *) ip4_0;
#define _(offs) copy_dst0[offs] = copy_src0[offs];
          copy_dst_last0 = (u32 *) (&copy_dst0[4]);
          copy_src_last0 = (u32 *) (&copy_src0[4]);
          copy_dst_last0[0] = copy_src_last0[0];
          pkts_encapsulated ++;
          stats_n_packets += 1;
          stats_n_bytes += len0;

          /* Same counter batching as in the two-packet loop */
          if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
            {
              stats_n_packets -= 1;
              stats_n_bytes -= len0;
              vlib_increment_combined_counter
                (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
                 cpu_index, stats_sw_if_index,
                 stats_n_packets, stats_n_bytes);
              stats_n_packets = 1;
              stats_n_bytes = len0;
              stats_sw_if_index = sw_if_index0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
        }

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
                               VXLAN_ENCAP_ERROR_ENCAPSULATED,
                               pkts_encapsulated);

  /* Flush any stats still batched when the frame ends */
  vlib_increment_combined_counter
    (im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX,
     cpu_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
VLIB_REGISTER_NODE (vxlan_encap_node) = {
  .function = vxlan_encap,
  .name = "vxlan-encap",
  .vector_size = sizeof (u32),
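/* The remaining initializers are elided from this listing; based on the
 * symbols referenced below, the registration continues roughly as:
 *
 *   .format_trace = format_vxlan_encap_trace,
 *   .type = VLIB_NODE_TYPE_INTERNAL,
 *   .n_errors = ARRAY_LEN (vxlan_encap_error_strings),
 *   .error_strings = vxlan_encap_error_strings,
 *   ...
 * };
 */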
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
static uword vxlan_encap(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
vnet_interface_main_t interface_main
#define foreach_vxlan_encap_error
always_inline void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define vxlan4_dummy_rewrite
static char * vxlan_encap_error_strings[]
vlib_combined_counter_main_t * combined_sw_if_counters
always_inline void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 cpu_index, u32 index, u32 packet_increment, u32 byte_increment)
always_inline uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
always_inline void * vlib_frame_vector_args(vlib_frame_t *f)
always_inline u16 ip_csum_fold(ip_csum_t c)
#define foreach_fixed_header4_offset
uword os_get_cpu_number(void)
always_inline void vlib_node_increment_counter(vlib_main_t *vm, u32 node_index, u32 counter_index, u64 increment)
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
u8 * format_vxlan_encap_trace(u8 *s, va_list *args)
#define vxlan6_dummy_rewrite
#define CLIB_PREFETCH(addr, size, type)
always_inline vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
#define foreach_fixed_header6_offset
vlib_node_registration_t vxlan_encap_node
(constructor) VLIB_REGISTER_NODE (vxlan_encap_node)
#define VLIB_BUFFER_IS_TRACED
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
always_inline void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
#define VXLAN_TUNNEL_IS_IPV4
#define ip_csum_update(sum, old, new, type, field)
always_inline void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define VLIB_REGISTER_NODE(x,...)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_IS_TRACED: trace this buffer.
uword runtime_data[(128 - 1*sizeof(vlib_node_function_t *) - 1*sizeof(vlib_error_t *) - 11*sizeof(u32) - 5*sizeof(u16)) / sizeof(uword)]
always_inline vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
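The ip_csum_update / ip_csum_fold helpers listed above, together with old_l0 / old_l1 declared near the top of the node, are what the (elided) loop bodies use to patch the new IPv4 total length into the copied header without recomputing the whole checksum. A minimal sketch of that idiom, using hypothetical locals sum0 / new_l0 alongside the listing's ip4_0, old_l0, vm and b0:

  ip_csum_t sum0 = ip4_0->checksum;
  u16 new_l0;

  old_l0 = ip4_0->length;       /* length as copied from the tunnel rewrite */
  new_l0 = clib_host_to_net_u16 ((u16) vlib_buffer_length_in_chain (vm, b0));
  sum0 = ip_csum_update (sum0, old_l0, new_l0, ip4_header_t,
                         length /* changed member */);
  ip4_0->checksum = ip_csum_fold (sum0);
  ip4_0->length = new_l0;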