#define foreach_vxlan_gbp_encap_error \
_(ENCAPSULATED, "good packets encapsulated")

#define _(sym,string) string,
#define _(sym,str) VXLAN_GBP_ENCAP_ERROR_##sym,

#ifndef CLIB_MARCH_VARIANT
  /* format string used by format_vxlan_gbp_encap_trace () */
  "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
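/* The two "_()" definitions above are the standard VPP error-table pattern:
 * the same foreach list is expanded once into an error-string array and once
 * into an error enum.  A sketch of the expansion; names other than
 * VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED and vxlan_gbp_encap_error_strings are
 * assumptions:
 *
 *   static char *vxlan_gbp_encap_error_strings[] = {
 *     "good packets encapsulated",
 *   };
 *
 *   typedef enum
 *   {
 *     VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED,
 *     VXLAN_GBP_ENCAP_N_ERROR,
 *   } vxlan_gbp_encap_error_t;
 */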
  u32 n_left_from, next_index, *from, *to_next;
  u32 pkts_encapsulated = 0;
  u32 sw_if_index0 = 0, sw_if_index1 = 0;
  u32 next0 = 0, next1 = 0;
  u8 const underlay_hdr_len = is_ip4 ?
    sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
  u32 const csum_flags = is_ip4 ? VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID | VNET_BUFFER_F_L4_HDR_OFFSET_VALID :
    VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_L3_HDR_OFFSET_VALID | VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
  u32 const inner_packet_csum_offload_flags =
    VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 const inner_packet_removed_flags =
    VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_IS_IP6 |
    VNET_BUFFER_F_L2_HDR_OFFSET_VALID | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
    VNET_BUFFER_F_L4_HDR_OFFSET_VALID;
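  /* How these three masks are used further down (an interpretation of the
   * code below, not an upstream comment):
   *   - csum_flags: OR'd into b->flags after encap when csum_offload is set,
   *     so that the outer checksums (IPv4 header + UDP for the v4 underlay,
   *     UDP only for v6) are finished by checksum offload, with the L3/L4
   *     header offsets marked valid;
   *   - inner_packet_csum_offload_flags: if any of these are still pending on
   *     the inner packet, its checksums are first resolved in software (see
   *     vnet_calc_checksums_inline below), because the offload request will
   *     now refer to the outer headers;
   *   - inner_packet_removed_flags: inner-packet metadata that no longer
   *     describes the buffer once the VXLAN-GBP underlay is pushed, so it is
   *     cleared after the software checksum pass.
   */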
  while (n_left_from > 0)
    {
      while (n_left_from >= 4 && n_left_to_next >= 2)
	{
	  u32 bi0 = to_next[0] = from[0];
	  u32 bi1 = to_next[1] = from[1];
	  if (csum_offload && (or_flags & inner_packet_csum_offload_flags))
	    {
	      /* only resolve inner checksums in software for non-GSO packets */
	      if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
		{
		  vnet_calc_checksums_inline (vm, b[0],
					      b[0]->flags &
					      VNET_BUFFER_F_IS_IP4,
					      b[0]->flags &
					      VNET_BUFFER_F_IS_IP6);
		  b[0]->flags &= ~inner_packet_removed_flags;
		}
	      if ((b[1]->flags & VNET_BUFFER_F_GSO) == 0)
		{
		  vnet_calc_checksums_inline (vm, b[1],
					      b[1]->flags &
					      VNET_BUFFER_F_IS_IP4,
					      b[1]->flags &
					      VNET_BUFFER_F_IS_IP6);
		  b[1]->flags &= ~inner_packet_removed_flags;
		}
	    }
		  /* b[1] goes to the same tunnel interface as b[0] */
		  sw_if_index1 = sw_if_index0;
		  dpoi_idx1 = dpoi_idx0;
		  /* otherwise resolve b[1]'s own tunnel and take its next DPO */
		  dpoi_idx1 = t1->next_dpo.dpoi_index;
	  ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
	  ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);

	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
	  u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
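	  /* Worked example of the length math, assuming len0 is the full
	   * buffer-chain length after the underlay header has been pushed and
	   * l3_len is the outer IP header size:
	   *
	   *   IPv4 underlay, 100-byte inner frame, no IP options:
	   *     underlay_hdr_len = 20 (ip4) + 8 (udp) + 8 (vxlan-gbp) = 36
	   *     len0             = 36 + 100 = 136  -> outer ip4 total length
	   *     payload_l0       = 136 - 20 = 116  -> outer udp length
	   *                        = udp hdr + vxlan-gbp hdr + inner frame
	   *
	   *   For the IPv6 underlay the same payload_l0 also becomes the ip6
	   *   payload_length, since that field excludes the 40-byte IPv6
	   *   header.
	   */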
	      ip4_vxlan_gbp_header_t *hdr0 = underlay0;
	      ip4_vxlan_gbp_header_t *hdr1 = underlay1;

	      /* fix the outer IP4 length (and TOS when QoS marking is valid) */
	      ip4_0->length = clib_host_to_net_u16 (len0);
	      ip4_1->length = clib_host_to_net_u16 (len1);

	      ip4_0->tos = ip4_0_tos;
	      ip4_1->tos = ip4_1_tos;

	      vxlan_gbp0 = &hdr0->vxlan_gbp;
	      vxlan_gbp1 = &hdr1->vxlan_gbp;
	      ip6_vxlan_gbp_header_t *hdr0 = underlay0;
	      ip6_vxlan_gbp_header_t *hdr1 = underlay1;

	      vxlan_gbp0 = &hdr0->vxlan_gbp;
	      vxlan_gbp1 = &hdr1->vxlan_gbp;
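	  /* The underlay0/underlay1 casts above assume the usual combined
	   * header layout, roughly as sketched here (member names other than
	   * vxlan_gbp are an assumption):
	   *
	   *   typedef CLIB_PACKED (struct {
	   *     ip4_header_t ip4;              // 20 bytes, no options
	   *     udp_header_t udp;              //  8 bytes
	   *     vxlan_gbp_header_t vxlan_gbp;  //  8 bytes: flags, gpflags,
	   *                                    //           sclass, vni
	   *   }) ip4_vxlan_gbp_header_t;
	   *
	   * and the same with a 40-byte ip6_header_t for
	   * ip6_vxlan_gbp_header_t, which is why underlay_hdr_len above is
	   * simply sizeof () of the respective struct.
	   */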
	  /* fix the outer UDP length */
	  udp0->length = payload_l0;
	  udp1->length = payload_l1;

	  if (csum_offload)
	    {
	      b[0]->flags |= csum_flags;
	      b[1]->flags |= csum_flags;
	    }
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b[0], ip6_0, &bogus);
	      udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b[1], ip6_1, &bogus);
	  vlib_increment_combined_counter (tx_counter, thread_index,
					   sw_if_index0, 1, len0);
	  vlib_increment_combined_counter (tx_counter, thread_index,
					   sw_if_index1, 1, len1);
	  pkts_encapsulated += 2;
	  vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
					   to_next, n_left_to_next,
					   bi0, bi1, next0, next1);
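	  /* vlib_validate_buffer_enqueue_x2 / _x1 are VPP's speculative
	   * enqueue helpers: the buffer indices were already written into the
	   * frame obtained for the previously used next_index, and the macro
	   * only repairs the frame if next0/next1 turn out to differ from
	   * that speculation.
	   */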
      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = to_next[0] = from[0];
	  if (csum_offload && (b[0]->flags & inner_packet_csum_offload_flags))
	    {
	      if ((b[0]->flags & VNET_BUFFER_F_GSO) == 0)
		{
		  vnet_calc_checksums_inline (vm, b[0],
					      b[0]->flags &
					      VNET_BUFFER_F_IS_IP4,
					      b[0]->flags &
					      VNET_BUFFER_F_IS_IP6);
		  b[0]->flags &= ~inner_packet_removed_flags;
		}
	    }
	  ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);

	  u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
	      ip4_vxlan_gbp_header_t *hdr = underlay0;

	      ip4_0->length = clib_host_to_net_u16 (len0);
	      ip4_0->tos = ip4_0_tos;

	      vxlan_gbp0 = &hdr->vxlan_gbp;
	      ip6_vxlan_gbp_header_t *hdr = underlay0;

	      vxlan_gbp0 = &hdr->vxlan_gbp;
	  udp0->length = payload_l0;

	  if (csum_offload)
	    b[0]->flags |= csum_flags;
	      udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
		(vm, b[0], ip6_0, &bogus);
	  vlib_increment_combined_counter (tx_counter, thread_index,
					   sw_if_index0, 1, len0);
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
					   to_next, n_left_to_next, bi0, next0);
  vlib_node_increment_counter (vm, node->node_index,
			       VXLAN_GBP_ENCAP_ERROR_ENCAPSULATED, pkts_encapsulated);
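/* Between vxlan_gbp_encap_inline () and the node registrations below sit the
 * two VLIB_NODE_FN entry points.  A sketch of their likely shape; the exact
 * is_ip4 / csum_offload values passed per node are an assumption, not taken
 * from this excerpt:
 */
VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return vxlan_gbp_encap_inline (vm, node, from_frame,
				 /* is_ip4 */ 1, /* csum_offload */ 0);
}

VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  /* IPv6 makes the UDP checksum mandatory, so offload is worth enabling */
  return vxlan_gbp_encap_inline (vm, node, from_frame,
				 /* is_ip4 */ 0, /* csum_offload */ 1);
}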
VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
{
  .name = "vxlan4-gbp-encap",
  .vector_size = sizeof (u32),
VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
{
  .name = "vxlan6-gbp-encap",
  .vector_size = sizeof (u32),
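/* Only .name and .vector_size are visible in the two registrations above; a
 * complete registration for an encap node of this kind would typically add
 * fields along the lines of the sketch below (every value here is an
 * assumption based on the other symbols in this file):
 *
 *   .format_trace = format_vxlan_gbp_encap_trace,
 *   .type = VLIB_NODE_TYPE_INTERNAL,
 *   .n_errors = ARRAY_LEN (vxlan_gbp_encap_error_strings),
 *   .error_strings = vxlan_gbp_encap_error_strings,
 *   .n_next_nodes = 1,
 *   .next_nodes = {
 *     [0] = "error-drop",
 *   },
 */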