FD.io VPP
v21.06-3-gbb25fbf28
Vector Packet Processing
Go to the documentation of this file.
43   if (t->flags & VNET_BUFFER_F_GSO)
45     s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
66    u16 i = 0, n_tx_bytes = 0;
89    else if (gho->gho_flags & GHO_F_OUTER_IP6)
125   else if (gho->gho_flags & GHO_F_OUTER_IP6)
128     ip6->payload_length =
131   if (proto == IP_PROTOCOL_UDP)
142   else if (gho->gho_flags & GHO_F_OUTER_IP4)
158   u16 i = 0, n_tx_bytes = 0;
160   while (i < n_tx_bufs)
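
The fragments at lines 66-160 come from the tunnel fixup helpers whose signatures appear in the symbol reference at the end of this page, tso_segment_vxlan_tunnel_headers_fixup and tso_segment_ipip_tunnel_fixup: after a tunnelled GSO packet has been segmented, each segment's outer IPv4/IPv6 header (and outer UDP header, when the encapsulation is UDP-based) still carries the original packet's lengths, so they are rewritten per segment and the IPv4 header checksum is recomputed. The sketch below illustrates only that per-segment length rewrite, assuming the outer headers sit at known offsets from the buffer's current data pointer; the function name and the offset/flag parameters are illustrative stand-ins for what the real code reads from generic_header_offset_t, not the file's exact logic.

#include <vlib/vlib.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vnet/udp/udp_packet.h>

/* Illustrative only: rewrite the outer L3/L4 lengths of one segment after a
 * tunnelled packet was split.  outer_l3_ofs / outer_l4_ofs stand in for the
 * offsets the real code takes from generic_header_offset_t. */
static void
fixup_outer_lengths_sketch (vlib_buffer_t * b, int outer_is_ip6,
			    int outer_is_udp, u16 outer_l3_ofs, u16 outer_l4_ofs)
{
  u8 *start = vlib_buffer_get_current (b);
  u16 seg_len = b->current_length;	/* assumes a single-buffer segment */

  if (outer_is_ip6)
    {
      ip6_header_t *ip6 = (ip6_header_t *) (start + outer_l3_ofs);
      /* payload_length excludes the fixed 40-byte IPv6 header */
      ip6->payload_length =
	clib_host_to_net_u16 (seg_len - outer_l3_ofs - sizeof (ip6_header_t));
    }
  else
    {
      ip4_header_t *ip4 = (ip4_header_t *) (start + outer_l3_ofs);
      ip4->length = clib_host_to_net_u16 (seg_len - outer_l3_ofs);
      ip4->checksum = ip4_header_checksum (ip4); /* header changed, redo it */
    }

  if (outer_is_udp)
    {
      /* an outer UDP encap (e.g. VXLAN) also carries a length field */
      udp_header_t *udp = (udp_header_t *) (start + outer_l4_ofs);
      udp->length = clib_host_to_net_u16 (seg_len - outer_l4_ofs);
      udp->checksum = 0;	/* zeroed here for the sketch; the real code
				 * recomputes or offloads it */
    }
}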
178   u16 gso_size, u16 first_data_size,
182   u16 first_packet_length = l234_sz + first_data_size;
206   ASSERT (n_bytes_b0 > first_packet_length);
207   n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);
212   if (n_alloc < n_bufs)
227   nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
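
Line 207 above is the core of tso_alloc_tx_bufs (signature in the symbol reference below): the number of extra buffers needed beyond the first is a rounded-up division of the remaining payload by size, the per-buffer payload capacity. A small self-contained sketch of that arithmetic with made-up example numbers; the helper name and the figures are mine, not from the file.

#include <stdio.h>

/* Rounded-up division as used on line 207:
 *   n_bufs += (remaining + (size - 1)) / size;  */
static unsigned
tso_extra_bufs_sketch (unsigned n_bytes_b0,	    /* total packet bytes */
		       unsigned first_packet_length, /* l234_sz + first_data_size */
		       unsigned size)		    /* payload bytes per extra buffer */
{
  unsigned remaining = n_bytes_b0 - first_packet_length;
  return (remaining + (size - 1)) / size;	/* ceil(remaining / size) */
}

int
main (void)
{
  /* e.g. a 9014-byte GSO packet whose first segment already carries 54 bytes
   * of headers plus 1448 bytes of data, split into 1448-byte segments: */
  unsigned extra = tso_extra_bufs_sketch (9014, 54 + 1448, 1448);
  printf ("extra buffers needed: %u\n", extra);	/* prints 6 = ceil(7512/1448) */
  return 0;
}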
245   u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
259   tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
273   tcp->flags = tcp_flags;
277     ip6->payload_length =
286   VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
301   VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
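
Lines 245-301 come from tso_init_buf_from_template and tso_fixup_segmented_buf: every new segment gets a copy of the original L2/L3/L4 headers, its TCP sequence number set from the running next_tcp_seq, its TCP flags set (FIN/PSH stripped on all but the last segment), and the VNET_BUFFER_OFFLOAD_F_TCP_CKSUM request so the checksum is filled in later. A minimal sketch of the header seeding, assuming the headers start at the buffer's current data pointer; the function name and the l4_hdr_offset parameter are placeholders, not the file's exact code.

#include <vlib/vlib.h>
#include <vnet/tcp/tcp_packet.h>

/* Sketch of the per-segment header seeding around lines 245-273.  l234_sz is
 * the combined L2+L3+L4 header size, l4_hdr_offset the offset of the TCP
 * header within it; both stand in for generic_header_offset_t fields. */
static void
init_segment_headers_sketch (vlib_buffer_t * nb0, vlib_buffer_t * b0,
			     u16 l234_sz, u16 l4_hdr_offset,
			     u32 next_tcp_seq, u8 tcp_flags)
{
  /* copy the template headers from the original packet into the new segment */
  clib_memcpy_fast (vlib_buffer_get_current (nb0),
		    vlib_buffer_get_current (b0), l234_sz);
  nb0->current_length = l234_sz;

  /* each segment continues the TCP stream at its own sequence number */
  tcp_header_t *tcp =
    (tcp_header_t *) ((u8 *) vlib_buffer_get_current (nb0) + l4_hdr_offset);
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);
  tcp->flags = tcp_flags;	/* FIN/PSH are carried only by the last segment */
}

The surrounding code in the file additionally sets buffer flags (line 227) and marks the segment for TCP checksum offload (lines 286 and 301); the sketch omits that part.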
336   u8 save_tcp_flags = 0;
337   u8 tcp_flags_no_fin_psh = 0;
338   u32 next_tcp_seq = 0;
342   next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
344   save_tcp_flags = tcp->flags;
345   tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
349   sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
352   next_tcp_seq += first_data_size;
356   (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
361   l234_sz + first_data_size);
363   u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
367   u8 *src_ptr, *dst_ptr;
368   u16 src_left, dst_left;
386   &dst_left, next_tcp_seq, default_bflags,
391   while (total_src_left)
399     src_left -= bytes_to_copy;
400     src_ptr += bytes_to_copy;
401     total_src_left -= bytes_to_copy;
402     dst_left -= bytes_to_copy;
403     dst_ptr += bytes_to_copy;
404     next_tcp_seq += bytes_to_copy;
409     int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
422     ASSERT (total_src_left == 0);
426     if (0 == dst_left && total_src_left)
434       gso_size, &dst_ptr, &dst_left,
435       next_tcp_seq, default_bflags, gho);
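
The run of fragments from lines 336-435 belongs to tso_segment_buffer, described in the symbol reference below as allocating ptd->split_buffers and segmenting a possibly chained buffer. The original TCP flags are saved at line 344, FIN and PSH are masked out for the intermediate segments at line 345, and the loop at lines 391-404 copies min(src_left, dst_left) bytes at a time while advancing next_tcp_seq. The condensed sketch below reproduces only that bookkeeping, with a single contiguous source and caller-provided destination storage instead of vlib buffer chains; all names are illustrative.

#include <string.h>

/* Condensed sketch of the lines 391-404 copy loop: split total_src_left
 * payload bytes into segments of at most gso_size bytes, advancing the TCP
 * sequence number as we go.  The real code walks vlib buffer chains and
 * seeds every new segment via tso_init_buf_from_template (). */
typedef struct
{
  unsigned char *data;		/* segment payload destination */
  unsigned tcp_seq;		/* sequence number written into the segment */
  unsigned len;			/* payload bytes in this segment */
  unsigned char tcp_flags;
} seg_sketch_t;

static int
segment_payload_sketch (const unsigned char *src_ptr, unsigned total_src_left,
			unsigned gso_size, unsigned next_tcp_seq,
			unsigned char save_tcp_flags,
			unsigned char tcp_flags_no_fin_psh,
			seg_sketch_t * segs, unsigned char *seg_storage)
{
  int n_segs = 0;
  while (total_src_left)
    {
      unsigned dst_left = gso_size;
      unsigned char *dst_ptr = seg_storage + (unsigned) n_segs * gso_size;

      segs[n_segs].data = dst_ptr;
      segs[n_segs].tcp_seq = next_tcp_seq;

      while (total_src_left && dst_left)
	{
	  unsigned bytes_to_copy =
	    total_src_left < dst_left ? total_src_left : dst_left;
	  memcpy (dst_ptr, src_ptr, bytes_to_copy);
	  src_ptr += bytes_to_copy;	/* lines 399-404: advance both sides */
	  dst_ptr += bytes_to_copy;
	  total_src_left -= bytes_to_copy;
	  dst_left -= bytes_to_copy;
	  next_tcp_seq += bytes_to_copy;
	}

      segs[n_segs].len = gso_size - dst_left;
      /* FIN/PSH only on the last segment (lines 344-345): intermediate
       * segments must not signal end-of-data or push prematurely */
      segs[n_segs].tcp_flags =
	total_src_left ? tcp_flags_no_fin_psh : save_tcp_flags;
      n_segs++;
    }
  return n_segs;
}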
464 node->node_index, drop_error_code);
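
The fragment at line 464 is from drop_one_buffer_and_count, used when segmentation cannot proceed (for example VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO or VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE): the interface TX-error counter is incremented and the buffer is handed to the drop next. The sketch below is assembled from the signatures in the symbol reference (vlib_increment_simple_counter, vlib_error_drop_buffers, vec_elt_at_index) and may not match the file line for line.

#include <vnet/vnet.h>

/* Sketch of drop_one_buffer_and_count (): count the error against the
 * interface, then route the single buffer to the drop next node. */
static_always_inline void
drop_one_buffer_and_count_sketch (vlib_main_t * vm, vnet_main_t * vnm,
				  vlib_node_runtime_t * node, u32 * pbi0,
				  u32 sw_if_index, u32 drop_error_code)
{
  u32 thread_index = vm->thread_index;
  vnet_interface_main_t *im = &vnm->interface_main;

  vlib_simple_counter_main_t *cm =
    vec_elt_at_index (im->sw_if_counters, VNET_INTERFACE_COUNTER_TX_ERROR);
  vlib_increment_simple_counter (cm, thread_index, sw_if_index, 1);

  /* one buffer, stride 1, sent to the interface-output drop next */
  vlib_error_drop_buffers (vm, node, pbi0, 1, 1,
			   VNET_INTERFACE_OUTPUT_NEXT_DROP,
			   node->node_index, drop_error_code);
}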
473   int is_l2, int is_ip4, int is_ip6, int do_segmentation)
494   if (!do_segmentation)
495     while (from + 8 <= from_end && n_left_to_next >= 4)
497       u32 bi0, bi1, bi2, bi3;
498       u32 next0, next1, next2, next3;
499       u32 swif0, swif1, swif2, swif3;
528       (b[0]->flags & VNET_BUFFER_F_GSO))
535       (b[1]->flags & VNET_BUFFER_F_GSO))
543       (b[2]->flags & VNET_BUFFER_F_GSO))
550       (b[3]->flags & VNET_BUFFER_F_GSO))
554       if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
563       if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
572       if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
581       if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
603       n_left_to_next, bi0, bi1, bi2,
604       bi3, next0, next1, next2, next3);
608   while (from + 1 <= from_end && n_left_to_next > 0)
614     u32 do_segmentation0 = 0;
621     (b[0]->flags & VNET_BUFFER_F_GSO))
622       do_segmentation0 = 1;
625       do_segmentation0 = do_segmentation;
628     to_next[0] = bi0 = from[0];
634     if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
644     if (do_segmentation0)
668       GHO_F_GENEVE_TUNNEL)))
681       inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
686       is_l2, inner_is_ip6);
707       GHO_F_IPIP6_TUNNEL)))
717       while (n_tx_bufs > 0)
721         while (n_tx_bufs > 0 && n_left_to_next > 0)
723           sbi0 = to_next[0] = from_seg[0];
742           to_next, n_left_to_next);
755     n_left_to_next, bi0, next0);
761   return frame->n_vectors;
772   if (frame->n_vectors > 0)
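
Lines 473-772 are from vnet_gso_node_inline and vnet_gso_inline: a conventional VPP feature node that quad-loops over buffers, passes packets without VNET_BUFFER_F_GSO (or whose egress interface advertises VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO) straight on to the next feature, and only falls into the segmentation path when do_segmentation0 is set. The sketch below shows just the single-buffer pass-through skeleton those fragments follow, built from the frame/enqueue helpers in the symbol reference; the segmentation branch and the four-wide loop are omitted.

#include <vnet/vnet.h>
#include <vnet/feature/feature.h>

/* Stripped-down sketch of the lines 608-761 single-buffer loop: enqueue each
 * buffer toward the next feature node, which is all this node does when the
 * hardware already handles GSO (the do_segmentation0 branch is omitted). */
static uword
gso_passthrough_sketch (vlib_main_t * vm, vlib_node_runtime_t * node,
			vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = from[0];
	  u32 next0 = 0;
	  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);

	  to_next[0] = bi0;	/* cf. line 628: hand the buffer to the next frame */
	  from += 1;
	  to_next += 1;
	  n_left_from -= 1;
	  n_left_to_next -= 1;

	  /* the real code tests b0->flags & VNET_BUFFER_F_GSO here and, if the
	   * interface lacks TCP GSO support, segments b0 in software instead */
	  vnet_feature_next (&next0, b0);

	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}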
822   .vector_size = sizeof (u32),
827   .name = "gso-l2-ip4",
831   .vector_size = sizeof (u32),
836   .name = "gso-l2-ip6",
840   .vector_size = sizeof (u32),
849   .vector_size = sizeof (u32),
858   .arc_name = "l2-output-ip4",
859   .node_name = "gso-l2-ip4",
864   .arc_name = "l2-output-ip6",
865   .node_name = "gso-l2-ip6",
870   .arc_name = "ip4-output",
871   .node_name = "gso-ip4",
876   .arc_name = "ip6-output",
877   .node_name = "gso-ip6",
static_always_inline void tso_init_buf_from_template_base(vlib_buffer_t *nb0, vlib_buffer_t *b0, u32 flags, u16 length)
u32 next_buffer
Next buffer for this linked-list of buffers.
vnet_interface_main_t * im
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers: frees the entire buffer chain for each buffer.
static_always_inline u16 tso_segment_ipip_tunnel_fixup(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *sb0, generic_header_offset_t *gho)
vlib_buffer_t * bufs[VLIB_FRAME_SIZE]
static_always_inline u16 tso_alloc_tx_bufs(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *b0, u32 n_bytes_b0, u16 l234_sz, u16 gso_size, u16 first_data_size, generic_header_offset_t *gho)
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
static_always_inline u8 * format_generic_header_offset(u8 *s, va_list *args)
vnet_hw_interface_capabilities_t caps
u16 ip6_tcp_udp_icmp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip6_header_t *ip0, int *bogus_lengthp)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
@ VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO
struct _tcp_header tcp_header_t
vlib_get_buffers(vm, from, b, n_left_from)
@ VLIB_NODE_TYPE_INTERNAL
vlib_main_t vlib_node_runtime_t * node
generic_header_offset_t gho
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
vlib_node_registration_t gso_l2_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip4_node)
vlib_node_registration_t gso_l2_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip6_node)
@ IP_LOOKUP_NEXT_MIDCHAIN
These packets follow a mid-chain adjacency.
vlib_main_t * vm
vlib_simple_counter_main_t * sw_if_counters
u32 trace_handle
Specifies trace buffer handle if VLIB_PACKET_IS_TRACED flag is set.
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
vlib_node_registration_t gso_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_ip4_node)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
static_always_inline u32 tso_segment_buffer(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, u32 sbi0, vlib_buffer_t *sb0, generic_header_offset_t *gho, u32 n_bytes_b0, int is_l2, int is_ip6)
Allocate the necessary number of ptd->split_buffers, and segment the possibly chained buffer(s) from ...
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
static_always_inline void tso_segment_vxlan_tunnel_headers_fixup(vlib_main_t *vm, vlib_buffer_t *b, generic_header_offset_t *gho)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
@ VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE
#define VLIB_NODE_FN(node)
ip_lookup_next_t lookup_next_index
Next hop after ip4-lookup.
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vnet_main_t * vnet_get_main(void)
static_always_inline void vnet_buffer_offload_flags_clear(vlib_buffer_t *b, vnet_buffer_oflags_t oflags)
@ VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
u16 ip4_tcp_udp_compute_checksum(vlib_main_t *vm, vlib_buffer_t *p0, ip4_header_t *ip0)
#define static_always_inline
uword vlib_error_drop_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 next_buffer_stride, u32 n_buffers, u32 next_index, u32 drop_error_node, u32 drop_error_code)
static_always_inline uword vnet_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_l2, int is_ip4, int is_ip6)
vlib_node_registration_t gso_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_ip6_node)
static u8 * format_gso_trace(u8 *s, va_list *args)
vnet_feature_config_main_t * cm
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment)
static_always_inline uword vnet_gso_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int is_l2, int is_ip4, int is_ip6, int do_segmentation)
u32 flow_id
Generic flow identifier.
static_always_inline void tso_init_buf_from_template(vlib_main_t *vm, vlib_buffer_t *nb0, vlib_buffer_t *b0, u16 template_data_sz, u16 gso_size, u8 **p_dst_ptr, u16 *p_dst_left, u32 next_tcp_seq, u32 flags, generic_header_offset_t *gho)
static_always_inline void vnet_get_outer_header(vlib_buffer_t *b0, generic_header_offset_t *gho)
struct _vlib_node_registration vlib_node_registration_t
u16 current_length
Number of bytes between current data and the end of this buffer.
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
union ip_adjacency_t_::@144 sub_type
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
@ VNET_INTERFACE_COUNTER_TX_ERROR
vlib_put_next_frame(vm, node, next_index, 0)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
vnet_interface_per_thread_data_t * per_thread_data
static_always_inline u16 tso_segment_vxlan_tunnel_fixup(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *sb0, generic_header_offset_t *gho)
static_always_inline void drop_one_buffer_and_count(vlib_main_t *vm, vnet_main_t *vnm, vlib_node_runtime_t *node, u32 *pbi0, u32 sw_if_index, u32 drop_error_code)
static_always_inline void vnet_get_inner_header(vlib_buffer_t *b0, generic_header_offset_t *gho)
A collection of simple counters.
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define VNET_FEATURES(...)
static_always_inline void vnet_generic_header_offset_parser(vlib_buffer_t *b0, generic_header_offset_t *gho, int is_l2, int is_ip4, int is_ip6)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static u16 ip4_header_checksum(ip4_header_t *i)
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer Shorthand to free a single buffer chain.
#define clib_panic(format, args...)
static_always_inline void tso_fixup_segmented_buf(vlib_main_t *vm, vlib_buffer_t *b0, u8 tcp_flags, int is_l2, int is_ip6, generic_header_offset_t *gho)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
vl_api_interface_index_t sw_if_index
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static ip_adjacency_t * adj_get(adj_index_t adj_index)
Get a pointer to an adjacency object from its index.
u32 opaque[10]
Opaque data used by sub-graphs for their own purposes.
vnet_interface_main_t interface_main
@ VNET_INTERFACE_OUTPUT_NEXT_DROP
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)
struct ip_adjacency_t_::@144::@146 midchain
IP_LOOKUP_NEXT_MIDCHAIN.