/* format_gso_trace (): show gso_size / gso_l4_hdr_sz for traced GSO packets */
  if (t->flags & VNET_BUFFER_F_GSO)
    s = format (s, "gso_sz %d gso_l4_hdr_sz %d", t->gso_size,
                t->gso_l4_hdr_sz);
  else
    s = format (s, "non-gso buffer");

/* tso_alloc_tx_bufs (): how many split buffers does this GSO packet need? */
tso_alloc_tx_bufs (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
                   vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
                   u16 gso_size, u16 first_data_size, gso_header_offset_t * gho)
{
  u16 first_packet_length = l234_sz + first_data_size;
  /* ... */
  ASSERT (n_bytes_b0 > first_packet_length);
  /* rounded-up division: the payload beyond the first packet fills extra
   * buffers of at most `size' bytes each */
  n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);

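The n_bufs increment above is a plain ceiling division: every payload byte that does not fit into the first packet (headers plus first_data_size) needs room in an extra buffer carrying at most `size' bytes. A small worked sketch, illustrative only, with names borrowed from the fragment rather than taken from the VPP function:

#include <stdint.h>

/* extra buffers needed beyond the first one */
static uint16_t
extra_seg_bufs_needed (uint32_t n_bytes_b0, uint16_t l234_sz,
                       uint16_t first_data_size, uint16_t size)
{
  uint16_t first_packet_length = l234_sz + first_data_size;
  /* e.g. a 9000-byte chain with 54 bytes of headers, 1394 payload bytes
   * already placed in the first packet and size == 1448:
   * (9000 - 1448 + 1447) / 1448 == 6 extra buffers,
   * i.e. ceil ((9000 - 1448) / 1448.0) == 6 */
  return (n_bytes_b0 - first_packet_length + (size - 1)) / size;
}
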
/* in tso_init_buf_from_template_base (nb0, b0, flags, length) */
  nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;

/* tso_init_buf_from_template (..., u16 gso_size, u8 ** p_dst_ptr,
 * u16 * p_dst_left, ...): prepare a fresh segment buffer and hand back
 * the payload write cursor */
  /* ... */
  tcp->seq_number = clib_host_to_net_u32 (next_tcp_seq);

/* in tso_fixup_segmented_buf (): patch the copied TCP header of a segment */
  tcp->flags = tcp_flags;

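In outline, the template step boils down to: replicate the packet's l2/l3/l4 headers into the new buffer, then return a write cursor just past them, capped at one MSS of payload. A minimal sketch over plain memory; the names here are ours, not the VPP helpers:

#include <stdint.h>
#include <string.h>

/* copy the header template and report where the payload copy may write */
static void
init_seg_from_template_sketch (uint8_t *seg_buf, uint16_t seg_buf_size,
                               const uint8_t *orig_pkt, uint16_t template_sz,
                               uint16_t gso_size,
                               uint8_t **p_dst_ptr, uint16_t *p_dst_left)
{
  memcpy (seg_buf, orig_pkt, template_sz);   /* replicate l2/l3/l4 headers */
  *p_dst_ptr = seg_buf + template_sz;        /* payload starts here */
  /* at most one MSS of payload, bounded by the room left in the buffer */
  uint16_t room = seg_buf_size - template_sz;
  *p_dst_left = gso_size < room ? gso_size : room;
}
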
/* tso_segment_buffer (): allocate ptd->split_buffers and chop the (possibly
 * chained) GSO buffer sb0 into gso_size-sized packets */
  u8 save_tcp_flags = 0;
  u8 tcp_flags_no_fin_psh = 0;
  u32 next_tcp_seq = 0;
  /* ... */
  next_tcp_seq = clib_net_to_host_u32 (tcp->seq_number);
  /* keep the original flags for the last segment, strip FIN/PSH elsewhere */
  save_tcp_flags = tcp->flags;
  tcp_flags_no_fin_psh = tcp->flags & ~(TCP_FLAG_FIN | TCP_FLAG_PSH);
  u32 default_bflags =
    sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
  /* ... */
  next_tcp_seq += first_data_size;

  if (PREDICT_FALSE (0 == tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0,
                                             l234_sz, gso_size,
                                             first_data_size, gho)))
    return 0;
  /* the first split buffer keeps the headers plus first_data_size of payload */
  tso_init_buf_from_template_base (b0, sb0, default_bflags,
                                   l234_sz + first_data_size);

  u32 total_src_left = n_bytes_b0 - l234_sz - first_data_size;
  /* ... */
  u8 *src_ptr, *dst_ptr;
  u16 src_left, dst_left;
  /* ... cdb0 is the current destination (split) buffer */
  tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size, &dst_ptr,
                              &dst_left, next_tcp_seq, default_bflags, gho);

  while (total_src_left)
    {
      u16 bytes_to_copy = clib_min (src_left, dst_left);
      clib_memcpy_fast (dst_ptr, src_ptr, bytes_to_copy);

      src_left -= bytes_to_copy;
      src_ptr += bytes_to_copy;
      total_src_left -= bytes_to_copy;
      dst_left -= bytes_to_copy;
      dst_ptr += bytes_to_copy;
      next_tcp_seq += bytes_to_copy;

      if (0 == src_left)
        {
          int has_next = (csb0->flags & VLIB_BUFFER_NEXT_PRESENT);
          /* ... follow next_buffer to the next source buffer, or else: */
          ASSERT (total_src_left == 0);
        }
      if (0 == dst_left && total_src_left)
        {
          /* ... take the next split buffer and start a new segment */
          tso_init_buf_from_template (vm, cdb0, b0, l234_sz, gso_size,
                                      &dst_ptr, &dst_left, next_tcp_seq,
                                      default_bflags, gho);
        }
    }

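Stripped of the vlib buffer plumbing, the copy loop above is two cursors walking source payload and destination segments, advancing the TCP sequence number by the bytes placed in each segment and keeping FIN/PSH only on the last one. A self-contained sketch under simplifying assumptions (contiguous payload, pre-allocated segment storage, hypothetical seg_hdr_t standing in for the real headers):

#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

typedef struct { uint32_t seq; uint8_t flags; } seg_hdr_t; /* stand-in header */

static int
segment_sketch (const uint8_t *payload, uint32_t total_src_left,
                uint32_t gso_size, uint32_t first_seq,
                uint8_t orig_flags, uint8_t fin_psh_mask,
                seg_hdr_t *hdrs, uint8_t **seg_data, int max_segs)
{
  uint32_t next_tcp_seq = first_seq;
  int seg = 0;

  while (total_src_left && seg < max_segs)
    {
      uint32_t bytes_to_copy =
        total_src_left < gso_size ? total_src_left : gso_size;

      /* per-segment header: sequence number in network order,
       * FIN/PSH stripped unless this turns out to be the final segment */
      hdrs[seg].seq = htonl (next_tcp_seq);
      hdrs[seg].flags = orig_flags & ~fin_psh_mask;

      memcpy (seg_data[seg], payload, bytes_to_copy);

      payload += bytes_to_copy;
      total_src_left -= bytes_to_copy;
      next_tcp_seq += bytes_to_copy;
      seg++;
    }

  if (seg > 0)
    hdrs[seg - 1].flags = orig_flags;  /* last segment keeps FIN/PSH */
  return seg;
}
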
/* vnet_gso_node_inline (): the per-frame dispatch loop */
vnet_gso_node_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame, vnet_main_t * vnm,
                      vnet_hw_interface_t * hi, int is_ip6,
                      int do_segmentation)
{
  /* ... */
  u32 *from_end = from + n_left_from;
  /* ... */
  while (n_left_from > 0)
    {
      /* ... */
      if (!do_segmentation)
        while (from + 8 <= from_end && n_left_to_next >= 4)
          {
            u32 bi0, bi1, bi2, bi3;
            u32 next0, next1, next2, next3;
            u32 swif0, swif1, swif2, swif3;
            /* ... prefetch b[4..7], load b[0..3], read swif0..swif3 ... */
            /* leave the fast path when a GSO packet is bound for a tx
             * interface that cannot take it; repeated for b[1]..b[3] */
            if (/* ... interface for swif0 lacks GSO support ... */
                (b[0]->flags & VNET_BUFFER_F_GSO))
              { /* ... fall back to the single-buffer loop ... */ }
            /* ... */
            if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              { /* ... vlib_add_trace () for b[0] ... */ }
            if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              { /* ... */ }
            if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              { /* ... */ }
            if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              { /* ... */ }
            /* ... */
            vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                             n_left_to_next, bi0, bi1, bi2,
                                             bi3, next0, next1, next2, next3);
          }

      while (from + 1 <= from_end && n_left_to_next > 0)
        {
          /* ... */
          u32 do_segmentation0 = 0;
          /* segment here only if the tx interface cannot do GSO itself */
          if (/* ... interface for this packet lacks GSO support ... */
              (b[0]->flags & VNET_BUFFER_F_GSO))
            do_segmentation0 = 1;
          else
            do_segmentation0 = do_segmentation;

          to_next[0] = bi0 = from[0];
          /* ... */
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            { /* ... vlib_add_trace () ... */ }
          /* ... */
          if (do_segmentation0)
            {
              /* tso_segment_buffer () left the segments in
               * ptd->split_buffers; drain them into tx frames */
              while (n_tx_bufs > 0)
                {
                  /* ... */
                  if (n_tx_bufs >= n_left_to_next)
                    {
                      while (n_left_to_next > 0)
                        {
                          sbi0 = to_next[0] = from_seg[0];
                          /* ... */
                        }
                      /* frame full: hand it off and open the next one */
                      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
                      vlib_get_next_frame (vm, node, next_index, to_next,
                                           n_left_to_next);
                    }
                  else
                    while (n_tx_bufs > 0)
                      {
                        sbi0 = to_next[0] = from_seg[0];
                        /* ... */
                      }
                }
              /* ... */
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      /* ... */
    }

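The n_tx_bufs handling above is just "copy as many segment indices as fit into the frame that is currently open, then close it and open another". A stand-alone sketch with plain arrays in place of VPP frames; frame_room and emit_frame are hypothetical stand-ins for the frame size and for the vlib_put_next_frame()/vlib_get_next_frame() pair:

#include <stdint.h>

/* copy n_tx_bufs segment indices from from_seg[] into frames holding at
 * most frame_room entries each */
static void
drain_segments_sketch (const uint32_t *from_seg, uint32_t n_tx_bufs,
                       uint32_t *frame, uint32_t frame_room,
                       void (*emit_frame) (uint32_t *frame, uint32_t n))
{
  uint32_t n_left_to_next = frame_room;
  uint32_t *to_next = frame;

  while (n_tx_bufs > 0)
    {
      uint32_t n = n_tx_bufs < n_left_to_next ? n_tx_bufs : n_left_to_next;
      for (uint32_t i = 0; i < n; i++)
        to_next[i] = from_seg[i];
      from_seg += n;
      n_tx_bufs -= n;
      n_left_to_next -= n;
      to_next += n;

      if (n_left_to_next == 0)   /* frame full: hand it off, start a new one */
        {
          emit_frame (frame, frame_room);
          to_next = frame;
          n_left_to_next = frame_room;
        }
    }
  if (n_left_to_next < frame_room)   /* flush the partially filled frame */
    emit_frame (frame, frame_room - n_left_to_next);
}
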
/* node registrations (constructor macros) */
VLIB_REGISTER_NODE (gso_l2_ip4_node) = {
  .vector_size = sizeof (u32),
  /* ... */
  .name = "gso-l2-ip4",
};

VLIB_REGISTER_NODE (gso_l2_ip6_node) = {
  .vector_size = sizeof (u32),
  /* ... */
  .name = "gso-l2-ip6",
};

VLIB_REGISTER_NODE (gso_ip4_node) = {
  .vector_size = sizeof (u32),
  /* ... */
  .name = "gso-ip4",
};

VLIB_REGISTER_NODE (gso_ip6_node) = {
  .vector_size = sizeof (u32),
  /* ... */
  .name = "gso-ip6",
};

/* hook each node onto its output feature arc */
VNET_FEATURE_INIT (gso_l2_ip4_node, static) = {
  .arc_name = "l2-output-ip4",
  .node_name = "gso-l2-ip4",
};

VNET_FEATURE_INIT (gso_l2_ip6_node, static) = {
  .arc_name = "l2-output-ip6",
  .node_name = "gso-l2-ip6",
};

VNET_FEATURE_INIT (gso_ip4_node, static) = {
  .arc_name = "ip4-output",
  .node_name = "gso-ip4",
};

VNET_FEATURE_INIT (gso_ip6_node, static) = {
  .arc_name = "ip6-output",
  .node_name = "gso-ip6",
};

#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static_always_inline uword vnet_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, int is_ip6)
vnet_interface_per_thread_data_t * per_thread_data
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers; frees the entire buffer chain for each buffer.
static_always_inline u32 tso_segment_buffer(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, u32 sbi0, vlib_buffer_t *sb0, gso_header_offset_t *gho, u32 n_bytes_b0, int is_ip6)
Allocate the necessary number of ptd->split_buffers, and segment the possibly chained buffer(s) from ...
vnet_main_t * vnet_get_main(void)
static vnet_hw_interface_t * vnet_get_sup_hw_interface(vnet_main_t *vnm, u32 sw_if_index)
#define VNET_FEATURE_INIT(x,...)
vnet_interface_main_t interface_main
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
#define clib_memcpy_fast(a, b, c)
u16 current_length
Nbytes between current data and the end of this buffer.
#define vlib_validate_buffer_enqueue_x4(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, bi2, bi3, next0, next1, next2, next3)
Finish enqueueing four buffers forward in the graph.
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
#define VLIB_NODE_FN(node)
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
struct _tcp_header tcp_header_t
#define static_always_inline
vl_api_interface_index_t sw_if_index
vlib_node_registration_t gso_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_ip4_node)
vnet_hw_interface_flags_t flags
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
vlib_simple_counter_main_t
A collection of simple counters.
vlib_node_registration_t gso_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_ip6_node)
static_always_inline void drop_one_buffer_and_count(vlib_main_t *vm, vnet_main_t *vnm, vlib_node_runtime_t *node, u32 *pbi0, u32 sw_if_index, u32 drop_error_code)
uword vlib_error_drop_buffers(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 next_buffer_stride, u32 n_buffers, u32 next_index, u32 drop_error_node, u32 drop_error_code)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
u32 trace_handle
Specifies trace buffer handle if VLIB_PACKET_IS_TRACED flag is set.
vlib_simple_counter_main_t * sw_if_counters
u32 node_index
Node index.
static_always_inline void tso_fixup_segmented_buf(vlib_buffer_t *b0, u8 tcp_flags, int is_ip6, gso_header_offset_t *gho)
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static_always_inline u16 tso_alloc_tx_bufs(vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd, vlib_buffer_t *b0, u32 n_bytes_b0, u16 l234_sz, u16 gso_size, u16 first_data_size, gso_header_offset_t *gho)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define VLIB_REGISTER_NODE(x,...)
static_always_inline void vnet_feature_next(u32 *next0, vlib_buffer_t *b0)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_node_runtime_t * node
u16 cached_next_index
Next frame index that vector arguments were last enqueued to last time this node ran.
static_always_inline void tso_init_buf_from_template(vlib_main_t *vm, vlib_buffer_t *nb0, vlib_buffer_t *b0, u16 template_data_sz, u16 gso_size, u8 **p_dst_ptr, u16 *p_dst_left, u32 next_tcp_seq, u32 flags, gso_header_offset_t *gho)
static u8 * format_gso_trace(u8 *s, va_list *args)
vlib_node_registration_t gso_l2_ip6_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip6_node)
vlib_node_registration_t gso_l2_ip4_node
(constructor) VLIB_REGISTER_NODE (gso_l2_ip4_node)
#define VNET_FEATURES(...)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 next_buffer
Next buffer for this linked-list of buffers.
vlib_buffer_t
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static_always_inline uword vnet_gso_node_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, vnet_main_t *vnm, vnet_hw_interface_t *hi, int is_ip6, int do_segmentation)
vlib_frame_t * frame
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer; shorthand to free a single buffer chain.
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
#define clib_panic(format, args...)
u32 opaque[10]
Opaque data used by sub-graphs for their own purposes.
static_always_inline void tso_init_buf_from_template_base(vlib_buffer_t *nb0, vlib_buffer_t *b0, u32 flags, u16 length)
static_always_inline gso_header_offset_t vnet_gso_header_offset_parser(vlib_buffer_t *b0, int is_ip6)