/* avf_tx_desc_get_dtyp() */
  return d->qword[1] & 0x0f;
/* avf_tx_prepare_cksum(): compute checksum/TSO offload bits for a buffer */
  if (!is_tso && !(b->flags & (VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
                               VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                               VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
    return 0;

  u32 is_tcp = is_tso || b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
  u32 is_udp = !is_tso && b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  /* ... */
  ASSERT (!is_tcp || !is_udp);
  /* ... */
  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;
  /* ... */

  /* IPv4: seed the L4 checksum with the pseudo-header sum */
  psh.l4len = is_tso ? 0 :
    clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
                          (l4_hdr_offset - l3_hdr_offset));
  sum = ~ip_csum (&psh, sizeof (psh));
  /* ... */

  /* IPv6: same idea with the IPv6 pseudo-header */
  sum = ~ip_csum (&psh, sizeof (psh));
  /* ... */
  sum = clib_net_to_host_u16 (sum);
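/* The pseudo-header checksum above is a plain 16-bit ones'-complement sum.
 * The sketch below is my own illustration of what a helper like ip_csum()
 * computes; it is not the VPP implementation, only the (data, n_left) shape
 * of the real helper is taken from the symbol list at the end of this
 * listing. */
#include <stdint.h>

static uint16_t
ones_complement_sum (const void *data, uint16_t n_bytes)
{
  const uint8_t *p = data;
  uint32_t sum = 0;

  while (n_bytes > 1)
    {
      sum += (uint32_t) ((p[0] << 8) | p[1]); /* 16-bit words, network order */
      p += 2;
      n_bytes -= 2;
    }
  if (n_bytes)                                /* odd trailing byte */
    sum += (uint32_t) p[0] << 8;
  while (sum >> 16)                           /* fold carries back in */
    sum = (sum & 0xffff) + (sum >> 16);

  return (uint16_t) sum;   /* caller negates (~) to seed the HW checksum */
}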
/* avf_tx_enqueue() */
static_always_inline u16
avf_tx_enqueue (vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq,
                u32 *buffers, u32 n_packets, int use_va_dma)
{
  /* ... */
  const u32 offload_mask = VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
    VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
    VNET_BUFFER_F_GSO;
  u64 one_by_one_offload_flags = 0;
  /* ... */
  u16 *slot, n_desc_left, n_packets_left = n_packets;
  /* ... */
  if (n_desc_left == 0)
    return 0;

  while (n_packets_left && n_desc_left)
    {
      /* ... */
      if (n_packets_left < 8 || n_desc_left < 4)
        goto one_by_one;
      /* ... */
      if (or_flags & (VLIB_BUFFER_NEXT_PRESENT | offload_mask))
        goto one_by_one;
      /* ... */
      d[0].qword[1] = ((u64) b[0]->current_length) << 34 | bits;
      d[1].qword[1] = ((u64) b[1]->current_length) << 34 | bits;
      d[2].qword[1] = ((u64) b[2]->current_length) << 34 | bits;
      d[3].qword[1] = ((u64) b[3]->current_length) << 34 | bits;
      /* ... */

    one_by_one:
      one_by_one_offload_flags = 0;
      txq->bufs[next] = buffers[0];
      /* ... */
      is_tso = !!(b[0]->flags & VNET_BUFFER_F_GSO);
      /* ... */

      /* enough descriptors for the whole chain (+ context desc for TSO)? */
      if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          n_desc_needed = 1 + is_tso;
          /* ... */
          while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              b0 = vlib_get_buffer (vm, b0->next_buffer);
              n_desc_needed++;
            }
          /* ... */
          vlib_error_count (vm, node->node_index,
                            AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
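/* Sketch (illustrative, not part of output.c) of the descriptor accounting
 * done by the loop above: one data descriptor per chained segment, plus one
 * context descriptor when TSO is requested. It uses vlib_get_buffer() and
 * next_buffer as documented in the symbol list at the end of this listing. */
#include <vlib/vlib.h>

static u16
tx_descs_needed (vlib_main_t *vm, vlib_buffer_t *b, int is_tso)
{
  u16 n = 1 + (is_tso ? 1 : 0);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      n++;
    }
  return n;
}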
          /* ... */
          /* TSO: shift the stored buffer index up one slot to make room
             for the context descriptor */
          txq->bufs[next + 1] = txq->bufs[next];
          /* ... */

      /* chained buffer: one data descriptor per segment */
      while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          d[0].qword[1] = (((u64) b[0]->current_length) << 34) | /* ... */;
          /* ... */
        }
      /* ... */
      d[0].qword[1] =
        (((u64) b[0]->current_length) << 34) | bits |
        one_by_one_offload_flags;
      /* ... */
    }

  /* second pass: same processing, with ring indices masked (& mask) to
     handle wrap-around */
  while (n_packets_left && n_desc_left)
    {
      txq->bufs[next & mask] = buffers[0];
      /* ... */
      one_by_one_offload_flags = 0;
      is_tso = !!(b[0]->flags & VNET_BUFFER_F_GSO);
      /* ... */
      if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          n_desc_needed = 1 + is_tso;
          /* ... */
          while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              b0 = vlib_get_buffer (vm, b0->next_buffer);
              n_desc_needed++;
            }
          /* ... */
          vlib_error_count (vm, node->node_index,
                            AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);
          /* ... */
        }
      /* ... */
      txq->bufs[(next + 1) & mask] = txq->bufs[next & mask];
      /* ... */
      d = txq->descs + (next & mask);
      /* ... */
      while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          d[0].qword[1] = (((u64) b[0]->current_length) << 34) | /* ... */;
          /* ... */
        }
      /* ... */
      d[0].qword[1] =
        (((u64) b[0]->current_length) << 34) | bits |
        one_by_one_offload_flags;
      /* ... */
    }

  /* mark the last descriptor of the batch to report completion (RS) */
  u16 rs_slot = slot[0] = (next - 1) & mask;
  d = txq->descs + rs_slot;
  /* ... */
  return n_packets - n_packets_left;
}
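/* The descriptor writes above pack the segment length into qword[1] starting
 * at bit 34 and OR in the command/offload bits. Stand-alone sketch of that
 * packing; the 34-bit shift is taken from the code, while treating everything
 * else as one opaque pre-shifted command field is my assumption. */
#include <stdint.h>

static inline uint64_t
tx_desc_qword1 (uint16_t seg_len, uint64_t cmd_bits)
{
  /* buffer length in the field at bit 34, command/offload bits OR'd in */
  return ((uint64_t) seg_len << 34) | cmd_bits;
}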
/* VNET_DEVICE_CLASS_TX_FN (avf_device_class) */
  u8 qid = thread_index;
  /* ... */
  n_left = frame->n_vectors;
  /* ... */
  i32 complete_slot = -1;
  /* ... */
  complete_slot = slot[0];
  /* ... */
  if (complete_slot >= 0)
    {
      mask = txq->size - 1;
      n_free = (complete_slot + 1 - first) & mask;
      /* ... */
    }

  if (ad->flags & AVF_DEVICE_F_VA_DMA)
    /* ... */

  /* ... */
  vlib_error_count (vm, node->node_index,
                    AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
  /* ... */
  return frame->n_vectors - n_left;
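/* The completion handling above relies on the TX ring size being a power of
 * two, so slot arithmetic wraps with a simple mask. Illustrative sketch of
 * that arithmetic (my own code, not from output.c). */
#include <stdint.h>

static inline uint16_t
ring_slots_freed (uint16_t first, uint16_t complete_slot, uint16_t size)
{
  uint16_t mask = size - 1;     /* size must be a power of two */
  /* slots from `first` up to and including `complete_slot`, correct even
   * when the indices wrap past the end of the ring */
  return (uint16_t) ((complete_slot + 1 - first) & mask);
}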
Symbols referenced in this listing:

u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
#define AVF_TXD_OFFSET_MACLEN(val)
static uword vlib_buffer_get_current_va(vlib_buffer_t *b)
#define AVF_TXD_CTX_SEG_TLEN(val)
u32 ctx_desc_placeholder_bi
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers: frees the entire buffer chain for each buffer.
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
#define AVF_TXD_CTX_CMD_TSO
#define AVF_TXD_CMD_IIPT_IPV4
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
struct _tcp_header tcp_header_t
#define static_always_inline
#define vlib_prefetch_buffer_with_index(vm, bi, type)
Prefetch buffer metadata by buffer index. The first 64 bytes of the buffer contain most header information.
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
VNET_DEVICE_CLASS_TX_FN() avf_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
#define AVF_TXD_CMD_L4T_TCP
#define AVF_TXD_CMD_L4T_UDP
#define VNET_DEVICE_CLASS_TX_FN(devclass)
#define clib_ring_deq(ring)
static_always_inline int avf_tx_fill_ctx_desc(vlib_main_t *vm, avf_txq_t *txq, avf_tx_desc_t *d, vlib_buffer_t *b)
#define clib_ring_get_first(ring)
u32 node_index
Node index.
static_always_inline u16 ip_csum(void *data, u16 n_left)
static_always_inline u64 avf_tx_prepare_cksum(vlib_buffer_t *b, u8 is_tso)
static_always_inline u16 avf_tx_enqueue(vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq, u32 *buffers, u32 n_packets, int use_va_dma)
#define AVF_TXD_OFFSET_IPLEN(val)
#define clib_ring_enq(ring)
static_always_inline avf_device_t * avf_get_device(u32 dev_instance)
vlib_node_runtime_t * node
static_always_inline u8 avf_tx_desc_get_dtyp(avf_tx_desc_t *d)
#define AVF_TXD_CTX_SEG_MSS(val)
#define clib_atomic_store_rel_n(a, b)
static_always_inline void vlib_buffer_copy_indices(u32 *dst, u32 *src, u32 n_indices)
#define AVF_TXD_CMD_IIPT_IPV6
u32 next_buffer
Next buffer for this linked-list of buffers.
vlib_frame_t * frame
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static int tcp_header_bytes(tcp_header_t *t)
#define AVF_TXD_OFFSET_L4LEN(val)
static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index)
Free one buffer: shorthand to free a single buffer chain.
static u8 vlib_buffer_pool_get_default_for_numa(vlib_main_t *vm, u32 numa_node)
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
volatile u8 ref_count
Reference count for this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static __clib_warn_unused_result u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.