FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Annotated excerpts from the AVF plugin TX path: descriptor type decoding, checksum/TSO offload preparation, descriptor copy, and the transmit enqueue loop.
avf_tx_desc_get_dtyp() reads the descriptor type from the low nibble of the second quadword:

  return d->qword[1] & 0x0f;
avf_tx_prepare_cksum (vlib_buffer_t *b, u8 is_tso) computes the checksum/TSO offload flags for a TX descriptor. Key fragments:

  if (!is_tso && !(b->flags & VNET_BUFFER_F_OFFLOAD))
    return 0;

  u32 is_tcp = is_tso || oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  u32 is_udp = !is_tso && oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  if (!is_tcp && !is_udp)
    return 0;

  u32 is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;

  ASSERT (!(is_tcp && is_udp));

  u16 l2_len = l3_hdr_offset - l2_hdr_offset;
  u16 l3_len = l4_hdr_offset - l3_hdr_offset;

  /* for TSO, the IPv6 payload length is cleared */
  ip6->payload_length = 0;

  /* IPv4 pseudo-header used to seed the TCP/UDP checksum */
  psh.src = ip4->src_address.as_u32;
  psh.dst = ip4->dst_address.as_u32;
  psh.l4len = is_tso ? 0 :
    clib_host_to_net_u16 (clib_net_to_host_u16 (ip4->length) -
                          (l4_hdr_offset - l3_hdr_offset));
  sum = ~ip_csum (&psh, sizeof (psh));

  /* IPv6 pseudo-header */
  psh.src = ip6->src_address;
  psh.dst = ip6->dst_address;
  psh.proto = clib_host_to_net_u32 ((u32) ip6->protocol);
  psh.l4len = is_tso ? 0 : ip6->payload_length;
  sum = ~ip_csum (&psh, sizeof (psh));

  /* convert the seed back to host byte order before storing it */
  sum = clib_net_to_host_u16 (sum);
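For context, here is a minimal standalone sketch of the pseudo-header seeding idea: compute the one's-complement sum of an IPv4 pseudo-header and store its complement in the TCP/UDP checksum field so that hardware can finish the checksum over the L4 header and payload. Plain C types are used; ip4_psh_t, csum16() and pseudo_header_seed() are illustrative names, not the VPP definitions.

#include <stdint.h>
#include <arpa/inet.h>   /* htons */

typedef struct {
  uint32_t src;     /* IPv4 source address, network order */
  uint32_t dst;     /* IPv4 destination address, network order */
  uint8_t  zero;
  uint8_t  proto;   /* IPPROTO_TCP or IPPROTO_UDP */
  uint16_t l4len;   /* L4 length, network order; 0 when TSO is used */
} ip4_psh_t;

/* 16-bit one's-complement sum over a buffer (even length assumed) */
static uint16_t
csum16 (const void *data, size_t len)
{
  const uint16_t *p = data;
  uint32_t sum = 0;
  while (len > 1)
    {
      sum += *p++;
      len -= 2;
    }
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) sum;
}

/* seed value stored in the TCP/UDP checksum field; the NIC folds the
   L4 header and payload into it when completing the checksum */
static uint16_t
pseudo_header_seed (uint32_t src, uint32_t dst, uint8_t proto,
                    uint16_t l4len_host, int is_tso)
{
  ip4_psh_t psh = { 0 };
  psh.src = src;
  psh.dst = dst;
  psh.proto = proto;
  psh.l4len = is_tso ? 0 : htons (l4len_host);
  return (uint16_t) ~csum16 (&psh, sizeof (psh));
}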
avf_tx_copy_desc (avf_tx_desc_t *d, avf_tx_desc_t *s, u32 n_descs) copies TX descriptors using the widest unaligned vector type the build supports:

  #if defined CLIB_HAVE_VEC512
    u64x8u *dv = (u64x8u *) d;
    u64x8u *sv = (u64x8u *) s;
  #elif defined CLIB_HAVE_VEC256
    u64x4u *dv = (u64x4u *) d;
    u64x4u *sv = (u64x4u *) s;
  #elif defined CLIB_HAVE_VEC128
    u64x2u *dv = (u64x2u *) d;
    u64x2u *sv = (u64x2u *) s;
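A standalone sketch of the same width-dispatch pattern, using GCC/Clang vector extensions instead of the CLIB vector types; desc_t and copy_descs() are illustrative names, and the __AVX512F__/__AVX2__ tests stand in for the CLIB_HAVE_VEC* macros.

#include <stdint.h>

typedef struct { uint64_t qword[2]; } desc_t;   /* 16-byte TX descriptor */

#if defined(__AVX512F__)
typedef uint64_t u64x8 __attribute__ ((vector_size (64)));
typedef u64x8 u64x8u __attribute__ ((aligned (1), may_alias));
#endif
#if defined(__AVX2__)
typedef uint64_t u64x4 __attribute__ ((vector_size (32)));
typedef u64x4 u64x4u __attribute__ ((aligned (1), may_alias));
#endif

static void
copy_descs (desc_t *d, const desc_t *s, uint32_t n)
{
#if defined(__AVX512F__)
  for (; n >= 4; n -= 4, d += 4, s += 4)    /* 4 descriptors per 64B copy */
    *(u64x8u *) d = *(const u64x8u *) s;
#elif defined(__AVX2__)
  for (; n >= 2; n -= 2, d += 2, s += 2)    /* 2 descriptors per 32B copy */
    *(u64x4u *) d = *(const u64x4u *) s;
#endif
  for (; n; n--, d++, s++)                  /* scalar tail / fallback */
    d[0] = s[0];
}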
avf_tx_fill_data_desc (vlib_main_t *vm, avf_tx_desc_t *d, vlib_buffer_t *b, u64 cmd, int use_va_dma) writes one data descriptor for a buffer segment; use_va_dma selects whether the DMA address comes from vlib_buffer_get_current_va() or vlib_buffer_get_current_pa().
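A minimal sketch of what filling such a 16-byte data descriptor looks like, assuming the usual split of address in qword[0] and command bits plus buffer length in qword[1]; the constants and the length-field shift below are illustrative, not taken from the AVF headers.

#include <stdint.h>

typedef struct { uint64_t qword[2]; } desc_t;

/* illustrative command bits, not the AVF_TXD_CMD_* values */
#define CMD_EOP (1ull << 4)   /* end of packet */
#define CMD_RS  (1ull << 5)   /* report status when done */

static void
fill_data_desc (desc_t *d, uint64_t dma_addr, uint16_t len, uint64_t cmd)
{
  d->qword[0] = dma_addr;                      /* address the NIC reads from */
  d->qword[1] = cmd | ((uint64_t) len << 34);  /* illustrative length position */
}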
avf_tx_prepare (vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq, u32 *buffers, u32 n_packets, u16 *n_enq_descs, int use_va_dma) prepares descriptors for up to n_packets buffers, returning the number of packets consumed and, via n_enq_descs, the number of descriptors written:
  u16 n_free_desc, n_desc_left, n_packets_left = n_packets;

  if (n_desc_left == 0)
    return 0;

  while (n_packets_left && n_desc_left)
    {
      /* fast path: handle 4 packets at a time when enough packets and
         descriptors remain and none needs chaining, offload or GSO */
      if (n_packets_left < 8 || n_desc_left < 4)
        goto one_by_one;
      ...
        (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD | VNET_BUFFER_F_GSO)
      ...

      /* one-by-one path; the common case is a single-segment, non-GSO packet */
      if (PREDICT_TRUE (
            (flags & (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_GSO)) == 0))
      ...

      /* count the descriptors a chained and/or GSO packet needs */
      u16 n_desc_needed = 1;

      if (flags & VLIB_BUFFER_NEXT_PRESENT)
        while (next->flags & VLIB_BUFFER_NEXT_PRESENT)
          ...
      if (flags & VNET_BUFFER_F_GSO)
        ...
      /* segments that exceed the buffer size are dropped and counted */
      ...
        AVF_TX_ERROR_SEGMENT_SIZE_EXCEEDED, 1);

      /* GSO packets get a context descriptor; others only offload flags */
      if (flags & VNET_BUFFER_F_GSO)
        ...
      else if (flags & VNET_BUFFER_F_OFFLOAD)
        ...
      /* one data descriptor per segment of the buffer chain */
      while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
        ...
    }

  *n_enq_descs = n_free_desc - n_desc_left;
  return n_packets - n_packets_left;
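A standalone sketch of the descriptor-counting step above: walk the buffer chain, add a context descriptor for GSO packets, and reject the packet when the remaining ring space cannot hold the whole chain. buf_t, its next pointer and the flag values are illustrative stand-ins for vlib_buffer_t (which chains via buffer indices rather than pointers).

#include <stdint.h>

#define F_NEXT_PRESENT (1u << 0)
#define F_GSO          (1u << 1)

typedef struct buf { uint32_t flags; struct buf *next; } buf_t;

/* returns the number of descriptors the chain needs, or 0 if it does not
   fit into the n_desc_left slots still available on the TX ring */
static uint16_t
descs_needed (const buf_t *b, uint16_t n_desc_left)
{
  uint16_t n = 1;                    /* one data descriptor per segment */
  if (b->flags & F_GSO)
    n++;                             /* extra context descriptor for TSO */
  while (b->flags & F_NEXT_PRESENT)
    {
      b = b->next;
      n++;
    }
  return n <= n_desc_left ? n : 0;
}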
The device class TX function (VNET_DEVICE_CLASS_TX_FN (avf_device_class)) frees buffers for completed descriptors, calls avf_tx_prepare(), copies the prepared descriptors into the hardware ring and advances the tail pointer. Fragments:

  i32 complete_slot = -1;
  ...
  complete_slot = slot[0];
  ...
  if (complete_slot >= 0)
  ...
  if (ad->flags & AVF_DEVICE_F_VA_DMA)
  ...
  /* when the ring wraps, the descriptor and buffer-index copies are split
     in two; the second copy moves the remaining entries to slot 0 */
  ..., n_desc - n_not_wrap);
  ..., n_desc - n_not_wrap);
  ...
  vlib_error_count (vm, node->node_index, AVF_TX_ERROR_NO_FREE_SLOTS, n_left);
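A minimal standalone sketch of the wrap-aware enqueue plus doorbell write, assuming a power-of-two ring size; ring_enqueue() and its parameters are illustrative rather than the driver's API, and the release fence stands in for whatever store ordering the real driver relies on before the tail update.

#include <stdint.h>
#include <string.h>

typedef struct { uint64_t qword[2]; } desc_t;

static void
ring_enqueue (desc_t *ring, uint16_t ring_size, uint16_t next,
              const desc_t *src, uint16_t n_desc, volatile uint32_t *tail_reg)
{
  uint16_t n_not_wrap = ring_size - next;   /* slots before the ring wraps */

  if (n_desc <= n_not_wrap)
    memcpy (ring + next, src, n_desc * sizeof (desc_t));
  else
    {
      /* first copy fills the ring to its end, second wraps to slot 0 */
      memcpy (ring + next, src, n_not_wrap * sizeof (desc_t));
      memcpy (ring, src + n_not_wrap, (n_desc - n_not_wrap) * sizeof (desc_t));
    }

  /* make descriptor stores visible before the NIC sees the new tail */
  __atomic_thread_fence (__ATOMIC_RELEASE);
  *tail_reg = (uint32_t) ((next + n_desc) & (ring_size - 1));
}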
Referenced symbols:

  u32 next_buffer - Next buffer for this linked-list of buffers.
  #define AVF_TXD_CTX_SEG_MSS(val)
  static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers) - Free buffers; frees the entire buffer chain for each buffer.
  static int tcp_header_bytes(tcp_header_t *t)
  static_always_inline void avf_tx_fill_data_desc(vlib_main_t *vm, avf_tx_desc_t *d, vlib_buffer_t *b, u64 cmd, int use_va_dma)
  #define AVF_TXD_OFFSET_L4LEN(val)
  vlib_main_t vlib_node_runtime_t vlib_frame_t *frame
  static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
  #define AVF_TXD_CMD_L4T_TCP
  vlib_buffer_copy_indices(to, tmp, n_free)
  static vlib_buffer_t *vlib_get_buffer(vlib_main_t *vm, u32 buffer_index) - Translate buffer index into buffer pointer.
  static_always_inline u16 avf_tx_prepare(vlib_main_t *vm, vlib_node_runtime_t *node, avf_txq_t *txq, u32 *buffers, u32 n_packets, u16 *n_enq_descs, int use_va_dma)
  static_always_inline void avf_tx_copy_desc(avf_tx_desc_t *d, avf_tx_desc_t *s, u32 n_descs)
  struct _tcp_header tcp_header_t
  vlib_main_t vlib_node_runtime_t *node
  static heap_elt_t *first(heap_header_t *h)
  vlib_main_t *vm
  static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
  static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b) - Get length in bytes of the buffer chain.
  #define VNET_DEVICE_CLASS_TX_FN(devclass)
  #define AVF_TXD_CMD_IIPT_IPV4
  avf_tx_desc_t *tmp_descs
  i16 current_data - Signed offset in data[], pre_data[] that we are currently processing.
  #define vec_elt_at_index(v, i) - Get vector value at index i, checking that i is in bounds.
  static void *vlib_frame_vector_args(vlib_frame_t *f) - Get pointer to frame vector data.
  static_always_inline void clib_spinlock_lock(clib_spinlock_t *p)
  static_always_inline u64 avf_tx_prepare_cksum(vlib_buffer_t *b, u8 is_tso)
  #define static_always_inline
  #define vlib_prefetch_buffer_with_index(vm, bi, type) - Prefetch buffer metadata by buffer index; the first 64 bytes of the buffer contain most header information.
  #define AVF_TXD_OFFSET_IPLEN(val)
  VNET_DEVICE_CLASS_TX_FN() avf_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
  #define AVF_TXD_CMD_IIPT_IPV6
  volatile u8 ref_count - Reference count for this buffer.
  static_always_inline u16 ip_csum(void *data, u16 n_left)
  #define AVF_TXD_OFFSET_MACLEN(val)
  u16 current_length - Number of bytes between current data and the end of this buffer.
  static void *vlib_frame_scalar_args(vlib_frame_t *f) - Get pointer to frame scalar data.
  vnet_hw_if_tx_frame_t *tf
  #define AVF_TXD_CMD_L4T_UDP
  static void avf_tail_write(volatile u32 *addr, u32 val)
  #define AVF_TXD_CTX_SEG_TLEN(val)
  #define clib_ring_get_first(ring)
  static_always_inline void clib_spinlock_unlock(clib_spinlock_t *p)
  static uword vlib_buffer_get_current_va(vlib_buffer_t *b)
  static_always_inline u8 avf_tx_desc_get_dtyp(avf_tx_desc_t *d)
  #define clib_ring_deq(ring)
  static void vlib_buffer_free_one(vlib_main_t *vm, u32 buffer_index) - Free one buffer; shorthand to free a single buffer chain.
  #define AVF_TXD_CTX_CMD_TSO
  #define clib_ring_enq(ring)
  static_always_inline u32 avf_tx_fill_ctx_desc(vlib_main_t *vm, avf_txq_t *txq, avf_tx_desc_t *d, vlib_buffer_t *b)
  static_always_inline avf_device_t *avf_get_device(u32 dev_instance)
  u32 flags - Buffer flags; VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, ... (part of the VLIB buffer representation).
  static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers) - Free buffers from ring without freeing tail buffers.