#define AF_XDP_TX_RETRIES 5

  STATIC_ASSERT (BITS (bi[0]) + CLIB_LOG2_CACHE_LINE_BYTES <=
                 XSK_UNALIGNED_BUF_OFFSET_SHIFT, "wrong size");
  /* in af_xdp_device_output_free(): drain the completion ring,
     handling the wrap-around at the end of the ring */
  compl = xsk_ring_cons__comp_addr (&txq->cq, idx);
  n = clib_min (n_free, size - (idx & mask));
  n_wrap = n_free - n;

wrap_around:
#ifdef CLIB_HAVE_VEC256
  /* gather the low 32 bits of 8 u64 completion addresses at once */
  const u32x8 mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
  /* ... */
  *(u32x4u *) (bi + 0) = u32x8_extract_lo (b2);
  *(u32x4u *) (bi + 4) = u32x8_extract_lo (b3);
#endif
  /* ... scalar loop for the remainder ... */
  if (n_wrap)
    {
      compl = xsk_ring_cons__comp_addr (&txq->cq, 0);
      n = n_wrap; n_wrap = 0;
      goto wrap_around;
    }
  xsk_ring_cons__release (&txq->cq, n_free);
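xsk_ring_cons__comp_addr() returns a raw pointer into the ring's backing array, so a batch that crosses the end of the ring must be consumed in two passes; size - (idx & mask) is the number of slots left before the wrap, and the goto above re-runs the same loops on the wrapped remainder. A small standalone model of the split (ring parameters are made up):

#include <assert.h>
#include <stdint.h>

/* split n_free slots starting at idx into a contiguous first chunk and a
   wrapped remainder; size must be a power of two */
static void
ring_split (uint32_t idx, uint32_t n_free, uint32_t size,
            uint32_t *n_first, uint32_t *n_wrap)
{
  uint32_t mask = size - 1;
  uint32_t to_end = size - (idx & mask);
  *n_first = n_free < to_end ? n_free : to_end;
  *n_wrap = n_free - *n_first;
}

int
main (void)
{
  uint32_t n_first, n_wrap;
  ring_split (1020, 10, 1024, &n_first, &n_wrap);
  assert (n_first == 4 && n_wrap == 6); /* 4 slots to the end, 6 from slot 0 */
  return 0;
}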
  /* in af_xdp_device_output_tx_db(): publish the filled descriptors and
     kick the kernel only when it asked to be woken up */
  xsk_ring_prod__submit (&txq->tx, n_tx);

  if (!xsk_ring_prod__needs_wakeup (&txq->tx))
    return;
  /* ... */
  ret = sendto (txq->xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
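When the socket is bound with XDP_USE_NEED_WAKEUP, the kernel stops servicing the TX ring while idle and raises the ring's wakeup flag; the zero-length sendto() is the doorbell that restarts it, and on the fast path the needs-wakeup check above avoids the syscall entirely. A standalone helper sketching the same kick (which errno values to treat as benign is my assumption, not taken from the VPP code):

#include <errno.h>
#include <sys/socket.h>

/* kick the kernel so it drains the AF_XDP TX ring; returns 0 on success
   or when the kernel is merely busy and will make progress anyway */
static int
xsk_tx_kick (int xsk_fd)
{
  if (sendto (xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0) >= 0)
    return 0;
  return (errno == EAGAIN || errno == EBUSY || errno == ENOBUFS) ? 0 : -1;
}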
  /* in af_xdp_device_output_tx_try(): reserve TX slots, then fill
     descriptors 4 at a time, handling ring wrap-around */
  struct xdp_desc *desc;
  /* ... */
  n_tx = xsk_ring_prod__reserve (&txq->tx, n_tx, &idx);
  /* ... */
  desc = xsk_ring_prod__tx_desc (&txq->tx, idx);
  n = clib_min (n_tx, size - (idx & mask));
  n_wrap = n_tx - n;

wrap_around:
  while (n >= 4)
    {
      /* the packet's offset within its buffer goes into the upper bits of
         the descriptor address (XDP unaligned-chunk format); addr, the
         umem-relative buffer address, is computed in the elided lines */
      offset = (sizeof (vlib_buffer_t) +
                b[0]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      /* ... */
      desc[0].addr = offset | addr;
      offset = (sizeof (vlib_buffer_t) +
                b[1]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      /* ... */
      desc[1].addr = offset | addr;
      offset = (sizeof (vlib_buffer_t) +
                b[2]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      /* ... */
      desc[2].addr = offset | addr;
      offset = (sizeof (vlib_buffer_t) +
                b[3]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      /* ... */
      desc[3].addr = offset | addr;
      /* ... */
    }
  while (n >= 1)
    {
      offset = (sizeof (vlib_buffer_t) +
                b[0]->current_data) << XSK_UNALIGNED_BUF_OFFSET_SHIFT;
      /* ... */
      desc[0].addr = offset | addr;
      /* ... */
    }
  if (n_wrap)
    {
      desc = xsk_ring_prod__tx_desc (&txq->tx, 0);
      n = n_wrap; n_wrap = 0;
      goto wrap_around;
    }
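Each desc[k].addr written above packs two fields into one u64, the layout required when the umem is registered with XDP_UMEM_UNALIGNED_CHUNK_FLAG: the umem-relative buffer address in the low 48 bits and the packet's offset within that buffer above bit 48. A standalone round trip over the encoding, with made-up values:

#include <assert.h>
#include <stdint.h>

#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48 /* as in <linux/if_xdp.h> */
#define XSK_UNALIGNED_BUF_ADDR_MASK \
  ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int
main (void)
{
  uint64_t base = 0x12340; /* hypothetical umem-relative buffer address */
  uint64_t off = 128;      /* hypothetical offset of packet data */

  /* encode exactly as the TX path does: desc.addr = offset | addr */
  uint64_t desc_addr = (off << XSK_UNALIGNED_BUF_OFFSET_SHIFT) | base;

  /* the consumer splits the fields back apart */
  assert ((desc_addr & XSK_UNALIGNED_BUF_ADDR_MASK) == base);
  assert ((desc_addr >> XSK_UNALIGNED_BUF_OFFSET_SHIFT) == off);
  return 0;
}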
  /* in VNET_DEVICE_CLASS_TX_FN (af_xdp_device_class): */
  n_tx = frame->n_vectors;
  /* ... */
  vlib_error_count (vm, node->node_index,
                    AF_XDP_TX_ERROR_NO_FREE_SLOTS, n_tx - n);
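These two lines bracket the core of the TX function: it alternates draining the completion ring with attempts to queue descriptors, bounded by AF_XDP_TX_RETRIES, so that a momentarily full ring can make room within a single dispatch; whatever still does not fit is freed and counted against AF_XDP_TX_ERROR_NO_FREE_SLOTS. A simplified self-contained model of that control flow (the per-attempt capacity is invented for the demo):

#include <stdint.h>
#include <stdio.h>

#define AF_XDP_TX_RETRIES 5

/* stand-ins for af_xdp_device_output_free() and
   af_xdp_device_output_tx_try(): simulate a ring that frees completed
   slots, then accepts at most 4 packets per attempt */
static void output_free (void) { /* recycle completed slots */ }
static uint32_t output_tx_try (uint32_t want) { return want < 4 ? want : 4; }

int
main (void)
{
  uint32_t n_tx = 10, n = 0;

  for (int i = 0; i < AF_XDP_TX_RETRIES && n < n_tx; i++)
    {
      output_free ();                /* make room: drain completions */
      n += output_tx_try (n_tx - n); /* queue as many as currently fit */
    }

  if (n != n_tx)
    printf ("dropped %u packets (no free slots)\n", n_tx - n);
  return 0;
}

The symbols this file references, with their Doxygen briefs, follow.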
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer.
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
u16 current_length
Number of bytes between current data and the end of this buffer.
#define CLIB_LOG2_CACHE_LINE_BYTES
vlib_buffer_main_t * buffer_main
static u32x8 u32x8_permute(u32x8 v, u32x8 idx)
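With the index vector { 0, 2, 4, 6, 1, 3, 5, 7 } used in the completion path above, the permute gathers the even 32-bit lanes, i.e. the low halves of four u64 addresses, into the lower 128 bits, which u32x8_extract_lo then stores as four consecutive buffer indices. The same operation written with raw AVX2 intrinsics (an illustration, not VPP's implementation; compile with -mavx2):

#include <assert.h>
#include <immintrin.h>
#include <stdint.h>

int
main (void)
{
  /* four u64 values whose low 32-bit halves we want, side by side */
  uint64_t in[4] = { 0x11, 0x22, 0x33, 0x44 };
  __m256i v = _mm256_loadu_si256 ((const __m256i *) in);

  /* same index vector as the u32x8 mask above; vpermd moves the even
     lanes (the low halves) into lanes 0..3 */
  __m256i idx = _mm256_setr_epi32 (0, 2, 4, 6, 1, 3, 5, 7);
  __m256i p = _mm256_permutevar8x32_epi32 (v, idx);

  uint32_t out[4];
  _mm_storeu_si128 ((__m128i *) out, _mm256_castsi256_si128 (p));
  assert (out[0] == 0x11 && out[1] == 0x22 && out[2] == 0x33
          && out[3] == 0x44);
  return 0;
}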
#define af_xdp_device_error(dev, fmt,...)
VNET_DEVICE_CLASS_TX_FN(af_xdp_device_class)(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static vlib_buffer_known_state_t vlib_buffer_is_known(vlib_main_t *vm, u32 buffer_index)
af_xdp_device_t * devices
#define static_always_inline
af_xdp_main_t af_xdp_main
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
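The 4-wide TX loop processes four buffers per iteration and prefetches the headers of buffers a few iterations ahead so their metadata is in cache by the time the loop reaches them. A schematic of the pattern with the plain compiler builtin (types and look-ahead distance are stand-ins):

#include <stdint.h>

typedef struct { uint64_t meta[8]; } buf_t; /* stand-in for vlib_buffer_t */

/* prefetch the buffer 4 iterations ahead while working on the current one */
static void
fill (buf_t **b, uint32_t *len, uint32_t n)
{
  for (uint32_t i = 0; i < n; i++)
    {
      if (i + 4 < n)
        __builtin_prefetch (b[i + 4], 0 /* read */, 3 /* high locality */);
      len[i] = (uint32_t) b[i]->meta[0]; /* placeholder for descriptor fill */
    }
}

int
main (void)
{
  static buf_t pool[8];
  buf_t *b[8];
  uint32_t len[8];
  for (int i = 0; i < 8; i++)
    b[i] = &pool[i];
  fill (b, len, 8);
  return 0;
}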
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define VNET_DEVICE_CLASS_TX_FN(devclass)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline void af_xdp_device_output_free(vlib_main_t *vm, const vlib_node_runtime_t *node, af_xdp_txq_t *txq)
u32 node_index
Node index.
static_always_inline void af_xdp_device_output_tx_db(vlib_main_t *vm, const vlib_node_runtime_t *node, af_xdp_device_t *ad, af_xdp_txq_t *txq, const u32 n_tx)
vlib_main_t *vm, vlib_node_runtime_t *node
#define AF_XDP_TX_RETRIES
static uword pointer_to_uword(const void *p)
static_always_inline u32 af_xdp_device_output_tx_try(vlib_main_t *vm, const vlib_node_runtime_t *node, af_xdp_device_t *ad, af_xdp_txq_t *txq, u32 n_tx, u32 *bi)
#define STATIC_ASSERT(truth,...)
vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame
VLIB buffer representation.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static_always_inline void vlib_get_buffers(vlib_main_t *vm, u32 *bi, vlib_buffer_t **b, int count)
Translate array of buffer indices into buffer pointers.
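vlib_get_buffers() performs the index-to-pointer translation at the top of the TX path: a u32 buffer index is the buffer's cache-line-granular position within the buffer memory region, so translation is a shift and an add from a per-main base. A schematic model (VPP's real version is vectorized and takes the base from buffer_main; the constant is a typical value, not read from a build):

#include <assert.h>
#include <stdint.h>

#define LOG2_CACHE_LINE 6 /* stand-in for CLIB_LOG2_CACHE_LINE_BYTES */

/* schematic: pointer = mem_start + (index << log2 cache line) */
static void *
buffer_from_index (uint8_t *buffer_mem_start, uint32_t bi)
{
  return buffer_mem_start + ((uint64_t) bi << LOG2_CACHE_LINE);
}

int
main (void)
{
  static uint8_t mem[1 << 20]; /* stand-in for the buffer memory region */
  uint8_t *b = buffer_from_index (mem, 42);
  assert (b == mem + (42u << LOG2_CACHE_LINE));
  return 0;
}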
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
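The _if_init variants make the queue lock optional: when each thread owns its TX queue the spinlock is never initialized and both calls reduce to a no-op; only a shared-queue configuration initializes it. A minimal model of that convention (a pthread mutex plus a flag standing in for clib_spinlock_t):

#include <pthread.h>

typedef struct
{
  pthread_mutex_t m;
  int initialized; /* stays 0 unless queues are shared between threads */
} opt_lock_t;

static void
opt_lock (opt_lock_t *l)
{
  if (l->initialized) /* no-op unless init ran, like the _if_init variants */
    pthread_mutex_lock (&l->m);
}

static void
opt_unlock (opt_lock_t *l)
{
  if (l->initialized)
    pthread_mutex_unlock (&l->m);
}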