FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Source listing excerpt: AVF plugin input node.
26 #define foreach_avf_input_error \
27 _(BUFFER_ALLOC, "buffer alloc error")
31 #define _(f,s) AVF_INPUT_ERROR_##f,
43 #define AVF_INPUT_REFILL_TRESHOLD 32
48 #ifdef CLIB_HAVE_VEC256
50 u64x4_store_unaligned (v, (void *) d);
82 AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
137 u32 tlnifb = 0, i = 0;
148 b->flags |= VLIB_BUFFER_NEXT_PRESENT;
156 hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
167 for (n = 0; n < n_rx_packets; n++)
200 uword n_rx_bytes = 0;
269 u32 n_trace, n_rx_packets = 0, n_rx_bytes = 0;
272 u32 *bi, *to_next, n_left_to_next;
279 #ifdef CLIB_HAVE_VEC256
280 u64x4 q1x4, or_q1x4 = { 0 };
283 #elif defined(CLIB_HAVE_VEC128)
284 u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
322 #ifdef CLIB_HAVE_VEC256
326 q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
327                      (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
330 if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
335 u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
336 #elif defined(CLIB_HAVE_VEC128)
341 u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
342               (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
345 if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
349 (void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
350 (void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);
353 ptd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
354 ptd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
355 ptd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
356 ptd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
358 #if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
363 (void *) &d[0].fdid_flex_hi, (void *) &d[1].fdid_flex_hi,
364 (void *) &d[2].fdid_flex_hi, (void *) &d[3].fdid_flex_hi);
365 u32x4_store_unaligned (fdidx4, ptd->flow_ids + n_rx_packets);
395 tail_next = (tail_next + 1) & mask;
402 or_qw1 |= tail->qw1s[tail_desc] = td[0].qword[1];
408 n_tail_desc += tail_desc;
411 or_qw1 |= ptd->qw1s[n_rx_packets] = d[0].qword[1];
425 if (n_rx_packets == 0)
429 rxq->n_enqueued -= n_rx_packets + n_tail_desc;
434 #if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
435 or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
455 for (n = 0; n < n_rx_packets; n++)
532 n_left_to_next -= n_rx_packets;
542 if (ad->flags & AVF_DEVICE_F_VA_DMA)
561 if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
577 .sibling_of = "device-input",
580 .state = VLIB_NODE_STATE_DISABLED,
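
The fragments above trace the node's fast path: poll four RX descriptors at a time, gather their status qwords (u64x4_gather on 256-bit targets, two u32x4_gather calls on 128-bit ones), test the DD and EOP status bits with a single vector compare, store the qwords into ptd->qw1s, and hand descriptors that do not end a packet to the tail/chain handling around lines 395-411. A minimal sketch of the 256-bit check, assuming vppinfra's vector headers; demo_rx_desc_t, the DEMO_RXD_* bit values and the helper name are illustrative, not taken from this file:

#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

/* Simplified stand-in for the RX descriptor: qword[1] carries the status
 * bits (DD = descriptor done, EOP = end of packet) after writeback. */
typedef struct { u64 qword[4]; } demo_rx_desc_t;

#define DEMO_RXD_STATUS_DD  (1ULL << 0) /* assumed bit positions */
#define DEMO_RXD_STATUS_EOP (1ULL << 1)

#ifdef CLIB_HAVE_VEC256
/* Return 1 when all four descriptors are complete single-buffer packets and
 * copy their qword[1] values into qw1s, as the listing does at lines 326-335. */
static_always_inline int
demo_four_done (demo_rx_desc_t *d, u64 *qw1s)
{
  u64x4 dd_eop_mask4 = u64x4_splat (DEMO_RXD_STATUS_DD | DEMO_RXD_STATUS_EOP);
  u64x4 q1x4 = u64x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
                             (void *) &d[2].qword[1], (void *) &d[3].qword[1]);

  if (!u64x4_is_equal (q1x4 & dd_eop_mask4, dd_eop_mask4))
    return 0; /* at least one descriptor is not done or not end-of-packet */

  u64x4_store_unaligned (q1x4, qw1s);
  return 1;
}
#endif

The lines that follow are the Doxygen cross-references for symbols used in this file.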
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
u32 next_buffer
Next buffer for this linked-list of buffers.
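
next_buffer is the link the node uses when a packet spans more than one descriptor; the fragments at lines 148 and 156 set VLIB_BUFFER_NEXT_PRESENT on each buffer in the chain and mark the head's total length as valid. A hedged sketch of that assembly step, assuming the tail buffer indices are already known; demo_link_chain is an illustrative name:

#include <vlib/vlib.h>

/* Link n_tails buffers behind head buffer hb and record the combined length
 * of the tails in total_length_not_including_first_buffer, mirroring the
 * pattern visible around lines 137-156 of the listing. */
static u32
demo_link_chain (vlib_main_t *vm, vlib_buffer_t *hb, u32 *tail_bi, u16 n_tails)
{
  vlib_buffer_t *b = hb;
  u32 tlnifb = 0;

  for (u16 i = 0; i < n_tails; i++)
    {
      b->next_buffer = tail_bi[i];
      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, tail_bi[i]);
      tlnifb += b->current_length;
    }

  hb->total_length_not_including_first_buffer = tlnifb;
  hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return tlnifb;
}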
format_function_t format_avf_input_trace
avf_rx_tail_t tails[AVF_RX_VECTOR_SZ]
u8 buffer_pool_index
index of the buffer pool this buffer belongs to.
u64 qw1s[AVF_RX_MAX_DESC_IN_CHAIN - 1]
vlib_frame_t * frame
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
static __clib_warn_unused_result u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
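
This allocator is what the refill path leans on (see the AVF_INPUT_REFILL_TRESHOLD define at line 43 and the BUFFER_ALLOC error counter at line 82): new buffers are allocated straight into the descriptor ring from the interface's buffer pool, and a shortfall is counted as an error. A hedged sketch; the helper name, the fixed ring size and the error index are illustrative:

#include <vlib/vlib.h>

#define DEMO_RING_SIZE 1024 /* illustrative; the real size comes from the rxq */

/* Allocate up to n_refill buffers into ring[] starting at slot first,
 * wrapping at DEMO_RING_SIZE, and count an error if fewer were available. */
static u32
demo_refill (vlib_main_t *vm, vlib_node_runtime_t *node, u32 *ring, u32 first,
             u32 n_refill, u8 buffer_pool_index, u32 alloc_error_counter)
{
  u32 n_alloc = vlib_buffer_alloc_to_ring_from_pool (
    vm, ring, first, DEMO_RING_SIZE, n_refill, buffer_pool_index);

  if (PREDICT_FALSE (n_alloc != n_refill))
    vlib_error_count (vm, node->node_index, alloc_error_counter, 1);
  return n_alloc;
}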
#define AVF_RXD_ERROR_IPE
vlib_buffer_copy_indices(to, tmp, n_free)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword pointer_to_uword(const void *p)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
vlib_get_buffers(vm, from, b, n_left_from)
vlib_node_runtime_t * node
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
vlib_main_t * vm
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
u16 next[AVF_RX_VECTOR_SZ]
vlib_buffer_enqueue_to_next(vm, node, from,(u16 *) nexts, frame->n_vectors)
u32 buffers[AVF_RX_MAX_DESC_IN_CHAIN - 1]
static uword vlib_buffer_get_pa(vlib_main_t *vm, vlib_buffer_t *b)
@ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static void vlib_frame_no_append(vlib_frame_t *f)
#define CLIB_PREFETCH(addr, size, type)
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u32 flow_ids[AVF_RX_VECTOR_SZ]
#define VLIB_NODE_FN(node)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vnet_main_t * vnet_get_main(void)
static_always_inline uword vlib_get_thread_index(void)
#define AVF_RX_MAX_DESC_IN_CHAIN
#define static_always_inline
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
#define AVF_RXD_STATUS_DD
volatile u8 ref_count
Reference count for this buffer.
#define AVF_RXD_LEN_SHIFT
u32 flow_id
Generic flow identifier.
@ VNET_INTERFACE_COUNTER_RX
u64 qw1s[AVF_RX_VECTOR_SZ]
#define CLIB_CACHE_LINE_BYTES
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
struct _vlib_node_registration vlib_node_registration_t
vlib_combined_counter_main_t * combined_sw_if_counters
static_always_inline u64x4 u64x4_gather(void *p0, void *p1, void *p2, void *p3)
u16 current_length
Number of bytes between current data and the end of this buffer.
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
static void avf_tail_write(volatile u32 *addr, u32 val)
#define AVF_RXD_STATUS_FLM
vlib_buffer_t buffer_template
vlib_put_next_frame(vm, node, next_index, 0)
static_always_inline int avf_rxd_is_not_eop(avf_rx_desc_t *d)
#define AVF_RXD_STATUS_EOP
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
static_always_inline vnet_hw_if_rxq_poll_vector_t * vnet_hw_if_get_rxq_poll_vector(vlib_main_t *vm, vlib_node_runtime_t *node)
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
u32 per_interface_next_index
avf_flow_lookup_entry_t * flow_lookup_entries
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
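
vnet_device_input_have_features and vnet_feature_start_device_input_x1 (both listed here) are the standard way a device input node decides whether a received packet goes straight to ethernet-input or enters the device-input feature arc. A hedged sketch; the helper name and the include paths are assumptions:

#include <vnet/vnet.h>
#include <vnet/devices/devices.h>
#include <vnet/feature/feature.h>

/* Pick the next node for a buffer received on sw_if_index: ethernet-input by
 * default, or the first enabled device-input feature on that interface. */
static u32
demo_pick_next (u32 sw_if_index, vlib_buffer_t *b0)
{
  u32 next0 = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  if (PREDICT_FALSE (vnet_device_input_have_features (sw_if_index)))
    vnet_feature_start_device_input_x1 (sw_if_index, &next0, b0);
  return next0;
}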
static_always_inline u32x4 u32x4_gather(void *p0, void *p1, void *p2, void *p3)
static_always_inline int avf_rxd_is_not_dd(avf_rx_desc_t *d)
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
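
vlib_trace_buffer, vlib_add_trace and vlib_set_trace_count are the usual per-packet tracing trio in input nodes: read the remaining trace budget with vlib_get_trace_count, mark the buffer, copy node-specific data, then write the budget back. A hedged sketch with a bare u32 as the trace record; the helper name is illustrative:

#include <vlib/vlib.h>

/* Capture one packet into the trace buffer if budget remains; returns the
 * updated budget so the caller can keep counting across the frame. */
static u32
demo_trace_one (vlib_main_t *vm, vlib_node_runtime_t *node, u32 n_trace,
                vlib_buffer_t *b, u32 next_index, u32 trace_data)
{
  if (n_trace && vlib_trace_buffer (vm, node, next_index, b,
                                    /* follow_chain */ 0))
    {
      u32 *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
      t[0] = trace_data;
      vlib_set_trace_count (vm, node, --n_trace);
    }
  return n_trace;
}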
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
vlib_buffer_t * bufs[AVF_RX_VECTOR_SZ]
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
vnet_interface_main_t interface_main
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
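
The combined_sw_if_counters and VNET_INTERFACE_COUNTER_RX entries above are how the node credits received packets and bytes to the interface, on the calling thread. A hedged sketch of that accounting step; the helper name is illustrative:

#include <vnet/vnet.h>

/* Credit n_rx_packets and n_rx_bytes to the RX combined counter of
 * sw_if_index for the current worker thread. */
static void
demo_count_rx (u32 sw_if_index, u32 n_rx_packets, u32 n_rx_bytes)
{
  vnet_main_t *vnm = vnet_get_main ();

  vlib_increment_combined_counter (
    vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
    vlib_get_thread_index (), sw_if_index, n_rx_packets, n_rx_bytes);
}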
static_always_inline avf_device_t * avf_get_device(u32 dev_instance)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)
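
VLIB_REGISTER_NODE and VLIB_NODE_FN tie the node together; the fragment at lines 577-580 shows it registered as a sibling of device-input and created in the disabled state. A hedged sketch of that registration shape with a stub dispatch function; the node name and other identifiers are hypothetical:

#include <vlib/vlib.h>

/* Stub dispatch function: a real input node would poll its device here and
 * return the number of vectors it produced. */
VLIB_NODE_FN (demo_input_node) (vlib_main_t *vm, vlib_node_runtime_t *node,
                                vlib_frame_t *frame)
{
  return 0;
}

VLIB_REGISTER_NODE (demo_input_node) = {
  .name = "demo-input",
  .sibling_of = "device-input",      /* inherit device-input's next nodes */
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED, /* enabled only when a device attaches */
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
};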