FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
27 #define foreach_af_xdp_input_error \
28 _ (SYSCALL_REQUIRED, "syscall required") \
29 _ (SYSCALL_FAILURES, "syscall failures")
33 #define _(f,s) AF_XDP_INPUT_ERROR_##f,
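These two fragments are the usual VPP error-counter X-macro pattern: the foreach list is expanded once into an error enum and once into the per-counter strings handed to the node registration. A minimal sketch of that expansion, assuming the conventional af_xdp_input_error_t and af_xdp_input_error_strings names:

typedef enum
{
#define _(f, s) AF_XDP_INPUT_ERROR_##f,
  foreach_af_xdp_input_error
#undef _
    AF_XDP_INPUT_N_ERROR,	/* total number of counters */
} af_xdp_input_error_t;

static __clib_unused char *af_xdp_input_error_strings[] = {
#define _(n, s) s,
  foreach_af_xdp_input_error
#undef _
};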
80 xsk_ring_prod__submit (&rxq->fq, n_alloc);
83 !xsk_ring_prod__needs_wakeup (&rxq->fq))
88 AF_XDP_INPUT_ERROR_SYSCALL_REQUIRED, 1);
92 struct pollfd fd = { .fd = rxq->xsk_fd, .events = POLLIN | POLLOUT };
93 int ret = poll (&fd, 1, 0);
100 AF_XDP_INPUT_ERROR_SYSCALL_FAILURES, 1);
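Lines 80-100 implement the AF_XDP need_wakeup protocol: after submitting fresh fill-ring entries, the node only issues a syscall when the kernel asks for a wakeup, counting SYSCALL_REQUIRED when it does and SYSCALL_FAILURES when poll() fails. A hedged sketch of that protocol in isolation, reusing the rxq->fq and rxq->xsk_fd fields visible above (the file's exact control flow, locking, and interrupt-mode handling are omitted):

xsk_ring_prod__submit (&rxq->fq, n_alloc);	/* publish the refilled slots */
if (xsk_ring_prod__needs_wakeup (&rxq->fq))	/* kernel requested a kick */
  {
    vlib_error_count (vm, node->node_index, AF_XDP_INPUT_ERROR_SYSCALL_REQUIRED, 1);
    struct pollfd fd = { .fd = rxq->xsk_fd, .events = POLLIN | POLLOUT };
    if (poll (&fd, 1, 0) < 0)			/* zero timeout: wake-up only */
      vlib_error_count (vm, node->node_index, AF_XDP_INPUT_ERROR_SYSCALL_FAILURES, 1);
  }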
115 u32 n_alloc, n, n_wrap;
121 n_alloc = xsk_prod_nb_free (&rxq->fq, 16);
128 n = xsk_ring_prod__reserve (&rxq->fq, n_alloc, &idx);
131 fill = xsk_ring_prod__fill_addr (&rxq->fq, idx);
133 n_wrap = n_alloc - n;
135 #define bi2addr(bi) ((bi) << CLIB_LOG2_CACHE_LINE_BYTES)
141 #ifdef CLIB_HAVE_VEC256
142 u64x4 b0 = u64x4_from_u32x4 (*(u32x4u *) (bi + 0));
143 u64x4 b1 = u64x4_from_u32x4 (*(u32x4u *) (bi + 4));
144 *(u64x4u *) (fill + 0) = bi2addr (b0);
145 *(u64x4u *) (fill + 4) = bi2addr (b1);
171 fill = xsk_ring_prod__fill_addr (&rxq->fq, 0);
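The refill path (lines 115-171) turns freshly allocated VPP buffer indices into UMEM addresses with bi2addr() and copies them into the fill ring, using 256-bit vector stores when CLIB_HAVE_VEC256 is set and restarting at slot 0 when the reservation wraps past the ring end. A scalar sketch of the same idea; size and mask are assumed to be the fill-ring size and size - 1, and bi is assumed to hold indices returned by vlib_buffer_alloc_from_pool():

if (xsk_ring_prod__reserve (&rxq->fq, n_alloc, &idx) != n_alloc)
  return;					/* ring unexpectedly full */
fill = xsk_ring_prod__fill_addr (&rxq->fq, idx);
n = clib_min (n_alloc, size - (idx & mask));	/* contiguous run before the ring end */
n_wrap = n_alloc - n;				/* remainder after the wrap */

while (n--)
  *fill++ = bi2addr (*bi++);			/* buffer index -> UMEM byte offset */
if (n_wrap)
  {
    fill = xsk_ring_prod__fill_addr (&rxq->fq, 0);	/* restart at the ring base */
    while (n_wrap--)
      *fill++ = bi2addr (*bi++);
  }
xsk_ring_prod__submit (&rxq->fq, n_alloc);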
183 const u32 hw_if_index)
212 u32 n = n_rx, *bi = bis, bytes = 0;
214 #define addr2bi(addr) ((addr) >> CLIB_LOG2_CACHE_LINE_BYTES)
218 const struct xdp_desc *desc = xsk_ring_cons__rx_desc (&rxq->rx, idx);
225 idx = (idx + 1) & mask;
277 xsk_ring_cons__release (&rxq->rx, n_rx);
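On the consumer side (lines 212-277), each RX descriptor's UMEM address is mapped back to a VPP buffer index with addr2bi(), the ring index advances under a power-of-two mask, and the consumed slots are returned to the kernel with xsk_ring_cons__release(). A hedged sketch of that bookkeeping; it assumes mask is the RX ring size minus one and ignores any intra-buffer data offset that desc->addr may carry (the file also batches buffer-pointer translation and buffer-template copies, which are omitted here):

while (n--)
  {
    const struct xdp_desc *desc = xsk_ring_cons__rx_desc (&rxq->rx, idx);
    bi[0] = addr2bi (desc->addr);		/* UMEM offset -> buffer index */
    vlib_buffer_t *b = vlib_get_buffer (vm, bi[0]);
    b->current_length = desc->len;		/* packet length reported by the kernel */
    bytes += desc->len;
    bi += 1;
    idx = (idx + 1) & mask;			/* RX ring size is a power of two */
  }
xsk_ring_cons__release (&rxq->rx, n_rx);	/* hand the slots back to the kernel */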
289 u32 n_rx_packets, n_rx_bytes;
337 if ((ad->flags & AF_XDP_DEVICE_F_ADMIN_UP) == 0)
345 #ifndef CLIB_MARCH_VARIANT
358 .name = "af_xdp-input",
359 .sibling_of = "device-input",
362 .state = VLIB_NODE_STATE_DISABLED,
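The registration fragments (lines 345-362) follow the standard pattern for a VPP device input node: compiled once for the base march variant (#ifndef CLIB_MARCH_VARIANT), declared a sibling of device-input so it inherits the same next-node arcs, and left VLIB_NODE_STATE_DISABLED until an RX queue is assigned to it. A sketch of a full registration consistent with those fragments; the trace and error fields are assumptions based on common VPP practice, not a verbatim copy of the file:

VLIB_REGISTER_NODE (af_xdp_input_node) = {
  .name = "af_xdp-input",
  .sibling_of = "device-input",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,		/* enabled per interface at runtime */
  .format_trace = format_af_xdp_input_trace,	/* assumed trace formatter */
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .n_errors = AF_XDP_INPUT_N_ERROR,		/* assumed, matching the enum sketch above */
  .error_strings = af_xdp_input_error_strings,
};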