FD.io VPP v21.10.1-2-g0a485f517 (Vector Packet Processing)
42 "dpdk flags not in lower word, fix needed");
49 struct rte_mbuf *mb_seg = 0;
57 b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
60 while (nb_seg < mb->nb_segs)
72 (mb_seg->buf_addr + mb_seg->data_off) - (void *) b_seg->data;
77 b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
81 mb_seg = mb_seg->next;
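The fragment above (lines 49-81) mirrors each additional rte_mbuf segment as a linked vlib buffer. Below is a minimal sketch of that idea, assuming the dpdk-plugin buffer layout in which the vlib_buffer_t metadata sits directly after the rte_mbuf header (what vlib_buffer_from_rte_mbuf() encodes); the helper name subseq_segs_sketch is hypothetical and it omits the buffer-template copy the real function performs.

#include <rte_mbuf.h>
#include <vlib/vlib.h>

/* Hypothetical helper: mirror an rte_mbuf segment chain as a vlib buffer
   chain.  Assumes the vlib_buffer_t sits right after the rte_mbuf header. */
static uword
subseq_segs_sketch (vlib_main_t *vm, vlib_buffer_t *b, struct rte_mbuf *mb)
{
  u8 nb_seg = 1;
  struct rte_mbuf *mb_seg = mb->next;
  vlib_buffer_t *b_chain = b;
  uword n_bytes = 0;

  while (nb_seg < mb->nb_segs)
    {
      /* vlib_buffer_from_rte_mbuf(): vlib metadata assumed right after the mbuf. */
      vlib_buffer_t *b_seg = (vlib_buffer_t *) (mb_seg + 1);

      /* Point the vlib view of this segment at the mbuf payload. */
      b_seg->current_data =
        (i16) (((u8 *) mb_seg->buf_addr + mb_seg->data_off) - b_seg->data);
      b_seg->current_length = mb_seg->data_len;
      n_bytes += mb_seg->data_len;

      /* Link the previous buffer in the chain to this one. */
      b_chain->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b_chain->next_buffer = vlib_get_buffer_index (vm, b_seg);

      b_chain = b_seg;
      mb_seg = mb_seg->next;
      nb_seg++;
    }

  b->total_length_not_including_first_buffer = n_bytes;
  b->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return n_bytes;
}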
166 uword n_rx_packets, int maybe_multiseg, u32 *or_flagsp)
170 struct rte_mbuf **mb = ptd->mbufs;
242 *or_flagsp = or_flags;
255 for (n = 0; n < n_rx_packets; n++)
257 if ((ptd->flags[n] & PKT_RX_FDIR_ID) == 0)
261 ptd->mbufs[n]->hash.fdir.hi);
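Lines 255-261 belong to the flow-offload pass: when the NIC sets PKT_RX_FDIR_ID on an mbuf, the matched flow-director rule id is read from hash.fdir.hi. A standalone sketch, with a hypothetical flow_ids output array standing in for the plugin's lookup through flow_lookup_entries:

#include <stdint.h>
#include <rte_mbuf.h>

/* Sketch: collect the flow-director rule ids reported by the NIC.
   flow_ids[] is a hypothetical output array; the real node maps the id
   through its flow lookup table instead. */
static void
flow_offload_sketch (struct rte_mbuf **mbufs, uint32_t *flow_ids, int n_rx)
{
  for (int n = 0; n < n_rx; n++)
    {
      /* PKT_RX_FDIR_ID means hash.fdir.hi carries the matched rule id. */
      if ((mbufs[n]->ol_flags & PKT_RX_FDIR_ID) == 0)
        continue;
      flow_ids[n] = mbufs[n]->hash.fdir.hi;
    }
}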
284 u16 current_offset = 0;
290 current_offset += sizeof (e[0]);
291 ethertype = clib_net_to_host_u16 (e->type);
295 ethertype = clib_net_to_host_u16 (vlan->type);
296 current_offset += sizeof (*vlan);
297 if (ethertype == ETHERNET_TYPE_VLAN)
300 current_offset += sizeof (*vlan);
301 ethertype = clib_net_to_host_u16 (vlan->type);
304 data += current_offset;
305 if (ethertype == ETHERNET_TYPE_IP4)
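Lines 284-305 skip the Ethernet header and up to two stacked VLAN tags to find the inner ethertype before checking for IPv4. The self-contained sketch below shows the same walk using plain packed structs and ntohs() in place of VPP's ethernet header types and clib_net_to_host_u16():

#include <stdint.h>
#include <arpa/inet.h>   /* ntohs() stands in for clib_net_to_host_u16() */

#define ETHERTYPE_VLAN 0x8100
#define ETHERTYPE_IP4  0x0800

struct eth_hdr  { uint8_t dst[6], src[6]; uint16_t type; } __attribute__ ((packed));
struct vlan_hdr { uint16_t tci, type; } __attribute__ ((packed));

/* Return the offset of the L3 header and report its ethertype,
   skipping at most two stacked VLAN tags (QinQ). */
static uint16_t
l3_offset_sketch (const uint8_t *data, uint16_t *ethertype)
{
  uint16_t off = sizeof (struct eth_hdr);
  *ethertype = ntohs (((const struct eth_hdr *) data)->type);

  if (*ethertype == ETHERTYPE_VLAN)
    {
      const struct vlan_hdr *vlan = (const struct vlan_hdr *) (data + off);
      *ethertype = ntohs (vlan->type);
      off += sizeof (*vlan);
      if (*ethertype == ETHERTYPE_VLAN)   /* second tag (QinQ) */
        {
          vlan = (const struct vlan_hdr *) (data + off);
          *ethertype = ntohs (vlan->type);
          off += sizeof (*vlan);
        }
    }
  return off;   /* data + off now points at the L3 header, e.g. IPv4 */
}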
327 for (n = 0; n < n_rx_packets; n++)
330 if (ptd->flags[n] & PKT_RX_LRO)
332 b0->flags |= VNET_BUFFER_F_GSO;
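Lines 327-332 are part of the LRO pass: a buffer whose mbuf carries PKT_RX_LRO is tagged as GSO so it can be re-segmented on output. A short sketch of that idea; recording the coalesced MSS from the mbuf's tso_segsz into vnet_buffer2(b)->gso_size is an assumption about where VPP keeps it:

#include <rte_mbuf.h>
#include <vnet/vnet.h>
#include <vnet/buffer.h>

/* Sketch: propagate NIC LRO coalescing into VPP GSO metadata so the
   packet can be re-segmented later. */
static void
lro_to_gso_sketch (vlib_buffer_t *b0, struct rte_mbuf *mb)
{
  if ((mb->ol_flags & PKT_RX_LRO) == 0)
    return;
  b0->flags |= VNET_BUFFER_F_GSO;
  /* Assumed: tso_segsz reports the MSS the NIC coalesced with. */
  vnet_buffer2 (b0)->gso_size = mb->tso_segsz;
}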
343 uword n_rx_packets = 0, n_rx_bytes;
348 struct rte_mbuf **mb;
359 if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
365 n = rte_eth_rx_burst (xd->port_id, queue_id,
366 ptd->mbufs + n_rx_packets,
374 if (n_rx_packets == 0)
379 bt->error = node->errors[DPDK_ERROR_NONE];
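The surrounding excerpt (lines 343-379) is the per-queue poll: skip admin-down devices, read mbufs with rte_eth_rx_burst() into the per-thread array, and bail early when nothing arrived. A plain-DPDK sketch of that polling loop, with an illustrative RX_BURST_SZ standing in for DPDK_RX_BURST_SZ:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_mbuf.h>

#define RX_BURST_SZ 256   /* illustrative budget, not the plugin's constant */

/* Poll one RX queue until the local mbuf array is full or the device has
   nothing more to give.  Returns the number of mbufs received. */
static uint32_t
poll_rx_queue_sketch (uint16_t port_id, uint16_t queue_id,
                      struct rte_mbuf **mbufs)
{
  uint32_t n_rx = 0;

  while (n_rx + 32 <= RX_BURST_SZ)
    {
      uint16_t n = rte_eth_rx_burst (port_id, queue_id, mbufs + n_rx, 32);
      n_rx += n;
      if (n < 32)   /* short burst: the queue is drained for now */
        break;
    }
  return n_rx;
}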
396 if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
407 for (n = 0; n < n_rx_packets; n++)
413 (or_flags & PKT_RX_FDIR)))
419 sizeof (struct rte_mbuf));
426 u32 *to_next, n_left_to_next;
431 sizeof (struct rte_mbuf));
449 if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
450 (or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
454 n_left_to_next -= n_rx_packets;
465 sizeof (struct rte_mbuf));
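The repeated sizeof (struct rte_mbuf) argument (lines 419, 431, 465) is the offset between an mbuf pointer and its vlib buffer: in the dpdk plugin the vlib_buffer_t is assumed to follow the rte_mbuf header directly, which is also what the vlib_buffer_from_rte_mbuf() macro expresses. A minimal sketch of that translation:

#include <rte_mbuf.h>
#include <vlib/vlib.h>

/* Sketch of the arithmetic behind the sizeof (struct rte_mbuf) offset:
   each mbuf pointer plus that offset is the matching vlib_buffer_t, so a
   whole burst can be converted to buffer indices in one call. */
static void
mbufs_to_buffer_indices_sketch (vlib_main_t *vm, struct rte_mbuf **mbufs,
                                u32 *buffer_indices, uword count)
{
  vlib_get_buffer_indices_with_offset (vm, (void **) mbufs, buffer_indices,
                                       count, sizeof (struct rte_mbuf));
}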
475 if (single_next == 0)
491 sizeof b0[0] - sizeof b0->pre_data);
510 n_rx_packets, n_rx_bytes);
522 uword n_rx_packets = 0;
544 .name = "dpdk-input",
545 .sibling_of = "device-input",
549 .state = VLIB_NODE_STATE_DISABLED,
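Lines 544-549 are part of the node registration: dpdk-input shares its next-node arcs with device-input via .sibling_of and starts disabled until a device enables it. A hypothetical registration following the same pattern (only the three fields shown in the listing are taken from it):

#include <vlib/vlib.h>

/* Hypothetical polling input node registered the way the listing shows
   for dpdk-input: sibling of "device-input", disabled at startup. */
VLIB_NODE_FN (example_input_node) (vlib_main_t *vm,
                                   vlib_node_runtime_t *node,
                                   vlib_frame_t *frame)
{
  return 0;   /* no packets in this sketch */
}

VLIB_REGISTER_NODE (example_input_node) = {
  .name = "example-input",
  .sibling_of = "device-input",
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_DISABLED,
};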
static vlib_frame_t * vlib_get_frame(vlib_main_t *vm, vlib_frame_t *f)
u32 next_buffer
Next buffer for this linked-list of buffers.
u8 buffer_pool_index
Index of the buffer pool this buffer belongs to.
static int tcp_header_bytes(tcp_header_t *t)
nat44_ei_hairpin_src_next_t next_index
format_function_t format_dpdk_rx_trace
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
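vlib_get_buffer() and its inverse vlib_get_buffer_index() (further down this list) convert between the 32-bit buffer indices carried in frames and buffer pointers. A small round-trip example, assuming bi0 is a valid index taken from a frame's vector:

#include <vlib/vlib.h>

/* Index -> pointer -> index round trip. */
static void
buffer_index_roundtrip_sketch (vlib_main_t *vm, u32 bi0)
{
  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);    /* index to pointer */
  u32 bi_again = vlib_get_buffer_index (vm, b0);    /* pointer back to index */
  ASSERT (bi_again == bi0);
}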
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline int ethernet_frame_is_tagged(u16 type)
struct _tcp_header tcp_header_t
static_always_inline u16 dpdk_lro_find_l4_hdr_sz(vlib_buffer_t *b)
vlib_node_runtime_t * node
static_always_inline int vnet_device_input_have_features(u32 sw_if_index)
vlib_main_t * vm
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
vlib_buffer_enqueue_to_next (vm, node, from, (u16 *) nexts, frame->n_vectors)
static_always_inline void dpdk_prefetch_buffer_x4(struct rte_mbuf *mb[])
@ VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
static void vlib_frame_no_append(vlib_frame_t *f)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
u32 per_interface_next_index
static_always_inline uword dpdk_process_subseq_segs(vlib_main_t *vm, vlib_buffer_t *b, struct rte_mbuf *mb, vlib_buffer_t *bt)
static void vlib_buffer_advance(vlib_buffer_t *b, word l)
Advance current data pointer by the supplied (signed!) amount.
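vlib_buffer_advance() pairs naturally with vlib_buffer_get_current() (see below): advance past a header you are done with, then read the next one at the new current-data position. A sketch for an untagged Ethernet frame, with no VLAN handling:

#include <vlib/vlib.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ip/ip4_packet.h>

/* Skip the Ethernet header of an untagged frame and return the IPv4
   header now at the front of the buffer. */
static ip4_header_t *
skip_ethernet_sketch (vlib_buffer_t *b)
{
  vlib_buffer_advance (b, sizeof (ethernet_header_t));
  return (ip4_header_t *) vlib_buffer_get_current (b);
}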
static_always_inline uword dpdk_process_rx_burst(vlib_main_t *vm, dpdk_per_thread_data_t *ptd, uword n_rx_packets, int maybe_multiseg, u32 *or_flagsp)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
vlib_error_t error
Error code for buffers to be enqueued to error handler.
#define foreach_dpdk_error
#define VLIB_NODE_FN(node)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vnet_main_t * vnet_get_main(void)
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
#define static_always_inline
#define vlib_buffer_from_rte_mbuf(x)
static_always_inline void dpdk_process_flow_offload(dpdk_device_t *xd, dpdk_per_thread_data_t *ptd, uword n_rx_packets)
dpdk_per_thread_data_t * per_thread_data
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
volatile u8 ref_count
Reference count for this buffer.
STATIC_ASSERT(STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l2_hdr_offset)==STRUCT_OFFSET_OF(vnet_buffer_opaque_t, l3_hdr_offset) - 2, "l3_hdr_offset must follow l2_hdr_offset")
u32 flow_id
Generic flow identifier.
@ VNET_INTERFACE_COUNTER_RX
static_always_inline void dpdk_prefetch_mbuf_x4(struct rte_mbuf *mb[])
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
struct _vlib_node_registration vlib_node_registration_t
u16 current_length
Nbytes between current data and the end of this buffer.
static void * vlib_frame_scalar_args(vlib_frame_t *f)
Get pointer to frame scalar data.
u16 next[DPDK_RX_BURST_SZ]
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
static_always_inline u32 dpdk_device_input(vlib_main_t *vm, dpdk_main_t *dm, dpdk_device_t *xd, vlib_node_runtime_t *node, u32 thread_index, u16 queue_id)
dpdk_flow_lookup_entry_t * flow_lookup_entries
vlib_put_next_frame(vm, node, next_index, 0)
static_always_inline u32 dpdk_ol_flags_extract(struct rte_mbuf **mb, u32 *flags, int count)
Main DPDK input node.
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
static_always_inline void clib_prefetch_load(void *p)
#define ETH_INPUT_FRAME_F_IP4_CKSUM_OK
static_always_inline vnet_hw_if_rxq_poll_vector_t * vnet_hw_if_get_rxq_poll_vector(vlib_main_t *vm, vlib_node_runtime_t *node)
#define ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX
#define vlib_get_new_next_frame(vm, node, next_index, vectors, n_vectors_left)
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
u32 buffers[DPDK_RX_BURST_SZ]
static char * dpdk_error_strings[]
static_always_inline void dpdk_process_lro_offload(dpdk_device_t *xd, dpdk_per_thread_data_t *ptd, uword n_rx_packets)
static void vnet_device_increment_rx_packets(u32 thread_index, u64 count)
u32 flags[DPDK_RX_BURST_SZ]
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
static vlib_next_frame_t * vlib_node_runtime_get_next_frame(vlib_main_t *vm, vlib_node_runtime_t *n, u32 next_index)
vlib_increment_combined_counter(ccm, ti, sw_if_index, n_buffers, n_bytes)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
dpdk_rx_queue_t * rx_queues
vlib_buffer_t buffer_template
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)