FD.io VPP v21.10.1-2-g0a485f517
Vector Packet Processing
27 #define foreach_dpdk_tx_func_error \
28 _(BAD_RETVAL, "DPDK tx function returned an error") \
29 _(PKT_DROP, "Tx packet drops (dpdk tx failure)")
33 #define _(f,s) DPDK_TX_FUNC_ERROR_##f,
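This is the X-macro pattern VPP uses throughout: the same error list expands once into enum members and once into the counter strings. A minimal self-contained sketch of both expansions (the typedef and array shapes follow the usual VPP convention; the exact surrounding context in this file may differ slightly):

    #define foreach_dpdk_tx_func_error \
      _(BAD_RETVAL, "DPDK tx function returned an error") \
      _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

    /* expand once into enum members ... */
    typedef enum
    {
    #define _(f, s) DPDK_TX_FUNC_ERROR_##f,
      foreach_dpdk_tx_func_error
    #undef _
        DPDK_TX_FUNC_N_ERROR,
    } dpdk_tx_func_error_t;

    /* ... and once into the matching counter strings */
    static char *dpdk_tx_func_error_strings[] = {
    #define _(n, s) s,
      foreach_dpdk_tx_func_error
    #undef _
    };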
55 (struct rte_ether_addr *) address, 0);
58 (struct rte_ether_addr *) address);
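Lines 55 and 58 are the tails of rte_eth_dev_mac_addr_add and rte_eth_dev_mac_addr_remove calls (see dpdk_add_del_mac_address below): the six-byte VPP address is cast straight to struct rte_ether_addr, and the trailing 0 on the add path is the VMDq pool index. A hedged standalone sketch of the same add/del logic; the function name and parameters here are illustrative, not this file's:

    #include <rte_ethdev.h>
    #include <rte_ether.h>

    /* add or remove a secondary unicast MAC filter on a port;
       returns 0 or a negative DPDK error code */
    static int
    mac_filter_add_del (uint16_t port_id, uint8_t address[6], int is_add)
    {
      if (is_add)
        /* pool 0: default VMDq pool */
        return rte_eth_dev_mac_addr_add (port_id,
                                         (struct rte_ether_addr *) address, 0);
      return rte_eth_dev_mac_addr_remove (port_id,
                                          (struct rte_ether_addr *) address);
    }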
118 struct rte_mbuf *mb, *first_mb, *last_mb;
124 rte_pktmbuf_reset (mb);
126 first_mb->nb_segs = 1;
132 while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
137 rte_pktmbuf_reset (mb);
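The multiseg loop at line 132 walks a chained vlib buffer (VLIB_BUFFER_NEXT_PRESENT) and rebuilds the matching rte_mbuf chain: nb_segs and pkt_len accumulate on the first mbuf while next pointers link the segments. A minimal sketch of that chaining in plain DPDK terms, assuming each segment's data_len is already set:

    #include <stddef.h>
    #include <rte_mbuf.h>

    /* stitch per-segment mbufs into one packet, accumulating totals on
       the head segment the way the multiseg loop above does */
    static void
    chain_segments (struct rte_mbuf **segs, uint16_t n_segs)
    {
      struct rte_mbuf *first = segs[0], *last = segs[0];

      first->nb_segs = 1;
      first->pkt_len = first->data_len;
      for (uint16_t i = 1; i < n_segs; i++)
        {
          last->next = segs[i];
          last = segs[i];
          first->nb_segs++;
          first->pkt_len += last->data_len;
        }
      last->next = NULL;
    }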
178 n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
205 while (n_sent && n_left && (n_retry > 0));
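rte_eth_tx_burst may accept fewer packets than offered, so tx_burst_vector_internal advances the mbuf array by what was sent and retries, with the loop condition shown at line 205. A minimal sketch of that pattern, assuming the caller owns the mbuf array; the retry budget here is arbitrary:

    #include <rte_ethdev.h>

    /* try to transmit n_left mbufs, retrying while the PMD keeps
       accepting packets; returns how many were NOT sent */
    static uint16_t
    tx_burst_with_retry (uint16_t port_id, uint16_t queue_id,
                         struct rte_mbuf **mb, uint16_t n_left)
    {
      int n_retry = 16;          /* arbitrary retry budget for this sketch */
      uint16_t n_sent;

      do
        {
          n_sent = rte_eth_tx_burst (port_id, queue_id, mb, n_left);
          n_left -= n_sent;
          mb += n_sent;
          n_retry--;
        }
      while (n_sent && n_left && (n_retry > 0));
      return n_left;
    }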
222 int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
223 u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
224 u32 ip_cksum, tcp_cksum, udp_cksum, outer_hdr_len = 0;
225 u32 outer_ip_cksum, vxlan_tunnel;
234 ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
235 tcp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
236 udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
237 outer_ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM;
238 vxlan_tunnel = oflags & VNET_BUFFER_OFFLOAD_F_TNL_VXLAN;
240 ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
241 ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
242 ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
243 ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;
247 ol_flags |= outer_ip_cksum ? PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM :
248   PKT_TX_OUTER_IPV6;
249 ol_flags |= PKT_TX_TUNNEL_VXLAN;
258 outer_hdr_len = mb->outer_l2_len + mb->outer_l3_len;
265 mb->outer_l2_len = 0;
266 mb->outer_l3_len = 0;
274 max_pkt_len =
275   outer_hdr_len + mb->l2_len + mb->l3_len + mb->l4_len + mb->tso_segsz;
276 if (mb->tso_segsz != 0 && mb->pkt_len > max_pkt_len)
277 ol_flags |= (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG);
280 mb->ol_flags |= ol_flags;
284 if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
285 rte_net_intel_cksum_flags_prepare (mb, ol_flags);
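The flag-by-flag composition above maps VNET offload requests onto DPDK PKT_TX_* bits; for a VXLAN-encapsulated packet the outer_* lengths describe the outer headers, and l2_len absorbs the outer L4 + VXLAN + inner Ethernet headers. A hedged sketch for one concrete case, inner IPv4/TCP in a VXLAN tunnel with TSO; the header lengths are typical values, not universal:

    #include <rte_mbuf.h>

    /* request checksum + TSO offload for an inner IPv4/TCP packet
       carried in a VXLAN tunnel, mirroring dpdk_buffer_tx_offload */
    static void
    set_vxlan_tso_offloads (struct rte_mbuf *mb, uint16_t mss)
    {
      uint64_t ol_flags = PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;

      /* checksum the outer IPv4 header and mark the tunnel type */
      ol_flags |= PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM | PKT_TX_TUNNEL_VXLAN;

      mb->outer_l2_len = 14;        /* outer ethernet */
      mb->outer_l3_len = 20;        /* outer ipv4, no options */
      mb->l2_len = 8 + 8 + 14;      /* outer udp + vxlan + inner ethernet */
      mb->l3_len = 20;              /* inner ipv4 */
      mb->l4_len = 20;              /* inner tcp, no options */
      mb->tso_segsz = mss;
      ol_flags |= PKT_TX_TCP_SEG;

      mb->ol_flags |= ol_flags;
    }

As line 285 shows, devices flagged DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM additionally need rte_net_intel_cksum_flags_prepare to fix up pseudo-header checksums before the burst.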
304 u32 tx_pkts = 0, all_or_flags = 0;
307 struct rte_mbuf **mb;
314 (void **) ptd->mbufs, n_packets,
315 -(i32) sizeof (struct rte_mbuf));
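Lines 314-315 are the tail of a vlib_get_buffers_with_offset call (signature below): because VPP places the vlib_buffer_t immediately after the rte_mbuf header in the same buffer, passing -(i32) sizeof (struct rte_mbuf) as the offset translates a whole vector of buffer indices straight into mbuf pointers. The single-buffer form of that arithmetic, sketched with illustrative macro names (the real ones are rte_mbuf_from_vlib_buffer / vlib_buffer_from_rte_mbuf, referenced below):

    /* memory layout per buffer: [struct rte_mbuf][vlib_buffer_t][data...],
       so converting between the two views is constant-offset arithmetic */
    #define my_rte_mbuf_from_vlib_buffer(b) \
      ((struct rte_mbuf *) ((uint8_t *) (b) - sizeof (struct rte_mbuf)))
    #define my_vlib_buffer_from_rte_mbuf(m) \
      ((vlib_buffer_t *) ((uint8_t *) (m) + sizeof (struct rte_mbuf)))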
320 #if (CLIB_N_PREFETCHES >= 8)
336 all_or_flags |= or_flags;
338 if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
353 if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
354     (or_flags & VNET_BUFFER_F_OFFLOAD)))
364 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
366 if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
368 if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
370 if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
377 #elif (CLIB_N_PREFETCHES >= 4)
394 all_or_flags |= or_flags;
396 if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
407 if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
408     (or_flags & VNET_BUFFER_F_OFFLOAD)))
416 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
418 if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
430 all_or_flags |= b[0]->flags;
436 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
444 tx_pkts = n_packets = mb - ptd->mbufs;
465 rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
478 rte_eth_stats_reset (xd->port_id);
479 rte_eth_xstats_reset (xd->port_id);
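Clearing the hardware counters is two DPDK calls, one for the basic stats and one for the PMD's extended stats; a minimal sketch:

    #include <rte_ethdev.h>

    /* zero both basic and extended NIC counters for a port */
    static void
    clear_port_counters (uint16_t port_id)
    {
      (void) rte_eth_stats_reset (port_id);   /* ipackets, opackets, errors... */
      (void) rte_eth_xstats_reset (port_id);  /* PMD-specific extended stats */
    }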
490 if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
495 if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
500 xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
509 if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
511 xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
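The admin up/down callback is essentially this flag discipline wrapped around dpdk_device_start / dpdk_device_stop (both referenced below). A condensed sketch; the include path is assumed, and the real callback also rejects devices flagged DPDK_DEVICE_FLAG_PMD_INIT_FAIL (line 490):

    #include <dpdk/device/dpdk.h>

    /* condensed shape of dpdk_interface_admin_up_down's flag handling */
    static void
    admin_up_down (dpdk_device_t *xd, int is_up)
    {
      if (is_up)
        {
          if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
            dpdk_device_start (xd);
          xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
        }
      else
        {
          if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
            dpdk_device_stop (xd);
          xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
        }
    }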
559 if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
563 if (xd->pmd != VNET_DPDK_PMD_IXGBEVF)
576 vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
577 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
579 if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
589 rte_eth_dev_vlan_filter (xd->port_id,
600 xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
602 xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;
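Creating a dot1q subinterface enables the port's VLAN filter offload and then programs the VLAN id, exactly the two DPDK calls visible at lines 576-589. A self-contained sketch of that sequence (the function name and vlan_id parameter are illustrative):

    #include <rte_ethdev.h>

    /* enable VLAN filtering on a port and add/remove one VLAN id,
       as the subif add/del path does */
    static int
    subif_vlan_filter (uint16_t port_id, uint16_t vlan_id, int is_add)
    {
      int r, vlan_offload = rte_eth_dev_get_vlan_offload (port_id);

      vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
      if ((r = rte_eth_dev_set_vlan_offload (port_id, vlan_offload)))
        return r;
      return rte_eth_dev_vlan_filter (port_id, vlan_id, is_add);
    }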
613 u32 hw_if_index = hi->hw_if_index;
617 struct rte_eth_rss_reta_entry64 *reta_conf = NULL;
618 struct rte_eth_dev_info dev_info;
620 u16 *valid_queue = NULL;
621 u16 valid_queue_count = 0;
625 rte_eth_dev_info_get (xd->port_id, &dev_info);
648 clib_memset (reta, 0, dev_info.reta_size * sizeof (*reta));
650 valid_queue_count = 0;
653 if (i >= dev_info.nb_rx_queues)
658 reta[valid_queue_count++] = i;
663 if (valid_queue_count == 0)
670 for (i = valid_queue_count, j = 0; i < dev_info.reta_size; i++, j++)
672 j = j % valid_queue_count;
673 reta[i] = valid_queue[j];
678 reta_conf = (struct rte_eth_rss_reta_entry64 *) clib_mem_alloc (dev_info.reta_size /
679   RTE_RETA_GROUP_SIZE *
680   sizeof (*reta_conf));
681 if (reta_conf == NULL)
688 clib_memset (reta_conf, 0, dev_info.reta_size / RTE_RETA_GROUP_SIZE *
689   sizeof (*reta_conf));
691 for (i = 0; i < dev_info.reta_size; i++)
693 uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
694 uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
696 reta_conf[reta_id].mask = UINT64_MAX;
697 reta_conf[reta_id].reta[reta_pos] = reta[i];
701 rte_eth_dev_rss_reta_update (xd->port_id, reta_conf, dev_info.reta_size);
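The RSS redirection table is programmed in groups of RTE_RETA_GROUP_SIZE (64) slots, each group carrying a bitmask of which slots to write; that is why the code above allocates reta_size / RTE_RETA_GROUP_SIZE entries and sets mask = UINT64_MAX to update everything. A self-contained sketch that spreads n_queues round-robin over the whole table, with the queue-validity filtering of lines 653-663 omitted:

    #include <stdint.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    /* spread n_queues round-robin across the whole redirection table */
    static int
    rss_spread_queues (uint16_t port_id, uint16_t n_queues)
    {
      struct rte_eth_dev_info di;
      struct rte_eth_rss_reta_entry64 *conf;
      int rv;

      rte_eth_dev_info_get (port_id, &di);
      conf = calloc (di.reta_size / RTE_RETA_GROUP_SIZE, sizeof (*conf));
      if (conf == NULL)
        return -1;
      for (uint16_t i = 0; i < di.reta_size; i++)
        {
          conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;
          conf[i / RTE_RETA_GROUP_SIZE].reta[i % RTE_RETA_GROUP_SIZE] =
            i % n_queues;
        }
      rv = rte_eth_dev_rss_reta_update (port_id, conf, di.reta_size);
      free (conf);
      return rv;
    }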
728 if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
731   !(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
732 rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
739 else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
740 rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
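An rx-mode change reduces to a pair of DPDK calls, guarded above by the INT_SUPPORTED / INT_UNMASKABLE capability flags; a minimal sketch without those guards:

    #include <rte_ethdev.h>

    /* disable rx interrupts when a queue enters polling mode,
       enable them when it leaves; mirrors the calls at lines 732/740 */
    static int
    set_rx_queue_mode (uint16_t port_id, uint16_t qid, int polling)
    {
      return polling ? rte_eth_dev_rx_intr_disable (port_id, qid)
                     : rte_eth_dev_rx_intr_enable (port_id, qid);
    }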
773 #define UP_DOWN_FLAG_EVENT 1
781 uword *event_data = 0;
800 flags = event_data[1];
822 .name = "admin-up-down-process",
823 .process_log2_n_stack_bytes = 17,
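admin-up-down-process is a VLIB process node: it sleeps until an UP_DOWN_FLAG_EVENT is signalled, then reads the interface index and flags out of the event data (line 800 above). A skeleton of that node shape with illustrative my_* names, using the process API referenced below:

    #include <vlib/vlib.h>

    #define UP_DOWN_FLAG_EVENT 1

    /* skeleton of a process node that waits for UP_DOWN_FLAG_EVENT and
       consumes (sw_if_index, flags) pairs from the event data */
    static uword
    my_up_down_process (vlib_main_t *vm, vlib_node_runtime_t *rt,
                        vlib_frame_t *f)
    {
      uword event_type, *event_data = 0;

      while (1)
        {
          vlib_process_wait_for_event (vm);
          event_type = vlib_process_get_events (vm, &event_data);
          if (event_type == UP_DOWN_FLAG_EVENT && vec_len (event_data) >= 2)
            {
              u32 sw_if_index = event_data[0];
              u32 flags = event_data[1];
              /* apply admin flags to sw_if_index here */
              (void) sw_if_index;
              (void) flags;
            }
          vec_reset_length (event_data);
        }
      return 0; /* not reached */
    }

    VLIB_REGISTER_NODE (my_up_down_process_node) = {
      .function = my_up_down_process,
      .type = VLIB_NODE_TYPE_PROCESS,
      .name = "my-admin-up-down-process",
      .process_log2_n_stack_bytes = 17,
    };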
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
u32 next_buffer
Next buffer for this linked-list of buffers.
static void dpdk_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
vnet_interface_main_t * im
u8 buffer_pool_index
index of buffer pool this buffer belongs.
clib_file_main_t file_main
clib_error_t * vnet_sw_interface_set_flags(vnet_main_t *vnm, u32 sw_if_index, vnet_sw_interface_flags_t flags)
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
VNET_DEVICE_CLASS_TX_FN() dpdk_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
struct vnet_sub_interface_t::@374 eth
static clib_error_t * dpdk_set_mac_address(vnet_hw_interface_t *hi, const u8 *old_address, const u8 *address)
void dpdk_device_start(dpdk_device_t *xd)
static char * dpdk_tx_func_error_strings[]
struct vnet_sub_interface_t::@374::@375::@377 flags
format_function_t format_dpdk_flow
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword * vlib_process_wait_for_event(vlib_main_t *vm)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
static void clib_mem_free(void *p)
vlib_main_t vlib_node_runtime_t * node
#define clib_error_return(e, args...)
VNET_DEVICE_CLASS(af_xdp_device_class)
vl_api_tunnel_mode_t mode
@ VNET_SW_INTERFACE_FLAG_ADMIN_UP
@ VNET_HW_IF_RX_MODE_POLLING
vlib_node_registration_t admin_up_down_process_node
(constructor) VLIB_REGISTER_NODE (admin_up_down_process_node)
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
static void dpdk_clear_hw_interface_counters(u32 instance)
vlib_simple_counter_main_t * sw_if_counters
vnet_hw_interface_t * hw_interfaces
#define clib_error_report(e)
vnet_hw_if_output_node_runtime_t * r
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
static clib_error_t * dpdk_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
dpdk_tx_queue_t * tx_queues
#define CLIB_PREFETCH(addr, size, type)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type,...
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
u32 per_interface_next_index
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
static_always_inline void dpdk_buffer_tx_offload(dpdk_device_t *xd, vlib_buffer_t *b, struct rte_mbuf *mb)
#define clib_error_create(args...)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
vnet_main_t * vnet_get_main(void)
#define VLIB_NODE_FLAG_TRACE
static uword clib_bitmap_count_set_bits(uword *ai)
Return the number of set bits in a bitmap.
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
#define static_always_inline
#define vlib_buffer_from_rte_mbuf(x)
dpdk_per_thread_data_t * per_thread_data
vnet_flow_dev_ops_function_t dpdk_flow_ops_fn
static_always_inline u32 tx_burst_vector_internal(vlib_main_t *vm, dpdk_device_t *xd, struct rte_mbuf **mb, u32 n_left)
vnet_feature_config_main_t * cm
manual_print typedef address
volatile u8 ref_count
Reference count for this buffer.
struct _vlib_node_registration vlib_node_registration_t
static clib_error_t * dpdk_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
u16 current_length
Nbytes between current data and the end of this buffer.
@ UNIX_FILE_UPDATE_DELETE
struct rte_mempool ** dpdk_no_cache_mempool_by_buffer_pool_index
#define rte_mbuf_from_vlib_buffer(x)
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
static clib_error_t * dpdk_interface_set_rss_queues(struct vnet_main_t *vnm, struct vnet_hw_interface_t *hi, clib_bitmap_t *bitmap)
@ VNET_INTERFACE_COUNTER_TX_ERROR
void dpdk_update_link_state(dpdk_device_t *xd, f64 now)
void dpdk_device_stop(dpdk_device_t *xd)
u8 admin_up_down_in_progress
#define VLIB_BUFFER_PRE_DATA_SIZE
static_always_inline void dpdk_validate_rte_mbuf(vlib_main_t *vm, vlib_buffer_t *b, int maybe_multiseg)
static_always_inline void clib_prefetch_load(void *p)
static clib_error_t * dpdk_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
#define foreach_dpdk_tx_func_error
static void dpdk_tx_trace_buffer(dpdk_main_t *dm, vlib_node_runtime_t *node, dpdk_device_t *xd, u16 queue_id, vlib_buffer_t *buffer)
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
A collection of simple counters.
static_always_inline __clib_unused void dpdk_prefetch_buffer(vlib_main_t *vm, struct rte_mbuf *mb)
static clib_error_t * dpdk_add_del_mac_address(vnet_hw_interface_t *hi, const u8 *address, u8 is_add)
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static vlib_main_t * vlib_get_main(void)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
vnet_interface_output_runtime_t * rt
static f64 vlib_time_now(vlib_main_t *vm)
static_always_inline void clib_prefetch_store(void *p)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
static uword admin_up_down_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
#define clib_bitmap_foreach(i, ai)
Macro to iterate across set bits in a bitmap.
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
static void dpdk_update_counters(dpdk_device_t *xd, f64 now)
format_function_t format_dpdk_device
vl_api_interface_index_t sw_if_index
format_function_t format_dpdk_device_name
static void * clib_mem_alloc(uword size)
format_function_t format_dpdk_tx_trace
vnet_interface_main_t interface_main
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
dpdk_rx_queue_t * rx_queues
#define UP_DOWN_FLAG_EVENT
VLIB buffer representation.
#define VLIB_REGISTER_NODE(x,...)
vl_api_wireguard_peer_flags_t flags