27 #define foreach_dpdk_tx_func_error \ 28 _(BAD_RETVAL, "DPDK tx function returned an error") \ 29 _(PKT_DROP, "Tx packet drops (dpdk tx failure)") \ 30 _(REPL_FAIL, "Tx packet drops (replication failure)") 34 #define _(f,s) DPDK_TX_FUNC_ERROR_##f, 40 #ifndef CLIB_MARCH_VARIANT 54 error = rte_eth_dev_default_mac_addr_set (xd->
port_id,
55 (
struct ether_addr *) address);
70 static struct rte_mbuf *
74 struct rte_mbuf **mbufs = 0, *s, *d;
76 unsigned socket_id = rte_socket_id ();
84 if (rte_pktmbuf_alloc_bulk (dm->
pktmbuf_pools[socket_id], mbufs, nb_segs))
91 d->nb_segs = s->nb_segs;
92 d->data_len = s->data_len;
93 d->pkt_len = s->pkt_len;
94 d->data_off = s->data_off;
95 clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len);
97 for (i = 1; i < nb_segs; i++)
102 d->data_len = s->data_len;
104 RTE_PKTMBUF_HEADROOM + s->data_len);
129 sizeof (buffer[0]) -
sizeof (buffer->
pre_data));
139 struct rte_mbuf *mb, *first_mb, *last_mb;
147 rte_pktmbuf_reset (mb);
148 while (maybe_multiseg && (b2->
flags & VLIB_BUFFER_NEXT_PRESENT))
152 rte_pktmbuf_reset (mb);
157 first_mb->nb_segs = 1;
163 while (maybe_multiseg && (b->
flags & VLIB_BUFFER_NEXT_PRESENT))
190 struct rte_mbuf **mb,
u32 n_left)
209 while (__sync_lock_test_and_set (xd->
lockp[queue_id], 1))
211 queue_id = (queue_id + 1) % xd->
tx_q_used;
223 n_sent = rte_ring_sp_enqueue_burst (hqos->
swq, (
void **) mb,
229 n_sent = rte_eth_tx_burst (xd->
port_id, queue_id, mb, n_left);
238 *xd->
lockp[queue_id] = 0;
258 while (n_sent && n_left && (n_retry > 0));
276 struct rte_mbuf *mb_new;
285 DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
286 b->
flags |= VLIB_BUFFER_REPL_FAIL;
298 u32 ip_cksum = b->
flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
299 u32 tcp_cksum = b->
flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
300 u32 udp_cksum = b->
flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
301 int is_ip4 = b->
flags & VNET_BUFFER_F_IS_IP4;
305 if (
PREDICT_TRUE ((ip_cksum | tcp_cksum | udp_cksum) == 0))
311 mb->outer_l3_len = 0;
312 mb->outer_l2_len = 0;
313 ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
314 ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
315 ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
316 ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;
317 mb->ol_flags |= ol_flags;
321 if (xd->
flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
322 rte_net_intel_cksum_flags_prepare (mb, ol_flags);
338 u32 n_packets = f->n_vectors;
342 int queue_id = thread_index;
343 u32 tx_pkts = 0, all_or_flags = 0;
346 struct rte_mbuf **mb;
372 (
void **) ptd->
mbufs, n_packets,
373 -(
i32) sizeof (
struct rte_mbuf));
394 all_or_flags |= or_flags;
401 if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
416 if (
PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
418 (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM
419 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM
420 | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))))
430 if (b[0]->
flags & VLIB_BUFFER_IS_TRACED)
432 if (b[1]->
flags & VLIB_BUFFER_IS_TRACED)
434 if (b[2]->
flags & VLIB_BUFFER_IS_TRACED)
436 if (b[3]->
flags & VLIB_BUFFER_IS_TRACED)
446 all_or_flags |= b[0]->
flags;
453 if (b[0]->
flags & VLIB_BUFFER_IS_TRACED)
463 struct rte_mbuf **mb_old;
466 mb_old = mb = ptd->
mbufs;
484 tx_pkts = n_packets = mb - ptd->
mbufs;
505 rte_pktmbuf_free (ptd->
mbufs[n_packets - n_left - 1]);
514 _vec_len (dm->
recycle[thread_index]) = 0;
520 #ifndef CLIB_MARCH_VARIANT 548 if (xd->
flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
555 if ((xd->
flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
557 xd->
flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
565 if ((xd->
flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
567 xd->
flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
586 if (node_index == ~0)
615 if ((xd->
flags & DPDK_DEVICE_FLAG_PMD) == 0)
619 if ((xd->
pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->
pmd != VNET_DPDK_PMD_I40EVF))
632 vlan_offload = rte_eth_dev_get_vlan_offload (xd->
port_id);
633 vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
635 if ((r = rte_eth_dev_set_vlan_offload (xd->
port_id, vlan_offload)))
645 rte_eth_dev_vlan_filter (xd->
port_id,
656 xd->
flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
658 xd->
flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;
685 static void __clib_constructor
696 #define UP_DOWN_FLAG_EVENT 1 698 #ifndef CLIB_MARCH_VARIANT 705 uword *event_data = 0;
723 sw_if_index = event_data[0];
724 flags = event_data[1];
746 .name =
"admin-up-down-process",
747 .process_log2_n_stack_bytes = 17,
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
format_function_t format_dpdk_tx_trace
format_function_t format_dpdk_flow
#define vlib_buffer_from_rte_mbuf(x)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers: frees the entire buffer chain for each buffer.
static uword * vlib_process_wait_for_event(vlib_main_t *vm)
vnet_main_t * vnet_get_main(void)
vnet_interface_main_t interface_main
vnet_device_class_t dpdk_device_class
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static f64 vlib_time_now(vlib_main_t *vm)
static char * dpdk_tx_func_error_strings[]
#define VLIB_BUFFER_PRE_DATA_SIZE
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
#define vec_add1(V, E)
Add 1 element to end of vector (unspecified alignment).
u32 per_interface_next_index
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
#define VNET_HW_INTERFACE_FLAG_LINK_UP
struct rte_eth_xstat * last_cleared_xstats
static uword vlib_buffer_length_in_chain(vlib_main_t *vm, vlib_buffer_t *b)
Get length in bytes of the buffer chain.
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static void dpdk_tx_trace_buffer(dpdk_main_t *dm, vlib_node_runtime_t *node, dpdk_device_t *xd, u16 queue_id, vlib_buffer_t *buffer)
#define vec_reset_length(v)
Reset vector length to zero; NULL-pointer tolerant.
#define vec_add(V, E, N)
Add N elements to end of vector V (no header, unspecified alignment)
struct rte_eth_stats stats
i16 current_data
Signed offset into data[] / pre_data[] marking the position currently being processed.
#define static_always_inline
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
vlib_node_registration_t dpdk_input_node
(constructor) VLIB_REGISTER_NODE (dpdk_input_node)
uword CLIB_MULTIARCH_FN() dpdk_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *f)
static void dpdk_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
static struct rte_mbuf * dpdk_replicate_packet_mb(vlib_buffer_t *b)
static vlib_node_registration_t admin_up_down_process_node
(constructor) VLIB_REGISTER_NODE (admin_up_down_process_node)
A collection of simple counters.
dpdk_device_hqos_per_worker_thread_t * hqos_wt
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static void __clib_constructor dpdk_interface_tx_multiarch_select(void)
vnet_hw_interface_t * hw_interfaces
#define rte_mbuf_from_vlib_buffer(x)
void dpdk_device_start(dpdk_device_t *xd)
u16 current_length
Number of bytes between the current data pointer and the end of this buffer.
dpdk_per_thread_data_t * per_thread_data
void dpdk_hqos_metadata_set(dpdk_device_hqos_per_worker_thread_t *hqos, struct rte_mbuf **pkts, u32 n_pkts)
static void pcap_add_buffer(pcap_main_t *pm, vlib_main_t *vm, u32 buffer_index, u32 n_bytes_in_trace)
Add buffer (vlib_buffer_t) to the trace.
vlib_simple_counter_main_t * sw_if_counters
u32 node_index
Node index.
uword( vlib_node_function_t)(struct vlib_main_t *vm, struct vlib_node_runtime_t *node, struct vlib_frame_t *frame)
#define VLIB_REGISTER_NODE(x,...)
#define CLIB_PREFETCH(addr, size, type)
static void dpdk_update_counters(dpdk_device_t *xd, f64 now)
#define vec_free(V)
Free vector's memory (no header).
vlib_node_function_t __clib_weak dpdk_interface_tx_avx512
#define clib_warning(format, args...)
#define clib_memcpy(a, b, c)
static_always_inline void dpdk_prefetch_buffer(vlib_main_t *vm, struct rte_mbuf *mb)
struct vnet_sub_interface_t::@200::@201::@203 flags
format_function_t format_dpdk_device
static_always_inline void dpdk_validate_rte_mbuf(vlib_main_t *vm, vlib_buffer_t *b, int maybe_multiseg)
void dpdk_device_stop(dpdk_device_t *xd)
static_always_inline u32 tx_burst_vector_internal(vlib_main_t *vm, dpdk_device_t *xd, struct rte_mbuf **mb, u32 n_left)
struct rte_eth_xstat * xstats
#define VNET_SW_INTERFACE_FLAG_ADMIN_UP
format_function_t format_dpdk_device_name
uword admin_up_down_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
u32 next_buffer
Next buffer for this linked-list of buffers.
struct rte_mempool ** pktmbuf_pools
#define clib_error_report(e)
VNET_DEVICE_CLASS(bond_dev_class)
void dpdk_update_link_state(dpdk_device_t *xd, f64 now)
dpdk_portid_t device_index
u8 n_add_refs
Number of additional references to this buffer.
static vlib_main_t * vlib_get_main(void)
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
struct vnet_sub_interface_t::@200 eth
static void dpdk_clear_hw_interface_counters(u32 instance)
vlib_node_function_t __clib_weak dpdk_interface_tx_avx2
struct rte_mbuf * mbufs[DPDK_RX_BURST_SZ]
#define foreach_dpdk_tx_func_error
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u8 admin_up_down_in_progress
static clib_error_t * dpdk_set_mac_address(vnet_hw_interface_t *hi, char *address)
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
dpdk_pcap_t pcap[VLIB_N_RX_TX]
#define UP_DOWN_FLAG_EVENT
static clib_error_t * dpdk_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
vnet_flow_dev_ops_function_t dpdk_flow_ops_fn
clib_error_t * vnet_sw_interface_set_flags(vnet_main_t *vnm, u32 sw_if_index, u32 flags)
struct rte_eth_stats last_cleared_stats
static clib_error_t * dpdk_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
#define VLIB_NODE_FLAG_TRACE
static_always_inline void dpdk_buffer_recycle(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b, u32 bi, struct rte_mbuf **mbp)
static_always_inline void dpdk_buffer_tx_offload(dpdk_device_t *xd, vlib_buffer_t *b, struct rte_mbuf *mb)
#define CLIB_CACHE_LINE_BYTES
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
CLIB vectors are ubiquitous dynamically resized arrays with by user defined "headers".
#define CLIB_MULTIARCH_FN(fn)