#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")

/* Expand each list entry into an enum value DPDK_TX_FUNC_ERROR_<f>. */
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
/* ... */

/* dpdk_set_mac_address (fragment): program the port's default unicast MAC. */
  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
                                            (struct ether_addr *) address);
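/*
 * Sketch (illustrative, standard VPP idiom): the same foreach list is
 * expanded twice more, once into the error enum and once into the string
 * table (dpdk_tx_func_error_strings) registered with the TX node:
 *
 *   typedef enum
 *   {
 *   #define _(f,s) DPDK_TX_FUNC_ERROR_##f,
 *     foreach_dpdk_tx_func_error
 *   #undef _
 *       DPDK_TX_FUNC_N_ERROR,
 *   } dpdk_tx_func_error_t;
 *
 *   static char *dpdk_tx_func_error_strings[] = {
 *   #define _(n,s) s,
 *     foreach_dpdk_tx_func_error
 *   #undef _
 *   };
 */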
/* dpdk_set_mc_filter: replace the port's multicast address filter list. */
clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
                    struct ether_addr mc_addr_vec[],
                    int naddr)
{
  /* ... */
  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);
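/*
 * Sketch (illustrative, assuming the error-handling pattern used throughout
 * this file): the rte_ return code is folded into a clib error.
 *
 *   if (error)
 *     return clib_error_return (0, "mc addr list failed: %d", error);
 *   return 0;
 */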
/* dpdk_replicate_packet_mb: deep-copy an mbuf chain, segment by segment,
 * into fresh mbufs from this socket's pktmbuf pool. */
struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_buffer_main_t * bm = vlib_get_main ()->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  u32 copy_bytes;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer (b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      /* Chain shorter than advertised: warn, clean up, bail out. */
      if (PREDICT_FALSE (pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          rte_pktmbuf_free (first_mb);
          return NULL;
        }
      new_mb = rte_pktmbuf_alloc (bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
        {
          rte_pktmbuf_free (first_mb);
          return NULL;
        }
      /* Packet metadata goes into the first segment only... */
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME /* TX offload support TBD */
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else /* ...later segments just get chained on. */
        {
          ASSERT (prev_mb_next != 0);
          *prev_mb_next = new_mb;
        }
      /* Copy the segment payload, headroom included. */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;
      copy_bytes = pkt_mb->data_len + RTE_PKTMBUF_HEADROOM;
      ASSERT (copy_bytes <= pkt_mb->buf_len);
      clib_memcpy (new_mb->buf_addr, pkt_mb->buf_addr, copy_bytes);

      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  __rte_mbuf_sanity_check (first_mb, 1);
  return first_mb;
}
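/*
 * Usage sketch (illustrative, not from this file): replicate when the chain
 * must outlive the TX path, counting failures against REPL_FAIL:
 *
 *   struct rte_mbuf *copy = dpdk_replicate_packet_mb (b0);
 *   if (PREDICT_FALSE (copy == 0))
 *     vlib_error_count (vm, node->node_index,
 *                       DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
 */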
/* Zero-copy variant: clone (reference) each segment instead of copying. */
struct rte_mbuf *
dpdk_zerocopy_replicate_packet_mb (vlib_buffer_t * b)
{
  vlib_buffer_main_t * bm = vlib_get_main ()->buffer_main;
  struct rte_mbuf * first_mb = 0, * new_mb, * pkt_mb, ** prev_mb_next = 0;
  u8 nb_segs, nb_segs_left;
  unsigned socket_id = rte_socket_id ();

  ASSERT (bm->pktmbuf_pools[socket_id]);
  pkt_mb = rte_mbuf_from_vlib_buffer (b);
  nb_segs = pkt_mb->nb_segs;
  for (nb_segs_left = nb_segs; nb_segs_left; nb_segs_left--)
    {
      if (PREDICT_FALSE (pkt_mb == 0))
        {
          clib_warning ("Missing %d mbuf chain segment(s): "
                        "(nb_segs = %d, nb_segs_left = %d)!",
                        nb_segs - nb_segs_left, nb_segs, nb_segs_left);
          rte_pktmbuf_free (first_mb);
          return NULL;
        }
      new_mb = rte_pktmbuf_clone (pkt_mb, bm->pktmbuf_pools[socket_id]);
      if (PREDICT_FALSE (new_mb == 0))
        {
          rte_pktmbuf_free (first_mb);
          return NULL;
        }
      if (first_mb == 0)
        {
          first_mb = new_mb;
          rte_pktmbuf_pkt_len (first_mb) = pkt_mb->pkt_len;
          first_mb->nb_segs = pkt_mb->nb_segs;
          first_mb->port = pkt_mb->port;
#ifdef DAW_FIXME /* TX offload support TBD */
          first_mb->vlan_macip = pkt_mb->vlan_macip;
          first_mb->hash = pkt_mb->hash;
          first_mb->ol_flags = pkt_mb->ol_flags;
#endif
        }
      else
        {
          ASSERT (prev_mb_next != 0);
          *prev_mb_next = new_mb;
        }
      /* No payload copy here: the clone references the original data. */
      rte_pktmbuf_data_len (new_mb) = pkt_mb->data_len;
      prev_mb_next = &new_mb->next;
      pkt_mb = pkt_mb->next;
    }

  __rte_mbuf_sanity_check (first_mb, 1);
  return first_mb;
}
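/*
 * Editorial note: rte_pktmbuf_clone() attaches indirect mbufs to the
 * original segments and bumps their reference counts, so the "zerocopy"
 * variant is cheap but shares payload; neither the original nor the clone
 * may be modified in place afterwards.
 */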
/* dpdk_tx_trace_buffer (fragment): capture one buffer into the TX trace. */
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
                      dpdk_device_t * xd, u16 queue_id,
                      u32 buffer_index, vlib_buffer_t * buffer)
{
  struct rte_mbuf * mb;
  /* ... */
  mb = rte_mbuf_from_vlib_buffer (buffer);
/*
 * tx_burst_vector_internal: hand tx_vector's mbufs to the PMD, locking the
 * TX queue when queues are shared between threads.
 */
static_always_inline u32
tx_burst_vector_internal (vlib_main_t * vm,
                          dpdk_device_t * xd,
                          struct rte_mbuf ** tx_vector)
{
  /* ... */
  /* Shared queues: try each queue in turn until a lock is free. */
  while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
    queue_id = (queue_id + 1) % xd->tx_q_used;
  /* ... */
  /* Single target queue: spin until its lock is free. */
  while (__sync_lock_test_and_set (xd->lockp[queue_id], 1));
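  /*
   * Editorial note: __sync_lock_test_and_set() atomically stores 1 and
   * returns the previous value, so the first loop hops to the next TX queue
   * while queues are busy and the second spins on one queue until its lock
   * reads 0.  The matching release is the plain "*xd->lockp[queue_id] = 0"
   * store further down.
   */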
      /* vhost-user port: enqueue straight into the guest's virtio RX ring. */
#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
      /* Multi-queue: select the vring pair served by this thread. */
      offset = dq->queue_id * VIRTIO_QNUM;
      /* ... */
#endif
      /* No ring wrap: transmit in one burst. */
      {
        int i;
        u32 bytes = 0;
        struct rte_mbuf **pkts = &tx_vector[tx_tail];
        for (i = 0; i < (tx_head - tx_tail); i++)
          {
            struct rte_mbuf *buff = pkts[i];
            bytes += rte_pktmbuf_data_len (buff);
          }
        rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
                                      /* ... */);
        /* ... */
        vring->bytes += bytes;
        /* ... */
        /* Free any packets the guest ring could not accept. */
        rte_pktmbuf_free (tx_vector[tx_tail + c]);
      }
      /* Second burst, same accounting pattern (used when the ring wraps). */
      {
        int i;
        u32 bytes = 0;
        struct rte_mbuf **pkts = &tx_vector[tx_tail];
        /* ... */
        struct rte_mbuf *buff = pkts[i];
        bytes += rte_pktmbuf_data_len (buff);
        /* ... */
        rv = rte_vhost_enqueue_burst (&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
                                      /* ... */);
        /* ... */
        vring->bytes += bytes;
        /* ... */
        rte_pktmbuf_free (tx_vector[tx_tail + c]);
      }
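      /*
       * Editorial note: the byte count is accumulated before the enqueue
       * because accepted mbufs are freed once the guest has copied them;
       * the per-vring byte counter later feeds the interface stats.
       */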
      *xd->lockp[queue_id] = 0;   /* release the TX queue lock */
      /* ... */
      /* KNI port: inject into the kernel-side netdev; first burst... */
      rv = rte_kni_tx_burst (xd->kni,
                             /* ... */);
      /* ... */
      /* ...then the wrapped remainder, if any. */
      rv = rte_kni_tx_burst (xd->kni,
                             /* ... */);
      /* ... */
      *xd->lockp[queue_id] = 0;
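      /*
       * Editorial note: rte_kni_tx_burst(kni, mbufs, num) returns the number
       * of mbufs the kernel side actually accepted, which is why rv is
       * re-checked by the retry loop below.
       */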
    }
  while (rv && n_packets && (n_retry > 0));
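  /*
   * Editorial note: the do/while keeps offering the remainder to the PMD as
   * long as it is making progress (rv != 0), packets remain, and the bounded
   * retry budget (n_retry) is not exhausted.
   */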
/* dpdk_interface_tx_vector (fragment). */
  struct rte_mbuf ** tx_vector;
  /* ... */

/* dpdk_interface_tx: the device-class TX function (fragment). */
  struct rte_mbuf ** tx_vector;
  /* ... */
  tx_vector = xd->tx_vectors[queue_id];
  /* ... */
  /* TX ring full: drop the overflow and count it against RING_FULL. */
  u32 bi0 = from[n_packets];
  /* ... */
  struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
  rte_pktmbuf_free (mb0);
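  /*
   * Editorial note: in this buffer scheme the vlib_buffer_t and the rte_mbuf
   * share one allocation (rte_mbuf_from_vlib_buffer just steps back over the
   * mbuf header), so freeing the mbuf releases the vlib buffer as well; no
   * separate vlib_buffer_free is needed on this path.
   */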
      /* Dual loop: two packets per iteration. */
      struct rte_mbuf * mb0, * mb1;
      struct rte_mbuf * prefmb0, * prefmb1;
      /* ... */
      u16 new_data_len0, new_data_len1;
      u16 new_pkt_len0, new_pkt_len1;
      /* ... */
      /* Prefetch the next pair's mbuf headers. */
      prefmb0 = rte_mbuf_from_vlib_buffer (pref0);
      prefmb1 = rte_mbuf_from_vlib_buffer (pref1);
      /* ... */
      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);
      /* ... */
      /* Replication failed: count the drop. */
      vlib_error_count (vm, node->node_index,
                        DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
      /* ... */
      vlib_error_count (vm, node->node_index,
                        DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
      /* ... */
      /* Re-sync mbuf lengths after vlib header rewrites. */
      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_data_len1 = (u16) ((i16) mb1->data_len + delta1);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);
      new_pkt_len1 = (u16) ((i16) mb1->pkt_len + delta1);
      /* ... */
      mb0->data_len = new_data_len0;
      mb1->data_len = new_data_len1;
      mb0->pkt_len = new_pkt_len0;
      mb1->pkt_len = new_pkt_len1;
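      /*
       * Sketch (assuming the usual pattern in this file): delta is the
       * difference between the length vlib believes the packet has and what
       * the mbuf still records, e.g.
       *
       *   delta0 = vlib_buffer_length_in_chain (vm, b0)
       *            - (i16) mb0->pkt_len;
       *
       * so adding it to data_len/pkt_len makes the mbuf agree with vlib
       * after headers were prepended or stripped.
       */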
      /* Single-packet tail loop: same length fix-up, one buffer at a time. */
      struct rte_mbuf * mb0;
      /* ... */
      mb0 = rte_mbuf_from_vlib_buffer (b0);
      /* ... */
      vlib_error_count (vm, node->node_index,
                        DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
      /* ... */
      new_data_len0 = (u16) ((i16) mb0->data_len + delta0);
      new_pkt_len0 = (u16) ((i16) mb0->pkt_len + delta0);
      /* ... */
      mb0->data_len = new_data_len0;
      mb0->pkt_len = new_pkt_len0;
  /* Packets the PMD actually accepted on this call. */
  tx_pkts = n_on_ring - n_packets;
  /* ... */
  /* Drop anything still stuck on the ring after the retries. */
  rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
  /* ... */
  /* Buffers queued for recycling have been freed; reset the list. */
  _vec_len (dm->recycle[my_cpu]) = 0;
/* dpdk_device_renumber: only vhost-user interfaces may be renumbered. */
static int
dpdk_device_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  /* ... */
  clib_warning ("cannot renumber non-vhost-user interface (sw_if_index: %d)",
                /* ... */);
  /* ... */
  /* Walk every vring of the device (one RX/TX pair per queue). */
  for (i = 0; i < xd->rx_q_used * VIRTIO_QNUM; i++) {
#ifdef RTE_LIBRTE_KNI
/* KNI callbacks, invoked by the kernel module via the ops table below. */
static int
kni_config_network_if (u8 port_id, u8 if_up)
{
  /* ... */
  vnet_hw_interface_set_flags (vnm, /* ... */,
                               if_up ? VNET_HW_INTERFACE_FLAG_LINK_UP |
                               ETH_LINK_FULL_DUPLEX : 0);
  /* ... */
}

static int
kni_change_mtu (u8 port_id, unsigned new_mtu)
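/*
 * Editorial note: these two functions match the change_mtu and
 * config_network_if hooks of struct rte_kni_ops; they are wired into the
 * ops table in the KNI admin-up path below.
 */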
#ifdef RTE_LIBRTE_KNI
      /* Admin up on a KNI port: create the kernel-side device. */
      struct rte_kni_conf conf;
      struct rte_kni_ops ops;
      /* ... */
      memset (&conf, 0, sizeof (conf));
      snprintf (conf.name, RTE_KNI_NAMESIZE, "vpp%u", xd->kni_port_id);
      /* ... */
      memset (&ops, 0, sizeof (ops));
      /* ... */
      ops.change_mtu = kni_change_mtu;
      ops.config_network_if = kni_config_network_if;
      /* ... */
      xd->kni = rte_kni_alloc (bm->pktmbuf_pools[rte_socket_id ()], &conf, &ops);
      /* ... */
      /* Admin down: give the device back to the kernel module. */
      rte_kni_release (xd->kni);
      /* ... */
                                   ETH_LINK_FULL_DUPLEX);
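      /*
       * Editorial note: conf.name becomes the name of the Linux netdev that
       * the KNI module creates, so the port shows up on the host as vpp0,
       * vpp1, ... and can be managed with the usual ip(8) tooling.
       */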
      /* The VMXNET3 PMD does not tolerate stop/start here, so skip it. */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
        /* ... rte_eth_dev_start ... */
      /* ... */
      if (xd->pmd != VNET_DPDK_PMD_VMXNET3)
        /* ... rte_eth_dev_stop ... */
      /* ... */
      clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop",
                    /* ... */);
/* dpdk_set_interface_next_node: ~0 selects the default next node. */
  if (node_index == ~0)
  /* dpdk_subif_add_del_function: only the ixgbe/i40e VF PMDs get their
   * hardware VLAN filter programmed here. */
  int r, vlan_offload;
  /* ... */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) &&
      (xd->pmd != VNET_DPDK_PMD_I40EVF))
    /* ... */
  /* Enable VLAN filtering, preserving the other offload bits. */
  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;
  /* ... */
  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload))) {
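  /*
   * Sketch (assuming the usual DPDK sequence): with ETH_VLAN_FILTER_OFFLOAD
   * set, each subinterface's VLAN ID is then whitelisted or removed with
   *
   *   rte_eth_dev_vlan_filter (xd->device_index, vlan_id, is_add);
   *
   * where vlan_id comes from the subinterface and is_add from the caller.
   */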
/* Device class registration for DPDK ports (fragment). */
VNET_DEVICE_CLASS (dpdk_device_class) = {
  /* ... */
  .no_flatten_output_chains = 1,
};
#define UP_DOWN_FLAG_EVENT 1

/* admin_up_down_process: applies interface up/down flags from process
 * context, where blocking is safe. */
  uword *event_data = 0;
  /* ... */
  for (index = 0; index < vec_len (event_data); index++)
    {
      /* Each event packs sw_if_index in the high half, flags in the low. */
      sw_if_index = event_data[index] >> 32;
      flags = (u32) event_data[index];
      /* ... */
      switch (event_type) {
      /* ... */
    }

VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  /* ... */
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,  /* 128 KB stack */
  /* ... */
};

/* post_sw_interface_set_flags: hand the request to the process. */
  vlib_process_signal_event (vm, admin_up_down_process_node.index,
                             UP_DOWN_FLAG_EVENT,
                             (((uword) sw_if_index << 32) | flags));
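/*
 * Editorial note: vnet_sw_interface_set_flags() can suspend while a device
 * is brought up, which is only legal in process context; worker threads
 * therefore post UP_DOWN_FLAG_EVENT here instead of flipping the flags
 * directly, and dpdk_get_admin_up_down_in_progress() lets callers detect an
 * update already in flight.
 */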
/*
 * rte_delay_us_override: DPDK drivers busy-wait via rte_delay_us(); when the
 * delay is requested from the admin-up-down process, convert it into a vlib
 * process suspend so the main loop keeps running.
 */
int
rte_delay_us_override (unsigned us)
{
  /* Not worth intercepting very short delays. */
  if (us < 10)
    return 0;
  /* ... */
      f64 delay = 1e-6 * us;
      vlib_process_suspend (vm, delay);   /* suspend instead of spinning */