/* dpdk_device_error (): record the failing call, port id and DPDK errno */
  str, xd->port_id, rv, rte_strerror (rv));
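dpdk_device_error () folds the failing call name, the port and the DPDK return code into the device's error chain. A rough standalone sketch of the same idea, with a hypothetical helper name and an illustrative format string (only clib_error_return () and rte_strerror () are taken from the excerpt):

#include <vlib/vlib.h>
#include <rte_errno.h>

/* Hypothetical helper: turn a negative rte_* return value into a clib error. */
static clib_error_t *
dpdk_rv_to_error (clib_error_t * errors, const char *what, int port_id, int rv)
{
  return clib_error_return (errors, "%s[port:%d, errno:%d]: %s",
                            what, port_id, rv, rte_strerror (rv));
}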
/* dpdk_device_setup () */
  struct rte_eth_dev_info dev_info;

  if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)

  if (xd->pmd == VNET_DPDK_PMD_I40E)
      if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
        xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
      else
        xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE;
  rte_eth_dev_info_get (xd->port_id, &dev_info);

  bitmap = xd->port_conf.txmode.offloads & ~dev_info.tx_offload_capa;
    dpdk_log_warn ("unsupported tx offloads requested on port %u: %U",

  bitmap = xd->port_conf.rxmode.offloads & ~dev_info.rx_offload_capa;
    dpdk_log_warn ("unsupported rx offloads requested on port %u: %U",
  rte_eth_tx_queue_setup (xd->port_id, j,

  u16 socket_id = rte_lcore_to_socket_id (lcore);
      SOCKET_ID_ANY, 0, mp);
  if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)

  xd->flags |= DPDK_DEVICE_FLAG_PMD_INIT_FAIL;
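The setup fragments above follow a common DPDK pattern: query the PMD's capabilities with rte_eth_dev_info_get () and complain about any requested offload bits the device cannot provide. A minimal standalone sketch of that check (the helper name, the logging and the final masking are illustrative, not VPP's actual code):

#include <inttypes.h>
#include <stdio.h>
#include <rte_ethdev.h>

/* Hypothetical helper: warn about and drop unsupported offload bits. */
static void
clamp_offloads (uint16_t port_id, struct rte_eth_conf *conf)
{
  struct rte_eth_dev_info dev_info;
  uint64_t unsupported;

  rte_eth_dev_info_get (port_id, &dev_info);

  unsupported = conf->txmode.offloads & ~dev_info.tx_offload_capa;
  if (unsupported)
    printf ("port %u: unsupported tx offloads 0x%" PRIx64 "\n",
            port_id, unsupported);
  conf->txmode.offloads &= dev_info.tx_offload_capa;

  unsupported = conf->rxmode.offloads & ~dev_info.rx_offload_capa;
  if (unsupported)
    printf ("port %u: unsupported rx offloads 0x%" PRIx64 "\n",
            port_id, unsupported);
  conf->rxmode.offloads &= dev_info.rx_offload_capa;
}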
/* dpdk_device_start () */
  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)

  rv = rte_eth_dev_start (xd->port_id);

  rte_eth_dev_default_mac_addr_set (xd->port_id,
                                    (struct ether_addr *)

  if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
    rte_eth_promiscuous_enable (xd->port_id);
  else
    rte_eth_promiscuous_disable (xd->port_id);

  rte_eth_allmulticast_enable (xd->port_id);

  if (xd->pmd == VNET_DPDK_PMD_BOND)
      int nlink = rte_eth_bond_slaves_get (xd->port_id, slink, 16);
        rte_eth_allmulticast_enable (dpdk_port);
/* dpdk_device_stop () */
  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)

  rte_eth_allmulticast_disable (xd->port_id);
  rte_eth_dev_stop (xd->port_id);

  if (xd->pmd == VNET_DPDK_PMD_BOND)
      int nlink = rte_eth_bond_slaves_get (xd->port_id, slink, 16);
        rte_eth_dev_stop (dpdk_port);
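Both dpdk_device_start () and dpdk_device_stop () walk the member ports of a bonded interface and repeat the same per-port call on each of them. A sketch of that iteration, assuming a port created by DPDK's bonding PMD; the 16-slot array and the per-port call mirror the excerpt, the helper name and loop are illustrative:

#include <rte_ethdev.h>
#include <rte_eth_bond.h>

/* Enable allmulticast on every member port of a bonded port. */
static void
bond_members_allmulticast_enable (uint16_t bond_port_id)
{
  uint16_t slink[16];
  int nlink = rte_eth_bond_slaves_get (bond_port_id, slink, 16);

  while (nlink >= 1)
    {
      uint16_t dpdk_port = slink[--nlink];
      rte_eth_allmulticast_enable (dpdk_port);
    }
}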
/* send_garp_na_process () */
  uword event_type, *event_data = 0;

  for (i = 0; i < vec_len (event_data); i++)
      dpdk_port = event_data[i];

/* VLIB_REGISTER_NODE (send_garp_na_proc_node) */
  .name = "send-garp-na-process",

/* garp_na_proc_callback () */
  vlib_process_signal_event
    (vm, send_garp_na_proc_node.index, SEND_GARP_NA, *dpdk_port);
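send_garp_na_process () is a vlib process node: it sleeps until another node signals it, then drains a vector of per-event data. A minimal sketch of that pattern using the vlib process API listed further below (the node name and all example-process identifiers are hypothetical):

#include <vlib/vlib.h>

/* Hypothetical process node: wait for events and handle each datum. */
static uword
example_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  uword event_type, *event_data = 0;
  int i;

  while (1)
    {
      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      (void) event_type;

      for (i = 0; i < vec_len (event_data); i++)
        {
          uword datum = event_data[i];  /* opaque value passed by the signaller */
          (void) datum;
        }
      vec_reset_length (event_data);
    }
  return 0;
}

VLIB_REGISTER_NODE (example_process_node, static) = {
  .function = example_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "example-process",
};

Other nodes wake such a process with vlib_process_signal_event (vm, example_process_node.index, event type, datum), typically from the main thread, which is what the garp_na_proc_callback () fragment above does with SEND_GARP_NA.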
/* dpdk_port_state_callback_inline () */
static int
dpdk_port_state_callback_inline (dpdk_portid_t port_id,
                                 enum rte_eth_event_type type, void *param)
  struct rte_eth_link link;

  RTE_SET_USED (param);
  if (type != RTE_ETH_EVENT_INTR_LSC)
      dpdk_log_info ("Unknown event %d received for port %d", type, port_id);

  rte_eth_link_get_nowait (port_id, &link);
  u8 link_up = link.link_status;

  if (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE)
      uword bd_port = xd->bond_port;
      int bd_mode = rte_eth_bond_mode_get (bd_port);
                     "slave of port %d BondEthernet%d in mode %d",
                     port_id, (link_up) ? "UP" : "DOWN",
                     bd_port, xd->bond_instance_num, bd_mode);
      if (bd_mode == BONDING_MODE_ACTIVE_BACKUP)
          xd->flags |= DPDK_DEVICE_FLAG_BOND_SLAVE_UP;
          xd->flags &= ~DPDK_DEVICE_FLAG_BOND_SLAVE_UP;

      port_id, (unsigned) link.link_speed,
      (link.link_duplex == ETH_LINK_FULL_DUPLEX) ? "full-duplex" : "half-duplex");
/* dpdk_port_state_callback () */
int
dpdk_port_state_callback (dpdk_portid_t port_id, enum rte_eth_event_type type,
                          void *param, void *ret_param __attribute__ ((unused)))
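This signature matches DPDK's rte_eth_dev_cb_fn, so a callback like this is normally attached with rte_eth_dev_callback_register () for RTE_ETH_EVENT_INTR_LSC. The excerpt does not show the registration, so the sketch below is only an assumed wiring (helper name and usage are illustrative); the port also needs intr_conf.lsc set in its rte_eth_conf for the PMD to raise the interrupt at all:

#include <rte_ethdev.h>

/* Assumed wiring: deliver link-state-change interrupts to a callback. */
static int
enable_lsc_notifications (uint16_t port_id, rte_eth_dev_cb_fn cb)
{
  return rte_eth_dev_callback_register (port_id, RTE_ETH_EVENT_INTR_LSC,
                                        cb, NULL);
}

/* e.g. enable_lsc_notifications (port_id,
       (rte_eth_dev_cb_fn) dpdk_port_state_callback); */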
/* dpdk_get_pci_device () */
struct rte_pci_device *
dpdk_get_pci_device (const struct rte_eth_dev_info *info)
{
  const struct rte_bus *bus;

  bus = rte_bus_find_by_device (info->device);
  if (bus && !strcmp (bus->name, "pci"))
    return RTE_DEV_TO_PCI (info->device);
  else
    return NULL;
}
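A possible use of dpdk_get_pci_device (): after rte_eth_dev_info_get (), check whether the port is PCI-backed and print its address (the helper name, port id and printing are illustrative only):

#include <stdio.h>
#include <rte_ethdev.h>
#include <rte_bus_pci.h>

static void
print_port_pci_addr (uint16_t port_id)
{
  struct rte_eth_dev_info info;
  struct rte_pci_device *pci;

  rte_eth_dev_info_get (port_id, &info);
  pci = dpdk_get_pci_device (&info);
  if (pci)
    printf ("port %u is PCI device %04x:%02x:%02x.%x\n", port_id,
            pci->addr.domain, pci->addr.bus, pci->addr.devid,
            pci->addr.function);
}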
Referenced symbols and definitions:

format_function_t format_dpdk_tx_offload_caps
void vl_api_force_rpc_call_main_thread(void *fp, u8 *data, u32 data_length)
static uword * vlib_process_wait_for_event(vlib_main_t *vm)
vnet_main_t * vnet_get_main(void)
static_always_inline vlib_buffer_pool_t * vlib_get_buffer_pool(vlib_main_t *vm, u8 buffer_pool_index)
clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)
static f64 vlib_time_now(vlib_main_t *vm)
struct rte_pci_device * dpdk_get_pci_device(const struct rte_eth_dev_info *info)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
static vnet_sw_interface_t * vnet_get_sw_interface(vnet_main_t *vnm, u32 sw_if_index)
int dpdk_port_state_callback(dpdk_portid_t port_id, enum rte_eth_event_type type, void *param, void *ret_param)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
void send_ip6_na(vlib_main_t *vm, u32 sw_if_index)
#define vec_reset_length(v)
Reset vector length to zero. NULL-pointer tolerant.
static uword vlib_process_suspend(vlib_main_t *vm, f64 dt)
Suspend a vlib cooperative multi-tasking thread for a period of time.
#define dpdk_log_warn(...)
static uword vlib_process_get_events(vlib_main_t *vm, uword **data_vector)
Return the first event type which has occurred and a vector of per-event data of that type...
#define clib_error_return(e, args...)
void dpdk_device_setup(dpdk_device_t *xd)
struct rte_eth_conf port_conf
static uword send_garp_na_process(vlib_main_t *vm, vlib_node_runtime_t *rt, vlib_frame_t *f)
struct rte_eth_txconf tx_conf
vlib_worker_thread_t * vlib_worker_threads
void dpdk_device_start(dpdk_device_t *xd)
static_always_inline uword vnet_get_device_input_thread_index(vnet_main_t *vnm, u32 hw_if_index, u16 queue_id)
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
vnet_sw_interface_flags_t flags
static void garp_na_proc_callback(uword *dpdk_port)
#define dpdk_log_info(...)
void send_ip4_garp(vlib_main_t *vm, u32 sw_if_index)
#define VLIB_REGISTER_NODE(x,...)
static_always_inline uword vlib_get_thread_index(void)
#define dpdk_log_err(...)
static vlib_node_registration_t send_garp_na_proc_node
(constructor) VLIB_REGISTER_NODE (send_garp_na_proc_node)
static int dpdk_port_state_callback_inline(dpdk_portid_t port_id, enum rte_eth_event_type type, void *param)
void dpdk_device_stop(dpdk_device_t *xd)
void dpdk_device_error(dpdk_device_t *xd, char *str, int rv)
format_function_t format_dpdk_device_name
void dpdk_update_link_state(dpdk_device_t *xd, f64 now)
static vlib_main_t * vlib_get_main(void)
struct _vlib_node_registration vlib_node_registration_t
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
format_function_t format_dpdk_rx_offload_caps
#define clib_error_free(e)
struct rte_mempool ** dpdk_mempool_by_buffer_pool_index
enum dpdk_send_garp_na_process_event_t (anonymous enum typedef)
#define CLIB_CACHE_LINE_BYTES
static u8 vlib_buffer_pool_get_default_for_numa(vlib_main_t *vm, u32 numa_node)
u8 * buffer_pool_for_queue
CLIB vectors are ubiquitous dynamically resized arrays with user-defined "headers".
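The vec_* entries above belong to the CLIB vector library; a small illustrative sketch of how the listed macros fit together (the function and values are made up):

#include <vppinfra/vec.h>
#include <vppinfra/cache.h>

static void
vec_example (void)
{
  u32 *v = 0;                   /* a NULL pointer is a valid empty vector */

  /* grow the vector so index 9 is valid; new elements are zeroed and the
     data is aligned to CLIB_CACHE_LINE_BYTES */
  vec_validate_aligned (v, 9, CLIB_CACHE_LINE_BYTES);
  ASSERT (vec_len (v) == 10);

  v[3] = 42;

  vec_reset_length (v);         /* length back to 0, allocation kept */
  vec_free (v);                 /* release the memory */
}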