#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>

#define VHOST_USER_DOWN_DISCARD_COUNT 256
#define VHOST_USER_RX_BUFFER_STARVATION 32
#define VHOST_USER_RX_COPY_THRESHOLD 64

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error") \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,

/* Trace helper: locate the virtio-net header of the traced packet and copy
 * it into the trace record. */
  vring_desc_t *hdr_desc = 0;
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  /* ... */
  memset (t, 0, sizeof (*t));
  /* ... */
  hdr_desc = &txvq->desc[desc_current];
  /* ... */
  /* Neither chained nor indirect: a single descriptor. */
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
  /* ... */
  if (!hdr_desc || !(hdr = map_guest_mem (vui, hdr_desc->addr, &hint)))
  /* ... */
  memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
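The excerpted `#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,` line is one half of the usual VPP error-list pattern: `foreach_vhost_user_input_func_error` is expanded once into an enum and once into a parallel string table. A minimal sketch of that expansion; the enum and string-table names below are the conventional ones and are assumptions, since they do not appear in the excerpt:

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,	/* number of error counters */
} vhost_user_input_func_error_t;

/* Parallel table of human-readable strings, indexed by the enum above;
 * a node registration's .error_strings field typically points at it. */
static char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};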
/* Copy helper (the head of its signature is elided in the excerpt): executes
 * the vhost_copy_t operations queued by the ring parser. */
  ... u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
  /* ... */
  clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
  clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);
  /* ... */
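For context, the copy pass follows a map-then-copy pattern: each queued vhost_copy_t carries a host destination pointer, a guest source address, and a length, and the source is translated with map_guest_mem() before the memcpy. A simplified sketch of that loop; the function name, the two-at-a-time unrolling, and the error handling are assumptions, not the exact code:

/* Sketch: run the pending copies two at a time.
 * Returns non-zero if a guest address cannot be mapped. */
static_always_inline u32
copy_sketch (vhost_user_intf_t * vui, vhost_copy_t * cpy,
	     u16 copy_len, u32 * map_hint)
{
  void *src0, *src1;

  while (copy_len >= 2)
    {
      if (!(src0 = map_guest_mem (vui, cpy[0].src, map_hint)))
	return 1;
      if (!(src1 = map_guest_mem (vui, cpy[1].src, map_hint)))
	return 1;

      clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
      clib_memcpy ((void *) cpy[1].dst, src1, cpy[1].len);

      copy_len -= 2;
      cpy += 2;
    }

  /* Tail: at most one copy left. */
  if (copy_len)
    {
      if (!(src0 = map_guest_mem (vui, cpy[0].src, map_hint)))
	return 1;
      clib_memcpy ((void *) cpy[0].dst, src0, cpy[0].len);
    }
  return 0;
}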
/* Discard helper: drop up to discard_max pending packets from the guest's
 * ring without copying any data. */
  u32 discarded_packets = 0;
  /* ... */
  while (discarded_packets != discard_max)
    {
      /* ... */
      u16 desc_chain_head = ...;	/* taken from the avail ring */
      /* ... */
    }
  /* ... */
  return discarded_packets;
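The discard path drops guest-to-host packets by moving descriptor chain heads straight from the avail ring to the used ring with a zero used length, so no data is copied. A minimal sketch of that idea; the ring and field names follow the standard virtio vring layout and are assumptions here, not the exact code:

/* Sketch: drop up to 'discard_max' pending packets by completing their
 * descriptor chains with a used length of 0. */
static u32
discard_sketch (vhost_user_vring_t * txvq, u32 discard_max)
{
  u32 discarded = 0;

  while (discarded != discard_max)
    {
      /* Stop when the guest has nothing more queued. */
      if (txvq->last_avail_idx == txvq->avail->idx)
	break;

      /* Take the next chain head from the avail ring... */
      u16 desc_chain_head =
	txvq->avail->ring[txvq->last_avail_idx & txvq->qsz_mask];
      txvq->last_avail_idx++;

      /* ...and give it straight back on the used ring, zero bytes written. */
      txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].id =
	desc_chain_head;
      txvq->used->ring[txvq->last_used_idx & txvq->qsz_mask].len = 0;
      txvq->last_used_idx++;
      discarded++;
    }

  /* Publish the new used index only after the ring entries are written. */
  CLIB_MEMORY_BARRIER ();
  txvq->used->idx = txvq->last_used_idx;
  return discarded;
}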
/* Rewind helper: reset a partially built vlib buffer chain so its buffers
 * can be reused for the next packet. */
static __clib_unused void
/* ... */
  b_current->flags = 0;
  while (b_current != b_head)
    {
      /* ... */
      b_current->flags = 0;
    }
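When descriptor mapping fails mid-packet, the partially chained buffers are handed back: the helper walks from the buffer most recently taken out of the per-thread rx_buffers cache back to the head, zeroing lengths and flags and bumping rx_buffers_len so the same indices are reused. A sketch under those assumptions; the vhost_cpu_t field names come from the cross-reference list below, the function name is illustrative:

/* Sketch: give back a partially assembled buffer chain. 'cpu->rx_buffers'
 * and 'cpu->rx_buffers_len' form the per-thread pre-allocated buffer cache. */
static void
rewind_sketch (vlib_main_t * vm, vhost_cpu_t * cpu, vlib_buffer_t * b_head)
{
  u32 bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
  vlib_buffer_t *b_current = vlib_get_buffer (vm, bi_current);

  b_current->current_length = 0;
  b_current->flags = 0;
  while (b_current != b_head)
    {
      /* Each buffer we walk past becomes available again. */
      cpu->rx_buffers_len++;
      bi_current = cpu->rx_buffers[cpu->rx_buffers_len];
      b_current = vlib_get_buffer (vm, bi_current);
      b_current->current_length = 0;
      b_current->flags = 0;
    }
  cpu->rx_buffers_len++;
}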
/* Per-queue input function: drains one guest TX ring into vlib buffers. */
static __clib_unused u32
/* ... */
  u16 n_rx_packets = 0;
  /* ... */
  u32 n_left_to_next, *to_next;
  /* ... */
  /* Clear VRING_USED_F_NO_NOTIFY: ask the driver to resume notifications. */
  txvq->used->flags = 0;
  /* ... */
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
  /* ... */
  /* Buffer starvation: 'flush' packets are discarded and charged to the
   * interface drop counter and the NO_BUFFER node error. */
  vlib_increment_simple_counter (... interface_main.sw_if_counters + ...);
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
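The notification flag works both ways: while VRING_USED_F_NO_NOTIFY is set in used->flags the guest skips its kick, and clearing it re-enables kicks, which is how a polling or adaptive input node throttles interrupts. A small sketch of that toggle; the helper name and the polling check are assumptions for illustration:

/* Sketch: enable or disable guest-to-host notifications for one vring,
 * depending on whether the node is currently polling. */
static inline void
vring_set_notify_sketch (vhost_user_vring_t * txvq, int polling)
{
  if (polling)
    /* We poll anyway; a kick from the guest would only cost an eventfd write. */
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else
    /* Interrupt mode: we rely on the guest's kick to schedule the node. */
    txvq->used->flags = 0;
}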
  /* Per-packet loop: walk the guest descriptor chain and build a chained
   * vlib buffer for each packet. */
  while (n_left > 0 && n_left_to_next > 0)
    {
      /* ... */
      u32 desc_data_offset;
      vring_desc_t *desc_table = txvq->desc;
      /* ... */
      to_next[0] = bi_current;
      /* ... */
      /* Prefetch the next buffer to be taken from the per-thread cache. */
      vlib_prefetch_buffer_with_index
	(vm, (vum->cpus[thread_index].rx_buffers)[vum->cpus[thread_index].
						  rx_buffers_len - 1], LOAD);
      /* ... */
      b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
      /* ... */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
      /* ... */
      /* The first descriptor holds only the virtio-net header: skip it. */
      desc_data_offset = desc_table[desc_current].len;
      /* ... */
      if (desc_data_offset == desc_table[desc_current].len)
	{
	  /* ... */
	  /* Current descriptor fully consumed: follow the chain. */
	  desc_current = desc_table[desc_current].next;
	  desc_data_offset = 0;
	}
      /* ... */
      /* Current vlib buffer is full: link the next one into the chain. */
      b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
      bi_current = bi_next;
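The loop is driven by the standard virtio descriptor-chain rules: a descriptor with VIRTQ_DESC_F_NEXT points at another slot in the same table, and one with VIRTQ_DESC_F_INDIRECT points at a separate table that must itself be mapped from guest memory. A compact sketch of that traversal; the helper name and overall structure are illustrative assumptions (the real loop also bounds the walk, which is what the INDIRECT_OVERFLOW error above is for):

/* Sketch: sum the data length of one descriptor chain, switching to an
 * indirect table when the head descriptor asks for it. */
static u32
chain_len_sketch (vhost_user_intf_t * vui, vhost_user_vring_t * txvq,
		  u16 desc_head, u32 * map_hint)
{
  vring_desc_t *desc_table = txvq->desc;
  u16 desc_current = desc_head;
  u32 len = 0;

  if (desc_table[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      /* The chain lives in a guest-resident indirect table. */
      desc_table = map_guest_mem (vui, desc_table[desc_current].addr,
				  map_hint);
      if (desc_table == 0)
	return 0;		/* mmap failure; the caller counts the error */
      desc_current = 0;
    }

  while (1)
    {
      len += desc_table[desc_current].len;
      if (!(desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT))
	break;
      desc_current = desc_table[desc_current].next;
    }
  return len;
}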
      /* Queue one copy: as much of the current descriptor as still fits
       * into the current vlib buffer. */
      u32 desc_data_l =
	desc_table[desc_current].len - desc_data_offset;
      /* ... */
      cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
      /* ... */
      cpy->src = desc_table[desc_current].addr + desc_data_offset;

      desc_data_offset += cpy->len;
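Between the excerpted statements the copy record also receives its initial length (the free space left in the vlib buffer) and its destination pointer. A filled-in sketch of the whole record, assuming VLIB_BUFFER_DATA_SIZE-sized buffers and the dst/src/len layout of vhost_copy_t implied by the excerpt and the cross-reference; the elided statements in the original may differ:

/* Sketch: fill one vhost_copy_t for the (descriptor, buffer) pair and
 * return the number of bytes it will move. The caller advances both
 * desc_data_offset and its byte counters by the returned length. */
static_always_inline u32
queue_copy_sketch (vhost_copy_t * cpy, vring_desc_t * desc_table,
		   u16 desc_current, u32 desc_data_offset,
		   vlib_buffer_t * b_current)
{
  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;

  /* Start from the free space left in the current buffer... */
  cpy->len = VLIB_BUFFER_DATA_SIZE - b_current->current_length;
  /* ...and clamp it to what the descriptor still has to offer. */
  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;

  /* Destination: the current write position inside the vlib buffer. */
  cpy->dst = (uword) (vlib_buffer_get_current (b_current) +
		      b_current->current_length);
  /* Source: a guest address, resolved later by the deferred copy pass. */
  cpy->src = desc_table[desc_current].addr + desc_data_offset;

  b_current->current_length += cpy->len;
  return cpy->len;
}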
      /* ... */
      u32 bi = to_next[-1];	/* buffer index of the packet just queued */
      /* ... */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
				       to_next, n_left_to_next,
				       /* ... */);
      /* ... */
      /* Mid-frame flush of the queued copies (frees space in the ring);
       * a failed guest-memory mapping counts as MMAP_FAIL. */
      if (/* copy helper */ (/* ... */, copy_len, &map_hint))
	{
	  vlib_error_count (vm, node->node_index,
			    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	}
      /* ... */
  /* Final flush of the copies left over at the end of the frame. */
  if (/* copy helper */ (/* ... */, copy_len, &map_hint))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }
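The two flush sites reflect the "parse first, copy later" strategy: ring parsing only appends vhost_copy_t records, and the actual memcpys run in batches, either when the pending count reaches VHOST_USER_RX_COPY_THRESHOLD or once more at the end of the frame. A schematic sketch of that control flow; parse_one_packet() and do_copies() are placeholder names standing in for the parsing step and the copy helper excerpted earlier:

/* Sketch: drain 'n' pending packets, batching the deferred copies. */
static u32
rx_batching_sketch (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vhost_user_intf_t * vui, vhost_cpu_t * cpu, u32 n)
{
  u32 map_hint = 0;
  u16 copy_len = 0;
  u32 done = 0;

  while (done < n)
    {
      /* Ring parsing only appends vhost_copy_t records... */
      copy_len += parse_one_packet (vui, cpu, copy_len);
      done++;

      /* ...and the memcpys run in batches to keep the ring moving. */
      if (copy_len >= VHOST_USER_RX_COPY_THRESHOLD)
	{
	  if (do_copies (vui, cpu->copy, copy_len, &map_hint))
	    vlib_error_count (vm, node->node_index,
			      VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
	  copy_len = 0;
	}
    }

  /* Final flush for whatever is left over in this frame. */
  if (copy_len && do_copies (vui, cpu->copy, copy_len, &map_hint))
    vlib_error_count (vm, node->node_index,
		      VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
  return done;
}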
/* Input node function: poll each interface/queue pair assigned to this
 * worker and sum the per-queue packet counts. */
  uword n_rx_packets = 0;
  /* ... */
      if (/* ... */
	  (node->state == VLIB_NODE_STATE_POLLING))
	/* ... */
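The node function itself is mostly a dispatcher: it walks the interface/queue pairs assigned to this worker, calls the per-queue input function for each one that should be polled, and sums the returned packet counts. A sketch of that shape; the element type, the devices-and-queues vector, and the per-queue helper are placeholder names, while vec_foreach, pool_elt_at_index, and the main structs are the real helpers listed in the cross-reference below:

/* Placeholder per-thread work item: which device and which of its queues. */
typedef struct
{
  u32 dev_instance;		/* index into vum->vhost_user_interfaces */
  u16 queue_id;			/* guest TX ring to poll */
  u8 is_polling;		/* placeholder for the per-queue rx mode */
} dq_sketch_t;

/* Sketch: top-level dispatch of the input node. */
static uword
input_node_fn_sketch (vlib_main_t * vm, vlib_node_runtime_t * node,
		      dq_sketch_t * devices_and_queues)
{
  uword n_rx_packets = 0;
  vhost_user_main_t *vum = &vhost_user_main;
  dq_sketch_t *dq;

  vec_foreach (dq, devices_and_queues)
  {
    /* Poll the queue if either it or the whole node is in polling mode. */
    if (dq->is_polling || (node->state == VLIB_NODE_STATE_POLLING))
      {
	vhost_user_intf_t *vui =
	  pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
	n_rx_packets +=
	  per_queue_input_sketch (vm, vum, vui, dq->queue_id, node);
      }
  }
  return n_rx_packets;
}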
#ifndef CLIB_MARCH_VARIANT
/* ... */
VLIB_REGISTER_NODE (/* ... */) = {
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  /* ... */
  .state = VLIB_NODE_STATE_DISABLED,
  /* ... */
};
#endif
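A registration of this kind usually also wires up the input-node type, the error strings generated from foreach_vhost_user_input_func_error, and the trace formatter. The block below is a hedged sketch of what the elided initializers typically look like, not a copy of the originals; the node symbol, .type, .n_errors, and .error_strings values are assumptions, with the error names taken from the expansion sketch near the top:

/* Sketch: plausible full form of the registration. Only .name, .sibling_of
 * and .state are confirmed by the excerpt. */
VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  .type = VLIB_NODE_TYPE_INPUT,
  /* Stays disabled until a vhost-user interface is created and assigned. */
  .state = VLIB_NODE_STATE_DISABLED,
  .format_trace = format_vhost_trace,
  .n_errors = VHOST_USER_INPUT_FUNC_N_ERROR,
  .error_strings = vhost_user_input_func_error_strings,
};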
Symbols referenced by this file, with their brief descriptions:

static void vnet_device_increment_rx_packets (u32 thread_index, u64 count)
u32 virtio_ring_flags
    The device index.
virtio_net_hdr_mrg_rxbuf_t hdr
    Length of the first data descriptor.
static u32 vlib_get_trace_count (vlib_main_t *vm, vlib_node_runtime_t *rt)
#define clib_smp_swap(addr, new)
vhost_cpu_t *cpus
    Per-CPU data for vhost-user.
static void vlib_increment_combined_counter (vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
    Increment a combined counter.
vnet_interface_main_t interface_main
static void vlib_error_count (vlib_main_t *vm, uword node_index, uword counter, uword increment)
static f64 vlib_time_now (vlib_main_t *vm)
vhost_copy_t copy[VHOST_USER_COPY_ARRAY_N]
#define VRING_AVAIL_F_NO_INTERRUPT
static void vlib_increment_simple_counter (vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
    Increment a simple counter.
#define VLIB_NODE_FN(node)
static void vlib_trace_buffer (vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
vnet_hw_interface_rx_mode
#define static_always_inline
#define vlib_prefetch_buffer_with_index(vm, bi, type)
    Prefetch buffer metadata by buffer index. The first 64 bytes of the buffer contains most header information...
vlib_combined_counter_main_t *combined_sw_if_counters
static_always_inline void *map_guest_mem (vhost_user_intf_t *vui, uword addr, u32 *hint)
#define VHOST_VRING_IDX_TX(qid)
#define VRING_USED_F_NO_NOTIFY
#define VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE
static_always_inline u8 *format_vhost_trace (u8 *s, va_list *va)
#define pool_elt_at_index(p, i)
    Returns pointer to element at given index.
u16 current_length
    Nbytes between current data and the end of this buffer.
#define VIRTQ_DESC_F_INDIRECT
static void *vlib_buffer_get_current (vlib_buffer_t *b)
    Get pointer to current data to process.
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
vhost_user_main_t vhost_user_main
u32 node_index
    Node index.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
    Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
    Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
vlib_error_t error
    Error code for buffers to be enqueued to error handler.
#define VHOST_VRING_IDX_RX(qid)
u8 *format_ethernet_header_with_length (u8 *s, va_list *args)
u16 device_index
    The interface queue index (Not the virtio vring idx).
vhost_user_intf_t *vhost_user_interfaces
#define VLIB_REGISTER_NODE(x,...)
static_always_inline uword vlib_get_thread_index (void)
#define CLIB_PREFETCH(addr, size, type)
#define clib_memcpy(a, b, c)
void vlib_put_next_frame (vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
    Release pointer to next frame vector data.
u16 first_desc_len
    Runtime queue flags.
#define VLIB_BUFFER_DATA_SIZE
u32 rx_buffers[VHOST_USER_RX_BUFFERS_N]
#define VHOST_USER_RX_BUFFERS_N
#define VIRTQ_DESC_F_NEXT
static_always_inline void vhost_user_send_call (vlib_main_t *vm, vhost_user_vring_t *vq)
u32 next_buffer
    Next buffer for this linked-list of buffers.
static u32 vlib_buffer_alloc_from_free_list (vlib_main_t *vm, u32 *buffers, u32 n_buffers, vlib_buffer_free_list_index_t index)
    Allocate buffers from specific freelist into supplied array.
static void *vlib_add_trace (vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
u32 total_length_not_including_first_buffer
    Only valid for first buffer in chain.
struct _vlib_node_registration vlib_node_registration_t
vhost_user_vring_t vrings[VHOST_VRING_MAX_N]
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
#define vhost_user_log_dirty_ring(vui, vq, member)
static_always_inline void vnet_feature_start_device_input_x1 (u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
#define vec_foreach(var, vec)
    Vector iterator.
u16 flags
    Copy of main node flags.
#define CLIB_MEMORY_BARRIER()
static void vlib_set_trace_count (vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
u32 flags
    Buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static vlib_buffer_t *vlib_get_buffer (vlib_main_t *vm, u32 buffer_index)
    Translate buffer index into buffer pointer.
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE
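As a usage note, the counter helpers in this list combine in the usual device-input pattern: credit the interface's combined (packets, bytes) rx counter once per frame rather than once per packet. A hedged sketch of that pattern; the interface-main access path and the counter index are assumptions, while the helper signatures match the declarations above:

/* Sketch: per-frame rx accounting for a device input node. */
static void
rx_accounting_sketch (vnet_main_t * vnm, u32 sw_if_index,
		      uword n_rx_packets, uword n_rx_bytes)
{
  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
     vlib_get_thread_index (), sw_if_index, n_rx_packets, n_rx_bytes);
}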