#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>

#define VHOST_USER_DOWN_DISCARD_COUNT 256
#define VHOST_USER_RX_BUFFER_STARVATION 32
#define VHOST_USER_RX_COPY_THRESHOLD 64

#define foreach_vhost_user_input_func_error \
  _(NO_ERROR, "no error") \
  _(NO_BUFFER, "no available buffer") \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor overflows table") \
  _(UNDERSIZED_FRAME, "undersized ethernet frame received (< 14 bytes)") \
  _(FULL_RX_QUEUE, "full rx queue (possible driver tx drop)")

typedef enum
{
#define _(f,s) VHOST_USER_INPUT_FUNC_ERROR_##f,
  foreach_vhost_user_input_func_error
#undef _
    VHOST_USER_INPUT_FUNC_N_ERROR,
} vhost_user_input_func_error_t;
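/* Hedged sketch: by the usual VPP X-macro convention the same error list is
 * expanded a second time into the per-counter description strings handed to
 * the node registration; the variable name here is reconstructed from that
 * convention, not verbatim from the file. */
static char *vhost_user_input_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_input_func_error
#undef _
};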
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  /* ... */
  hdr_desc = &txvq->desc[desc_current];
  /* ... */
  if (!(txvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(txvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      /* ... */
    }
  /* ... */
  memcpy (&t->hdr, hdr, len > hdr_desc->len ? hdr_desc->len : len);
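/* A descriptor with neither VIRTQ_DESC_F_NEXT nor VIRTQ_DESC_F_INDIRECT set
 * holds the virtio-net header and the frame data in one piece, so the trace
 * can copy the header directly, clamped to hdr_desc->len bytes. */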
static_always_inline u32
vhost_user_input_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		       u16 copy_len, u32 * map_hint)
{
  void *src0, *src1, *src2, *src3;
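/* Hedged sketch (stand-in types, not VPP's): the src0..src3 locals above
 * suggest the pending-copy array is drained four entries at a time with a
 * scalar tail loop. In the real function each source is a guest address that
 * must first be translated with map_guest_mem(); plain pointers are used
 * here to keep the sketch self-contained and runnable. */
#include <stddef.h>
#include <string.h>

typedef struct { void *dst; const void *src; size_t len; } copy_t;

static void
copy_drain (copy_t *cpy, size_t n)
{
  while (n >= 4)
    {
      /* 4-way unrolled body, mirroring the src0..src3 shape above */
      memcpy (cpy[0].dst, cpy[0].src, cpy[0].len);
      memcpy (cpy[1].dst, cpy[1].src, cpy[1].len);
      memcpy (cpy[2].dst, cpy[2].src, cpy[2].len);
      memcpy (cpy[3].dst, cpy[3].src, cpy[3].len);
      cpy += 4;
      n -= 4;
    }
  while (n--)			/* scalar tail */
    {
      memcpy (cpy->dst, cpy->src, cpy->len);
      cpy++;
    }
}

int
main (void)
{
  char a[4] = "abc", b[4];
  copy_t c = { b, a, sizeof (a) };
  copy_drain (&c, 1);
  return b[0] == 'a' ? 0 : 1;
}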
  u32 discarded_packets = 0;
  /* ... */
  while (discarded_packets != discard_max)
    {
      if (avail_idx == last_avail_idx)
	/* ... */
      u16 desc_chain_head = txvq->avail->ring[last_avail_idx & mask];
      /* ... */
      txvq->used->ring[last_used_idx & mask].id = desc_chain_head;
      /* ... */
    }
  /* ... */
  return discarded_packets;
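/* Sketch of the index arithmetic above: virtio rings are power-of-two sized,
 * so the free-running 16-bit indices are reduced with `& mask` instead of a
 * modulo, and u16 wrap-around is harmless. Queue size 256 is just an
 * example value. */
#include <stdint.h>
#include <assert.h>

int
main (void)
{
  const uint16_t qsz = 256;		/* power-of-two queue size */
  const uint16_t mask = qsz - 1;
  uint16_t last_avail_idx = 65535;	/* about to wrap */
  assert ((last_avail_idx & mask) == 255);
  last_avail_idx++;			/* u16 wraps to 0 */
  assert ((last_avail_idx & mask) == 0);
  return 0;
}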
  b_current->flags = 0;
  while (b_current != b_head)
    {
      /* ... */
      b_current->flags = 0;
    }
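/* Rewind path: when a packet cannot be completed (for instance the buffer
 * allocator runs dry mid-chain), the partially filled buffers are walked
 * back from the last one to the chain head and their flags cleared so they
 * can be reused for the next descriptor chain. */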
static_always_inline void
vhost_user_handle_rx_offload (vlib_buffer_t * b0, u8 * b0_data,
			      virtio_net_hdr_t * hdr)
{
  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    {
      /* ... */
      u16 ethertype = clib_net_to_host_u16 (eh->type);
      /* ... */
      ethertype = clib_net_to_host_u16 (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == ETHERNET_TYPE_VLAN)
	{
	  /* ... */
	  ethertype = clib_net_to_host_u16 (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
      /* ... */
      b0->flags |= (VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
		    VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
		    VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
      /* ... */
      b0->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
      /* ... */
      b0->flags |= VNET_BUFFER_F_IS_IP6;

      if (l4_proto == IP_PROTOCOL_TCP)
	{
	  /* ... */
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
	}
      else if (l4_proto == IP_PROTOCOL_UDP)
	{
	  /* ... */
	  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
	}

      if (hdr->gso_type == VIRTIO_NET_HDR_GSO_UDP)
	{
	  /* ... */
	  b0->flags |= VNET_BUFFER_F_GSO;
	}
      else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
	{
	  /* ... */
	  b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4);
	}
      else if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
	{
	  /* ... */
	  b0->flags |= (VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6);
	}
    }
}
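/* Hedged, self-contained sketch of the L2 sizing logic above. The struct
 * layouts and the bare 0x8100 TPID stand in for VPP's ethernet_header_t,
 * ethernet_vlan_header_t and ETHERNET_TYPE_VLAN; up to two 802.1Q tags
 * (QinQ) are peeled, exactly as in the fragment. */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

typedef struct { uint8_t dst[6], src[6]; uint16_t type; } eth_hdr_t;
typedef struct { uint16_t tci; uint16_t type; } vlan_hdr_t;

static unsigned
l2_header_size (const uint8_t *pkt)
{
  const eth_hdr_t *eh = (const eth_hdr_t *) pkt;
  uint16_t ethertype = ntohs (eh->type);
  unsigned l2hdr_sz = sizeof (*eh);	/* 14-byte Ethernet header */

  if (ethertype == 0x8100)		/* outer 802.1Q tag */
    {
      const vlan_hdr_t *vlan = (const vlan_hdr_t *) (eh + 1);
      ethertype = ntohs (vlan->type);
      l2hdr_sz += sizeof (*vlan);
      if (ethertype == 0x8100)		/* inner tag (QinQ) */
	{
	  vlan++;
	  ethertype = ntohs (vlan->type);
	  l2hdr_sz += sizeof (*vlan);
	}
    }
  return l2hdr_sz;
}

int
main (void)
{
  uint8_t pkt[64] = { 0 };
  pkt[12] = 0x81; pkt[13] = 0x00;	/* outer TPID 0x8100 */
  pkt[16] = 0x08; pkt[17] = 0x00;	/* inner ethertype IPv4 */
  printf ("l2 header size: %u\n", l2_header_size (pkt));	/* prints 18 */
  return 0;
}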
static_always_inline u32
vhost_user_if_input (vlib_main_t * vm, vhost_user_main_t * vum,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_node_runtime_t * node,
		     vnet_hw_interface_rx_mode mode, u8 enable_csum)
{
  u16 n_rx_packets = 0;
  /* ... */
  u32 n_left_to_next, *to_next;
  /* ... */
  u32 current_config_index = ~(u32) 0;
  /* ... */
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_FULL_RX_QUEUE, 1);
  /* ... */
  vlib_increment_simple_counter (vnet_main.interface_main.sw_if_counters +
				 VNET_INTERFACE_COUNTER_DROP,
				 vm->thread_index, vui->sw_if_index, flush);
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_NO_BUFFER, flush);
  u32 desc_data_offset;
  /* ... */
  desc_current = txvq->avail->ring[last_avail_idx & mask];
  /* ... */
  to_next[0] = bi_current;
  /* ... */
  txvq->used->ring[last_used_idx & mask].id = desc_current;
  /* ... */
  b_head->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  /* ... */
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
  /* ... */
  virtio_net_hdr_mrg_rxbuf_t *hdr;
  /* ... */
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
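/* MMAP_FAIL is counted whenever map_guest_mem() cannot translate a guest
 * physical address into this process's address space, i.e. a descriptor
 * points outside the memory regions the guest shared over the vhost-user
 * socket. */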
  if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
    /* ... */

  if ((desc_data_offset == desc_table[desc_current].len) &&
      (desc_table[desc_current].flags & VIRTQ_DESC_F_NEXT))
    {
      current = desc_table[desc_current].next;
      /* ... */
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }
  /* ... */
  b_data = (u8 *) hdr + desc_data_offset;
  /* ... */
  if (desc_data_offset == desc_table[desc_current].len)
    {
      /* ... */
      desc_current = desc_table[desc_current].next;
      desc_data_offset = 0;
    }
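/* Hedged sketch of the chain walk above with minimal stand-in types:
 * consume a descriptor until desc_data_offset reaches its len, then follow
 * .next while the NEXT flag is set. */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DESC_F_NEXT 1	/* stands in for VIRTQ_DESC_F_NEXT */

typedef struct
{
  uint32_t len;
  uint16_t flags;
  uint16_t next;
} desc_t;

int
main (void)
{
  desc_t table[3] = {
    {.len = 64, .flags = SKETCH_DESC_F_NEXT, .next = 1},
    {.len = 128, .flags = SKETCH_DESC_F_NEXT, .next = 2},
    {.len = 32, .flags = 0},
  };
  uint16_t cur = 0;
  uint32_t total = 0;
  for (;;)
    {
      total += table[cur].len;	/* consume this descriptor's data */
      if (!(table[cur].flags & SKETCH_DESC_F_NEXT))
	break;
      cur = table[cur].next;	/* follow the chain */
    }
  printf ("chain holds %u bytes\n", total);	/* prints 224 */
  return 0;
}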
  b_current->flags |= VLIB_BUFFER_NEXT_PRESENT;
  bi_current = bi_next;
  /* ... */
  u32 desc_data_l = desc_table[desc_current].len - desc_data_offset;
  /* ... */
  cpy->len = (cpy->len > desc_data_l) ? desc_data_l : cpy->len;
  /* ... */
  cpy->src = desc_table[desc_current].addr + desc_data_offset;
  /* ... */
  desc_data_offset += cpy->len;
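/* Each queued copy is clamped to the smaller of the space left in the
 * current vlib buffer and the data remaining in the current descriptor
 * (desc_data_l); the copies themselves are batched in cpu->copy and
 * executed later by vhost_user_input_copy() rather than performed inline. */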
  if (current_config_index != ~(u32) 0)
    {
      /* ... */
      vnet_buffer (b_head)->feature_arc_index = feature_arc_idx;
    }
  /* ... */
  if (PREDICT_FALSE (vhost_user_input_copy (vui, cpu->copy,
					    copy_len, &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
    }
  /* ... */
  CLIB_MEMORY_STORE_BARRIER ();
  txvq->used->idx = last_used_idx;
  /* ... */
  vlib_error_count (vm, node->node_index,
		    VHOST_USER_INPUT_FUNC_ERROR_MMAP_FAIL, 1);
  /* ... */
  vlib_increment_combined_counter (vnet_main.interface_main.
				   combined_sw_if_counters +
				   VNET_INTERFACE_COUNTER_RX,
				   /* ... */
				   n_rx_packets, n_rx_bytes);
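/* The file publishes the used index only after CLIB_MEMORY_STORE_BARRIER()
 * so the guest never observes the new idx before the ring entries it covers.
 * A C11 sketch of the same ordering expressed as a release store: */
#include <stdatomic.h>
#include <stdint.h>

static inline void
publish_used_idx (_Atomic uint16_t *used_idx, uint16_t new_idx)
{
  /* all earlier stores (the used-ring entries) become visible first */
  atomic_store_explicit (used_idx, new_idx, memory_order_release);
}

int
main (void)
{
  _Atomic uint16_t used_idx = 0;
  publish_used_idx (&used_idx, 42);
  return atomic_load (&used_idx) == 42 ? 0 : 1;
}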
  uword n_rx_packets = 0;
  /* ... */
  if ((node->state == VLIB_NODE_STATE_POLLING) ||
      (dq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    {
      /* ... */
      if (vui->features & (1ULL << FEAT_VIRTIO_NET_F_CSUM))
	/* ... */
    }
  /* ... */

VLIB_REGISTER_NODE (vhost_user_input_node) = {
  .name = "vhost-user-input",
  .sibling_of = "device-input",
  /* ... */
  .state = VLIB_NODE_STATE_DISABLED,
  /* ... */
};
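/* .sibling_of = "device-input" gives this node the same next-node arcs as
 * device-input, so vhost-user packets enter the graph like those of any
 * other input node; the node is registered DISABLED and presumably switched
 * to polling or interrupt mode once a vhost-user interface is brought up. */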