#include <sys/types.h>
#include <linux/if_tun.h>
#include <sys/ioctl.h>
#include <linux/virtio_net.h>
#include <linux/vhost.h>
#include <sys/eventfd.h>

#define foreach_virtio_input_error \
  /* ... _(symbol, "description") entries ... */

#define _(f,s) TAP_INPUT_ERROR_##f,

/* trailing member of the per-packet trace record consumed by
   format_virtio_input_trace() below */
struct virtio_net_hdr_v1 hdr;
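The error list above follows the usual VPP X-macro pattern: the same list is expanded once into an enum of counter indices and once into the matching string table (virtio_input_error_strings appears in the symbol list at the end of this page). A minimal sketch of that expansion is shown below; the foreach_virtio_input_error_example list and its BUFFER_ALLOC entry are placeholders, not the file's actual error entries.

/* Sketch of the X-macro expansion; the list contents are assumed examples. */
#define foreach_virtio_input_error_example \
  _(BUFFER_ALLOC, "buffer alloc error")

typedef enum
{
#define _(f,s) TAP_INPUT_ERROR_##f,
  foreach_virtio_input_error_example
#undef _
    TAP_INPUT_N_ERROR,
} virtio_input_error_example_t;

static char *virtio_input_error_strings_example[] = {
#define _(n,s) s,
  foreach_virtio_input_error_example
#undef _
};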
static u8 *
format_virtio_input_trace (u8 * s, va_list * args)
{
  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
  u32 indent = format_get_indent (s);
  /* ... */

  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
              t->hw_if_index, t->next_index, t->ring, t->len);
  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
              "gso_size %u csum_start %u csum_offset %u num_buffers %u",
              format_white_space, indent + 2,
              t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
              t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
  return s;
}
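The trace record this formatter prints has to be filled in on the receive path. The sketch below shows one plausible way that could look, using only helpers documented in the symbol list at the end of this page (vlib_trace_buffer, vlib_set_trace_count, vlib_add_trace, clib_memcpy). The trace type name virtio_input_trace_t, its fields and the local variables (n_trace, b0, next0, vif, hdr, len, hdr_sz) are assumptions inferred from the surrounding fragments, not verbatim source.

/* Sketch: capture a trace entry for head buffer b0 inside the RX loop.
   Names of locals and of the vif->hw_if_index field are assumed. */
if (PREDICT_FALSE (n_trace > 0))
  {
    virtio_input_trace_t *tr;

    vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
    vlib_set_trace_count (vm, node, --n_trace);

    tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
    tr->next_index = next0;
    tr->hw_if_index = vif->hw_if_index;
    tr->len = len;
    clib_memcpy (&tr->hdr, hdr, hdr_sz);   /* keep the virtio net header */
  }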
static_always_inline void
virtio_refill_vring (vlib_main_t * vm, virtio_vring_t * vring)
{
  const int hdr_sz = sizeof (struct virtio_net_hdr_v1);
  u16 used, next, avail, n_slots;
  /* ... */

  /* nothing to do unless at least 1/8 of the ring is free */
  if (sz - used < sz / 8)
    return;

  /* ... */
  avail = vring->avail->idx;
  /* ... */

      struct vring_desc *d = &vring->desc[next];
      /* ... */
      d->flags = VRING_DESC_F_WRITE;
      vring->avail->ring[avail & mask] = next;
      /* ... */
      next = (next + 1) & mask;
  /* ... */

  vring->avail->idx = avail;
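The gaps in the fragment above hide the buffer allocation and the descriptor-fill loop. Below is a hedged sketch of how such a refill typically proceeds, relying only on helpers documented in the symbol list further down (vlib_buffer_alloc_to_ring, vlib_get_buffer, vlib_buffer_get_current, pointer_to_uword, VLIB_BUFFER_DATA_SIZE, CLIB_MEMORY_STORE_BARRIER). The virtio_vring_t field names used here (size, buffers, desc_next, desc_in_use) are assumptions, and the chunk size of 64 is an illustrative choice.

/* Sketch of the refill path hidden by the gaps above; vring field names
   (size, buffers, desc_next, desc_in_use) are assumed. */
u16 sz = vring->size;
u16 mask = sz - 1;

used = vring->desc_in_use;
next = vring->desc_next;
avail = vring->avail->idx;
n_slots = clib_min (sz - used, 64);               /* refill in bounded chunks */
n_slots = vlib_buffer_alloc_to_ring (vm, vring->buffers, next, sz, n_slots);

while (n_slots)
  {
    struct vring_desc *d = &vring->desc[next];
    vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);

    /* leave room for the virtio net header in front of the packet data */
    d->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
    d->len = VLIB_BUFFER_DATA_SIZE + hdr_sz;
    d->flags = VRING_DESC_F_WRITE;

    vring->avail->ring[avail & mask] = next;
    avail++;
    next = (next + 1) & mask;
    n_slots--;
    used++;
  }

/* publish the new avail index only after the descriptors are fully written */
CLIB_MEMORY_STORE_BARRIER ();
vring->avail->idx = avail;
vring->desc_next = next;
vring->desc_in_use = used;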
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif, u16 qid)
{
  const int hdr_sz = sizeof (struct virtio_net_hdr_v1);
  u32 n_rx_packets = 0;
  /* ... */
      u32 next0 = next_index;
      /* ... */
      while (n_left && n_left_to_next)
        {
          struct vring_used_elem *e = &vring->used->ring[last & mask];
          struct virtio_net_hdr_v1 *hdr;
          /* ... */
          u16 len = e->len - hdr_sz;   /* strip the virtio net header */
          /* ... */
          num_buffers = hdr->num_buffers;
          /* ... */
          b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          /* ... */

          /* chained descriptors: the packet spans more than one ring buffer */
          while (num_buffers > 1)
            {
              /* ... */
              e = &vring->used->ring[last & mask];
              /* ... */
              pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
              /* ... */
            }
          /* ... */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
          /* ... */
        }
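Two pieces hidden by the gaps above are worth spelling out: how a packet that spans several ring buffers (num_buffers > 1) gets chained, and how the finished head buffer is pushed into the next node's frame and counted. The sketches below are built from the helpers and buffer fields documented in the symbol list at the end of this page (next_buffer, VLIB_BUFFER_NEXT_PRESENT, total_length_not_including_first_buffer, vlib_get_next_frame, vlib_validate_buffer_enqueue_x1, vlib_put_next_frame, vlib_increment_combined_counter). The vring->buffers array, vif->sw_if_index and the local variable names are assumptions.

/* Sketch: chain the follow-on ring buffers behind the head buffer b0.
   Any current_data adjustment for the header layout is elided here. */
vlib_buffer_t *pb = b0;
while (num_buffers > 1)
  {
    struct vring_used_elem *ce = &vring->used->ring[last & mask];
    u32 cbi = vring->buffers[ce->id];        /* vlib buffer index (assumed mapping) */
    vlib_buffer_t *cb = vlib_get_buffer (vm, cbi);

    cb->current_length = ce->len;            /* bytes the device filled in this buffer */
    pb->next_buffer = cbi;
    pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
    b0->total_length_not_including_first_buffer += ce->len;

    pb = cb;
    last++;
    num_buffers--;
  }

/* Sketch: the standard vlib enqueue pattern wrapped around the RX loop. */
while (n_left)
  {
    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
    while (n_left && n_left_to_next)
      {
        /* ... receive one packet into bi0 / b0, choose next0 ... */
        to_next[0] = bi0;
        to_next += 1;
        n_left_to_next -= 1;

        vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                         n_left_to_next, bi0, next0);
        n_left--;
      }
    vlib_put_next_frame (vm, node, next_index, n_left_to_next);
  }

/* Sketch: per-frame RX counter update once the loop is done. */
vlib_increment_combined_counter (vnet_get_main ()->interface_main.combined_sw_if_counters
                                 + VNET_INTERFACE_COUNTER_RX,
                                 vlib_get_thread_index (),
                                 vif->sw_if_index, n_rx_packets, n_rx_bytes);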
  if (mif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
    {
      /* ... */
    }

VLIB_REGISTER_NODE (virtio_input_node) = {
  /* ... */
  .name = "virtio-input",
  .sibling_of = "device-input",
  /* ... */
  .state = VLIB_NODE_STATE_INTERRUPT,
  /* ... */
};
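For context, the admin-up check above sits inside the node function, which walks the devices and queues assigned to this worker and calls the inline receive routine for each. A hedged sketch of that dispatch follows, built from symbols in the list below (virtio_input_fn, virtio_main, runtime_data, foreach_device_and_queue, vec_elt_at_index, virtio_device_input_inline); the interfaces vector inside virtio_main and the dq->dev_instance / dq->queue_id field names are assumptions.

/* Sketch: per-node dispatch over the assigned devices and RX queues. */
static uword
virtio_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  virtio_main_t *vim = &virtio_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    virtio_if_t *mif = vec_elt_at_index (vim->interfaces, dq->dev_instance);
    if (mif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
      n_rx += virtio_device_input_inline (vm, node, frame, mif, dq->queue_id);
  }
  return n_rx;
}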
Referenced symbols and data fields:

u32 per_interface_next_index
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
u8 runtime_data[0]
Function dependent node-runtime data.
vnet_main_t * vnet_get_main(void)
vnet_interface_main_t interface_main
#define CLIB_MEMORY_STORE_BARRIER()
static_always_inline void virtio_refill_vring(vlib_main_t *vm, virtio_vring_t *vring)
static_always_inline uword virtio_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, virtio_if_t *vif, u16 qid)
static uword virtio_input_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
struct vring_avail * avail
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
#define static_always_inline
vlib_combined_counter_main_t * combined_sw_if_counters
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static u8 * format_virtio_input_trace(u8 *s, va_list *args)
static char * virtio_input_error_strings[]
#define foreach_virtio_input_error
u16 current_length
Nbytes between current data and the end of this buffer.
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process (see the sketch after this list).
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
#define VLIB_REGISTER_NODE(x,...)
static_always_inline uword vlib_get_thread_index(void)
#define VIRTIO_RING_FLAG_MASK_INT
#define clib_memcpy(a, b, c)
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
#define VLIB_BUFFER_DATA_SIZE
u32 next_buffer
Next buffer for this linked-list of buffers.
static uword pointer_to_uword(const void *p)
virtio_main_t virtio_main
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
#define foreach_device_and_queue(var, vec)
static u32 vlib_buffer_alloc_to_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Allocate buffers into ring.
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
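As a closing illustration of the buffer fields documented above (current_data, current_length, flags, next_buffer, total_length_not_including_first_buffer), here is a small sketch, assuming the usual vlib headers, of how a consumer could walk a chained packet produced by this node; the helper name example_packet_length is hypothetical.

/* Hypothetical helper: total byte count of a possibly chained packet,
   using only the documented vlib_buffer_t fields. */
static u32
example_packet_length (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);   /* buffer index -> pointer */
  u8 *data = vlib_buffer_get_current (b);        /* == b->data + b->current_data */
  u32 len = b->current_length;                   /* bytes in this buffer */

  (void) data;   /* a real consumer would start parsing headers here */

  /* follow the chain while VLIB_BUFFER_NEXT_PRESENT is set */
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      len += b->current_length;
    }
  return len;
}

When VLIB_BUFFER_TOTAL_LENGTH_VALID is set on the head buffer, this total should match current_length plus total_length_not_including_first_buffer.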