18 #include <sys/types.h> 22 #include <linux/if_tun.h> 23 #include <sys/ioctl.h> 24 #include <sys/eventfd.h> 37 #define foreach_virtio_input_error \ 42 #define _(f,s) VIRTIO_INPUT_ERROR_##f, 60 struct virtio_net_hdr_v1 hdr;
/* Fragment of format_virtio_input_trace(): pretty-prints a virtio RX
 * trace record (hw_if_index, next node index, vring id, packet length)
 * followed by the captured virtio_net_hdr_v1 fields.
 * NOTE(review): this chunk is extraction residue — the embedded numbers
 * (71, 73, 74, 76, 77) are original-file line numbers fused into the
 * text, and interior lines are missing.  Code left byte-identical. */
71 s =
format (s,
"virtio: hw_if_index %d next-index %d vring %u len %u",
/* Second format() call appends the decoded virtio net header. */
73 s =
format (s,
"\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u " 74 "gso_size %u csum_start %u csum_offset %u num_buffers %u",
76 t->
hdr.flags, t->
hdr.gso_type, t->
hdr.hdr_len, t->
hdr.gso_size,
77 t->
hdr.csum_start, t->
hdr.csum_offset, t->
hdr.num_buffers);
/* Fragment of virtio_refill_vring(): replenishes RX descriptors on the
 * avail ring so the device always has buffers to write into.
 * NOTE(review): extraction residue — embedded numbers are original-file
 * line numbers and many interior lines (buffer allocation, the kick)
 * are missing.  Code left byte-identical. */
85 u16 used, next, avail, n_slots;
/* Early-out below 1/8th free: only refill when at least sz/8 slots
 * have been consumed — presumably amortizes refill cost; TODO confirm
 * against the full source. */
92 if (sz - used < sz / 8)
99 avail = vring->
avail->idx;
102 vring->
size, n_slots,
/* NOTE(review): stray double semicolon after desc[next] in the
 * original text — harmless empty statement, but worth cleaning up
 * when the full function is available. */
110 struct vring_desc *d = &vring->
desc[next];;
/* Device-writable descriptor (RX direction). */
125 d->flags = VRING_DESC_F_WRITE;
126 vring->
avail->ring[avail & mask] = next;
/* Ring indices wrap via power-of-two mask. */
128 next = (next + 1) & mask;
/* Publish the new avail index to the device. */
133 vring->
avail->idx = avail;
/* Fragment of virtio_needs_csum(): when the device set
 * VIRTIO_NET_HDR_F_NEEDS_CSUM, parses the L2 header (including up to
 * two VLAN tags), classifies the L3 ethertype, and sets vlib buffer
 * offload/offset-valid flags plus *l4_proto / *l4_hdr_sz outputs.
 * NOTE(review): extraction residue — embedded numbers are original-file
 * line numbers; the IP4/IP6 branch headers and offset computations are
 * among the missing interior lines.  Code left byte-identical. */
146 u8 * l4_proto,
u8 * l4_hdr_sz)
151 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
156 u16 ethertype = clib_net_to_host_u16 (eh->
type);
/* First VLAN tag: step past it and re-read the inner ethertype. */
163 ethertype = clib_net_to_host_u16 (vlan->
type);
164 l2hdr_sz +=
sizeof (*vlan);
/* Second (QinQ) VLAN tag handled the same way — this repeated check
 * is intentional double-tag support, not a copy/paste bug. */
165 if (ethertype == ETHERNET_TYPE_VLAN)
168 ethertype = clib_net_to_host_u16 (vlan->
type);
169 l2hdr_sz +=
sizeof (*vlan);
/* IPv4 path: mark IP4 + request IP checksum offload. */
182 (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
184 (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
185 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
186 VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
/* IPv6 path: no IP checksum exists, only header offsets are marked. */
195 b0->
flags |= (VNET_BUFFER_F_IS_IP6 |
196 VNET_BUFFER_F_L2_HDR_OFFSET_VALID
197 | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
198 VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
200 if (*l4_proto == IP_PROTOCOL_TCP)
202 b0->
flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
205 (b0)->l4_hdr_offset);
209 else if (*l4_proto == IP_PROTOCOL_UDP)
211 b0->
flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
214 (b0)->l4_hdr_offset);
/* UDP header has fixed size, unlike TCP (options). */
215 *l4_hdr_sz =
sizeof (*udp);
/* Fragment of fill_gso_buffer_flags(): translates the device-supplied
 * virtio_net_hdr gso_type into vlib GSO buffer flags.  A GSO packet is
 * expected to also carry NEEDS_CSUM (asserted, debug builds only).
 * NOTE(review): extraction residue — embedded numbers are original-file
 * line numbers; gso_size bookkeeping lines are missing.  Code left
 * byte-identical. */
224 u8 l4_proto,
u8 l4_hdr_sz)
226 if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
228 ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
231 b0->
flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
233 if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
235 ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
238 b0->
flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
/* Fragment of virtio_device_input_inline(): the per-queue RX loop.
 * Walks the used ring, converts each used element into a vlib buffer
 * (stripping the virtio net header of hdr_sz bytes from the length),
 * chains multi-descriptor packets via num_buffers, applies optional
 * checksum-offload parsing, and enqueues to the next graph node.
 * NOTE(review): extraction residue — embedded numbers are original-file
 * line numbers; trace handling, counters and the refill call are among
 * the missing interior lines.  Code left byte-identical. */
245 int gso_enabled,
int checksum_offload_enabled)
254 u32 n_rx_packets = 0;
270 u32 next0 = next_index;
/* Standard vlib dual bound: packets remaining AND frame space left. */
273 while (n_left && n_left_to_next)
275 u8 l4_proto, l4_hdr_sz;
277 struct vring_used_elem *e = &vring->
used->ring[last & mask];
278 struct virtio_net_hdr_v1 *hdr;
/* Device wrote hdr_sz bytes of virtio header before the packet. */
280 u16 len = e->len - hdr_sz;
/* num_buffers only exists in the v1 (mergeable) header layout. */
284 if (hdr_sz ==
sizeof (
struct virtio_net_hdr_v1))
285 num_buffers = hdr->num_buffers;
290 b0->
flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
292 if (checksum_offload_enabled)
/* Mergeable RX: pull the remaining num_buffers-1 used elements and
 * chain them onto the head buffer. */
306 while (num_buffers > 1)
309 e = &vring->
used->ring[last & mask];
318 pb->
flags |= VLIB_BUFFER_NEXT_PRESENT;
362 n_left_to_next, bi0, next0);
/* Fragment of the virtio-input node function and its registration:
 * only admin-up interfaces are polled; the node is a sibling of
 * device-input and starts in interrupt mode.
 * NOTE(review): extraction residue — embedded numbers are original-file
 * line numbers; the foreach_device_and_queue loop and the rest of the
 * VLIB_REGISTER_NODE initializer are missing.  Code left
 * byte-identical. */
396 if (vif->
flags & VIRTIO_IF_FLAG_ADMIN_UP)
415 .name =
"virtio-input",
416 .sibling_of =
"device-input",
420 .state = VLIB_NODE_STATE_INTERRUPT,
u32 per_interface_next_index
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
vnet_main_t * vnet_get_main(void)
vnet_interface_main_t interface_main
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
#define CLIB_MEMORY_STORE_BARRIER()
#define clib_memcpy_fast(a, b, c)
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
u16 current_length
Nbytes between current data and the end of this buffer.
static heap_elt_t * last(heap_header_t *h)
#define VLIB_NODE_FN(node)
static u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
struct _tcp_header tcp_header_t
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
static_always_inline void virtio_needs_csum(vlib_buffer_t *b0, struct virtio_net_hdr_v1 *hdr, u8 *l4_proto, u8 *l4_hdr_sz)
struct vring_avail * avail
#define static_always_inline
vlib_combined_counter_main_t * combined_sw_if_counters
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static u8 * format_virtio_input_trace(u8 *s, va_list *args)
static char * virtio_input_error_strings[]
#define foreach_virtio_input_error
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
virtio_vring_t * rxq_vrings
#define VLIB_REGISTER_NODE(x,...)
#define VIRTIO_RING_FLAG_MASK_INT
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
vlib_main_t vlib_node_runtime_t * node
static_always_inline int ethernet_frame_is_tagged(u16 type)
static uword pointer_to_uword(const void *p)
virtio_main_t virtio_main
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
#define foreach_device_and_queue(var, vec)
static_always_inline uword virtio_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, virtio_if_t *vif, u16 qid, int gso_enabled, int checksum_offload_enabled)
u32 next_buffer
Next buffer for this linked-list of buffers.
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
VLIB buffer representation.
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
static int tcp_header_bytes(tcp_header_t *t)
static_always_inline void virtio_refill_vring(vlib_main_t *vm, virtio_if_t *vif, virtio_vring_t *vring, const int hdr_sz)
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
static int ip4_header_bytes(const ip4_header_t *i)
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
static_always_inline void fill_gso_buffer_flags(vlib_buffer_t *b0, struct virtio_net_hdr_v1 *hdr, u8 l4_proto, u8 l4_hdr_sz)
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)