#include <sys/types.h>

#define foreach_virtio_tx_func_error \
  _(NO_FREE_SLOTS, "no free tx slots") \
  _(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
  _(PENDING_MSGS, "pending msgs in tx ring") \
  _(NO_TX_QUEUES, "no tx queues") \
  _(OUT_OF_ORDER, "out-of-order buffers in used ring")

#define _(f,s) VIRTIO_TX_ERROR_##f,

/* format_virtio_device () */
u32 dev_instance = va_arg (*args, u32);
int verbose = va_arg (*args, int);
/* ... */
s = format (s, "VIRTIO interface");

/* format_virtio_tx_trace () */
s = format (s, "Unimplemented...");

/* virtio_memset_ring_u32 () */
ASSERT (n_buffers <= ring_size);
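For context, the foreach_virtio_tx_func_error macro at the top of the excerpt above follows the standard VPP error-counter pattern: the same list is expanded once into an enum (the `#define _(f,s) VIRTIO_TX_ERROR_##f,` fragment) and once into a string table (virtio_tx_func_error_strings appears in the reference listing below). A sketch of both expansions; the exact layout in device.c may differ slightly:

/* Expand the foreach list into the error enum ... */
typedef enum
{
#define _(f,s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR,
} virtio_tx_func_error_t;

/* ... and into the matching counter-name table. */
static char *virtio_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_virtio_tx_func_error
#undef _
};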
/* virtio_free_used_device_desc () */
u16 out_of_order_count = 0;
/* ... */
struct vring_used_elem *e = &vring->used->ring[last & mask];
slot = n_buffers = e->id;
/* ... */
while (e->id == (n_buffers & mask))
  {
    /* ... */
    e = &vring->used->ring[last & mask];
  }
vlib_buffer_free_from_ring (vm, vring->buffers, slot,
                            sz, (n_buffers - slot));
/* ... */
used -= (n_buffers - slot);
/* ... */
out_of_order_count++;
/* ... */
if (out_of_order_count)
  vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
                    out_of_order_count);
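To clarify what the reclamation loop above does: the device returns buffer ids through the used ring, and a run of consecutive ids can be freed as one batch with vlib_buffer_free_from_ring, while any break in the sequence is handled one buffer at a time and counted as out of order. A minimal self-contained sketch of the in-order batching test; used_elem and count_in_order are simplified stand-ins, not VPP API:

#include <stdint.h>
#include <stdio.h>

struct used_elem { uint32_t id; uint32_t len; };

/* Count how many entries starting at 'last' return buffers in ring order.
 * An in-order batch can be freed with one ranged call; anything past the
 * break would be treated as out of order. */
static uint16_t
count_in_order (struct used_elem *ring, uint16_t last, uint16_t n_used,
                uint16_t mask)
{
  uint16_t n = 0;
  uint32_t expect = ring[last & mask].id;
  while (n < n_used && ring[(uint16_t) (last + n) & mask].id == (expect & mask))
    {
      n++;
      expect++;
    }
  return n;
}

int
main (void)
{
  /* ids 2,3 are consecutive; id 5 breaks the run. */
  struct used_elem ring[8] = { {2, 64}, {3, 64}, {5, 64}, {4, 64} };
  printf ("in-order batch: %u\n", count_in_order (ring, 0, 4, 7)); /* 2 */
  return 0;
}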
/* set_checksum_offsets () */
static_always_inline void
set_checksum_offsets (vlib_main_t * vm, virtio_if_t * vif, vlib_buffer_t * b,
                      struct virtio_net_hdr_v1 * hdr)
{
  if (b->flags & VNET_BUFFER_F_IS_IP4)
    {
      /* ... */
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      /* ... */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
      /* virtio devices do not offload the IPv4 header checksum, so the
         driver computes it on tx when requested (ip4 points at the
         packet's IPv4 header) */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
        ip4->checksum = ip4_header_checksum (ip4);
    }
  else if (b->flags & VNET_BUFFER_F_IS_IP6)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      /* ... */
      if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
      else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
        hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
    }
}
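The csum_start/csum_offset pair set above follows the virtio-net offload contract: the device checksums from csum_start to the end of the packet and writes the result at csum_start + csum_offset. A self-contained sketch with illustrative fixed offsets for TCP over untagged IPv4; the real code derives these via vnet_gso_header_offset_parser:

#include <stdint.h>

/* Simplified stand-in for the checksum fields of struct virtio_net_hdr_v1. */
struct vnet_hdr_csum { uint16_t csum_start; uint16_t csum_offset; };

int
main (void)
{
  /* Assume untagged Ethernet (14 B) + IPv4 without options (20 B). */
  const uint16_t l4_hdr_offset = 14 + 20;  /* 0x22 */
  /* The TCP checksum field sits 16 bytes into the TCP header. */
  const uint16_t tcp_csum_field = 16;

  struct vnet_hdr_csum h = {
    .csum_start = l4_hdr_offset,    /* device checksums from L4 onward */
    .csum_offset = tcp_csum_field,  /* result stored at start + offset */
  };
  (void) h;
  return 0;
}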
/* add_buffer_to_slot () */
static_always_inline u16
add_buffer_to_slot (vlib_main_t * vm, virtio_if_t * vif,
                    virtio_vring_t * vring, u32 bi, u16 avail, u16 next,
                    u16 mask, int do_gso, int csum_offload)
{
  /* ... */
  struct vring_desc *d;
  d = &vring->desc[next];
  /* ... */
  if (do_gso && (b->flags & VNET_BUFFER_F_GSO))
    {
      if (b->flags & VNET_BUFFER_F_IS_IP4)
        {
          /* ... */
          hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
          /* ... */
          hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
          /* ... */
          if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
            ip4->checksum = ip4_header_checksum (ip4);
        }
      else if (b->flags & VNET_BUFFER_F_IS_IP6)
        {
          /* ... */
          hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
          /* ... */
          hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
          /* ... */
        }
    }
  else if (csum_offload
           && (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
                           VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
    set_checksum_offsets (vm, vif, b, hdr);
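In the GSO branch the header additionally carries the segment size and total header length so the backend can resegment the superframe. A self-contained sketch of the fields involved; net_hdr and the constant values are simplified stand-ins for struct virtio_net_hdr_v1, with the field order per the virtio spec:

#include <stdint.h>

/* Simplified stand-in for struct virtio_net_hdr_v1. */
struct net_hdr
{
  uint8_t flags;       /* e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM */
  uint8_t gso_type;    /* e.g. VIRTIO_NET_HDR_GSO_TCPV4 */
  uint16_t hdr_len;    /* Ethernet + IP + TCP header bytes */
  uint16_t gso_size;   /* payload bytes per resulting segment */
  uint16_t csum_start;
  uint16_t csum_offset;
};

enum { F_NEEDS_CSUM = 1, GSO_TCPV4 = 1 };  /* illustrative values */

int
main (void)
{
  struct net_hdr hdr = {
    .flags = F_NEEDS_CSUM,
    .gso_type = GSO_TCPV4,
    .hdr_len = 14 + 20 + 20,  /* headers prepended to every segment */
    .gso_size = 1460,         /* MSS for a 1500-byte MTU (1500 - 40) */
    .csum_start = 14 + 20,    /* checksum from the TCP header onward */
    .csum_offset = 16,        /* TCP checksum field offset within L4 */
  };
  (void) hdr;
  return 0;
}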
  /* ... indirect-descriptor path, used for chained buffers ... */
  u32 indirect_buffer = 0;
  /* ... */
  indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
  /* ... */
  bi = indirect_buffer;
  /* ... */
  struct vring_desc *id =
    (struct vring_desc *) vlib_buffer_get_current (indirect_desc);
  /* ... */
  id->flags = VRING_DESC_F_NEXT;
  /* walk the vlib buffer chain, one vring_desc per segment */
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      id->flags = VRING_DESC_F_NEXT;
      /* ... */
    }
  /* ... */
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      id->flags = VRING_DESC_F_NEXT;
      /* ... */
    }
  /* the ring-level descriptor points at the indirect table */
  d->len = count * sizeof (struct vring_desc);
  d->flags = VRING_DESC_F_INDIRECT;
  /* ... */
  vring->avail->ring[avail & mask] = next;
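The indirect path packs one vring_desc per buffer segment into a table held in a spare vlib buffer; the main ring then carries a single VRING_DESC_F_INDIRECT entry whose addr points at that table and whose len is count * sizeof (struct vring_desc). A standalone sketch of the table-building loop using plain virtual addresses (on PCI the real code uses physical addresses); seg and build_indirect are illustrative names, not VPP API:

#include <stdint.h>
#include <stddef.h>

#define DESC_F_NEXT     1
#define DESC_F_INDIRECT 4

struct vring_desc { uint64_t addr; uint32_t len; uint16_t flags; uint16_t next; };
struct seg { void *data; uint32_t len; struct seg *next; };

/* Chain every segment into the indirect table; returns the entry count. */
static uint16_t
build_indirect (struct vring_desc *id, struct seg *s)
{
  uint16_t count = 0;
  for (; s; s = s->next, count++)
    {
      id[count].addr = (uintptr_t) s->data;
      id[count].len = s->len;
      id[count].flags = s->next ? DESC_F_NEXT : 0;
      id[count].next = s->next ? count + 1 : 0;
    }
  return count;
}

int
main (void)
{
  uint8_t a[64], b[64];
  struct seg s1 = { b, sizeof (b), NULL };
  struct seg s0 = { a, sizeof (a), &s1 };

  struct vring_desc table[4];
  uint16_t count = build_indirect (table, &s0);  /* count == 2 */

  /* The ring-level descriptor points at the table itself. */
  struct vring_desc d;
  d.addr = (uintptr_t) table;
  d.len = count * (uint32_t) sizeof (struct vring_desc);
  d.flags = DESC_F_INDIRECT;
  (void) d;
  return 0;
}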
/* virtio_find_free_desc () */
static_always_inline void
virtio_find_free_desc (virtio_vring_t * vring, u16 size, u16 mask,
                       u16 req, u16 next, u32 * first_free_desc_index,
                       u16 * free_desc_count)
{
  /* ... */
  if (vring->buffers[next & mask] == ~0)
    {
      if (*first_free_desc_index == ~0)
        {
          *first_free_desc_index = (next & mask);
          /* ... */
          (*free_desc_count)++;
        }
      /* ... */
      if (start + *free_desc_count == i)
        (*free_desc_count)++;
      /* ... */
    }
}
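virtio_find_free_desc scans the vring's buffers[] side table, where reclaimed slots are marked ~0, looking for a contiguous run of free descriptors starting near `next`. A simplified standalone version of that scan; find_free_run is an illustrative name and the logic is condensed relative to the excerpt above:

#include <stdint.h>
#include <stdio.h>

/* Find up to 'req' consecutive free slots (marked ~0), starting the
 * search at 'next'; returns the run length and writes the first index. */
static uint16_t
find_free_run (uint32_t *buffers, uint16_t size, uint16_t mask,
               uint16_t req, uint16_t next, uint32_t *first)
{
  uint16_t count = 0;
  *first = ~0u;
  for (uint16_t i = 0; i < size && count < req; i++, next++)
    {
      if (buffers[next & mask] != ~0u)
        {
          if (count)    /* run broken before req was reached */
            break;
          continue;     /* still looking for the first free slot */
        }
      if (*first == ~0u)
        *first = next & mask;
      count++;
    }
  return count;
}

int
main (void)
{
  uint32_t buffers[8] = { 7, ~0u, ~0u, ~0u, 9, ~0u, ~0u, ~0u };
  uint32_t first;
  uint16_t n = find_free_run (buffers, 8, 7, 4, 0, &first);
  printf ("%u free slots starting at %u\n", n, first);  /* 3 at 1 */
  return 0;
}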
/* virtio_interface_tx_inline () */
static_always_inline uword
virtio_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                            vlib_frame_t * frame, virtio_if_t * vif,
                            int do_gso, int csum_offload)
{
  /* ... */
  u16 used, next, avail;
  /* ... */
  avail = vring->avail->idx;
  u16 free_desc_count = 0;
  /* ... */
  u32 first_free_desc_index = ~0;
  virtio_find_free_desc (vring, sz, mask, n_left, next,
                         &first_free_desc_index, &free_desc_count);
  /* ... */
  next = first_free_desc_index;
  /* ... */
  free_desc_count = sz - used;

  while (n_left && free_desc_count)
    {
      n_added = add_buffer_to_slot (vm, vif, vring, buffers[0], avail,
                                    next, mask, do_gso, csum_offload);
      /* ... */
      next = (next + n_added) & mask;
    }
  /* ... */
  vring->avail->idx = avail;
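One detail the excerpt above compresses: in the transmit path the store to vring->avail->idx is ordered after all descriptor writes; the reference listing below includes CLIB_MEMORY_STORE_BARRIER, which the driver uses for exactly this publish step. A hedged standalone sketch of the pattern using a GCC-style fence as a stand-in:

#include <stdint.h>

struct vring_avail { uint16_t flags; uint16_t idx; uint16_t ring[8]; };

/* Stand-in for CLIB_MEMORY_STORE_BARRIER(): make earlier descriptor
 * stores visible before the index publish. */
#define store_barrier() __atomic_thread_fence (__ATOMIC_RELEASE)

static void
publish_avail (volatile struct vring_avail *avail, uint16_t new_idx)
{
  /* avail->ring[] and the descriptors were written before this point;
   * the device may consume them as soon as idx moves forward. */
  store_barrier ();
  avail->idx = new_idx;
}

int
main (void)
{
  struct vring_avail avail = { 0, 0, { 0 } };
  avail.ring[0] = 5;           /* descriptor slot 5 is ready */
  publish_avail (&avail, 1);   /* the device may now see slot 5 */
  return 0;
}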
/* virtio_set_interface_next_node () */
if (node_index == ~0)
  /* ... */

/* virtio_interface_admin_up_down () */
vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
/* ... */
vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
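Admin up/down thus reduces to setting or clearing one bit in the interface flags word. A trivial standalone illustration; the bit position is illustrative, not the actual VIRTIO_IF_FLAG_ADMIN_UP value:

#include <stdint.h>
#include <assert.h>

#define IF_FLAG_ADMIN_UP (1 << 0)  /* illustrative bit position */

int
main (void)
{
  uint32_t flags = 0;
  flags |= IF_FLAG_ADMIN_UP;            /* admin up */
  assert (flags & IF_FLAG_ADMIN_UP);
  flags &= ~IF_FLAG_ADMIN_UP;           /* admin down */
  assert (!(flags & IF_FLAG_ADMIN_UP));
  return 0;
}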
u32 per_interface_next_index
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers. Frees the entire buffer chain for each buffer (see the usage sketch at the end of this listing).
#define foreach_virtio_tx_func_error
vnet_main_t * vnet_get_main(void)
i16 current_data
Signed offset in data[], pre_data[] that we are currently processing.
#define CLIB_MEMORY_STORE_BARRIER()
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
clib_memset(h->entries, 0, sizeof(h->entries[0]) * entries)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u16 current_length
Number of bytes between current data and the end of this buffer.
static heap_elt_t * last(heap_header_t *h)
#define STRUCT_OFFSET_OF(t, f)
struct _tcp_header tcp_header_t
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static_always_inline u16 add_buffer_to_slot(vlib_main_t *vm, virtio_if_t *vif, virtio_vring_t *vring, u32 bi, u16 avail, u16 next, u16 mask, int do_gso, int csum_offload)
static char * virtio_tx_func_error_strings[]
vnet_hw_interface_rx_mode
struct vring_avail * avail
#define static_always_inline
VNET_DEVICE_CLASS_TX_FN() virtio_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
vnet_hw_interface_flags_t flags
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static void virtio_clear_hw_interface_counters(u32 instance)
#define clib_error_return(e, args...)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
static clib_error_t * virtio_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
static_always_inline void virtio_memset_ring_u32(u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
u32 node_index
Node index.
static u8 * format_virtio_device(u8 *s, va_list *args)
static void virtio_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
virtio_vring_t * rxq_vrings
format_function_t format_virtio_device_name
static_always_inline uword virtio_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, virtio_if_t *vif, int do_gso, int csum_offload)
static u8 * format_virtio_tx_trace(u8 *s, va_list *args)
#define VIRTIO_RING_FLAG_MASK_INT
static clib_error_t * virtio_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
static_always_inline void set_checksum_offsets(vlib_main_t *vm, virtio_if_t *vif, vlib_buffer_t *b, struct virtio_net_hdr_v1 *hdr)
static clib_error_t * virtio_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
vlib_node_runtime_t * node
#define VIRTIO_FEATURE(X)
static_always_inline void virtio_find_free_desc(virtio_vring_t *vring, u16 size, u16 mask, u16 req, u16 next, u32 *first_free_desc_index, u16 *free_desc_count)
static uword pointer_to_uword(const void *p)
virtio_main_t virtio_main
static vlib_main_t * vlib_get_main(void)
#define VRING_TX_OUT_OF_ORDER
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
u32 next_buffer
Next buffer for this linked-list of buffers.
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
VLIB buffer representation.
static_always_inline void virtio_free_used_device_desc(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
vlib_frame_t * frame
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static u16 ip4_header_checksum(ip4_header_t *i)
static_always_inline gso_header_offset_t vnet_gso_header_offset_parser(vlib_buffer_t *b0, int is_ip6)
virtio_vring_t * txq_vrings
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)
VNET_DEVICE_CLASS(avf_device_class,)
static_always_inline void clib_memset_u32(void *p, u32 val, uword count)
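To tie several of the buffer helpers listed above together, here is a hedged sketch of the usual allocate/inspect/free cycle. alloc_touch_free is a hypothetical helper, and the snippet assumes a VPP build environment for the vlib header; only functions and signatures from the listing are used:

#include <vlib/vlib.h>

/* Hypothetical helper, for illustration only. */
static void
alloc_touch_free (vlib_main_t *vm)
{
  u32 bi;
  if (vlib_buffer_alloc (vm, &bi, 1) != 1)        /* allocate one buffer */
    return;
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);    /* index -> pointer */
  u8 *data = vlib_buffer_get_current (b);         /* current payload */
  (void) data;
  vlib_buffer_free (vm, &bi, 1);      /* frees the entire buffer chain */
}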