FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Source listing fragments: virtio device driver, transmit (TX) path.
#include <sys/types.h>

#define VIRTIO_TX_MAX_CHAIN_LEN 127

#define foreach_virtio_tx_func_error \
  _(NO_FREE_SLOTS, "no free tx slots") \
  _(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
  _(PENDING_MSGS, "pending msgs in tx ring") \
  _(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
  _(OUT_OF_ORDER, "out-of-order buffers in used ring") \
  _(GSO_PACKET_DROP, "gso disabled on itf -- gso packet drop") \
  _(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")

#define _(f,s) VIRTIO_TX_ERROR_##f,
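The _(f,s) helper above is VPP's usual error-table idiom: the same foreach_virtio_tx_func_error list is expanded once into an enum of per-node error counters and once into the matching string table (virtio_tx_func_error_strings, referenced further down). A minimal sketch of that expansion; the VIRTIO_TX_N_ERROR terminator name is an assumption here:

typedef enum
{
#define _(f, s) VIRTIO_TX_ERROR_##f,
  foreach_virtio_tx_func_error
#undef _
    VIRTIO_TX_N_ERROR, /* assumed name for the number of error counters */
} virtio_tx_func_error_t;

static char *virtio_tx_func_error_strings[] = {
#define _(n, s) s,
  foreach_virtio_tx_func_error
#undef _
};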
u32 dev_instance = va_arg (*args, u32);
int verbose = va_arg (*args, int);

s = format (s, "VIRTIO interface");

s = format (s, "%Ubuffer 0x%x: %U\n",

int is_ip4 = 0, is_ip6 = 0;

VNET_BUFFER_F_IS_IP4,
b0->flags & VNET_BUFFER_F_IS_IP6);
u16 out_of_order_count = 0;
out_of_order_count++;
if (out_of_order_count)
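These lines are from the split-ring cleanup path: completions that land out of order in the used ring are counted, and if any were seen the vring is flagged (the VRING_TX_OUT_OF_ORDER define appears in the cross-references below) so later frees do not assume contiguous descriptors. A standalone sketch of that accounting, with a simplified used-ring element type assumed for illustration:

#include <stdint.h>

/* simplified mirror of a split used-ring element (vring_used_elem_t) */
typedef struct { uint32_t id; uint32_t len; } used_elem_t;

/* count completions that did not land in the expected descriptor order */
static uint16_t
count_out_of_order (const used_elem_t *used, uint16_t n_used,
                    uint16_t expected_first, uint16_t mask)
{
  uint16_t out_of_order_count = 0, expected = expected_first;
  for (uint16_t i = 0; i < n_used; i++, expected++)
    if (used[i].id != (uint32_t) (expected & mask))
      out_of_order_count++;
  return out_of_order_count; /* caller sets VRING_TX_OUT_OF_ORDER if nonzero */
}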
vring_packed_desc_t *d;

if (b->flags & VNET_BUFFER_F_IS_IP4)
if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
else if (b->flags & VNET_BUFFER_F_IS_IP6)
if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)

if (b->flags & VNET_BUFFER_F_IS_IP4)
hdr->hdr_len = gho.hdr_sz;
if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
else if (b->flags & VNET_BUFFER_F_IS_IP6)
hdr->hdr_len = gho.hdr_sz;
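The branches above are the checksum-offload and GSO header fill: the driver writes the virtio net header so the device knows where to checksum and how to segment. A self-contained sketch of the TCP/IPv4 case against a local mirror of the virtio 1.x net header layout (constants and field layout follow the virtio spec; the helper name and parameters are illustrative only):

#include <stdint.h>

/* local mirror of the virtio 1.x net header (virtio_net_hdr_v1 layout) */
typedef struct
{
  uint8_t flags;        /* e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM */
  uint8_t gso_type;     /* e.g. VIRTIO_NET_HDR_GSO_TCPV4 / _TCPV6 */
  uint16_t hdr_len;     /* bytes of l2+l3+l4 headers (gho.hdr_sz above) */
  uint16_t gso_size;    /* MSS for GSO packets */
  uint16_t csum_start;  /* offset where the device starts checksumming */
  uint16_t csum_offset; /* where to store the result, relative to csum_start */
  uint16_t num_buffers;
} net_hdr_t;

#define NET_HDR_F_NEEDS_CSUM 1 /* value per the virtio spec */
#define NET_HDR_GSO_TCPV4    1 /* value per the virtio spec */

/* illustrative fill for a TCP/IPv4 packet with known header offsets */
static void
fill_tcp4_offload (net_hdr_t *hdr, uint16_t l4_offset, uint16_t l4_hdr_sz,
                   uint16_t gso_size, int is_gso)
{
  hdr->flags = NET_HDR_F_NEEDS_CSUM;
  hdr->csum_start = l4_offset;  /* start of the TCP header */
  hdr->csum_offset = 16;        /* offset of the checksum field in a TCP header */
  if (is_gso)
    {
      hdr->gso_type = NET_HDR_GSO_TCPV4;
      hdr->gso_size = gso_size;
      hdr->hdr_len = l4_offset + l4_hdr_sz;
    }
}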
int hdr_sz, int do_gso, int csum_offload, int is_pci,
int is_tun, int is_indirect, int is_any_layout)

u32 drop_inline = ~0;

if (b->flags & VNET_BUFFER_F_GSO)
drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
else if (b->flags & VNET_BUFFER_F_OFFLOAD)
drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
else if (is_indirect)
u32 indirect_buffer = 0;
drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
bi = indirect_buffer;
while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
VIRTIO_TX_ERROR_TRUNC_PACKET, 1);

u16 n_buffers_in_chain = 1;
while (b_temp->flags & VLIB_BUFFER_NEXT_PRESENT)
n_buffers_in_chain++;
if (n_buffers_in_chain > free_desc_count)
return n_buffers_in_chain;
while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
~(VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID);
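The chained-buffer path above first measures how many buffers the packet spans before committing any descriptors, returning early when the chain does not fit into the free descriptors (chains longer than VIRTIO_TX_MAX_CHAIN_LEN are truncated and counted as VIRTIO_TX_ERROR_TRUNC_PACKET). A minimal standalone sketch of that count-then-commit check, with a simplified buffer type assumed for illustration:

#include <stdint.h>

#define NEXT_PRESENT     (1u << 0) /* stands in for VLIB_BUFFER_NEXT_PRESENT */
#define TX_MAX_CHAIN_LEN 127       /* mirrors VIRTIO_TX_MAX_CHAIN_LEN */

typedef struct buf { uint32_t flags; struct buf *next; } buf_t;

/* count the buffers a packet chain spans, stopping at the chain limit */
static uint16_t
count_chain (const buf_t *b)
{
  uint16_t n_buffers_in_chain = 1;
  while ((b->flags & NEXT_PRESENT) && b->next &&
         n_buffers_in_chain < TX_MAX_CHAIN_LEN)
    {
      n_buffers_in_chain++;
      b = b->next;
    }
  return n_buffers_in_chain;
}

/* enqueue only when the whole chain fits into the free descriptors */
static int
chain_fits (const buf_t *b, uint16_t free_desc_count)
{
  return count_chain (b) <= free_desc_count;
}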
if (drop_inline != ~0)

u16 next, int hdr_sz, int do_gso, int csum_offload,
int is_pci, int is_tun, int is_indirect,

u32 drop_inline = ~0;

if (b->flags & VNET_BUFFER_F_GSO)
drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
else if (b->flags & VNET_BUFFER_F_OFFLOAD)
drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
else if (is_indirect)
u32 indirect_buffer = 0;
drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
bi = indirect_buffer;
vring_packed_desc_t *id =
while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
VIRTIO_TX_ERROR_TRUNC_PACKET, 1);
d->len = count * sizeof (vring_packed_desc_t);
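With indirect descriptors, the packet chain is described in a side table and the ring descriptor itself just points at that table, with its length set to count * sizeof (vring_packed_desc_t) as above. A standalone sketch of filling such a table for the packed ring, using a local mirror of the descriptor layout and the VRING_DESC_F_INDIRECT value from the virtio 1.1 spec (the helper name and parameters are illustrative):

#include <stdint.h>

/* local mirror of a packed-ring descriptor (virtio 1.1 layout) */
typedef struct { uint64_t addr; uint32_t len; uint16_t id; uint16_t flags; } pdesc_t;

#define DESC_F_INDIRECT 4 /* mirrors VRING_DESC_F_INDIRECT */

/* one indirect entry per chain segment, then point the ring slot at the table;
   the AVAIL/USED wrap flags the real driver also sets are omitted here */
static void
fill_indirect (pdesc_t *ring_desc, pdesc_t *table, uint64_t table_pa,
               const uint64_t *seg_pa, const uint32_t *seg_len, uint16_t count)
{
  for (uint16_t i = 0; i < count; i++)
    {
      table[i].addr = seg_pa[i]; /* physical address of segment i */
      table[i].len = seg_len[i];
      table[i].flags = 0;        /* indirect entries are consumed in order */
    }
  ring_desc->addr = table_pa;
  ring_desc->len = count * sizeof (pdesc_t);
  ring_desc->flags = DESC_F_INDIRECT;
}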
if (drop_inline != ~0)

const int csum_offload)
int is_pci = (type == VIRTIO_IF_TYPE_PCI);
int is_tun = (type == VIRTIO_IF_TYPE_TUN);

while (n_buffers_left && used < sz)
vm, node, vif, vring, bi, next, hdr_sz, do_gso, csum_offload,
is_pci, is_tun, is_indirect, is_any_layout);

while (n_left && used < sz)
vm, node, vif, vring, buffers[0], next, hdr_sz, do_gso, csum_offload,
is_pci, is_tun, is_indirect, is_any_layout);

if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
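After making descriptors available, the driver kicks the device only if the device has not suppressed notifications through its event-suppression area; that is what the device_event->flags != VRING_EVENT_F_DISABLE check guards before virtio_kick. A standalone sketch of that decision with simplified types (the 0 = enable, 1 = disable values are from the virtio 1.1 spec):

#include <stdint.h>

#define RING_EVENT_FLAGS_ENABLE  0 /* device wants notifications */
#define RING_EVENT_FLAGS_DISABLE 1 /* device has notifications disabled */

/* simplified mirror of the packed-ring event suppression structure */
typedef struct { uint16_t off_wrap; uint16_t flags; } desc_event_t;

/* notify (kick) the device only when it has not disabled notifications;
   the real driver issues a memory barrier before reading the flags */
static void
maybe_kick (const desc_event_t *device_event, void (*kick) (void *), void *ctx)
{
  if (device_event->flags != RING_EVENT_FLAGS_DISABLE)
    kick (ctx);
}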
u16 *free_desc_count)

if (*first_free_desc_index == ~0)
*first_free_desc_index = (next & mask);
(*free_desc_count)++;
if (start + *free_desc_count == i)
(*free_desc_count)++;
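virtio_find_free_desc scans the split ring starting from the hint next, records the first reclaimable slot, and then counts how many immediately following slots are also free so the caller can reuse a contiguous run. A standalone sketch of that scan; treating a slot value of ~0 as "free" is an assumption made here for illustration:

#include <stdint.h>

#define FREE_SLOT 0xffffffffu /* assumed sentinel for a reclaimed slot */

/* scan up to 'size' slots from 'next'; record the first free index and the
   number of contiguous free slots after it, up to 'req' */
static void
find_free_desc (const uint32_t *buffers, uint16_t size, uint16_t mask,
                uint16_t req, uint16_t next,
                uint32_t *first_free_desc_index, uint16_t *free_desc_count)
{
  uint16_t start = 0;
  for (uint16_t i = 0; i < size && req; i++, next++)
    {
      if (buffers[next & mask] != FREE_SLOT)
        continue;
      if (*first_free_desc_index == ~0u)
        {
          *first_free_desc_index = (next & mask);
          start = i;
          (*free_desc_count)++;
          req--;
        }
      else if (start + *free_desc_count == i)
        {
          (*free_desc_count)++;
          req--;
        }
      else
        break; /* the free region stopped being contiguous */
    }
}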
u16 n_left, int do_gso, int csum_offload)
int is_pci = (type == VIRTIO_IF_TYPE_PCI);
int is_tun = (type == VIRTIO_IF_TYPE_TUN);

u16 free_desc_count = 0;
u32 first_free_desc_index = ~0;
&first_free_desc_index, &free_desc_count);
next = first_free_desc_index;
free_desc_count = sz - used;

while (n_buffers_left && free_desc_count)
hdr_sz, do_gso, csum_offload, is_pci,
is_tun, is_indirect, is_any_layout);
free_desc_count -= n_added;

while (n_left && free_desc_count)
avail, next, mask, hdr_sz, do_gso, csum_offload,
is_pci, is_tun, is_indirect, is_any_layout);
free_desc_count -= n_added;

do_gso, csum_offload);
do_gso, csum_offload);

int packed = vif->is_packed;
if (vif->packet_coalesce)

u16 retry_count = 2;
if (vif->type == VIRTIO_IF_TYPE_TAP)
else if (vif->type == VIRTIO_IF_TYPE_PCI)
else if (vif->type == VIRTIO_IF_TYPE_TUN)
if (n_left && retry_count--)
if (vif->packet_buffering && n_left)
VIRTIO_TX_ERROR_NO_FREE_SLOTS);
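The TX node fragments above show the top-level strategy: try to enqueue the frame, retry after reclaiming used descriptors (retry_count = 2), then either hand leftovers to the interface's packet-buffering queue or drop them with the NO_FREE_SLOTS error. A standalone sketch of that control flow, with hypothetical helper callbacks standing in for the VPP internals:

#include <stdint.h>

typedef uint16_t (*enqueue_fn) (uint32_t *buffers, uint16_t n_left);

/* sketch of the enqueue / retry / buffer-or-drop sequence */
static void
tx_with_retry (uint32_t *buffers, uint16_t n_left, int packet_buffering,
               enqueue_fn try_enqueue, enqueue_fn store_for_later,
               void (*drop_no_free_slots) (uint32_t *, uint16_t))
{
  uint16_t retry_count = 2;

  n_left = try_enqueue (buffers, n_left);
  while (n_left && retry_count--)
    n_left = try_enqueue (buffers, n_left); /* used descs may have been freed */

  if (packet_buffering && n_left)
    n_left -= store_for_later (buffers, n_left);

  if (n_left)
    drop_no_free_slots (buffers, n_left);   /* VIRTIO_TX_ERROR_NO_FREE_SLOTS */
}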
vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
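These two assignments are the heart of the admin up/down handler: record the admin state in vif->flags and reflect it to vnet as hardware link state. A hedged sketch of that pattern, assuming the VPP virtio headers are included and using a hypothetical function name (the real handler also validates the interface and its backend state):

/* sketch only; not the actual handler */
static clib_error_t *
virtio_admin_up_down_sketch (vnet_main_t *vnm, virtio_if_t *vif,
                             u32 hw_if_index, u32 flags)
{
  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
    {
      vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, hw_if_index,
                                   VNET_HW_INTERFACE_FLAG_LINK_UP);
    }
  else
    {
      vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
      vnet_hw_interface_set_flags (vnm, hw_if_index, 0);
    }
  return 0;
}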
Cross-referenced declarations collected by the documentation generator for this listing:

static void virtio_free_used_device_desc_packed(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
u32 next_buffer
Next buffer for this linked-list of buffers.
static_always_inline u32 vnet_gro_inline(vlib_main_t *vm, gro_flow_table_t *flow_table, u32 *from, u16 n_left_from, u32 *to)
coalesce buffers with flow tables
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers: frees the entire buffer chain for each buffer.
static u8 * format_virtio_tx_trace(u8 *s, va_list *va)
gro_flow_table_t * flow_table
static clib_error_t * virtio_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
vlib_frame_t * frame
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
static char * virtio_tx_func_error_strings[]
static_always_inline u8 * format_generic_header_offset(u8 *s, va_list *args)
#define VNET_HW_INTERFACE_CAP_SUPPORTS_L4_TX_CKSUM
vnet_hw_interface_capabilities_t caps
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
static u16 virtio_interface_tx_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int packed, int do_gso, int csum_offload)
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static uword pointer_to_uword(const void *p)
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
generic_header_offset_t gho
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)
#define foreach_virtio_tx_func_error
struct _tcp_header tcp_header_t
static void virtio_tx_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0, u32 bi, int is_tun)
vlib_node_runtime_t * node
static void virtio_free_used_device_desc(vlib_main_t *vm, virtio_vring_t *vring, uword node_index, int packed)
static u16 add_buffer_to_slot_packed(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, u32 bi, u16 next, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
#define clib_error_return(e, args...)
static_always_inline u16 virtio_vring_n_buffers(virtio_vring_buffering_t *buffering)
u32 per_interface_next_index
static void virtio_set_rx_interrupt(virtio_if_t *vif, virtio_vring_t *vring)
VNET_SW_INTERFACE_FLAG_ADMIN_UP (enum value)
VNET_HW_IF_RX_MODE_POLLING (enum value)
vlib_main_t * vm
#define VRING_DESC_F_USED
VNET_HW_INTERFACE_FLAG_LINK_UP (enum value)
#define VIRTIO_FEATURE(X)
vlib_simple_counter_main_t * sw_if_counters
static void virtio_set_rx_polling(virtio_if_t *vif, virtio_vring_t *vring)
u32 interrupt_queues_count
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
#define VNET_DEVICE_CLASS_TX_FN(devclass)
static void virtio_find_free_desc(virtio_vring_t *vring, u16 size, u16 mask, u16 req, u16 next, u32 *first_free_desc_index, u16 *free_desc_count)
static u16 virtio_interface_tx_split_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int do_gso, int csum_offload)
vlib_node_registration_t virtio_send_interrupt_node
(constructor) VLIB_REGISTER_NODE (virtio_send_interrupt_node)
static uword virtio_interface_tx_packed_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, const int do_gso, const int csum_offload)
virtio_main_t virtio_main
#define STRUCT_OFFSET_OF(t, f)
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
static void virtio_interface_drop_inline(vlib_main_t *vm, virtio_if_t *vif, uword node_index, u32 *buffers, u16 n, virtio_tx_func_error_t error)
VNET_DEVICE_CLASS_TX_FN() virtio_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
static void vlib_increment_simple_counter(vlib_simple_counter_main_t *cm, u32 thread_index, u32 index, u64 increment)
Increment a simple counter.
#define VIRTIO_NET_HDR_GSO_TCPV4
static_always_inline void clib_memset_u32(void *p, u32 val, uword count)
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
static clib_error_t * virtio_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
#define CLIB_MEMORY_STORE_BARRIER()
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
static void virtio_free_used_device_desc_split(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
#define VRING_DESC_F_AVAIL
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
vnet_main_t * vnet_get_main(void)
#define VIRTIO_EVENT_STOP_TIMER
VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO (enum value)
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
static void virtio_memset_ring_u32(u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
static_always_inline u32 virtio_vring_buffering_read_from_front(virtio_vring_buffering_t *buffering)
#define VRING_DESC_F_INDIRECT
#define VRING_DESC_F_NEXT
#define GRO_TO_VECTOR_SIZE(X)
#define clib_atomic_load_seq_cst(a)
#define VRING_AVAIL_F_NO_INTERRUPT
#define CLIB_MEMORY_BARRIER()
#define VRING_USED_F_NO_NOTIFY
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
u16 current_length
Number of bytes between current data and the end of this buffer.
static void set_checksum_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
vring_desc_event_t * driver_event
static u16 add_buffer_to_slot(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, u32 bi, u16 free_desc_count, u16 avail, u16 next, u16 mask, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
#define VRING_TX_OUT_OF_ORDER
static void virtio_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
static u8 * format_virtio_device(u8 *s, va_list *args)
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
#define VIRTIO_NET_HDR_GSO_TCPV6
vring_packed_desc_t * packed_desc
#define VIRTIO_EVENT_START_TIMER
#define clib_atomic_store_seq_cst(a, b)
#define VIRTIO_NET_HDR_F_NEEDS_CSUM
#define VIRTIO_TX_MAX_CHAIN_LEN
virtio_vring_t * rxq_vrings
static u16 virtio_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, virtio_if_type_t type, u32 *buffers, u16 n_left, int packed)
format_function_t format_virtio_device_name
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
static vlib_main_t * vlib_get_main(void)
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
static_always_inline void vnet_generic_header_offset_parser(vlib_buffer_t *b0, generic_header_offset_t *gho, int is_l2, int is_ip4, int is_ip6)
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
static u16 ip4_header_checksum(ip4_header_t *i)
vring_used_elem_t ring[0]
virtio_vring_buffering_t * buffering
static void set_gso_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
vring_desc_event_t * device_event
static void virtio_clear_hw_interface_counters(u32 instance)
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
static_always_inline u16 virtio_vring_buffering_store_packets(virtio_vring_buffering_t *buffering, u32 *bi, u16 n_store)
static clib_error_t * virtio_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
@ VNET_INTERFACE_COUNTER_DROP
vnet_interface_main_t interface_main
format_function_t format_vnet_buffer
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
vlib_buffer_t (struct): VLIB buffer representation.