#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__

/* in map_guest_mem (): fast path, try the hinted region first */
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
                    ((vui->regions[i].guest_phys_addr +
                      vui->regions[i].memory_size) > addr)))
    return (void *) (vui->region_mmap_addr[i] + addr -
                     vui->regions[i].guest_phys_addr);
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  /* compare addr against the [lo, hi) bounds of regions 0-7,
     two regions per iteration */
  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) &vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  /* gather one match byte per region into the low 8 bytes */
  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
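  /* The selection step elided by this extract (a sketch, assuming
   * VPP's published implementation): after the shuffle, byte i of r is
   * 0xff iff region i matched, so the byte movemask yields one bit per
   * region and count_trailing_zeros () picks the first hit. */
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
                            (1 << VHOST_MEMORY_MAX_NREGIONS));
  if (i < VHOST_MEMORY_MAX_NREGIONS)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
                       vui->regions[i].guest_phys_addr);
    }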
#elif __aarch64__ && __ARM_NEON
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /* regions 0 and 1 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);
  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* regions 2 and 3 */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);
  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /* regions 6 and 7 (bits 6 and 7 of the match mask) */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[6]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[6]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);
  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
                       vui->regions[i].guest_phys_addr);
    }
#else
  /* no SIMD: linear scan of all regions */
  for (i = 0; i < vui->nregions; i++)
    if ((vui->regions[i].guest_phys_addr <= addr) &&
        ((vui->regions[i].guest_phys_addr +
          vui->regions[i].memory_size) > addr))
      {
        *hint = i;
        return (void *) (vui->region_mmap_addr[i] + addr -
                         vui->regions[i].guest_phys_addr);
      }
#endif
  /* no region matched: log an event and fail the mapping */
  ELOG_TYPE_DECLARE (el) =
  {
    .format = "failed to map guest mem addr %lx",
  };
  struct { uword addr; } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
  ed->addr = addr;
  *hint = 0;
  return 0;
}
  /* in map_user_mem (): same linear search, keyed on the region's
   * QEMU virtual (userspace) address */
  if ((vui->regions[i].userspace_addr <= addr) &&
      ((vui->regions[i].userspace_addr +
        vui->regions[i].memory_size) > addr))
#define VHOST_LOG_PAGE 0x1000

  /* in vhost_user_log_dirty_pages_2 (): the offset falls outside the
   * shared dirty log, so refuse to mark it */
  vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
#define vhost_user_log_dirty_ring(vui, vq, member)                        \
  if (PREDICT_FALSE(vq->log_used)) {                                      \
    vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr +                \
                                 STRUCT_OFFSET_OF(vring_used_t, member),  \
                                 sizeof(vq->used->member), 0);            \
  }
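  /* Usage sketch (illustrative; 'txvq' and 'idx' are hypothetical
   * locals): after updating used->idx, mark that part of the used
   * ring dirty so a migrating QEMU picks up the change: */
  vhost_user_log_dirty_ring (vui, txvq, idx);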
  /* in format_vhost_trace (): the interface may already be gone by
   * the time the trace is printed */
  s = format (s, "vhost-user interface is deleted");
  /* expanded once per flag in foreach_virtio_trace_flags */
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);

  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
              format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
              format_white_space, indent,
              t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  s = format (s, "%U num_buff %u",
              format_white_space, indent, t->hdr.num_buffers);
  /* in vhost_user_send_call (): kick the guest by writing the 8-byte
   * eventfd counter on the vring's callfd */
  rv = write (fd, &x, sizeof (x));
  if (rv <= 0)
    {
      clib_unix_warning
        ("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }
  /* in vhost_user_advance_last_avail_table_idx (), chained case:
   * step over every descriptor that links to a next one */
  vring_packed_desc_t *desc_table = vring->packed_desc;

  while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
         VRING_DESC_F_NEXT)
    vhost_user_advance_last_avail_idx (vring);
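  /* Illustration (hypothetical ring state): if descriptors 5, 6 and 7
   * form one chain (VRING_DESC_F_NEXT set on 5 and 6) and
   * last_avail_idx is 5, the loop above advances past 5 and 6, and the
   * caller's final advance leaves last_avail_idx at 8, the head of the
   * next chain. */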
static_always_inline void
vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
                          virtio_net_hdr_mrg_rxbuf_t * hdr,
                          u16 * n_descs_processed)
{
  u16 i;

  /* give back the extra buffers of a mergeable-rx chain */
  *n_descs_processed -= (hdr->num_buffers - 1);
  for (i = 0; i < hdr->num_buffers - 1; i++)
    vhost_user_undo_advanced_last_avail_idx (rxvq);
}

static_always_inline void
vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
                                  u16 * n_descs_processed)
{
  /* give back everything consumed so far */
  while (*n_descs_processed)
    {
      vhost_user_undo_advanced_last_avail_idx (rxvq);
      (*n_descs_processed)--;
    }
}
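  /* Usage sketch (illustrative; 'map_failed' is a hypothetical
   * condition): on a mid-batch error, the TX path can hand every
   * descriptor it already consumed back to the available ring: */
  if (PREDICT_FALSE (map_failed))
    vhost_user_dequeue_chained_descs (rxvq, &n_descs_processed);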
#endif /* __VIRTIO_VHOST_USER_INLINE_H__ */