FD.io VPP  v21.06-3-gbb25fbf28
Vector Packet Processing
vhost_user_inline.h
/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __VIRTIO_VHOST_USER_INLINE_H__
#define __VIRTIO_VHOST_USER_INLINE_H__
/* vhost-user inline functions */
#include <vppinfra/elog.h>
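
/*
 * map_guest_mem: translate a guest physical address into a pointer within
 * the mmap'ed vhost-user memory regions. The *hint index caches the region
 * that matched last time; when available, SSE4.2 or NEON compares the
 * address against all region bounds in parallel instead of scanning them
 * one by one. A failed lookup emits an elog entry and returns 0.
 *
 * Typical use from a data-path loop (illustrative sketch only; the variable
 * names below are hypothetical, not taken from this file):
 *
 *   u32 map_hint = 0;
 *   void *src = map_guest_mem (vui, desc_table[i].addr, &map_hint);
 *   if (PREDICT_FALSE (src == 0))
 *     ;  // drop or error-count the descriptor
 */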
static_always_inline void *
map_guest_mem (vhost_user_intf_t * vui, uword addr, u32 * hint)
{
  int i = *hint;
  if (PREDICT_TRUE ((vui->regions[i].guest_phys_addr <= addr) &&
                    ((vui->regions[i].guest_phys_addr +
                      vui->regions[i].memory_size) > addr)))
    {
      return (void *) (vui->region_mmap_addr[i] + addr -
                       vui->regions[i].guest_phys_addr);
    }
#if __SSE4_2__
  __m128i rl, rh, al, ah, r;
  al = _mm_set1_epi64x (addr + 1);
  ah = _mm_set1_epi64x (addr);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[0]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[0]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_and_si128 (rl, rh);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[2]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[2]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x22);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[4]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[4]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x44);

  rl = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_lo[6]);
  rl = _mm_cmpgt_epi64 (al, rl);
  rh = _mm_loadu_si128 ((__m128i *) & vui->region_guest_addr_hi[6]);
  rh = _mm_cmpgt_epi64 (rh, ah);
  r = _mm_blend_epi16 (r, _mm_and_si128 (rl, rh), 0x88);

  r = _mm_shuffle_epi8 (r, _mm_set_epi64x (0, 0x0e060c040a020800));
  i = count_trailing_zeros (_mm_movemask_epi8 (r) |
                            (1 << VHOST_MEMORY_MAX_NREGIONS));

  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
                       vui->regions[i].guest_phys_addr);
    }
#elif __aarch64__ && __ARM_NEON
  uint64x2_t al, ah, rl, rh, r;
  uint32_t u32 = 0;

  al = vdupq_n_u64 (addr + 1);
  ah = vdupq_n_u64 (addr);

  /*First Iteration */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[0]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[0]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= (vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 1);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /*Second Iteration */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[2]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[2]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 2);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 3);

  if (u32)
    {
      i = count_trailing_zeros (u32);
      goto vhost_map_guest_mem_done;
    }

  /*Third Iteration */
  rl = vld1q_u64 (&vui->region_guest_addr_lo[4]);
  rl = vcgtq_u64 (al, rl);
  rh = vld1q_u64 (&vui->region_guest_addr_hi[4]);
  rh = vcgtq_u64 (rh, ah);
  r = vandq_u64 (rl, rh);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 0) & 0x1) << 6);
  u32 |= ((vgetq_lane_u8 (vreinterpretq_u8_u64 (r), 8) & 0x1) << 7);

  i = count_trailing_zeros (u32 | (1 << VHOST_MEMORY_MAX_NREGIONS));

vhost_map_guest_mem_done:
  if (i < vui->nregions)
    {
      *hint = i;
      return (void *) (vui->region_mmap_addr[i] + addr -
                       vui->regions[i].guest_phys_addr);
    }
#else
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].guest_phys_addr <= addr) &&
          ((vui->regions[i].guest_phys_addr + vui->regions[i].memory_size) >
           addr))
        {
          *hint = i;
          return (void *) (vui->region_mmap_addr[i] + addr -
                           vui->regions[i].guest_phys_addr);
        }
    }
#endif
  /* *INDENT-OFF* */
  ELOG_TYPE_DECLARE (el) =
  {
    .format = "failed to map guest mem addr %lx",
    .format_args = "i8",
  };
  /* *INDENT-ON* */
  struct
  {
    uword addr;
  } *ed;
  ed = ELOG_DATA (&vlib_global_main.elog_main, el);
  ed->addr = addr;
  *hint = 0;
  return 0;
}
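
/*
 * map_user_mem: translate a driver (QEMU process) virtual address into a
 * pointer within the mmap'ed regions. This path is not performance
 * critical, so a linear scan over the regions is sufficient.
 */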
static_always_inline void *
map_user_mem (vhost_user_intf_t * vui, uword addr)
{
  int i;
  for (i = 0; i < vui->nregions; i++)
    {
      if ((vui->regions[i].userspace_addr <= addr) &&
          ((vui->regions[i].userspace_addr + vui->regions[i].memory_size) >
           addr))
        {
          return (void *) (vui->region_mmap_addr[i] + addr -
                           vui->regions[i].userspace_addr);
        }
    }
  return 0;
}
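
/*
 * Dirty-page logging for live migration: when VHOST_F_LOG_ALL is negotiated,
 * every write into guest memory must be recorded by setting one bit per
 * VHOST_LOG_PAGE (4 KiB) page in the shared log region supplied by the
 * driver.
 */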
#define VHOST_LOG_PAGE 0x1000

static_always_inline void
vhost_user_log_dirty_pages_2 (vhost_user_intf_t * vui,
                              u64 addr, u64 len, u8 is_host_address)
{
  if (PREDICT_TRUE (vui->log_base_addr == 0
                    || !(vui->features & VIRTIO_FEATURE (VHOST_F_LOG_ALL))))
    {
      return;
    }
  if (is_host_address)
    {
      addr = pointer_to_uword (map_user_mem (vui, (uword) addr));
    }
  if (PREDICT_FALSE ((addr + len - 1) / VHOST_LOG_PAGE / 8 >= vui->log_size))
    {
      vu_log_debug (vui, "vhost_user_log_dirty_pages(): out of range\n");
      return;
    }

  CLIB_MEMORY_BARRIER ();
  u64 page = addr / VHOST_LOG_PAGE;
  while (page * VHOST_LOG_PAGE < addr + len)
    {
      ((u8 *) vui->log_base_addr)[page / 8] |= 1 << page % 8;
      page++;
    }
}


#define vhost_user_log_dirty_ring(vui, vq, member) \
  if (PREDICT_FALSE(vq->log_used)) { \
    vhost_user_log_dirty_pages_2(vui, vq->log_guest_addr + STRUCT_OFFSET_OF(vring_used_t, member), \
                                 sizeof(vq->used->member), 0); \
  }
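
/*
 * format_vhost_trace: packet-trace formatter for the vhost-user nodes. It
 * prints the interface name, queue id, virtio ring flags, and the
 * virtio_net_hdr of the traced descriptor chain.
 */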
static_always_inline u8 *
format_vhost_trace (u8 * s, va_list * va)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
  CLIB_UNUSED (vnet_main_t * vnm) = vnet_get_main ();
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_trace_t *t = va_arg (*va, vhost_trace_t *);
  vhost_user_intf_t *vui = vum->vhost_user_interfaces + t->device_index;
  vnet_sw_interface_t *sw;
  u32 indent;

  if (pool_is_free (vum->vhost_user_interfaces, vui))
    {
      s = format (s, "vhost-user interface is deleted");
      return s;
    }
  sw = vnet_get_sw_interface (vnm, vui->sw_if_index);
  indent = format_get_indent (s);
  s = format (s, "%U %U queue %d\n", format_white_space, indent,
              format_vnet_sw_interface_name, vnm, sw, t->qid);

  s = format (s, "%U virtio flags:\n", format_white_space, indent);
#define _(n,i,st) \
  if (t->virtio_ring_flags & (1 << VIRTIO_TRACE_F_##n)) \
    s = format (s, "%U %s %s\n", format_white_space, indent, #n, st);
  foreach_virtio_trace_flags
#undef _
  s = format (s, "%U virtio_net_hdr first_desc_len %u\n",
              format_white_space, indent, t->first_desc_len);

  s = format (s, "%U flags 0x%02x gso_type %u\n",
              format_white_space, indent,
              t->hdr.hdr.flags, t->hdr.hdr.gso_type);

  if (vui->virtio_net_hdr_sz == 12)
    s = format (s, "%U num_buff %u",
                format_white_space, indent, t->hdr.num_buffers);

  return s;
}
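
/*
 * Feature helpers: both return the raw masked feature bit (non-zero when
 * negotiated) rather than a normalized boolean, which is sufficient for the
 * callers below.
 */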
static_always_inline u64
vhost_user_is_packed_ring_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_F_RING_PACKED));
}

static_always_inline u64
vhost_user_is_event_idx_supported (vhost_user_intf_t * vui)
{
  return (vui->features & VIRTIO_FEATURE (VIRTIO_RING_F_EVENT_IDX));
}
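
/*
 * vhost_user_kick: signal the driver by writing to the callfd eventfd, then
 * reset the interrupt-coalescing state for the vring.
 */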
static_always_inline void
vhost_user_kick (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u64 x = 1;
  int fd = UNIX_GET_FD (vq->callfd_idx);
  int rv;

  rv = write (fd, &x, sizeof (x));
  if (PREDICT_FALSE (rv <= 0))
    {
      clib_unix_warning
        ("Error: Could not write to unix socket for callfd %d", fd);
      return;
    }

  vq->n_since_last_int = 0;
  vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
}
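
/*
 * VIRTIO_RING_F_EVENT_IDX places two extra indices in the spare slot after
 * each split ring: avail_event (after the used ring) tells the driver when
 * to kick, and used_event (after the avail ring) tells the device when to
 * interrupt. These accessors read them through the qsz_mask + 1 slot.
 */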
static_always_inline u16
vhost_user_avail_event_idx (vhost_user_vring_t * vq)
{
  volatile u16 *event_idx = (u16 *) & (vq->used->ring[vq->qsz_mask + 1]);

  return *event_idx;
}

static_always_inline u16
vhost_user_used_event_idx (vhost_user_vring_t * vq)
{
  volatile u16 *event_idx = (u16 *) & (vq->avail->ring[vq->qsz_mask + 1]);

  return *event_idx;
}
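
/*
 * vhost_user_need_event mirrors the virtio spec's vring_need_event(): using
 * unsigned 16-bit wrap-around arithmetic, it returns non-zero when event_idx
 * falls within the (old_idx, new_idx] window just published.
 */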
static_always_inline u16
vhost_user_need_event (u16 event_idx, u16 new_idx, u16 old_idx)
{
  return ((u16) (new_idx - event_idx - 1) < (u16) (new_idx - old_idx));
}
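
/*
 * Split-ring interrupt suppression: only kick the driver if it asked for an
 * event at an index we have now crossed, or if this is the very first kick
 * on the vring; otherwise just rearm the coalescing timer.
 */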
static_always_inline void
vhost_user_send_call_event_idx (vlib_main_t * vm, vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 event_idx = vhost_user_used_event_idx (vq);

  vq->first_kick = 1;
  if (vhost_user_need_event (event_idx, vq->last_used_idx, vq->last_kick) ||
      PREDICT_FALSE (!first_kick))
    {
      vhost_user_kick (vm, vq);
      vq->last_kick = event_idx;
    }
  else
    {
      vq->n_since_last_int = 0;
      vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
    }
}
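
/*
 * Packed-ring variant: the driver's event suppression structure carries an
 * off_wrap field (15-bit offset plus a wrap bit). When the flags request
 * descriptor-based events, compute the effective event index, adjust for
 * ring wrap, and apply the same need-event test; otherwise kick
 * unconditionally.
 */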
static_always_inline void
vhost_user_send_call_event_idx_packed (vlib_main_t * vm,
                                       vhost_user_vring_t * vq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u8 first_kick = vq->first_kick;
  u16 off_wrap;
  u16 event_idx;
  u16 new_idx = vq->last_used_idx;
  u16 old_idx = vq->last_kick;

  if (PREDICT_TRUE (vq->avail_event->flags == VRING_EVENT_F_DESC))
    {
      CLIB_COMPILER_BARRIER ();
      off_wrap = vq->avail_event->off_wrap;
      event_idx = off_wrap & 0x7fff;
      if (vq->used_wrap_counter != (off_wrap >> 15))
        event_idx -= (vq->qsz_mask + 1);

      if (new_idx <= old_idx)
        old_idx -= (vq->qsz_mask + 1);

      vq->first_kick = 1;
      vq->last_kick = event_idx;
      if (vhost_user_need_event (event_idx, new_idx, old_idx) ||
          PREDICT_FALSE (!first_kick))
        vhost_user_kick (vm, vq);
      else
        {
          vq->n_since_last_int = 0;
          vq->int_deadline = vlib_time_now (vm) + vum->coalesce_time;
        }
    }
  else
    vhost_user_kick (vm, vq);
}
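
/*
 * vhost_user_send_call: single entry point used to notify the driver,
 * dispatching to the event-idx aware paths when the feature was negotiated.
 */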
static_always_inline void
vhost_user_send_call (vlib_main_t * vm, vhost_user_intf_t * vui,
                      vhost_user_vring_t * vq)
{
  if (vhost_user_is_event_idx_supported (vui))
    {
      if (vhost_user_is_packed_ring_supported (vui))
        vhost_user_send_call_event_idx_packed (vm, vq);
      else
        vhost_user_send_call_event_idx (vm, vq);
    }
  else
    vhost_user_kick (vm, vq);
}

static_always_inline u8
vui_is_link_up (vhost_user_intf_t * vui)
{
  return vui->admin_up && vui->is_ready;
}

static_always_inline void
vhost_user_update_gso_interface_count (vhost_user_intf_t * vui, u8 add)
{
  vhost_user_main_t *vum = &vhost_user_main;

  if (vui->enable_gso)
    {
      if (add)
        {
          vum->gso_count++;
        }
      else
        {
          ASSERT (vum->gso_count > 0);
          vum->gso_count--;
        }
    }
}
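
/*
 * Packed-ring descriptor helpers. A descriptor is available when its
 * VRING_DESC_F_AVAIL flag matches the ring's avail_wrap_counter; the
 * advance/undo helpers keep last_avail_idx, last_used_idx and the wrap
 * counters consistent as descriptors are consumed or given back.
 */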
static_always_inline u8
vhost_user_packed_desc_available (vhost_user_vring_t * vring, u16 idx)
{
  return (((vring->packed_desc[idx].flags & VRING_DESC_F_AVAIL) ==
           vring->avail_wrap_counter));
}

static_always_inline void
vhost_user_advance_last_avail_idx (vhost_user_vring_t * vring)
{
  vring->last_avail_idx++;
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    {
      vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;
      vring->last_avail_idx = 0;
    }
}

static_always_inline void
vhost_user_advance_last_avail_table_idx (vhost_user_intf_t * vui,
                                         vhost_user_vring_t * vring,
                                         u8 chained)
{
  if (chained)
    {
      vring_packed_desc_t *desc_table = vring->packed_desc;

      /* pick up the slot of the next avail idx */
      while (desc_table[vring->last_avail_idx & vring->qsz_mask].flags &
             VRING_DESC_F_NEXT)
        vhost_user_advance_last_avail_idx (vring);
    }

  vhost_user_advance_last_avail_idx (vring);
}

static_always_inline void
vhost_user_undo_advanced_last_avail_idx (vhost_user_vring_t * vring)
{
  if (PREDICT_FALSE ((vring->last_avail_idx & vring->qsz_mask) == 0))
    vring->avail_wrap_counter ^= VRING_DESC_F_AVAIL;

  if (PREDICT_FALSE (vring->last_avail_idx == 0))
    vring->last_avail_idx = vring->qsz_mask;
  else
    vring->last_avail_idx--;
}
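
/*
 * Roll-back helpers for the packed ring: give back descriptors that were
 * claimed but will not be used, either the extra buffers of a mergeable
 * rx-buffer chain (per hdr->num_buffers) or a run of chained descriptors.
 */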
static_always_inline void
vhost_user_dequeue_descs (vhost_user_vring_t * rxvq,
                          virtio_net_hdr_mrg_rxbuf_t * hdr,
                          u16 * n_descs_processed)
{
  u16 i;

  *n_descs_processed -= (hdr->num_buffers - 1);
  for (i = 0; i < hdr->num_buffers - 1; i++)
    vhost_user_undo_advanced_last_avail_idx (rxvq);
}

static_always_inline void
vhost_user_dequeue_chained_descs (vhost_user_vring_t * rxvq,
                                  u16 * n_descs_processed)
{
  while (*n_descs_processed)
    {
      vhost_user_undo_advanced_last_avail_idx (rxvq);
      (*n_descs_processed)--;
    }
}

static_always_inline void
vhost_user_advance_last_used_idx (vhost_user_vring_t * vring)
{
  vring->last_used_idx++;
  if (PREDICT_FALSE ((vring->last_used_idx & vring->qsz_mask) == 0))
    {
      vring->used_wrap_counter ^= 1;
      vring->last_used_idx = 0;
    }
}
#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */