FD.io VPP v18.07.1-19-g511ce25 (Vector Packet Processing)
vhost_user_output.c
/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
19 
20 #include <fcntl.h> /* for open */
21 #include <sys/ioctl.h>
22 #include <sys/socket.h>
23 #include <sys/un.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <sys/uio.h> /* for iovec */
27 #include <netinet/in.h>
28 #include <sys/vfs.h>
29 
30 #include <linux/if_arp.h>
31 #include <linux/if_tun.h>
32 
33 #include <vlib/vlib.h>
34 #include <vlib/unix/unix.h>
35 
36 #include <vnet/ip/ip.h>
37 
38 #include <vnet/ethernet/ethernet.h>
39 #include <vnet/devices/devices.h>
40 #include <vnet/feature/feature.h>
41 
44 
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum of 64k frames, which may require
 * more array entries.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 40)

vnet_device_class_t vhost_user_device_class;

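/*
 * The threshold is checked once per packet, at the bottom of the main while
 * loop, but a single (possibly 64 KB) packet can still queue several more
 * copy entries before the next check: the virtio-net header plus one entry
 * for every buffer segment / guest descriptor pair it spans.  The 40-entry
 * margin is headroom for that case.
 */
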
#define foreach_vhost_user_tx_func_error \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure")  \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  DBG_SOCK ("renumbered vhost-user interface dev_instance %d to %d",
	    hi->dev_instance, new_dev_instance);

  return 0;
}

/**
 * @brief Try once to lock the vring
 * @return 0 on success, non-zero on failure.
 */
static_always_inline int
vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
{
  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
}

/**
 * @brief Spin until the vring is successfully locked
 */
static_always_inline void
vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
{
  while (vhost_user_vring_try_lock (vui, qid))
    ;
}

/**
 * @brief Unlock the vring lock
 */
static_always_inline void
vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
{
  *vui->vring_locks[qid] = 0;
}
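
/*
 * The helpers above implement a simple test-and-set spinlock per vring.
 * The TX path only takes it when vui->use_tx_spinlock is set, which is
 * presumably the case when a TX queue can be shared by several worker
 * threads.
 */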

static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;

	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
	  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);

	  clib_memcpy (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}
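
/*
 * The loop above is software-pipelined: the guest addresses for copy
 * entries [2] and [3] are translated (and their sources prefetched) while
 * entries [0] and [1] are copied, hiding the map_guest_mem latency.  A
 * failed translation returns non-zero so the caller can count an MMAP_FAIL
 * error.
 */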


uword
CLIB_MULTIARCH_FN (vhost_user_tx) (vlib_main_t * vm,
				   vlib_node_runtime_t * node,
				   vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid = ~0;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid =
    VHOST_VRING_IDX_RX (*vec_elt_at_index
			(vui->per_cpu_tx_qid, thread_index));
  rxvq = &vui->vrings[qid];
  if (PREDICT_FALSE (vui->use_tx_spinlock))
    vhost_user_vring_lock (vui, qid);

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vum->cpus[thread_index].current_trace =
	    vlib_add_trace (vm, node, b0,
			    sizeof (*vum->cpus[thread_index].current_trace));
	  vhost_user_tx_trace (vum->cpus[thread_index].current_trace,
			       vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor.
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

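      /*
       * Each packet sent to the guest is prefixed with a virtio-net header
       * of vui->virtio_net_hdr_sz bytes (12 with mergeable RX buffers, 10
       * otherwise).  The header is staged in a per-thread array and queued
       * below as its own copy entry.
       */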
      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr =
	  &vum->cpus[thread_index].tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = 0;
	hdr->num_buffers = 1;	//This is local, no need to check

	// Prepare a copy order executed later for the header
	vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &vum->cpus[thread_index].tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put indirect descriptor
		      //after non-indirect descriptor.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    vhost_copy_t *cpy = &vum->cpus[thread_index].copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  vum->cpus[thread_index].current_trace->hdr =
	    vum->cpus[thread_index].tx_headers[tx_headers_len - 1];
	}

      n_left--;	//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * vum->cpus[thread_index].copy array overflow and memory corruption
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE
	      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
				   copy_len, &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE
      (vhost_user_tx_copy (vui, vum->cpus[thread_index].copy,
			   copy_len, &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case the error is due to a lack of remaining buffers, we go back up
   * and retry.
   * The idea is that it is better to waste some time on packets that have
   * already been processed than to drop them and fetch fresh packets with a
   * good likelihood that they will be dropped too.
   * This technique also gives the VM driver more time to pick up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique ends up leveraging the physical NIC buffers in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }
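  /*
   * The retry above is bounded: retry starts at 8 and only the
   * "no available descriptors" case loops back, so a stalled guest cannot
   * hold the worker here indefinitely.
   */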

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, rxvq);
    }
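  /*
   * The guest is only signalled if it did not set VRING_AVAIL_F_NO_INTERRUPT,
   * and calls are coalesced: one is sent only after more than
   * vum->coalesce_frames packets have accumulated since the last interrupt.
   */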

  vhost_user_vring_unlock (vui, qid);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters
	 + VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }
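  /*
   * All buffers are freed below, whether they were handed to the guest or
   * not; packets that could not be enqueued were already counted as drops
   * above.
   */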

  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_interface_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];

  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
	{
	  vum->ifq_count++;
	  // Start the timer if this is the first encounter on interrupt
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
	  vum->ifq_count)
	{
	  vum->ifq_count--;
	  // Stop the timer if there is no more interrupt interface/queue
	  if ((vum->ifq_count == 0) &&
	      (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
    txvq->used->flags = VRING_USED_F_NO_NOTIFY;
  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
	   (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
    txvq->used->flags = 0;
  else
    {
      clib_warning ("BUG: unhandled mode %d changed for if %d queue %d", mode,
		    hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }
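  /*
   * VRING_USED_F_NO_NOTIFY (set above for polling mode) asks the guest
   * driver not to kick the vring when it posts buffers; clearing it for
   * interrupt/adaptive mode re-enables those kicks.
   */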

  return 0;
}

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u32 hw_flags = 0;
  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  hw_flags = vui->admin_up ? VNET_HW_INTERFACE_FLAG_LINK_UP : 0;

  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, hw_flags);

  return /* no error */ 0;
}

#ifndef CLIB_MARCH_VARIANT
/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function = vhost_user_tx,
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};

#if __x86_64__
vlib_node_function_t __clib_weak vhost_user_tx_avx512;
vlib_node_function_t __clib_weak vhost_user_tx_avx2;
static void __clib_constructor
vhost_user_tx_multiarch_select (void)
{
  if (vhost_user_tx_avx512 && clib_cpu_supports_avx512f ())
    vhost_user_device_class.tx_function = vhost_user_tx_avx512;
  else if (vhost_user_tx_avx2 && clib_cpu_supports_avx2 ())
    vhost_user_device_class.tx_function = vhost_user_tx_avx2;
}
#endif
#endif

/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */