FD.io VPP v21.10.1-2-g0a485f517
vhost_user_output.c
/*
 *------------------------------------------------------------------
 * vhost-user-output
 *
 * Copyright (c) 2014-2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

19 
20 #include <stddef.h>
21 #include <fcntl.h> /* for open */
22 #include <sys/ioctl.h>
23 #include <sys/socket.h>
24 #include <sys/un.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <sys/uio.h> /* for iovec */
28 #include <netinet/in.h>
29 #include <sys/vfs.h>
30 
31 #include <linux/if_arp.h>
32 #include <linux/if_tun.h>
33 
34 #include <vlib/vlib.h>
35 #include <vlib/unix/unix.h>
36 
37 #include <vnet/ethernet/ethernet.h>
38 #include <vnet/devices/devices.h>
39 #include <vnet/feature/feature.h>
40 
43 
/*
 * On the transmit side, we keep processing the buffers from vlib in the while
 * loop and prepare the copy order to be executed later. However, the static
 * array in which we keep the copy order is limited to VHOST_USER_COPY_ARRAY_N
 * entries. In order not to corrupt memory, we have to do the copy when the
 * static array reaches the copy threshold. We subtract 40 in case the code
 * goes into the inner loop for a maximum of 64k frames which may require
 * more array entries. We subtract 200 because our default buffer size is
 * 2048 and the default desc len is likely 1536. While it takes fewer than 40
 * vlib buffers for a jumbo frame, it may take twice as many descriptors
 * for the same jumbo frame. Use 200 for the extra head room.
 */
#define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)

extern vlib_node_registration_t vhost_user_send_interrupt_node;

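/*
 * Worked example of the head-room arithmetic (assuming
 * VHOST_USER_COPY_ARRAY_N is 4096): the threshold is 4096 - 200 = 3896.
 * A 9000-byte jumbo frame spans ceil(9000/2048) = 5 vlib buffers, but with
 * 1536-byte guest descriptors it needs ceil(9000/1536) = 6 data copies plus
 * one for the virtio header, so the 200-entry head room comfortably covers
 * the worst packet that can begin just below the threshold.
 */
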
#define foreach_vhost_user_tx_func_error \
  _(NONE, "no error")  \
  _(NOT_READY, "vhost vring not ready")  \
  _(DOWN, "vhost interface is down")  \
  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)")  \
  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)")  \
  _(MMAP_FAIL, "mmap failure") \
  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")

typedef enum
{
#define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
  foreach_vhost_user_tx_func_error
#undef _
    VHOST_USER_TX_FUNC_N_ERROR,
} vhost_user_tx_func_error_t;

static __clib_unused char *vhost_user_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_vhost_user_tx_func_error
#undef _
};

static __clib_unused u8 *
format_vhost_user_interface_name (u8 * s, va_list * args)
{
  u32 i = va_arg (*args, u32);
  u32 show_dev_instance = ~0;
  vhost_user_main_t *vum = &vhost_user_main;

  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
    show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];

  if (show_dev_instance != ~0)
    i = show_dev_instance;

  s = format (s, "VirtualEthernet0/0/%d", i);
  return s;
}

static __clib_unused int
vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
{
  // FIXME: check if the new dev instance is already used
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
					      hi->dev_instance);

  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
			   hi->dev_instance, ~0);

  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
    new_dev_instance;

  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
		hi->dev_instance, new_dev_instance);

  return 0;
}

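/*
 * Record how the first descriptor of the traced packet is laid out so the
 * packet trace can distinguish indirect tables, simple descriptor chains
 * and single-descriptor packets (the VIRTIO_TRACE_F_* flags below).
 */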
static_always_inline void
vhost_user_tx_trace (vhost_trace_t * t,
		     vhost_user_intf_t * vui, u16 qid,
		     vlib_buffer_t * b, vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
  vring_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->desc[desc_current];
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
    }
  if (rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

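/*
 * Execute the copy orders queued by the TX routines. The unrolled loop
 * below runs two copies ahead: the destinations of entries n+2 and n+3 are
 * mapped into host memory (and their sources prefetched) while entries n
 * and n+1 are being copied, hiding guest-memory translation latency behind
 * the memcpy work.
 */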
static_always_inline u32
vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
		    u16 copy_len, u32 * map_hint)
{
  void *dst0, *dst1, *dst2, *dst3;
  if (PREDICT_TRUE (copy_len >= 4))
    {
      if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
	return 1;
      if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
	return 1;
      while (PREDICT_TRUE (copy_len >= 4))
	{
	  dst0 = dst2;
	  dst1 = dst3;

	  if (PREDICT_FALSE
	      (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
	    return 1;
	  if (PREDICT_FALSE
	      (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
	    return 1;

	  clib_prefetch_load ((void *) cpy[2].src);
	  clib_prefetch_load ((void *) cpy[3].src);

	  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
	  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);

	  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
	  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
	  copy_len -= 2;
	  cpy += 2;
	}
    }
  while (copy_len)
    {
      if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
	return 1;
      clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
      vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
      copy_len -= 1;
      cpy += 1;
    }
  return 0;
}

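/*
 * Translate VPP's offload flags into a virtio_net_hdr. Per the virtio
 * spec, csum_start is the offset within the packet at which checksumming
 * begins and csum_offset is where, relative to csum_start, the computed
 * checksum must be stored; gso_type/gso_size request segmentation on the
 * guest side.
 */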
static_always_inline void
vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
			      virtio_net_hdr_t * hdr)
{
  generic_header_offset_t gho = { 0 };
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
  vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;

  ASSERT (!(is_ip4 && is_ip6));
  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
  if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
    {
      ip4_header_t *ip4;

      ip4 =
	(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
      ip4->checksum = ip4_header_checksum (ip4);
    }

  /* checksum offload */
  if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (udp_header_t, checksum);
    }
  else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
    {
      hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
      hdr->csum_start = gho.l4_hdr_offset;
      hdr->csum_offset = offsetof (tcp_header_t, checksum);
    }

  /* GSO offload */
  if (b->flags & VNET_BUFFER_F_GSO)
    {
      if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
	{
	  if (is_ip4 &&
	      (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO4)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
	    }
	  else if (is_ip6 &&
		   (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_TSO6)))
	    {
	      hdr->gso_size = vnet_buffer2 (b)->gso_size;
	      hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
	    }
	}
      else if ((vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_UFO)) &&
	       (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
	{
	  hdr->gso_size = vnet_buffer2 (b)->gso_size;
	  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
	}
    }
}

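/*
 * Packed-ring completion: a descriptor is returned to the driver by making
 * its AVAIL and USED flag bits both equal to the device's current wrap
 * counter. The first descriptor of the batch is deliberately written last
 * (its flags are computed up front and stored only after the loop) so the
 * driver can never observe a partially completed batch.
 */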
static_always_inline void
vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_intf_t * vui,
				vhost_user_vring_t * rxvq,
				u16 * n_descs_processed, u8 chained,
				vlib_frame_t * frame, u32 n_left)
{
  u16 desc_idx, flags;
  vring_packed_desc_t *desc_table = rxvq->packed_desc;
  u16 last_used_idx = rxvq->last_used_idx;

  if (PREDICT_FALSE (*n_descs_processed == 0))
    return;

  if (rxvq->used_wrap_counter)
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
      (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
  else
    flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
      ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);

  vhost_user_advance_last_used_idx (rxvq);

  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
    {
      if (rxvq->used_wrap_counter)
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
	  (VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      else
	desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
	  ~(VRING_DESC_F_AVAIL | VRING_DESC_F_USED);
      vhost_user_advance_last_used_idx (rxvq);
    }

  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;

  *n_descs_processed = 0;

  if (chained)
    {
      vring_packed_desc_t *desc_table = rxvq->packed_desc;

      while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
	     VRING_DESC_F_NEXT)
	vhost_user_advance_last_used_idx (rxvq);

      /* Advance past the current chained table entries */
      vhost_user_advance_last_used_idx (rxvq);
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
    {
      vhost_user_main_t *vum = &vhost_user_main;

      rxvq->n_since_last_int += frame->n_vectors - n_left;
      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, rxvq);
    }
}

static_always_inline void
vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
			    u16 qid, vlib_buffer_t * b,
			    vhost_user_vring_t * rxvq)
{
  vhost_user_main_t *vum = &vhost_user_main;
  u32 last_avail_idx = rxvq->last_avail_idx;
  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
  vring_packed_desc_t *hdr_desc = 0;
  u32 hint = 0;

  clib_memset (t, 0, sizeof (*t));
  t->device_index = vui - vum->vhost_user_interfaces;
  t->qid = qid;

  hdr_desc = &rxvq->packed_desc[desc_current];
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
      /* Header is the first here */
      hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
				&hint);
    }
  if (rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT)
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
    }
  if (!(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_NEXT) &&
      !(rxvq->packed_desc[desc_current].flags & VRING_DESC_F_INDIRECT))
    {
      t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
    }

  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
}

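/*
 * Packed-ring transmit path. For each vlib buffer: queue a copy of the
 * virtio header into the first guest descriptor, then walk descriptors
 * (direct, chained or indirect) queueing data copies until the packet is
 * fully placed. Copies are executed in batches by vhost_user_tx_copy and
 * the consumed descriptors are published back to the driver by
 * vhost_user_mark_desc_available.
 */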
static_always_inline uword
vhost_user_device_class_packed (vlib_main_t *vm, vlib_node_runtime_t *node,
				vlib_frame_t *frame, vhost_user_intf_t *vui,
				vhost_user_vring_t *rxvq)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  u32 qid = rxvq->qid;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  vring_packed_desc_t *desc_table;
  u32 or_flags;
  u16 desc_head, desc_index, desc_len;
  u16 n_descs_processed;
  u8 indirect, chained;

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  n_descs_processed = 0;

  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;
      u32 total_desc_len = 0;
      u16 n_entries = 0;

      indirect = 0;
      chained = 0;
      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);
      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
				      rxvq);
	}

      desc_table = rxvq->packed_desc;
      desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
      if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}
      /*
       * Go deeper in case of indirect descriptor.
       * To test it, turn off mrg_rxbuf.
       */
      if (desc_table[desc_head].flags & VRING_DESC_F_INDIRECT)
	{
	  indirect = 1;
	  if (PREDICT_FALSE (desc_table[desc_head].len <
			     sizeof (vring_packed_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  n_entries = desc_table[desc_head].len >> 4;
	  desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
				      &map_hint);
	  if (PREDICT_FALSE (desc_table == 0))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}
      else if (rxvq->packed_desc[desc_head].flags & VRING_DESC_F_NEXT)
	chained = 1;

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      /* Get a header from the header array */
      virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
      tx_headers_len++;
      hdr->hdr.flags = 0;
      hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
      hdr->num_buffers = 1;

      or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

      /* Guest supports csum offload and buffer requires checksum offload? */
      if (or_flags &&
	  (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
	vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

      /* Prepare a copy order executed later for the header */
      ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
      vhost_copy_t *cpy = &cpu->copy[copy_len];
      copy_len++;
      cpy->len = vui->virtio_net_hdr_sz;
      cpy->dst = buffer_map_addr;
      cpy->src = (uword) hdr;

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
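      /*
       * Inner loop: chop the vlib buffer chain into guest descriptors.
       * Each iteration queues one copy covering the overlap of the current
       * vlib buffer and the current guest descriptor, then refills
       * whichever side ran out (next chained/indirect descriptor, or the
       * next vlib buffer in the chain).
       */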
      while (1)
	{
	  if (buffer_len == 0)
	    {
	      /* Get new output */
	      if (chained)
		{
		  /*
		   * Next one is chained
		   * Test it with both indirect and mrg_rxbuf off
		   */
		  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
				       VRING_DESC_F_NEXT)))
		    {
		      /*
		       * Last descriptor in chain.
		       * Dequeue queued descriptors for this packet
		       */
		      vhost_user_dequeue_chained_descs (rxvq,
							&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
		  n_descs_processed++;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  total_desc_len += desc_len;
		  desc_len = 0;
		}
	      else if (indirect)
		{
		  /*
		   * Indirect table
		   * Test it with mrg_rxbuf off
		   */
		  if (PREDICT_TRUE (n_entries > 0))
		    n_entries--;
		  else
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_descs (rxvq, hdr,
						&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
		      goto done;
		    }
		  total_desc_len += desc_len;
		  desc_index = (desc_index + 1) & rxvq->qsz_mask;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		  desc_len = 0;
		}
	      else if (vui->virtio_net_hdr_sz == 12)
		{
		  /*
		   * MRG is available
		   * This is the default setting for the guest VM
		   */
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  desc_table[desc_index].len = desc_len;
		  vhost_user_advance_last_avail_idx (rxvq);
		  desc_head = desc_index =
		    rxvq->last_avail_idx & rxvq->qsz_mask;
		  hdr->num_buffers++;
		  n_descs_processed++;
		  desc_len = 0;

		  if (PREDICT_FALSE (!vhost_user_packed_desc_available
				     (rxvq, desc_index)))
		    {
		      /* Dequeue queued descriptors for this packet */
		      vhost_user_dequeue_descs (rxvq, hdr,
						&n_descs_processed);
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	  vhost_copy_t *cpy = &cpu->copy[copy_len];
	  copy_len++;
	  cpy->len = bytes_left;
	  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	  cpy->dst = buffer_map_addr;
	  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	    current_b0->current_length - bytes_left;

	  bytes_left -= cpy->len;
	  buffer_len -= cpy->len;
	  buffer_map_addr += cpy->len;
	  desc_len += cpy->len;

	  clib_prefetch_load (&rxvq->packed_desc);

	  /* Check if vlib buffer has more data. If not, get more or break */
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  /* End of packet */
		  break;
		}
	    }
	}

      /* Move from available to used ring */
      total_desc_len += desc_len;
      rxvq->packed_desc[desc_head].len = total_desc_len;

      vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
      n_descs_processed++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];

      n_left--;

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    vlib_error_count (vm, node->node_index,
			      VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	  copy_len = 0;

	  /* give buffers back to driver */
	  vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
					  chained, frame, n_left);
	}

      buffers++;
    }

done:
  if (PREDICT_TRUE (copy_len))
    {
      if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					     &map_hint)))
	vlib_error_count (vm, node->node_index,
			  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);

      vhost_user_mark_desc_available (vm, vui, rxvq, &n_descs_processed,
				      chained, frame, n_left);
    }

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets
   * that have been processed already than dropping them and get
   * more fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives more time to VM driver to pick-up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end-up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  clib_spinlock_unlock (&rxvq->vring_lock);

  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters +
	 VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

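/*
 * Device-class TX entry point. VPP transmits into the guest's RX vrings:
 * in the virtio multi-queue layout, vring 2*q is the guest RX (VPP TX)
 * ring and vring 2*q+1 is the guest TX (VPP RX) ring of queue pair q,
 * hence VHOST_VRING_IDX_RX (tf->queue_id) below and the qid / 2 passed to
 * the trace routines.
 */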
VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
						   vlib_node_runtime_t *
						   node, vlib_frame_t * frame)
{
  u32 *buffers = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vhost_user_main_t *vum = &vhost_user_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
  u32 qid;
  vhost_user_vring_t *rxvq;
  u8 error;
  u32 thread_index = vm->thread_index;
  vhost_cpu_t *cpu = &vum->cpus[thread_index];
  u32 map_hint = 0;
  u8 retry = 8;
  u16 copy_len;
  u16 tx_headers_len;
  u32 or_flags;
  vnet_hw_if_tx_frame_t *tf = vlib_frame_scalar_args (frame);

  if (PREDICT_FALSE (!vui->admin_up))
    {
      error = VHOST_USER_TX_FUNC_ERROR_DOWN;
      goto done3;
    }

  if (PREDICT_FALSE (!vui->is_ready))
    {
      error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
      goto done3;
    }

  qid = VHOST_VRING_IDX_RX (tf->queue_id);
  rxvq = &vui->vrings[qid];
  ASSERT (tf->queue_id == rxvq->qid);

  if (PREDICT_FALSE (rxvq->avail == 0))
    {
      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
      goto done3;
    }
  if (tf->shared_queue)
    clib_spinlock_lock (&rxvq->vring_lock);

  if (vhost_user_is_packed_ring_supported (vui))
    return (vhost_user_device_class_packed (vm, node, frame, vui, rxvq));

retry:
  error = VHOST_USER_TX_FUNC_ERROR_NONE;
  tx_headers_len = 0;
  copy_len = 0;
  while (n_left > 0)
    {
      vlib_buffer_t *b0, *current_b0;
      u16 desc_head, desc_index, desc_len;
      vring_desc_t *desc_table;
      uword buffer_map_addr;
      u32 buffer_len;
      u16 bytes_left;

      if (PREDICT_TRUE (n_left > 1))
	vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);

      b0 = vlib_get_buffer (vm, buffers[0]);

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace = vlib_add_trace (vm, node, b0,
					       sizeof (*cpu->current_trace));
	  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
	}

      if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
	{
	  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
	  goto done;
	}

      desc_table = rxvq->desc;
      desc_head = desc_index =
	rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];

      /* Go deeper in case of indirect descriptor
       * I don't know of any driver providing indirect for RX. */
      if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
	{
	  if (PREDICT_FALSE
	      (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
	      goto done;
	    }
	  if (PREDICT_FALSE
	      (!(desc_table =
		 map_guest_mem (vui, rxvq->desc[desc_index].addr,
				&map_hint))))
	    {
	      error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
	      goto done;
	    }
	  desc_index = 0;
	}

      desc_len = vui->virtio_net_hdr_sz;
      buffer_map_addr = desc_table[desc_index].addr;
      buffer_len = desc_table[desc_index].len;

      {
	// Get a header from the header array
	virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
	tx_headers_len++;
	hdr->hdr.flags = 0;
	hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
	hdr->num_buffers = 1;	//This is local, no need to check

	or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD);

	/* Guest supports csum offload and buffer requires checksum offload? */
	if (or_flags
	    && (vui->features & VIRTIO_FEATURE (VIRTIO_NET_F_GUEST_CSUM)))
	  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);

	// Prepare a copy order executed later for the header
	ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	vhost_copy_t *cpy = &cpu->copy[copy_len];
	copy_len++;
	cpy->len = vui->virtio_net_hdr_sz;
	cpy->dst = buffer_map_addr;
	cpy->src = (uword) hdr;
      }

      buffer_map_addr += vui->virtio_net_hdr_sz;
      buffer_len -= vui->virtio_net_hdr_sz;
      bytes_left = b0->current_length;
      current_b0 = b0;
      while (1)
	{
	  if (buffer_len == 0)
	    {			//Get new output
	      if (desc_table[desc_index].flags & VRING_DESC_F_NEXT)
		{
		  //Next one is chained
		  desc_index = desc_table[desc_index].next;
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else if (vui->virtio_net_hdr_sz == 12)	//MRG is available
		{
		  virtio_net_hdr_mrg_rxbuf_t *hdr =
		    &cpu->tx_headers[tx_headers_len - 1];

		  //Move from available to used buffer
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
		    desc_head;
		  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
		    desc_len;
		  vhost_user_log_dirty_ring (vui, rxvq,
					     ring[rxvq->last_used_idx &
						  rxvq->qsz_mask]);

		  rxvq->last_avail_idx++;
		  rxvq->last_used_idx++;
		  hdr->num_buffers++;
		  desc_len = 0;

		  if (PREDICT_FALSE
		      (rxvq->last_avail_idx == rxvq->avail->idx))
		    {
		      //Dequeue queued descriptors for this packet
		      rxvq->last_used_idx -= hdr->num_buffers - 1;
		      rxvq->last_avail_idx -= hdr->num_buffers - 1;
		      error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
		      goto done;
		    }

		  desc_table = rxvq->desc;
		  desc_head = desc_index =
		    rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
		  if (PREDICT_FALSE
		      (rxvq->desc[desc_head].flags & VRING_DESC_F_INDIRECT))
		    {
		      //It is seriously unlikely that a driver will put indirect descriptor
		      //after non-indirect descriptor.
		      if (PREDICT_FALSE
			  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
			  goto done;
			}
		      if (PREDICT_FALSE
			  (!(desc_table =
			     map_guest_mem (vui,
					    rxvq->desc[desc_index].addr,
					    &map_hint))))
			{
			  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
			  goto done;
			}
		      desc_index = 0;
		    }
		  buffer_map_addr = desc_table[desc_index].addr;
		  buffer_len = desc_table[desc_index].len;
		}
	      else
		{
		  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
		  goto done;
		}
	    }

	  {
	    ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
	    vhost_copy_t *cpy = &cpu->copy[copy_len];
	    copy_len++;
	    cpy->len = bytes_left;
	    cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
	    cpy->dst = buffer_map_addr;
	    cpy->src = (uword) vlib_buffer_get_current (current_b0) +
	      current_b0->current_length - bytes_left;

	    bytes_left -= cpy->len;
	    buffer_len -= cpy->len;
	    buffer_map_addr += cpy->len;
	    desc_len += cpy->len;

	    clib_prefetch_load (&rxvq->desc);
	  }

	  // Check if vlib buffer has more data. If not, get more or break.
	  if (PREDICT_TRUE (!bytes_left))
	    {
	      if (PREDICT_FALSE
		  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
		{
		  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
		  bytes_left = current_b0->current_length;
		}
	      else
		{
		  //End of packet
		  break;
		}
	    }
	}

      //Move from available to used ring
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
      rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
      vhost_user_log_dirty_ring (vui, rxvq,
				 ring[rxvq->last_used_idx & rxvq->qsz_mask]);
      rxvq->last_avail_idx++;
      rxvq->last_used_idx++;

      if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
	{
	  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
	}

      n_left--;			//At the end for error counting when 'goto done' is invoked

      /*
       * Do the copy periodically to prevent
       * cpu->copy array overflow and corrupt memory
       */
      if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
	{
	  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
						 &map_hint)))
	    {
	      vlib_error_count (vm, node->node_index,
				VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
	    }
	  copy_len = 0;

	  /* give buffers back to driver */
	  CLIB_MEMORY_BARRIER ();
	  rxvq->used->idx = rxvq->last_used_idx;
	  vhost_user_log_dirty_ring (vui, rxvq, idx);
	}
      buffers++;
    }

done:
  //Do the memory copies
  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
					 &map_hint)))
    {
      vlib_error_count (vm, node->node_index,
			VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
    }

  CLIB_MEMORY_BARRIER ();
  rxvq->used->idx = rxvq->last_used_idx;
  vhost_user_log_dirty_ring (vui, rxvq, idx);

  /*
   * When n_left is set, error is always set to something too.
   * In case error is due to lack of remaining buffers, we go back up and
   * retry.
   * The idea is that it is better to waste some time on packets
   * that have been processed already than dropping them and get
   * more fresh packets with a good likelihood that they will be dropped too.
   * This technique also gives more time to VM driver to pick-up packets.
   * In case the traffic flows from physical to virtual interfaces, this
   * technique will end-up leveraging the physical NIC buffer in order to
   * absorb the VM's CPU jitter.
   */
  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
    {
      retry--;
      goto retry;
    }

  /* interrupt (call) handling */
  if ((rxvq->callfd_idx != ~0) &&
      !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
    {
      rxvq->n_since_last_int += frame->n_vectors - n_left;

      if (rxvq->n_since_last_int > vum->coalesce_frames)
	vhost_user_send_call (vm, vui, rxvq);
    }

  clib_spinlock_unlock (&rxvq->vring_lock);

done3:
  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
    {
      vlib_error_count (vm, node->node_index, error, n_left);
      vlib_increment_simple_counter
	(vnet_main.interface_main.sw_if_counters +
	 VNET_INTERFACE_COUNTER_DROP,
	 thread_index, vui->sw_if_index, n_left);
    }

  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
  return frame->n_vectors;
}

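/*
 * RX-mode change callback. VPP receives from the guest's TX vring, hence
 * txvq = vrings[VHOST_VRING_IDX_TX (qid)]. Interrupt and adaptive modes
 * require the guest's kick eventfd; the code keeps the per-thread
 * polling_q_count and the global ifq_count book-keeping in sync and
 * starts/stops the interrupt-coalescing timer process when the first/last
 * interrupt-mode queue appears.
 */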
static __clib_unused clib_error_t *
vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
				     u32 qid, vnet_hw_if_rx_mode mode)
{
  vlib_main_t *vm = vnm->vlib_main;
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
  vhost_cpu_t *cpu;

  if (mode == txvq->mode)
    return 0;

  if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
      (mode != VNET_HW_IF_RX_MODE_ADAPTIVE) &&
      (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
    {
      vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
		  hw_if_index, qid);
      return clib_error_return (0, "unsupported");
    }

  if (txvq->thread_index == ~0)
    return clib_error_return (0, "Queue initialization is not finished yet");

  cpu = vec_elt_at_index (vum->cpus, txvq->thread_index);
  if ((mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
      (mode == VNET_HW_IF_RX_MODE_ADAPTIVE))
    {
      if (txvq->kickfd_idx == ~0)
	{
	  // We cannot support interrupt mode if the driver opts out
	  return clib_error_return (0, "Driver does not support interrupt");
	}
      if (txvq->mode == VNET_HW_IF_RX_MODE_POLLING)
	{
	  ASSERT (cpu->polling_q_count != 0);
	  if (cpu->polling_q_count)
	    cpu->polling_q_count--;
	  vum->ifq_count++;
	  // Start the timer if this is the first encounter on interrupt
	  // interface/queue
	  if ((vum->ifq_count == 1) &&
	      ((vum->coalesce_time > 0.0) || (vum->coalesce_frames > 0)))
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_START_TIMER, 0);
	}
    }
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
    {
      if (((txvq->mode == VNET_HW_IF_RX_MODE_INTERRUPT) ||
	   (txvq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)) && vum->ifq_count)
	{
	  cpu->polling_q_count++;
	  vum->ifq_count--;
	  // Stop the timer if there is no more interrupt interface/queue
	  if (vum->ifq_count == 0)
	    vlib_process_signal_event (vm,
				       vhost_user_send_interrupt_node.index,
				       VHOST_USER_EVENT_STOP_TIMER, 0);
	}
    }

  txvq->mode = mode;
  vhost_user_set_operation_mode (vui, txvq);

  return 0;
}

static __clib_unused clib_error_t *
vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
				    u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  vhost_user_main_t *vum = &vhost_user_main;
  vhost_user_intf_t *vui =
    pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
  u8 link_old, link_new;

  link_old = vui_is_link_up (vui);

  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;

  link_new = vui_is_link_up (vui);

  if (link_old != link_new)
    vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
				 VNET_HW_INTERFACE_FLAG_LINK_UP : 0);

  return /* no error */ 0;
}

/* *INDENT-OFF* */
VNET_DEVICE_CLASS (vhost_user_device_class) = {
  .name = "vhost-user",
  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
  .tx_function_error_strings = vhost_user_tx_func_error_strings,
  .format_device_name = format_vhost_user_interface_name,
  .name_renumber = vhost_user_name_renumber,
  .admin_up_down_function = vhost_user_interface_admin_up_down,
  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
  .format_tx_trace = format_vhost_trace,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */