vhost_user_output.c
1 /*
2  *------------------------------------------------------------------
3  * vhost-user-output
4  *
5  * Copyright (c) 2014-2018 Cisco and/or its affiliates.
6  * Licensed under the Apache License, Version 2.0 (the "License");
7  * you may not use this file except in compliance with the License.
8  * You may obtain a copy of the License at:
9  *
10  * http://www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an "AS IS" BASIS,
14  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  *------------------------------------------------------------------
18  */
19 
20 #include <stddef.h>
21 #include <fcntl.h> /* for open */
22 #include <sys/ioctl.h>
23 #include <sys/socket.h>
24 #include <sys/un.h>
25 #include <sys/stat.h>
26 #include <sys/types.h>
27 #include <sys/uio.h> /* for iovec */
28 #include <netinet/in.h>
29 #include <sys/vfs.h>
30 
31 #include <linux/if_arp.h>
32 #include <linux/if_tun.h>
33 
34 #include <vlib/vlib.h>
35 #include <vlib/unix/unix.h>
36 
37 #include <vnet/ip/ip.h>
38 
39 #include <vnet/ethernet/ethernet.h>
40 #include <vnet/devices/devices.h>
41 #include <vnet/feature/feature.h>
42 
46 
48 /*
49  * On the transmit side, we keep processing the buffers from vlib in the while
50  * loop and prepare the copy orders to be executed later. However, the static
51  * array in which we keep the copy orders is limited to VHOST_USER_COPY_ARRAY_N
52  * entries. To avoid corrupting memory, we have to do the copy when the
53  * static array reaches the copy threshold. We subtract 40 in case the code
54  * goes into the inner loop for a maximum of 64k frames, which may require
55  * more array entries. We subtract 200 because our default buffer size is
56  * 2048 while the default descriptor length is likely 1536: a jumbo frame
57  * takes fewer than 40 vlib buffers, but it may take twice as many
58  * descriptors. Use 200 for the extra headroom.
59  */
60 #define VHOST_USER_TX_COPY_THRESHOLD (VHOST_USER_COPY_ARRAY_N - 200)
61 
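/*
 * A rough worked example of the headroom (illustrative numbers, not taken
 * from this file): a 9000-byte jumbo frame stored in 2048-byte vlib buffers
 * spans about 5 buffers, but copying it into 1536-byte guest descriptors
 * takes 6-7 copy entries plus one for the virtio header, i.e. roughly twice
 * as many copy-array entries as vlib buffers. The 200-entry margin absorbs
 * that expansion before the array is flushed.
 */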
63 
64 #define foreach_vhost_user_tx_func_error \
65  _(NONE, "no error") \
66  _(NOT_READY, "vhost vring not ready") \
67  _(DOWN, "vhost interface is down") \
68  _(PKT_DROP_NOBUF, "tx packet drops (no available descriptors)") \
69  _(PKT_DROP_NOMRG, "tx packet drops (cannot merge descriptors)") \
70  _(MMAP_FAIL, "mmap failure") \
71  _(INDIRECT_OVERFLOW, "indirect descriptor table overflow")
72 
73 typedef enum
74 {
75 #define _(f,s) VHOST_USER_TX_FUNC_ERROR_##f,
76  foreach_vhost_user_tx_func_error
77 #undef _
78  VHOST_USER_TX_FUNC_N_ERROR,
79 } vhost_user_tx_func_error_t;
80 
81 static __clib_unused char *vhost_user_tx_func_error_strings[] = {
82 #define _(n,s) s,
83  foreach_vhost_user_tx_func_error
84 #undef _
85 };
86 
87 static __clib_unused u8 *
88 format_vhost_user_interface_name (u8 * s, va_list * args)
89 {
90  u32 i = va_arg (*args, u32);
91  u32 show_dev_instance = ~0;
92  vhost_user_main_t *vum = &vhost_user_main;
93 
94  if (i < vec_len (vum->show_dev_instance_by_real_dev_instance))
95  show_dev_instance = vum->show_dev_instance_by_real_dev_instance[i];
96 
97  if (show_dev_instance != ~0)
98  i = show_dev_instance;
99 
100  s = format (s, "VirtualEthernet0/0/%d", i);
101  return s;
102 }
103 
104 static __clib_unused int
105 vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
106 {
107  // FIXME: check if the new dev instance is already used
108  vhost_user_main_t *vum = &vhost_user_main;
109  vhost_user_intf_t *vui = pool_elt_at_index (vum->vhost_user_interfaces,
110  hi->dev_instance);
111 
112  vec_validate_init_empty (vum->show_dev_instance_by_real_dev_instance,
113  hi->dev_instance, ~0);
114 
115  vum->show_dev_instance_by_real_dev_instance[hi->dev_instance] =
116  new_dev_instance;
117 
118  vu_log_debug (vui, "renumbered vhost-user interface dev_instance %d to %d",
119  hi->dev_instance, new_dev_instance);
120 
121  return 0;
122 }
123 
124 /**
125  * @brief Try once to lock the vring
126  * @return 0 on success, non-zero on failure.
127  */
128 static_always_inline int
129 vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
130 {
131  return clib_atomic_test_and_set (vui->vring_locks[qid]);
132 }
133 
134 /**
135  * @brief Spin until the vring is successfully locked
136  */
137 static_always_inline void
138 vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
139 {
140  while (vhost_user_vring_try_lock (vui, qid))
141  ;
142 }
143 
144 /**
145  * @brief Unlock the vring lock
146  */
147 static_always_inline void
148 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
149 {
150  clib_atomic_release (vui->vring_locks[qid]);
151 }
152 
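/*
 * Typical usage of the helpers above (mirrors the TX path below): the
 * per-vring spinlock is only taken when several worker threads share one
 * guest RX queue (vui->use_tx_spinlock).
 *
 *   if (PREDICT_FALSE (vui->use_tx_spinlock))
 *     vhost_user_vring_lock (vui, qid);
 *   ... enqueue descriptors and copy orders ...
 *   vhost_user_vring_unlock (vui, qid);
 */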
153 static_always_inline void
154 vhost_user_tx_trace (vhost_trace_t * t,
155  vhost_user_intf_t * vui, u16 qid,
156  vlib_buffer_t * b, vhost_user_vring_t * rxvq)
157 {
158  vhost_user_main_t *vum = &vhost_user_main;
159  u32 last_avail_idx = rxvq->last_avail_idx;
160  u32 desc_current = rxvq->avail->ring[last_avail_idx & rxvq->qsz_mask];
161  vring_desc_t *hdr_desc = 0;
162  u32 hint = 0;
163 
164  clib_memset (t, 0, sizeof (*t));
165  t->device_index = vui - vum->vhost_user_interfaces;
166  t->qid = qid;
167 
168  hdr_desc = &rxvq->desc[desc_current];
169  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
170  {
171  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
172  /* Header is the first here */
173  hdr_desc = map_guest_mem (vui, rxvq->desc[desc_current].addr, &hint);
174  }
175  if (rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
176  {
177  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
178  }
179  if (!(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
180  !(rxvq->desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
181  {
182  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
183  }
184 
185  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
186 }
187 
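/*
 * vhost_user_tx_copy executes the copy orders queued by the TX loop. The
 * unrolled loop below keeps the guest-address mapping and source prefetch
 * of the next two entries in flight while copying the current two, logs
 * every written range for dirty-page tracking (live migration), and
 * returns non-zero if a guest address cannot be mapped.
 */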
188 static_always_inline u32
189 vhost_user_tx_copy (vhost_user_intf_t * vui, vhost_copy_t * cpy,
190  u16 copy_len, u32 * map_hint)
191 {
192  void *dst0, *dst1, *dst2, *dst3;
193  if (PREDICT_TRUE (copy_len >= 4))
194  {
195  if (PREDICT_FALSE (!(dst2 = map_guest_mem (vui, cpy[0].dst, map_hint))))
196  return 1;
197  if (PREDICT_FALSE (!(dst3 = map_guest_mem (vui, cpy[1].dst, map_hint))))
198  return 1;
199  while (PREDICT_TRUE (copy_len >= 4))
200  {
201  dst0 = dst2;
202  dst1 = dst3;
203 
204  if (PREDICT_FALSE
205  (!(dst2 = map_guest_mem (vui, cpy[2].dst, map_hint))))
206  return 1;
207  if (PREDICT_FALSE
208  (!(dst3 = map_guest_mem (vui, cpy[3].dst, map_hint))))
209  return 1;
210 
211  CLIB_PREFETCH ((void *) cpy[2].src, 64, LOAD);
212  CLIB_PREFETCH ((void *) cpy[3].src, 64, LOAD);
213 
214  clib_memcpy_fast (dst0, (void *) cpy[0].src, cpy[0].len);
215  clib_memcpy_fast (dst1, (void *) cpy[1].src, cpy[1].len);
216 
217  vhost_user_log_dirty_pages_2 (vui, cpy[0].dst, cpy[0].len, 1);
218  vhost_user_log_dirty_pages_2 (vui, cpy[1].dst, cpy[1].len, 1);
219  copy_len -= 2;
220  cpy += 2;
221  }
222  }
223  while (copy_len)
224  {
225  if (PREDICT_FALSE (!(dst0 = map_guest_mem (vui, cpy->dst, map_hint))))
226  return 1;
227  clib_memcpy_fast (dst0, (void *) cpy->src, cpy->len);
228  vhost_user_log_dirty_pages_2 (vui, cpy->dst, cpy->len, 1);
229  copy_len -= 1;
230  cpy += 1;
231  }
232  return 0;
233 }
234 
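/*
 * vhost_user_handle_tx_offload translates the buffer's checksum/GSO offload
 * flags into a virtio_net_hdr the guest can act on: the IPv4 checksum is
 * computed in place, L4 checksum offload is expressed through
 * csum_start/csum_offset, and TSO4/TSO6/UFO are advertised only when the
 * guest negotiated the corresponding GUEST_* features.
 */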
235 static_always_inline void
236 vhost_user_handle_tx_offload (vhost_user_intf_t * vui, vlib_buffer_t * b,
237  virtio_net_hdr_t * hdr)
238 {
239  generic_header_offset_t gho = { 0 };
240  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
241  int is_ip6 = b->flags & VNET_BUFFER_F_IS_IP6;
242 
243  ASSERT (!(is_ip4 && is_ip6));
244  vnet_generic_header_offset_parser (b, &gho, 1 /* l2 */ , is_ip4, is_ip6);
245  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
246  {
247  ip4_header_t *ip4;
248 
249  ip4 =
250  (ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
251  ip4->checksum = ip4_header_checksum (ip4);
252  }
253 
254  /* checksum offload */
255  if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
256  {
257  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
258  hdr->csum_start = gho.l4_hdr_offset;
259  hdr->csum_offset = offsetof (udp_header_t, checksum);
260  udp_header_t *udp =
261  (udp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
262  udp->checksum = 0;
263  }
264  else if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
265  {
266  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
267  hdr->csum_start = gho.l4_hdr_offset;
268  hdr->csum_offset = offsetof (tcp_header_t, checksum);
269  tcp_header_t *tcp =
270  (tcp_header_t *) (vlib_buffer_get_current (b) + gho.l4_hdr_offset);
271  tcp->checksum = 0;
272  }
273 
274  /* GSO offload */
275  if (b->flags & VNET_BUFFER_F_GSO)
276  {
277  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
278  {
279  if (is_ip4 &&
280  (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO4)))
281  {
282  hdr->gso_size = vnet_buffer2 (b)->gso_size;
283  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
284  }
285  else if (is_ip6 &&
286  (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_TSO6)))
287  {
288  hdr->gso_size = vnet_buffer2 (b)->gso_size;
289  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
290  }
291  }
292  else if ((vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_UFO)) &&
293  (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
294  {
295  hdr->gso_size = vnet_buffer2 (b)->gso_size;
296  hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
297  }
298  }
299 }
300 
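/*
 * vhost_user_mark_desc_available completes packed-ring descriptors: it
 * computes the AVAIL/USED flag pair from the used wrap counter, flips the
 * flags of all chained descriptors first and the head descriptor last (so
 * the guest never sees a partially completed chain), and finally sends an
 * interrupt if the guest has not suppressed it and enough frames have been
 * coalesced.
 */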
301 static_always_inline void
302 vhost_user_mark_desc_available (vlib_main_t * vm, vhost_user_vring_t * rxvq,
303  u16 * n_descs_processed, u8 chained,
304  vlib_frame_t * frame, u32 n_left)
305 {
306  u16 desc_idx, flags;
307  vring_packed_desc_t *desc_table = rxvq->packed_desc;
308  u16 last_used_idx = rxvq->last_used_idx;
309 
310  if (PREDICT_FALSE (*n_descs_processed == 0))
311  return;
312 
313  if (rxvq->used_wrap_counter)
314  flags = desc_table[last_used_idx & rxvq->qsz_mask].flags |
315  (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
316  else
317  flags = desc_table[last_used_idx & rxvq->qsz_mask].flags &
318  ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
319 
320  vhost_user_advance_last_used_idx (rxvq);
321 
322  for (desc_idx = 1; desc_idx < *n_descs_processed; desc_idx++)
323  {
324  if (rxvq->used_wrap_counter)
325  desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags |=
326  (VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
327  else
328  desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &=
329  ~(VIRTQ_DESC_F_AVAIL | VIRTQ_DESC_F_USED);
330  vhost_user_advance_last_used_idx (rxvq);
331  }
332 
333  desc_table[last_used_idx & rxvq->qsz_mask].flags = flags;
334 
335  *n_descs_processed = 0;
336 
337  if (chained)
338  {
339  vring_packed_desc_t *desc_table = rxvq->packed_desc;
340 
341  while (desc_table[rxvq->last_used_idx & rxvq->qsz_mask].flags &
342  VIRTQ_DESC_F_NEXT)
343  vhost_user_advance_last_used_idx (rxvq);
344 
345  /* Advance past the current chained table entries */
346  vhost_user_advance_last_used_idx (rxvq);
347  }
348 
349  /* interrupt (call) handling */
350  if ((rxvq->callfd_idx != ~0) &&
351  (rxvq->avail_event->flags != VRING_EVENT_F_DISABLE))
352  {
353  vhost_user_main_t *vum = &vhost_user_main;
354 
355  rxvq->n_since_last_int += frame->n_vectors - n_left;
356  if (rxvq->n_since_last_int > vum->coalesce_frames)
357  vhost_user_send_call (vm, rxvq);
358  }
359 }
360 
361 static_always_inline void
362 vhost_user_tx_trace_packed (vhost_trace_t * t, vhost_user_intf_t * vui,
363  u16 qid, vlib_buffer_t * b,
364  vhost_user_vring_t * rxvq)
365 {
366  vhost_user_main_t *vum = &vhost_user_main;
367  u32 last_avail_idx = rxvq->last_avail_idx;
368  u32 desc_current = last_avail_idx & rxvq->qsz_mask;
369  vring_packed_desc_t *hdr_desc = 0;
370  u32 hint = 0;
371 
372  clib_memset (t, 0, sizeof (*t));
373  t->device_index = vui - vum->vhost_user_interfaces;
374  t->qid = qid;
375 
376  hdr_desc = &rxvq->packed_desc[desc_current];
377  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT)
378  {
379  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_INDIRECT;
380  /* Header is the first here */
381  hdr_desc = map_guest_mem (vui, rxvq->packed_desc[desc_current].addr,
382  &hint);
383  }
384  if (rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT)
385  {
386  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SIMPLE_CHAINED;
387  }
388  if (!(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_NEXT) &&
389  !(rxvq->packed_desc[desc_current].flags & VIRTQ_DESC_F_INDIRECT))
390  {
391  t->virtio_ring_flags |= 1 << VIRTIO_TRACE_F_SINGLE_DESC;
392  }
393 
394  t->first_desc_len = hdr_desc ? hdr_desc->len : 0;
395 }
396 
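/*
 * Packed-ring transmit path: for every vlib buffer, reserve guest
 * descriptors (following indirect tables and descriptor chains as needed),
 * queue the virtio header and the payload as copy orders, and periodically
 * flush the copies and hand the descriptors back to the guest through
 * vhost_user_mark_desc_available().
 */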
397 static_always_inline uword
398 vhost_user_device_class_packed (vlib_main_t * vm, vlib_node_runtime_t * node,
399  vlib_frame_t * frame)
400 {
401  u32 *buffers = vlib_frame_vector_args (frame);
402  u32 n_left = frame->n_vectors;
403  vhost_user_main_t *vum = &vhost_user_main;
404  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
405  vhost_user_intf_t *vui =
406  pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
407  u32 qid;
408  vhost_user_vring_t *rxvq;
409  u8 error;
410  u32 thread_index = vm->thread_index;
411  vhost_cpu_t *cpu = &vum->cpus[thread_index];
412  u32 map_hint = 0;
413  u8 retry = 8;
414  u16 copy_len;
415  u16 tx_headers_len;
416  vring_packed_desc_t *desc_table;
417  u32 or_flags;
418  u16 desc_head, desc_index, desc_len;
419  u16 n_descs_processed;
420  u8 indirect, chained;
421 
422  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
423  thread_index));
424  rxvq = &vui->vrings[qid];
425 
426 retry:
427  error = VHOST_USER_TX_FUNC_ERROR_NONE;
428  tx_headers_len = 0;
429  copy_len = 0;
430  n_descs_processed = 0;
431 
432  while (n_left > 0)
433  {
434  vlib_buffer_t *b0, *current_b0;
435  uword buffer_map_addr;
436  u32 buffer_len;
437  u16 bytes_left;
438  u32 total_desc_len = 0;
439  u16 n_entries = 0;
440 
441  indirect = 0;
442  chained = 0;
443  if (PREDICT_TRUE (n_left > 1))
444  vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
445 
446  b0 = vlib_get_buffer (vm, buffers[0]);
447  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
448  {
449  cpu->current_trace = vlib_add_trace (vm, node, b0,
450  sizeof (*cpu->current_trace));
451  vhost_user_tx_trace_packed (cpu->current_trace, vui, qid / 2, b0,
452  rxvq);
453  }
454 
455  desc_table = rxvq->packed_desc;
456  desc_head = desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
457  if (PREDICT_FALSE (!vhost_user_packed_desc_available (rxvq, desc_head)))
458  {
459  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
460  goto done;
461  }
462  /*
463  * Go deeper in case of indirect descriptor.
464  * To test it, turn off mrg_rxbuf.
465  */
466  if (desc_table[desc_head].flags & VIRTQ_DESC_F_INDIRECT)
467  {
468  indirect = 1;
469  if (PREDICT_FALSE (desc_table[desc_head].len <
470  sizeof (vring_packed_desc_t)))
471  {
472  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
473  goto done;
474  }
475  n_entries = desc_table[desc_head].len >> 4;
476  desc_table = map_guest_mem (vui, desc_table[desc_index].addr,
477  &map_hint);
478  if (PREDICT_FALSE (desc_table == 0))
479  {
480  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
481  goto done;
482  }
483  desc_index = 0;
484  }
485  else if (rxvq->packed_desc[desc_head].flags & VIRTQ_DESC_F_NEXT)
486  chained = 1;
487 
488  desc_len = vui->virtio_net_hdr_sz;
489  buffer_map_addr = desc_table[desc_index].addr;
490  buffer_len = desc_table[desc_index].len;
491 
492  /* Get a header from the header array */
493  virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
494  tx_headers_len++;
495  hdr->hdr.flags = 0;
496  hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
497  hdr->num_buffers = 1;
498 
499  or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
500  (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
501  (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
502 
503  /* Guest supports csum offload and buffer requires checksum offload? */
504  if (or_flags &&
505  (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
506  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
507 
508  /* Prepare a copy order executed later for the header */
509  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
510  vhost_copy_t *cpy = &cpu->copy[copy_len];
511  copy_len++;
512  cpy->len = vui->virtio_net_hdr_sz;
513  cpy->dst = buffer_map_addr;
514  cpy->src = (uword) hdr;
515 
516  buffer_map_addr += vui->virtio_net_hdr_sz;
517  buffer_len -= vui->virtio_net_hdr_sz;
518  bytes_left = b0->current_length;
519  current_b0 = b0;
520  while (1)
521  {
522  if (buffer_len == 0)
523  {
524  /* Get new output */
525  if (chained)
526  {
527  /*
528  * Next one is chained
529  * Test it with both indirect and mrg_rxbuf off
530  */
531  if (PREDICT_FALSE (!(desc_table[desc_index].flags &
532  VIRTQ_DESC_F_NEXT)))
533  {
534  /*
535  * Last descriptor in chain.
536  * Dequeue queued descriptors for this packet
537  */
538  vhost_user_dequeue_chained_descs (rxvq,
539  &n_descs_processed);
540  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
541  goto done;
542  }
543  vhost_user_advance_last_avail_idx (rxvq);
544  desc_index = rxvq->last_avail_idx & rxvq->qsz_mask;
545  n_descs_processed++;
546  buffer_map_addr = desc_table[desc_index].addr;
547  buffer_len = desc_table[desc_index].len;
548  total_desc_len += desc_len;
549  desc_len = 0;
550  }
551  else if (indirect)
552  {
553  /*
554  * Indirect table
555  * Test it with mrg_rxbuf off
556  */
557  if (PREDICT_TRUE (n_entries > 0))
558  n_entries--;
559  else
560  {
561  /* Dequeue queued descriptors for this packet */
562  vhost_user_dequeue_descs (rxvq, hdr,
563  &n_descs_processed);
564  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
565  goto done;
566  }
567  total_desc_len += desc_len;
568  desc_index = (desc_index + 1) & rxvq->qsz_mask;
569  buffer_map_addr = desc_table[desc_index].addr;
570  buffer_len = desc_table[desc_index].len;
571  desc_len = 0;
572  }
573  else if (vui->virtio_net_hdr_sz == 12)
574  {
575  /*
576  * MRG is available
577  * This is the default setting for the guest VM
578  */
579  virtio_net_hdr_mrg_rxbuf_t *hdr =
580  &cpu->tx_headers[tx_headers_len - 1];
581 
582  desc_table[desc_index].len = desc_len;
584  desc_head = desc_index =
585  rxvq->last_avail_idx & rxvq->qsz_mask;
586  hdr->num_buffers++;
587  n_descs_processed++;
588  desc_len = 0;
589 
590  if (PREDICT_FALSE (!vhost_user_packed_desc_available
591  (rxvq, desc_index)))
592  {
593  /* Dequeue queued descriptors for this packet */
594  vhost_user_dequeue_descs (rxvq, hdr,
595  &n_descs_processed);
596  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
597  goto done;
598  }
599 
600  buffer_map_addr = desc_table[desc_index].addr;
601  buffer_len = desc_table[desc_index].len;
602  }
603  else
604  {
605  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
606  goto done;
607  }
608  }
609 
610  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
611  vhost_copy_t *cpy = &cpu->copy[copy_len];
612  copy_len++;
613  cpy->len = bytes_left;
614  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
615  cpy->dst = buffer_map_addr;
616  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
617  current_b0->current_length - bytes_left;
618 
619  bytes_left -= cpy->len;
620  buffer_len -= cpy->len;
621  buffer_map_addr += cpy->len;
622  desc_len += cpy->len;
623 
625 
626  /* Check if vlib buffer has more data. If not, get more or break */
627  if (PREDICT_TRUE (!bytes_left))
628  {
629  if (PREDICT_FALSE
630  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
631  {
632  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
633  bytes_left = current_b0->current_length;
634  }
635  else
636  {
637  /* End of packet */
638  break;
639  }
640  }
641  }
642 
643  /* Move from available to used ring */
644  total_desc_len += desc_len;
645  rxvq->packed_desc[desc_head].len = total_desc_len;
646 
647  vhost_user_advance_last_avail_table_idx (vui, rxvq, chained);
648  n_descs_processed++;
649 
650  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
651  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
652 
653  n_left--;
654 
655  /*
656  * Do the copy periodically to prevent
657  * cpu->copy array overflow and corrupt memory
658  */
659  if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD) || chained)
660  {
661  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
662  &map_hint)))
663  vlib_error_count (vm, node->node_index,
664  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
665  copy_len = 0;
666 
667  /* give buffers back to driver */
668  vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed,
669  chained, frame, n_left);
670  }
671 
672  buffers++;
673  }
674 
675 done:
676  if (PREDICT_TRUE (copy_len))
677  {
678  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
679  &map_hint)))
680  vlib_error_count (vm, node->node_index,
681  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
682 
683  vhost_user_mark_desc_available (vm, rxvq, &n_descs_processed, chained,
684  frame, n_left);
685  }
686 
687 /*
688  * When n_left is set, error is always set to something too.
689  * If the error is due to a lack of remaining buffers, we go back up and
690  * retry.
691  * The idea is that it is better to spend some extra time on packets
692  * that have already been processed than to drop them and fetch
693  * fresh packets that are just as likely to be dropped.
694  * This technique also gives the VM driver more time to pick up packets.
695  * When traffic flows from physical to virtual interfaces, it
696  * ends up leveraging the physical NIC buffers to absorb the VM's
697  * CPU jitter.
698  */
699  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
700  {
701  retry--;
702  goto retry;
703  }
704 
705  vhost_user_vring_unlock (vui, qid);
706 
707  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
708  {
709  vlib_error_count (vm, node->node_index, error, n_left);
710  vlib_increment_simple_counter
711  (vnet_main.interface_main.sw_if_counters +
712  VNET_INTERFACE_COUNTER_DROP, thread_index, vui->sw_if_index, n_left);
713  }
714 
715  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
716  return frame->n_vectors;
717 }
718 
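/*
 * Split-ring transmit path (the device TX function): same overall structure
 * as the packed-ring variant above, but free descriptors come from the
 * avail ring, completions are written to the used ring, and mergeable RX
 * buffers (virtio_net_hdr_sz == 12) allow one packet to span several
 * descriptor chains.
 */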
719 VNET_DEVICE_CLASS_TX_FN (vhost_user_device_class) (vlib_main_t * vm,
720  vlib_node_runtime_t *
721  node, vlib_frame_t * frame)
722 {
723  u32 *buffers = vlib_frame_vector_args (frame);
724  u32 n_left = frame->n_vectors;
725  vhost_user_main_t *vum = &vhost_user_main;
726  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
727  vhost_user_intf_t *vui =
728  pool_elt_at_index (vum->vhost_user_interfaces, rd->dev_instance);
729  u32 qid = ~0;
730  vhost_user_vring_t *rxvq;
731  u8 error;
732  u32 thread_index = vm->thread_index;
733  vhost_cpu_t *cpu = &vum->cpus[thread_index];
734  u32 map_hint = 0;
735  u8 retry = 8;
736  u16 copy_len;
737  u16 tx_headers_len;
738  u32 or_flags;
739 
740  if (PREDICT_FALSE (!vui->admin_up))
741  {
742  error = VHOST_USER_TX_FUNC_ERROR_DOWN;
743  goto done3;
744  }
745 
746  if (PREDICT_FALSE (!vui->is_ready))
747  {
748  error = VHOST_USER_TX_FUNC_ERROR_NOT_READY;
749  goto done3;
750  }
751 
752  qid = VHOST_VRING_IDX_RX (*vec_elt_at_index (vui->per_cpu_tx_qid,
753  thread_index));
754  rxvq = &vui->vrings[qid];
755  if (PREDICT_FALSE (rxvq->avail == 0))
756  {
757  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
758  goto done3;
759  }
760 
761  if (PREDICT_FALSE (vui->use_tx_spinlock))
762  vhost_user_vring_lock (vui, qid);
763 
764  if (vhost_user_is_packed_ring_supported (vui))
765  return (vhost_user_device_class_packed (vm, node, frame));
766 
767 retry:
768  error = VHOST_USER_TX_FUNC_ERROR_NONE;
769  tx_headers_len = 0;
770  copy_len = 0;
771  while (n_left > 0)
772  {
773  vlib_buffer_t *b0, *current_b0;
774  u16 desc_head, desc_index, desc_len;
775  vring_desc_t *desc_table;
776  uword buffer_map_addr;
777  u32 buffer_len;
778  u16 bytes_left;
779 
780  if (PREDICT_TRUE (n_left > 1))
781  vlib_prefetch_buffer_with_index (vm, buffers[1], LOAD);
782 
783  b0 = vlib_get_buffer (vm, buffers[0]);
784 
785  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
786  {
787  cpu->current_trace = vlib_add_trace (vm, node, b0,
788  sizeof (*cpu->current_trace));
789  vhost_user_tx_trace (cpu->current_trace, vui, qid / 2, b0, rxvq);
790  }
791 
792  if (PREDICT_FALSE (rxvq->last_avail_idx == rxvq->avail->idx))
793  {
794  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
795  goto done;
796  }
797 
798  desc_table = rxvq->desc;
799  desc_head = desc_index =
800  rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
801 
802  /* Go deeper in case of indirect descriptor
803  * I don't know of any driver providing indirect for RX. */
804  if (PREDICT_FALSE (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
805  {
806  if (PREDICT_FALSE
807  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
808  {
809  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
810  goto done;
811  }
812  if (PREDICT_FALSE
813  (!(desc_table =
814  map_guest_mem (vui, rxvq->desc[desc_index].addr,
815  &map_hint))))
816  {
817  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
818  goto done;
819  }
820  desc_index = 0;
821  }
822 
823  desc_len = vui->virtio_net_hdr_sz;
824  buffer_map_addr = desc_table[desc_index].addr;
825  buffer_len = desc_table[desc_index].len;
826 
827  {
828  // Get a header from the header array
829  virtio_net_hdr_mrg_rxbuf_t *hdr = &cpu->tx_headers[tx_headers_len];
830  tx_headers_len++;
831  hdr->hdr.flags = 0;
832  hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE;
833  hdr->num_buffers = 1; //This is local, no need to check
834 
835  or_flags = (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM) ||
836  (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM) ||
837  (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM);
838 
839  /* Guest supports csum offload and buffer requires checksum offload? */
840  if (or_flags
841  && (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
842  vhost_user_handle_tx_offload (vui, b0, &hdr->hdr);
843 
844  // Prepare a copy order executed later for the header
845  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
846  vhost_copy_t *cpy = &cpu->copy[copy_len];
847  copy_len++;
848  cpy->len = vui->virtio_net_hdr_sz;
849  cpy->dst = buffer_map_addr;
850  cpy->src = (uword) hdr;
851  }
852 
853  buffer_map_addr += vui->virtio_net_hdr_sz;
854  buffer_len -= vui->virtio_net_hdr_sz;
855  bytes_left = b0->current_length;
856  current_b0 = b0;
857  while (1)
858  {
859  if (buffer_len == 0)
860  { //Get new output
861  if (desc_table[desc_index].flags & VIRTQ_DESC_F_NEXT)
862  {
863  //Next one is chained
864  desc_index = desc_table[desc_index].next;
865  buffer_map_addr = desc_table[desc_index].addr;
866  buffer_len = desc_table[desc_index].len;
867  }
868  else if (vui->virtio_net_hdr_sz == 12) //MRG is available
869  {
870  virtio_net_hdr_mrg_rxbuf_t *hdr =
871  &cpu->tx_headers[tx_headers_len - 1];
872 
873  //Move from available to used buffer
874  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id =
875  desc_head;
876  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len =
877  desc_len;
878  vhost_user_log_dirty_ring (vui, rxvq,
879  ring[rxvq->last_used_idx &
880  rxvq->qsz_mask]);
881 
882  rxvq->last_avail_idx++;
883  rxvq->last_used_idx++;
884  hdr->num_buffers++;
885  desc_len = 0;
886 
887  if (PREDICT_FALSE
888  (rxvq->last_avail_idx == rxvq->avail->idx))
889  {
890  //Dequeue queued descriptors for this packet
891  rxvq->last_used_idx -= hdr->num_buffers - 1;
892  rxvq->last_avail_idx -= hdr->num_buffers - 1;
893  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF;
894  goto done;
895  }
896 
897  desc_table = rxvq->desc;
898  desc_head = desc_index =
899  rxvq->avail->ring[rxvq->last_avail_idx & rxvq->qsz_mask];
900  if (PREDICT_FALSE
901  (rxvq->desc[desc_head].flags & VIRTQ_DESC_F_INDIRECT))
902  {
903  //It is seriously unlikely that a driver will put indirect descriptor
904  //after non-indirect descriptor.
905  if (PREDICT_FALSE
906  (rxvq->desc[desc_head].len < sizeof (vring_desc_t)))
907  {
908  error = VHOST_USER_TX_FUNC_ERROR_INDIRECT_OVERFLOW;
909  goto done;
910  }
911  if (PREDICT_FALSE
912  (!(desc_table =
913  map_guest_mem (vui,
914  rxvq->desc[desc_index].addr,
915  &map_hint))))
916  {
917  error = VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL;
918  goto done;
919  }
920  desc_index = 0;
921  }
922  buffer_map_addr = desc_table[desc_index].addr;
923  buffer_len = desc_table[desc_index].len;
924  }
925  else
926  {
927  error = VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOMRG;
928  goto done;
929  }
930  }
931 
932  {
933  ASSERT (copy_len < VHOST_USER_COPY_ARRAY_N);
934  vhost_copy_t *cpy = &cpu->copy[copy_len];
935  copy_len++;
936  cpy->len = bytes_left;
937  cpy->len = (cpy->len > buffer_len) ? buffer_len : cpy->len;
938  cpy->dst = buffer_map_addr;
939  cpy->src = (uword) vlib_buffer_get_current (current_b0) +
940  current_b0->current_length - bytes_left;
941 
942  bytes_left -= cpy->len;
943  buffer_len -= cpy->len;
944  buffer_map_addr += cpy->len;
945  desc_len += cpy->len;
946 
947  CLIB_PREFETCH (&rxvq->desc, CLIB_CACHE_LINE_BYTES, LOAD);
948  }
949 
950  // Check if vlib buffer has more data. If not, get more or break.
951  if (PREDICT_TRUE (!bytes_left))
952  {
953  if (PREDICT_FALSE
954  (current_b0->flags & VLIB_BUFFER_NEXT_PRESENT))
955  {
956  current_b0 = vlib_get_buffer (vm, current_b0->next_buffer);
957  bytes_left = current_b0->current_length;
958  }
959  else
960  {
961  //End of packet
962  break;
963  }
964  }
965  }
966 
967  //Move from available to used ring
968  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].id = desc_head;
969  rxvq->used->ring[rxvq->last_used_idx & rxvq->qsz_mask].len = desc_len;
970  vhost_user_log_dirty_ring (vui, rxvq,
971  ring[rxvq->last_used_idx & rxvq->qsz_mask]);
972  rxvq->last_avail_idx++;
973  rxvq->last_used_idx++;
974 
975  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
976  {
977  cpu->current_trace->hdr = cpu->tx_headers[tx_headers_len - 1];
978  }
979 
980  n_left--; //At the end for error counting when 'goto done' is invoked
981 
982  /*
983  * Do the copy periodically to prevent
984  * cpu->copy array overflow and corrupt memory
985  */
986  if (PREDICT_FALSE (copy_len >= VHOST_USER_TX_COPY_THRESHOLD))
987  {
988  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
989  &map_hint)))
990  {
991  vlib_error_count (vm, node->node_index,
992  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
993  }
994  copy_len = 0;
995 
996  /* give buffers back to driver */
997  CLIB_MEMORY_BARRIER ();
998  rxvq->used->idx = rxvq->last_used_idx;
999  vhost_user_log_dirty_ring (vui, rxvq, idx);
1000  }
1001  buffers++;
1002  }
1003 
1004 done:
1005  //Do the memory copies
1006  if (PREDICT_FALSE (vhost_user_tx_copy (vui, cpu->copy, copy_len,
1007  &map_hint)))
1008  {
1009  vlib_error_count (vm, node->node_index,
1010  VHOST_USER_TX_FUNC_ERROR_MMAP_FAIL, 1);
1011  }
1012 
1013  CLIB_MEMORY_BARRIER ();
1014  rxvq->used->idx = rxvq->last_used_idx;
1015  vhost_user_log_dirty_ring (vui, rxvq, idx);
1016 
1017 /*
1018  * When n_left is set, error is always set to something too.
1019  * If the error is due to a lack of remaining buffers, we go back up and
1020  * retry.
1021  * The idea is that it is better to spend some extra time on packets
1022  * that have already been processed than to drop them and fetch
1023  * fresh packets that are just as likely to be dropped.
1024  * This technique also gives the VM driver more time to pick up packets.
1025  * When traffic flows from physical to virtual interfaces, it
1026  * ends up leveraging the physical NIC buffers to absorb the VM's
1027  * CPU jitter.
1028  */
1029  if (n_left && (error == VHOST_USER_TX_FUNC_ERROR_PKT_DROP_NOBUF) && retry)
1030  {
1031  retry--;
1032  goto retry;
1033  }
1034 
1035  /* interrupt (call) handling */
1036  if ((rxvq->callfd_idx != ~0) &&
1037  !(rxvq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT))
1038  {
1039  rxvq->n_since_last_int += frame->n_vectors - n_left;
1040 
1041  if (rxvq->n_since_last_int > vum->coalesce_frames)
1042  vhost_user_send_call (vm, rxvq);
1043  }
1044 
1045  vhost_user_vring_unlock (vui, qid);
1046 
1047 done3:
1048  if (PREDICT_FALSE (n_left && error != VHOST_USER_TX_FUNC_ERROR_NONE))
1049  {
1050  vlib_error_count (vm, node->node_index, error, n_left);
1051  vlib_increment_simple_counter
1052  (vnet_main.interface_main.sw_if_counters
1053  + VNET_INTERFACE_COUNTER_DROP,
1054  thread_index, vui->sw_if_index, n_left);
1055  }
1056 
1057  vlib_buffer_free (vm, vlib_frame_vector_args (frame), frame->n_vectors);
1058  return frame->n_vectors;
1059 }
1060 
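/*
 * RX mode changes on the guest's TX-facing vring drive the interrupt
 * coalescing timer: the first queue switched to interrupt/adaptive mode
 * starts the vhost_user_send_interrupt_node timer, the last one switched
 * back to polling stops it, and used->flags tells the guest driver whether
 * notifications (kicks) are wanted.
 */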
1061 static __clib_unused clib_error_t *
1062 vhost_user_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index,
1063  u32 qid, vnet_hw_interface_rx_mode mode)
1064 {
1065  vlib_main_t *vm = vnm->vlib_main;
1066  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
1067  vhost_user_main_t *vum = &vhost_user_main;
1068  vhost_user_intf_t *vui =
1069  pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
1070  vhost_user_vring_t *txvq = &vui->vrings[VHOST_VRING_IDX_TX (qid)];
1071 
1072  if ((mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
1073  (mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE))
1074  {
1075  if (txvq->kickfd_idx == ~0)
1076  {
1077  // We cannot support interrupt mode if the driver opts out
1078  return clib_error_return (0, "Driver does not support interrupt");
1079  }
1080  if (txvq->mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1081  {
1082  vum->ifq_count++;
1083  // Start the timer if this is the first encounter on interrupt
1084  // interface/queue
1085  if ((vum->ifq_count == 1) &&
1086  (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
1087  vlib_process_signal_event (vm,
1088  vhost_user_send_interrupt_node.index,
1089  VHOST_USER_EVENT_START_TIMER, 0);
1090  }
1091  }
1092  else if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1093  {
1094  if (((txvq->mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT) ||
1095  (txvq->mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE)) &&
1096  vum->ifq_count)
1097  {
1098  vum->ifq_count--;
1099  // Stop the timer if there is no more interrupt interface/queue
1100  if ((vum->ifq_count == 0) &&
1101  (vum->coalesce_time > 0.0) && (vum->coalesce_frames > 0))
1102  vlib_process_signal_event (vm,
1103  vhost_user_send_interrupt_node.index,
1104  VHOST_USER_EVENT_STOP_TIMER, 0);
1105  }
1106  }
1107 
1108  txvq->mode = mode;
1109  if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
1110  txvq->used->flags = VRING_USED_F_NO_NOTIFY;
1111  else if ((mode == VNET_HW_INTERFACE_RX_MODE_ADAPTIVE) ||
1112  (mode == VNET_HW_INTERFACE_RX_MODE_INTERRUPT))
1113  txvq->used->flags = 0;
1114  else
1115  {
1116  vu_log_err (vui, "unhandled mode %d changed for if %d queue %d", mode,
1117  hw_if_index, qid);
1118  return clib_error_return (0, "unsupported");
1119  }
1120 
1121  return 0;
1122 }
1123 
1124 static __clib_unused clib_error_t *
1125 vhost_user_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
1126  u32 flags)
1127 {
1128  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
1129  vhost_user_main_t *vum = &vhost_user_main;
1130  vhost_user_intf_t *vui =
1131  pool_elt_at_index (vum->vhost_user_interfaces, hif->dev_instance);
1132  u8 link_old, link_new;
1133 
1134  link_old = vui_is_link_up (vui);
1135 
1136  vui->admin_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
1137 
1138  link_new = vui_is_link_up (vui);
1139 
1140  if (link_old != link_new)
1141  vnet_hw_interface_set_flags (vnm, vui->hw_if_index, link_new ?
1142  VNET_HW_INTERFACE_FLAG_LINK_UP : 0);
1143 
1144  return /* no error */ 0;
1145 }
1146 
1147 /* *INDENT-OFF* */
1148 VNET_DEVICE_CLASS (vhost_user_device_class) = {
1149  .name = "vhost-user",
1150  .tx_function_n_errors = VHOST_USER_TX_FUNC_N_ERROR,
1151  .tx_function_error_strings = vhost_user_tx_func_error_strings,
1152  .format_device_name = format_vhost_user_interface_name,
1153  .name_renumber = vhost_user_name_renumber,
1154  .admin_up_down_function = vhost_user_interface_admin_up_down,
1155  .rx_mode_change_function = vhost_user_interface_rx_mode_change,
1156  .format_tx_trace = format_vhost_trace,
1157 };
1158 
1159 /* *INDENT-ON* */
1160 
1161 /*
1162  * fd.io coding-style-patch-verification: ON
1163  *
1164  * Local Variables:
1165  * eval: (c-set-style "gnu")
1166  * End:
1167  */