FD.io VPP  v21.01.1
Vector Packet Processing
device.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <fcntl.h>
21 
22 #include <vlib/vlib.h>
23 #include <vlib/unix/unix.h>
24 #include <vnet/vnet.h>
25 #include <vnet/ethernet/ethernet.h>
26 #include <vnet/gso/gro_func.h>
28 #include <vnet/ip/ip4_packet.h>
29 #include <vnet/ip/ip6_packet.h>
30 #include <vnet/tcp/tcp_packet.h>
31 #include <vnet/udp/udp_packet.h>
33 
/*
 * Per-interface TX error counters: _(symbol, human-readable string).
 * Expanded into the VIRTIO_TX_ERROR_* enum and its string table.
 */
#define foreach_virtio_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(TRUNC_PACKET, "packet > buffer size -- truncated in tx ring") \
_(PENDING_MSGS, "pending msgs in tx ring") \
_(INDIRECT_DESC_ALLOC_FAILED, "indirect descriptor allocation failed - packet drop") \
_(OUT_OF_ORDER, "out-of-order buffers in used ring") \
_(GSO_PACKET_DROP, "gso disabled on itf  -- gso packet drop") \
_(CSUM_OFFLOAD_PACKET_DROP, "checksum offload disabled on itf -- csum offload packet drop")
42 
43 typedef enum
44 {
45 #define _(f,s) VIRTIO_TX_ERROR_##f,
47 #undef _
50 
51 static char *virtio_tx_func_error_strings[] = {
52 #define _(n,s) s,
54 #undef _
55 };
56 
57 static u8 *
58 format_virtio_device (u8 * s, va_list * args)
59 {
60  u32 dev_instance = va_arg (*args, u32);
61  int verbose = va_arg (*args, int);
62  u32 indent = format_get_indent (s);
63 
64  s = format (s, "VIRTIO interface");
65  if (verbose)
66  {
67  s = format (s, "\n%U instance %u", format_white_space, indent + 2,
68  dev_instance);
69  }
70  return s;
71 }
72 
73 typedef struct
74 {
80 
81 static u8 *
82 format_virtio_tx_trace (u8 * s, va_list * va)
83 {
84  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
85  CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
86  virtio_tx_trace_t *t = va_arg (*va, virtio_tx_trace_t *);
87  u32 indent = format_get_indent (s);
88 
89  s = format (s, "%Ubuffer 0x%x: %U\n",
90  format_white_space, indent,
92  s =
93  format (s, "%U%U\n", format_white_space, indent,
95  s =
96  format (s, "%U%U", format_white_space, indent,
98  sizeof (t->buffer.pre_data));
99  return s;
100 }
101 
104  vlib_buffer_t * b0, u32 bi, int is_tun)
105 {
107  t = vlib_add_trace (vm, node, b0, sizeof (t[0]));
108  t->sw_if_index = vnet_buffer (b0)->sw_if_index[VLIB_TX];
109  t->buffer_index = bi;
110  if (is_tun)
111  {
112  int is_ip4 = 0, is_ip6 = 0;
113 
114  switch (((u8 *) vlib_buffer_get_current (b0))[0] & 0xf0)
115  {
116  case 0x40:
117  is_ip4 = 1;
118  break;
119  case 0x60:
120  is_ip6 = 1;
121  break;
122  default:
123  break;
124  }
125  vnet_generic_header_offset_parser (b0, &t->gho, 0, is_ip4, is_ip6);
126  }
127  else
129  b0->flags &
130  VNET_BUFFER_F_IS_IP4,
131  b0->flags & VNET_BUFFER_F_IS_IP6);
132 
133  clib_memcpy_fast (&t->buffer, b0, sizeof (*b0) - sizeof (b0->pre_data));
135  sizeof (t->buffer.pre_data));
136 }
137 
140  u32 * buffers, u16 n,
142 {
143  vlib_error_count (vm, node_index, error, n);
144  vlib_buffer_free (vm, buffers, n);
145 }
146 
148 virtio_memset_ring_u32 (u32 * ring, u32 start, u32 ring_size, u32 n_buffers)
149 {
150  ASSERT (n_buffers <= ring_size);
151 
152  if (PREDICT_TRUE (start + n_buffers <= ring_size))
153  {
154  clib_memset_u32 (ring + start, ~0, n_buffers);
155  }
156  else
157  {
158  clib_memset_u32 (ring + start, ~0, ring_size - start);
159  clib_memset_u32 (ring, ~0, n_buffers - (ring_size - start));
160  }
161 }
162 
165  uword node_index)
166 {
167  u16 used = vring->desc_in_use;
168  u16 sz = vring->size;
169  u16 mask = sz - 1;
170  u16 last = vring->last_used_idx;
171  u16 n_left = vring->used->idx - last;
172  u16 out_of_order_count = 0;
173 
174  if (n_left == 0)
175  return;
176 
177  while (n_left)
178  {
179  vring_used_elem_t *e = &vring->used->ring[last & mask];
180  u16 slot, n_buffers;
181  slot = n_buffers = e->id;
182 
183  while (e->id == (n_buffers & mask))
184  {
185  n_left--;
186  last++;
187  n_buffers++;
188  vring_desc_t *d = &vring->desc[e->id];
189  u16 next;
190  while (d->flags & VRING_DESC_F_NEXT)
191  {
192  n_buffers++;
193  next = d->next;
194  d = &vring->desc[next];
195  }
196  if (n_left == 0)
197  break;
198  e = &vring->used->ring[last & mask];
199  }
200  vlib_buffer_free_from_ring (vm, vring->buffers, slot,
201  sz, (n_buffers - slot));
202  virtio_memset_ring_u32 (vring->buffers, slot, sz, (n_buffers - slot));
203  used -= (n_buffers - slot);
204 
205  if (n_left > 0)
206  {
207  vlib_buffer_free (vm, &vring->buffers[e->id], 1);
208  vring->buffers[e->id] = ~0;
209  used--;
210  last++;
211  n_left--;
212  out_of_order_count++;
213  vring->flags |= VRING_TX_OUT_OF_ORDER;
214  }
215  }
216 
217  /*
218  * Some vhost-backends give buffers back in out-of-order fashion in used ring.
219  * It impacts the overall virtio-performance.
220  */
221  if (out_of_order_count)
222  vlib_error_count (vm, node_index, VIRTIO_TX_ERROR_OUT_OF_ORDER,
223  out_of_order_count);
224 
225  vring->desc_in_use = used;
226  vring->last_used_idx = last;
227 }
228 
231  uword node_index)
232 {
233  vring_packed_desc_t *d;
234  u16 sz = vring->size;
235  u16 last = vring->last_used_idx;
236  u16 n_buffers = 0, start;
237  u16 flags;
238 
239  if (vring->desc_in_use == 0)
240  return;
241 
242  d = &vring->packed_desc[last];
243  flags = d->flags;
244  start = d->id;
245 
246  while ((flags & VRING_DESC_F_AVAIL) == (vring->used_wrap_counter << 7) &&
247  (flags & VRING_DESC_F_USED) == (vring->used_wrap_counter << 15))
248  {
249  last++;
250  n_buffers++;
251 
252  if (last >= sz)
253  {
254  last = 0;
255  vring->used_wrap_counter ^= 1;
256  }
257  d = &vring->packed_desc[last];
258  flags = d->flags;
259  }
260 
261  if (n_buffers)
262  {
263  vlib_buffer_free_from_ring (vm, vring->buffers, start, sz, n_buffers);
264  virtio_memset_ring_u32 (vring->buffers, start, sz, n_buffers);
265  vring->desc_in_use -= n_buffers;
266  vring->last_used_idx = last;
267  }
268 }
269 
272  uword node_index, int packed)
273 {
274  if (packed)
275  virtio_free_used_device_desc_packed (vm, vring, node_index);
276  else
277  virtio_free_used_device_desc_split (vm, vring, node_index);
278 
279 }
280 
282 set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
283  const int is_l2)
284 {
285  if (b->flags & VNET_BUFFER_F_IS_IP4)
286  {
287  ip4_header_t *ip4;
288  generic_header_offset_t gho = { 0 };
289  vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
290  0 /* ip6 */ );
291  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
292  hdr->csum_start = gho.l4_hdr_offset; // 0x22;
293  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
294  {
295  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
296  }
297  else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
298  {
299  hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
300  }
301 
302  /*
303  * virtio devices do not support IP4 checksum offload. So driver takes care
304  * of it while doing tx.
305  */
306  ip4 =
308  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
309  ip4->checksum = ip4_header_checksum (ip4);
310  }
311  else if (b->flags & VNET_BUFFER_F_IS_IP6)
312  {
313  generic_header_offset_t gho = { 0 };
314  vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
315  1 /* ip6 */ );
316  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
317  hdr->csum_start = gho.l4_hdr_offset; // 0x36;
318  if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
319  {
320  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
321  }
322  else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
323  {
324  hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
325  }
326  }
327 }
328 
330 set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
331  const int is_l2)
332 {
333  if (b->flags & VNET_BUFFER_F_IS_IP4)
334  {
335  ip4_header_t *ip4;
336  generic_header_offset_t gho = { 0 };
337  vnet_generic_header_offset_parser (b, &gho, is_l2, 1 /* ip4 */ ,
338  0 /* ip6 */ );
339  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
340  hdr->gso_size = vnet_buffer2 (b)->gso_size;
341  hdr->hdr_len = gho.hdr_sz;
342  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
343  hdr->csum_start = gho.l4_hdr_offset; // 0x22;
344  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
345  ip4 =
347  /*
348  * virtio devices do not support IP4 checksum offload. So driver takes care
349  * of it while doing tx.
350  */
351  if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
352  ip4->checksum = ip4_header_checksum (ip4);
353  }
354  else if (b->flags & VNET_BUFFER_F_IS_IP6)
355  {
356  generic_header_offset_t gho = { 0 };
357  vnet_generic_header_offset_parser (b, &gho, is_l2, 0 /* ip4 */ ,
358  1 /* ip6 */ );
359  hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
360  hdr->gso_size = vnet_buffer2 (b)->gso_size;
361  hdr->hdr_len = gho.hdr_sz;
362  hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
363  hdr->csum_start = gho.l4_hdr_offset; // 0x36;
364  hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
365  }
366 }
367 
370  virtio_vring_t * vring, u32 bi, u16 free_desc_count,
371  u16 avail, u16 next, u16 mask, int hdr_sz, int do_gso,
372  int csum_offload, int is_pci, int is_tun, int is_indirect,
373  int is_any_layout)
374 {
375  u16 n_added = 0;
376  vring_desc_t *d;
377  int is_l2 = !is_tun;
378  d = &vring->desc[next];
379  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
380  virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
381  u32 drop_inline = ~0;
382 
383  clib_memset_u8 (hdr, 0, hdr_sz);
384 
385  if (b->flags & VNET_BUFFER_F_GSO)
386  {
387  if (do_gso)
388  set_gso_offsets (b, hdr, is_l2);
389  else
390  {
391  drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
392  goto done;
393  }
394  }
395  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
396  VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
397  {
398  if (csum_offload)
399  set_checksum_offsets (b, hdr, is_l2);
400  else
401  {
402  drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
403  goto done;
404  }
405  }
406 
407  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
408  {
409  virtio_tx_trace (vm, node, b, bi, is_tun);
410  }
411 
412  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
413  {
414  d->addr = ((is_pci) ? vlib_buffer_get_current_pa (vm, b) :
416  d->len = b->current_length + hdr_sz;
417  d->flags = 0;
418  }
419  else if (is_indirect)
420  {
421  /*
422  * We are using single vlib_buffer_t for indirect descriptor(s)
423  * chain. Single descriptor is 16 bytes and vlib_buffer_t
424  * has 2048 bytes space. So maximum long chain can have 128
425  * (=2048/16) indirect descriptors.
426  * It can easily support 65535 bytes of Jumbo frames with
427  * each data buffer size of 512 bytes minimum.
428  */
429  u32 indirect_buffer = 0;
430  if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
431  {
432  drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
433  goto done;
434  }
435 
436  vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
437  indirect_desc->current_data = 0;
438  indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
439  indirect_desc->next_buffer = bi;
440  bi = indirect_buffer;
441 
442  vring_desc_t *id =
443  (vring_desc_t *) vlib_buffer_get_current (indirect_desc);
444  u32 count = 1;
445  if (is_pci)
446  {
447  d->addr = vlib_physmem_get_pa (vm, id);
448  id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
449 
450  /*
451  * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
452  * should be presented in separate descriptor and data will start
453  * from next descriptor.
454  */
455  if (is_any_layout)
456  id->len = b->current_length + hdr_sz;
457  else
458  {
459  id->len = hdr_sz;
460  id->flags = VRING_DESC_F_NEXT;
461  id->next = count;
462  count++;
463  id++;
464  id->addr = vlib_buffer_get_current_pa (vm, b);
465  id->len = b->current_length;
466  }
467  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
468  {
469  id->flags = VRING_DESC_F_NEXT;
470  id->next = count;
471  count++;
472  id++;
473  b = vlib_get_buffer (vm, b->next_buffer);
474  id->addr = vlib_buffer_get_current_pa (vm, b);
475  id->len = b->current_length;
476  }
477  }
478  else /* VIRTIO_IF_TYPE_[TAP | TUN] */
479  {
480  d->addr = pointer_to_uword (id);
481  /* first buffer in chain */
482  id->addr = pointer_to_uword (vlib_buffer_get_current (b)) - hdr_sz;
483  id->len = b->current_length + hdr_sz;
484 
485  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
486  {
487  id->flags = VRING_DESC_F_NEXT;
488  id->next = count;
489  count++;
490  id++;
491  b = vlib_get_buffer (vm, b->next_buffer);
492  id->addr = pointer_to_uword (vlib_buffer_get_current (b));
493  id->len = b->current_length;
494  }
495  }
496  id->flags = 0;
497  id->next = 0;
498  d->len = count * sizeof (vring_desc_t);
500  }
501  else if (is_pci)
502  {
503  u16 count = next;
504  vlib_buffer_t *b_temp = b;
505  u16 n_buffers_in_chain = 1;
506 
507  /*
508  * Check the length of the chain for the required number of
509  * descriptors. Return from here, retry to get more descriptors,
510  * if chain length is greater than available descriptors.
511  */
512  while (b_temp->flags & VLIB_BUFFER_NEXT_PRESENT)
513  {
514  n_buffers_in_chain++;
515  b_temp = vlib_get_buffer (vm, b_temp->next_buffer);
516  }
517 
518  if (n_buffers_in_chain > free_desc_count)
519  return n_buffers_in_chain;
520 
521  d->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
522  d->len = b->current_length + hdr_sz;
523 
524  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
525  {
527  vring->buffers[count] = bi;
528  b->flags &=
529  ~(VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID);
530  bi = b->next_buffer;
531  b->next_buffer = 0;
532  n_added++;
533  count = (count + 1) & mask;
534  d->next = count;
535  d = &vring->desc[count];
536  b = vlib_get_buffer (vm, bi);
537  d->addr = vlib_buffer_get_current_pa (vm, b);
538  d->len = b->current_length;
539  }
540  d->flags = 0;
541  vring->buffers[count] = bi;
542  vring->avail->ring[avail & mask] = next;
543  n_added++;
544  return n_added;
545  }
546  else
547  {
548  ASSERT (0);
549  }
550  vring->buffers[next] = bi;
551  vring->avail->ring[avail & mask] = next;
552  n_added++;
553 
554 done:
555  if (drop_inline != ~0)
556  virtio_interface_drop_inline (vm, node->node_index, &bi, 1, drop_inline);
557 
558  return n_added;
559 }
560 
563  virtio_vring_t * vring, u32 bi, u16 next,
564  int hdr_sz, int do_gso, int csum_offload,
565  int is_pci, int is_tun, int is_indirect,
566  int is_any_layout)
567 {
568  u16 n_added = 0, flags = 0;
569  int is_l2 = !is_tun;
570  vring_packed_desc_t *d = &vring->packed_desc[next];
571  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
572  virtio_net_hdr_v1_t *hdr = vlib_buffer_get_current (b) - hdr_sz;
573  u32 drop_inline = ~0;
574 
575  clib_memset (hdr, 0, hdr_sz);
576 
577  if (b->flags & VNET_BUFFER_F_GSO)
578  {
579  if (do_gso)
580  set_gso_offsets (b, hdr, is_l2);
581  else
582  {
583  drop_inline = VIRTIO_TX_ERROR_GSO_PACKET_DROP;
584  goto done;
585  }
586  }
587  else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
588  VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
589  {
590  if (csum_offload)
591  set_checksum_offsets (b, hdr, is_l2);
592  else
593  {
594  drop_inline = VIRTIO_TX_ERROR_CSUM_OFFLOAD_PACKET_DROP;
595  goto done;
596  }
597  }
598  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
599  {
600  virtio_tx_trace (vm, node, b, bi, is_tun);
601  }
602 
603  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
604  {
605  d->addr =
606  ((is_pci) ? vlib_buffer_get_current_pa (vm,
607  b) :
609  d->len = b->current_length + hdr_sz;
610  }
611  else if (is_indirect)
612  {
613  /*
614  * We are using single vlib_buffer_t for indirect descriptor(s)
615  * chain. Single descriptor is 16 bytes and vlib_buffer_t
616  * has 2048 bytes space. So maximum long chain can have 128
617  * (=2048/16) indirect descriptors.
618  * It can easily support 65535 bytes of Jumbo frames with
619  * each data buffer size of 512 bytes minimum.
620  */
621  u32 indirect_buffer = 0;
622  if (PREDICT_FALSE (vlib_buffer_alloc (vm, &indirect_buffer, 1) == 0))
623  {
624  drop_inline = VIRTIO_TX_ERROR_INDIRECT_DESC_ALLOC_FAILED;
625  goto done;
626  }
627 
628  vlib_buffer_t *indirect_desc = vlib_get_buffer (vm, indirect_buffer);
629  indirect_desc->current_data = 0;
630  indirect_desc->flags |= VLIB_BUFFER_NEXT_PRESENT;
631  indirect_desc->next_buffer = bi;
632  bi = indirect_buffer;
633 
634  vring_packed_desc_t *id =
635  (vring_packed_desc_t *) vlib_buffer_get_current (indirect_desc);
636  u32 count = 1;
637  if (is_pci)
638  {
639  d->addr = vlib_physmem_get_pa (vm, id);
640  id->addr = vlib_buffer_get_current_pa (vm, b) - hdr_sz;
641 
642  /*
643  * If VIRTIO_F_ANY_LAYOUT is not negotiated, then virtio_net_hdr
644  * should be presented in separate descriptor and data will start
645  * from next descriptor.
646  */
647  if (is_any_layout)
648  id->len = b->current_length + hdr_sz;
649  else
650  {
651  id->len = hdr_sz;
652  id->flags = 0;
653  id->id = 0;
654  count++;
655  id++;
656  id->addr = vlib_buffer_get_current_pa (vm, b);
657  id->len = b->current_length;
658  }
659  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
660  {
661  id->flags = 0;
662  id->id = 0;
663  count++;
664  id++;
665  b = vlib_get_buffer (vm, b->next_buffer);
666  id->addr = vlib_buffer_get_current_pa (vm, b);
667  id->len = b->current_length;
668  }
669  }
670  id->flags = 0;
671  id->id = 0;
672  d->len = count * sizeof (vring_packed_desc_t);
674  }
675  else
676  {
677  ASSERT (0);
678  }
679  if (vring->avail_wrap_counter)
680  {
683  }
684  else
685  {
688  }
689 
690  d->id = next;
691  d->flags = flags;
692  vring->buffers[next] = bi;
693  n_added++;
694 
695 done:
696  if (drop_inline != ~0)
697  virtio_interface_drop_inline (vm, node->node_index, &bi, 1, drop_inline);
698 
699  return n_added;
700 }
701 
705  virtio_if_t * vif,
707  virtio_vring_t * vring,
708  u32 * buffers, u16 n_left,
709  const int do_gso,
710  const int csum_offload)
711 {
712  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
713  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
714  int is_indirect =
715  ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
716  int is_any_layout =
717  ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
718  const int hdr_sz = vif->virtio_net_hdr_sz;
719  u16 sz = vring->size;
720  u16 used, next, n_buffers = 0, n_buffers_left = 0;
721  u16 n_vectors = n_left;
722 
723 
724  used = vring->desc_in_use;
725  next = vring->desc_next;
726 
727  if (vif->packet_buffering)
728  {
729  n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);
730 
731  while (n_buffers_left && used < sz)
732  {
733  u16 n_added = 0;
734 
736  if (bi == ~0)
737  break;
738  n_added = add_buffer_to_slot_packed (vm, node,
739  vring, bi, next,
740  hdr_sz, do_gso, csum_offload,
741  is_pci, is_tun, is_indirect,
742  is_any_layout);
743  n_buffers_left--;
744  if (PREDICT_FALSE (n_added == 0))
745  continue;
746 
747  used++;
748  next++;
749  if (next >= sz)
750  {
751  next = 0;
752  vring->avail_wrap_counter ^= 1;
753  }
754  }
755  }
756 
757  while (n_left && used < sz)
758  {
759  u16 n_added = 0;
760 
761  n_added = add_buffer_to_slot_packed (vm, node,
762  vring, buffers[0], next,
763  hdr_sz, do_gso, csum_offload,
764  is_pci, is_tun, is_indirect,
765  is_any_layout);
766  buffers++;
767  n_left--;
768  if (PREDICT_FALSE (n_added == 0))
769  continue;
770 
771  used++;
772  next++;
773  if (next >= sz)
774  {
775  next = 0;
776  vring->avail_wrap_counter ^= 1;
777  }
778  }
779 
780  if (n_left != n_vectors || n_buffers != n_buffers_left)
781  {
783  vring->desc_next = next;
784  vring->desc_in_use = used;
786  if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
787  virtio_kick (vm, vring, vif);
788  }
789 
790  return n_left;
791 }
792 
795  u16 req, u16 next, u32 * first_free_desc_index,
796  u16 * free_desc_count)
797 {
798  u16 start = 0;
799  /* next is used as hint: from where to start looking */
800  for (u16 i = 0; i < size; i++, next++)
801  {
802  if (vring->buffers[next & mask] == ~0)
803  {
804  if (*first_free_desc_index == ~0)
805  {
806  *first_free_desc_index = (next & mask);
807  start = i;
808  (*free_desc_count)++;
809  req--;
810  if (req == 0)
811  break;
812  }
813  else
814  {
815  if (start + *free_desc_count == i)
816  {
817  (*free_desc_count)++;
818  req--;
819  if (req == 0)
820  break;
821  }
822  else
823  break;
824  }
825  }
826  }
827 }
828 
832  virtio_if_t * vif,
834  virtio_vring_t * vring, u32 * buffers,
835  u16 n_left, int do_gso,
836  int csum_offload)
837 {
838  u16 used, next, avail, n_buffers = 0, n_buffers_left = 0;
839  int is_pci = (type == VIRTIO_IF_TYPE_PCI);
840  int is_tun = (type == VIRTIO_IF_TYPE_TUN);
841  int is_indirect =
842  ((vif->features & VIRTIO_FEATURE (VIRTIO_RING_F_INDIRECT_DESC)) != 0);
843  int is_any_layout =
844  ((vif->features & VIRTIO_FEATURE (VIRTIO_F_ANY_LAYOUT)) != 0);
845  u16 sz = vring->size;
846  int hdr_sz = vif->virtio_net_hdr_sz;
847  u16 mask = sz - 1;
848  u16 n_vectors = n_left;
849 
850  used = vring->desc_in_use;
851  next = vring->desc_next;
852  avail = vring->avail->idx;
853 
854  u16 free_desc_count = 0;
855 
857  {
858  u32 first_free_desc_index = ~0;
859 
860  virtio_find_free_desc (vring, sz, mask, n_left, next,
861  &first_free_desc_index, &free_desc_count);
862 
863  if (free_desc_count)
864  next = first_free_desc_index;
865  }
866  else
867  free_desc_count = sz - used;
868 
869  if (vif->packet_buffering)
870  {
871  n_buffers = n_buffers_left = virtio_vring_n_buffers (vring->buffering);
872 
873  while (n_buffers_left && free_desc_count)
874  {
875  u16 n_added = 0;
876 
878  if (bi == ~0)
879  break;
880 
881  n_added = add_buffer_to_slot (vm, node, vring, bi, free_desc_count,
882  avail, next, mask, hdr_sz, do_gso,
883  csum_offload, is_pci, is_tun,
884  is_indirect, is_any_layout);
885  if (PREDICT_FALSE (n_added == 0))
886  {
887  n_buffers_left--;
888  continue;
889  }
890  else if (PREDICT_FALSE (n_added > free_desc_count))
891  break;
892 
893  avail++;
894  next = (next + n_added) & mask;
895  used += n_added;
896  n_buffers_left--;
897  free_desc_count -= n_added;
898  }
899  }
900 
901  while (n_left && free_desc_count)
902  {
903  u16 n_added = 0;
904 
905  n_added = add_buffer_to_slot (vm, node, vring, buffers[0],
906  free_desc_count, avail, next, mask,
907  hdr_sz, do_gso, csum_offload, is_pci,
908  is_tun, is_indirect, is_any_layout);
909 
910  if (PREDICT_FALSE (n_added == 0))
911  {
912  buffers++;
913  n_left--;
914  continue;
915  }
916  else if (PREDICT_FALSE (n_added > free_desc_count))
917  break;
918 
919  avail++;
920  next = (next + n_added) & mask;
921  used += n_added;
922  buffers++;
923  n_left--;
924  free_desc_count -= n_added;
925  }
926 
927  if (n_left != n_vectors || n_buffers != n_buffers_left)
928  {
930  vring->avail->idx = avail;
931  vring->desc_next = next;
932  vring->desc_in_use = used;
933  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0)
934  virtio_kick (vm, vring, vif);
935  }
936 
937  return n_left;
938 }
939 
942  virtio_if_t * vif,
944  u32 * buffers, u16 n_left, int packed,
945  int do_gso, int csum_offload)
946 {
947  if (packed)
948  return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
949  buffers, n_left,
950  do_gso, csum_offload);
951  else
952  return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
953  buffers, n_left,
954  do_gso, csum_offload);
955 }
956 
959  virtio_if_t * vif,
961  u32 * buffers, u16 n_left, int packed)
962 {
963  vnet_main_t *vnm = vnet_get_main ();
965 
967  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
968  buffers, n_left, packed,
969  1 /* do_gso */ ,
970  1 /* checksum offload */ );
972  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
973  buffers, n_left, packed,
974  0 /* no do_gso */ ,
975  1 /* checksum offload */ );
976  else
977  return virtio_interface_tx_gso_inline (vm, node, vif, type, vring,
978  buffers, n_left, packed,
979  0 /* no do_gso */ ,
980  0 /* no checksum offload */ );
981 }
982 
986 {
987  virtio_main_t *nm = &virtio_main;
988  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
990  u16 qid = vm->thread_index % vif->num_txqs;
991  virtio_vring_t *vring = vec_elt_at_index (vif->txq_vrings, qid);
992  u16 n_left = frame->n_vectors;
993  u32 *buffers = vlib_frame_vector_args (frame);
994  u32 to[GRO_TO_VECTOR_SIZE (n_left)];
995  int packed = vif->is_packed;
996 
998 
999  if (packed && (vring->device_event->flags != VRING_EVENT_F_DISABLE))
1000  virtio_kick (vm, vring, vif);
1001  else if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
1002  (vring->last_kick_avail_idx != vring->avail->idx))
1003  virtio_kick (vm, vring, vif);
1004 
1005  if (vif->packet_coalesce)
1006  {
1007  n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
1008  buffers = to;
1009  }
1010 
1011  u16 retry_count = 2;
1012 
1013 retry:
1014  /* free consumed buffers */
1015  virtio_free_used_device_desc (vm, vring, node->node_index, packed);
1016 
1017  if (vif->type == VIRTIO_IF_TYPE_TAP)
1018  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1019  VIRTIO_IF_TYPE_TAP,
1020  &buffers[frame->n_vectors - n_left],
1021  n_left, packed);
1022  else if (vif->type == VIRTIO_IF_TYPE_PCI)
1023  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1024  VIRTIO_IF_TYPE_PCI,
1025  &buffers[frame->n_vectors - n_left],
1026  n_left, packed);
1027  else if (vif->type == VIRTIO_IF_TYPE_TUN)
1028  n_left = virtio_interface_tx_inline (vm, node, vif, vring,
1029  VIRTIO_IF_TYPE_TUN,
1030  &buffers[frame->n_vectors - n_left],
1031  n_left, packed);
1032  else
1033  ASSERT (0);
1034 
1035  if (n_left && retry_count--)
1036  goto retry;
1037 
1038  if (vif->packet_buffering && n_left)
1039  {
1040  u16 n_buffered = virtio_vring_buffering_store_packets (vring->buffering,
1041  &buffers
1042  [frame->n_vectors
1043  - n_left],
1044  n_left);
1045  n_left -= n_buffered;
1046  }
1047  if (n_left)
1048  virtio_interface_drop_inline (vm, node->node_index,
1049  &buffers[frame->n_vectors - n_left], n_left,
1050  VIRTIO_TX_ERROR_NO_FREE_SLOTS);
1051 
1053 
1054  return frame->n_vectors - n_left;
1055 }
1056 
1057 static void
1059  u32 node_index)
1060 {
1061  virtio_main_t *apm = &virtio_main;
1062  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1064 
1065  /* Shut off redirection */
1066  if (node_index == ~0)
1067  {
1068  vif->per_interface_next_index = node_index;
1069  return;
1070  }
1071 
1074  node_index);
1075 }
1076 
1077 static void
1079 {
1080  /* Nothing for now */
1081 }
1082 
1085 {
1086  if (vif->is_packed)
1087  vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
1088  else
1090 }
1091 
1094 {
1095  if (vif->is_packed)
1096  vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
1097  else
1099 }
1100 
1101 static clib_error_t *
1104 {
1105  vlib_main_t *vm = vnm->vlib_main;
1106  virtio_main_t *mm = &virtio_main;
1107  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1109  virtio_vring_t *rx_vring = vec_elt_at_index (vif->rxq_vrings, qid);
1110 
1111  if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
1112  {
1113  virtio_set_rx_polling (vif, rx_vring);
1114  return clib_error_return (0, "interrupt mode is not supported");
1115  }
1116 
1117  if (mode == VNET_HW_IF_RX_MODE_POLLING)
1118  {
1119  if (vif->packet_coalesce || vif->packet_buffering)
1120  {
1121  if (mm->interrupt_queues_count > 0)
1122  mm->interrupt_queues_count--;
1123  if (mm->interrupt_queues_count == 0)
1127  }
1128  virtio_set_rx_polling (vif, rx_vring);
1129  }
1130  else
1131  {
1132  if (vif->packet_coalesce || vif->packet_buffering)
1133  {
1134  mm->interrupt_queues_count++;
1135  if (mm->interrupt_queues_count == 1)
1139  }
1140  virtio_set_rx_interrupt (vif, rx_vring);
1141  }
1142 
1143  rx_vring->mode = mode;
1144 
1145  return 0;
1146 }
1147 
1148 static clib_error_t *
1150 {
1151  virtio_main_t *mm = &virtio_main;
1152  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
1154 
1155  if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
1156  {
1157  vif->flags |= VIRTIO_IF_FLAG_ADMIN_UP;
1160  }
1161  else
1162  {
1163  vif->flags &= ~VIRTIO_IF_FLAG_ADMIN_UP;
1165  }
1166  return 0;
1167 }
1168 
1169 static clib_error_t *
1171  u32 hw_if_index,
1172  struct vnet_sw_interface_t *st, int is_add)
1173 {
1174  /* Nothing for now */
1175  return 0;
1176 }
1177 
1178 /* *INDENT-OFF* */
1180  .name = "virtio",
1181  .format_device_name = format_virtio_device_name,
1182  .format_device = format_virtio_device,
1183  .format_tx_trace = format_virtio_tx_trace,
1184  .tx_function_n_errors = VIRTIO_TX_N_ERROR,
1185  .tx_function_error_strings = virtio_tx_func_error_strings,
1186  .rx_redirect_to_node = virtio_set_interface_next_node,
1187  .clear_counters = virtio_clear_hw_interface_counters,
1188  .admin_up_down_function = virtio_interface_admin_up_down,
1189  .subif_add_del_function = virtio_subif_add_del_function,
1190  .rx_mode_change_function = virtio_interface_rx_mode_change,
1191 };
1192 
1193 /* *INDENT-ON* */
1194 
1195 /*
1196  * fd.io coding-style-patch-verification: ON
1197  *
1198  * Local Variables:
1199  * eval: (c-set-style "gnu")
1200  * End:
1201  */
u32 per_interface_next_index
Definition: virtio.h:132
gro_flow_table_t * flow_table
Definition: virtio.h:109
static_always_inline u16 virtio_interface_tx_split_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int do_gso, int csum_offload)
Definition: device.c:830
vlib_buffer_t buffer
Definition: device.c:78
#define VRING_EVENT_F_DISABLE
Definition: virtio_std.h:81
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
Definition: node.c:670
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:463
static_always_inline u16 virtio_vring_n_buffers(virtio_vring_buffering_t *buffering)
VNET_DEVICE_CLASS_TX_FN() virtio_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: device.c:983
vring_desc_event_t * device_event
Definition: virtio.h:79
static_always_inline void virtio_set_rx_interrupt(virtio_if_t *vif, virtio_vring_t *vring)
Definition: device.c:1084
virtio_if_t * interfaces
Definition: virtio.h:219
#define CLIB_UNUSED(x)
Definition: clib.h:87
static clib_error_t * virtio_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: device.c:1149
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:937
#define VIRTIO_NET_HDR_F_NEEDS_CSUM
Definition: virtio_std.h:134
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
#define vnet_buffer2(b)
Definition: buffer.h:481
static clib_error_t * virtio_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
Definition: device.c:1102
#define PREDICT_TRUE(x)
Definition: clib.h:122
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:136
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
vring_used_elem_t ring[0]
Definition: virtio_std.h:113
clib_memset(h->entries, 0, sizeof(h->entries[0]) *entries)
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:129
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u32 thread_index
Definition: main.h:250
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static_always_inline void virtio_free_used_device_desc_packed(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
Definition: device.c:230
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
#define VIRTIO_EVENT_START_TIMER
Definition: virtio.h:60
static u32 format_get_indent(u8 *s)
Definition: format.h:72
#define STRUCT_OFFSET_OF(t, f)
Definition: clib.h:70
vlib_main_t * vm
Definition: in2out_ed.c:1580
static_always_inline u32 virtio_vring_buffering_read_from_front(virtio_vring_buffering_t *buffering)
u16 mask
Definition: flow_types.api:52
clib_error_t * vnet_hw_interface_set_flags(vnet_main_t *vnm, u32 hw_if_index, vnet_hw_interface_flags_t flags)
Definition: interface.c:509
struct _tcp_header tcp_header_t
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1173
unsigned char u8
Definition: types.h:56
#define VIRTIO_FEATURE(X)
Definition: virtio_std.h:69
vring_desc_t * desc
Definition: virtio.h:71
static_always_inline u16 add_buffer_to_slot_packed(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_vring_t *vring, u32 bi, u16 next, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
Definition: device.c:562
#define VRING_AVAIL_F_NO_INTERRUPT
Definition: virtio_std.h:85
u64 features
Definition: virtio.h:130
#define static_always_inline
Definition: clib.h:109
VNET_DEVICE_CLASS(af_xdp_device_class)
u32 hw_if_index
Definition: virtio.h:151
static clib_error_t * virtio_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
Definition: device.c:1170
static_always_inline uword virtio_interface_tx_packed_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, const int do_gso, const int csum_offload)
Definition: device.c:703
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
u8 support_int_mode
Definition: virtio.h:205
description fragment has unexpected format
Definition: map.api:433
vnet_hw_interface_flags_t flags
Definition: interface.h:538
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
clib_spinlock_t lockp
Definition: virtio.h:66
#define clib_error_return(e, args...)
Definition: error.h:99
const cJSON *const b
Definition: cJSON.h:255
#define VNET_DEVICE_CLASS_TX_FN(devclass)
Definition: interface.h:316
unsigned int u32
Definition: types.h:88
#define foreach_virtio_tx_func_error
Definition: device.c:34
bool is_ip6
Definition: ip.api:43
static void vlib_buffer_free_from_ring(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring.
Definition: buffer_funcs.h:984
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define VRING_USED_F_NO_NOTIFY
Definition: virtio_std.h:84
Definition: cJSON.c:84
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:546
format_function_t format_vnet_buffer
Definition: buffer.h:497
static_always_inline void set_gso_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
Definition: device.c:330
static_always_inline void virtio_find_free_desc(virtio_vring_t *vring, u16 size, u16 mask, u16 req, u16 next, u32 *first_free_desc_index, u16 *free_desc_count)
Definition: device.c:794
vlib_main_t * vlib_main
Definition: vnet.h:92
static_always_inline u16 virtio_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_vring_t *vring, virtio_if_type_t type, u32 *buffers, u16 n_left, int packed)
Definition: device.c:958
static void vlib_process_signal_event(vlib_main_t *vm, uword node_index, uword type_opaque, uword data)
Definition: node_funcs.h:1015
static_always_inline void virtio_free_used_device_desc_split(vlib_main_t *vm, virtio_vring_t *vring, uword node_index)
Definition: device.c:164
vring_avail_t * avail
Definition: virtio.h:73
vring_desc_event_t * driver_event
Definition: virtio.h:78
static __clib_warn_unused_result u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:677
unsigned short u16
Definition: types.h:57
vnet_hw_if_rx_mode mode
Definition: virtio.h:107
u32 size
Definition: vhost_user.h:106
static u8 * format_virtio_tx_trace(u8 *s, va_list *va)
Definition: device.c:82
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:233
static u8 * format_virtio_device(u8 *s, va_list *args)
Definition: device.c:58
#define PREDICT_FALSE(x)
Definition: clib.h:121
vl_api_ip4_address_t ip4
Definition: one.api:376
u32 node_index
Node index.
Definition: node.h:488
u32 interrupt_queues_count
Definition: virtio.h:215
#define VIRTIO_NET_HDR_GSO_TCPV4
Definition: virtio_std.h:138
static_always_inline void virtio_free_used_device_desc(vlib_main_t *vm, virtio_vring_t *vring, uword node_index, int packed)
Definition: device.c:271
static_always_inline void set_checksum_offsets(vlib_buffer_t *b, virtio_net_hdr_v1_t *hdr, const int is_l2)
Definition: device.c:282
vl_api_tunnel_mode_t mode
Definition: gre.api:48
int packet_coalesce
Definition: virtio.h:155
static void virtio_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:1058
static_always_inline void virtio_interface_drop_inline(vlib_main_t *vm, uword node_index, u32 *buffers, u16 n, virtio_tx_func_error_t error)
Definition: device.c:139
vring_used_t * used
Definition: virtio.h:72
u16 desc_next
Definition: virtio.h:86
u16 virtio_net_hdr_sz
Definition: virtio.h:148
u8 slot
Definition: pci_types.api:22
u8 * format_ethernet_header_with_length(u8 *s, va_list *args)
Definition: format.c:97
virtio_vring_t * rxq_vrings
Definition: virtio.h:135
format_function_t format_virtio_device_name
Definition: virtio.h:247
u16 last_used_idx
Definition: virtio.h:87
vlib_node_registration_t virtio_send_interrupt_node
(constructor) VLIB_REGISTER_NODE (virtio_send_interrupt_node)
sll srl srl sll sra u16x4 i
Definition: vector_sse42.h:317
u32 flags
Definition: virtio.h:131
u16 last_kick_avail_idx
Definition: virtio.h:88
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1580
virtio_if_type_t type
Definition: virtio.h:149
vring_packed_desc_t * packed_desc
Definition: virtio.h:77
#define ASSERT(truth)
u16 avail_wrap_counter
Definition: virtio.h:99
static_always_inline u32 vnet_gro_inline(vlib_main_t *vm, gro_flow_table_t *flow_table, u32 *from, u16 n_left_from, u32 *to)
coalesce buffers with flow tables
Definition: gro_func.h:559
u8 pre_data[VLIB_BUFFER_PRE_DATA_SIZE]
Space for inserting data before buffer start.
Definition: buffer.h:178
u32 buffer_index
Definition: device.c:75
virtio_tx_func_error_t
Definition: device.c:43
static_always_inline void clib_memset_u8(void *p, u8 val, uword count)
Definition: string.h:424
static uword pointer_to_uword(const void *p)
Definition: types.h:131
#define VRING_DESC_F_NEXT
Definition: virtio_std.h:73
static_always_inline void virtio_tx_trace(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_buffer_t *b0, u32 bi, int is_tun)
Definition: device.c:103
virtio_main_t virtio_main
Definition: virtio.c:35
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
#define VRING_TX_OUT_OF_ORDER
Definition: virtio.h:104
static_always_inline void vnet_generic_header_offset_parser(vlib_buffer_t *b0, generic_header_offset_t *gho, int is_l2, int is_ip4, int is_ip6)
u16 ring[0]
Definition: virtio_std.h:99
virtio_vring_buffering_t * buffering
Definition: virtio.h:108
static_always_inline u16 virtio_interface_tx_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, u32 *buffers, u16 n_left, int packed, int do_gso, int csum_offload)
Definition: device.c:941
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
u16 used_wrap_counter
Definition: virtio.h:100
Definition: defs.h:47
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
int packet_buffering
Definition: virtio.h:156
u32 instance
Definition: gre.api:51
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
static void virtio_clear_hw_interface_counters(u32 instance)
Definition: device.c:1078
#define GRO_TO_VECTOR_SIZE(X)
Definition: gro.h:27
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:297
static_always_inline u8 * format_generic_header_offset(u8 *s, va_list *args)
u32 * buffers
Definition: virtio.h:82
static_always_inline u16 add_buffer_to_slot(vlib_main_t *vm, vlib_node_runtime_t *node, virtio_vring_t *vring, u32 bi, u16 free_desc_count, u16 avail, u16 next, u16 mask, int hdr_sz, int do_gso, int csum_offload, int is_pci, int is_tun, int is_indirect, int is_any_layout)
Definition: device.c:369
static_always_inline u16 virtio_vring_buffering_store_packets(virtio_vring_buffering_t *buffering, u32 *bi, u16 n_store)
#define vnet_buffer(b)
Definition: buffer.h:417
#define VRING_DESC_F_USED
Definition: virtio_std.h:78
vnet_hw_if_rx_mode
Definition: interface.h:53
generic_header_offset_t gho
Definition: device.c:77
int is_packed
Definition: virtio.h:210
#define VIRTIO_EVENT_STOP_TIMER
Definition: virtio.h:61
u8 count
Definition: dhcp.api:208
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:133
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:634
virtio_if_type_t
Definition: virtio.h:50
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:106
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static u16 ip4_header_checksum(ip4_header_t *i)
Definition: ip4_packet.h:314
static_always_inline void virtio_memset_ring_u32(u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Definition: device.c:148
u16 desc_in_use
Definition: virtio.h:85
#define VRING_DESC_F_INDIRECT
Definition: virtio_std.h:75
static char * virtio_tx_func_error_strings[]
Definition: device.c:51
#define VIRTIO_NET_HDR_GSO_TCPV6
Definition: virtio_std.h:140
#define VRING_DESC_F_AVAIL
Definition: virtio_std.h:77
static_always_inline void virtio_set_rx_polling(virtio_if_t *vif, virtio_vring_t *vring)
Definition: device.c:1093
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)
Definition: virtio.h:251
static_always_inline void clib_memset_u32(void *p, u32 val, uword count)
Definition: string.h:332