FD.io VPP  v21.01.1
Vector Packet Processing
node.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #include <sys/types.h>
19 #include <sys/stat.h>
20 #include <fcntl.h>
21 #include <net/if.h>
22 #include <linux/if_tun.h>
23 #include <sys/ioctl.h>
24 #include <sys/eventfd.h>
25 
26 #include <vlib/vlib.h>
27 #include <vlib/unix/unix.h>
28 #include <vnet/ethernet/ethernet.h>
29 #include <vnet/devices/devices.h>
30 #include <vnet/feature/feature.h>
31 #include <vnet/gso/gro_func.h>
32 #include <vnet/ip/ip4_packet.h>
33 #include <vnet/ip/ip6_packet.h>
34 #include <vnet/udp/udp_packet.h>
36 
37 
/* X-macro list of per-node error counters: _(SYMBOL, "description").
   Expanded below to build both the error enum and the strings table. */
#define foreach_virtio_input_error \
 _(BUFFER_ALLOC, "buffer alloc error") \
 _(UNKNOWN, "unknown")
41 
42 typedef enum
43 {
44 #define _(f,s) VIRTIO_INPUT_ERROR_##f,
46 #undef _
49 
50 static char *virtio_input_error_strings[] = {
51 #define _(n,s) s,
53 #undef _
54 };
55 
56 typedef struct
57 {
62  virtio_net_hdr_v1_t hdr;
64 
65 static u8 *
66 format_virtio_input_trace (u8 * s, va_list * args)
67 {
68  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
69  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
70  virtio_input_trace_t *t = va_arg (*args, virtio_input_trace_t *);
71  u32 indent = format_get_indent (s);
72 
73  s = format (s, "virtio: hw_if_index %d next-index %d vring %u len %u",
74  t->hw_if_index, t->next_index, t->ring, t->len);
75  s = format (s, "\n%Uhdr: flags 0x%02x gso_type 0x%02x hdr_len %u "
76  "gso_size %u csum_start %u csum_offset %u num_buffers %u",
77  format_white_space, indent + 2,
78  t->hdr.flags, t->hdr.gso_type, t->hdr.hdr_len, t->hdr.gso_size,
79  t->hdr.csum_start, t->hdr.csum_offset, t->hdr.num_buffers);
80  return s;
81 }
82 
86  const int hdr_sz, u32 node_index)
87 {
88  u16 used, next, avail, n_slots, n_refill;
89  u16 sz = vring->size;
90  u16 mask = sz - 1;
91 
92 more:
93  used = vring->desc_in_use;
94 
95  if (sz - used < sz / 8)
96  return;
97 
98  /* deliver free buffers in chunks of 64 */
99  n_refill = clib_min (sz - used, 64);
100 
101  next = vring->desc_next;
102  avail = vring->avail->idx;
103  n_slots =
105  vring->size, n_refill,
106  vring->buffer_pool_index);
107 
108  if (PREDICT_FALSE (n_slots != n_refill))
109  {
110  vlib_error_count (vm, node_index,
111  VIRTIO_INPUT_ERROR_BUFFER_ALLOC, n_refill - n_slots);
112  if (n_slots == 0)
113  return;
114  }
115 
116  while (n_slots)
117  {
118  vring_desc_t *d = &vring->desc[next];;
119  vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
120  /*
121  * current_data may not be initialized with 0 and may contain
122  * previous offset. Here we want to make sure, it should be 0
123  * initialized.
124  */
125  b->current_data = -hdr_sz;
126  memset (vlib_buffer_get_current (b), 0, hdr_sz);
127  d->addr =
128  ((type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
129  b) :
131  d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;
133  vring->avail->ring[avail & mask] = next;
134  avail++;
135  next = (next + 1) & mask;
136  n_slots--;
137  used++;
138  }
140  vring->avail->idx = avail;
141  vring->desc_next = next;
142  vring->desc_in_use = used;
143 
144  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0)
145  {
146  virtio_kick (vm, vring, vif);
147  }
148  goto more;
149 }
150 
154  const int hdr_sz, u32 node_index)
155 {
156  u16 used, next, n_slots, n_refill, flags = 0, first_desc_flags;
157  u16 sz = vring->size;
158 
159 more:
160  used = vring->desc_in_use;
161 
162  if (sz == used)
163  return;
164 
165  /* deliver free buffers in chunks of 64 */
166  n_refill = clib_min (sz - used, 64);
167 
168  next = vring->desc_next;
169  first_desc_flags = vring->packed_desc[next].flags;
170  n_slots =
172  sz, n_refill,
173  vring->buffer_pool_index);
174 
175  if (PREDICT_FALSE (n_slots != n_refill))
176  {
177  vlib_error_count (vm, node_index,
178  VIRTIO_INPUT_ERROR_BUFFER_ALLOC, n_refill - n_slots);
179  if (n_slots == 0)
180  return;
181  }
182 
183  while (n_slots)
184  {
185  vring_packed_desc_t *d = &vring->packed_desc[next];
186  vlib_buffer_t *b = vlib_get_buffer (vm, vring->buffers[next]);
187  /*
188  * current_data may not be initialized with 0 and may contain
189  * previous offset. Here we want to make sure, it should be 0
190  * initialized.
191  */
192  b->current_data = -hdr_sz;
193  memset (vlib_buffer_get_current (b), 0, hdr_sz);
194  d->addr =
195  ((type == VIRTIO_IF_TYPE_PCI) ? vlib_buffer_get_current_pa (vm,
196  b) :
198  d->len = vlib_buffer_get_default_data_size (vm) + hdr_sz;
199 
200  if (vring->avail_wrap_counter)
202  else
204 
205  d->id = next;
206  if (vring->desc_next == next)
207  first_desc_flags = flags;
208  else
209  d->flags = flags;
210 
211  next++;
212  if (next >= sz)
213  {
214  next = 0;
215  vring->avail_wrap_counter ^= 1;
216  }
217  n_slots--;
218  used++;
219  }
221  vring->packed_desc[vring->desc_next].flags = first_desc_flags;
222  vring->desc_next = next;
223  vring->desc_in_use = used;
225  if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
226  {
227  virtio_kick (vm, vring, vif);
228  }
229 
230  goto more;
231 }
232 
234 virtio_needs_csum (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
235  u8 * l4_proto, u8 * l4_hdr_sz, virtio_if_type_t type)
236 {
237  if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
238  {
239  u16 ethertype = 0, l2hdr_sz = 0;
240 
241  if (type == VIRTIO_IF_TYPE_TUN)
242  {
243  switch (b0->data[0] & 0xf0)
244  {
245  case 0x40:
246  ethertype = ETHERNET_TYPE_IP4;
247  break;
248  case 0x60:
249  ethertype = ETHERNET_TYPE_IP6;
250  break;
251  }
252  }
253  else
254  {
255  ethernet_header_t *eh =
257  ethertype = clib_net_to_host_u16 (eh->type);
258  l2hdr_sz = sizeof (ethernet_header_t);
259 
260  if (ethernet_frame_is_tagged (ethertype))
261  {
262  ethernet_vlan_header_t *vlan =
263  (ethernet_vlan_header_t *) (eh + 1);
264 
265  ethertype = clib_net_to_host_u16 (vlan->type);
266  l2hdr_sz += sizeof (*vlan);
267  if (ethertype == ETHERNET_TYPE_VLAN)
268  {
269  vlan++;
270  ethertype = clib_net_to_host_u16 (vlan->type);
271  l2hdr_sz += sizeof (*vlan);
272  }
273  }
274  }
275 
276  vnet_buffer (b0)->l2_hdr_offset = 0;
277  vnet_buffer (b0)->l3_hdr_offset = l2hdr_sz;
278 
279  if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP4))
280  {
281  ip4_header_t *ip4 =
282  (ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
283  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
284  *l4_proto = ip4->protocol;
285  b0->flags |=
286  (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
287  b0->flags |=
288  (VNET_BUFFER_F_L2_HDR_OFFSET_VALID
289  | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
290  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
291  }
292  else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
293  {
294  ip6_header_t *ip6 =
295  (ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
296  vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
297  /* FIXME IPv6 EH traversal */
298  *l4_proto = ip6->protocol;
299  b0->flags |= (VNET_BUFFER_F_IS_IP6 |
300  VNET_BUFFER_F_L2_HDR_OFFSET_VALID
301  | VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
302  VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
303  }
304  if (*l4_proto == IP_PROTOCOL_TCP)
305  {
306  b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
309  (b0)->l4_hdr_offset);
310  *l4_hdr_sz = tcp_header_bytes (tcp);
311  }
312  else if (*l4_proto == IP_PROTOCOL_UDP)
313  {
314  b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
317  (b0)->l4_hdr_offset);
318  *l4_hdr_sz = sizeof (*udp);
319  }
320  }
321 }
322 
324 fill_gso_buffer_flags (vlib_buffer_t * b0, virtio_net_hdr_v1_t * hdr,
325  u8 l4_proto, u8 l4_hdr_sz)
326 {
327  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
328  {
329  ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
330  vnet_buffer2 (b0)->gso_size = hdr->gso_size;
331  vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
332  b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP4;
333  }
334  if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV6)
335  {
336  ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
337  vnet_buffer2 (b0)->gso_size = hdr->gso_size;
338  vnet_buffer2 (b0)->gso_l4_hdr_sz = l4_hdr_sz;
339  b0->flags |= VNET_BUFFER_F_GSO | VNET_BUFFER_F_IS_IP6;
340  }
341 }
342 
344 virtio_n_left_to_process (virtio_vring_t * vring, const int packed)
345 {
346  if (packed)
347  return vring->desc_in_use;
348  else
349  return vring->used->idx - vring->last_used_idx;
350 }
351 
353 virtio_get_slot_id (virtio_vring_t * vring, const int packed, u16 last,
354  u16 mask)
355 {
356  if (packed)
357  return vring->packed_desc[last].id;
358  else
359  return vring->used->ring[last & mask].id;
360 }
361 
363 virtio_get_len (virtio_vring_t * vring, const int packed, const int hdr_sz,
364  u16 last, u16 mask)
365 {
366  if (packed)
367  return vring->packed_desc[last].len - hdr_sz;
368  else
369  return vring->used->ring[last & mask].len - hdr_sz;
370 }
371 
/* Advance the last-used cursor one slot. Packed rings wrap explicitly at
   vring->size and toggle the used wrap counter; split rings just increment
   (callers mask the index, and u16 wraparound is intentional). */
#define increment_last(last, packed, vring) \
 do { \
 last++; \
 if (packed && last >= vring->size) \
 { \
 last = 0; \
 vring->used_wrap_counter ^= 1; \
 } \
 } while (0)
381 
384  vlib_frame_t * frame, virtio_if_t * vif,
386  int gso_enabled, int checksum_offload_enabled,
387  int packed)
388 {
389  vnet_main_t *vnm = vnet_get_main ();
390  u32 thread_index = vm->thread_index;
391  uword n_trace = vlib_get_trace_count (vm, node);
392  u32 next_index;
393  const int hdr_sz = vif->virtio_net_hdr_sz;
394  u32 *to_next = 0;
395  u32 n_rx_packets = 0;
396  u32 n_rx_bytes = 0;
397  u16 mask = vring->size - 1;
398  u16 last = vring->last_used_idx;
399  u16 n_left = virtio_n_left_to_process (vring, packed);
400  vlib_buffer_t bt;
401 
402  if (n_left == 0)
403  return 0;
404 
405  if (type == VIRTIO_IF_TYPE_TUN)
406  {
408  }
409  else
410  {
412  if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
413  next_index = vif->per_interface_next_index;
414 
415  /* only for l2, redirect if feature path enabled */
416  vnet_feature_start_device_input_x1 (vif->sw_if_index, &next_index, &bt);
417  }
418 
419  while (n_left)
420  {
421  u32 n_left_to_next;
422  u32 next0 = next_index;
423 
424  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
425 
426  while (n_left && n_left_to_next)
427  {
428  if (packed)
429  {
430  vring_packed_desc_t *d = &vring->packed_desc[last];
431  u16 flags = d->flags;
432  if ((flags & VRING_DESC_F_AVAIL) !=
433  (vring->used_wrap_counter << 7)
434  || (flags & VRING_DESC_F_USED) !=
435  (vring->used_wrap_counter << 15))
436  {
437  n_left = 0;
438  break;
439  }
440  }
441  u8 l4_proto = 0, l4_hdr_sz = 0;
442  u16 num_buffers = 1;
443  virtio_net_hdr_v1_t *hdr;
444  u16 slot = virtio_get_slot_id (vring, packed, last, mask);
445  u16 len = virtio_get_len (vring, packed, hdr_sz, last, mask);
446  u32 bi0 = vring->buffers[slot];
447  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
448  hdr = vlib_buffer_get_current (b0);
449  if (hdr_sz == sizeof (virtio_net_hdr_v1_t))
450  num_buffers = hdr->num_buffers;
451 
452  b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
453  b0->current_data = 0;
454  b0->current_length = len;
455 
456  if (checksum_offload_enabled)
457  virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz, type);
458 
459  if (gso_enabled)
460  fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);
461 
462  vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
463  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
464 
465  /* if multisegment packet */
466  if (PREDICT_FALSE (num_buffers > 1))
467  {
468  vlib_buffer_t *pb, *cb;
469  pb = b0;
471  while (num_buffers > 1)
472  {
473  increment_last (last, packed, vring);
474  u16 cslot = virtio_get_slot_id (vring, packed, last, mask);
475  /* hdr size is 0 after 1st packet in chain buffers */
476  u16 clen = virtio_get_len (vring, packed, 0, last, mask);
477  u32 cbi = vring->buffers[cslot];
478  cb = vlib_get_buffer (vm, cbi);
479 
480  /* current buffer */
481  cb->current_length = clen;
482 
483  /* previous buffer */
484  pb->next_buffer = cbi;
485  pb->flags |= VLIB_BUFFER_NEXT_PRESENT;
486 
487  /* first buffer */
489 
490  pb = cb;
491  vring->desc_in_use--;
492  num_buffers--;
493  n_left--;
494  }
496  }
497 
498  if (type == VIRTIO_IF_TYPE_TUN)
499  {
500  switch (b0->data[0] & 0xf0)
501  {
502  case 0x40:
504  break;
505  case 0x60:
507  break;
508  default:
510  break;
511  }
512 
513  if (PREDICT_FALSE (vif->per_interface_next_index != ~0))
514  next0 = vif->per_interface_next_index;
515  }
516  else
517  {
518  /* copy feature arc data from template */
520  vnet_buffer (b0)->feature_arc_index =
521  vnet_buffer (&bt)->feature_arc_index;
522  }
523 
524  /* trace */
526 
527  if (PREDICT_FALSE (n_trace > 0 && vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */
528  1)))
529  {
531  vlib_set_trace_count (vm, node, --n_trace);
532  tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
533  tr->next_index = next0;
534  tr->hw_if_index = vif->hw_if_index;
535  tr->len = len;
536  clib_memcpy_fast (&tr->hdr, hdr, hdr_sz);
537  }
538 
539  /* enqueue buffer */
540  to_next[0] = bi0;
541  vring->desc_in_use--;
542  to_next += 1;
543  n_left_to_next--;
544  n_left--;
545  increment_last (last, packed, vring);
546 
547  /* only tun interfaces may have different next index */
548  if (type == VIRTIO_IF_TYPE_TUN)
549  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
550  n_left_to_next, bi0, next0);
551 
552  /* next packet */
553  n_rx_packets++;
554  n_rx_bytes += len;
555  }
556  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
557  }
558  vring->last_used_idx = last;
559 
561  + VNET_INTERFACE_COUNTER_RX, thread_index,
562  vif->sw_if_index, n_rx_packets,
563  n_rx_bytes);
564 
565  return n_rx_packets;
566 }
567 
570  vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
572 {
573  virtio_vring_t *vring = vec_elt_at_index (vif->rxq_vrings, qid);
574  const int hdr_sz = vif->virtio_net_hdr_sz;
575  u16 txq_id = vm->thread_index % vif->num_txqs;
576  virtio_vring_t *txq_vring = vec_elt_at_index (vif->txq_vrings, txq_id);
577  uword rv;
578 
579  if (clib_spinlock_trylock_if_init (&txq_vring->lockp))
580  {
581  if (vif->packet_coalesce)
583  (vm, txq_vring->flow_table);
584  else if (vif->packet_buffering)
586  (vm, txq_vring->buffering);
587  clib_spinlock_unlock_if_init (&txq_vring->lockp);
588  }
589 
590  if (vif->is_packed)
591  {
592  if (vring->device_event->flags != VRING_EVENT_F_DISABLE)
593  virtio_kick (vm, vring, vif);
594 
595  if (vif->gso_enabled)
596  rv =
597  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
598  1, 1, 1);
599  else if (vif->csum_offload_enabled)
600  rv =
601  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
602  0, 1, 1);
603  else
604  rv =
605  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
606  0, 0, 1);
607 
608  virtio_refill_vring_packed (vm, vif, type, vring, hdr_sz,
609  node->node_index);
610  }
611  else
612  {
613  if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
614  vring->last_kick_avail_idx != vring->avail->idx)
615  virtio_kick (vm, vring, vif);
616 
617  if (vif->gso_enabled)
618  rv =
619  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
620  1, 1, 0);
621  else if (vif->csum_offload_enabled)
622  rv =
623  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
624  0, 1, 0);
625  else
626  rv =
627  virtio_device_input_gso_inline (vm, node, frame, vif, vring, type,
628  0, 0, 0);
629 
630  virtio_refill_vring_split (vm, vif, type, vring, hdr_sz,
631  node->node_index);
632  }
633  return rv;
634 }
635 
639 {
640  u32 n_rx = 0;
641  virtio_main_t *nm = &virtio_main;
642  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
644 
646  {
647  virtio_if_t *vif;
648  vif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
649  if (vif->flags & VIRTIO_IF_FLAG_ADMIN_UP)
650  {
651  if (vif->type == VIRTIO_IF_TYPE_TAP)
652  n_rx += virtio_device_input_inline (vm, node, frame, vif,
653  dq->queue_id,
654  VIRTIO_IF_TYPE_TAP);
655  else if (vif->type == VIRTIO_IF_TYPE_PCI)
656  n_rx += virtio_device_input_inline (vm, node, frame, vif,
657  dq->queue_id,
658  VIRTIO_IF_TYPE_PCI);
659  else if (vif->type == VIRTIO_IF_TYPE_TUN)
660  n_rx += virtio_device_input_inline (vm, node, frame, vif,
661  dq->queue_id,
662  VIRTIO_IF_TYPE_TUN);
663  }
664  }
665 
666  return n_rx;
667 }
668 
669 /* *INDENT-OFF* */
671  .name = "virtio-input",
672  .sibling_of = "device-input",
673  .format_trace = format_virtio_input_trace,
675  .type = VLIB_NODE_TYPE_INPUT,
676  .state = VLIB_NODE_STATE_INTERRUPT,
677  .n_errors = VIRTIO_INPUT_N_ERROR,
678  .error_strings = virtio_input_error_strings,
679 };
680 /* *INDENT-ON* */
681 
682 /*
683  * fd.io coding-style-patch-verification: ON
684  *
685  * Local Variables:
686  * eval: (c-set-style "gnu")
687  * End:
688  */
u32 per_interface_next_index
Definition: virtio.h:132
gro_flow_table_t * flow_table
Definition: virtio.h:109
#define VRING_EVENT_F_DISABLE
Definition: virtio_std.h:81
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:124
vlib_node_registration_t virtio_input_node
(constructor) VLIB_REGISTER_NODE (virtio_input_node)
Definition: node.c:670
static uword vlib_buffer_get_current_pa(vlib_main_t *vm, vlib_buffer_t *b)
Definition: buffer_funcs.h:463
static_always_inline int clib_spinlock_trylock_if_init(clib_spinlock_t *p)
Definition: lock.h:113
vring_desc_event_t * device_event
Definition: virtio.h:79
static __clib_warn_unused_result u32 vlib_buffer_alloc_to_ring_from_pool(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers into ring from specific buffer pool.
Definition: buffer_funcs.h:722
virtio_if_t * interfaces
Definition: virtio.h:219
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define clib_min(x, y)
Definition: clib.h:328
#define CLIB_UNUSED(x)
Definition: clib.h:87
static_always_inline u16 virtio_get_slot_id(virtio_vring_t *vring, const int packed, u16 last, u16 mask)
Definition: node.c:353
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:201
vl_api_wireguard_peer_flags_t flags
Definition: wireguard.api:105
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:239
#define VIRTIO_NET_HDR_F_NEEDS_CSUM
Definition: virtio_std.h:134
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
#define vnet_buffer2(b)
Definition: buffer.h:481
vnet_interface_main_t interface_main
Definition: vnet.h:65
#define PREDICT_TRUE(x)
Definition: clib.h:122
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:110
int gso_enabled
Definition: virtio.h:137
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:136
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
u32 dev_instance
Definition: virtio.h:157
#define clib_memcpy_fast(a, b, c)
Definition: string.h:81
vring_used_elem_t ring[0]
Definition: virtio_std.h:113
#define VLIB_NODE_FLAG_TRACE_SUPPORTED
Definition: node.h:306
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:129
u32 thread_index
Definition: main.h:250
static_always_inline u16 virtio_get_len(virtio_vring_t *vring, const int packed, const int hdr_sz, u16 last, u16 mask)
Definition: node.c:363
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:113
static heap_elt_t * last(heap_header_t *h)
Definition: heap.c:53
static u32 format_get_indent(u8 *s)
Definition: format.h:72
vlib_main_t * vm
Definition: in2out_ed.c:1580
#define VLIB_NODE_FN(node)
Definition: node.h:203
static u8 * format_virtio_input_trace(u8 *s, va_list *args)
Definition: node.c:66
u16 mask
Definition: flow_types.api:52
struct _tcp_header tcp_header_t
unsigned char u8
Definition: types.h:56
vring_desc_t * desc
Definition: virtio.h:71
static_always_inline void virtio_refill_vring_split(vlib_main_t *vm, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, const int hdr_sz, u32 node_index)
Definition: node.c:84
#define foreach_virtio_input_error
Definition: node.c:38
#define static_always_inline
Definition: clib.h:109
u32 hw_if_index
Definition: virtio.h:151
virtio_net_hdr_v1_t hdr
Definition: node.c:62
vl_api_ip6_address_t ip6
Definition: one.api:424
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:882
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
description fragment has unexpected format
Definition: map.api:433
virtio_input_error_t
Definition: node.c:42
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
clib_spinlock_t lockp
Definition: virtio.h:66
const cJSON *const b
Definition: cJSON.h:255
unsigned int u32
Definition: types.h:88
static char * virtio_input_error_strings[]
Definition: node.c:50
u16 num_txqs
Definition: virtio.h:134
vl_api_fib_path_type_t type
Definition: fib_types.api:123
#define increment_last(last, packed, vring)
Definition: node.c:372
#define VRING_USED_F_NO_NOTIFY
Definition: virtio_std.h:84
static_always_inline uword virtio_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, virtio_if_t *vif, u16 qid, virtio_if_type_t type)
Definition: node.c:569
vring_avail_t * avail
Definition: virtio.h:73
static __clib_warn_unused_result int vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:142
unsigned short u16
Definition: types.h:57
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:233
#define PREDICT_FALSE(x)
Definition: clib.h:121
vl_api_ip4_address_t ip4
Definition: one.api:376
static_always_inline void vnet_gro_flow_table_schedule_node_on_dispatcher(vlib_main_t *vm, gro_flow_table_t *flow_table)
Definition: gro_func.h:410
u32 node_index
Node index.
Definition: node.h:488
u8 buffer_pool_index
Definition: virtio.h:106
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:224
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:391
#define VIRTIO_NET_HDR_GSO_TCPV4
Definition: virtio_std.h:138
int packet_coalesce
Definition: virtio.h:155
u8 len
Definition: ip_types.api:103
vring_used_t * used
Definition: virtio.h:72
u16 desc_next
Definition: virtio.h:86
u16 virtio_net_hdr_sz
Definition: virtio.h:148
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
Definition: buffer_funcs.h:96
u8 slot
Definition: pci_types.api:22
virtio_vring_t * rxq_vrings
Definition: virtio.h:135
u16 last_used_idx
Definition: virtio.h:87
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:170
static_always_inline void fill_gso_buffer_flags(vlib_buffer_t *b0, virtio_net_hdr_v1_t *hdr, u8 l4_proto, u8 l4_hdr_sz)
Definition: node.c:324
u8 data[]
Packet data.
Definition: buffer.h:181
u32 flags
Definition: virtio.h:131
u16 last_kick_avail_idx
Definition: virtio.h:88
u32 current_config_index
Used by feature subgraph arcs to visit enabled feature nodes.
Definition: buffer.h:147
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:483
static_always_inline void virtio_vring_buffering_schedule_node_on_dispatcher(vlib_main_t *vm, virtio_vring_buffering_t *buffering)
static_always_inline u16 virtio_n_left_to_process(virtio_vring_t *vring, const int packed)
Definition: node.c:344
vlib_main_t vlib_node_runtime_t * node
Definition: in2out_ed.c:1580
virtio_if_type_t type
Definition: virtio.h:149
vring_packed_desc_t * packed_desc
Definition: virtio.h:77
#define ASSERT(truth)
u16 avail_wrap_counter
Definition: virtio.h:99
static_always_inline int ethernet_frame_is_tagged(u16 type)
Definition: ethernet.h:78
static uword pointer_to_uword(const void *p)
Definition: types.h:131
virtio_main_t virtio_main
Definition: virtio.c:35
static_always_inline void virtio_needs_csum(vlib_buffer_t *b0, virtio_net_hdr_v1_t *hdr, u8 *l4_proto, u8 *l4_hdr_sz, virtio_if_type_t type)
Definition: node.c:234
u16 ring[0]
Definition: virtio_std.h:99
virtio_vring_buffering_t * buffering
Definition: virtio.h:108
#define foreach_device_and_queue(var, vec)
Definition: devices.h:152
u16 used_wrap_counter
Definition: virtio.h:100
Definition: defs.h:47
int csum_offload_enabled
Definition: virtio.h:138
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:140
int packet_buffering
Definition: virtio.h:156
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: in2out_ed.c:1581
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:497
VLIB buffer representation.
Definition: buffer.h:102
u64 uword
Definition: types.h:112
static_always_inline uword virtio_device_input_gso_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, virtio_if_t *vif, virtio_vring_t *vring, virtio_if_type_t type, int gso_enabled, int checksum_offload_enabled, int packed)
Definition: node.c:383
u32 * buffers
Definition: virtio.h:82
#define vnet_buffer(b)
Definition: buffer.h:417
u32 sw_if_index
Definition: virtio.h:152
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:343
static int tcp_header_bytes(tcp_header_t *t)
Definition: tcp_packet.h:93
#define VRING_DESC_F_USED
Definition: virtio_std.h:78
int is_packed
Definition: virtio.h:210
#define CLIB_MEMORY_BARRIER()
Definition: clib.h:133
void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace.c:634
static int ip4_header_bytes(const ip4_header_t *i)
Definition: ip4_packet.h:190
virtio_if_type_t
Definition: virtio.h:50
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:215
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:167
#define VRING_DESC_F_WRITE
Definition: virtio_std.h:74
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:85
static_always_inline void virtio_refill_vring_packed(vlib_main_t *vm, virtio_if_t *vif, virtio_if_type_t type, virtio_vring_t *vring, const int hdr_sz, u32 node_index)
Definition: node.c:152
u16 desc_in_use
Definition: virtio.h:85
Definition: defs.h:46
#define VIRTIO_NET_HDR_GSO_TCPV6
Definition: virtio_std.h:140
virtio_vring_t * txq_vrings
Definition: virtio.h:136
#define VRING_DESC_F_AVAIL
Definition: virtio_std.h:77
static_always_inline void virtio_kick(vlib_main_t *vm, virtio_vring_t *vring, virtio_if_t *vif)
Definition: virtio.h:251