/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/feature/feature.h>

#include <memif/memif.h>
#include <memif/private.h>

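/* RX error counters, expanded twice below: once into the
 * memif_input_error_t enum and once into the error description table
 * registered with the node. */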
#define foreach_memif_input_error \
  _ (BUFFER_ALLOC_FAIL, buffer_alloc, ERROR, "buffer allocation failed") \
  _ (BAD_DESC, bad_desc, ERROR, "bad descriptor") \
  _ (NOT_IP, not_ip, INFO, "not ip packet")

typedef enum
{
#define _(f, n, s, d) MEMIF_INPUT_ERROR_##f,
  foreach_memif_input_error
#undef _
    MEMIF_INPUT_N_ERROR,
} memif_input_error_t;

static vlib_error_desc_t memif_input_error_counters[] = {
#define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
  foreach_memif_input_error
#undef _
};

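/* Data captured for each traced RX packet. */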
typedef struct
{
  u32 next_index;
  u32 hw_if_index;
  u16 ring;
} memif_input_trace_t;

static __clib_unused u8 *
format_memif_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "memif: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
              t->ring);
  return s;
}
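
/* In IP mode there is no ethernet header; pick the next node from the
 * IP version nibble of the first payload byte: 0x4x -> ip4 input,
 * 0x6x -> ip6 input, anything else counts as a NOT_IP error. */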
static_always_inline u32
memif_next_from_ip_hdr (vlib_node_runtime_t * node, vlib_buffer_t * b)
{
  u8 *ptr = vlib_buffer_get_current (b);
  u8 v = *ptr & 0xf0;

  if (PREDICT_TRUE (v == 0x40))
    return VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
  else if (PREDICT_TRUE (v == 0x60))
    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;

  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
  return VNET_DEVICE_INPUT_NEXT_DROP;
}
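
/* Record a trace entry for one rx buffer and decrement the node's
 * remaining trace budget. */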
static_always_inline void
memif_trace_buffer (vlib_main_t * vm, vlib_node_runtime_t * node,
                    memif_if_t * mif, vlib_buffer_t * b, u32 next, u16 qid,
                    uword * n_tracep)
{
  if (PREDICT_TRUE
      (b != 0 && vlib_trace_buffer (vm, node, next, b, /* follow_chain */ 0)))
    {
      memif_input_trace_t *tr;
      vlib_set_trace_count (vm, node, --(*n_tracep));
      tr = vlib_add_trace (vm, node, b, sizeof (*tr));
      tr->next_index = next;
      tr->hw_if_index = mif->hw_if_index;
      tr->ring = qid;
    }
}
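
/* Queue one copy operation (shared memory -> VPP buffer) into the
 * per-thread batch; the copies are executed later in bulk. */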
static_always_inline void
memif_add_copy_op (memif_per_thread_data_t * ptd, void *data, u32 len,
                   u16 buffer_offset, u16 buffer_vec_index)
{
  memif_copy_op_t *co;
  vec_add2_aligned (ptd->copy_ops, co, 1, CLIB_CACHE_LINE_BYTES);
  co->data = data;
  co->data_len = len;
  co->buffer_offset = buffer_offset;
  co->buffer_vec_index = buffer_vec_index;
}
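
/* Spill a packet longer than one buffer into a buffer chain, trimming
 * the head buffer and linking the overflow buffers behind it. */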
static_always_inline void
memif_add_to_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * buffers,
                    u32 buffer_size)
{
  vlib_buffer_t *seg = b;
  i32 bytes_left = b->current_length - buffer_size + b->current_data;

  if (PREDICT_TRUE (bytes_left <= 0))
    return;

  b->current_length -= bytes_left;
  b->total_length_not_including_first_buffer = bytes_left;

  while (bytes_left)
    {
      seg->flags |= VLIB_BUFFER_NEXT_PRESENT;
      seg->next_buffer = buffers[0];
      seg = vlib_get_buffer (vm, buffers[0]);
      buffers++;
      seg->current_data = 0;
      seg->current_length = clib_min (buffer_size, bytes_left);
      bytes_left -= seg->current_length;
    }
}
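
/* Copy-mode RX path: walk the descriptor ring to build per-packet and
 * per-copy operation vectors, allocate VPP buffers, execute the copies
 * in bulk, then enqueue the packets and refill the ring. */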
static_always_inline uword
memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                           memif_if_t * mif, memif_ring_type_t type, u16 qid,
                           memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u16 buffer_size = vlib_buffer_get_default_data_size (vm);
  uword n_trace;
  u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts;
  u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi;
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 n_left, n_left_to_next;
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
                                                   thread_index);
  vlib_buffer_t bt;
  u16 cur_slot, last_slot, ring_size, n_slots, mask;
  i16 start_offset;
  u16 n_buffers = 0, n_alloc;
  memif_copy_op_t *co;
  memif_packet_op_t *po;
  memif_region_index_t last_region = ~0;
  void *last_region_shm = 0;
  void *last_region_max = 0;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;

  /* for S2M rings, we are consumers of packet buffers, and for M2S rings we
     are producers of empty buffers */
  cur_slot = (type == MEMIF_RING_S2M) ? mq->last_head : mq->last_tail;

  if (type == MEMIF_RING_S2M)
    last_slot = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
  else
    last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);

  if (cur_slot == last_slot)
    goto refill;
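  /* head/tail are free-running u16 counters, so this subtraction counts
   * pending slots correctly across 16-bit wraparound */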
  n_slots = last_slot - cur_slot;

  /* construct copy and packet vector out of ring slots */
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      u32 dst_off, src_off, n_bytes_left;
      u16 s0;
      memif_desc_t *d0;
      void *mb0;
      po = ptd->packet_ops + n_rx_packets;
      n_rx_packets++;
      po->first_buffer_vec_index = n_buffers++;
      po->packet_len = 0;
      src_off = 0;
      dst_off = start_offset;

    next_slot:
      clib_prefetch_load (&ring->desc[(cur_slot + 8) & mask]);
      s0 = cur_slot & mask;
      d0 = &ring->desc[s0];
      n_bytes_left = d0->length;

      /* slave resets buffer length,
       * so it can produce full size buffer for master
       */
      if (type == MEMIF_RING_M2S)
        d0->length = mif->run.buffer_size;

      po->packet_len += n_bytes_left;
      if (PREDICT_FALSE (last_region != d0->region))
        {
          last_region_shm = mif->regions[d0->region].shm;
          last_region = d0->region;
          last_region_max =
            last_region_shm + mif->regions[last_region].region_size;
        }
      mb0 = last_region_shm + d0->offset;

      if (PREDICT_FALSE (mb0 + n_bytes_left > last_region_max))
        vlib_error_count (vm, node->node_index, MEMIF_INPUT_ERROR_BAD_DESC, 1);
      else
        do
          {
            u32 dst_free = buffer_size - dst_off;
            if (dst_free == 0)
              {
                dst_off = 0;
                dst_free = buffer_size;
                n_buffers++;
              }
            u32 bytes_to_copy = clib_min (dst_free, n_bytes_left);
            memif_add_copy_op (ptd, mb0 + src_off, bytes_to_copy, dst_off,
                               n_buffers - 1);
            n_bytes_left -= bytes_to_copy;
            src_off += bytes_to_copy;
            dst_off += bytes_to_copy;
          }
        while (PREDICT_FALSE (n_bytes_left));

      cur_slot++;
      n_slots--;
      if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
        {
          src_off = 0;
          goto next_slot;
        }
    }

  /* allocate free buffers */
  vec_validate_aligned (ptd->buffers, n_buffers - 1, CLIB_CACHE_LINE_BYTES);
  n_alloc = vlib_buffer_alloc_from_pool (vm, ptd->buffers, n_buffers,
                                         mq->buffer_pool_index);
  if (PREDICT_FALSE (n_alloc != n_buffers))
    {
      if (n_alloc)
        vlib_buffer_free (vm, ptd->buffers, n_alloc);
      vlib_error_count (vm, node->node_index,
                        MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
      goto refill;
    }

  /* copy data */
  n_left = vec_len (ptd->copy_ops);
  co = ptd->copy_ops;
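  /* 4 copies per iteration; source data for the next group of four is
   * prefetched while the current group is copied */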
  while (n_left >= 8)
    {
      clib_prefetch_load (co[4].data);
      clib_prefetch_load (co[5].data);
      clib_prefetch_load (co[6].data);
      clib_prefetch_load (co[7].data);

      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);

      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
                        co[0].data_len);
      clib_memcpy_fast (b1->data + co[1].buffer_offset, co[1].data,
                        co[1].data_len);
      clib_memcpy_fast (b2->data + co[2].buffer_offset, co[2].data,
                        co[2].data_len);
      clib_memcpy_fast (b3->data + co[3].buffer_offset, co[3].data,
                        co[3].data_len);

      co += 4;
      n_left -= 4;
    }
  while (n_left)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
      clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data,
                        co[0].data_len);
      co += 1;
      n_left -= 1;
    }
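
  /* for S2M rings the RELEASE store to ring->tail below publishes the
   * consumed slots to the producer, pairing with its ACQUIRE load */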
  /* release slots from the ring */
  if (type == MEMIF_RING_S2M)
    {
      __atomic_store_n (&ring->tail, cur_slot, __ATOMIC_RELEASE);
      mq->last_head = cur_slot;
    }
  else
    {
      mq->last_tail = cur_slot;
    }

  /* prepare buffer template and next indices */
  vnet_buffer (&ptd->buffer_template)->sw_if_index[VLIB_RX] =
    mif->sw_if_index;
  vnet_buffer (&ptd->buffer_template)->feature_arc_index = 0;
  ptd->buffer_template.current_data = start_offset;
  ptd->buffer_template.current_config_index = 0;
  ptd->buffer_template.buffer_pool_index = mq->buffer_pool_index;
  ptd->buffer_template.ref_count = 1;

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      if (mif->per_interface_next_index != ~0)
        next_index = mif->per_interface_next_index;
      else
        vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index,
                                            &ptd->buffer_template);

      vlib_get_new_next_frame (vm, node, next_index, to_next_bufs,
                               n_left_to_next);
      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
        {
          vlib_next_frame_t *nf;
          vlib_frame_t *f;
          ethernet_input_frame_t *ef;
          nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
          f = vlib_get_frame (vm, nf->frame);
          f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;

          ef = vlib_frame_scalar_args (f);
          ef->sw_if_index = mif->sw_if_index;
          ef->hw_if_index = mif->hw_if_index;
          vlib_frame_no_append (f);
        }
    }

  /* process buffer metadata */
  u32 n_from = n_rx_packets;
  po = ptd->packet_ops;
  bi = to_next_bufs;

  /* copy template into local variable - will save per packet load */
  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

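  /* 4 packets per iteration; buffer headers for the following group of
   * four are prefetched while the current four are initialized */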
  while (n_from >= 8)
    {
      b0 = vlib_get_buffer (vm, ptd->buffers[po[4].first_buffer_vec_index]);
      b1 = vlib_get_buffer (vm, ptd->buffers[po[5].first_buffer_vec_index]);
      b2 = vlib_get_buffer (vm, ptd->buffers[po[6].first_buffer_vec_index]);
      b3 = vlib_get_buffer (vm, ptd->buffers[po[7].first_buffer_vec_index]);

      vlib_prefetch_buffer_header (b0, STORE);
      vlib_prefetch_buffer_header (b1, STORE);
      vlib_prefetch_buffer_header (b2, STORE);
      vlib_prefetch_buffer_header (b3, STORE);

      /* enqueue buffer */
      u32 fbvi[4];
      fbvi[0] = po[0].first_buffer_vec_index;
      fbvi[1] = po[1].first_buffer_vec_index;
      fbvi[2] = po[2].first_buffer_vec_index;
      fbvi[3] = po[3].first_buffer_vec_index;

      bi[0] = ptd->buffers[fbvi[0]];
      bi[1] = ptd->buffers[fbvi[1]];
      bi[2] = ptd->buffers[fbvi[2]];
      bi[3] = ptd->buffers[fbvi[3]];

      b0 = vlib_get_buffer (vm, bi[0]);
      b1 = vlib_get_buffer (vm, bi[1]);
      b2 = vlib_get_buffer (vm, bi[2]);
      b3 = vlib_get_buffer (vm, bi[3]);

      vlib_buffer_copy_template (b0, &bt);
      vlib_buffer_copy_template (b1, &bt);
      vlib_buffer_copy_template (b2, &bt);
      vlib_buffer_copy_template (b3, &bt);

      b0->current_length = po[0].packet_len;
      n_rx_bytes += b0->current_length;
      b1->current_length = po[1].packet_len;
      n_rx_bytes += b1->current_length;
      b2->current_length = po[2].packet_len;
      n_rx_bytes += b2->current_length;
      b3->current_length = po[3].packet_len;
      n_rx_bytes += b3->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);
      memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size);
      memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size);
      memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
        {
          next[0] = memif_next_from_ip_hdr (node, b0);
          next[1] = memif_next_from_ip_hdr (node, b1);
          next[2] = memif_next_from_ip_hdr (node, b2);
          next[3] = memif_next_from_ip_hdr (node, b3);
        }

      /* next */
      n_from -= 4;
      po += 4;
      bi += 4;
      next += 4;
    }
  while (n_from)
    {
      u32 fbvi[4];
      /* enqueue buffer */
      fbvi[0] = po[0].first_buffer_vec_index;
      bi[0] = ptd->buffers[fbvi[0]];
      b0 = vlib_get_buffer (vm, bi[0]);
      vlib_buffer_copy_template (b0, &bt);
      b0->current_length = po->packet_len;
      n_rx_bytes += b0->current_length;

      memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size);

      if (mode == MEMIF_INTERFACE_MODE_IP)
        {
          next[0] = memif_next_from_ip_hdr (node, b0);
        }

      /* next */
      n_from -= 1;
      po += 1;
      bi += 1;
      next += 1;
    }

  /* packet trace if enabled */
  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
    {
      u32 n_left = n_rx_packets;
      bi = to_next_bufs;
      next = nexts;
      u32 ni = next_index;
      while (n_trace && n_left)
        {
          vlib_buffer_t *b;
          memif_input_trace_t *tr;
          if (mode != MEMIF_INTERFACE_MODE_ETHERNET)
            ni = next[0];
          b = vlib_get_buffer (vm, bi[0]);
          if (PREDICT_TRUE
              (vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0)))
            {
              tr = vlib_add_trace (vm, node, b, sizeof (*tr));
              tr->next_index = ni;
              tr->hw_if_index = mif->hw_if_index;
              tr->ring = qid;
              n_trace--;
            }

          /* next */
          n_left--;
          bi++;
          next++;
        }
      vlib_set_trace_count (vm, node, n_trace);
    }

  if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
    {
      n_left_to_next -= n_rx_packets;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  else
    vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts, n_rx_packets);

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   mif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);
  vec_reset_length (ptd->copy_ops);

  if (type == MEMIF_RING_M2S)
    {
      u16 head = ring->head;
      n_slots = ring_size - head + mq->last_tail;

      while (n_slots--)
        {
          u16 s = head++ & mask;
          memif_desc_t *d = &ring->desc[s];
          d->length = mif->run.buffer_size;
        }

      __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);
    }

  return n_rx_packets;
}

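/* Zero-copy RX path: ring descriptors point directly into VPP buffer
 * memory, so received data needs no copy; consumed buffers are handed
 * to the graph and the ring is refilled with freshly allocated ones. */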
static_always_inline uword
memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                              memif_if_t * mif, u16 qid,
                              memif_interface_mode_t mode)
{
  vnet_main_t *vnm = vnet_get_main ();
  memif_main_t *mm = &memif_main;
  memif_ring_t *ring;
  memif_queue_t *mq;
  u32 next_index;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 n_rx_packets = 0, n_rx_bytes = 0;
  u32 *to_next = 0, *buffers;
  u32 bi0, bi1, bi2, bi3;
  u16 slot, s0;
  memif_desc_t *d0;
  vlib_buffer_t *b0, *b1, *b2, *b3;
  u32 thread_index = vm->thread_index;
  memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data,
                                                   thread_index);
  u16 cur_slot, last_slot, ring_size, n_slots, mask, head;
  i16 start_offset;
  u64 offset;
  u32 buffer_length;
  u16 n_alloc, n_from;

  mq = vec_elt_at_index (mif->rx_queues, qid);
  ring = mq->ring;
  ring_size = 1 << mq->log2_ring_size;
  mask = ring_size - 1;

  next_index = (mode == MEMIF_INTERFACE_MODE_IP) ?
    VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;

  /* assume that somebody will want to add ethernet header on the packet
     so start with IP header at offset 14 */
  start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0;
  buffer_length = vlib_buffer_get_default_data_size (vm) - start_offset;

  cur_slot = mq->last_tail;
  last_slot = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
  if (cur_slot == last_slot)
    goto refill;
  n_slots = last_slot - cur_slot;

  /* process ring slots */
  vec_validate_aligned (ptd->buffers, MEMIF_RX_VECTOR_SZ,
                        CLIB_CACHE_LINE_BYTES);
  while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ)
    {
      vlib_buffer_t *hb;

      s0 = cur_slot & mask;
      bi0 = mq->buffers[s0];
      ptd->buffers[n_rx_packets++] = bi0;

      clib_prefetch_load (&ring->desc[(cur_slot + 8) & mask]);
      d0 = &ring->desc[s0];
      hb = b0 = vlib_get_buffer (vm, bi0);
      b0->current_data = start_offset;
      b0->current_length = d0->length;
      n_rx_bytes += d0->length;

      cur_slot++;
      n_slots--;
      if (PREDICT_FALSE ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots))
        {
          hb->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
          hb->total_length_not_including_first_buffer = 0;
        next_slot:
          s0 = cur_slot & mask;
          d0 = &ring->desc[s0];
          bi0 = mq->buffers[s0];

          /* previous buffer */
          b0->next_buffer = bi0;
          b0->flags |= VLIB_BUFFER_NEXT_PRESENT;

          /* current buffer */
          b0 = vlib_get_buffer (vm, bi0);
          b0->current_data = start_offset;
          b0->current_length = d0->length;
          hb->total_length_not_including_first_buffer += d0->length;
          n_rx_bytes += d0->length;

          cur_slot++;
          n_slots--;
          if ((d0->flags & MEMIF_DESC_FLAG_NEXT) && n_slots)
            goto next_slot;
        }
    }

  /* release slots from the ring */
  mq->last_tail = cur_slot;

  n_from = n_rx_packets;
  buffers = ptd->buffers;

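  /* hand packets to the graph using the classic 4-wide plus single
   * vlib enqueue pattern */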
  while (n_from)
    {
      u32 n_left_to_next;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      while (n_from >= 8 && n_left_to_next >= 4)
        {
          b0 = vlib_get_buffer (vm, buffers[4]);
          b1 = vlib_get_buffer (vm, buffers[5]);
          b2 = vlib_get_buffer (vm, buffers[6]);
          b3 = vlib_get_buffer (vm, buffers[7]);
          vlib_prefetch_buffer_header (b0, STORE);
          vlib_prefetch_buffer_header (b1, STORE);
          vlib_prefetch_buffer_header (b2, STORE);
          vlib_prefetch_buffer_header (b3, STORE);

          /* enqueue buffer */
          to_next[0] = bi0 = buffers[0];
          to_next[1] = bi1 = buffers[1];
          to_next[2] = bi2 = buffers[2];
          to_next[3] = bi3 = buffers[3];
          to_next += 4;
          n_left_to_next -= 4;
          buffers += 4;

          b0 = vlib_get_buffer (vm, bi0);
          b1 = vlib_get_buffer (vm, bi1);
          b2 = vlib_get_buffer (vm, bi2);
          b3 = vlib_get_buffer (vm, bi3);

          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b1)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b1)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b2)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b2)->sw_if_index[VLIB_TX] = ~0;
          vnet_buffer (b3)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b3)->sw_if_index[VLIB_TX] = ~0;

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, b0);
              next1 = memif_next_from_ip_hdr (node, b1);
              next2 = memif_next_from_ip_hdr (node, b2);
              next3 = memif_next_from_ip_hdr (node, b3);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                {
                  next0 = mif->per_interface_next_index;
                  next1 = mif->per_interface_next_index;
                  next2 = mif->per_interface_next_index;
                  next3 = mif->per_interface_next_index;
                }
              else
                {
                  next0 = next1 = next2 = next3 = next_index;
                  /* redirect if feature path enabled */
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next0, b0);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next1, b1);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next2, b2);
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next3, b3);
                }
            }

          /* trace */
          if (PREDICT_FALSE (n_trace > 0))
            {
              memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace);
              if (PREDICT_FALSE (n_trace > 0))
                memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace);
            }

          /* enqueue */
          vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, bi1, bi2, bi3,
                                           next0, next1, next2, next3);

          /* next */
          n_from -= 4;
        }
      while (n_from && n_left_to_next)
        {
          /* enqueue buffer */
          to_next[0] = bi0 = buffers[0];
          to_next += 1;
          n_left_to_next--;
          buffers += 1;

          b0 = vlib_get_buffer (vm, bi0);
          vnet_buffer (b0)->sw_if_index[VLIB_RX] = mif->sw_if_index;
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = ~0;

          if (mode == MEMIF_INTERFACE_MODE_IP)
            {
              next0 = memif_next_from_ip_hdr (node, b0);
            }
          else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
            {
              if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
                next0 = mif->per_interface_next_index;
              else
                {
                  next0 = next_index;
                  /* redirect if feature path enabled */
                  vnet_feature_start_device_input_x1 (mif->sw_if_index,
                                                      &next0, b0);
                }
            }

          /* trace */
          if (PREDICT_FALSE (n_trace > 0))
            memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace);

          /* enqueue */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);

          /* next */
          n_from--;
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
                                   + VNET_INTERFACE_COUNTER_RX, thread_index,
                                   mif->sw_if_index, n_rx_packets,
                                   n_rx_bytes);

  /* refill ring with empty buffers */
refill:
  vec_reset_length (ptd->buffers);

  head = ring->head;
  n_slots = ring_size - head + mq->last_tail;
  slot = head & mask;

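  /* refill in multiples of 8 descriptors, and only when at least 32
   * slots are free, so the 8-wide template-copy loop below stays
   * efficient and small refills are amortized */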
  n_slots &= ~7;

  if (n_slots < 32)
    goto done;

  memif_desc_t desc_template, *dt = &desc_template;
  clib_memset (dt, 0, sizeof (memif_desc_t));
  dt->length = buffer_length;
773 
775  vm, mq->buffers, slot, ring_size, n_slots, mq->buffer_pool_index);
776  dt->region = mq->buffer_pool_index + 1;
777  offset = (u64) mif->regions[dt->region].shm - start_offset;
778 
779  if (PREDICT_FALSE (n_alloc != n_slots))
780  vlib_error_count (vm, node->node_index,
781  MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1);
782 
783  head += n_alloc;
784 
  while (n_alloc)
    {
      memif_desc_t *d = ring->desc + slot;
      u32 *bi = mq->buffers + slot;

      if (PREDICT_FALSE (((slot + 7 > mask) || (n_alloc < 8))))
        goto one_by_one;

      clib_memcpy_fast (d + 0, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 1, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 2, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 3, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 4, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 5, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 6, dt, sizeof (memif_desc_t));
      clib_memcpy_fast (d + 7, dt, sizeof (memif_desc_t));

      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;
      d[1].offset = (u64) vlib_get_buffer (vm, bi[1])->data - offset;
      d[2].offset = (u64) vlib_get_buffer (vm, bi[2])->data - offset;
      d[3].offset = (u64) vlib_get_buffer (vm, bi[3])->data - offset;
      d[4].offset = (u64) vlib_get_buffer (vm, bi[4])->data - offset;
      d[5].offset = (u64) vlib_get_buffer (vm, bi[5])->data - offset;
      d[6].offset = (u64) vlib_get_buffer (vm, bi[6])->data - offset;
      d[7].offset = (u64) vlib_get_buffer (vm, bi[7])->data - offset;

      slot = (slot + 8) & mask;
      n_alloc -= 8;
      continue;

    one_by_one:
      clib_memcpy_fast (d, dt, sizeof (memif_desc_t));
      d[0].offset = (u64) vlib_get_buffer (vm, bi[0])->data - offset;

      slot = (slot + 1) & mask;
      n_alloc -= 1;
    }

  __atomic_store_n (&ring->head, head, __ATOMIC_RELEASE);

done:
  return n_rx_packets;
}

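/* Input node: walk the per-thread poll vector and invoke the proper RX
 * variant for each pending queue: zero-copy, copy-mode slave (M2S ring)
 * or copy-mode master (S2M ring). */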
VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm,
                                 vlib_node_runtime_t * node,
                                 vlib_frame_t * frame)
{
  u32 n_rx = 0;
  memif_main_t *mm = &memif_main;
  memif_interface_mode_t mode_ip = MEMIF_INTERFACE_MODE_IP;
  memif_interface_mode_t mode_eth = MEMIF_INTERFACE_MODE_ETHERNET;

  vnet_hw_if_rxq_poll_vector_t *pv;
  pv = vnet_hw_if_get_rxq_poll_vector (vm, node);
  for (int i = 0; i < vec_len (pv); i++)
    {
      memif_if_t *mif;
      u32 qid;
      mif = vec_elt_at_index (mm->interfaces, pv[i].dev_instance);
      qid = pv[i].queue_id;
      if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
          (mif->flags & MEMIF_IF_FLAG_CONNECTED))
        {
          if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx +=
                  memif_device_input_zc_inline (vm, node, mif, qid, mode_ip);
              else
                n_rx +=
                  memif_device_input_zc_inline (vm, node, mif, qid, mode_eth);
            }
          else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_M2S, qid, mode_ip);
              else
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_M2S, qid, mode_eth);
            }
          else
            {
              if (mif->mode == MEMIF_INTERFACE_MODE_IP)
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_S2M, qid, mode_ip);
              else
                n_rx += memif_device_input_inline (
                  vm, node, mif, MEMIF_RING_S2M, qid, mode_eth);
            }
        }
    }

  return n_rx;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (memif_input_node) = {
  .name = "memif-input",
  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
  .sibling_of = "device-input",
  .format_trace = format_memif_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_INTERRUPT,
  .n_errors = MEMIF_INPUT_N_ERROR,
  .error_counters = memif_input_error_counters,
};

/* *INDENT-ON* */


/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */