/* FD.io VPP v17.10-9-gd594711 — Vector Packet Processing — memif plugin: node.c */
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
17 
18 #define _GNU_SOURCE
19 #include <stdint.h>
20 #include <net/if.h>
21 #include <sys/ioctl.h>
22 #include <sys/uio.h>
23 
24 #include <vlib/vlib.h>
25 #include <vlib/unix/unix.h>
26 #include <vnet/ethernet/ethernet.h>
27 #include <vnet/devices/devices.h>
28 #include <vnet/feature/feature.h>
29 
30 #include <memif/memif.h>
31 #include <memif/private.h>
32 
/* Node error table: each _(SYMBOL, "string") entry expands to an error id
 * in memif_input_error_t and the matching counter string below. */
#define foreach_memif_input_error \
  _(NOT_IP, "not ip packet")
35 
36 typedef enum
37 {
38 #define _(f,s) MEMIF_INPUT_ERROR_##f,
40 #undef _
43 
44 static char *memif_input_error_strings[] = {
45 #define _(n,s) s,
47 #undef _
48 };
49 
50 typedef struct
51 {
56 
57 static u8 *
58 format_memif_input_trace (u8 * s, va_list * args)
59 {
60  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
61  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
62  memif_input_trace_t *t = va_arg (*args, memif_input_trace_t *);
63  uword indent = format_get_indent (s);
64 
65  s = format (s, "memif: hw_if_index %d next-index %d",
66  t->hw_if_index, t->next_index);
67  s = format (s, "\n%Uslot: ring %u", format_white_space, indent + 2,
68  t->ring);
69  return s;
70 }
71 
74 {
75  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
76  vlib_prefetch_buffer_header (b, STORE);
78 }
79 
82  u32 prev_bi)
83 {
84  vlib_buffer_t *b = vlib_get_buffer (vm, bi);
85  vlib_buffer_t *first_b = vlib_get_buffer (vm, first_bi);
86  vlib_buffer_t *prev_b = vlib_get_buffer (vm, prev_bi);
87 
88  /* update first buffer */
90 
91  /* update previous buffer */
92  prev_b->next_buffer = bi;
94 
95  /* update current buffer */
96  b->next_buffer = 0;
97 }
98 
99 /**
100  * @brief Copy buffer from rx ring
101  *
102  * @param * vm (in)
103  * @param * mif (in) pointer to memif interface
104  * @param * ring (in) pointer to memif ring
105  * @param * rd (in) pointer to ring data
106  * @param ring_size (in) ring size
107  * @param * n_free_bufs (in/out) the number of free vlib buffers available
108  * @param ** first_b (out) the first vlib buffer pointer
109  * @param * first_bi (out) the first vlib buffer index
110  * @param * bi (in/out) the current buffer index
111  * #param * num_slots (in/out) the number of descriptors available to read
112  *
113  * @return total bytes read from rx ring also written to vlib buffers
114  */
117  memif_ring_t * ring, memif_queue_t * mq,
118  u16 ring_size, u32 n_buffer_bytes,
119  u32 * n_free_bufs, vlib_buffer_t ** first_b,
120  u32 * first_bi, u32 * bi, u16 * num_slots)
121 {
122  memif_main_t *nm = &memif_main;
123  u32 thread_index = vlib_get_thread_index ();
124  u32 total_bytes = 0, offset = 0;
125  u32 data_len;
126  u32 bytes_to_copy;
127  void *mb;
128  vlib_buffer_t *b;
129  u16 mask = ring_size - 1;
130  u32 prev_bi;
131  u16 last_head;
132 
133  while (*num_slots)
134  {
135  data_len = ring->desc[mq->last_head].length;
136  while (data_len && (*n_free_bufs))
137  {
138  /* get empty buffer */
139  u32 last_buf = vec_len (nm->rx_buffers[thread_index]) - 1;
140  prev_bi = *bi;
141  *bi = nm->rx_buffers[thread_index][last_buf];
142  b = vlib_get_buffer (vm, *bi);
143  _vec_len (nm->rx_buffers[thread_index]) = last_buf;
144  (*n_free_bufs)--;
145  if (PREDICT_FALSE (*n_free_bufs == 0))
146  {
147  *n_free_bufs +=
148  vlib_buffer_alloc (vm,
149  &nm->rx_buffers[thread_index]
150  [*n_free_bufs], ring_size);
151  _vec_len (nm->rx_buffers[thread_index]) = *n_free_bufs;
152  }
153 
154  if (last_buf > 4)
155  {
156  memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 2]);
157  memif_prefetch (vm, nm->rx_buffers[thread_index][last_buf - 3]);
158  }
159 
160  /* copy buffer */
161  bytes_to_copy =
162  data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
163  b->current_data = 0;
164  mb = memif_get_buffer (mif, ring, mq->last_head);
167  if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
170  bytes_to_copy - CLIB_CACHE_LINE_BYTES);
171 
172  /* fill buffer header */
173  b->current_length = bytes_to_copy;
174 
175  if (total_bytes == 0)
176  {
177  /* fill buffer metadata */
180  vnet_buffer (b)->sw_if_index[VLIB_RX] = mif->sw_if_index;
181  vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
182  *first_bi = *bi;
183  *first_b = vlib_get_buffer (vm, *first_bi);
184  }
185  else
186  memif_buffer_add_to_chain (vm, *bi, *first_bi, prev_bi);
187 
188  offset += bytes_to_copy;
189  total_bytes += bytes_to_copy;
190  data_len -= bytes_to_copy;
191  }
192  last_head = mq->last_head;
193  /* Advance to next descriptor */
194  mq->last_head = (mq->last_head + 1) & mask;
195  offset = 0;
196  (*num_slots)--;
197  if ((ring->desc[last_head].flags & MEMIF_DESC_FLAG_NEXT) == 0)
198  break;
199  }
200 
201  return (total_bytes);
202 }
203 
204 
207 {
208  u8 *ptr = vlib_buffer_get_current (b);
209  u8 v = *ptr & 0xf0;
210 
211  if (PREDICT_TRUE (v == 0x40))
213  else if (PREDICT_TRUE (v == 0x60))
215 
216  b->error = node->errors[MEMIF_INPUT_ERROR_NOT_IP];
218 }
219 
222  vlib_frame_t * frame, memif_if_t * mif,
223  memif_ring_type_t type, u16 qid,
225 {
226  vnet_main_t *vnm = vnet_get_main ();
227  memif_ring_t *ring;
228  memif_queue_t *mq;
229  u16 head;
230  u32 next_index;
231  uword n_trace = vlib_get_trace_count (vm, node);
232  memif_main_t *nm = &memif_main;
233  u32 n_rx_packets = 0;
234  u32 n_rx_bytes = 0;
235  u32 *to_next = 0;
236  u32 n_free_bufs;
237  u32 b0_total, b1_total;
238  u32 thread_index = vlib_get_thread_index ();
239  u16 ring_size, mask, num_slots;
240  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm,
242 
243  mq = vec_elt_at_index (mif->rx_queues, qid);
244  ring = mq->ring;
245  ring_size = 1 << mq->log2_ring_size;
246  mask = ring_size - 1;
247 
248  if (mode == MEMIF_INTERFACE_MODE_IP)
249  {
251  }
252  else
253  {
255  }
256 
257  n_free_bufs = vec_len (nm->rx_buffers[thread_index]);
258  if (PREDICT_FALSE (n_free_bufs < ring_size))
259  {
260  vec_validate (nm->rx_buffers[thread_index],
261  ring_size + n_free_bufs - 1);
262  n_free_bufs +=
263  vlib_buffer_alloc (vm, &nm->rx_buffers[thread_index][n_free_bufs],
264  ring_size);
265  _vec_len (nm->rx_buffers[thread_index]) = n_free_bufs;
266  }
267 
268  head = ring->head;
269  if (head == mq->last_head)
270  return 0;
271 
272  if (head > mq->last_head)
273  num_slots = head - mq->last_head;
274  else
275  num_slots = ring_size - mq->last_head + head;
276 
277  while (num_slots)
278  {
279  u32 n_left_to_next;
280  u32 next0 = next_index;
281  u32 next1 = next_index;
282  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
283 
284  while (num_slots > 11 && n_left_to_next > 2)
285  {
286  if (PREDICT_TRUE (mq->last_head + 5 < ring_size))
287  {
288  CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 2),
289  CLIB_CACHE_LINE_BYTES, LOAD);
290  CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 3),
291  CLIB_CACHE_LINE_BYTES, LOAD);
292  CLIB_PREFETCH (&ring->desc[mq->last_head + 4],
293  CLIB_CACHE_LINE_BYTES, LOAD);
294  CLIB_PREFETCH (&ring->desc[mq->last_head + 5],
295  CLIB_CACHE_LINE_BYTES, LOAD);
296  }
297  else
298  {
300  (mif, ring, (mq->last_head + 2) % mask),
301  CLIB_CACHE_LINE_BYTES, LOAD);
303  (mif, ring, (mq->last_head + 3) % mask),
304  CLIB_CACHE_LINE_BYTES, LOAD);
305  CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) % mask],
306  CLIB_CACHE_LINE_BYTES, LOAD);
307  CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) % mask],
308  CLIB_CACHE_LINE_BYTES, LOAD);
309  }
310 
311  vlib_buffer_t *first_b0 = 0;
312  u32 bi0 = 0, first_bi0 = 0;
313  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
314  ring_size,
315  n_buffer_bytes,
316  &n_free_bufs, &first_b0,
317  &first_bi0, &bi0,
318  &num_slots);
319 
320  vlib_buffer_t *first_b1 = 0;
321  u32 bi1 = 0, first_bi1 = 0;
322  b1_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
323  ring_size,
324  n_buffer_bytes,
325  &n_free_bufs, &first_b1,
326  &first_bi1, &bi1,
327  &num_slots);
328 
329  /* enqueue buffer */
330  to_next[0] = first_bi0;
331  to_next[1] = first_bi1;
332  to_next += 2;
333  n_left_to_next -= 2;
334 
335 
336  if (mode == MEMIF_INTERFACE_MODE_IP)
337  {
338  next0 = memif_next_from_ip_hdr (node, first_b0);
339  next1 = memif_next_from_ip_hdr (node, first_b1);
340  }
341  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
342  {
343  if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
344  next0 = next1 = mif->per_interface_next_index;
345  else
346  /* redirect if feature path
347  * enabled */
349  &next0, &next1,
350  first_b0, first_b1);
351  }
352 
353  /* trace */
356 
357  if (PREDICT_FALSE (n_trace > 0))
358  {
359  /* b0 */
360  if (PREDICT_TRUE (first_b0 != 0))
361  {
363  vlib_trace_buffer (vm, node, next0, first_b0,
364  /* follow_chain */ 0);
365  vlib_set_trace_count (vm, node, --n_trace);
366  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
367  tr->next_index = next0;
368  tr->hw_if_index = mif->hw_if_index;
369  tr->ring = qid;
370  }
371  if (n_trace)
372  {
373  /* b1 */
374  if (PREDICT_TRUE (first_b1 != 0))
375  {
377  vlib_trace_buffer (vm, node, next1, first_b1,
378  /* follow_chain */ 0);
379  vlib_set_trace_count (vm, node, --n_trace);
380  tr = vlib_add_trace (vm, node, first_b1, sizeof (*tr));
381  tr->next_index = next1;
382  tr->hw_if_index = mif->hw_if_index;
383  tr->ring = qid;
384  }
385  }
386  }
387 
388  /* enqueue */
389  vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
390  n_left_to_next, first_bi0,
391  first_bi1, next0, next1);
392 
393  /* next packet */
394  n_rx_packets += 2;
395  n_rx_bytes += b0_total + b1_total;
396  }
397  while (num_slots && n_left_to_next)
398  {
399  vlib_buffer_t *first_b0 = 0;
400  u32 bi0 = 0, first_bi0 = 0;
401  b0_total = memif_copy_buffer_from_rx_ring (vm, mif, ring, mq,
402  ring_size,
403  n_buffer_bytes,
404  &n_free_bufs, &first_b0,
405  &first_bi0, &bi0,
406  &num_slots);
407 
408  if (mode == MEMIF_INTERFACE_MODE_IP)
409  {
410  next0 = memif_next_from_ip_hdr (node, first_b0);
411  }
412  else if (mode == MEMIF_INTERFACE_MODE_ETHERNET)
413  {
414  if (PREDICT_FALSE (mif->per_interface_next_index != ~0))
415  next0 = mif->per_interface_next_index;
416  else
417  /* redirect if feature path
418  * enabled */
420  &next0, first_b0);
421  }
422 
423  /* trace */
425 
426  if (PREDICT_FALSE (n_trace > 0))
427  {
428  if (PREDICT_TRUE (first_b0 != 0))
429  {
431  vlib_trace_buffer (vm, node, next0, first_b0,
432  /* follow_chain */ 0);
433  vlib_set_trace_count (vm, node, --n_trace);
434  tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
435  tr->next_index = next0;
436  tr->hw_if_index = mif->hw_if_index;
437  tr->ring = qid;
438  }
439  }
440 
441  /* enqueue buffer */
442  to_next[0] = first_bi0;
443  to_next += 1;
444  n_left_to_next--;
445 
446  /* enqueue */
447  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
448  n_left_to_next, first_bi0, next0);
449 
450  /* next packet */
451  n_rx_packets++;
452  n_rx_bytes += b0_total;
453  }
454  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
455  }
457  ring->tail = head;
458 
460  + VNET_INTERFACE_COUNTER_RX, thread_index,
461  mif->hw_if_index, n_rx_packets,
462  n_rx_bytes);
463 
464  return n_rx_packets;
465 }
466 
467 static uword
469  vlib_frame_t * frame)
470 {
471  u32 n_rx = 0;
472  memif_main_t *nm = &memif_main;
473  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
475 
477  {
478  memif_if_t *mif;
479  mif = vec_elt_at_index (nm->interfaces, dq->dev_instance);
480  if ((mif->flags & MEMIF_IF_FLAG_ADMIN_UP) &&
481  (mif->flags & MEMIF_IF_FLAG_CONNECTED))
482  {
483  if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
484  {
485  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
486  n_rx += memif_device_input_inline (vm, node, frame, mif,
487  MEMIF_RING_M2S, dq->queue_id,
489  else
490  n_rx += memif_device_input_inline (vm, node, frame, mif,
491  MEMIF_RING_M2S, dq->queue_id,
493  }
494  else
495  {
496  if (mif->mode == MEMIF_INTERFACE_MODE_IP)
497  n_rx += memif_device_input_inline (vm, node, frame, mif,
498  MEMIF_RING_S2M, dq->queue_id,
500  else
501  n_rx += memif_device_input_inline (vm, node, frame, mif,
502  MEMIF_RING_S2M, dq->queue_id,
504  }
505  }
506  }
507 
508  return n_rx;
509 }
510 
511 /* *INDENT-OFF* */
513  .function = memif_input_fn,
514  .name = "memif-input",
515  .sibling_of = "device-input",
516  .format_trace = format_memif_input_trace,
517  .type = VLIB_NODE_TYPE_INPUT,
518  .state = VLIB_NODE_STATE_INTERRUPT,
519  .n_errors = MEMIF_INPUT_N_ERROR,
520  .error_strings = memif_input_error_strings,
521 };
522 
524 /* *INDENT-ON* */
525 
526 
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
memif_if_t * interfaces
Definition: private.h:188
#define vec_validate(V, I)
Make sure vector is long enough for given index (no header, unspecified alignment) ...
Definition: vec.h:432
vnet_device_and_queue_t * devices_and_queues
Definition: devices.h:69
#define CLIB_UNUSED(x)
Definition: clib.h:79
static_always_inline uword memif_device_input_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, u16 qid, memif_interface_mode_t mode)
Definition: node.c:221
static u32 vlib_get_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt)
Definition: trace_funcs.h:143
static void vlib_increment_combined_counter(vlib_combined_counter_main_t *cm, u32 thread_index, u32 index, u64 n_packets, u64 n_bytes)
Increment a combined counter.
Definition: counter.h:211
u8 runtime_data[0]
Function dependent node-runtime data.
Definition: node.h:464
static u8 * format_memif_input_trace(u8 *s, va_list *args)
Definition: node.c:58
vnet_main_t * vnet_get_main(void)
Definition: misc.c:46
vnet_interface_main_t interface_main
Definition: vnet.h:56
#define PREDICT_TRUE(x)
Definition: clib.h:98
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:104
memif_interface_mode_t
Definition: memif.h:53
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
vlib_error_t * errors
Vector of errors for this node.
Definition: node.h:415
uint32_t length
Definition: memif.h:152
static void vlib_trace_buffer(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, vlib_buffer_t *b, int follow_chain)
Definition: trace_funcs.h:104
static_always_inline uword memif_copy_buffer_from_rx_ring(vlib_main_t *vm, memif_if_t *mif, memif_ring_t *ring, memif_queue_t *mq, u16 ring_size, u32 n_buffer_bytes, u32 *n_free_bufs, vlib_buffer_t **first_b, u32 *first_bi, u32 *bi, u16 *num_slots)
Copy buffer from rx ring.
Definition: node.c:116
#define VLIB_BUFFER_NEXT_PRESENT
Definition: buffer.h:95
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:68
#define static_always_inline
Definition: clib.h:85
static_always_inline void memif_buffer_add_to_chain(vlib_main_t *vm, u32 bi, u32 first_bi, u32 prev_bi)
Definition: node.c:81
static_always_inline u32 memif_next_from_ip_hdr(vlib_node_runtime_t *node, vlib_buffer_t *b)
Definition: node.c:206
static uword format_get_indent(u8 *s)
Definition: format.h:72
vlib_combined_counter_main_t * combined_sw_if_counters
Definition: interface.h:668
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
#define VLIB_BUFFER_TOTAL_LENGTH_VALID
Definition: buffer.h:97
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:169
uint16_t flags
Definition: memif.h:148
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
u32 per_interface_next_index
Definition: private.h:138
u16 last_head
Definition: private.h:104
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
Definition: node.c:512
memif_desc_t desc[0]
Definition: memif.h:174
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:72
uword dev_instance
Definition: private.h:135
#define v
Definition: acl.c:323
static void * vlib_buffer_get_current(vlib_buffer_t *b)
Get pointer to current data to process.
Definition: buffer.h:193
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX
Definition: buffer.h:422
#define PREDICT_FALSE(x)
Definition: clib.h:97
static u32 vlib_buffer_free_list_buffer_size(vlib_main_t *vm, u32 free_list_index)
Definition: buffer_funcs.h:426
static uword memif_input_fn(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: node.c:468
#define foreach_memif_input_error
Definition: node.c:33
#define vlib_validate_buffer_enqueue_x2(vm, node, next_index, to_next, n_left_to_next, bi0, bi1, next0, next1)
Finish enqueueing two buffers forward in the graph.
Definition: buffer_node.h:70
#define vlib_validate_buffer_enqueue_x1(vm, node, next_index, to_next, n_left_to_next, bi0, next0)
Finish enqueueing one buffer forward in the graph.
Definition: buffer_node.h:216
#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left)
Get pointer to next frame vector data by (vlib_node_runtime_t, next_index).
Definition: node_funcs.h:364
vlib_error_t error
Error code for buffers to be enqueued to error handler.
Definition: buffer.h:113
static_always_inline void vnet_feature_start_device_input_x2(u32 sw_if_index, u32 *next0, u32 *next1, vlib_buffer_t *b0, vlib_buffer_t *b1)
Definition: feature.h:259
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:82
vlib_main_t * vm
Definition: buffer.c:283
#define clib_memcpy(a, b, c)
Definition: string.h:69
void vlib_put_next_frame(vlib_main_t *vm, vlib_node_runtime_t *r, u32 next_index, u32 n_vectors_left)
Release pointer to next frame vector data.
Definition: main.c:454
u32 ** rx_buffers
Definition: private.h:195
memif_input_error_t
Definition: node.c:36
unsigned int u32
Definition: types.h:88
static_always_inline void memif_prefetch(vlib_main_t *vm, u32 bi)
Definition: node.c:73
#define MEMIF_DESC_FLAG_NEXT
Definition: memif.h:149
static char * memif_input_error_strings[]
Definition: node.c:44
static_always_inline void * memif_get_buffer(memif_if_t *mif, memif_ring_t *ring, u16 slot)
Definition: private.h:232
u32 flags
Definition: private.h:131
memif_ring_t * ring
Definition: private.h:99
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:109
u32 hw_if_index
Definition: private.h:133
u64 uword
Definition: types.h:112
static void * vlib_add_trace(vlib_main_t *vm, vlib_node_runtime_t *r, vlib_buffer_t *b, u32 n_data_bytes)
Definition: trace_funcs.h:55
u32 total_length_not_including_first_buffer
Only valid for first buffer in chain.
Definition: buffer.h:141
template key/value backing page structure
Definition: bihash_doc.h:44
#define foreach_device_and_queue(var, vec)
Definition: devices.h:155
Definition: defs.h:47
unsigned short u16
Definition: types.h:57
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
unsigned char u8
Definition: types.h:56
#define VLIB_BUFFER_TRACE_TRAJECTORY_INIT(b)
Definition: buffer.h:517
memif_log2_ring_size_t log2_ring_size
Definition: private.h:100
#define vnet_buffer(b)
Definition: buffer.h:306
static_always_inline void vnet_feature_start_device_input_x1(u32 sw_if_index, u32 *next0, vlib_buffer_t *b0)
Definition: feature.h:227
#define VLIB_REGISTER_NODE(x,...)
Definition: node.h:143
u8 data[0]
Packet data.
Definition: buffer.h:157
memif_ring_type_t
Definition: memif.h:47
volatile uint16_t head
Definition: memif.h:170
memif_queue_t * rx_queues
Definition: private.h:149
static void vlib_set_trace_count(vlib_main_t *vm, vlib_node_runtime_t *rt, u32 count)
Definition: trace_funcs.h:159
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:67
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:75
memif_main_t memif_main
Definition: memif.c:43
static u32 vlib_buffer_alloc(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Allocate buffers into supplied array.
Definition: buffer_funcs.h:254
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
u32 sw_if_index
Definition: private.h:134
VLIB_NODE_FUNCTION_MULTIARCH(ethernet_input_not_l2_node, ethernet_input_not_l2)
Definition: node.c:1207
volatile uint16_t tail
Definition: memif.h:172
memif_interface_mode_t mode
Definition: private.h:136
Definition: defs.h:46