FD.io VPP  v18.07-rc0-415-g6c78436
Vector Packet Processing
device.c
Go to the documentation of this file.
1 /*
2  *------------------------------------------------------------------
3  * Copyright (c) 2016 Cisco and/or its affiliates.
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at:
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  *------------------------------------------------------------------
16  */
17 
18 #define _GNU_SOURCE
19 #include <stdint.h>
20 #include <net/if.h>
21 #include <sys/ioctl.h>
22 #include <sys/uio.h>
23 
24 #include <vlib/vlib.h>
25 #include <vlib/unix/unix.h>
26 #include <vnet/ethernet/ethernet.h>
27 
28 #include <memif/memif.h>
29 #include <memif/private.h>
30 
/* TX error counters: each entry expands to an enum tag and a
 * human-readable counter description registered with the device class. */
#define foreach_memif_tx_func_error \
_(NO_FREE_SLOTS, "no free tx slots") \
_(ROLLBACK, "not enough space in tx buffers")
34 
35 typedef enum
36 {
37 #define _(f,s) MEMIF_TX_ERROR_##f,
39 #undef _
42 
43 static __clib_unused char *memif_tx_func_error_strings[] = {
44 #define _(n,s) s,
46 #undef _
47 };
48 
49 #ifndef CLIB_MARCH_VARIANT
50 u8 *
51 format_memif_device_name (u8 * s, va_list * args)
52 {
53  u32 dev_instance = va_arg (*args, u32);
54  memif_main_t *mm = &memif_main;
55  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
57 
59  s = format (s, "memif%lu/%lu", msf->socket_id, mif->id);
60  return s;
61 }
62 #endif
63 
64 static __clib_unused u8 *
65 format_memif_device (u8 * s, va_list * args)
66 {
67  u32 dev_instance = va_arg (*args, u32);
68  int verbose = va_arg (*args, int);
69  u32 indent = format_get_indent (s);
70 
71  s = format (s, "MEMIF interface");
72  if (verbose)
73  {
74  s = format (s, "\n%U instance %u", format_white_space, indent + 2,
75  dev_instance);
76  }
77  return s;
78 }
79 
80 static __clib_unused u8 *
81 format_memif_tx_trace (u8 * s, va_list * args)
82 {
83  s = format (s, "Unimplemented...");
84  return s;
85 }
86 
89  u16 buffer_offset, u16 buffer_vec_index)
90 {
91  memif_copy_op_t *co;
93  co->data = data;
94  co->data_len = len;
95  co->buffer_offset = buffer_offset;
96  co->buffer_vec_index = buffer_vec_index;
97 }
98 
101  vlib_frame_t * frame, memif_if_t * mif,
102  memif_ring_type_t type, memif_queue_t * mq,
104 {
105  memif_ring_t *ring;
106  u32 *buffers = vlib_frame_args (frame);
107  u32 n_left = frame->n_vectors;
108  u32 n_copy_op;
109  u16 ring_size, mask, slot, free_slots;
110  int n_retries = 5;
111  vlib_buffer_t *b0, *b1, *b2, *b3;
112  memif_copy_op_t *co;
113  memif_region_index_t last_region = ~0;
114  void *last_region_shm = 0;
115 
116  ring = mq->ring;
117  ring_size = 1 << mq->log2_ring_size;
118  mask = ring_size - 1;
119 
120 retry:
121 
122  free_slots = ring->tail - mq->last_tail;
123  mq->last_tail += free_slots;
124  slot = (type == MEMIF_RING_S2M) ? ring->head : ring->tail;
125 
126  if (type == MEMIF_RING_S2M)
127  free_slots = ring_size - ring->head + mq->last_tail;
128  else
129  free_slots = ring->head - ring->tail;
130 
131  while (n_left && free_slots)
132  {
133  memif_desc_t *d0;
134  void *mb0;
135  i32 src_off;
136  u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
137  u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
138  u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
139  u16 saved_slot = slot;
140 
141  CLIB_PREFETCH (&ring->desc[(slot + 8) & mask], CLIB_CACHE_LINE_BYTES,
142  LOAD);
143 
144  d0 = &ring->desc[slot & mask];
145  if (PREDICT_FALSE (last_region != d0->region))
146  {
147  last_region_shm = mif->regions[d0->region].shm;
148  last_region = d0->region;
149  }
150  mb0 = last_region_shm + d0->offset;
151 
152  dst_off = 0;
153 
154  /* slave is the producer, so it should be able to reset buffer length */
155  dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
156 
157  if (PREDICT_TRUE (n_left >= 4))
158  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
159  bi0 = buffers[0];
160 
161  next_in_chain:
162 
163  b0 = vlib_get_buffer (vm, bi0);
164  src_off = b0->current_data;
165  src_left = b0->current_length;
166 
167  while (src_left)
168  {
169  if (PREDICT_FALSE (dst_left == 0))
170  {
171  if (free_slots)
172  {
173  slot++;
174  free_slots--;
176  d0 = &ring->desc[slot & mask];
177  dst_off = 0;
178  dst_left =
179  (type ==
180  MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
181 
182  if (PREDICT_FALSE (last_region != d0->region))
183  {
184  last_region_shm = mif->regions[d0->region].shm;
185  last_region = d0->region;
186  }
187  mb0 = last_region_shm + d0->offset;
188  }
189  else
190  {
191  /* we need to rollback vectors before bailing out */
192  _vec_len (ptd->buffers) = saved_ptd_buffers_len;
193  _vec_len (ptd->copy_ops) = saved_ptd_copy_ops_len;
194  vlib_error_count (vm, node->node_index,
195  MEMIF_TX_ERROR_ROLLBACK, 1);
196  slot = saved_slot;
197  goto no_free_slots;
198  }
199  }
200  bytes_to_copy = clib_min (src_left, dst_left);
201  memif_add_copy_op (ptd, mb0 + dst_off, bytes_to_copy, src_off,
202  vec_len (ptd->buffers));
204  src_off += bytes_to_copy;
205  dst_off += bytes_to_copy;
206  src_left -= bytes_to_copy;
207  dst_left -= bytes_to_copy;
208  }
209 
210  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
211  {
212  bi0 = b0->next_buffer;
213  goto next_in_chain;
214  }
215 
216  d0->length = dst_off;
217  d0->flags = 0;
218 
219  free_slots -= 1;
220  slot += 1;
221 
222  buffers++;
223  n_left--;
224  }
225 no_free_slots:
226 
227  /* copy data */
228  n_copy_op = vec_len (ptd->copy_ops);
229  co = ptd->copy_ops;
230  while (n_copy_op >= 8)
231  {
232  CLIB_PREFETCH (co[4].data, CLIB_CACHE_LINE_BYTES, LOAD);
233  CLIB_PREFETCH (co[5].data, CLIB_CACHE_LINE_BYTES, LOAD);
234  CLIB_PREFETCH (co[6].data, CLIB_CACHE_LINE_BYTES, LOAD);
235  CLIB_PREFETCH (co[7].data, CLIB_CACHE_LINE_BYTES, LOAD);
236 
237  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
238  b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
239  b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
240  b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
241 
242  clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
243  co[0].data_len);
244  clib_memcpy (co[1].data, b1->data + co[1].buffer_offset,
245  co[1].data_len);
246  clib_memcpy (co[2].data, b2->data + co[2].buffer_offset,
247  co[2].data_len);
248  clib_memcpy (co[3].data, b3->data + co[3].buffer_offset,
249  co[3].data_len);
250 
251  co += 4;
252  n_copy_op -= 4;
253  }
254  while (n_copy_op)
255  {
256  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
257  clib_memcpy (co[0].data, b0->data + co[0].buffer_offset,
258  co[0].data_len);
259  co += 1;
260  n_copy_op -= 1;
261  }
262 
263  vec_reset_length (ptd->copy_ops);
264  vec_reset_length (ptd->buffers);
265 
267  if (type == MEMIF_RING_S2M)
268  ring->head = slot;
269  else
270  ring->tail = slot;
271 
272  if (n_left && n_retries--)
273  goto retry;
274 
276 
277  if (n_left)
278  {
279  vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
280  n_left);
281  }
282 
283  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
284  {
285  u64 b = 1;
286  CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
287  mq->int_count++;
288  }
289 
290  vlib_buffer_free (vm, vlib_frame_args (frame), frame->n_vectors);
291 
292  return frame->n_vectors;
293 }
294 
297  vlib_frame_t * frame, memif_if_t * mif,
298  memif_queue_t * mq,
300 {
301  memif_ring_t *ring = mq->ring;
302  u32 *buffers = vlib_frame_args (frame);
303  u32 n_left = frame->n_vectors;
304  u16 slot, free_slots, n_free;
305  u16 ring_size = 1 << mq->log2_ring_size;
306  u16 mask = ring_size - 1;
307  int n_retries = 5;
308  vlib_buffer_t *b0;
309 
310 retry:
311  n_free = ring->tail - mq->last_tail;
312  if (n_free >= 16)
313  {
315  mq->last_tail & mask,
316  ring_size, n_free);
317  mq->last_tail += n_free;
318  }
319 
320  slot = ring->head;
321  free_slots = ring_size - ring->head + mq->last_tail;
322 
323  while (n_left && free_slots)
324  {
325  u16 s0;
326  u16 slots_in_packet = 1;
327  memif_desc_t *d0;
328  u32 bi0;
329 
330  CLIB_PREFETCH (&ring->desc[(slot + 8) & mask], CLIB_CACHE_LINE_BYTES,
331  STORE);
332 
333  if (PREDICT_TRUE (n_left >= 4))
334  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
335 
336  bi0 = buffers[0];
337 
338  next_in_chain:
339  s0 = slot & mask;
340  d0 = &ring->desc[s0];
341  mq->buffers[s0] = bi0;
342  b0 = vlib_get_buffer (vm, bi0);
343 
344  d0->region = b0->buffer_pool_index + 1;
345  d0->offset = (void *) b0->data + b0->current_data -
346  mif->regions[d0->region].shm;
347  d0->length = b0->current_length;
348 
349  free_slots--;
350  slot++;
351 
352  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
353  {
354  if (PREDICT_FALSE (free_slots == 0))
355  {
356  /* revert to last fully processed packet */
357  free_slots += slots_in_packet;
358  slot -= slots_in_packet;
359  goto no_free_slots;
360  }
361 
363  bi0 = b0->next_buffer;
364 
365  /* next */
366  slots_in_packet++;
367  goto next_in_chain;
368  }
369 
370  d0->flags = 0;
371 
372  /* next from */
373  buffers++;
374  n_left--;
375  }
376 no_free_slots:
377 
379  ring->head = slot;
380 
381  if (n_left && n_retries--)
382  goto retry;
383 
385 
386  if (n_left)
387  {
388  vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
389  n_left);
390  vlib_buffer_free (vm, buffers, n_left);
391  }
392 
393  if ((ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
394  {
395  u64 b = 1;
396  CLIB_UNUSED (int r) = write (mq->int_fd, &b, sizeof (b));
397  mq->int_count++;
398  }
399 
400  return frame->n_vectors;
401 }
402 
403 uword
405  vlib_node_runtime_t * node,
406  vlib_frame_t * frame)
407 {
408  memif_main_t *nm = &memif_main;
409  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
411  memif_queue_t *mq;
412  u32 thread_index = vlib_get_thread_index ();
414  thread_index);
415  u8 tx_queues = vec_len (mif->tx_queues);
416 
417  if (tx_queues < vec_len (vlib_mains))
418  {
419  ASSERT (tx_queues > 0);
420  mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
421  clib_spinlock_lock_if_init (&mif->lockp);
422  }
423  else
424  mq = vec_elt_at_index (mif->tx_queues, thread_index);
425 
426  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
427  return memif_interface_tx_zc_inline (vm, node, frame, mif, mq, ptd);
428  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
429  return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_S2M,
430  mq, ptd);
431  else
432  return memif_interface_tx_inline (vm, node, frame, mif, MEMIF_RING_M2S,
433  mq, ptd);
434 }
435 
436 static __clib_unused void
438  u32 node_index)
439 {
440  memif_main_t *apm = &memif_main;
441  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
443 
444  /* Shut off redirection */
445  if (node_index == ~0)
446  {
447  mif->per_interface_next_index = node_index;
448  return;
449  }
450 
452  vlib_node_add_next (vlib_get_main (), memif_input_node.index, node_index);
453 }
454 
455 static __clib_unused void
457 {
458  /* Nothing for now */
459 }
460 
461 static __clib_unused clib_error_t *
464 {
465  memif_main_t *mm = &memif_main;
466  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
468  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
469 
472  else
474 
475  return 0;
476 }
477 
478 static __clib_unused clib_error_t *
480 {
481  memif_main_t *mm = &memif_main;
482  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
484  static clib_error_t *error = 0;
485 
487  mif->flags |= MEMIF_IF_FLAG_ADMIN_UP;
488  else
489  mif->flags &= ~MEMIF_IF_FLAG_ADMIN_UP;
490 
491  return error;
492 }
493 
494 static __clib_unused clib_error_t *
496  u32 hw_if_index,
497  struct vnet_sw_interface_t *st, int is_add)
498 {
499  /* Nothing for now */
500  return 0;
501 }
502 
503 #ifndef CLIB_MARCH_VARIANT
504 /* *INDENT-OFF* */
506  .name = "memif",
507  .tx_function = memif_interface_tx,
508  .format_device_name = format_memif_device_name,
509  .format_device = format_memif_device,
510  .format_tx_trace = format_memif_tx_trace,
511  .tx_function_n_errors = MEMIF_TX_N_ERROR,
512  .tx_function_error_strings = memif_tx_func_error_strings,
513  .rx_redirect_to_node = memif_set_interface_next_node,
514  .clear_counters = memif_clear_hw_interface_counters,
515  .admin_up_down_function = memif_interface_admin_up_down,
516  .subif_add_del_function = memif_subif_add_del_function,
517  .rx_mode_change_function = memif_interface_rx_mode_change,
518 };
519 
520 #if __x86_64__
523 static void __clib_constructor
525 {
526  if (memif_interface_tx_avx512 && clib_cpu_supports_avx512f ())
528  else if (memif_interface_tx_avx2 && clib_cpu_supports_avx2 ())
530 }
531 #endif
532 #endif
533 
534 /* *INDENT-ON* */
535 
536 /*
537  * fd.io coding-style-patch-verification: ON
538  *
539  * Local Variables:
540  * eval: (c-set-style "gnu")
541  * End:
542  */
memif_if_t * interfaces
Definition: private.h:207
static __clib_unused u8 * format_memif_device(u8 *s, va_list *args)
Definition: device.c:65
#define clib_min(x, y)
Definition: clib.h:289
#define CLIB_UNUSED(x)
Definition: clib.h:79
static __clib_unused void memif_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:437
memif_tx_func_error_t
Definition: device.c:35
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:534
static_always_inline uword memif_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_ring_type_t type, memif_queue_t *mq, memif_per_thread_data_t *ptd)
Definition: device.c:100
#define PREDICT_TRUE(x)
Definition: clib.h:106
unsigned long u64
Definition: types.h:89
memif_socket_file_t * socket_files
Definition: private.h:210
#define CLIB_MEMORY_STORE_BARRIER()
Definition: clib.h:112
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:574
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:98
static __clib_unused clib_error_t * memif_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_interface_rx_mode mode)
Definition: device.c:462
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:140
uint16_t memif_region_index_t
Definition: memif.h:60
vlib_node_function_t __clib_weak memif_interface_tx_avx512
Definition: device.c:521
static u32 format_get_indent(u8 *s)
Definition: format.h:72
u8 * format(u8 *s, const char *fmt,...)
Definition: format.c:419
u32 * buffers
Definition: private.h:93
vlib_main_t ** vlib_mains
Definition: buffer.c:303
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1110
unsigned char u8
Definition: types.h:56
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
Definition: device.c:88
static __clib_unused u8 * format_memif_tx_trace(u8 *s, va_list *args)
Definition: device.c:81
uint32_t length
Definition: memif.h:152
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
uword CLIB_MULTIARCH_FN() memif_interface_tx(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: device.c:404
#define vec_add1_aligned(V, E, A)
Add 1 element to end of vector (alignment specified).
Definition: vec.h:533
static void __clib_constructor dpdk_interface_tx_multiarch_select(void)
Definition: device.c:524
vnet_hw_interface_rx_mode
Definition: interface.h:51
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:104
#define static_always_inline
Definition: clib.h:93
static __clib_unused void memif_clear_hw_interface_counters(u32 instance)
Definition: device.c:456
uword socket_file_index
Definition: private.h:135
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:113
u16 buffer_size
Definition: private.h:153
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:184
uint16_t flags
Definition: memif.h:149
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
vnet_device_class_t memif_device_class
u32 per_interface_next_index
Definition: private.h:131
unsigned int u32
Definition: types.h:88
memif_region_index_t region
Definition: memif.h:151
int int_fd
Definition: private.h:96
memif_copy_op_t * copy_ops
Definition: private.h:191
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:461
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
Definition: node.c:915
memif_desc_t desc[0]
Definition: memif.h:173
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:108
clib_spinlock_t lockp
Definition: private.h:123
unsigned short u16
Definition: types.h:57
void * data
Definition: private.h:177
#define PREDICT_FALSE(x)
Definition: clib.h:105
u32 node_index
Node index.
Definition: node.h:473
uword( vlib_node_function_t)(struct vlib_main_t *vm, struct vlib_node_runtime_t *node, struct vlib_frame_t *frame)
Definition: node.h:54
#define foreach_memif_tx_func_error
Definition: device.c:31
u16 n_vectors
Definition: node.h:380
static_always_inline uword vlib_get_thread_index(void)
Definition: threads.h:221
#define CLIB_PREFETCH(addr, size, type)
Definition: cache.h:77
vlib_main_t * vm
Definition: buffer.c:294
static __clib_unused clib_error_t * memif_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
Definition: device.c:495
u16 buffer_vec_index
Definition: private.h:180
struct memif_if_t::@446 run
#define clib_memcpy(a, b, c)
Definition: string.h:75
u16 last_tail
Definition: private.h:92
i16 buffer_offset
Definition: private.h:179
memif_region_t * regions
Definition: private.h:139
signed int i32
Definition: types.h:81
#define VNET_SW_INTERFACE_FLAG_ADMIN_UP
Definition: interface.h:660
#define ASSERT(truth)
#define MEMIF_DESC_FLAG_NEXT
Definition: memif.h:150
u32 flags
Definition: private.h:124
memif_ring_t * ring
Definition: private.h:86
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:126
static void * vlib_frame_args(vlib_frame_t *f)
Get pointer to frame scalar data.
Definition: node_funcs.h:284
VNET_DEVICE_CLASS(bond_dev_class)
#define MEMIF_RING_FLAG_MASK_INT
Definition: memif.h:168
static __clib_unused clib_error_t * memif_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: device.c:479
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:23
u64 int_count
Definition: private.h:98
memif_region_offset_t offset
Definition: memif.h:153
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
u64 uword
Definition: types.h:112
void * shm
Definition: private.h:71
memif_interface_id_t id
Definition: private.h:125
memif_log2_ring_size_t log2_ring_size
Definition: private.h:87
static __clib_unused char * memif_tx_func_error_strings[]
Definition: device.c:43
vlib_node_function_t __clib_weak memif_interface_tx_avx2
Definition: device.c:522
uint16_t flags
Definition: memif.h:167
memif_per_thread_data_t * per_thread_data
Definition: private.h:214
u8 data[0]
Packet data.
Definition: buffer.h:172
memif_ring_type_t
Definition: memif.h:47
volatile uint16_t head
Definition: memif.h:169
static_always_inline uword memif_interface_tx_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame, memif_if_t *mif, memif_queue_t *mq, memif_per_thread_data_t *ptd)
Definition: device.c:296
memif_queue_t * rx_queues
Definition: private.h:141
u32 flags
Definition: vhost-user.h:77
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:62
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index, VLIB_BUFFER_IS_TRACED: trace this buffer.
Definition: buffer.h:111
memif_main_t memif_main
Definition: memif.c:43
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.
Definition: buffer_funcs.h:614
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:82
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:57
volatile uint16_t tail
Definition: memif.h:171
u8 * format_memif_device_name(u8 *s, va_list *args)
Definition: device.c:51
#define CLIB_MULTIARCH_FN(fn)
Definition: cpu.h:59