/*
 * FD.io VPP v21.10.1-2-g0a485f517 — Vector Packet Processing
 * memif plugin: device.c (TX / device-class functions)
 */
/*
 *------------------------------------------------------------------
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
17 
#define _GNU_SOURCE
#include <stdint.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/uio.h>

#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>

#include <memif/memif.h>
#include <memif/private.h>
30 
31 #define foreach_memif_tx_func_error \
32  _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots") \
33  _ (ROLLBACK, rollback, ERROR, "no enough space in tx buffers")
34 
35 typedef enum
36 {
37 #define _(f, n, s, d) MEMIF_TX_ERROR_##f,
39 #undef _
42 
44 #define _(f, n, s, d) { #n, d, VL_COUNTER_SEVERITY_##s },
46 #undef _
47 };
48 
49 #ifndef CLIB_MARCH_VARIANT
50 u8 *
51 format_memif_device_name (u8 * s, va_list * args)
52 {
53  u32 dev_instance = va_arg (*args, u32);
54  memif_main_t *mm = &memif_main;
55  memif_if_t *mif = pool_elt_at_index (mm->interfaces, dev_instance);
57 
59  s = format (s, "memif%lu/%lu", msf->socket_id, mif->id);
60  return s;
61 }
62 #endif
63 
64 static u8 *
65 format_memif_device (u8 * s, va_list * args)
66 {
67  u32 dev_instance = va_arg (*args, u32);
68  int verbose = va_arg (*args, int);
69  u32 indent = format_get_indent (s);
70 
71  s = format (s, "MEMIF interface");
72  if (verbose)
73  {
74  s = format (s, "\n%U instance %u", format_white_space, indent + 2,
75  dev_instance);
76  }
77  return s;
78 }
79 
80 static u8 *
81 format_memif_tx_trace (u8 * s, va_list * args)
82 {
83  s = format (s, "Unimplemented...");
84  return s;
85 }
86 
89  u16 buffer_offset, u16 buffer_vec_index)
90 {
91  memif_copy_op_t *co;
93  co->data = data;
94  co->data_len = len;
95  co->buffer_offset = buffer_offset;
96  co->buffer_vec_index = buffer_vec_index;
97 }
98 
101  u32 *buffers, memif_if_t *mif,
104 {
105  memif_ring_t *ring;
106  u32 n_copy_op;
107  u16 ring_size, mask, slot, free_slots;
108  int n_retries = 5;
109  vlib_buffer_t *b0, *b1, *b2, *b3;
110  memif_copy_op_t *co;
111  memif_region_index_t last_region = ~0;
112  void *last_region_shm = 0;
113  u16 head, tail;
114 
115  ring = mq->ring;
116  ring_size = 1 << mq->log2_ring_size;
117  mask = ring_size - 1;
118 
119 retry:
120 
121  if (type == MEMIF_RING_S2M)
122  {
123  slot = head = ring->head;
124  tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
125  mq->last_tail += tail - mq->last_tail;
126  free_slots = ring_size - head + mq->last_tail;
127  }
128  else
129  {
130  slot = tail = ring->tail;
131  head = __atomic_load_n (&ring->head, __ATOMIC_ACQUIRE);
132  mq->last_tail += tail - mq->last_tail;
133  free_slots = head - tail;
134  }
135 
136  while (n_left && free_slots)
137  {
138  memif_desc_t *d0;
139  void *mb0;
140  i32 src_off;
141  u32 bi0, dst_off, src_left, dst_left, bytes_to_copy;
142  u32 saved_ptd_copy_ops_len = _vec_len (ptd->copy_ops);
143  u32 saved_ptd_buffers_len = _vec_len (ptd->buffers);
144  u16 saved_slot = slot;
145 
146  clib_prefetch_load (&ring->desc[(slot + 8) & mask]);
147 
148  d0 = &ring->desc[slot & mask];
149  if (PREDICT_FALSE (last_region != d0->region))
150  {
151  last_region_shm = mif->regions[d0->region].shm;
152  last_region = d0->region;
153  }
154  mb0 = last_region_shm + d0->offset;
155 
156  dst_off = 0;
157 
158  /* slave is the producer, so it should be able to reset buffer length */
159  dst_left = (type == MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
160 
161  if (PREDICT_TRUE (n_left >= 4))
162  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
163  bi0 = buffers[0];
164 
165  next_in_chain:
166 
167  b0 = vlib_get_buffer (vm, bi0);
168  src_off = b0->current_data;
169  src_left = b0->current_length;
170 
171  while (src_left)
172  {
173  if (PREDICT_FALSE (dst_left == 0))
174  {
175  if (free_slots)
176  {
177  slot++;
178  free_slots--;
179  d0->length = dst_off;
181  d0 = &ring->desc[slot & mask];
182  dst_off = 0;
183  dst_left =
184  (type ==
185  MEMIF_RING_S2M) ? mif->run.buffer_size : d0->length;
186 
187  if (PREDICT_FALSE (last_region != d0->region))
188  {
189  last_region_shm = mif->regions[d0->region].shm;
190  last_region = d0->region;
191  }
192  mb0 = last_region_shm + d0->offset;
193  }
194  else
195  {
196  /* we need to rollback vectors before bailing out */
197  _vec_len (ptd->buffers) = saved_ptd_buffers_len;
198  _vec_len (ptd->copy_ops) = saved_ptd_copy_ops_len;
199  vlib_error_count (vm, node->node_index,
200  MEMIF_TX_ERROR_ROLLBACK, 1);
201  slot = saved_slot;
202  goto no_free_slots;
203  }
204  }
205  bytes_to_copy = clib_min (src_left, dst_left);
206  memif_add_copy_op (ptd, mb0 + dst_off, bytes_to_copy, src_off,
207  vec_len (ptd->buffers));
209  src_off += bytes_to_copy;
210  dst_off += bytes_to_copy;
211  src_left -= bytes_to_copy;
212  dst_left -= bytes_to_copy;
213  }
214 
215  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
216  {
217  bi0 = b0->next_buffer;
218  goto next_in_chain;
219  }
220 
221  d0->length = dst_off;
222  d0->flags = 0;
223 
224  free_slots -= 1;
225  slot += 1;
226 
227  buffers++;
228  n_left--;
229  }
230 no_free_slots:
231 
232  /* copy data */
233  n_copy_op = vec_len (ptd->copy_ops);
234  co = ptd->copy_ops;
235  while (n_copy_op >= 8)
236  {
237  clib_prefetch_load (co[4].data);
238  clib_prefetch_load (co[5].data);
239  clib_prefetch_load (co[6].data);
240  clib_prefetch_load (co[7].data);
241 
242  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
243  b1 = vlib_get_buffer (vm, ptd->buffers[co[1].buffer_vec_index]);
244  b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]);
245  b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]);
246 
247  clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
248  co[0].data_len);
249  clib_memcpy_fast (co[1].data, b1->data + co[1].buffer_offset,
250  co[1].data_len);
251  clib_memcpy_fast (co[2].data, b2->data + co[2].buffer_offset,
252  co[2].data_len);
253  clib_memcpy_fast (co[3].data, b3->data + co[3].buffer_offset,
254  co[3].data_len);
255 
256  co += 4;
257  n_copy_op -= 4;
258  }
259  while (n_copy_op)
260  {
261  b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]);
262  clib_memcpy_fast (co[0].data, b0->data + co[0].buffer_offset,
263  co[0].data_len);
264  co += 1;
265  n_copy_op -= 1;
266  }
267 
268  vec_reset_length (ptd->copy_ops);
269  vec_reset_length (ptd->buffers);
270 
271  if (type == MEMIF_RING_S2M)
272  __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
273  else
274  __atomic_store_n (&ring->tail, slot, __ATOMIC_RELEASE);
275 
276  if (n_left && n_retries--)
277  goto retry;
278 
279  return n_left;
280 }
281 
284  u32 *buffers, memif_if_t *mif, memif_queue_t *mq,
286 {
287  memif_ring_t *ring = mq->ring;
288  u16 slot, free_slots, n_free;
289  u16 ring_size = 1 << mq->log2_ring_size;
290  u16 mask = ring_size - 1;
291  int n_retries = 5;
292  vlib_buffer_t *b0;
293  u16 head, tail;
294 
295 retry:
296  tail = __atomic_load_n (&ring->tail, __ATOMIC_ACQUIRE);
297  slot = head = ring->head;
298 
299  n_free = tail - mq->last_tail;
300  if (n_free >= 16)
301  {
303  mq->last_tail & mask,
304  ring_size, n_free);
305  mq->last_tail += n_free;
306  }
307 
308  free_slots = ring_size - head + mq->last_tail;
309 
310  while (n_left && free_slots)
311  {
312  u16 s0;
313  u16 slots_in_packet = 1;
314  memif_desc_t *d0;
315  u32 bi0;
316 
317  clib_prefetch_store (&ring->desc[(slot + 8) & mask]);
318 
319  if (PREDICT_TRUE (n_left >= 4))
320  vlib_prefetch_buffer_header (vlib_get_buffer (vm, buffers[3]), LOAD);
321 
322  bi0 = buffers[0];
323 
324  next_in_chain:
325  s0 = slot & mask;
326  d0 = &ring->desc[s0];
327  mq->buffers[s0] = bi0;
328  b0 = vlib_get_buffer (vm, bi0);
329 
330  d0->region = b0->buffer_pool_index + 1;
331  d0->offset = (void *) b0->data + b0->current_data -
332  mif->regions[d0->region].shm;
333  d0->length = b0->current_length;
334 
335  free_slots--;
336  slot++;
337 
338  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_NEXT_PRESENT))
339  {
340  if (PREDICT_FALSE (free_slots == 0))
341  {
342  /* revert to last fully processed packet */
343  free_slots += slots_in_packet;
344  slot -= slots_in_packet;
345  goto no_free_slots;
346  }
347 
349  bi0 = b0->next_buffer;
350 
351  /* next */
352  slots_in_packet++;
353  goto next_in_chain;
354  }
355 
356  d0->flags = 0;
357 
358  /* next from */
359  buffers++;
360  n_left--;
361  }
362 no_free_slots:
363 
364  __atomic_store_n (&ring->head, slot, __ATOMIC_RELEASE);
365 
366  if (n_left && n_retries--)
367  goto retry;
368 
369  return n_left;
370 }
371 
375 {
377  vnet_interface_output_runtime_t *rund = (void *) node->runtime_data;
379  memif_queue_t *mq;
382  thread_index);
383  u8 tx_queues = vec_len (mif->tx_queues);
384  uword n_left;
385 
386  if (tx_queues < vlib_get_n_threads ())
387  {
388  ASSERT (tx_queues > 0);
389  mq = vec_elt_at_index (mif->tx_queues, thread_index % tx_queues);
390  }
391  else
392  mq = vec_elt_at_index (mif->tx_queues, thread_index);
393 
394  clib_spinlock_lock_if_init (&mif->lockp);
395 
397  n_left = frame->n_vectors;
398  if (mif->flags & MEMIF_IF_FLAG_ZERO_COPY)
399  n_left =
400  memif_interface_tx_zc_inline (vm, node, from, mif, mq, ptd, n_left);
401  else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE)
403  mq, ptd, n_left);
404  else
406  mq, ptd, n_left);
407 
408  clib_spinlock_unlock_if_init (&mif->lockp);
409 
410  if (n_left)
411  vlib_error_count (vm, node->node_index, MEMIF_TX_ERROR_NO_FREE_SLOTS,
412  n_left);
413 
414  if ((mq->ring->flags & MEMIF_RING_FLAG_MASK_INT) == 0 && mq->int_fd > -1)
415  {
416  u64 b = 1;
417  int __clib_unused r = write (mq->int_fd, &b, sizeof (b));
418  mq->int_count++;
419  }
420 
421  if ((mif->flags & MEMIF_IF_FLAG_ZERO_COPY) == 0)
422  vlib_buffer_free (vm, from, frame->n_vectors);
423  else if (n_left)
424  vlib_buffer_free (vm, from + frame->n_vectors - n_left, n_left);
425 
426  return frame->n_vectors - n_left;
427 }
428 
429 static void
431  u32 node_index)
432 {
433  memif_main_t *apm = &memif_main;
434  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
436 
437  /* Shut off redirection */
438  if (node_index == ~0)
439  {
441  return;
442  }
443 
446 }
447 
448 static void
450 {
451  /* Nothing for now */
452 }
453 
454 static clib_error_t *
457 {
458  memif_main_t *mm = &memif_main;
459  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
461  memif_queue_t *mq = vec_elt_at_index (mif->rx_queues, qid);
462 
465  else
467 
468  return 0;
469 }
470 
471 static clib_error_t *
473  u32 hw_if_index,
474  struct vnet_sw_interface_t *st, int is_add)
475 {
476  /* Nothing for now */
477  return 0;
478 }
479 
480 /* *INDENT-OFF* */
482  .name = "memif",
483  .format_device_name = format_memif_device_name,
484  .format_device = format_memif_device,
485  .format_tx_trace = format_memif_tx_trace,
486  .tx_function_n_errors = MEMIF_TX_N_ERROR,
487  .tx_function_error_counters = memif_tx_func_error_counters,
488  .rx_redirect_to_node = memif_set_interface_next_node,
489  .clear_counters = memif_clear_hw_interface_counters,
490  .admin_up_down_function = memif_interface_admin_up_down,
491  .subif_add_del_function = memif_subif_add_del_function,
492  .rx_mode_change_function = memif_interface_rx_mode_change,
493 };
494 
495 /* *INDENT-ON* */
496 
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
vec_reset_length
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
Definition: vec_bootstrap.h:194
vlib.h
vlib_buffer_t::next_buffer
u32 next_buffer
Next buffer for this linked-list of buffers.
Definition: buffer.h:149
vlib_buffer_free
static void vlib_buffer_free(vlib_main_t *vm, u32 *buffers, u32 n_buffers)
Free buffers Frees the entire buffer chain for each buffer.
Definition: buffer_funcs.h:979
vlib_buffer_t::buffer_pool_index
u8 buffer_pool_index
index of buffer pool this buffer belongs.
Definition: buffer.h:142
MEMIF_RING_S2M
@ MEMIF_RING_S2M
Definition: memif.h:51
thread_index
u32 thread_index
Definition: nat44_ei_hairpinning.c:495
vnet_sw_interface_t
Definition: interface.h:869
frame
vlib_main_t vlib_node_runtime_t vlib_frame_t * frame
Definition: nat44_ei.c:3048
vlib_prefetch_buffer_header
#define vlib_prefetch_buffer_header(b, type)
Prefetch buffer metadata.
Definition: buffer.h:231
memif_if_t::socket_file_index
uword socket_file_index
Definition: private.h:178
vlib_node_add_next
static uword vlib_node_add_next(vlib_main_t *vm, uword node, uword next_node)
Definition: node_funcs.h:1177
memif_ring_t::desc
memif_desc_t desc[]
Definition: memif.h:175
clib_spinlock_lock_if_init
static_always_inline void clib_spinlock_lock_if_init(clib_spinlock_t *p)
Definition: lock.h:106
vlib_get_buffer
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
Definition: buffer_funcs.h:111
memif_interface_tx_inline
static_always_inline uword memif_interface_tx_inline(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, memif_if_t *mif, memif_ring_type_t type, memif_queue_t *mq, memif_per_thread_data_t *ptd, u32 n_left)
Definition: device.c:100
memif_if_t::per_interface_next_index
u32 per_interface_next_index
Definition: private.h:174
pool_elt_at_index
#define pool_elt_at_index(p, i)
Returns pointer to element at given index.
Definition: pool.h:549
memif_copy_op_t::data_len
u32 data_len
Definition: private.h:222
nat44_ei_main_s::interfaces
nat44_ei_interface_t * interfaces
Definition: nat44_ei.h:340
memif_if_t::regions
memif_region_t * regions
Definition: private.h:182
memif.h
node
vlib_main_t vlib_node_runtime_t * node
Definition: nat44_ei.c:3047
memif_queue_t::last_tail
u16 last_tail
Definition: private.h:133
memif_main_t::socket_files
memif_socket_file_t * socket_files
Definition: private.h:253
VNET_DEVICE_CLASS
VNET_DEVICE_CLASS(af_xdp_device_class)
memif_if_t::id
memif_interface_id_t id
Definition: private.h:168
memif_main_t::per_thread_data
memif_per_thread_data_t * per_thread_data
Definition: private.h:257
memif_input_node
vlib_node_registration_t memif_input_node
(constructor) VLIB_REGISTER_NODE (memif_input_node)
Definition: node.c:884
u16
unsigned short u16
Definition: types.h:57
memif_ring_t::flags
uint16_t flags
Definition: memif.h:169
mode
vl_api_tunnel_mode_t mode
Definition: gre.api:48
memif_ring_t::tail
volatile uint16_t tail
Definition: memif.h:173
VNET_HW_IF_RX_MODE_POLLING
@ VNET_HW_IF_RX_MODE_POLLING
Definition: interface.h:56
vm
vlib_main_t * vm
X-connect all packets from the HOST to the PHY.
Definition: nat44_ei.c:3047
vlib_error_desc_t
Definition: error.h:54
node_index
node node_index
Definition: interface_output.c:440
memif_if_t::rx_queues
memif_queue_t * rx_queues
Definition: private.h:184
memif_interface_admin_up_down
clib_error_t * memif_interface_admin_up_down(vnet_main_t *vnm, u32 hw_if_index, u32 flags)
Definition: memif.c:1076
memif_main_t
Definition: private.h:242
vnet_hw_interface_t::dev_instance
u32 dev_instance
Definition: interface.h:660
memif_queue_t::ring
memif_ring_t * ring
Definition: private.h:127
r
vnet_hw_if_output_node_runtime_t * r
Definition: interface_output.c:1089
format_memif_device
static u8 * format_memif_device(u8 *s, va_list *args)
Definition: device.c:65
vlib_error_count
static void vlib_error_count(vlib_main_t *vm, uword node_index, uword counter, uword increment)
Definition: error_funcs.h:57
vlib_frame_t
Definition: node.h:372
clib_memcpy_fast
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
Definition: string.h:92
ethernet.h
memif_desc_t::region
memif_region_index_t region
Definition: memif.h:153
VNET_DEVICE_CLASS_TX_FN
#define VNET_DEVICE_CLASS_TX_FN(devclass)
Definition: interface.h:317
memif_queue_t::log2_ring_size
memif_log2_ring_size_t log2_ring_size
Definition: private.h:128
i32
signed int i32
Definition: types.h:77
memif_queue_t::int_count
u64 int_count
Definition: private.h:140
memif_set_interface_next_node
static void memif_set_interface_next_node(vnet_main_t *vnm, u32 hw_if_index, u32 node_index)
Definition: device.c:430
vlib_buffer_t::current_data
i16 current_data
signed offset in data[], pre_data[] that we are currently processing.
Definition: buffer.h:119
memif_if_t::run
struct memif_if_t::@729 run
memif_main_t::interfaces
memif_if_t * interfaces
Definition: private.h:250
vec_len
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
Definition: vec_bootstrap.h:142
memif_add_copy_op
static_always_inline void memif_add_copy_op(memif_per_thread_data_t *ptd, void *data, u32 len, u16 buffer_offset, u16 buffer_vec_index)
Definition: device.c:88
memif_socket_file_t::socket_id
u32 socket_id
Definition: private.h:94
len
u8 len
Definition: ip_types.api:103
slot
u8 slot
Definition: pci_types.api:22
vnet_interface_output_runtime_t::dev_instance
u32 dev_instance
Definition: interface_funcs.h:479
memif_per_thread_data_t
Definition: private.h:229
memif_ring_type_t
memif_ring_type_t
Definition: memif.h:49
vec_elt_at_index
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
Definition: vec_bootstrap.h:203
vnet_get_hw_interface
static vnet_hw_interface_t * vnet_get_hw_interface(vnet_main_t *vnm, u32 hw_if_index)
Definition: interface_funcs.h:44
PREDICT_FALSE
#define PREDICT_FALSE(x)
Definition: clib.h:124
vlib_frame_vector_args
static void * vlib_frame_vector_args(vlib_frame_t *f)
Get pointer to frame vector data.
Definition: node_funcs.h:301
vnet_hw_if_rx_mode
vnet_hw_if_rx_mode
Definition: interface.h:53
static_always_inline
#define static_always_inline
Definition: clib.h:112
uword
u64 uword
Definition: types.h:112
if
if(node->flags &VLIB_NODE_FLAG_TRACE) vnet_interface_output_trace(vm
MEMIF_DESC_FLAG_NEXT
#define MEMIF_DESC_FLAG_NEXT
Definition: memif.h:152
memif_device_class
VNET_DEVICE_CLASS_TX_FN() memif_device_class(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
Definition: device.c:372
vlib_main_t::thread_index
u32 thread_index
Definition: main.h:215
format_memif_device_name
u8 * format_memif_device_name(u8 *s, va_list *args)
Definition: device.c:51
mask
vl_api_pnat_mask_t mask
Definition: pnat.api:45
memif_region_t::shm
void * shm
Definition: private.h:111
memif_region_index_t
uint16_t memif_region_index_t
Definition: memif.h:62
clib_min
#define clib_min(x, y)
Definition: clib.h:342
CLIB_CACHE_LINE_BYTES
#define CLIB_CACHE_LINE_BYTES
Definition: cache.h:58
memif_desc_t::flags
uint16_t flags
Definition: memif.h:151
memif_per_thread_data_t::copy_ops
memif_copy_op_t * copy_ops
Definition: private.h:235
vlib_buffer_t::current_length
u16 current_length
Nbytes between current data and the end of this buffer.
Definition: buffer.h:122
MEMIF_TX_N_ERROR
@ MEMIF_TX_N_ERROR
Definition: device.c:40
MEMIF_RING_M2S
@ MEMIF_RING_M2S
Definition: memif.h:52
data
u8 data[128]
Definition: ipsec_types.api:95
memif_queue_t
Definition: private.h:123
vec_add2_aligned
#define vec_add2_aligned(V, P, N, A)
Add N elements to end of vector V, return pointer to new elements in P.
Definition: vec.h:656
memif_tx_func_error_t
memif_tx_func_error_t
Definition: device.c:35
memif_desc_t
Definition: memif.h:149
vnet_hw_interface_t
Definition: interface.h:638
vnet_main_t
Definition: vnet.h:76
memif_copy_op_t::buffer_vec_index
u16 buffer_vec_index
Definition: private.h:224
memif_if_t::buffer_size
u16 buffer_size
Definition: private.h:196
u64
unsigned long u64
Definition: types.h:89
format
description fragment has unexpected format
Definition: map.api:433
ASSERT
#define ASSERT(truth)
Definition: error_bootstrap.h:69
vec_add1_aligned
#define vec_add1_aligned(V, E, A)
Add 1 element to end of vector (alignment specified).
Definition: vec.h:615
n_free
u32 n_free
Definition: interface_output.c:1096
format_get_indent
static u32 format_get_indent(u8 *s)
Definition: format.h:72
memif_queue_t::buffers
u32 * buffers
Definition: private.h:134
u32
unsigned int u32
Definition: types.h:88
clib_prefetch_load
static_always_inline void clib_prefetch_load(void *p)
Definition: cache.h:92
n_left
u32 n_left
Definition: interface_output.c:1096
instance
u32 instance
Definition: gre.api:51
private.h
memif_copy_op_t
Definition: private.h:218
nm
nat44_ei_main_t * nm
Definition: nat44_ei_hairpinning.c:413
memif_main
memif_main_t memif_main
Definition: memif.c:43
memif_copy_op_t::data
void * data
Definition: private.h:221
vlib_main_t
Definition: main.h:102
memif_interface_rx_mode_change
static clib_error_t * memif_interface_rx_mode_change(vnet_main_t *vnm, u32 hw_if_index, u32 qid, vnet_hw_if_rx_mode mode)
Definition: device.c:455
vlib_get_n_threads
static u32 vlib_get_n_threads()
Definition: global_funcs.h:23
vlib_get_main
static vlib_main_t * vlib_get_main(void)
Definition: global_funcs.h:38
b
vlib_buffer_t ** b
Definition: nat44_ei_out2in.c:717
u8
unsigned char u8
Definition: types.h:56
clib_error_t
Definition: clib_error.h:21
memif_desc_t::length
uint32_t length
Definition: memif.h:154
unix.h
memif_clear_hw_interface_counters
static void memif_clear_hw_interface_counters(u32 instance)
Definition: device.c:449
memif_if_t
Definition: private.h:163
vlib_buffer_t::data
u8 data[]
Packet data.
Definition: buffer.h:204
memif_per_thread_data_t::buffers
u32 * buffers
Definition: private.h:236
MEMIF_RING_FLAG_MASK_INT
#define MEMIF_RING_FLAG_MASK_INT
Definition: memif.h:170
foreach_memif_tx_func_error
#define foreach_memif_tx_func_error
Definition: device.c:31
clib_prefetch_store
static_always_inline void clib_prefetch_store(void *p)
Definition: cache.h:98
memif_interface_tx_zc_inline
static_always_inline uword memif_interface_tx_zc_inline(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, memif_if_t *mif, memif_queue_t *mq, memif_per_thread_data_t *ptd, u32 n_left)
Definition: device.c:283
vlib_node_runtime_t
Definition: node.h:454
clib_spinlock_unlock_if_init
static_always_inline void clib_spinlock_unlock_if_init(clib_spinlock_t *p)
Definition: lock.h:129
memif_socket_file_t
Definition: private.h:91
memif_desc_t::offset
memif_region_offset_t offset
Definition: memif.h:155
from
from
Definition: nat44_ei_hairpinning.c:415
PREDICT_TRUE
#define PREDICT_TRUE(x)
Definition: clib.h:125
memif_copy_op_t::buffer_offset
i16 buffer_offset
Definition: private.h:223
memif_ring_t
Definition: memif.h:165
format_memif_tx_trace
static u8 * format_memif_tx_trace(u8 *s, va_list *args)
Definition: device.c:81
memif_subif_add_del_function
static clib_error_t * memif_subif_add_del_function(vnet_main_t *vnm, u32 hw_if_index, struct vnet_sw_interface_t *st, int is_add)
Definition: device.c:472
memif_ring_t::head
volatile uint16_t head
Definition: memif.h:171
type
vl_api_fib_path_type_t type
Definition: fib_types.api:123
format_white_space
u8 * format_white_space(u8 *s, va_list *va)
Definition: std-formats.c:129
memif_queue_t::int_fd
int int_fd
Definition: private.h:138
memif_tx_func_error_counters
static vlib_error_desc_t memif_tx_func_error_counters[]
Definition: device.c:43
vlib_buffer_t::flags
u32 flags
buffer flags: VLIB_BUFFER_FREE_LIST_INDEX_MASK: bits used to store free list index,...
Definition: buffer.h:133
vlib_buffer_t
VLIB buffer representation.
Definition: buffer.h:111
vnet_interface_output_runtime_t
Definition: interface_funcs.h:475
vlib_buffer_free_from_ring_no_next
static void vlib_buffer_free_from_ring_no_next(vlib_main_t *vm, u32 *ring, u32 start, u32 ring_size, u32 n_buffers)
Free buffers from ring without freeing tail buffers.
Definition: buffer_funcs.h:1051