FD.io VPP  v21.06-3-gbb25fbf28
Vector Packet Processing
buffer.c
/*
 * Copyright (c) 2017-2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <unistd.h>
#include <errno.h>

#include <rte_config.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_vfio.h>
#include <rte_version.h>

#include <vlib/vlib.h>
#include <dpdk/buffer.h>

STATIC_ASSERT (VLIB_BUFFER_PRE_DATA_SIZE == RTE_PKTMBUF_HEADROOM,
               "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");

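/*
 * Each buffer in a DPDK-backed pool is simultaneously an rte_mbuf and a
 * vlib_buffer_t: the vlib_buffer_t lives in the mbuf private area and the
 * packet data follows it.  DPDK's default headroom and VPP's pre-data
 * (rewrite) space therefore must be the same size, so that an mbuf with
 * data_off == RTE_PKTMBUF_HEADROOM and a vlib_buffer_t with
 * current_data == 0 point at the same byte.
 */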
extern struct rte_mbuf *dpdk_mbuf_template_by_pool_index;
#ifndef CLIB_MARCH_VARIANT
struct rte_mempool **dpdk_mempool_by_buffer_pool_index = 0;
struct rte_mempool **dpdk_no_cache_mempool_by_buffer_pool_index = 0;
struct rte_mbuf *dpdk_mbuf_template_by_pool_index = 0;

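/*
 * Per buffer pool state, indexed by bp->index: the cached ("vpp") mempool,
 * the non-cached ("vpp-no-cache") mempool and the rte_mbuf header template
 * captured when the pool is initialized.
 *
 * dpdk_buffer_pool_init () wraps an existing VPP buffer pool in two empty
 * rte_mempools, binds them to the ops registered below, hand-populates the
 * element and memory chunk lists from VPP's own buffers and physmem pages,
 * and maps those pages for VFIO DMA when a physical device is present.
 */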
clib_error_t *
dpdk_buffer_pool_init (vlib_main_t * vm, vlib_buffer_pool_t * bp)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
  struct rte_mempool *mp, *nmp;
  struct rte_pktmbuf_pool_private priv;
  enum rte_iova_mode iova_mode;
  u32 i;
  u8 *name = 0;

  u32 elt_size =
    sizeof (struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;

  /* create empty mempools */
  vec_validate_aligned (dpdk_mempool_by_buffer_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (dpdk_no_cache_mempool_by_buffer_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);

  /* normal mempool */
  name = format (name, "vpp pool %u%c", bp->index, 0);
  mp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
                                 elt_size, 512, sizeof (priv),
                                 bp->numa_node, 0);
  if (!mp)
    {
      vec_free (name);
      return clib_error_return (0,
                                "failed to create normal mempool for numa node %u",
                                bp->index);
    }
  vec_reset_length (name);

  /* non-cached mempool */
  name = format (name, "vpp pool %u (no cache)%c", bp->index, 0);
  nmp = rte_mempool_create_empty ((char *) name, bp->n_buffers,
                                  elt_size, 0, sizeof (priv),
                                  bp->numa_node, 0);
  if (!nmp)
    {
      rte_mempool_free (mp);
      vec_free (name);
      return clib_error_return (0,
                                "failed to create non-cache mempool for numa node %u",
                                bp->index);
    }
  vec_free (name);

  dpdk_mempool_by_buffer_pool_index[bp->index] = mp;
  dpdk_no_cache_mempool_by_buffer_pool_index[bp->index] = nmp;

  mp->pool_id = nmp->pool_id = bp->index;

  rte_mempool_set_ops_byname (mp, "vpp", NULL);
  rte_mempool_set_ops_byname (nmp, "vpp-no-cache", NULL);

  /* Call the mempool priv initializer */
  memset (&priv, 0, sizeof (priv));
  priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
    vlib_buffer_get_default_data_size (vm);
  priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
  rte_pktmbuf_pool_init (mp, &priv);
  rte_pktmbuf_pool_init (nmp, &priv);

  iova_mode = rte_eal_iova_mode ();

  /* populate mempool object buffer header */
  for (i = 0; i < bp->n_buffers; i++)
    {
      struct rte_mempool_objhdr *hdr;
      vlib_buffer_t *b = vlib_get_buffer (vm, bp->buffers[i]);
      struct rte_mbuf *mb = rte_mbuf_from_vlib_buffer (b);
      hdr = (struct rte_mempool_objhdr *) RTE_PTR_SUB (mb, sizeof (*hdr));
      hdr->mp = mp;
      hdr->iova = (iova_mode == RTE_IOVA_VA) ?
        pointer_to_uword (mb) : vlib_physmem_get_pa (vm, mb);
      STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
      STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
      mp->populated_size++;
      nmp->populated_size++;
    }

  /* call the object initializers */
  rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);

  /* create mbuf header template from the first buffer in the pool */
  vec_validate_aligned (dpdk_mbuf_template_by_pool_index, bp->index,
                        CLIB_CACHE_LINE_BYTES);
  clib_memcpy (vec_elt_at_index (dpdk_mbuf_template_by_pool_index, bp->index),
               rte_mbuf_from_vlib_buffer (vlib_buffer_ptr_from_index
                                          (buffer_mem_start, *bp->buffers,
                                           0)), sizeof (struct rte_mbuf));

  for (i = 0; i < bp->n_buffers; i++)
    {
      vlib_buffer_t *b;
      b = vlib_buffer_ptr_from_index (buffer_mem_start, bp->buffers[i], 0);
      vlib_buffer_copy_template (b, &bp->buffer_template);
    }

  /* map DMA pages if at least one physical device exists */
  if (rte_eth_dev_count_avail ())
    {
      uword i;
      size_t page_sz;
      vlib_physmem_map_t *pm;
      int do_vfio_map = 1;

      pm = vlib_physmem_get_map (vm, bp->physmem_map_index);
      page_sz = 1ULL << pm->log2_page_size;

      for (i = 0; i < pm->n_pages; i++)
        {
          char *va = ((char *) pm->base) + i * page_sz;
          uword pa = (iova_mode == RTE_IOVA_VA) ?
            pointer_to_uword (va) : pm->page_table[i];

          if (do_vfio_map &&
#if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
              rte_vfio_dma_map (pointer_to_uword (va), pa, page_sz))
#else
              rte_vfio_container_dma_map (RTE_VFIO_DEFAULT_CONTAINER_FD,
                                          pointer_to_uword (va), pa, page_sz))
#endif
            do_vfio_map = 0;

          struct rte_mempool_memhdr *memhdr;
          memhdr = clib_mem_alloc (sizeof (*memhdr));
          memhdr->mp = mp;
          memhdr->addr = va;
          memhdr->iova = pa;
          memhdr->len = page_sz;
          memhdr->free_cb = 0;
          memhdr->opaque = 0;

          STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
          mp->nb_mem_chunks++;
        }
    }

  return 0;
}

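/*
 * The alloc/free mempool ops below are intentionally empty: the element and
 * memory chunk lists are populated by dpdk_buffer_pool_init () above from
 * buffers and physmem pages that VPP already owns, so there is nothing for
 * DPDK to allocate or release.  The bare clib_warning () calls only leave a
 * trace in the log if they are ever invoked.
 */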
static int
dpdk_ops_vpp_alloc (struct rte_mempool *mp)
{
  clib_warning ("");
  return 0;
}

static void
dpdk_ops_vpp_free (struct rte_mempool *mp)
{
  clib_warning ("");
}

#endif

static_always_inline void
dpdk_ops_vpp_enqueue_one (vlib_buffer_t * bt, void *obj)
{
  /* Only non-replicated packets (b->ref_count == 1) expected */

  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  ASSERT (b->ref_count == 1);
  ASSERT (b->buffer_pool_index == bt->buffer_pool_index);
  vlib_buffer_copy_template (b, bt);
}

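/*
 * "vpp" mempool enqueue op: DPDK calls this when mbufs are returned to the
 * pool (e.g. rte_pktmbuf_free () on a transmitted packet).  The buffer
 * template is re-applied to every buffer and the underlying vlib buffer
 * indices are handed back to the VPP pool in batches of up to 32.
 */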
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue) (struct rte_mempool * mp,
                                          void *const *obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  u8 buffer_pool_index = mp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  u32 bufs[batch_size];
  u32 n_left = n;
  void *const *obj = obj_table;

  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n_left >= 4)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[1]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[2]);
      dpdk_ops_vpp_enqueue_one (&bt, obj[3]);
      obj += 4;
      n_left -= 4;
    }

  while (n_left)
    {
      dpdk_ops_vpp_enqueue_one (&bt, obj[0]);
      obj += 1;
      n_left -= 1;
    }

  while (n >= batch_size)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
                                           batch_size,
                                           sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);
      n -= batch_size;
      obj_table += batch_size;
    }

  if (n)
    {
      vlib_get_buffer_indices_with_offset (vm, (void **) obj_table, bufs,
                                           n, sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n);
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue);

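/*
 * No-cache variant: used for buffers whose vlib ref_count may be greater
 * than one.  The count is decremented atomically and the buffer is returned
 * to the VPP pool only when the last reference is dropped.
 */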
static_always_inline void
dpdk_ops_vpp_enqueue_no_cache_one (vlib_main_t * vm, struct rte_mempool *old,
                                   struct rte_mempool *new, void *obj,
                                   vlib_buffer_t * bt)
{
  struct rte_mbuf *mb = obj;
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);

  if (clib_atomic_sub_fetch (&b->ref_count, 1) == 0)
    {
      u32 bi = vlib_get_buffer_index (vm, b);
      vlib_buffer_copy_template (b, bt);
      vlib_buffer_pool_put (vm, bt->buffer_pool_index, &bi, 1);
      return;
    }
}

int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_enqueue_no_cache) (struct rte_mempool * cmp,
                                                   void *const *obj_table,
                                                   unsigned n)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t bt;
  struct rte_mempool *mp;
  mp = dpdk_mempool_by_buffer_pool_index[cmp->pool_id];
  u8 buffer_pool_index = cmp->pool_id;
  vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
  vlib_buffer_copy_template (&bt, &bp->buffer_template);

  while (n >= 4)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[1], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[2], &bt);
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[3], &bt);
      obj_table += 4;
      n -= 4;
    }

  while (n)
    {
      dpdk_ops_vpp_enqueue_no_cache_one (vm, cmp, mp, obj_table[0], &bt);
      obj_table += 1;
      n -= 1;
    }

  return 0;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_enqueue_no_cache);

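/*
 * Re-arm freshly allocated mbufs from the per-pool template: bytes 0..15
 * (buf_addr, buf_iova) are per-element and preserved, everything else
 * (refcnt, data_off, lengths, ol_flags, ...) is overwritten with the
 * template captured at pool init time.
 */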
static_always_inline void
dpdk_mbuf_init_from_template (struct rte_mbuf **mba, struct rte_mbuf *mt,
                              int count)
{
  /* Assumptions about rte_mbuf layout */
  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_addr, 0);
  STATIC_ASSERT_OFFSET_OF (struct rte_mbuf, buf_iova, 8);
  STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
  STATIC_ASSERT_SIZEOF_ELT (struct rte_mbuf, buf_iova, 8);
  STATIC_ASSERT_SIZEOF (struct rte_mbuf, 128);

  while (count--)
    {
      struct rte_mbuf *mb = mba[0];
      int i;
      /* bytes 0 .. 15 hold buf_addr and buf_iova which we need to preserve */
      /* copy bytes 16 .. 31 */
      *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);

      /* copy bytes 32 .. 127 */
#ifdef CLIB_HAVE_VEC256
      for (i = 1; i < 4; i++)
        *((u8x32 *) mb + i) = *((u8x32 *) mt + i);
#else
      for (i = 2; i < 8; i++)
        *((u8x16 *) mb + i) = *((u8x16 *) mt + i);
#endif
      mba++;
    }
}

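/*
 * "vpp" mempool dequeue op: allocations from the DPDK side are served from
 * the VPP buffer pool and the mbuf headers are re-initialized from the
 * template.  DPDK expects all-or-nothing semantics, so on a partial
 * allocation everything obtained so far is returned and -ENOENT is reported.
 */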
int
CLIB_MULTIARCH_FN (dpdk_ops_vpp_dequeue) (struct rte_mempool * mp,
                                          void **obj_table, unsigned n)
{
  const int batch_size = 32;
  vlib_main_t *vm = vlib_get_main ();
  u32 bufs[batch_size], total = 0, n_alloc = 0;
  u8 buffer_pool_index = mp->pool_id;
  void **obj = obj_table;
  struct rte_mbuf t = dpdk_mbuf_template_by_pool_index[buffer_pool_index];

  while (n >= batch_size)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, batch_size,
                                             buffer_pool_index);
      if (n_alloc != batch_size)
        goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, batch_size,
                                    -(i32) sizeof (struct rte_mbuf));
      dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, batch_size);
      total += batch_size;
      obj += batch_size;
      n -= batch_size;
    }

  if (n)
    {
      n_alloc = vlib_buffer_alloc_from_pool (vm, bufs, n, buffer_pool_index);

      if (n_alloc != n)
        goto alloc_fail;

      vlib_get_buffers_with_offset (vm, bufs, obj, n,
                                    -(i32) sizeof (struct rte_mbuf));
      dpdk_mbuf_init_from_template ((struct rte_mbuf **) obj, &t, n);
    }

  return 0;

alloc_fail:
  /* dpdk doesn't support partial alloc, so we need to return what we
     already got */
  if (n_alloc)
    vlib_buffer_pool_put (vm, buffer_pool_index, bufs, n_alloc);
  obj = obj_table;
  while (total)
    {
      vlib_get_buffer_indices_with_offset (vm, obj, bufs, batch_size,
                                           sizeof (struct rte_mbuf));
      vlib_buffer_pool_put (vm, buffer_pool_index, bufs, batch_size);

      obj += batch_size;
      total -= batch_size;
    }
  return -ENOENT;
}

CLIB_MARCH_FN_REGISTRATION (dpdk_ops_vpp_dequeue);

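/*
 * Illustrative sketch only (hypothetical helper, not used anywhere in VPP):
 * once a pool is bound to the "vpp" ops, ordinary DPDK mbuf alloc/free calls
 * are served by dpdk_ops_vpp_dequeue () and dpdk_ops_vpp_enqueue () above.
 */
static_always_inline void
dpdk_buffer_pool_example_roundtrip (u8 buffer_pool_index)
{
  struct rte_mempool *mp = dpdk_mempool_by_buffer_pool_index[buffer_pool_index];

  /* allocation is redirected to the VPP buffer pool via dpdk_ops_vpp_dequeue () */
  struct rte_mbuf *mb = rte_pktmbuf_alloc (mp);

  if (mb)
    /* free returns the buffer via dpdk_ops_vpp_enqueue () / vlib_buffer_pool_put () */
    rte_pktmbuf_free (mb);
}
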
#ifndef CLIB_MARCH_VARIANT

/* should never be called - the no-cache pool is only used to return buffers */
static int
dpdk_ops_vpp_dequeue_no_cache (struct rte_mempool *mp, void **obj_table,
                               unsigned n)
{
  clib_error ("bug");
  return 0;
}

static unsigned
dpdk_ops_vpp_get_count (const struct rte_mempool *mp)
{
  vlib_main_t *vm = vlib_get_main ();
  if (mp)
    {
      vlib_buffer_pool_t *pool = vlib_get_buffer_pool (vm, mp->pool_id);
      if (pool)
        {
          return pool->n_avail;
        }
    }
  return 0;
}

static unsigned
dpdk_ops_vpp_get_count_no_cache (const struct rte_mempool *mp)
{
  struct rte_mempool *cmp;
  cmp = dpdk_no_cache_mempool_by_buffer_pool_index[mp->pool_id];
  return dpdk_ops_vpp_get_count (cmp);
}

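/*
 * Registers the "vpp" and "vpp-no-cache" mempool ops with DPDK and creates
 * the DPDK view of every VPP buffer pool that has memory allocated
 * (bp->start != 0).  Device code can then hand
 * dpdk_mempool_by_buffer_pool_index[i] straight to a PMD (e.g. at rx queue
 * setup), so received packets land in buffers that are valid rte_mbufs and
 * vlib_buffers at the same time.
 */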
clib_error_t *
dpdk_buffer_pools_create (vlib_main_t * vm)
{
  clib_error_t *err;
  vlib_buffer_pool_t *bp;

  struct rte_mempool_ops ops = { };

  strncpy (ops.name, "vpp", 4);
  ops.alloc = dpdk_ops_vpp_alloc;
  ops.free = dpdk_ops_vpp_free;
  ops.get_count = dpdk_ops_vpp_get_count;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue);
  ops.dequeue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_dequeue);
  rte_mempool_register_ops (&ops);

  strncpy (ops.name, "vpp-no-cache", 13);
  ops.get_count = dpdk_ops_vpp_get_count_no_cache;
  ops.enqueue = CLIB_MARCH_FN_POINTER (dpdk_ops_vpp_enqueue_no_cache);
  ops.dequeue = dpdk_ops_vpp_dequeue_no_cache;
  rte_mempool_register_ops (&ops);

  /* *INDENT-OFF* */
  vec_foreach (bp, vm->buffer_main->buffer_pools)
    if (bp->start && (err = dpdk_buffer_pool_init (vm, bp)))
      return err;
  /* *INDENT-ON* */
  return 0;
}

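/*
 * Tell vlib to reserve room in front of every buffer for the external
 * headers used here: the rte_mempool object header plus the rte_mbuf, so
 * the overlay built in dpdk_buffer_pool_init () fits without extra copies.
 */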
VLIB_BUFFER_SET_EXT_HDR_SIZE (sizeof (struct rte_mempool_objhdr) +
                              sizeof (struct rte_mbuf));

#endif

/** @endcond */
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */