19 #include <rte_config.h> 21 #include <rte_ethdev.h> 28 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
31 #ifndef CLIB_MARCH_VARIANT 40 struct rte_mempool *mp, *nmp;
41 struct rte_pktmbuf_pool_private priv;
42 enum rte_iova_mode iova_mode;
47 sizeof (
struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;
56 name =
format (name,
"vpp pool %u%c", bp->index, 0);
57 mp = rte_mempool_create_empty ((
char *) name,
vec_len (bp->buffers),
58 elt_size, 512, sizeof (priv),
64 "failed to create normal mempool for numa node %u",
70 name =
format (name,
"vpp pool %u (no cache)%c", bp->index, 0);
71 nmp = rte_mempool_create_empty ((
char *) name,
vec_len (bp->buffers),
72 elt_size, 0, sizeof (priv),
76 rte_mempool_free (mp);
79 "failed to create non-cache mempool for numa node %u",
87 mp->pool_id = nmp->pool_id = bp->index;
89 rte_mempool_set_ops_byname (mp,
"vpp", NULL);
90 rte_mempool_set_ops_byname (nmp,
"vpp-no-cache", NULL);
93 memset (&priv, 0,
sizeof (priv));
97 rte_pktmbuf_pool_init (mp, &priv);
98 rte_pktmbuf_pool_init (nmp, &priv);
100 iova_mode = rte_eal_iova_mode ();
106 struct rte_mempool_objhdr *hdr;
109 hdr = (
struct rte_mempool_objhdr *) RTE_PTR_SUB (mb,
sizeof (*hdr));
111 hdr->iova = (iova_mode == RTE_IOVA_VA) ?
113 STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
114 STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
115 mp->populated_size++;
116 nmp->populated_size++;
121 rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
128 (buffer_mem_start, *bp->buffers,
129 0)), sizeof (
struct rte_mbuf));
141 if (rte_eth_dev_count_avail ())
151 for (i = 0; i < pm->
n_pages; i++)
153 char *va = ((
char *) pm->
base) + i * page_sz;
154 uword pa = (iova_mode == RTE_IOVA_VA) ?
161 struct rte_mempool_memhdr *memhdr;
166 memhdr->len = page_sz;
170 STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
198 struct rte_mbuf *mb = obj;
207 void *
const *obj_table,
unsigned n)
209 const int batch_size = 32;
212 u8 buffer_pool_index = mp->pool_id;
214 u32 bufs[batch_size];
216 void *
const *obj = obj_table;
237 while (n >= batch_size)
241 sizeof (
struct rte_mbuf));
244 obj_table += batch_size;
250 n,
sizeof (
struct rte_mbuf));
261 struct rte_mempool *
new,
void *obj,
264 struct rte_mbuf *mb = obj;
278 void *
const *obj_table,
283 struct rte_mempool *mp;
285 u8 buffer_pool_index = cmp->pool_id;
324 struct rte_mbuf *mb = mba[0];
328 *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);
331 #ifdef CLIB_HAVE_VEC256 332 for (i = 1; i < 4; i++)
333 *((u8x32 *) mb +
i) = *((u8x32 *) mt +
i);
335 for (i = 2; i < 8; i++)
336 *((u8x16 *) mb +
i) = *((u8x16 *) mt +
i);
344 void **obj_table,
unsigned n)
346 const int batch_size = 32;
348 u32 bufs[batch_size], total = 0, n_alloc = 0;
349 u8 buffer_pool_index = mp->pool_id;
350 void **obj = obj_table;
353 while (n >= batch_size)
357 if (n_alloc != batch_size)
361 -(
i32)
sizeof (
struct rte_mbuf));
376 -(
i32)
sizeof (
struct rte_mbuf));
391 sizeof (
struct rte_mbuf));
402 #ifndef CLIB_MARCH_VARIANT 422 struct rte_mempool *cmp;
433 struct rte_mempool_ops ops = { };
435 strncpy (ops.name,
"vpp", 4);
441 rte_mempool_register_ops (&ops);
443 strncpy (ops.name,
"vpp-no-cache", 13);
447 rte_mempool_register_ops (&ops);
458 sizeof (
struct rte_mbuf));
#define vlib_buffer_from_rte_mbuf(x)
#define CLIB_MARCH_FN_POINTER(fn)
STATIC_ASSERT_OFFSET_OF(vlib_buffer_t, template_end, 64)
#define clib_error(format, args...)
static int dpdk_ops_vpp_alloc(struct rte_mempool *mp)
static_always_inline void dpdk_mbuf_init_from_template(struct rte_mbuf **mba, struct rte_mbuf *mt, int count)
static_always_inline vlib_buffer_pool_t * vlib_get_buffer_pool(vlib_main_t *vm, u8 buffer_pool_index)
#define VLIB_BUFFER_PRE_DATA_SIZE
vlib_buffer_main_t * buffer_main
VLIB_BUFFER_SET_EXT_HDR_SIZE(sizeof(struct rte_mempool_objhdr)+sizeof(struct rte_mbuf))
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
vlib_physmem_map_t * vlib_physmem_get_map(vlib_main_t *vm, u32 index)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
u8 buffer_pool_index
index of buffer pool this buffer belongs.
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
#define clib_memcpy(d, s, n)
#define static_always_inline
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
clib_error_t * dpdk_buffer_pool_init(vlib_main_t *vm, vlib_buffer_pool_t *bp)
static void dpdk_ops_vpp_free(struct rte_mempool *mp)
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static_always_inline void dpdk_ops_vpp_enqueue_one(vlib_buffer_t *bt, void *obj)
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define rte_mbuf_from_vlib_buffer(x)
vlib_buffer_t buffer_template
vlib_buffer_pool_t * buffer_pools
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
STATIC_ASSERT(VLIB_BUFFER_PRE_DATA_SIZE==RTE_PKTMBUF_HEADROOM, "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM")
#define clib_atomic_sub_fetch(a, b)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define STATIC_ASSERT_SIZEOF_ELT(d, e, s)
static_always_inline void vlib_buffer_pool_put(vlib_main_t *vm, u8 buffer_pool_index, u32 *buffers, u32 n_buffers)
#define vec_free(V)
Free vector's memory (no header).
#define clib_warning(format, args...)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue_no_cache(struct rte_mempool *cmp, void *const *obj_table, unsigned n)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue(struct rte_mempool *mp, void *const *obj_table, unsigned n)
CLIB_MARCH_FN_REGISTRATION(dpdk_ops_vpp_enqueue)
#define VLIB_BUFFER_HDR_SIZE
static u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
struct rte_mbuf * dpdk_mbuf_template_by_pool_index
static_always_inline void dpdk_ops_vpp_enqueue_no_cache_one(vlib_main_t *vm, struct rte_mempool *old, struct rte_mempool *new, void *obj, vlib_buffer_t *bt)
static void * vlib_buffer_ptr_from_index(uword buffer_mem_start, u32 buffer_index, uword offset)
static void * clib_mem_alloc(uword size)
static uword pointer_to_uword(const void *p)
static vlib_main_t * vlib_get_main(void)
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
clib_error_t * dpdk_buffer_pools_create(vlib_main_t *vm)
#define vec_len(v)
Number of elements in vector (rvalue-only, NULL tolerant)
struct rte_mempool ** dpdk_no_cache_mempool_by_buffer_pool_index
VLIB buffer representation.
static unsigned dpdk_ops_vpp_get_count_no_cache(const struct rte_mempool *mp)
static unsigned dpdk_ops_vpp_get_count(const struct rte_mempool *mp)
#define vec_foreach(var, vec)
Vector iterator.
static int dpdk_ops_vpp_dequeue_no_cache(struct rte_mempool *mp, void **obj_table, unsigned n)
struct rte_mempool ** dpdk_mempool_by_buffer_pool_index
#define CLIB_CACHE_LINE_BYTES
#define STATIC_ASSERT_SIZEOF(d, s)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
volatile u8 ref_count
Reference count for this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
#define CLIB_MULTIARCH_FN(fn)