19 #include <rte_config.h> 21 #include <rte_ethdev.h> 23 #include <rte_version.h> 29 "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM");
32 #ifndef CLIB_MARCH_VARIANT 41 struct rte_mempool *mp, *nmp;
42 struct rte_pktmbuf_pool_private priv;
43 enum rte_iova_mode iova_mode;
48 sizeof (
struct rte_mbuf) + sizeof (vlib_buffer_t) + bp->data_size;
57 name =
format (name,
"vpp pool %u%c", bp->index, 0);
58 mp = rte_mempool_create_empty ((
char *) name, bp->n_buffers,
59 elt_size, 512, sizeof (priv),
65 "failed to create normal mempool for numa node %u",
71 name =
format (name,
"vpp pool %u (no cache)%c", bp->index, 0);
72 nmp = rte_mempool_create_empty ((
char *) name, bp->n_buffers,
73 elt_size, 0, sizeof (priv),
77 rte_mempool_free (mp);
80 "failed to create non-cache mempool for numa node %u",
88 mp->pool_id = nmp->pool_id = bp->index;
90 rte_mempool_set_ops_byname (mp,
"vpp", NULL);
91 rte_mempool_set_ops_byname (nmp,
"vpp-no-cache", NULL);
94 memset (&priv, 0,
sizeof (priv));
98 rte_pktmbuf_pool_init (mp, &priv);
99 rte_pktmbuf_pool_init (nmp, &priv);
101 iova_mode = rte_eal_iova_mode ();
104 for (i = 0; i < bp->n_buffers; i++)
106 struct rte_mempool_objhdr *hdr;
109 hdr = (
struct rte_mempool_objhdr *) RTE_PTR_SUB (mb,
sizeof (*hdr));
111 hdr->iova = (iova_mode == RTE_IOVA_VA) ?
113 STAILQ_INSERT_TAIL (&mp->elt_list, hdr, next);
114 STAILQ_INSERT_TAIL (&nmp->elt_list, hdr, next);
115 mp->populated_size++;
116 nmp->populated_size++;
120 rte_mempool_obj_iter (mp, rte_pktmbuf_init, 0);
127 (buffer_mem_start, *bp->buffers,
128 0)), sizeof (
struct rte_mbuf));
130 for (i = 0; i < bp->n_buffers; i++)
138 if (rte_eth_dev_count_avail ())
148 for (i = 0; i < pm->
n_pages; i++)
150 char *va = ((
char *) pm->
base) + i * page_sz;
151 uword pa = (iova_mode == RTE_IOVA_VA) ?
155 #
if RTE_VERSION < RTE_VERSION_NUM(19, 11, 0, 0)
158 rte_vfio_container_dma_map (RTE_VFIO_DEFAULT_CONTAINER_FD,
163 struct rte_mempool_memhdr *memhdr;
168 memhdr->len = page_sz;
172 STAILQ_INSERT_TAIL (&mp->mem_list, memhdr, next);
200 struct rte_mbuf *mb = obj;
209 void *
const *obj_table,
unsigned n)
211 const int batch_size = 32;
214 u8 buffer_pool_index = mp->pool_id;
216 u32 bufs[batch_size];
218 void *
const *obj = obj_table;
239 while (n >= batch_size)
243 sizeof (
struct rte_mbuf));
246 obj_table += batch_size;
252 n,
sizeof (
struct rte_mbuf));
263 struct rte_mempool *
new,
void *obj,
266 struct rte_mbuf *mb = obj;
280 void *
const *obj_table,
285 struct rte_mempool *mp;
287 u8 buffer_pool_index = cmp->pool_id;
326 struct rte_mbuf *mb = mba[0];
330 *((u8x16 *) mb + 1) = *((u8x16 *) mt + 1);
333 #ifdef CLIB_HAVE_VEC256 334 for (i = 1; i < 4; i++)
335 *((u8x32 *) mb +
i) = *((u8x32 *) mt +
i);
337 for (i = 2; i < 8; i++)
338 *((u8x16 *) mb +
i) = *((u8x16 *) mt +
i);
346 void **obj_table,
unsigned n)
348 const int batch_size = 32;
350 u32 bufs[batch_size], total = 0, n_alloc = 0;
351 u8 buffer_pool_index = mp->pool_id;
352 void **obj = obj_table;
355 while (n >= batch_size)
359 if (n_alloc != batch_size)
363 -(
i32)
sizeof (
struct rte_mbuf));
378 -(
i32)
sizeof (
struct rte_mbuf));
393 sizeof (
struct rte_mbuf));
404 #ifndef CLIB_MARCH_VARIANT 424 struct rte_mempool *cmp;
435 struct rte_mempool_ops ops = { };
437 strncpy (ops.name,
"vpp", 4);
443 rte_mempool_register_ops (&ops);
445 strncpy (ops.name,
"vpp-no-cache", 13);
449 rte_mempool_register_ops (&ops);
460 sizeof (
struct rte_mbuf));
#define vlib_buffer_from_rte_mbuf(x)
#define CLIB_MARCH_FN_POINTER(fn)
STATIC_ASSERT_OFFSET_OF(vlib_buffer_t, template_end, 64)
#define clib_error(format, args...)
static int dpdk_ops_vpp_alloc(struct rte_mempool *mp)
static_always_inline void dpdk_mbuf_init_from_template(struct rte_mbuf **mba, struct rte_mbuf *mt, int count)
static_always_inline vlib_buffer_pool_t * vlib_get_buffer_pool(vlib_main_t *vm, u8 buffer_pool_index)
#define VLIB_BUFFER_PRE_DATA_SIZE
vlib_buffer_main_t * buffer_main
VLIB_BUFFER_SET_EXT_HDR_SIZE(sizeof(struct rte_mempool_objhdr)+sizeof(struct rte_mbuf))
static_always_inline void vlib_get_buffers_with_offset(vlib_main_t *vm, u32 *bi, void **b, int count, i32 offset)
Translate array of buffer indices into buffer pointers with offset.
vlib_physmem_map_t * vlib_physmem_get_map(vlib_main_t *vm, u32 index)
#define vec_validate_aligned(V, I, A)
Make sure vector is long enough for given index (no header, specified alignment)
u8 buffer_pool_index
index of the buffer pool this buffer belongs to.
#define vec_reset_length(v)
Reset vector length to zero NULL-pointer tolerant.
#define clib_memcpy(d, s, n)
#define static_always_inline
#define vec_elt_at_index(v, i)
Get vector value at index i checking that i is in bounds.
#define clib_error_return(e, args...)
clib_error_t * dpdk_buffer_pool_init(vlib_main_t *vm, vlib_buffer_pool_t *bp)
static void dpdk_ops_vpp_free(struct rte_mempool *mp)
static u32 vlib_get_buffer_index(vlib_main_t *vm, void *p)
Translate buffer pointer into buffer index.
static_always_inline void dpdk_ops_vpp_enqueue_one(vlib_buffer_t *bt, void *obj)
static_always_inline void vlib_buffer_copy_template(vlib_buffer_t *b, vlib_buffer_t *bt)
#define rte_mbuf_from_vlib_buffer(x)
vlib_buffer_t buffer_template
vlib_buffer_pool_t * buffer_pools
static_always_inline void vlib_get_buffer_indices_with_offset(vlib_main_t *vm, void **b, u32 *bi, uword count, i32 offset)
Translate array of buffer pointers into buffer indices with offset.
STATIC_ASSERT(VLIB_BUFFER_PRE_DATA_SIZE==RTE_PKTMBUF_HEADROOM, "VLIB_BUFFER_PRE_DATA_SIZE must be equal to RTE_PKTMBUF_HEADROOM")
#define clib_atomic_sub_fetch(a, b)
static_always_inline u32 vlib_buffer_get_default_data_size(vlib_main_t *vm)
#define STATIC_ASSERT_SIZEOF_ELT(d, e, s)
static_always_inline void vlib_buffer_pool_put(vlib_main_t *vm, u8 buffer_pool_index, u32 *buffers, u32 n_buffers)
sll srl srl sll sra u16x4 i
#define vec_free(V)
Free vector's memory (no header).
#define clib_warning(format, args...)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue_no_cache(struct rte_mempool *cmp, void *const *obj_table, unsigned n)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_enqueue(struct rte_mempool *mp, void *const *obj_table, unsigned n)
CLIB_MARCH_FN_REGISTRATION(dpdk_ops_vpp_enqueue)
#define VLIB_BUFFER_HDR_SIZE
struct rte_mbuf * dpdk_mbuf_template_by_pool_index
static_always_inline void dpdk_ops_vpp_enqueue_no_cache_one(vlib_main_t *vm, struct rte_mempool *old, struct rte_mempool *new, void *obj, vlib_buffer_t *bt)
static void * vlib_buffer_ptr_from_index(uword buffer_mem_start, u32 buffer_index, uword offset)
static void * clib_mem_alloc(uword size)
static uword pointer_to_uword(const void *p)
static vlib_main_t * vlib_get_main(void)
static u64 vlib_physmem_get_pa(vlib_main_t *vm, void *mem)
clib_error_t * dpdk_buffer_pools_create(vlib_main_t *vm)
struct rte_mempool ** dpdk_no_cache_mempool_by_buffer_pool_index
VLIB buffer representation.
static unsigned dpdk_ops_vpp_get_count_no_cache(const struct rte_mempool *mp)
static unsigned dpdk_ops_vpp_get_count(const struct rte_mempool *mp)
#define vec_foreach(var, vec)
Vector iterator.
static int dpdk_ops_vpp_dequeue_no_cache(struct rte_mempool *mp, void **obj_table, unsigned n)
struct rte_mempool ** dpdk_mempool_by_buffer_pool_index
#define CLIB_CACHE_LINE_BYTES
#define STATIC_ASSERT_SIZEOF(d, s)
int CLIB_MULTIARCH_FN() dpdk_ops_vpp_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
volatile u8 ref_count
Reference count for this buffer.
static vlib_buffer_t * vlib_get_buffer(vlib_main_t *vm, u32 buffer_index)
Translate buffer index into buffer pointer.
static __clib_warn_unused_result u32 vlib_buffer_alloc_from_pool(vlib_main_t *vm, u32 *buffers, u32 n_buffers, u8 buffer_pool_index)
Allocate buffers from specific pool into supplied array.
#define CLIB_MULTIARCH_FN(fn)