FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
Source listing (excerpt): src/vppinfra/pmalloc.c, the NUMA-aware physical memory allocator.
#include <sys/types.h>

#if __SIZEOF_POINTER__ >= 8
#define DEFAULT_RESERVED_MB 16384
#else
#define DEFAULT_RESERVED_MB 256
#endif
/* In clib_pmalloc_init (): probe /proc/self/pagemap; if physical addresses
   cannot be read, fall back to virtual-address-only operation. */
  if (pt == 0 || pt[0] == 0)
    pm->flags |= CLIB_PMALLOC_F_NO_PAGEMAP;
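A minimal usage sketch (not part of this file; it assumes a VPP build with
<vppinfra/pmalloc.h> on the include path, and takes the function and constant
names from the cross-reference list at the end of this listing):

/* --- example, not part of pmalloc.c --- */
#include <vppinfra/pmalloc.h>

static clib_pmalloc_main_t pmalloc_main;

static void *
init_and_alloc_example (void)
{
  clib_pmalloc_main_t *pm = &pmalloc_main;

  /* base_addr = 0, size = 0 requests the defaults
     (DEFAULT_RESERVED_MB of reserved VA) */
  if (clib_pmalloc_init (pm, 0, 0) != 0)
    return 0;			/* pm->error holds the reason */

  /* 4 KiB, cache-line aligned, on the caller's local NUMA node */
  return clib_pmalloc_alloc_aligned_on_numa (pm, 4096, CLIB_CACHE_LINE_BYTES,
					     CLIB_PMALLOC_NUMA_LOCAL);
}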
static void *
alloc_chunk_from_page (clib_pmalloc_main_t * pm, clib_pmalloc_page_t * pp,
                       u32 n_blocks, u32 block_align, u32 numa_node)
{
  /* ... */
  u32 alloc_chunk_index;

  /* lazily create the page's chunk pool: one free chunk per subpage */
  if (pp->chunks == 0)
    {
      u32 i, start = 0, prev = ~0;

      for (i = 0; i < a->subpages_per_page; i++)
        {
          /* ... */
        }
    }

  /* first-fit walk over the chunk list */
  off = (block_align - (c->start & (block_align - 1))) & (block_align - 1);

  if (c->used || n_blocks + off > c->size)
    {
      /* ... */
      alloc_chunk_index = c->next;
      /* ... try the next chunk ... */
    }

  /* alignment needed: split off a leading chunk of size 'off' */
  if (off)
    {
      u32 offset_chunk_index;
      /* ... */
      offset_chunk_index = alloc_chunk_index;
      alloc_chunk_index = c - pp->chunks;
      /* ... */
      c->prev = offset_chunk_index;
      /* ... */
      co->next = alloc_chunk_index;
    }

  /* chunk is bigger than needed: split off the tail as a new free chunk */
  if (c->size > n_blocks)
    {
      u32 tail_chunk_index;
      /* ... */
      tail_chunk_index = ct - pp->chunks;
      ct->size = c->size - n_blocks;
      ct->prev = alloc_chunk_index;
      ct->start = c->start + n_blocks;
      /* ... */
      c->next = tail_chunk_index;
    }
  else if (c->next != ~0)
    /* ... fix up the successor's back link ... */
}
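The split logic above is easier to see in isolation. Below is a
self-contained toy model (hypothetical names, plain C, no vppinfra) of the
same first-fit-with-alignment scheme: find a free chunk, carve off an
alignment prefix, then carve off the unused tail:

/* --- example, not part of pmalloc.c --- */
#include <stdio.h>
#include <stdint.h>

/* toy chunk: [start, start + size) in blocks, singly-linked list */
typedef struct toy_chunk
{
  uint32_t start, size;
  int used;
  struct toy_chunk *next;
} toy_chunk_t;

static toy_chunk_t pool[16];
static int n_chunks;

static toy_chunk_t *
new_chunk (void)
{
  return &pool[n_chunks++];
}

/* first-fit allocation of n_blocks with power-of-2 block_align */
static toy_chunk_t *
toy_alloc (toy_chunk_t * c, uint32_t n_blocks, uint32_t block_align)
{
  for (; c; c = c->next)
    {
      /* padding needed so the chunk start hits the alignment */
      uint32_t off = (block_align - (c->start & (block_align - 1)))
		     & (block_align - 1);

      if (c->used || n_blocks + off > c->size)
	continue;

      if (off)			/* split off the alignment prefix */
	{
	  toy_chunk_t *co = new_chunk ();
	  *co = *c;
	  co->size = off;
	  co->next = c;
	  c->start += off;
	  c->size -= off;
	  /* (a real allocator would also relink the predecessor to 'co') */
	}

      if (c->size > n_blocks)	/* split off the free tail */
	{
	  toy_chunk_t *ct = new_chunk ();
	  ct->start = c->start + n_blocks;
	  ct->size = c->size - n_blocks;
	  ct->used = 0;
	  ct->next = c->next;
	  c->size = n_blocks;
	  c->next = ct;
	}

      c->used = 1;
      return c;
    }
  return 0;
}

int
main (void)
{
  toy_chunk_t *page = new_chunk ();
  page->start = 0;
  page->size = 1024;		/* one page worth of blocks */

  toy_chunk_t *c = toy_alloc (page, 10, 8);
  if (c)
    printf ("allocated blocks [%u, %u)\n", c->start, c->start + c->size);
  return 0;
}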
static void
pmalloc_update_lookup_table (clib_pmalloc_main_t * pm, u32 first, u32 count)
{
  uword seek, va, pa, p;
  /* ... */

  fd = open ((char *) "/proc/self/pagemap", O_RDONLY);
  /* ... for each lookup page, read its pagemap entry ... */
  if (fd != -1 && lseek (fd, seek, SEEK_SET) == seek &&
      read (fd, &pa, sizeof (pa)) == (sizeof (pa)) &&
      /* ... page-present bit set ... */)
    /* ... record va - pa in the lookup table ... */
}
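For readers unfamiliar with the pagemap interface: each virtual page has an
8-byte entry in /proc/self/pagemap; bit 63 says the page is present and the
low 55 bits hold the page frame number. A minimal standalone reader
(hypothetical helper, assumes Linux; reading the PFN bits usually requires
root privileges on modern kernels):

/* --- example, not part of pmalloc.c --- */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* translate one virtual address to a physical address, 0 on failure */
static uint64_t
va_to_pa (void *ptr)
{
  long psz = sysconf (_SC_PAGESIZE);
  uint64_t va = (uintptr_t) ptr, entry = 0;
  off_t seek = (va / psz) * sizeof (entry);
  int fd = open ("/proc/self/pagemap", O_RDONLY);

  if (fd == -1 || pread (fd, &entry, sizeof (entry), seek) != sizeof (entry))
    entry = 0;
  if (fd != -1)
    close (fd);

  if (!(entry & (1ULL << 63)))	/* bit 63: page present */
    return 0;

  /* bits 0..54: page frame number; add the offset within the page */
  return ((entry & ((1ULL << 55) - 1)) * psz) + (va % psz);
}

int
main (void)
{
  static char buf[4096] = { 1 };	/* touch so the page is mapped */
  printf ("va %p -> pa 0x%llx\n", (void *) buf,
	  (unsigned long long) va_to_pa (buf));
  return 0;
}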
static clib_pmalloc_page_t *
pmalloc_map_pages (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                   u32 numa_node, u32 n_pages)
{
  /* ... */
  int rv, i, mmap_flags;
  void *va = MAP_FAILED;
  /* ... */

  /* hugepage arenas: preallocate pages via sysfs before mapping */
  pm->error = clib_sysfs_prealloc_hugepages (numa_node,
                                             a->log2_subpage_sz, n_pages);
  /* ... */

  /* bind subsequent page faults to the requested node */
  pm->error = clib_error_return_unix (0, "failed to set mempolicy for "
                                      "numa node %u", numa_node);
  /* ... */

  mmap_flags = MAP_FIXED;

  if (a->flags & CLIB_PMALLOC_ARENA_F_SHARED_MEM)
    {
      mmap_flags |= MAP_SHARED;
      /* ... */
      if ((ftruncate (a->fd, size)) == -1)
        /* ... */
    }
  else
    {
      /* ... hugepage subpages ... */
      mmap_flags |= MAP_HUGETLB;
      /* ... */
      mmap_flags |= MAP_PRIVATE | MAP_ANONYMOUS;
    }

  if (mmap (va, size, PROT_READ | PROT_WRITE, mmap_flags, a->fd, 0) ==
      MAP_FAILED)
    {
      pm->error = clib_error_return_unix (0, "failed to mmap %u pages at %p "
                                          "fd %d numa %d flags 0x%x", n_pages,
                                          va, a->fd, numa_node, mmap_flags);
      /* ... */
    }

  if (/* ... */ mlock (va, size) != 0)
    /* ... */

  /* verify the pages landed on the requested node; tolerate an 'unknown'
     result only for node 0, to support non-NUMA kernels */
  if (stats.per_numa[numa_node] != 1 &&
      !(numa_node == 0 && stats.unknown == 1))
    {
      u16 allocated_at = ~0;
      /* ... */
      clib_error_return (0,
        "unable to get information about numa allocation");
      /* ... */
      if (stats.per_numa[i] == 1)
        allocated_at = i;
      clib_error_return (0,
        "page allocated on the wrong numa node (%u), "
        /* ... */, allocated_at, numa_node);
      /* ... */
    }

  for (i = 0; i < n_pages; i++)
    {
      /* ... register each new page and add it to the arena ... */
    }
  /* ... */

  /* return pointer to the first of the n_pages new pages */
  return pp - (n_pages - 1);

error:
  if (va != MAP_FAILED)
    {
      /* unmap, but keep the VA range reserved */
      mmap (va, size, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
            -1, 0);
    }
  /* ... */
}
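A stripped-down standalone version of the anonymous-hugepage branch above
(plain C sketch; MAP_HUGETLB needs preallocated hugepages, e.g. via
/proc/sys/vm/nr_hugepages, and fails with ENOMEM otherwise):

/* --- example, not part of pmalloc.c --- */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int
main (void)
{
  size_t size = 2UL << 20;	/* one 2 MB hugepage */
  void *va = mmap (0, size, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

  if (va == MAP_FAILED)
    {
      perror ("mmap(MAP_HUGETLB)");	/* typically: no hugepages reserved */
      return 1;
    }

  memset (va, 0, size);		/* touch the mapping to fault the page in */
  printf ("mapped hugepage at %p\n", va);
  munmap (va, size);
  return 0;
}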
__clib_export void *
clib_pmalloc_create_shared_arena (clib_pmalloc_main_t * pm, char *name,
                                  uword size, u32 log2_page_sz, u32 numa_node)
{
  /* ... */
  if (log2_page_sz == 0)
    log2_page_sz = pm->def_log2_page_sz;
  /* ... only the default and the kernel page size are supported ... */
  pm->error = clib_error_create ("unsupported page size (%uKB)",
                                 1 << (log2_page_sz - 10));
  /* ... */
  a->numa_node = numa_node;
  /* ... */
  a->log2_subpage_sz = log2_page_sz;
  /* ... */

  /* mapping failed: scrub and return the arena to the pool */
  memset (a, 0, sizeof (*a));
  /* ... */
}
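Usage sketch (illustrative, not from this file): create a named shared arena
and carve allocations out of it with clib_pmalloc_alloc_from_arena
(signature in the cross-reference list below). Passing 0 for the page size
picks the default; the arena name "my-arena" is made up:

/* --- example, not part of pmalloc.c; assumes clib_pmalloc_init () ran --- */
void *arena_va = clib_pmalloc_create_shared_arena (pm, "my-arena",
						   32 << 20 /* 32 MB */,
						   0 /* default page size */,
						   0 /* numa node */);
if (arena_va)
  {
    void *buf = clib_pmalloc_alloc_from_arena (pm, arena_va,
					       2048, 64 /* align */);
    /* ... use buf: physical addresses stay stable, a->fd is shareable ... */
  }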
static inline void *
clib_pmalloc_alloc_inline (clib_pmalloc_main_t * pm, clib_pmalloc_arena_t * a,
                           uword size, uword align, u32 numa_node)
{
  /* ... */
  u32 n_blocks, block_align, *page_index;
  /* ... */

  /* no arena given: use (or lazily create) the default arena for the node */
  if (a == 0)
    {
      /* ... */
      a->name = format (0, "default-numa-%u%c", numa_node, 0);
      a->numa_node = numa_node;
      /* ... */
      a->subpages_per_page = 1;
      /* ... */
    }
  else if (size > 1ULL << a->log2_subpage_sz)
    return 0;
  /* ... */
}
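The arithmetic helpers used throughout (round_pow2, pow2_mask, is_pow2; see
the cross-reference list below) are standard bit tricks. A self-contained
paraphrase of their usual definitions, not copied from vppinfra:

/* --- example, not part of pmalloc.c --- */
#include <assert.h>
#include <stdint.h>

typedef uintptr_t uword;

static uword pow2_mask (uword x) { return ((uword) 1 << x) - 1; }
static int   is_pow2 (uword x)   { return x && (x & (x - 1)) == 0; }
static uword round_pow2 (uword x, uword pow2)	/* round x up to a multiple */
{ return (x + pow2 - 1) & ~(pow2 - 1); }

int
main (void)
{
  /* pmalloc_size2pages (size, log2) is round_pow2 (size, 1 << log2) >> log2 */
  assert (round_pow2 (5000, 4096) >> 12 == 2);	/* 5000 B needs two 4K pages */
  assert (is_pow2 (64) && !is_pow2 (96));
  assert (pow2_mask (12) == 4095);
  return 0;
}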
static inline int
pmalloc_chunks_mergeable (clib_pmalloc_arena_t * a, clib_pmalloc_page_t * pp,
                          u32 ci1, u32 ci2)
{
  /* ... */
  if (ci1 == ~0 || ci2 == ~0)
    return 0;
  /* ... both chunks must also be free and live on the same subpage ... */
}
__clib_export void
clib_pmalloc_free (clib_pmalloc_main_t * pm, void *va)
{
  /* ... */
  u32 chunk_index, page_index;
  /* ... */

  /* merge with the next chunk if it is free */
  if (pmalloc_chunks_mergeable (a, pp, chunk_index, c->next))
    {
      clib_pmalloc_chunk_t *next = get_chunk (pp, c->next);
      c->size += next->size;
      c->next = next->next;
      if (next->next != ~0)
        get_chunk (pp, next->next)->prev = chunk_index;
      /* ... */
    }

  /* merge with the previous chunk if it is free */
  if (pmalloc_chunks_mergeable (a, pp, c->prev, chunk_index))
    {
      clib_pmalloc_chunk_t *prev = get_chunk (pp, c->prev);
      prev->size += c->size;
      prev->next = c->next;
      /* ... */
      memset (c, 0, sizeof (*c));
      /* ... */
    }
}
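Taken with alloc_chunk_from_page, this gives the usual split-on-alloc,
coalesce-on-free discipline. A usage sketch (illustrative; whether two
chunks actually merge depends on their adjacency within one subpage, per
pmalloc_chunks_mergeable above):

/* --- example, not part of pmalloc.c --- */
void *p1 = clib_pmalloc_alloc_aligned (pm, 1024, 64);
void *p2 = clib_pmalloc_alloc_aligned (pm, 1024, 64);

clib_pmalloc_free (pm, p1);
clib_pmalloc_free (pm, p2);	/* adjacent free chunks coalesce here */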
static u8 *
format_pmalloc_page (u8 * s, va_list * va)
{
  clib_pmalloc_page_t *pp = va_arg (*va, clib_pmalloc_page_t *);
  int verbose = va_arg (*va, int);
  /* ... */

  s = format (s, "free %u chunks %u free-chunks %d ",
              /* ... */);

  if (verbose >= 2)
    {
      /* ... per-chunk table ... */
      s = format (s, "\n%U%12s%12s%8s%8s%8s%8s",
                  /* ... */ "chunk offset", "size", "used", "index", "prev",
                  "next");
      /* ... one row per chunk ... */
      s = format (s, "\n%U%12u%12u%8s%8d%8d%8d",
                  /* ... */ c->used ? "yes" : "no",
                  /* ... */);
    }
  return s;
}
__clib_export u8 *
format_pmalloc (u8 * s, va_list * va)
{
  clib_pmalloc_main_t *pm = va_arg (*va, clib_pmalloc_main_t *);
  int verbose = va_arg (*va, int);
  /* ... */

  s = format (s, "used-pages %u reserved-pages %u default-page-size %U "
              /* ... */);

  pool_foreach (a, pm->arenas)
    {
      s = format (s, "\n%Uarena '%s' pages %u subpage-size %U numa-node %u",
                  /* ... */ a->log2_subpage_sz, a->numa_node);
      if (a->fd != -1)
        s = format (s, " shared fd %d", a->fd);
      /* ... per-page details via format_pmalloc_page when verbose ... */
    }
  return s;
}
__clib_export u8 *
format_pmalloc_map (u8 * s, va_list * va)
{
  /* ... */
  s = format (s, "%16s %13s %8s", "virtual-addr", "physical-addr", "size");
  vec_foreach_index (index, pm->lookup_table)
    {
      uword *lookup_val, pa, va;
      /* ... the lookup table stores va - pa per lookup page ... */
      pa = va - *lookup_val;
      /* ... */
    }
  return s;
}
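Both formatters follow vppinfra's %U convention, so dumping allocator state
is one call each (illustrative; fformat is vppinfra's printf-style helper):

/* --- example, not part of pmalloc.c --- */
fformat (stdout, "%U\n", format_pmalloc, pm, 1 /* verbose */);
fformat (stdout, "%U\n", format_pmalloc_map, pm);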
Cross-referenced symbols (from the generated documentation):

Exported pmalloc API:
__clib_export int clib_pmalloc_init(clib_pmalloc_main_t *pm, uword base_addr, uword size)
void * clib_pmalloc_alloc_aligned(clib_pmalloc_main_t *pm, uword size, uword align)
__clib_export void * clib_pmalloc_alloc_aligned_on_numa(clib_pmalloc_main_t *pm, uword size, uword align, u32 numa_node)
__clib_export void * clib_pmalloc_create_shared_arena(clib_pmalloc_main_t *pm, char *name, uword size, u32 log2_page_sz, u32 numa_node)
__clib_export void * clib_pmalloc_alloc_from_arena(clib_pmalloc_main_t *pm, void *arena_va, uword size, uword align)
__clib_export void clib_pmalloc_free(clib_pmalloc_main_t *pm, void *va)
__clib_export u8 * format_pmalloc(u8 *s, va_list *va)
__clib_export u8 * format_pmalloc_map(u8 *s, va_list *va)

Internal helpers:
static void * alloc_chunk_from_page(clib_pmalloc_main_t *pm, clib_pmalloc_page_t *pp, u32 n_blocks, u32 block_align, u32 numa_node)
static clib_pmalloc_page_t * pmalloc_map_pages(clib_pmalloc_main_t *pm, clib_pmalloc_arena_t *a, u32 numa_node, u32 n_pages)
static void pmalloc_update_lookup_table(clib_pmalloc_main_t *pm, u32 first, u32 count)
static void * clib_pmalloc_alloc_inline(clib_pmalloc_main_t *pm, clib_pmalloc_arena_t *a, uword size, uword align, u32 numa_node)
static int pmalloc_chunks_mergeable(clib_pmalloc_arena_t *a, clib_pmalloc_page_t *pp, u32 ci1, u32 ci2)
static clib_pmalloc_chunk_t * get_chunk(clib_pmalloc_page_t *pp, u32 index)
static u32 clib_pmalloc_get_page_index(clib_pmalloc_main_t *pm, void *va)
static clib_pmalloc_arena_t * clib_pmalloc_get_arena(clib_pmalloc_main_t *pm, void *va)
static uword pmalloc_size2pages(uword size, u32 log2_page_sz)
static u8 * format_pmalloc_page(u8 *s, va_list *va)

Allocator state:
clib_pmalloc_page_t * pages
clib_pmalloc_arena_t * arenas
clib_pmalloc_chunk_t * chunks
u32 * default_arena_for_numa_node
uword * chunk_index_by_va
clib_mem_page_sz_t def_log2_page_sz

Memory and sysfs helpers:
uword clib_mem_vm_reserve(uword start, uword size, clib_mem_page_sz_t log2_page_sz)
__clib_export int clib_mem_vm_create_fd(clib_mem_page_sz_t log2_page_size, char *fmt, ...)
__clib_export u64 * clib_mem_vm_get_paddr(void *mem, clib_mem_page_sz_t log2_page_size, int n_pages)
__clib_export void clib_mem_get_page_stats(void *start, clib_mem_page_sz_t log2_page_size, uword n_pages, clib_mem_page_stats_t *stats)
__clib_export int clib_mem_set_numa_affinity(u8 numa_node, int force)
__clib_export int clib_mem_set_default_numa_affinity()
__clib_export u32 clib_get_current_numa_node()
__clib_export uword clib_mem_get_default_hugepage_size(void)
static_always_inline clib_mem_page_sz_t clib_mem_get_log2_page_size(void)
clib_error_t * clib_sysfs_prealloc_hugepages(int numa_node, int log2_page_size, int nr)
CLIB_MEM_PAGE_SZ_DEFAULT (clib_mem_page_sz_t enumerator)

Word and bit helpers:
static uword min_log2(uword x)
static uword is_pow2(uword x)
static uword pow2_mask(uword x)
static uword round_pow2(uword x, uword pow2)
static uword pointer_to_uword(const void *p)
#define uword_to_pointer(u, type)

Pool, vector, and hash primitives:
#define pool_get(P, E)
    Allocate an object E from a pool P (unspecified alignment).
#define pool_put(P, E)
    Free an object E in pool P.
#define pool_elt_at_index(p, i)
    Returns pointer to element at given index.
#define pool_foreach(VAR, POOL)
    Iterate through pool.
static uword pool_elts(void *v)
    Number of active elements in a pool.
#define vec_len(v)
    Number of elements in vector (rvalue-only, NULL tolerant).
#define vec_add1(V, E)
    Add 1 element to end of vector (unspecified alignment).
#define vec_add2(V, P, N)
    Add N elements to end of vector V, return pointer to new elements in P.
#define vec_elt_at_index(v, i)
    Get vector value at index i, checking that i is in bounds.
#define vec_validate_aligned(V, I, A)
    Make sure vector is long enough for given index (no header, specified alignment).
#define vec_validate_init_empty(V, I, INIT)
    Make sure vector is long enough for given index and initialize empty space (no header, unspecified alignment).
#define vec_free(V)
    Free vector's memory (no header).
#define vec_foreach(var, vec)
    Vector iterator.
#define vec_foreach_index(var, v)
    Iterate over vector indices.
#define hash_set(h, key, value)
#define hash_unset(h, key)

Errors and constants:
#define clib_error_return(e, args...)
#define clib_error_return_unix(e, args...)
#define clib_error_create(args...)
#define clib_error_free(e)
__clib_export u8 * format_clib_error(u8 *s, va_list *va)
#define CLIB_CACHE_LINE_BYTES
#define CLIB_PMALLOC_ARENA_F_SHARED_MEM
#define CLIB_PMALLOC_NUMA_LOCAL
#define CLIB_PMALLOC_F_NO_PAGEMAP
#define PMALLOC_LOG2_BLOCK_SZ
#define DEFAULT_RESERVED_MB