FD.io VPP v21.10.1-2-g0a485f517
Vector Packet Processing
#ifndef _included_clib_mem_h
#define _included_clib_mem_h

#define CLIB_MAX_MHEAPS 256
#define CLIB_MAX_NUMAS 16
#define CLIB_MEM_VM_MAP_FAILED ((void *) ~0)
#define CLIB_MEM_ERROR (-1)
typedef struct _clib_mem_vm_map_hdr
{
  /* ... */
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN 64
  /* ... */
  struct _clib_mem_vm_map_hdr *prev, *next;
} clib_mem_vm_map_hdr_t;
#define foreach_clib_mem_heap_flag \
  _(0, LOCKED, "locked") \
  _(1, UNMAP_ON_DESTROY, "unmap-on-destroy")

#define _(i, v, s) CLIB_MEM_HEAP_F_##v = (1 << i),

#define VEC_NUMA_UNSPECIFIED (0xFF)
  if (__os_thread_index != 0)
  /* ... */
  ASSERT (__os_thread_index > 0);
  /* ... */
                                  int os_out_of_memory_on_failure)
  /* ... */
  if (os_out_of_memory_on_failure)
#define clib_mem_alloc_aligned_no_fail(size,align)                            \
({                                                                             \
  uword _clib_mem_alloc_size = (size);                                         \
  void * _clib_mem_alloc_p;                                                    \
  _clib_mem_alloc_p = clib_mem_alloc_aligned (_clib_mem_alloc_size, (align));  \
  if (! _clib_mem_alloc_p)                                                     \
    clib_panic ("failed to allocate %d bytes", _clib_mem_alloc_size);          \
  _clib_mem_alloc_p;                                                           \
})

#define clib_mem_alloc_no_fail(size) clib_mem_alloc_aligned_no_fail(size,1)

#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
  if (old_size < new_size)
    copy_size = old_size;
  else
    copy_size = new_size;
  flags |= MAP_ANONYMOUS;
  /* ... */
  mmap_addr = mmap (0, size, PROT_READ | PROT_WRITE, flags, -1, 0);
  if (mmap_addr == (void *) -1)
  return log2_page_size;

clib_mem_bulk_handle_t clib_mem_bulk_init (u32 elt_sz, u32 align,
                                           u32 min_elts_per_chunk);
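The excerpt above covers the basic per-thread heap allocators (clib_mem_alloc, clib_mem_alloc_aligned, the no-fail macros) and the copy-size logic used by clib_mem_realloc. The following minimal sketch shows how they fit together in a standalone vppinfra program; the 64 MB main-heap size, the buffer sizes, and the alignment value are illustrative assumptions, not values taken from the header.

#include <vppinfra/mem.h>

int
main (int argc, char *argv[])
{
  /* A clib main heap must exist before any clib_mem_alloc* call;
     64 MB is an arbitrary size chosen for this sketch. */
  clib_mem_init (0, 64ULL << 20);

  /* Plain and aligned allocations from the per-thread heap. */
  u8 *buf = clib_mem_alloc (128);
  u8 *aligned = clib_mem_alloc_aligned (256, 64);

  /* clib_mem_realloc (p, new_size, old_size) copies
     min (old_size, new_size) bytes, matching the copy_size
     logic excerpted above. */
  buf = clib_mem_realloc (buf, 256, 128);

  clib_mem_free (aligned);
  clib_mem_free (buf);
  return 0;
}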
void * clib_mem_vm_map_stack(uword size, clib_mem_page_sz_t log2_page_size, char *fmt,...)
static uword clib_mem_round_to_page_size(uword size, clib_mem_page_sz_t log2_page_size)
int clib_mem_vm_unmap(void *base)
#define count_trailing_zeros(x)
static_always_inline void os_set_thread_index(uword thread_index)
typedef struct _clib_mem_vm_map_hdr clib_mem_vm_map_hdr_t
static_always_inline clib_mem_page_sz_t clib_mem_log2_page_size_validate(clib_mem_page_sz_t log2_page_size)
static clib_mem_heap_t * clib_mem_get_heap(void)
uword clib_mem_vm_reserve(uword start, uword size, clib_mem_page_sz_t log2_page_sz)
void * clib_mem_get_heap_base(clib_mem_heap_t *heap)
typedef void * clib_mem_bulk_handle_t
static void * clib_mem_realloc(void *p, uword new_size, uword old_size)
CLIB_MEM_PAGE_SZ_DEFAULT_HUGE
#define CLIB_MEM_POISON(a, s)
static uword pow2_mask(uword x)
static void clib_mem_free(void *p)
static_always_inline uword clib_mem_get_default_hugepage_size(void)
uword clib_mem_get_heap_free_space(clib_mem_heap_t *heap)
DLMALLOC_EXPORT size_t mspace_usable_size_with_delta(const void *p)
clib_mem_page_sz_t log2_sys_default_hugepage_sz
u8 * format_clib_mem_heap(u8 *s, va_list *va)
void os_out_of_memory(void)
uword clib_mem_get_heap_size(clib_mem_heap_t *heap)
void clib_mem_bulk_free(clib_mem_bulk_handle_t h, void *p)
static void * clib_mem_alloc_aligned_or_null(uword size, uword align)
static_always_inline uword clib_mem_page_bytes(clib_mem_page_sz_t log2_page_size)
clib_mem_vm_map_hdr_t * clib_mem_vm_get_next_map_hdr(clib_mem_vm_map_hdr_t *hdr)
uword clib_mem_trace_enable_disable(uword enable)
static void clib_mem_set_thread_index(void)
static void * clib_mem_set_per_numa_heap(void *new_heap)
static_always_inline void * clib_memcpy_fast(void *restrict dst, const void *restrict src, size_t n)
int clib_mem_set_default_numa_affinity()
static void * clib_mem_alloc_or_null(uword size)
void * per_cpu_mheaps[CLIB_MAX_MHEAPS]
static uword round_pow2(uword x, uword pow2)
static void * clib_mem_get_per_numa_heap(u32 numa_id)
DLMALLOC_EXPORT void * mspace_get_aligned(mspace msp, unsigned long n_user_data_bytes, unsigned long align, unsigned long align_offset)
int clib_mem_set_numa_affinity(u8 numa_node, int force)
static clib_mem_heap_t * clib_mem_get_per_cpu_heap(void)
clib_mem_heap_t * clib_mem_create_heap(void *base, uword size, int is_locked, char *fmt,...)
static uword clib_mem_size(void *p)
void * clib_mem_vm_map(void *start, uword size, clib_mem_page_sz_t log2_page_size, char *fmt,...)
void * clib_mem_vm_map_shared(void *start, uword size, int fd, uword offset, char *fmt,...)
void * per_numa_mheaps[CLIB_MAX_NUMAS]
#define static_always_inline
void clib_mem_destroy_heap(clib_mem_heap_t *heap)
#define foreach_clib_mem_heap_flag
static void clib_mem_free_s(void *p)
void clib_mem_destroy(void)
static errno_t memset_s_inline(void *s, rsize_t smax, int c, rsize_t n)
#define CLIB_VM_MAP_HDR_NAME_MAX_LEN
static_always_inline void clib_mem_set_log2_default_hugepage_size(clib_mem_page_sz_t log2_page_sz)
static void clib_mem_vm_free(void *addr, uword size)
clib_mem_bulk_handle_t clib_mem_bulk_init(u32 elt_sz, u32 align, u32 min_elts_per_chunk)
static_always_inline int vlib_mem_get_next_numa_node(int numa)
clib_mem_page_sz_t log2_page_sz
u64 * clib_mem_vm_get_paddr(void *mem, clib_mem_page_sz_t log2_page_size, int n_pages)
u8 * format_clib_mem_page_stats(u8 *s, va_list *va)
static_always_inline uword os_get_thread_index(void)
void * clib_mem_init_thread_safe(void *memory, uword memory_size)
u8 * format_clib_mem_bulk(u8 *s, va_list *args)
int clib_mem_vm_create_fd(clib_mem_page_sz_t log2_page_size, char *fmt,...)
static_always_inline clib_mem_page_sz_t clib_mem_get_log2_default_hugepage_size()
static void * clib_mem_vm_alloc(uword size)
void * clib_mem_bulk_alloc(clib_mem_bulk_handle_t h)
clib_mem_main_t clib_mem_main
void clib_mem_get_page_stats(void *start, clib_mem_page_sz_t log2_page_size, uword n_pages, clib_mem_page_stats_t *stats)
uword clib_mem_get_fd_page_size(int fd)
CLIB_MEM_PAGE_SZ_DEFAULT
#define CLIB_MEM_UNPOISON(a, s)
static void * clib_mem_set_per_cpu_heap(void *new_heap)
void clib_mem_vm_randomize_va(uword *requested_va, clib_mem_page_sz_t log2_page_size)
static_always_inline uword clib_mem_get_page_size(void)
DLMALLOC_EXPORT void mspace_put(mspace msp, void *p)
clib_mem_page_sz_t log2_default_hugepage_sz
static_always_inline clib_mem_page_sz_t clib_mem_get_log2_page_size(void)
void * clib_mem_vm_map_internal(void *base, clib_mem_page_sz_t log2_page_sz, uword size, int fd, uword offset, char *name)
void * clib_mem_init(void *base, uword size)
clib_mem_heap_flag_t flags
void mheap_trace(clib_mem_heap_t *v, int enable)
clib_mem_vm_map_hdr_t * last_map
static_always_inline uword os_get_numa_index(void)
static void * clib_mem_alloc_aligned_at_offset(uword size, uword align, uword align_offset, int os_out_of_memory_on_failure)
int clib_mem_is_traced(void)
static void * clib_mem_alloc_aligned(uword size, uword align)
DLMALLOC_EXPORT int mspace_is_heap_object(mspace msp, void *p)
clib_mem_page_sz_t clib_mem_get_fd_log2_page_size(int fd)
void * clib_mem_init_with_page_size(uword memory_size, clib_mem_page_sz_t log2_page_sz)
void clib_mem_bulk_destroy(clib_mem_bulk_handle_t h)
void clib_mem_get_heap_usage(clib_mem_heap_t *heap, clib_mem_usage_t *usage)
static_always_inline clib_error_t * clib_mem_get_last_error(void)
CLIB_MEM_PAGE_SZ_UNKNOWN
void clib_mem_main_init()
uword bytes_free_reclaimed
static clib_mem_heap_t * clib_mem_set_heap(clib_mem_heap_t *heap)
#define clib_atomic_bool_cmp_and_swap(addr, old, new)
static uword clib_mem_size_nocheck(void *p)
static void * clib_mem_alloc(uword size)
void clib_mem_trace(int enable)
static uword clib_mem_is_heap_object(void *p)
u8 * format_clib_mem_usage(u8 *s, va_list *args)
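The clib_mem_create_heap, clib_mem_set_heap, clib_mem_get_heap, and clib_mem_destroy_heap declarations listed above form the heap-switching API. A sketch of the usual save-and-restore pattern follows, assuming a main heap has already been set up with clib_mem_init(); the 1 MB size and the heap name are illustrative.

#include <vppinfra/mem.h>

static void
alloc_from_private_heap (void)
{
  /* Create a separate, locked heap; size and name are examples only. */
  clib_mem_heap_t *h =
    clib_mem_create_heap (0, 1 << 20, 1 /* locked */, "example heap");

  /* clib_mem_set_heap () returns the previous per-thread heap so the
     caller can restore it afterwards. */
  clib_mem_heap_t *old = clib_mem_set_heap (h);

  void *p = clib_mem_alloc (512);	/* served from the private heap */
  clib_mem_free (p);

  clib_mem_set_heap (old);		/* restore the original heap */
  clib_mem_destroy_heap (h);
}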
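The clib_mem_bulk_* declarations describe a fixed-size element allocator. The sketch below shows its lifecycle; the element type, alignment, and minimum elements per chunk are assumptions made for illustration.

#include <vppinfra/mem.h>

typedef struct
{
  u64 key;
  u64 value;
} example_elt_t;		/* hypothetical element type */

static void
bulk_example (void)
{
  /* One pool of fixed-size elements: element size, alignment,
     minimum elements per chunk. */
  clib_mem_bulk_handle_t h =
    clib_mem_bulk_init (sizeof (example_elt_t), 16, 32);

  example_elt_t *e = clib_mem_bulk_alloc (h);
  e->key = 1;
  e->value = 2;

  clib_mem_bulk_free (h, e);
  clib_mem_bulk_destroy (h);
}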
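clib_mem_vm_map and clib_mem_vm_unmap manage named virtual-memory mappings tracked through clib_mem_vm_map_hdr_t. A sketch under the assumption that the memory subsystem is already initialized; the 2 MB size and the mapping name are illustrative.

#include <vppinfra/mem.h>

static void
vm_map_example (void)
{
  /* Anonymous mapping with the default page size; named for tracking. */
  void *base = clib_mem_vm_map (0, 2 << 20, CLIB_MEM_PAGE_SZ_DEFAULT,
				"example mapping");
  if (base == CLIB_MEM_VM_MAP_FAILED)
    return;

  /* ... use the mapping ... */

  clib_mem_vm_unmap (base);
}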