FD.io VPP v21.06-3-gbb25fbf28 (Vector Packet Processing)
dlmalloc source listing (excerpt)
14 #pragma warning( disable : 4146 )
23 #if DLM_ABORT_ON_ASSERT_FAILURE
25 #define assert(x) if(!(x)) DLM_ABORT
35 #if !defined(WIN32) && !defined(LACKS_TIME_H)
38 #ifndef LACKS_STDLIB_H
41 #ifndef LACKS_STRING_H
45 #ifndef LACKS_STRINGS_H
50 #ifndef LACKS_SYS_MMAN_H
52 #if (defined(linux) && !defined(__USE_GNU))
64 #ifndef LACKS_UNISTD_H
67 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
68 extern void* sbrk(ptrdiff_t);
75 #if defined (__SVR4) && defined (__sun)
77 #elif !defined(LACKS_SCHED_H)
80 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
83 #elif defined(_MSC_VER)
89 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
90 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
95 #pragma intrinsic (_InterlockedCompareExchange)
96 #pragma intrinsic (_InterlockedExchange)
97 #define interlockedcompareexchange _InterlockedCompareExchange
98 #define interlockedexchange _InterlockedExchange
99 #elif defined(WIN32) && defined(__GNUC__)
100 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
101 #define interlockedexchange __sync_lock_test_and_set
107 #define LOCK_AT_FORK 0
111 #if defined(_MSC_VER) && _MSC_VER>=1300
112 #ifndef BitScanForward
116 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
117 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
122 #define BitScanForward _BitScanForward
123 #define BitScanReverse _BitScanReverse
124 #pragma intrinsic(_BitScanForward)
125 #pragma intrinsic(_BitScanReverse)
130 #ifndef malloc_getpagesize
132 # ifndef _SC_PAGE_SIZE
133 # define _SC_PAGE_SIZE _SC_PAGESIZE
136 # ifdef _SC_PAGE_SIZE
137 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
139 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
140 extern size_t getpagesize();
141 # define malloc_getpagesize getpagesize()
144 # define malloc_getpagesize getpagesize()
146 # ifndef LACKS_SYS_PARAM_H
147 # include <sys/param.h>
149 # ifdef EXEC_PAGESIZE
150 # define malloc_getpagesize EXEC_PAGESIZE
154 # define malloc_getpagesize NBPG
156 # define malloc_getpagesize (NBPG * CLSIZE)
160 # define malloc_getpagesize NBPC
163 # define malloc_getpagesize PAGESIZE
165 # define malloc_getpagesize ((size_t)4096U)
179 #define SIZE_T_SIZE (sizeof(size_t))
180 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
184 #define SIZE_T_ZERO ((size_t)0)
185 #define SIZE_T_ONE ((size_t)1)
186 #define SIZE_T_TWO ((size_t)2)
187 #define SIZE_T_FOUR ((size_t)4)
188 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
189 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
190 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
191 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
194 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
197 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
200 #define align_offset(A)\
201 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
202 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
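/* Editor's sketch (not part of dlmalloc.c): what align_offset() above computes,
   assuming MALLOC_ALIGNMENT == 16 (so CHUNK_ALIGN_MASK == 15): the padding
   needed to round an address up to the next alignment boundary, or 0 if the
   address is already aligned. */
#include <assert.h>
#include <stddef.h>

static size_t sketch_align_offset(size_t a, size_t alignment) {
  size_t mask = alignment - 1;                            /* CHUNK_ALIGN_MASK */
  return ((a & mask) == 0) ? 0 : ((alignment - (a & mask)) & mask);
}

int main(void) {
  assert(sketch_align_offset(0x1000, 16) == 0);   /* already aligned  */
  assert(sketch_align_offset(0x1001, 16) == 15);  /* pad up to 0x1010 */
  assert(sketch_align_offset(0x1008, 16) == 8);
  return 0;
}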
214 #define MFAIL ((void*)(MAX_SIZE_T))
215 #define CMFAIL ((char*)(MFAIL))
220 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
221 #define MMAP_PROT (PROT_READ|PROT_WRITE)
222 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
223 #define MAP_ANONYMOUS MAP_ANON
226 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
227 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
233 #define MMAP_FLAGS (MAP_PRIVATE)
234 static int dev_zero_fd = -1;
235 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
236 (dev_zero_fd = open("/dev/zero", O_RDWR), \
237 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
238 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
241 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
247 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
248 return (ptr != 0)? ptr: MFAIL;
253 void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
255 return (ptr != 0)? ptr: MFAIL;
260 MEMORY_BASIC_INFORMATION minfo;
261 char* cptr = (char*)ptr;
263 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
265 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
266 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
268 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
270 cptr += minfo.RegionSize;
271 size -= minfo.RegionSize;
276 #define MMAP_DEFAULT(s) win32mmap(s)
277 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
278 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
284 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
293 #define CALL_MORECORE(S) MORECORE(S)
295 #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
298 #define CALL_MORECORE(S) MFAIL
305 #define USE_MMAP_BIT (SIZE_T_ONE)
308 #define CALL_MMAP(s) MMAP(s)
310 #define CALL_MMAP(s) MMAP_DEFAULT(s)
313 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
315 #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
318 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
320 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
323 #define USE_MMAP_BIT (SIZE_T_ZERO)
325 #define MMAP(s) MFAIL
326 #define MUNMAP(a, s) (-1)
327 #define DIRECT_MMAP(s) MFAIL
328 #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
329 #define CALL_MMAP(s) MMAP(s)
330 #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
336 #if HAVE_MMAP && HAVE_MREMAP
338 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
340 #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
343 #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
347 #define USE_NONCONTIGUOUS_BIT (4U)
350 #define USE_NOEXPAND_BIT (8U)
353 #define USE_TRACE_BIT (16U)
356 #define EXTERN_BIT (8U)
390 #define USE_LOCK_BIT (0U)
391 #define INITIAL_LOCK(l) (0)
392 #define DESTROY_LOCK(l) (0)
393 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
394 #define RELEASE_MALLOC_GLOBAL_LOCK()
412 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
413 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
414 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
416 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
422 __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
424 : "r" (val), "m" (*(sl)), "0"(cmp)
433 __asm__ __volatile__ ("lock; xchgl %0, %1"
435 : "m" (*(sl)), "0"(prev)
439 #define CAS_LOCK(sl) x86_cas_lock(sl)
440 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
443 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
444 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
449 #define SPINS_PER_YIELD 63
450 #if defined(_MSC_VER)
451 #define SLEEP_EX_DURATION 50
452 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
453 #elif defined (__SVR4) && defined (__sun)
454 #define SPIN_LOCK_YIELD thr_yield();
455 #elif !defined(LACKS_SCHED_H)
456 #define SPIN_LOCK_YIELD sched_yield();
458 #define SPIN_LOCK_YIELD
461 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
464 static int spin_acquire_lock(int *sl) {
466 while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
467 if ((++spins & SPINS_PER_YIELD) == 0) {
475 #define TRY_LOCK(sl) !CAS_LOCK(sl)
476 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
477 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
478 #define INITIAL_LOCK(sl) (*sl = 0)
479 #define DESTROY_LOCK(sl) (0)
480 static MLOCK_T malloc_global_mutex = 0;
485 #define THREAD_ID_T DWORD
486 #define CURRENT_THREAD GetCurrentThreadId()
487 #define EQ_OWNER(X,Y) ((X) == (Y))
494 #define THREAD_ID_T pthread_t
495 #define CURRENT_THREAD pthread_self()
496 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
499 struct malloc_recursive_lock {
502 THREAD_ID_T threadid;
505 #define MLOCK_T struct malloc_recursive_lock
506 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
508 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
515 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
516 THREAD_ID_T mythreadid = CURRENT_THREAD;
519 if (*((volatile int *)(&lk->sl)) == 0) {
520 if (!CAS_LOCK(&lk->sl)) {
521 lk->threadid = mythreadid;
526 else if (EQ_OWNER(lk->threadid, mythreadid)) {
530 if ((++spins & SPINS_PER_YIELD) == 0) {
536 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
537 THREAD_ID_T mythreadid = CURRENT_THREAD;
538 if (*((volatile int *)(&lk->sl)) == 0) {
539 if (!CAS_LOCK(&lk->sl)) {
540 lk->threadid = mythreadid;
545 else if (EQ_OWNER(lk->threadid, mythreadid)) {
552 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
553 #define TRY_LOCK(lk) recursive_try_lock(lk)
554 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
555 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
556 #define DESTROY_LOCK(lk) (0)
560 #define MLOCK_T CRITICAL_SECTION
561 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
562 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
563 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
564 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
565 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
566 #define NEED_GLOBAL_LOCK_INIT
568 static MLOCK_T malloc_global_mutex;
569 static volatile LONG malloc_global_mutex_status;
572 static void init_malloc_global_mutex() {
574 long stat = malloc_global_mutex_status;
579 interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
580 InitializeCriticalSection(&malloc_global_mutex);
581 interlockedexchange(&malloc_global_mutex_status, (LONG)1);
589 #define MLOCK_T pthread_mutex_t
590 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
591 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
592 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
593 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
594 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
596 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
599 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
601 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
602 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
605 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
607 static int pthread_init_lock (MLOCK_T *lk) {
608 pthread_mutexattr_t attr;
609 if (pthread_mutexattr_init(&attr)) return 1;
610 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
611 if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
613 if (pthread_mutex_init(lk, &attr)) return 1;
614 if (pthread_mutexattr_destroy(&attr)) return 1;
621 #define USE_LOCK_BIT (2U)
623 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
624 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
627 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
628 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
786 #define MCHUNK_SIZE (sizeof(mchunk))
789 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
791 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
795 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
797 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
800 #define MIN_CHUNK_SIZE\
801 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
804 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
805 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
807 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
810 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
811 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
814 #define pad_request(req) \
815 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
818 #define request2size(req) \
819 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
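/* Editor's sketch (not part of dlmalloc.c): the pad_request()/request2size()
   arithmetic above, assuming a 64-bit build with MALLOC_ALIGNMENT == 16,
   CHUNK_OVERHEAD == 8 (FOOTERS disabled) and MIN_CHUNK_SIZE == 32. */
#include <assert.h>
#include <stddef.h>

#define SK_ALIGN_MASK     ((size_t)15)
#define SK_OVERHEAD       ((size_t)8)
#define SK_MIN_CHUNK_SIZE ((size_t)32)
#define SK_MIN_REQUEST    (SK_MIN_CHUNK_SIZE - SK_OVERHEAD - 1)

static size_t sk_pad_request(size_t req) {
  return (req + SK_OVERHEAD + SK_ALIGN_MASK) & ~SK_ALIGN_MASK;
}
static size_t sk_request2size(size_t req) {
  return (req < SK_MIN_REQUEST) ? SK_MIN_CHUNK_SIZE : sk_pad_request(req);
}

int main(void) {
  assert(sk_request2size(1)   == 32);   /* tiny requests get the minimum chunk */
  assert(sk_request2size(24)  == 32);   /* 24 + 8 overhead still fits in 32    */
  assert(sk_request2size(25)  == 48);   /* crosses an alignment boundary       */
  assert(sk_request2size(100) == 112);  /* (100 + 8 + 15) & ~15                */
  return 0;
}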
832 #define PINUSE_BIT (SIZE_T_ONE)
833 #define CINUSE_BIT (SIZE_T_TWO)
834 #define FLAG4_BIT (SIZE_T_FOUR)
835 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
836 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
839 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
842 #define cinuse(p) ((p)->head & CINUSE_BIT)
843 #define pinuse(p) ((p)->head & PINUSE_BIT)
844 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
845 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
846 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
848 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
850 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
851 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
852 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
855 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
856 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
859 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
860 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
863 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
866 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
867 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
870 #define set_size_and_pinuse_of_free_chunk(p, s)\
871 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
874 #define set_free_with_pinuse(p, s, n)\
875 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
878 #define overhead_for(p)\
879 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
883 #define calloc_must_clear(p) (!is_mmapped(p))
885 #define calloc_must_clear(p) (1)
996 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1062 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1063 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1156 #define NSMALLBINS (32U)
1157 #define NTREEBINS (32U)
1158 #define SMALLBIN_SHIFT (3U)
1159 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1160 #define TREEBIN_SHIFT (8U)
1161 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1162 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1163 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
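/* Editor's sketch (not part of dlmalloc.c): how the bin parameters above map
   chunk sizes to small bins; with SMALLBIN_SHIFT == 3 there are 32 bins spaced
   8 bytes apart, and sizes below MIN_LARGE_SIZE (256) count as "small". */
#include <assert.h>
#include <stddef.h>

static unsigned sk_small_index(size_t s)        { return (unsigned)(s >> 3); }
static size_t   sk_small_index2size(unsigned i) { return (size_t)i << 3; }
static int      sk_is_small(size_t s)           { return (s >> 3) < 32; }

int main(void) {
  assert(sk_is_small(248) && !sk_is_small(256)); /* 256-byte chunks use tree bins  */
  assert(sk_small_index(32) == 4);               /* a 32-byte chunk lands in bin 4 */
  assert(sk_small_index2size(4) == 32);          /* and maps back to 32 bytes      */
  assert(sk_small_index(48) == 6);
  return 0;
}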
1213 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1220 #define is_global(M) ((M) == &_gm_)
1224 #define is_initialized(M) ((M)->top != 0)
1230 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1231 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1233 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1235 #define disable_lock(M)
1238 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1239 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1241 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1243 #define disable_mmap(M)
1246 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1247 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1248 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1249 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1250 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1251 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1252 #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1254 #define set_lock(M,L)\
1255 ((M)->mflags = (L)?\
1256 ((M)->mflags | USE_LOCK_BIT) :\
1257 ((M)->mflags & ~USE_LOCK_BIT))
1260 #define page_align(S)\
1261 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1264 #define granularity_align(S)\
1265 (((S) + (mparams.granularity - SIZE_T_ONE))\
1266 & ~(mparams.granularity - SIZE_T_ONE))
1271 #define mmap_align(S) granularity_align(S)
1273 #define mmap_align(S) page_align(S)
1277 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1279 #define is_page_aligned(S)\
1280 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1281 #define is_granularity_aligned(S)\
1282 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1285 #define segment_holds(S, A)\
1286 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1295 if ((sp = sp->next) == 0)
1305 if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1307 if ((sp = sp->next) == 0)
1312 #ifndef MORECORE_CANNOT_TRIM
1313 #define should_trim(M,s) ((s) > (M)->trim_check)
1315 #define should_trim(M,s) (0)
1323 #define TOP_FOOT_SIZE\
1324 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1336 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1337 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1341 #define PREACTION(M) (0)
1345 #define POSTACTION(M)
1358 #if PROCEED_ON_ERROR
1361 int malloc_corruption_error_count;
1364 static void reset_on_error(mstate m);
1366 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1367 #define USAGE_ERROR_ACTION(m, p)
1371 #ifndef CORRUPTION_ERROR_ACTION
1372 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1375 #ifndef USAGE_ERROR_ACTION
1376 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1386 #define check_free_chunk(M,P)
1387 #define check_inuse_chunk(M,P)
1388 #define check_malloced_chunk(M,P,N)
1389 #define check_mmapped_chunk(M,P)
1390 #define check_malloc_state(M)
1391 #define check_top_chunk(M,P)
1394 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
1395 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1396 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
1397 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1398 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1399 #define check_malloc_state(M) do_check_malloc_state(M)
1406 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1410 static void do_check_malloc_state(mstate m);
1412 static size_t traverse_and_check(mstate m);
1417 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1418 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1419 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1420 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1423 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1424 #define treebin_at(M,i) (&((M)->treebins[i]))
1427 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1428 #define compute_tree_index(S, I)\
1430 unsigned int X = S >> TREEBIN_SHIFT;\
1433 else if (X > 0xFFFF)\
1436 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1437 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1441 #elif defined (__INTEL_COMPILER)
1442 #define compute_tree_index(S, I)\
1444 size_t X = S >> TREEBIN_SHIFT;\
1447 else if (X > 0xFFFF)\
1450 unsigned int K = _bit_scan_reverse (X); \
1451 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1455 #elif defined(_MSC_VER) && _MSC_VER>=1300
1456 #define compute_tree_index(S, I)\
1458 size_t X = S >> TREEBIN_SHIFT;\
1461 else if (X > 0xFFFF)\
1465 _BitScanReverse((DWORD *) &K, (DWORD) X);\
1466 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1471 #define compute_tree_index(S, I)\
1473 size_t X = S >> TREEBIN_SHIFT;\
1476 else if (X > 0xFFFF)\
1479 unsigned int Y = (unsigned int)X;\
1480 unsigned int N = ((Y - 0x100) >> 16) & 8;\
1481 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1483 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1484 K = 14 - N + ((Y <<= K) >> 15);\
1485 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1491 #define bit_for_tree_index(i) \
1492 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1495 #define leftshift_for_tree_index(i) \
1496 ((i == NTREEBINS-1)? 0 : \
1497 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1500 #define minsize_for_tree_index(i) \
1501 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1502 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
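/* Editor's sketch (not part of dlmalloc.c, GCC-only): the value the
   compute_tree_index() variants above produce. Each power-of-two range of
   sizes at or above MIN_LARGE_SIZE is split into two tree bins, selected by
   the bit just below the leading one of the size. */
#include <assert.h>
#include <stddef.h>

static unsigned sk_tree_index(size_t s) {
  size_t x = s >> 8;                          /* TREEBIN_SHIFT == 8 */
  if (x == 0)     return 0;
  if (x > 0xFFFF) return 31;                  /* NTREEBINS - 1      */
  unsigned k = (unsigned)(sizeof(x) * 8 - 1) - (unsigned)__builtin_clzl(x);
  return (k << 1) + (unsigned)((s >> (k + 7)) & 1);
}

int main(void) {
  assert(sk_tree_index(256)  == 0);   /* [256, 384)   */
  assert(sk_tree_index(384)  == 1);   /* [384, 512)   */
  assert(sk_tree_index(512)  == 2);   /* [512, 768)   */
  assert(sk_tree_index(768)  == 3);   /* [768, 1024)  */
  assert(sk_tree_index(1024) == 4);   /* [1024, 1536) */
  return 0;
}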
1508 #define idx2bit(i) ((binmap_t)(1) << (i))
1511 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1512 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1513 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1515 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1516 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1517 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1520 #define least_bit(x) ((x) & -(x))
1523 #define left_bits(x) ((x<<1) | -(x<<1))
1526 #define same_or_left_bits(x) ((x) | -(x))
1530 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1531 #define compute_bit2idx(X, I)\
1534 J = __builtin_ctz(X); \
1538 #elif defined (__INTEL_COMPILER)
1539 #define compute_bit2idx(X, I)\
1542 J = _bit_scan_forward (X); \
1546 #elif defined(_MSC_VER) && _MSC_VER>=1300
1547 #define compute_bit2idx(X, I)\
1550 _BitScanForward((DWORD *) &J, X);\
1554 #elif USE_BUILTIN_FFS
1555 #define compute_bit2idx(X, I) I = ffs(X)-1
1558 #define compute_bit2idx(X, I)\
1560 unsigned int Y = X - 1;\
1561 unsigned int K = Y >> (16-4) & 16;\
1562 unsigned int N = K; Y >>= K;\
1563 N += K = Y >> (8-3) & 8; Y >>= K;\
1564 N += K = Y >> (4-2) & 4; Y >>= K;\
1565 N += K = Y >> (2-1) & 2; Y >>= K;\
1566 N += K = Y >> (1-0) & 1; Y >>= K;\
1567 I = (bindex_t)(N + Y);\
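/* Editor's sketch (not part of dlmalloc.c): how the bitmap helpers above are
   used together when scanning for a non-empty bin. least_bit() isolates the
   lowest set bit of a binmap and compute_bit2idx() converts it back to a bin
   index (shown here with the GCC builtin form). */
#include <assert.h>

int main(void) {
  unsigned binmap = 0x28u;                        /* bins 3 and 5 are non-empty */
  unsigned lsb    = binmap & (0u - binmap);       /* least_bit(): 0x8           */
  unsigned idx    = (unsigned)__builtin_ctz(lsb); /* compute_bit2idx(): 3       */
  assert(lsb == 0x8u && idx == 3u);
  return 0;
}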
1602 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1604 #define ok_next(p, n) ((char*)(p) < (char*)(n))
1606 #define ok_inuse(p) is_inuse(p)
1608 #define ok_pinuse(p) pinuse(p)
1611 #define ok_address(M, a) (1)
1612 #define ok_next(b, n) (1)
1613 #define ok_inuse(p) (1)
1614 #define ok_pinuse(p) (1)
1617 #if (FOOTERS && !INSECURE)
1626 #define ok_magic(M) (1)
1631 #if defined(__GNUC__) && __GNUC__ >= 3
1632 #define RTCHECK(e) __builtin_expect(e, 1)
1634 #define RTCHECK(e) (e)
1637 #define RTCHECK(e) (1)
1644 #define mark_inuse_foot(M,p,s)
1649 #define set_inuse(M,p,s)\
1650 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1651 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1654 #define set_inuse_and_pinuse(M,p,s)\
1655 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1656 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1659 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1660 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1665 #define mark_inuse_foot(M,p,s)\
1666 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1668 #define get_mstate_for(p)\
1669 ((mstate)(((mchunkptr)((char*)(p) +\
1670 (chunksize(p))))->prev_foot ^ mparams.magic))
1672 #define set_inuse(M,p,s)\
1673 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1674 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1675 mark_inuse_foot(M,p,s))
1677 #define set_inuse_and_pinuse(M,p,s)\
1678 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1679 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1680 mark_inuse_foot(M,p,s))
1682 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1683 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1684 mark_inuse_foot(M, p, s))
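/* Editor's sketch (not part of dlmalloc.c): the FOOTERS scheme used by
   mark_inuse_foot()/get_mstate_for() above. The owning mstate is stored in the
   next chunk's prev_foot XOR-ed with the random mparams.magic, so XOR-ing with
   magic again recovers it. The pointer and magic below are made-up values. */
#include <assert.h>
#include <stdint.h>

int main(void) {
  uintptr_t m     = (uintptr_t)0x400000u;   /* hypothetical mstate address    */
  uintptr_t magic = (uintptr_t)0x5aa5c33cu; /* hypothetical mparams.magic     */
  uintptr_t foot  = m ^ magic;              /* what mark_inuse_foot() stores  */
  assert((foot ^ magic) == m);              /* what get_mstate_for() computes */
  return 0;
}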
1691 static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1692 static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1693 static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1698 #ifdef NEED_GLOBAL_LOCK_INIT
1699 if (malloc_global_mutex_status <= 0)
1700 init_malloc_global_mutex();
1714 SYSTEM_INFO system_info;
1715 GetSystemInfo(&system_info);
1716 psize = system_info.dwPageSize;
1728 if ((sizeof(size_t) != sizeof(char*)) ||
1730 (sizeof(int) < 4) ||
1741 #if MORECORE_CONTIGUOUS
1753 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1757 #ifndef DLM_MAGIC_CONSTANT
1760 unsigned char buf[sizeof(size_t)];
1762 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1763 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1770 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1771 #elif defined(LACKS_TIME_H)
1772 magic = (size_t)&magic ^ (size_t)0x55555555U;
1774 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1776 magic |= (size_t)8U;
1777 magic &= ~(size_t)7U;
1795 switch(param_number) {
1853 do_check_any_chunk(m, p);
1859 do_check_mmapped_chunk(m, p);
1866 do_check_any_chunk(m, p);
1870 if (p != m->dv && p != m->top) {
1886 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1890 do_check_inuse_chunk(m, p);
1931 if (u->child[0] != 0) {
1934 do_check_tree(m, u->child[0]);
1936 if (u->child[1] != 0) {
1939 do_check_tree(m, u->child[1]);
1954 int empty = (m->treemap & (1U << i)) == 0;
1958 do_check_tree(m, t);
1965 unsigned int empty = (m->smallmap & (1U << i)) == 0;
1969 for (; p != b; p = p->bk) {
1973 do_check_free_chunk(m, p);
1980 do_check_inuse_chunk(m, q);
1996 } while ((p = p->fd) != b);
2014 } while ((u = u->fd) != t);
2022 static size_t traverse_and_check(mstate m) {
2036 do_check_inuse_chunk(m, q);
2039 assert(q == m->dv || bin_find(m, q));
2041 do_check_free_chunk(m, q);
2054 static void do_check_malloc_state(mstate m) {
2059 do_check_smallbin(m, i);
2061 do_check_treebin(m, i);
2064 do_check_any_chunk(m, m->dv);
2071 do_check_top_chunk(m, m->top);
2077 total = traverse_and_check(m);
2088 struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2117 nm.fordblks = mfree;
2127 #if !NO_MALLOC_STATS
2153 fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2154 fprintf(stderr, "system bytes     = %10lu\n", (unsigned long)(fp));
2155 fprintf(stderr, "in use bytes     = %10lu\n", (unsigned long)(used));
2170 #define insert_small_chunk(M, P, S) {\
2171 bindex_t I = small_index(S);\
2172 mchunkptr B = smallbin_at(M, I);\
2174 assert(S >= MIN_CHUNK_SIZE);\
2175 if (!smallmap_is_marked(M, I))\
2176 mark_smallmap(M, I);\
2177 else if (RTCHECK(ok_address(M, B->fd)))\
2180 CORRUPTION_ERROR_ACTION(M);\
2189 #define unlink_small_chunk(M, P, S) {\
2190 mchunkptr F = P->fd;\
2191 mchunkptr B = P->bk;\
2192 bindex_t I = small_index(S);\
2195 assert(chunksize(P) == small_index2size(I));\
2196 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2198 clear_smallmap(M, I);\
2200 else if (RTCHECK(B == smallbin_at(M,I) ||\
2201 (ok_address(M, B) && B->fd == P))) {\
2206 CORRUPTION_ERROR_ACTION(M);\
2210 CORRUPTION_ERROR_ACTION(M);\
2215 #define unlink_first_small_chunk(M, B, P, I) {\
2216 mchunkptr F = P->fd;\
2219 assert(chunksize(P) == small_index2size(I));\
2221 clear_smallmap(M, I);\
2223 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2228 CORRUPTION_ERROR_ACTION(M);\
2234 #define replace_dv(M, P, S) {\
2235 size_t DVS = M->dvsize;\
2236 assert(is_small(DVS));\
2238 mchunkptr DV = M->dv;\
2239 insert_small_chunk(M, DV, DVS);\
2248 #define insert_large_chunk(M, X, S) {\
2251 compute_tree_index(S, I);\
2252 H = treebin_at(M, I);\
2254 X->child[0] = X->child[1] = 0;\
2255 if (!treemap_is_marked(M, I)) {\
2256 mark_treemap(M, I);\
2258 X->parent = (tchunkptr)H;\
2263 size_t K = S << leftshift_for_tree_index(I);\
2265 if (chunksize(T) != S) {\
2266 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2270 else if (RTCHECK(ok_address(M, C))) {\
2277 CORRUPTION_ERROR_ACTION(M);\
2282 tchunkptr F = T->fd;\
2283 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2291 CORRUPTION_ERROR_ACTION(M);\
2316 #define unlink_large_chunk(M, X) {\
2317 tchunkptr XP = X->parent;\
2320 tchunkptr F = X->fd;\
2322 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2327 CORRUPTION_ERROR_ACTION(M);\
2332 if (((R = *(RP = &(X->child[1]))) != 0) ||\
2333 ((R = *(RP = &(X->child[0]))) != 0)) {\
2335 while ((*(CP = &(R->child[1])) != 0) ||\
2336 (*(CP = &(R->child[0])) != 0)) {\
2339 if (RTCHECK(ok_address(M, RP)))\
2342 CORRUPTION_ERROR_ACTION(M);\
2347 tbinptr* H = treebin_at(M, X->index);\
2349 if ((*H = R) == 0) \
2350 clear_treemap(M, X->index);\
2352 else if (RTCHECK(ok_address(M, XP))) {\
2353 if (XP->child[0] == X) \
2359 CORRUPTION_ERROR_ACTION(M);\
2361 if (RTCHECK(ok_address(M, R))) {\
2364 if ((C0 = X->child[0]) != 0) {\
2365 if (RTCHECK(ok_address(M, C0))) {\
2370 CORRUPTION_ERROR_ACTION(M);\
2372 if ((C1 = X->child[1]) != 0) {\
2373 if (RTCHECK(ok_address(M, C1))) {\
2378 CORRUPTION_ERROR_ACTION(M);\
2382 CORRUPTION_ERROR_ACTION(M);\
2389 #define insert_chunk(M, P, S)\
2390 if (is_small(S)) insert_small_chunk(M, P, S)\
2391 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2393 #define unlink_chunk(M, P, S)\
2394 if (is_small(S)) unlink_small_chunk(M, P, S)\
2395 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2401 #define internal_malloc(m, b) mspace_malloc(m, b)
2402 #define internal_free(m, mem) mspace_free(m,mem);
2405 #define internal_malloc(m, b)\
2406 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2407 #define internal_free(m, mem)\
2408 if (m == gm) dlfree(mem); else mspace_free(m,mem);
2410 #define internal_malloc(m, b) dlmalloc(b)
2411 #define internal_free(m, mem) dlfree(mem)
2445 if (m->least_addr == 0 || mm < m->least_addr)
2472 oldmmsize, newmmsize, flags);
2481 if (cp < m->least_addr)
2517 bin->fd = bin->bk = bin;
2521 #if PROCEED_ON_ERROR
2524 static void reset_on_error(mstate m) {
2526 ++malloc_corruption_error_count;
2546 size_t psize = (char*)oldfirst - (char*)p;
2548 size_t qsize = psize - nb;
2551 assert((char*)oldfirst > (char*)q);
2556 if (oldfirst == m->top) {
2557 size_t tsize = m->topsize += qsize;
2562 else if (oldfirst == m->dv) {
2563 size_t dsize = m->dvsize += qsize;
2587 char* old_top = (char*)m->top;
2589 char* old_end = oldsp->base + oldsp->size;
2593 char* asp = rawsp + offset;
2618 if ((char*)(&(nextp->head)) < old_end)
2626 if (csp != old_top) {
2628 size_t psize = csp - old_top;
2692 size_t ssize = asize;
2702 ssize += (page_align((size_t)base) - (size_t)base);
2706 (fp > m->footprint && fp <= m->footprint_limit)) &&
2769 size_t ssize = end - br;
2784 if (m->least_addr == 0 || tbase < m->least_addr)
2808 while (sp != 0 && tbase != sp->base + sp->size)
2818 if (tbase < m->least_addr)
2821 while (sp != 0 && sp->base != tbase + tsize)
2826 char* oldbase = sp->base;
2836 if (nb < m->topsize) {
2837 size_t rsize = m->topsize -= nb;
2857 size_t released = 0;
2862 char* base = sp->base;
2905 size_t released = 0;
2920 sp->size >= extra &&
2922 size_t newsize = sp->size - extra;
2938 if (old_br == sp->base + sp->size) {
2941 if (rel_br != CMFAIL && new_br < old_br)
2942 released = old_br - new_br;
2949 if (released != 0) {
2950 sp->size -= released;
2966 return (released != 0)? 1 : 0;
3005 size_t tsize = m->topsize += psize;
3014 else if (next == m->dv) {
3015 size_t dsize = m->dvsize += psize;
3060 if ((rsize = trem) == 0)
3065 if (rt != 0 && rt != t)
3074 if (t == 0 && v == 0) {
3076 if (leftbits != 0) {
3094 if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3192 smallbits = gm->smallmap >> idx;
3194 if ((smallbits & 0x3U) != 0) {
3196 idx += ~smallbits & 1;
3207 else if (nb > gm->dvsize) {
3208 if (smallbits != 0) {
3250 if (nb <= gm->dvsize) {
3251 size_t rsize = gm->dvsize - nb;
3260 size_t dvs = gm->dvsize;
3270 else if (nb < gm->topsize) {
3271 size_t rsize = gm->topsize -= nb;
3322 fm->footprint -= psize;
3347 size_t tsize = fm->topsize += psize;
3358 else if (next == fm->dv) {
3359 size_t dsize = fm->dvsize += psize;
3386 if (--fm->release_checks == 0)
3406 if (n_elements != 0) {
3407 req = n_elements * elem_size;
3408 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3409 (req / n_elements != elem_size))
3414 memset(mem, 0, req);
3433 else if (oldsize >= nb) {
3434 size_t rsize = oldsize - nb;
3444 if (oldsize + m->topsize > nb) {
3445 size_t newsize = oldsize + m->topsize;
3446 size_t newtopsize = newsize - nb;
3455 else if (next == m->dv) {
3457 if (oldsize + dvs >= nb) {
3458 size_t dsize = oldsize + dvs - nb;
3469 size_t newsize = oldsize + dvs;
3479 if (oldsize + nextsize >= nb) {
3480 size_t rsize = oldsize + nextsize - nb;
3483 size_t newsize = oldsize + nextsize;
3507 if ((alignment & (alignment-SIZE_T_ONE)) != 0) {
3509 while (a < alignment) a <<= 1;
3525 if ((((size_t)(mem)) & (alignment - 1)) != 0) {
3534 char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3540 size_t leadsize = pos - (char*)(p);
3541 size_t newsize = chunksize(p) - leadsize;
3545 newp->head = newsize;
3559 size_t remainder_size = size - nb;
3562 set_inuse(m, remainder, remainder_size);
3569 assert(((size_t)mem & (alignment - 1)) == 0);
3590 size_t element_size;
3591 size_t contents_size;
3595 size_t remainder_size;
3605 if (n_elements == 0)
3612 if (n_elements == 0)
3615 array_size = request2size(n_elements * (sizeof(void*)));
3621 contents_size = n_elements * element_size;
3626 for (i = 0; i != n_elements; ++i)
3630 size = contents_size + array_size;
3652 memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3657 size_t array_chunk_size;
3659 array_chunk_size = remainder_size - contents_size;
3660 marray = (void**) (chunk2mem(array_chunk));
3662 remainder_size = contents_size;
3666 for (i = 0; ; ++i) {
3668 if (i != n_elements-1) {
3669 if (element_size != 0)
3670 size = element_size;
3673 remainder_size -= size;
3684 if (marray != chunks) {
3686 if (element_size != 0) {
3687 assert(remainder_size == element_size);
3694 for (i = 0; i != n_elements; ++i)
3714 void** fence = &(array[nelem]);
3715 for (a = array; a != fence; ++a) {
3721 if (get_mstate_for(p) != m) {
3753 #if MALLOC_INSPECT_ALL
3754 static void internal_inspect_all(mstate m,
3755 void(*handler)(void *start,
3758 void* callback_arg),
3763 for (s = &m->seg; s != 0; s = s->next) {
3777 start = (void*)((char*)q + sizeof(struct malloc_chunk));
3783 if (start < (void*)next)
3784 handler(start, next, used, arg);
3806 #ifdef REALLOC_ZERO_BYTES_FREES
3807 else if (bytes == 0) {
3817 mstate m = get_mstate_for(oldp);
3834 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3855 mstate m = get_mstate_for(oldp);
3886 size_t d = alignment / sizeof(void*);
3887 size_t r = alignment % sizeof(void*);
3888 if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3920 size_t sz = elem_size;
3921 return ialloc(gm, n_elements, &sz, 3, chunks);
3926 return ialloc(gm, n_elements, sizes, 0, chunks);
3933 #if MALLOC_INSPECT_ALL
3934 void dlmalloc_inspect_all(void(*handler)(void *start,
3937 void* callback_arg),
3941 internal_inspect_all(gm, handler, arg);
3958 return gm->footprint;
3962 return gm->max_footprint;
3966 size_t maf = gm->footprint_limit;
3978 return gm->footprint_limit = result;
3987 #if !NO_MALLOC_STATS
4012 static mstate init_user_mstate(char* tbase, size_t tsize) {
4017 memset(m, 0, msize);
4044 char* tbase = (char*)(CALL_MMAP(tsize));
4046 m = init_user_mstate(tbase, tsize);
4061 m = init_user_mstate((char*)base, capacity);
4093 char* base = sp->base;
4115 this_seg = &ms->seg;
4117 *addrp = this_seg->base;
4118 *sizep = this_seg->size;
4130 this_seg = &ms->seg;
4135 base = this_seg->base;
4136 if (pp >= base && pp < (base + this_seg->size))
4138 this_seg = this_seg->next;
4165 int was_enabled = 0;
4175 return (was_enabled);
4190 unsigned long n_user_data_bytes,
4191 unsigned long align,
4194 unsigned long searchp;
4202 n_user_data_bytes += sizeof(unsigned);
4214 n_user_data_bytes -= sizeof(unsigned);
4233 n_user_data_bytes += align;
4240 searchp = (unsigned long)(rv + sizeof (unsigned));
4248 unsigned long where_now, delta;
4251 delta = align - where_now;
4256 wwp = (unsigned *)(searchp - sizeof(unsigned));
4257 *wwp = (searchp - (((unsigned long)rv) + sizeof (*wwp)));
4265 return (void *) searchp;
4271 char *object_header;
4276 wwp = (unsigned *)p_arg;
4280 object_header = (char *)wwp;
4281 object_header -= *wwp;
4292 #if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
4296 memset (object_header, 0x13, psize);
4322 char *object_header;
4326 wwp = (unsigned *)p;
4330 object_header = (char *)wwp;
4331 object_header -= *wwp;
4335 usable_size -= (*wwp + sizeof (*wwp));
4361 if ((smallbits & 0x3U) != 0) {
4363 idx += ~smallbits & 1;
4374 else if (nb > ms->dvsize) {
4375 if (smallbits != 0) {
4417 if (nb <= ms->dvsize) {
4418 size_t rsize = ms->dvsize - nb;
4437 else if (nb < ms->topsize) {
4438 size_t rsize = ms->topsize -= nb;
4483 fm->footprint -= psize;
4508 size_t tsize = fm->topsize += psize;
4519 else if (next == fm->dv) {
4520 size_t dsize = fm->dvsize += psize;
4547 if (--fm->release_checks == 0)
4569 if (n_elements != 0) {
4570 req = n_elements * elem_size;
4571 if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4572 (req / n_elements != elem_size))
4577 memset(mem, 0, req);
4589 #ifdef REALLOC_ZERO_BYTES_FREES
4590 else if (bytes == 0) {
4600 mstate m = get_mstate_for(oldp);
4617 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4626 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4638 mstate m = get_mstate_for(oldp);
4670 size_t elem_size, void* chunks[]) {
4671 size_t sz = elem_size;
4677 return ialloc(ms, n_elements, &sz, 3, chunks);
4681 size_t sizes[], void* chunks[]) {
4687 return ialloc(ms, n_elements, sizes, 0, chunks);
4690 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4694 #if MALLOC_INSPECT_ALL
4695 void mspace_inspect_all(mspace msp,
4696 void(*handler)(void *start,
4699 void* callback_arg),
4704 internal_inspect_all(ms, handler, arg);
4729 #if !NO_MALLOC_STATS
4765 size_t mspace_footprint_limit(mspace msp) {
4778 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {