#pragma warning( disable : 4146 )
#if DLM_ABORT_ON_ASSERT_FAILURE
#define assert(x) if(!(x)) DLM_ABORT
#if !defined(WIN32) && !defined(LACKS_TIME_H)
#ifndef LACKS_STDLIB_H
#ifndef LACKS_STRING_H
#ifndef LACKS_STRINGS_H
#ifndef LACKS_SYS_MMAN_H
#if (defined(linux) && !defined(__USE_GNU))
#ifndef LACKS_UNISTD_H
#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
extern void* sbrk(ptrdiff_t);
#if defined (__SVR4) && defined (__sun)
#elif !defined(LACKS_SCHED_H)
#if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
#elif defined(_MSC_VER)
LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
#pragma intrinsic (_InterlockedCompareExchange)
#pragma intrinsic (_InterlockedExchange)
#define interlockedcompareexchange _InterlockedCompareExchange
#define interlockedexchange _InterlockedExchange
#elif defined(WIN32) && defined(__GNUC__)
#define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
#define interlockedexchange __sync_lock_test_and_set
#define LOCK_AT_FORK 0
#if defined(_MSC_VER) && _MSC_VER>=1300
#ifndef BitScanForward
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#define BitScanForward _BitScanForward
#define BitScanReverse _BitScanReverse
#pragma intrinsic(_BitScanForward)
#pragma intrinsic(_BitScanReverse)
#ifndef malloc_getpagesize
# ifndef _SC_PAGE_SIZE
# define _SC_PAGE_SIZE _SC_PAGESIZE
# ifdef _SC_PAGE_SIZE
# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
extern size_t getpagesize();
# define malloc_getpagesize getpagesize()
# define malloc_getpagesize getpagesize()
# ifndef LACKS_SYS_PARAM_H
# include <sys/param.h>
# ifdef EXEC_PAGESIZE
# define malloc_getpagesize EXEC_PAGESIZE
# define malloc_getpagesize NBPG
# define malloc_getpagesize (NBPG * CLSIZE)
# define malloc_getpagesize NBPC
# define malloc_getpagesize PAGESIZE
# define malloc_getpagesize ((size_t)4096U)
#define SIZE_T_SIZE (sizeof(size_t))
#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
#define SIZE_T_ZERO ((size_t)0)
#define SIZE_T_ONE ((size_t)1)
#define SIZE_T_TWO ((size_t)2)
#define SIZE_T_FOUR ((size_t)4)
#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
#define align_offset(A)\
 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
#define MFAIL ((void*)(MAX_SIZE_T))
#define CMFAIL ((char*)(MFAIL))
#define MUNMAP_DEFAULT(a, s) munmap((a), (s))
#define MMAP_PROT (PROT_READ|PROT_WRITE)
#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
#define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
#define MMAP_FLAGS (MAP_PRIVATE)
static int dev_zero_fd = -1;
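A quick standalone sketch (not part of dlmalloc itself) of what the alignment macros above compute; the 16-byte MALLOC_ALIGNMENT is an assumed value for illustration.

#include <stdio.h>
#include <stddef.h>

#define MALLOC_ALIGNMENT ((size_t)16U)           /* assumed for illustration */
#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - 1)  /* mirrors the macro above */

/* Offset needed to round an address up to the next alignment boundary,
 * as align_offset() computes it. */
static size_t align_offset_of(size_t a) {
  return ((a & CHUNK_ALIGN_MASK) == 0)
           ? 0
           : ((MALLOC_ALIGNMENT - (a & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK);
}

int main(void) {
  size_t addrs[] = { 4096, 4097, 4104, 4111 };
  for (size_t i = 0; i < sizeof addrs / sizeof addrs[0]; ++i)
    printf("addr %zu -> offset %zu -> aligned %zu\n",
           addrs[i], align_offset_of(addrs[i]),
           addrs[i] + align_offset_of(addrs[i]));
  return 0;
}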
#define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
 (dev_zero_fd = open("/dev/zero", O_RDWR), \
  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
#define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
return (ptr != 0)? ptr: MFAIL;
void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
return (ptr != 0)? ptr: MFAIL;
MEMORY_BASIC_INFORMATION minfo;
char* cptr = (char*)ptr;
if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
    minfo.State != MEM_COMMIT || minfo.RegionSize > size)
if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
cptr += minfo.RegionSize;
size -= minfo.RegionSize;
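For reference, a minimal POSIX sketch of the anonymous-mmap pattern that MMAP_DEFAULT/MUNMAP_DEFAULT wrap (with the MAP_ANON fallback); it is an illustration, not code from this file.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
#define MAP_ANONYMOUS MAP_ANON
#endif

int main(void) {
  size_t len = 1 << 20;                       /* one 1 MiB reservation */
  void *p = mmap(0, len, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (p == MAP_FAILED) { perror("mmap"); return 1; }
  memset(p, 0xab, len);                       /* touch the pages */
  if (munmap(p, len) != 0) { perror("munmap"); return 1; }
  puts("mapped and released 1 MiB");
  return 0;
}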
#define MMAP_DEFAULT(s) win32mmap(s)
#define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
#define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
#define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
#define CALL_MORECORE(S) MORECORE(S)
#define CALL_MORECORE(S) MORECORE_DEFAULT(S)
#define CALL_MORECORE(S) MFAIL
#define USE_MMAP_BIT (SIZE_T_ONE)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MMAP(s) MMAP_DEFAULT(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
#define USE_MMAP_BIT (SIZE_T_ZERO)
#define MMAP(s) MFAIL
#define MUNMAP(a, s) (-1)
#define DIRECT_MMAP(s) MFAIL
#define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
#define CALL_MMAP(s) MMAP(s)
#define CALL_MUNMAP(a, s) MUNMAP((a), (s))
#if HAVE_MMAP && HAVE_MREMAP
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
#define USE_NONCONTIGUOUS_BIT (4U)
#define USE_NOEXPAND_BIT (8U)
#define USE_TRACE_BIT (16U)
#define EXTERN_BIT (8U)
#define USE_LOCK_BIT (0U)
#define INITIAL_LOCK(l) (0)
#define DESTROY_LOCK(l) (0)
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define RELEASE_MALLOC_GLOBAL_LOCK()
#if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
#define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
#define CLEAR_LOCK(sl) __sync_lock_release(sl)
#elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
__asm__ __volatile__ ("lock; cmpxchgl %1, %2"
                      : "r" (val), "m" (*(sl)), "0"(cmp)
__asm__ __volatile__ ("lock; xchgl %0, %1"
                      : "m" (*(sl)), "0"(prev)
#define CAS_LOCK(sl) x86_cas_lock(sl)
#define CLEAR_LOCK(sl) x86_clear_lock(sl)
#define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
#define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
#define SPINS_PER_YIELD 63
#if defined(_MSC_VER)
#define SLEEP_EX_DURATION 50
#define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
#elif defined (__SVR4) && defined (__sun)
#define SPIN_LOCK_YIELD thr_yield();
#elif !defined(LACKS_SCHED_H)
#define SPIN_LOCK_YIELD sched_yield();
#define SPIN_LOCK_YIELD
#if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
static int spin_acquire_lock(int *sl) {
while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
if ((++spins & SPINS_PER_YIELD) == 0) {
#define TRY_LOCK(sl) !CAS_LOCK(sl)
#define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
#define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
#define INITIAL_LOCK(sl) (*sl = 0)
#define DESTROY_LOCK(sl) (0)
static MLOCK_T malloc_global_mutex = 0;
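The spin-lock path above can be illustrated with a small self-contained sketch built on the same GCC __sync builtins; the names acquire, release and lock_word are hypothetical and only stand in for CAS_LOCK/CLEAR_LOCK/spin_acquire_lock.

#include <sched.h>
#include <stdio.h>

#define SPINS_PER_YIELD 63

static int lock_word = 0;   /* 0 = free, 1 = held */

/* Spin until the test-and-set returns 0 (lock was free), yielding
 * periodically, as spin_acquire_lock() does above. */
static void acquire(int *sl) {
  int spins = 0;
  while (*(volatile int *)sl != 0 || __sync_lock_test_and_set(sl, 1)) {
    if ((++spins & SPINS_PER_YIELD) == 0)
      sched_yield();          /* SPIN_LOCK_YIELD on systems with <sched.h> */
  }
}

static void release(int *sl) { __sync_lock_release(sl); }

int main(void) {
  acquire(&lock_word);
  puts("in critical section");
  release(&lock_word);
  return 0;
}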
#define THREAD_ID_T DWORD
#define CURRENT_THREAD GetCurrentThreadId()
#define EQ_OWNER(X,Y) ((X) == (Y))
#define THREAD_ID_T pthread_t
#define CURRENT_THREAD pthread_self()
#define EQ_OWNER(X,Y) pthread_equal(X, Y)
struct malloc_recursive_lock {
THREAD_ID_T threadid;
#define MLOCK_T struct malloc_recursive_lock
static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
THREAD_ID_T mythreadid = CURRENT_THREAD;
if (*((volatile int *)(&lk->sl)) == 0) {
if (!CAS_LOCK(&lk->sl)) {
lk->threadid = mythreadid;
else if (EQ_OWNER(lk->threadid, mythreadid)) {
if ((++spins & SPINS_PER_YIELD) == 0) {
static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
THREAD_ID_T mythreadid = CURRENT_THREAD;
if (*((volatile int *)(&lk->sl)) == 0) {
if (!CAS_LOCK(&lk->sl)) {
lk->threadid = mythreadid;
else if (EQ_OWNER(lk->threadid, mythreadid)) {
#define RELEASE_LOCK(lk) recursive_release_lock(lk)
#define TRY_LOCK(lk) recursive_try_lock(lk)
#define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
#define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
#define DESTROY_LOCK(lk) (0)
#define MLOCK_T CRITICAL_SECTION
#define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
#define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
#define TRY_LOCK(lk) TryEnterCriticalSection(lk)
#define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
#define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
#define NEED_GLOBAL_LOCK_INIT
static MLOCK_T malloc_global_mutex;
static volatile LONG malloc_global_mutex_status;
static void init_malloc_global_mutex() {
long stat = malloc_global_mutex_status;
interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
InitializeCriticalSection(&malloc_global_mutex);
interlockedexchange(&malloc_global_mutex_status, (LONG)1);
#define MLOCK_T pthread_mutex_t
#define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
#define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
#define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
#define INITIAL_LOCK(lk) pthread_init_lock(lk)
#define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
#define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
#define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
static int pthread_init_lock (MLOCK_T *lk) {
pthread_mutexattr_t attr;
if (pthread_mutexattr_init(&attr)) return 1;
#if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
if (pthread_mutex_init(lk, &attr)) return 1;
if (pthread_mutexattr_destroy(&attr)) return 1;
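A usage sketch of the same recursive pthread-mutex initialization sequence as pthread_init_lock() above; the helper name init_recursive_lock is illustrative only.

#include <pthread.h>
#include <stdio.h>

/* Same steps as pthread_init_lock(): build an attribute object, request a
 * recursive mutex, initialize the mutex, then discard the attributes. */
static int init_recursive_lock(pthread_mutex_t *lk) {
  pthread_mutexattr_t attr;
  if (pthread_mutexattr_init(&attr)) return 1;
  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
  if (pthread_mutex_init(lk, &attr)) return 1;
  if (pthread_mutexattr_destroy(&attr)) return 1;
  return 0;
}

int main(void) {
  pthread_mutex_t lk;
  if (init_recursive_lock(&lk)) return 1;
  pthread_mutex_lock(&lk);
  pthread_mutex_lock(&lk);       /* same thread may re-enter a recursive mutex */
  puts("locked twice from one thread");
  pthread_mutex_unlock(&lk);
  pthread_mutex_unlock(&lk);
  pthread_mutex_destroy(&lk);
  return 0;
}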
619 #define USE_LOCK_BIT (2U) 621 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK 622 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); 625 #ifndef RELEASE_MALLOC_GLOBAL_LOCK 626 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); 784 #define MCHUNK_SIZE (sizeof(mchunk)) 787 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) 789 #define CHUNK_OVERHEAD (SIZE_T_SIZE) 793 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) 795 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) 798 #define MIN_CHUNK_SIZE\ 799 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) 802 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) 803 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) 805 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) 808 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) 809 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) 812 #define pad_request(req) \ 813 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) 816 #define request2size(req) \ 817 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) 830 #define PINUSE_BIT (SIZE_T_ONE) 831 #define CINUSE_BIT (SIZE_T_TWO) 832 #define FLAG4_BIT (SIZE_T_FOUR) 833 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) 834 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) 837 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) 840 #define cinuse(p) ((p)->head & CINUSE_BIT) 841 #define pinuse(p) ((p)->head & PINUSE_BIT) 842 #define flag4inuse(p) ((p)->head & FLAG4_BIT) 843 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) 844 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) 846 #define chunksize(p) ((p)->head & ~(FLAG_BITS)) 848 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) 849 #define set_flag4(p) ((p)->head |= FLAG4_BIT) 850 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) 853 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) 854 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) 857 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) 858 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) 861 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) 864 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) 865 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) 868 #define set_size_and_pinuse_of_free_chunk(p, s)\ 869 ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) 872 #define set_free_with_pinuse(p, s, n)\ 873 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) 876 #define overhead_for(p)\ 877 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) 881 #define calloc_must_clear(p) (!is_mmapped(p)) 883 #define calloc_must_clear(p) (1) 994 #define leftmost_child(t) ((t)->child[0] != 0? 
(t)->child[0] : (t)->child[1]) 1060 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) 1061 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) 1154 #define NSMALLBINS (32U) 1155 #define NTREEBINS (32U) 1156 #define SMALLBIN_SHIFT (3U) 1157 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) 1158 #define TREEBIN_SHIFT (8U) 1159 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) 1160 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) 1161 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) 1211 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) 1218 #define is_global(M) ((M) == &_gm_) 1222 #define is_initialized(M) ((M)->top != 0) 1228 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) 1229 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) 1231 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) 1233 #define disable_lock(M) 1236 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) 1237 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) 1239 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) 1241 #define disable_mmap(M) 1244 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) 1245 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) 1246 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT) 1247 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT) 1248 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT) 1249 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT) 1250 #define disable_trace(M) ((M)->mflags |= USE_TRACE_BIT) 1252 #define set_lock(M,L)\ 1253 ((M)->mflags = (L)?\ 1254 ((M)->mflags | USE_LOCK_BIT) :\ 1255 ((M)->mflags & ~USE_LOCK_BIT)) 1258 #define page_align(S)\ 1259 (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) 1262 #define granularity_align(S)\ 1263 (((S) + (mparams.granularity - SIZE_T_ONE))\ 1264 & ~(mparams.granularity - SIZE_T_ONE)) 1269 #define mmap_align(S) granularity_align(S) 1271 #define mmap_align(S) page_align(S) 1275 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) 1277 #define is_page_aligned(S)\ 1278 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) 1279 #define is_granularity_aligned(S)\ 1280 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) 1283 #define segment_holds(S, A)\ 1284 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) 1288 msegmentptr sp = &m->
seg;
1290 if (addr >= sp->
base && addr < sp->base + sp->
size)
1292 if ((sp = sp->
next) == 0)
1299 msegmentptr sp = &m->
seg;
1301 if ((
char*)sp >= ss->
base && (
char*)sp < ss->base + ss->
size)
1303 if ((sp = sp->
next) == 0)
1308 #ifndef MORECORE_CANNOT_TRIM 1309 #define should_trim(M,s) ((s) > (M)->trim_check) 1311 #define should_trim(M,s) (0) 1319 #define TOP_FOOT_SIZE\ 1320 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) 1332 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) 1333 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } 1337 #define PREACTION(M) (0) 1341 #define POSTACTION(M) 1354 #if PROCEED_ON_ERROR 1357 int malloc_corruption_error_count;
1360 static void reset_on_error(mstate m);
1362 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) 1363 #define USAGE_ERROR_ACTION(m, p) 1367 #ifndef CORRUPTION_ERROR_ACTION 1368 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT 1371 #ifndef USAGE_ERROR_ACTION 1372 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT 1382 #define check_free_chunk(M,P) 1383 #define check_inuse_chunk(M,P) 1384 #define check_malloced_chunk(M,P,N) 1385 #define check_mmapped_chunk(M,P) 1386 #define check_malloc_state(M) 1387 #define check_top_chunk(M,P) 1390 #define check_free_chunk(M,P) do_check_free_chunk(M,P) 1391 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) 1392 #define check_top_chunk(M,P) do_check_top_chunk(M,P) 1393 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) 1394 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) 1395 #define check_malloc_state(M) do_check_malloc_state(M) 1397 static void do_check_any_chunk(mstate m, mchunkptr p);
1398 static void do_check_top_chunk(mstate m, mchunkptr p);
1399 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1400 static void do_check_inuse_chunk(mstate m, mchunkptr p);
1401 static void do_check_free_chunk(mstate m, mchunkptr p);
1402 static void do_check_malloced_chunk(mstate m,
void*
mem,
size_t s);
1403 static void do_check_tree(mstate m, tchunkptr t);
1404 static void do_check_treebin(mstate m,
bindex_t i);
1405 static void do_check_smallbin(mstate m,
bindex_t i);
1406 static void do_check_malloc_state(mstate m);
1407 static int bin_find(mstate m, mchunkptr x);
1408 static size_t traverse_and_check(mstate m);
1413 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) 1414 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) 1415 #define small_index2size(i) ((i) << SMALLBIN_SHIFT) 1416 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) 1419 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) 1420 #define treebin_at(M,i) (&((M)->treebins[i])) 1423 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) 1424 #define compute_tree_index(S, I)\ 1426 unsigned int X = S >> TREEBIN_SHIFT;\ 1429 else if (X > 0xFFFF)\ 1432 unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ 1433 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ 1437 #elif defined (__INTEL_COMPILER) 1438 #define compute_tree_index(S, I)\ 1440 size_t X = S >> TREEBIN_SHIFT;\ 1443 else if (X > 0xFFFF)\ 1446 unsigned int K = _bit_scan_reverse (X); \ 1447 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ 1451 #elif defined(_MSC_VER) && _MSC_VER>=1300 1452 #define compute_tree_index(S, I)\ 1454 size_t X = S >> TREEBIN_SHIFT;\ 1457 else if (X > 0xFFFF)\ 1461 _BitScanReverse((DWORD *) &K, (DWORD) X);\ 1462 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ 1467 #define compute_tree_index(S, I)\ 1469 size_t X = S >> TREEBIN_SHIFT;\ 1472 else if (X > 0xFFFF)\ 1475 unsigned int Y = (unsigned int)X;\ 1476 unsigned int N = ((Y - 0x100) >> 16) & 8;\ 1477 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ 1479 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ 1480 K = 14 - N + ((Y <<= K) >> 15);\ 1481 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ 1487 #define bit_for_tree_index(i) \ 1488 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2) 1491 #define leftshift_for_tree_index(i) \ 1492 ((i == NTREEBINS-1)? 
0 : \ 1493 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) 1496 #define minsize_for_tree_index(i) \ 1497 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ 1498 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) 1504 #define idx2bit(i) ((binmap_t)(1) << (i)) 1507 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) 1508 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) 1509 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) 1511 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) 1512 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) 1513 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) 1516 #define least_bit(x) ((x) & -(x)) 1519 #define left_bits(x) ((x<<1) | -(x<<1)) 1522 #define same_or_left_bits(x) ((x) | -(x)) 1526 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) 1527 #define compute_bit2idx(X, I)\ 1530 J = __builtin_ctz(X); \ 1534 #elif defined (__INTEL_COMPILER) 1535 #define compute_bit2idx(X, I)\ 1538 J = _bit_scan_forward (X); \ 1542 #elif defined(_MSC_VER) && _MSC_VER>=1300 1543 #define compute_bit2idx(X, I)\ 1546 _BitScanForward((DWORD *) &J, X);\ 1550 #elif USE_BUILTIN_FFS 1551 #define compute_bit2idx(X, I) I = ffs(X)-1 1554 #define compute_bit2idx(X, I)\ 1556 unsigned int Y = X - 1;\ 1557 unsigned int K = Y >> (16-4) & 16;\ 1558 unsigned int N = K; Y >>= K;\ 1559 N += K = Y >> (8-3) & 8; Y >>= K;\ 1560 N += K = Y >> (4-2) & 4; Y >>= K;\ 1561 N += K = Y >> (2-1) & 2; Y >>= K;\ 1562 N += K = Y >> (1-0) & 1; Y >>= K;\ 1563 I = (bindex_t)(N + Y);\ 1598 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr) 1600 #define ok_next(p, n) ((char*)(p) < (char*)(n)) 1602 #define ok_inuse(p) is_inuse(p) 1604 #define ok_pinuse(p) pinuse(p) 1607 #define ok_address(M, a) (1) 1608 #define ok_next(b, n) (1) 1609 #define ok_inuse(p) (1) 1610 #define ok_pinuse(p) (1) 1613 #if (FOOTERS && !INSECURE) 1621 #define ok_magic(M) (1) 1626 #if defined(__GNUC__) && __GNUC__ >= 3 1627 #define RTCHECK(e) __builtin_expect(e, 1) 1629 #define RTCHECK(e) (e) 1632 #define RTCHECK(e) (1) 1639 #define mark_inuse_foot(M,p,s) 1644 #define set_inuse(M,p,s)\ 1645 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ 1646 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) 1649 #define set_inuse_and_pinuse(M,p,s)\ 1650 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ 1651 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) 1654 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ 1655 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) 1660 #define mark_inuse_foot(M,p,s)\ 1661 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) 1663 #define get_mstate_for(p)\ 1664 ((mstate)(((mchunkptr)((char*)(p) +\ 1665 (chunksize(p))))->prev_foot ^ mparams.magic)) 1667 #define set_inuse(M,p,s)\ 1668 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ 1669 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ 1670 mark_inuse_foot(M,p,s)) 1672 #define set_inuse_and_pinuse(M,p,s)\ 1673 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ 1674 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ 1675 mark_inuse_foot(M,p,s)) 1677 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ 1678 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ 1679 mark_inuse_foot(M, p, s)) 1686 static void pre_fork(
void) { ACQUIRE_LOCK(&(
gm)->mutex); }
1687 static void post_fork_parent(
void) { RELEASE_LOCK(&(
gm)->mutex); }
1688 static void post_fork_child(
void) {
INITIAL_LOCK(&(
gm)->mutex); }
1693 #ifdef NEED_GLOBAL_LOCK_INIT 1694 if (malloc_global_mutex_status <= 0)
1695 init_malloc_global_mutex();
1709 SYSTEM_INFO system_info;
1710 GetSystemInfo(&system_info);
1711 psize = system_info.dwPageSize;
1723 if ((
sizeof(
size_t) !=
sizeof(
char*)) ||
1725 (
sizeof(
int) < 4) ||
1736 #if MORECORE_CONTIGUOUS 1748 pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1752 #ifndef DLM_MAGIC_CONSTANT 1755 unsigned char buf[
sizeof(size_t)];
1757 if ((fd = open(
"/dev/urandom", O_RDONLY)) >= 0 &&
1758 read(fd, buf,
sizeof(buf)) ==
sizeof(buf)) {
1759 magic = *((
size_t *) buf);
1765 magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1766 #elif defined(LACKS_TIME_H) 1767 magic = (size_t)&magic ^ (
size_t)0x55555555U;
1769 magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1771 magic |= (size_t)8U;
1772 magic &= ~(size_t)7U;
1789 val = (value == -1)?
MAX_SIZE_T : (
size_t)value;
1790 switch(param_number) {
1813 static void do_check_any_chunk(mstate m, mchunkptr p) {
1819 static void do_check_top_chunk(mstate m, mchunkptr p) {
1833 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1847 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1848 do_check_any_chunk(m, p);
1854 do_check_mmapped_chunk(m, p);
1858 static void do_check_free_chunk(mstate m, mchunkptr p) {
1861 do_check_any_chunk(m, p);
1865 if (p != m->
dv && p != m->
top) {
1881 static void do_check_malloced_chunk(mstate m,
void*
mem,
size_t s) {
1885 do_check_inuse_chunk(m, p);
1895 static void do_check_tree(mstate m, tchunkptr t) {
1908 do_check_any_chunk(m, ((mchunkptr)u));
1909 assert(u->index == tindex);
1915 if (u->parent == 0) {
1916 assert(u->child[0] == 0);
1917 assert(u->child[1] == 0);
1923 assert (u->parent->child[0] == u ||
1924 u->parent->child[1] == u ||
1925 *((tbinptr*)(u->parent)) == u);
1926 if (u->child[0] != 0) {
1927 assert(u->child[0]->parent == u);
1928 assert(u->child[0] != u);
1929 do_check_tree(m, u->child[0]);
1931 if (u->child[1] != 0) {
1932 assert(u->child[1]->parent == u);
1933 assert(u->child[1] != u);
1934 do_check_tree(m, u->child[1]);
1936 if (u->child[0] != 0 && u->child[1] != 0) {
1946 static void do_check_treebin(mstate m,
bindex_t i) {
1949 int empty = (m->
treemap & (1U <<
i)) == 0;
1953 do_check_tree(m, t);
1957 static void do_check_smallbin(mstate m,
bindex_t i) {
1959 mchunkptr p = b->
bk;
1960 unsigned int empty = (m->
smallmap & (1U <<
i)) == 0;
1964 for (; p != b; p = p->
bk) {
1968 do_check_free_chunk(m, p);
1975 do_check_inuse_chunk(m, q);
1981 static int bin_find(mstate m, mchunkptr x) {
1991 }
while ((p = p->
fd) != b);
2000 while (t != 0 &&
chunksize(t) != size) {
2007 if (u == (tchunkptr)x)
2009 }
while ((u = u->
fd) != t);
2017 static size_t traverse_and_check(mstate m) {
2020 msegmentptr s = &m->
seg;
2024 mchunkptr lastq = 0;
2031 do_check_inuse_chunk(m, q);
2034 assert(q == m->
dv || bin_find(m, q));
2036 do_check_free_chunk(m, q);
2049 static void do_check_malloc_state(mstate m) {
2054 do_check_smallbin(m, i);
2056 do_check_treebin(m, i);
2059 do_check_any_chunk(m, m->
dv);
2066 do_check_top_chunk(m, m->
top);
2072 total = traverse_and_check(m);
2082 struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2090 msegmentptr s = &m->
seg;
2121 #if !NO_MALLOC_STATS 2130 msegmentptr s = &m->
seg;
2147 fprintf(stderr,
"max system bytes = %10lu\n", (
unsigned long)(maxfp));
2148 fprintf(stderr,
"system bytes = %10lu\n", (
unsigned long)(fp));
2149 fprintf(stderr,
"in use bytes = %10lu\n", (
unsigned long)(used));
2164 #define insert_small_chunk(M, P, S) {\ 2165 bindex_t I = small_index(S);\ 2166 mchunkptr B = smallbin_at(M, I);\ 2168 assert(S >= MIN_CHUNK_SIZE);\ 2169 if (!smallmap_is_marked(M, I))\ 2170 mark_smallmap(M, I);\ 2171 else if (RTCHECK(ok_address(M, B->fd)))\ 2174 CORRUPTION_ERROR_ACTION(M);\ 2183 #define unlink_small_chunk(M, P, S) {\ 2184 mchunkptr F = P->fd;\ 2185 mchunkptr B = P->bk;\ 2186 bindex_t I = small_index(S);\ 2189 assert(chunksize(P) == small_index2size(I));\ 2190 if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ 2192 clear_smallmap(M, I);\ 2194 else if (RTCHECK(B == smallbin_at(M,I) ||\ 2195 (ok_address(M, B) && B->fd == P))) {\ 2200 CORRUPTION_ERROR_ACTION(M);\ 2204 CORRUPTION_ERROR_ACTION(M);\ 2209 #define unlink_first_small_chunk(M, B, P, I) {\ 2210 mchunkptr F = P->fd;\ 2213 assert(chunksize(P) == small_index2size(I));\ 2215 clear_smallmap(M, I);\ 2217 else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ 2222 CORRUPTION_ERROR_ACTION(M);\ 2228 #define replace_dv(M, P, S) {\ 2229 size_t DVS = M->dvsize;\ 2230 assert(is_small(DVS));\ 2232 mchunkptr DV = M->dv;\ 2233 insert_small_chunk(M, DV, DVS);\ 2242 #define insert_large_chunk(M, X, S) {\ 2245 compute_tree_index(S, I);\ 2246 H = treebin_at(M, I);\ 2248 X->child[0] = X->child[1] = 0;\ 2249 if (!treemap_is_marked(M, I)) {\ 2250 mark_treemap(M, I);\ 2252 X->parent = (tchunkptr)H;\ 2257 size_t K = S << leftshift_for_tree_index(I);\ 2259 if (chunksize(T) != S) {\ 2260 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ 2264 else if (RTCHECK(ok_address(M, C))) {\ 2271 CORRUPTION_ERROR_ACTION(M);\ 2276 tchunkptr F = T->fd;\ 2277 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ 2285 CORRUPTION_ERROR_ACTION(M);\ 2310 #define unlink_large_chunk(M, X) {\ 2311 tchunkptr XP = X->parent;\ 2314 tchunkptr F = X->fd;\ 2316 if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ 2321 CORRUPTION_ERROR_ACTION(M);\ 2326 if (((R = *(RP = &(X->child[1]))) != 0) ||\ 2327 ((R = *(RP = &(X->child[0]))) != 0)) {\ 2329 while ((*(CP = &(R->child[1])) != 0) ||\ 2330 (*(CP = &(R->child[0])) != 0)) {\ 2333 if (RTCHECK(ok_address(M, RP)))\ 2336 CORRUPTION_ERROR_ACTION(M);\ 2341 tbinptr* H = treebin_at(M, X->index);\ 2343 if ((*H = R) == 0) \ 2344 clear_treemap(M, X->index);\ 2346 else if (RTCHECK(ok_address(M, XP))) {\ 2347 if (XP->child[0] == X) \ 2353 CORRUPTION_ERROR_ACTION(M);\ 2355 if (RTCHECK(ok_address(M, R))) {\ 2358 if ((C0 = X->child[0]) != 0) {\ 2359 if (RTCHECK(ok_address(M, C0))) {\ 2364 CORRUPTION_ERROR_ACTION(M);\ 2366 if ((C1 = X->child[1]) != 0) {\ 2367 if (RTCHECK(ok_address(M, C1))) {\ 2372 CORRUPTION_ERROR_ACTION(M);\ 2376 CORRUPTION_ERROR_ACTION(M);\ 2383 #define insert_chunk(M, P, S)\ 2384 if (is_small(S)) insert_small_chunk(M, P, S)\ 2385 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } 2387 #define unlink_chunk(M, P, S)\ 2388 if (is_small(S)) unlink_small_chunk(M, P, S)\ 2389 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } 2395 #define internal_malloc(m, b) mspace_malloc(m, b) 2396 #define internal_free(m, mem) mspace_free(m,mem); 2399 #define internal_malloc(m, b)\ 2400 ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) 2401 #define internal_free(m, mem)\ 2402 if (m == gm) dlfree(mem); else mspace_free(m,mem); 2404 #define internal_malloc(m, b) dlmalloc(b) 2405 #define internal_free(m, mem) dlfree(mem) 2439 if (m->
least_addr == 0 || mm < m->least_addr)
2466 oldmmsize, newmmsize,
flags);
2468 mchunkptr newp = (
mchunkptr)(cp + offset);
2475 if (cp < m->least_addr)
2490 static void init_top(mstate m, mchunkptr p,
size_t psize) {
2510 bin->
fd = bin->
bk = bin;
2514 #if PROCEED_ON_ERROR 2517 static void reset_on_error(mstate m) {
2519 ++malloc_corruption_error_count;
2538 size_t psize = (
char*)oldfirst - (
char*)p;
2540 size_t qsize = psize - nb;
2543 assert((
char*)oldfirst > (
char*)q);
2548 if (oldfirst == m->
top) {
2549 size_t tsize = m->
topsize += qsize;
2554 else if (oldfirst == m->
dv) {
2555 size_t dsize = m->
dvsize += qsize;
2578 char* old_top = (
char*)m->
top;
2580 char* old_end = oldsp->
base + oldsp->size;
2584 char* asp = rawsp +
offset;
2589 mchunkptr p = tnext;
2609 if ((
char*)(&(nextp->
head)) < old_end)
2617 if (csp != old_top) {
2619 size_t psize = csp - old_top;
2682 size_t ssize = asize;
2692 ssize += (
page_align((
size_t)base) - (size_t)base);
2696 (fp > m->
footprint && fp <= m->footprint_limit)) &&
2759 size_t ssize = end - br;
2774 if (m->
least_addr == 0 || tbase < m->least_addr)
2796 msegmentptr sp = &m->
seg;
2798 while (sp != 0 && tbase != sp->
base + sp->
size)
2808 if (tbase < m->least_addr)
2811 while (sp != 0 && sp->
base != tbase + tsize)
2816 char* oldbase = sp->
base;
2826 if (nb < m->topsize) {
2827 size_t rsize = m->
topsize -= nb;
2828 mchunkptr p = m->
top;
2846 size_t released = 0;
2848 msegmentptr pred = &m->
seg;
2849 msegmentptr sp = pred->
next;
2851 char* base = sp->
base;
2853 msegmentptr next = sp->
next;
2893 size_t released = 0;
2908 sp->
size >= extra &&
2910 size_t newsize = sp->
size - extra;
2926 if (old_br == sp->
base + sp->
size) {
2929 if (rel_br !=
CMFAIL && new_br < old_br)
2930 released = old_br - new_br;
2937 if (released != 0) {
2938 sp->
size -= released;
2954 return (released != 0)? 1 : 0;
2991 if (next == m->
top) {
2992 size_t tsize = m->
topsize += psize;
3001 else if (next == m->
dv) {
3002 size_t dsize = m->
dvsize += psize;
3046 if ((rsize = trem) == 0)
3051 if (rt != 0 && rt != t)
3060 if (t == 0 && v == 0) {
3062 if (leftbits != 0) {
3080 if (v != 0 && rsize < (
size_t)(m->
dvsize - nb)) {
3177 smallbits =
gm->smallmap >> idx;
3179 if ((smallbits & 0x3U) != 0) {
3181 idx += ~smallbits & 1;
3192 else if (nb >
gm->dvsize) {
3193 if (smallbits != 0) {
3235 if (nb <= gm->dvsize) {
3236 size_t rsize =
gm->dvsize - nb;
3237 mchunkptr p =
gm->dv;
3245 size_t dvs =
gm->dvsize;
3255 else if (nb < gm->topsize) {
3256 size_t rsize =
gm->topsize -= nb;
3257 mchunkptr p =
gm->top;
3289 mstate
fm = get_mstate_for(p);
3331 if (next == fm->
top) {
3332 size_t tsize = fm->
topsize += psize;
3343 else if (next == fm->
dv) {
3344 size_t dsize = fm->
dvsize += psize;
3391 if (n_elements != 0) {
3392 req = n_elements * elem_size;
3393 if (((n_elements | elem_size) & ~(
size_t)0xffff) &&
3394 (req / n_elements != elem_size))
3399 memset(mem, 0, req);
3418 else if (oldsize >= nb) {
3419 size_t rsize = oldsize - nb;
3428 else if (next == m->
top) {
3429 if (oldsize + m->
topsize > nb) {
3430 size_t newsize = oldsize + m->
topsize;
3431 size_t newtopsize = newsize - nb;
3440 else if (next == m->
dv) {
3442 if (oldsize + dvs >= nb) {
3443 size_t dsize = oldsize + dvs - nb;
3454 size_t newsize = oldsize + dvs;
3462 else if (!
cinuse(next)) {
3464 if (oldsize + nextsize >= nb) {
3465 size_t rsize = oldsize + nextsize - nb;
3468 size_t newsize = oldsize + nextsize;
3491 if ((alignment & (alignment-
SIZE_T_ONE)) != 0) {
3493 while (a < alignment) a <<= 1;
3509 if ((((
size_t)(mem)) & (alignment - 1)) != 0) {
3518 char* br = (
char*)
mem2chunk((
size_t)(((size_t)((
char*)mem + alignment -
3524 size_t leadsize = pos - (
char*)(p);
3525 size_t newsize =
chunksize(p) - leadsize;
3529 newp->
head = newsize;
3543 size_t remainder_size = size - nb;
3546 set_inuse(m, remainder, remainder_size);
3553 assert(((
size_t)mem & (alignment - 1)) == 0);
3574 size_t element_size;
3575 size_t contents_size;
3579 size_t remainder_size;
3581 mchunkptr array_chunk;
3589 if (n_elements == 0)
3596 if (n_elements == 0)
3599 array_size =
request2size(n_elements * (
sizeof(
void*)));
3605 contents_size = n_elements * element_size;
3610 for (i = 0; i != n_elements; ++
i)
3614 size = contents_size + array_size;
3636 memset((
size_t*)mem, 0, remainder_size -
SIZE_T_SIZE - array_size);
3641 size_t array_chunk_size;
3643 array_chunk_size = remainder_size - contents_size;
3644 marray = (
void**) (
chunk2mem(array_chunk));
3646 remainder_size = contents_size;
3650 for (i = 0; ; ++
i) {
3652 if (i != n_elements-1) {
3653 if (element_size != 0)
3654 size = element_size;
3657 remainder_size -=
size;
3668 if (marray != chunks) {
3670 if (element_size != 0) {
3671 assert(remainder_size == element_size);
3678 for (i = 0; i != n_elements; ++
i)
3698 void** fence = &(array[nelem]);
3699 for (a = array; a != fence; ++
a) {
3705 if (get_mstate_for(p) != m) {
3715 if (b != fence && *b ==
chunk2mem(next)) {
3716 size_t newsize =
chunksize(next) + psize;
3737 #if MALLOC_INSPECT_ALL 3738 static void internal_inspect_all(mstate m,
3739 void(*handler)(
void *start,
3742 void* callback_arg),
3745 mchunkptr top = m->
top;
3747 for (s = &m->
seg; s != 0; s = s->
next) {
3761 start = (
void*)((
char*)q +
sizeof(
struct malloc_chunk));
3767 if (start < (
void*)next)
3768 handler(start, next, used, arg);
3790 #ifdef REALLOC_ZERO_BYTES_FREES 3791 else if (bytes == 0) {
3801 mstate m = get_mstate_for(oldp);
3818 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3839 mstate m = get_mstate_for(oldp);
3870 size_t d = alignment /
sizeof(
void*);
3871 size_t r = alignment %
sizeof(
void*);
3872 if (r != 0 || d == 0 || (d & (d-
SIZE_T_ONE)) != 0)
3904 size_t sz = elem_size;
3905 return ialloc(
gm, n_elements, &sz, 3, chunks);
3910 return ialloc(
gm, n_elements, sizes, 0, chunks);
3917 #if MALLOC_INSPECT_ALL 3918 void dlmalloc_inspect_all(
void(*handler)(
void *start,
3921 void* callback_arg),
3925 internal_inspect_all(
gm, handler, arg);
3942 return gm->footprint;
3946 return gm->max_footprint;
3950 size_t maf =
gm->footprint_limit;
3962 return gm->footprint_limit = result;
3971 #if !NO_MALLOC_STATS 3996 static mstate init_user_mstate(
char* tbase,
size_t tsize) {
4001 memset(m, 0, msize);
4028 char* tbase = (
char*)(
CALL_MMAP(tsize));
4030 m = init_user_mstate(tbase, tsize);
4045 m = init_user_mstate((
char*)base, capacity);
4073 msegmentptr sp = &ms->
seg;
4076 char* base = sp->
base;
4098 this_seg = &ms->
seg;
4100 *addrp = this_seg->
base;
4101 *sizep = this_seg->
size;
4112 this_seg = &ms->
seg;
4117 base = this_seg->
base;
4118 if (pp >= base && pp < (base + this_seg->
size))
4120 this_seg = this_seg->
next;
4131 mstate ms = (
mstate) msp;
4145 int was_enabled = 0;
4155 return (was_enabled);
4168 unsigned long n_user_data_bytes,
4169 unsigned long align,
4172 unsigned long searchp;
4180 n_user_data_bytes +=
sizeof(unsigned);
4185 if (align <
sizeof (
uword)) {
4197 wwp = (
unsigned *)rv;
4199 rv +=
sizeof (unsigned);
4212 if (align > 4<<10 || align_offset == ~0UL) {
4213 n_user_data_bytes -=
sizeof(unsigned);
4214 assert(align_offset == 0);
4230 align_offset &= (align - 1);
4232 n_user_data_bytes += align;
4239 searchp = (
unsigned long)(rv +
sizeof (
unsigned));
4242 while ((searchp + align_offset) % align)
4247 unsigned long where_now, delta;
4250 delta = align - where_now;
4255 wwp = (
unsigned *)(searchp -
sizeof(
unsigned));
4256 *wwp = (searchp - (((
unsigned long) rv) +
sizeof (*wwp)));
4264 return (
void *) searchp;
4269 char *object_header;
4274 wwp = (
unsigned *)p_arg;
4278 object_header = (
char *)wwp;
4279 object_header -= *wwp;
4294 memset (object_header, 0x13, psize);
4319 char *object_header;
4323 wwp = (
unsigned *)p;
4327 object_header = (
char *)wwp;
4328 object_header -= *wwp;
4332 usable_size -= (*wwp +
sizeof (*wwp));
4357 if ((smallbits & 0x3U) != 0) {
4359 idx += ~smallbits & 1;
4370 else if (nb > ms->
dvsize) {
4371 if (smallbits != 0) {
4413 if (nb <= ms->dvsize) {
4414 size_t rsize = ms->
dvsize - nb;
4415 mchunkptr p = ms->
dv;
4433 else if (nb < ms->topsize) {
4434 size_t rsize = ms->
topsize -= nb;
4435 mchunkptr p = ms->
top;
4459 mstate
fm = get_mstate_for(p);
4502 if (next == fm->
top) {
4503 size_t tsize = fm->
topsize += psize;
4514 else if (next == fm->
dv) {
4515 size_t dsize = fm->
dvsize += psize;
4556 void*
mspace_calloc(mspace msp,
size_t n_elements,
size_t elem_size) {
4564 if (n_elements != 0) {
4565 req = n_elements * elem_size;
4566 if (((n_elements | elem_size) & ~(
size_t)0xffff) &&
4567 (req / n_elements != elem_size))
4572 memset(mem, 0, req);
4584 #ifdef REALLOC_ZERO_BYTES_FREES 4585 else if (bytes == 0) {
4595 mstate m = get_mstate_for(oldp);
4612 memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4621 void* mspace_realloc_in_place(mspace msp,
void* oldmem,
size_t bytes) {
4633 mstate m = get_mstate_for(oldp);
4665 size_t elem_size,
void* chunks[]) {
4666 size_t sz = elem_size;
4672 return ialloc(ms, n_elements, &sz, 3, chunks);
4676 size_t sizes[],
void* chunks[]) {
4682 return ialloc(ms, n_elements, sizes, 0, chunks);
4685 size_t mspace_bulk_free(mspace msp,
void* array[],
size_t nelem) {
4689 #if MALLOC_INSPECT_ALL 4690 void mspace_inspect_all(mspace msp,
4691 void(*handler)(
void *start,
4694 void* callback_arg),
4699 internal_inspect_all(ms, handler, arg);
4724 #if !NO_MALLOC_STATS 4760 size_t mspace_footprint_limit(mspace msp) {
4773 size_t mspace_set_footprint_limit(mspace msp,
size_t bytes) {
4792 struct dlmallinfo mspace_mallinfo(mspace msp) {
#define segment_holds(S, A)
size_t dlmalloc_footprint(void)
void ** dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
#define chunk_plus_offset(p, s)
DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad)
DLMALLOC_EXPORT void * mspace_malloc(mspace msp, size_t bytes)
void * dlmemalign(size_t alignment, size_t bytes)
DLMALLOC_EXPORT void ** mspace_independent_calloc(mspace msp, size_t n_elements, size_t elem_size, void *chunks[])
#define treemap_is_marked(M, i)
#define smallmap_is_marked(M, i)
static size_t release_unused_segments(mstate m)
static int sys_trim(mstate m, size_t pad)
static struct malloc_params mparams
#define DEFAULT_MMAP_THRESHOLD
#define is_mmapped_segment(S)
struct malloc_segment * msegmentptr
static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags)
#define MALLOC_FAILURE_ACTION
#define insert_large_chunk(M, X, S)
#define insert_chunk(M, P, S)
#define is_initialized(M)
#define NO_SEGMENT_TRAVERSAL
struct malloc_tree_chunk * parent
size_t dlbulk_free(void *array[], size_t nelem)
DLMALLOC_EXPORT void * mspace_realloc(mspace msp, void *mem, size_t newsize)
#define is_page_aligned(S)
#define DLM_MAGIC_CONSTANT
void * dlpvalloc(size_t bytes)
#define malloc_getpagesize
#define internal_malloc(m, b)
#define leftmost_child(t)
MALLINFO_FIELD_TYPE hblkhd
#define replace_dv(M, P, S)
static void init_top(mstate m, mchunkptr p, size_t psize)
#define USE_MMAP_BIT
Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP.
#define unlink_chunk(M, P, S)
#define unlink_large_chunk(M, X)
struct malloc_state * mstate
DLMALLOC_EXPORT mspace create_mspace_with_base(void *base, size_t capacity, int locked)
DLMALLOC_EXPORT int mspace_is_heap_object(mspace msp, void *p)
#define small_index2size(i)
MALLINFO_FIELD_TYPE uordblks
#define leftshift_for_tree_index(i)
#define CALL_DIRECT_MMAP(s)
#define check_mmapped_chunk(M, P)
DLMALLOC_EXPORT void * mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
static void dispose_chunk(mstate m, mchunkptr p, size_t psize)
struct malloc_chunk * sbinptr
#define check_malloc_state(M)
MALLINFO_FIELD_TYPE arena
#define DEFAULT_GRANULARITY
struct malloc_tree_chunk * fd
static void * sys_alloc(mstate m, size_t nb)
#define unlink_first_small_chunk(M, B, P, I)
#define check_malloced_chunk(M, P, N)
size_t dlmalloc_usable_size(void *mem)
#define check_top_chunk(M, P)
DLMALLOC_EXPORT void mspace_put_no_offset(mspace msp, void *p)
#define internal_free(m, mem)
MALLINFO_FIELD_TYPE ordblks
#define disable_expand(M)
static void init_bins(mstate m)
void * dlmalloc(size_t bytes)
static void internal_malloc_stats(mstate m)
void * dlvalloc(size_t bytes)
int dlmalloc_trim(size_t pad)
struct malloc_segment * next
DLMALLOC_EXPORT void * mspace_memalign(mspace msp, size_t alignment, size_t bytes)
#define insert_small_chunk(M, P, S)
DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp)
struct malloc_chunk * mchunkptr
DLMALLOC_EXPORT void mheap_put_trace(uword offset, uword size)
static msegmentptr segment_holding(mstate m, char *addr)
MALLINFO_FIELD_TYPE usmblks
DLMALLOC_EXPORT void mspace_disable_expand(mspace msp)
#define compute_tree_index(S, I)
#define DEFAULT_TRIM_THRESHOLD
#define calloc_must_clear(p)
#define MAX_SMALL_REQUEST
#define USE_NONCONTIGUOUS_BIT
static int init_mparams(void)
static void * prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
void * dlrealloc(void *oldmem, size_t bytes)
DLMALLOC_EXPORT int mspace_is_traced(mspace msp)
static void * tmalloc_small(mstate m, size_t nb)
static void * mmap_alloc(mstate m, size_t nb)
struct malloc_tree_chunk * tbinptr
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked)
struct malloc_tree_chunk * tchunkptr
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
#define chunk_minus_offset(p, s)
DLMALLOC_EXPORT void mspace_get_address_and_size(mspace msp, char **addrp, size_t *sizep)
DLMALLOC_EXPORT int mspace_mallopt(int, int)
DLMALLOC_EXPORT void ** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void *chunks[])
size_t dlmalloc_max_footprint(void)
MALLINFO_FIELD_TYPE keepcost
#define check_inuse_chunk(M, P)
DLMALLOC_EXPORT int mspace_enable_disable_trace(mspace msp, int enable)
static uword max_pow2(uword x)
#define set_free_with_pinuse(p, s, n)
static struct dlmallinfo internal_mallinfo(mstate m)
#define SYS_ALLOC_PADDING
#define set_inuse(M, p, s)
#define use_noncontiguous(M)
#define MAX_RELEASE_CHECK_RATE
DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem)
#define CALL_MUNMAP(a, s)
#define USAGE_ERROR_ACTION(m, p)
#define MORECORE_CONTIGUOUS
#define RELEASE_MALLOC_GLOBAL_LOCK()
struct malloc_tree_chunk * bk
#define set_size_and_pinuse_of_free_chunk(p, s)
#define check_free_chunk(M, P)
#define CORRUPTION_ERROR_ACTION(m)
#define ensure_initialization()
#define disable_contiguous(M)
#define mark_inuse_foot(M, p, s)
struct malloc_tree_chunk * child[2]
DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem)
#define request2size(req)
static void ** ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, void *chunks[])
#define align_as_chunk(A)
void * dlrealloc_in_place(void *oldmem, size_t bytes)
DLMALLOC_EXPORT void * mspace_get_aligned(mspace msp, unsigned long n_user_data_bytes, unsigned long align, unsigned long align_offset)
#define smallbin_at(M, i)
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move)
#define CALL_MORECORE(S)
Define CALL_MORECORE.
static void * tmalloc_large(mstate m, size_t nb)
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp)
#define set_inuse_and_pinuse(M, p, s)
#define compute_bit2idx(X, I)
#define CALL_MREMAP(addr, osz, nsz, mv)
Define CALL_MREMAP.
DLMALLOC_EXPORT size_t mspace_footprint(mspace msp)
DLMALLOC_EXPORT void mheap_get_trace(uword offset, uword size)
DLMALLOC_EXPORT void mspace_put(mspace msp, void *p)
#define is_extern_segment(S)
static void * internal_memalign(mstate m, size_t alignment, size_t bytes)
MALLINFO_FIELD_TYPE fordblks
DLMALLOC_EXPORT size_t mspace_usable_size_with_delta(const void *p)
void ** dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
int dlmallopt(int param_number, int value)
size_t dlmalloc_set_footprint_limit(size_t bytes)
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable)
#define minsize_for_tree_index(i)
#define FOUR_SIZE_T_SIZES
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)
#define granularity_align(S)
static int change_mparam(int param_number, int value)
int dlposix_memalign(void **pp, size_t alignment, size_t bytes)
DLMALLOC_EXPORT void * mspace_least_addr(mspace msp)
static int has_segment_link(mstate m, msegmentptr ss)
size_t dlmalloc_footprint_limit(void)
static size_t internal_bulk_free(mstate m, void *array[], size_t nelem)
#define should_trim(M, s)
DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp)
void * dlcalloc(size_t n_elements, size_t elem_size)
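Closing usage sketch for the dl* entry points listed above, assuming the allocator is built with USE_DL_PREFIX and linked into the program (otherwise the same calls go through plain malloc/realloc/free).

#include <stddef.h>
#include <stdio.h>

void  *dlmalloc(size_t);
void  *dlrealloc(void *, size_t);
void   dlfree(void *);
size_t dlmalloc_usable_size(void *);

int main(void) {
  void *p = dlmalloc(100);
  if (p == NULL) return 1;
  printf("usable size for a 100-byte request: %zu\n", dlmalloc_usable_size(p));
  p = dlrealloc(p, 4000);   /* grow in place or move, as dlrealloc decides */
  dlfree(p);
  return 0;
}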