FD.io VPP  v19.08.3-2-gbabecb413
Vector Packet Processing
dlmalloc.c
1 /*
2  This is a version (aka dlmalloc) of malloc/free/realloc written by
3  Doug Lea and released to the public domain, as explained at
4  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
5  comments, complaints, performance data, etc to dl@cs.oswego.edu
6 */
7 
8 #include <vppinfra/dlmalloc.h>
9 
10 /*------------------------------ internal #includes ---------------------- */
11 
12 #ifdef _MSC_VER
13 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
14 #endif /* _MSC_VER */
15 #if !NO_MALLOC_STATS
16 #include <stdio.h> /* for printing in malloc_stats */
17 #endif /* NO_MALLOC_STATS */
18 #ifndef LACKS_ERRNO_H
19 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
20 #endif /* LACKS_ERRNO_H */
21 #ifdef DEBUG
22 #if DLM_ABORT_ON_ASSERT_FAILURE
23 #undef assert
24 #define assert(x) if(!(x)) DLM_ABORT
25 #else /* DLM_ABORT_ON_ASSERT_FAILURE */
26 #include <assert.h>
27 #endif /* DLM_ABORT_ON_ASSERT_FAILURE */
28 #else /* DEBUG */
29 #ifndef assert
30 #define assert(x)
31 #endif
32 #define DEBUG 0
33 #endif /* DEBUG */
34 #if !defined(WIN32) && !defined(LACKS_TIME_H)
35 #include <time.h> /* for magic initialization */
36 #endif /* WIN32 */
37 #ifndef LACKS_STDLIB_H
38 #include <stdlib.h> /* for abort() */
39 #endif /* LACKS_STDLIB_H */
40 #ifndef LACKS_STRING_H
41 #include <string.h> /* for memset etc */
42 #endif /* LACKS_STRING_H */
43 #if USE_BUILTIN_FFS
44 #ifndef LACKS_STRINGS_H
45 #include <strings.h> /* for ffs */
46 #endif /* LACKS_STRINGS_H */
47 #endif /* USE_BUILTIN_FFS */
48 #if HAVE_MMAP
49 #ifndef LACKS_SYS_MMAN_H
50 /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
51 #if (defined(linux) && !defined(__USE_GNU))
52 #define __USE_GNU 1
53 #include <sys/mman.h> /* for mmap */
54 #undef __USE_GNU
55 #else
56 #include <sys/mman.h> /* for mmap */
57 #endif /* linux */
58 #endif /* LACKS_SYS_MMAN_H */
59 #ifndef LACKS_FCNTL_H
60 #include <fcntl.h>
61 #endif /* LACKS_FCNTL_H */
62 #endif /* HAVE_MMAP */
63 #ifndef LACKS_UNISTD_H
64 #include <unistd.h> /* for sbrk, sysconf */
65 #else /* LACKS_UNISTD_H */
66 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
67 extern void* sbrk(ptrdiff_t);
68 #endif /* FreeBSD etc */
69 #endif /* LACKS_UNISTD_H */
70 
71 /* Declarations for locking */
72 #if USE_LOCKS
73 #ifndef WIN32
74 #if defined (__SVR4) && defined (__sun) /* solaris */
75 #include <thread.h>
76 #elif !defined(LACKS_SCHED_H)
77 #include <sched.h>
78 #endif /* solaris or LACKS_SCHED_H */
79 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
80 #include <pthread.h>
81 #endif /* USE_RECURSIVE_LOCKS ... */
82 #elif defined(_MSC_VER)
83 #ifndef _M_AMD64
84 /* These are already defined on AMD64 builds */
85 #ifdef __cplusplus
86 extern "C" {
87 #endif /* __cplusplus */
88 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
89 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
90 #ifdef __cplusplus
91 }
92 #endif /* __cplusplus */
93 #endif /* _M_AMD64 */
94 #pragma intrinsic (_InterlockedCompareExchange)
95 #pragma intrinsic (_InterlockedExchange)
96 #define interlockedcompareexchange _InterlockedCompareExchange
97 #define interlockedexchange _InterlockedExchange
98 #elif defined(WIN32) && defined(__GNUC__)
99 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
100 #define interlockedexchange __sync_lock_test_and_set
101 #endif /* Win32 */
102 #else /* USE_LOCKS */
103 #endif /* USE_LOCKS */
104 
105 #ifndef LOCK_AT_FORK
106 #define LOCK_AT_FORK 0
107 #endif
108 
109 /* Declarations for bit scanning on win32 */
110 #if defined(_MSC_VER) && _MSC_VER>=1300
111 #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
112 #ifdef __cplusplus
113 extern "C" {
114 #endif /* __cplusplus */
115 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
116 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
117 #ifdef __cplusplus
118 }
119 #endif /* __cplusplus */
120 
121 #define BitScanForward _BitScanForward
122 #define BitScanReverse _BitScanReverse
123 #pragma intrinsic(_BitScanForward)
124 #pragma intrinsic(_BitScanReverse)
125 #endif /* BitScanForward */
126 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
127 
128 #ifndef WIN32
129 #ifndef malloc_getpagesize
130 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
131 # ifndef _SC_PAGE_SIZE
132 # define _SC_PAGE_SIZE _SC_PAGESIZE
133 # endif
134 # endif
135 # ifdef _SC_PAGE_SIZE
136 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
137 # else
138 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
139  extern size_t getpagesize();
140 # define malloc_getpagesize getpagesize()
141 # else
142 # ifdef WIN32 /* use supplied emulation of getpagesize */
143 # define malloc_getpagesize getpagesize()
144 # else
145 # ifndef LACKS_SYS_PARAM_H
146 # include <sys/param.h>
147 # endif
148 # ifdef EXEC_PAGESIZE
149 # define malloc_getpagesize EXEC_PAGESIZE
150 # else
151 # ifdef NBPG
152 # ifndef CLSIZE
153 # define malloc_getpagesize NBPG
154 # else
155 # define malloc_getpagesize (NBPG * CLSIZE)
156 # endif
157 # else
158 # ifdef NBPC
159 # define malloc_getpagesize NBPC
160 # else
161 # ifdef PAGESIZE
162 # define malloc_getpagesize PAGESIZE
163 # else /* just guess */
164 # define malloc_getpagesize ((size_t)4096U)
165 # endif
166 # endif
167 # endif
168 # endif
169 # endif
170 # endif
171 # endif
172 #endif
173 #endif
174 
175 /* ------------------- size_t and alignment properties -------------------- */
176 
177 /* The byte and bit size of a size_t */
178 #define SIZE_T_SIZE (sizeof(size_t))
179 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
180 
181 /* Some constants coerced to size_t */
182 /* Annoying but necessary to avoid errors on some platforms */
183 #define SIZE_T_ZERO ((size_t)0)
184 #define SIZE_T_ONE ((size_t)1)
185 #define SIZE_T_TWO ((size_t)2)
186 #define SIZE_T_FOUR ((size_t)4)
187 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
188 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
189 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
190 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
191 
192 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
193 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
194 
195 /* True if address a has acceptable alignment */
196 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
197 
198 /* the number of bytes to offset an address to align it */
199 #define align_offset(A)\
200  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
201  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
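/*
  Editorial note (not part of the original dlmalloc sources): a worked
  example of the two macros above, assuming MALLOC_ALIGNMENT == 16 so
  that CHUNK_ALIGN_MASK == 0xf:

     is_aligned(0x1000)   -> 1    (0x1000 & 0xf == 0)
     is_aligned(0x1004)   -> 0
     align_offset(0x1000) -> 0    (already aligned, nothing to add)
     align_offset(0x1004) -> 12   ((16 - 4) & 0xf; rounds 0x1004 up to 0x1010)
*/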
202 
203 /* -------------------------- MMAP preliminaries ------------------------- */
204 
205 /*
206  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
207  checks to fail so compiler optimizer can delete code rather than
208  using so many "#if"s.
209 */
210 
211 
212 /* MORECORE and MMAP must return MFAIL on failure */
213 #define MFAIL ((void*)(MAX_SIZE_T))
214 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
215 
216 #if HAVE_MMAP
217 
218 #ifndef WIN32
219 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
220 #define MMAP_PROT (PROT_READ|PROT_WRITE)
221 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
222 #define MAP_ANONYMOUS MAP_ANON
223 #endif /* MAP_ANON */
224 #ifdef MAP_ANONYMOUS
225 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
226 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
227 #else /* MAP_ANONYMOUS */
228 /*
229  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
230  is unlikely to be needed, but is supplied just in case.
231 */
232 #define MMAP_FLAGS (MAP_PRIVATE)
233 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
234 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
235  (dev_zero_fd = open("/dev/zero", O_RDWR), \
236  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
237  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
238 #endif /* MAP_ANONYMOUS */
239 
240 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
241 
242 #else /* WIN32 */
243 
244 /* Win32 MMAP via VirtualAlloc */
245 static FORCEINLINE void* win32mmap(size_t size) {
246  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
247  return (ptr != 0)? ptr: MFAIL;
248 }
249 
250 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
251 static FORCEINLINE void* win32direct_mmap(size_t size) {
252  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
253  PAGE_READWRITE);
254  return (ptr != 0)? ptr: MFAIL;
255 }
256 
257 /* This function supports releasing coalesced segments */
258 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
259  MEMORY_BASIC_INFORMATION minfo;
260  char* cptr = (char*)ptr;
261  while (size) {
262  if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
263  return -1;
264  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
265  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
266  return -1;
267  if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
268  return -1;
269  cptr += minfo.RegionSize;
270  size -= minfo.RegionSize;
271  }
272  return 0;
273 }
274 
275 #define MMAP_DEFAULT(s) win32mmap(s)
276 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
277 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
278 #endif /* WIN32 */
279 #endif /* HAVE_MMAP */
280 
281 #if HAVE_MREMAP
282 #ifndef WIN32
283 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
284 #endif /* WIN32 */
285 #endif /* HAVE_MREMAP */
286 
287 /**
288  * Define CALL_MORECORE
289  */
290 #if HAVE_MORECORE
291  #ifdef MORECORE
292  #define CALL_MORECORE(S) MORECORE(S)
293  #else /* MORECORE */
294  #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
295  #endif /* MORECORE */
296 #else /* HAVE_MORECORE */
297  #define CALL_MORECORE(S) MFAIL
298 #endif /* HAVE_MORECORE */
299 
300 /**
301  * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
302  */
303 #if HAVE_MMAP
304  #define USE_MMAP_BIT (SIZE_T_ONE)
305 
306  #ifdef MMAP
307  #define CALL_MMAP(s) MMAP(s)
308  #else /* MMAP */
309  #define CALL_MMAP(s) MMAP_DEFAULT(s)
310  #endif /* MMAP */
311  #ifdef MUNMAP
312  #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
313  #else /* MUNMAP */
314  #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
315  #endif /* MUNMAP */
316  #ifdef DIRECT_MMAP
317  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
318  #else /* DIRECT_MMAP */
319  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
320  #endif /* DIRECT_MMAP */
321 #else /* HAVE_MMAP */
322  #define USE_MMAP_BIT (SIZE_T_ZERO)
323 
324  #define MMAP(s) MFAIL
325  #define MUNMAP(a, s) (-1)
326  #define DIRECT_MMAP(s) MFAIL
327  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
328  #define CALL_MMAP(s) MMAP(s)
329  #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
330 #endif /* HAVE_MMAP */
331 
332 /**
333  * Define CALL_MREMAP
334  */
335 #if HAVE_MMAP && HAVE_MREMAP
336  #ifdef MREMAP
337  #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
338  #else /* MREMAP */
339  #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
340  #endif /* MREMAP */
341 #else /* HAVE_MMAP && HAVE_MREMAP */
342  #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
343 #endif /* HAVE_MMAP && HAVE_MREMAP */
344 
345 /* mstate bit set if contiguous morecore disabled or failed */
346 #define USE_NONCONTIGUOUS_BIT (4U)
347 
348 /* mstate bit set if no expansion allowed */
349 #define USE_NOEXPAND_BIT (8U)
350 
351 /* trace allocations if set */
352 #define USE_TRACE_BIT (16U)
353 
354 /* segment bit set in create_mspace_with_base */
355 #define EXTERN_BIT (8U)
356 
357 
358 /* --------------------------- Lock preliminaries ------------------------ */
359 
360 /*
361  When locks are defined, there is one global lock, plus
362  one per-mspace lock.
363 
364  The global lock ensures that mparams.magic and other unique
365  mparams values are initialized only once. It also protects
366  sequences of calls to MORECORE. In many cases sys_alloc requires
367  two calls, that should not be interleaved with calls by other
368  threads. This does not protect against direct calls to MORECORE
369  by other threads not using this lock, so there is still code to
370  cope as best we can with interference.
371 
372  Per-mspace locks surround calls to malloc, free, etc.
373  By default, locks are simple non-reentrant mutexes.
374 
375  Because lock-protected regions generally have bounded times, it is
376  OK to use the supplied simple spinlocks. Spinlocks are likely to
377  improve performance for lightly contended applications, but worsen
378  performance under heavy contention.
379 
380  If USE_LOCKS is > 1, the definitions of lock routines here are
381  bypassed, in which case you will need to define the type MLOCK_T,
382  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
383  and TRY_LOCK. You must also declare a
384  static MLOCK_T malloc_global_mutex = { initialization values };.
385 
386 */
387 
388 #if !USE_LOCKS
389 #define USE_LOCK_BIT (0U)
390 #define INITIAL_LOCK(l) (0)
391 #define DESTROY_LOCK(l) (0)
392 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
393 #define RELEASE_MALLOC_GLOBAL_LOCK()
394 
395 #else
396 #if USE_LOCKS > 1
397 /* ----------------------- User-defined locks ------------------------ */
398 /* Define your own lock implementation here */
399 /* #define INITIAL_LOCK(lk) ... */
400 /* #define DESTROY_LOCK(lk) ... */
401 /* #define ACQUIRE_LOCK(lk) ... */
402 /* #define RELEASE_LOCK(lk) ... */
403 /* #define TRY_LOCK(lk) ... */
404 /* static MLOCK_T malloc_global_mutex = ... */
405 
406 #elif USE_SPIN_LOCKS
407 
408 /* First, define CAS_LOCK and CLEAR_LOCK on ints */
409 /* Note CAS_LOCK defined to return 0 on success */
410 
411 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
412 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
413 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
414 
415 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
416 /* Custom spin locks for older gcc on x86 */
417 static FORCEINLINE int x86_cas_lock(int *sl) {
418  int ret;
419  int val = 1;
420  int cmp = 0;
421  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
422  : "=a" (ret)
423  : "r" (val), "m" (*(sl)), "0"(cmp)
424  : "memory", "cc");
425  return ret;
426 }
427 
428 static FORCEINLINE void x86_clear_lock(int* sl) {
429  assert(*sl != 0);
430  int prev = 0;
431  int ret;
432  __asm__ __volatile__ ("lock; xchgl %0, %1"
433  : "=r" (ret)
434  : "m" (*(sl)), "0"(prev)
435  : "memory");
436 }
437 
438 #define CAS_LOCK(sl) x86_cas_lock(sl)
439 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
440 
441 #else /* Win32 MSC */
442 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
443 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
444 
445 #endif /* ... gcc spin locks ... */
446 
447 /* How to yield for a spin lock */
448 #define SPINS_PER_YIELD 63
449 #if defined(_MSC_VER)
450 #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
451 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
452 #elif defined (__SVR4) && defined (__sun) /* solaris */
453 #define SPIN_LOCK_YIELD thr_yield();
454 #elif !defined(LACKS_SCHED_H)
455 #define SPIN_LOCK_YIELD sched_yield();
456 #else
457 #define SPIN_LOCK_YIELD
458 #endif /* ... yield ... */
459 
460 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
461 /* Plain spin locks use single word (embedded in malloc_states) */
462 static int spin_acquire_lock(int *sl) {
463  int spins = 0;
464  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
465  if ((++spins & SPINS_PER_YIELD) == 0) {
466  SPIN_LOCK_YIELD;
467  }
468  }
469  return 0;
470 }
471 
472 #define MLOCK_T int
473 #define TRY_LOCK(sl) !CAS_LOCK(sl)
474 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
475 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
476 #define INITIAL_LOCK(sl) (*sl = 0)
477 #define DESTROY_LOCK(sl) (0)
478 static MLOCK_T malloc_global_mutex = 0;
479 
480 #else /* USE_RECURSIVE_LOCKS */
481 /* types for lock owners */
482 #ifdef WIN32
483 #define THREAD_ID_T DWORD
484 #define CURRENT_THREAD GetCurrentThreadId()
485 #define EQ_OWNER(X,Y) ((X) == (Y))
486 #else
487 /*
488  Note: the following assume that pthread_t is a type that can be
489  initialized to (casted) zero. If this is not the case, you will need to
490  somehow redefine these or not use spin locks.
491 */
492 #define THREAD_ID_T pthread_t
493 #define CURRENT_THREAD pthread_self()
494 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
495 #endif
496 
497 struct malloc_recursive_lock {
498  int sl;
499  unsigned int c;
500  THREAD_ID_T threadid;
501 };
502 
503 #define MLOCK_T struct malloc_recursive_lock
504 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
505 
506 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
507  assert(lk->sl != 0);
508  if (--lk->c == 0) {
509  CLEAR_LOCK(&lk->sl);
510  }
511 }
512 
513 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
514  THREAD_ID_T mythreadid = CURRENT_THREAD;
515  int spins = 0;
516  for (;;) {
517  if (*((volatile int *)(&lk->sl)) == 0) {
518  if (!CAS_LOCK(&lk->sl)) {
519  lk->threadid = mythreadid;
520  lk->c = 1;
521  return 0;
522  }
523  }
524  else if (EQ_OWNER(lk->threadid, mythreadid)) {
525  ++lk->c;
526  return 0;
527  }
528  if ((++spins & SPINS_PER_YIELD) == 0) {
529  SPIN_LOCK_YIELD;
530  }
531  }
532 }
533 
534 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
535  THREAD_ID_T mythreadid = CURRENT_THREAD;
536  if (*((volatile int *)(&lk->sl)) == 0) {
537  if (!CAS_LOCK(&lk->sl)) {
538  lk->threadid = mythreadid;
539  lk->c = 1;
540  return 1;
541  }
542  }
543  else if (EQ_OWNER(lk->threadid, mythreadid)) {
544  ++lk->c;
545  return 1;
546  }
547  return 0;
548 }
549 
550 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
551 #define TRY_LOCK(lk) recursive_try_lock(lk)
552 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
553 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
554 #define DESTROY_LOCK(lk) (0)
555 #endif /* USE_RECURSIVE_LOCKS */
556 
557 #elif defined(WIN32) /* Win32 critical sections */
558 #define MLOCK_T CRITICAL_SECTION
559 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
560 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
561 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
562 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
563 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
564 #define NEED_GLOBAL_LOCK_INIT
565 
566 static MLOCK_T malloc_global_mutex;
567 static volatile LONG malloc_global_mutex_status;
568 
569 /* Use spin loop to initialize global lock */
570 static void init_malloc_global_mutex() {
571  for (;;) {
572  long stat = malloc_global_mutex_status;
573  if (stat > 0)
574  return;
575  /* transition to < 0 while initializing, then to > 0 */
576  if (stat == 0 &&
577  interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
578  InitializeCriticalSection(&malloc_global_mutex);
579  interlockedexchange(&malloc_global_mutex_status, (LONG)1);
580  return;
581  }
582  SleepEx(0, FALSE);
583  }
584 }
585 
586 #else /* pthreads-based locks */
587 #define MLOCK_T pthread_mutex_t
588 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
589 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
590 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
591 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
592 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
593 
594 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
595 /* Cope with old-style linux recursive lock initialization by adding */
596 /* skipped internal declaration from pthread.h */
597 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
598  int __kind));
599 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
600 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
601 #endif /* USE_RECURSIVE_LOCKS ... */
602 
603 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
604 
605 static int pthread_init_lock (MLOCK_T *lk) {
606  pthread_mutexattr_t attr;
607  if (pthread_mutexattr_init(&attr)) return 1;
608 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
609  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
610 #endif
611  if (pthread_mutex_init(lk, &attr)) return 1;
612  if (pthread_mutexattr_destroy(&attr)) return 1;
613  return 0;
614 }
615 
616 #endif /* ... lock types ... */
617 
618 /* Common code for all lock types */
619 #define USE_LOCK_BIT (2U)
620 
621 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
622 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
623 #endif
624 
625 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
626 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
627 #endif
628 
629 #endif /* USE_LOCKS */
630 
631 /* ----------------------- Chunk representations ------------------------ */
632 
633 /*
634  (The following includes lightly edited explanations by Colin Plumb.)
635 
636  The malloc_chunk declaration below is misleading (but accurate and
637  necessary). It declares a "view" into memory allowing access to
638  necessary fields at known offsets from a given base.
639 
640  Chunks of memory are maintained using a `boundary tag' method as
641  originally described by Knuth. (See the paper by Paul Wilson
642  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
643  techniques.) Sizes of free chunks are stored both in the front of
644  each chunk and at the end. This makes consolidating fragmented
645  chunks into bigger chunks fast. The head fields also hold bits
646  representing whether chunks are free or in use.
647 
648  Here are some pictures to make it clearer. They are "exploded" to
649  show that the state of a chunk can be thought of as extending from
650  the high 31 bits of the head field of its header through the
651  prev_foot and PINUSE_BIT bit of the following chunk header.
652 
653  A chunk that's in use looks like:
654 
655  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
656  | Size of previous chunk (if P = 0) |
657  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
658  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
659  | Size of this chunk 1| +-+
660  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
661  | |
662  +- -+
663  | |
664  +- -+
665  | :
666  +- size - sizeof(size_t) available payload bytes -+
667  : |
668  chunk-> +- -+
669  | |
670  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
671  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
672  | Size of next chunk (may or may not be in use) | +-+
673  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
674 
675  And if it's free, it looks like this:
676 
677  chunk-> +- -+
678  | User payload (must be in use, or we would have merged!) |
679  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
680  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
681  | Size of this chunk 0| +-+
682  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
683  | Next pointer |
684  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
685  | Prev pointer |
686  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
687  | :
688  +- size - sizeof(struct chunk) unused bytes -+
689  : |
690  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
691  | Size of this chunk |
692  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
693  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
694  | Size of next chunk (must be in use, or we would have merged)| +-+
695  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
696  | :
697  +- User payload -+
698  : |
699  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
700  |0|
701  +-+
702  Note that since we always merge adjacent free chunks, the chunks
703  adjacent to a free chunk must be in use.
704 
705  Given a pointer to a chunk (which can be derived trivially from the
706  payload pointer) we can, in O(1) time, find out whether the adjacent
707  chunks are free, and if so, unlink them from the lists that they
708  are on and merge them with the current chunk.
709 
710  Chunks always begin on even word boundaries, so the mem portion
711  (which is returned to the user) is also on an even word boundary, and
712  thus at least double-word aligned.
713 
714  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
715  chunk size (which is always a multiple of two words), is an in-use
716  bit for the *previous* chunk. If that bit is *clear*, then the
717  word before the current chunk size contains the previous chunk
718  size, and can be used to find the front of the previous chunk.
719  The very first chunk allocated always has this bit set, preventing
720  access to non-existent (or non-owned) memory. If pinuse is set for
721  any given chunk, then you CANNOT determine the size of the
722  previous chunk, and might even get a memory addressing fault when
723  trying to do so.
724 
725  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
726  the chunk size redundantly records whether the current chunk is
727  inuse (unless the chunk is mmapped). This redundancy enables usage
728  checks within free and realloc, and reduces indirection when freeing
729  and consolidating chunks.
730 
731  Each freshly allocated chunk must have both cinuse and pinuse set.
732  That is, each allocated chunk borders either a previously allocated
733  and still in-use chunk, or the base of its memory arena. This is
734  ensured by making all allocations from the `lowest' part of any
735  found chunk. Further, no free chunk physically borders another one,
736  so each free chunk is known to be preceded and followed by either
737  inuse chunks or the ends of memory.
738 
739  Note that the `foot' of the current chunk is actually represented
740  as the prev_foot of the NEXT chunk. This makes it easier to
741  deal with alignments etc but can be very confusing when trying
742  to extend or adapt this code.
743 
744  The exceptions to all this are
745 
746  1. The special chunk `top' is the top-most available chunk (i.e.,
747  the one bordering the end of available memory). It is treated
748  specially. Top is never included in any bin, is used only if
749  no other chunk is available, and is released back to the
750  system if it is very large (see M_TRIM_THRESHOLD). In effect,
751  the top chunk is treated as larger (and thus less well
752  fitting) than any other available chunk. The top chunk
753  doesn't update its trailing size field since there is no next
754  contiguous chunk that would have to index off it. However,
755  space is still allocated for it (TOP_FOOT_SIZE) to enable
756  separation or merging when space is extended.
757 
758  2. Chunks allocated via mmap have both cinuse and pinuse bits
759  cleared in their head fields. Because they are allocated
760  one-by-one, each must carry its own prev_foot field, which is
761  also used to hold the offset this chunk has within its mmapped
762  region, which is needed to preserve alignment. Each mmapped
763  chunk is trailed by the first two fields of a fake next-chunk
764  for sake of usage checks.
765 
766 */
767 
768 struct malloc_chunk {
769  size_t prev_foot; /* Size of previous chunk (if free). */
770  size_t head; /* Size and inuse bits. */
771  struct malloc_chunk* fd; /* double links -- used only if free. */
772  struct malloc_chunk* bk;
773 };
774 
775 typedef struct malloc_chunk mchunk;
776 typedef struct malloc_chunk* mchunkptr;
777 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
778 typedef unsigned int bindex_t; /* Described below */
779 typedef unsigned int binmap_t; /* Described below */
780 typedef unsigned int flag_t; /* The type of various bit flag sets */
781 
782 /* ------------------- Chunks sizes and alignments ----------------------- */
783 
784 #define MCHUNK_SIZE (sizeof(mchunk))
785 
786 #if FOOTERS
787 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
788 #else /* FOOTERS */
789 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
790 #endif /* FOOTERS */
791 
792 /* MMapped chunks need a second word of overhead ... */
793 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
794 /* ... and additional padding for fake next-chunk at foot */
795 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
796 
797 /* The smallest size we can malloc is an aligned minimal chunk */
798 #define MIN_CHUNK_SIZE\
799  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
800 
801 /* conversion from malloc headers to user pointers, and back */
802 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
803 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
804 /* chunk associated with aligned address A */
805 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
806 
807 /* Bounds on request (not chunk) sizes. */
808 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
809 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
810 
811 /* pad request bytes into a usable size */
812 #define pad_request(req) \
813  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
814 
815 /* pad request, checking for minimum (but not maximum) */
816 #define request2size(req) \
817  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
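/*
  Editorial note (not part of the original sources): concrete values for
  the size macros above, assuming a 64-bit size_t, MALLOC_ALIGNMENT == 16
  and FOOTERS == 0 (so CHUNK_OVERHEAD == SIZE_T_SIZE == 8):

     MCHUNK_SIZE = 32, MIN_CHUNK_SIZE = 32, MIN_REQUEST = 23

     request2size(1)  -> 32   (below MIN_REQUEST, clamped to MIN_CHUNK_SIZE)
     request2size(24) -> 32   ((24 + 8 + 15) & ~15)
     request2size(25) -> 48   ((25 + 8 + 15) & ~15)

  chunk2mem()/mem2chunk() simply move between a chunk header and its user
  pointer, offset by TWO_SIZE_T_SIZES (16 bytes here).
*/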
818 
819 
820 /* ------------------ Operations on head and foot fields ----------------- */
821 
822 /*
823  The head field of a chunk is or'ed with PINUSE_BIT when the previous
824  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
825  use, unless mmapped, in which case both bits are cleared.
826 
827  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
828 */
829 
830 #define PINUSE_BIT (SIZE_T_ONE)
831 #define CINUSE_BIT (SIZE_T_TWO)
832 #define FLAG4_BIT (SIZE_T_FOUR)
833 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
834 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
835 
836 /* Head value for fenceposts */
837 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
838 
839 /* extraction of fields from head words */
840 #define cinuse(p) ((p)->head & CINUSE_BIT)
841 #define pinuse(p) ((p)->head & PINUSE_BIT)
842 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
843 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
844 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
845 
846 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
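/*
  Editorial note (not part of the original sources): example head-field
  encodings under the bit layout above:

     head == 0x43  ->  chunksize 0x40, cinuse and pinuse set
                       (an in-use chunk whose predecessor is also in use)
     head == 0x41  ->  chunksize 0x40, cinuse clear: a free chunk whose
                       predecessor is in use
     head == 0x40  ->  both inuse bits clear: an mmapped chunk
                       (is_mmapped(p) is nonzero)
*/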
847 
848 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
849 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
850 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
851 
852 /* Treat space at ptr +/- offset as a chunk */
853 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
854 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
855 
856 /* Ptr to next or previous physical malloc_chunk. */
857 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
858 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
859 
860 /* extract next chunk's pinuse bit */
861 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
862 
863 /* Get/set size at footer */
864 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
865 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
866 
867 /* Set size, pinuse bit, and foot */
868 #define set_size_and_pinuse_of_free_chunk(p, s)\
869  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
870 
871 /* Set size, pinuse bit, foot, and clear next pinuse */
872 #define set_free_with_pinuse(p, s, n)\
873  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
874 
875 /* Get the internal overhead associated with chunk p */
876 #define overhead_for(p)\
877  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
878 
879 /* Return true if malloced space is not necessarily cleared */
880 #if MMAP_CLEARS
881 #define calloc_must_clear(p) (!is_mmapped(p))
882 #else /* MMAP_CLEARS */
883 #define calloc_must_clear(p) (1)
884 #endif /* MMAP_CLEARS */
885 
886 /* ---------------------- Overlaid data structures ----------------------- */
887 
888 /*
889  When chunks are not in use, they are treated as nodes of either
890  lists or trees.
891 
892  "Small" chunks are stored in circular doubly-linked lists, and look
893  like this:
894 
895  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
896  | Size of previous chunk |
897  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
898  `head:' | Size of chunk, in bytes |P|
899  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
900  | Forward pointer to next chunk in list |
901  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
902  | Back pointer to previous chunk in list |
903  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
904  | Unused space (may be 0 bytes long) .
905  . .
906  . |
907 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
908  `foot:' | Size of chunk, in bytes |
909  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
910 
911  Larger chunks are kept in a form of bitwise digital trees (aka
912  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
913  free chunks greater than 256 bytes, their size doesn't impose any
914  constraints on user chunk sizes. Each node looks like:
915 
916  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
917  | Size of previous chunk |
918  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
919  `head:' | Size of chunk, in bytes |P|
920  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
921  | Forward pointer to next chunk of same size |
922  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
923  | Back pointer to previous chunk of same size |
924  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
925  | Pointer to left child (child[0]) |
926  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
927  | Pointer to right child (child[1]) |
928  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
929  | Pointer to parent |
930  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
931  | bin index of this chunk |
932  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
933  | Unused space .
934  . |
935 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
936  `foot:' | Size of chunk, in bytes |
937  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
938 
939  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
940  of the same size are arranged in a circularly-linked list, with only
941  the oldest chunk (the next to be used, in our FIFO ordering)
942  actually in the tree. (Tree members are distinguished by a non-null
943  parent pointer.) If a chunk with the same size as an existing node
944  is inserted, it is linked off the existing node using pointers that
945  work in the same way as fd/bk pointers of small chunks.
946 
947  Each tree contains a power of 2 sized range of chunk sizes (the
948  smallest is 0x100 <= x < 0x180), which is divided in half at each
949  tree level, with the chunks in the smaller half of the range (0x100
950  <= x < 0x140 for the top node) in the left subtree and the larger
951  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
952  done by inspecting individual bits.
953 
954  Using these rules, each node's left subtree contains all smaller
955  sizes than its right subtree. However, the node at the root of each
956  subtree has no particular ordering relationship to either. (The
957  dividing line between the subtree sizes is based on trie relation.)
958  If we remove the last chunk of a given size from the interior of the
959  tree, we need to replace it with a leaf node. The tree ordering
960  rules permit a node to be replaced by any leaf below it.
961 
962  The smallest chunk in a tree (a common operation in a best-fit
963  allocator) can be found by walking a path to the leftmost leaf in
964  the tree. Unlike a usual binary tree, where we follow left child
965  pointers until we reach a null, here we follow the right child
966  pointer any time the left one is null, until we reach a leaf with
967  both child pointers null. The smallest chunk in the tree will be
968  somewhere along that path.
969 
970  The worst case number of steps to add, find, or remove a node is
971  bounded by the number of bits differentiating chunks within
972  bins. Under current bin calculations, this ranges from 6 up to 21
973  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
974  is of course much better.
975 */
976 
977 struct malloc_tree_chunk {
978  /* The first four fields must be compatible with malloc_chunk */
979  size_t prev_foot;
980  size_t head;
981  struct malloc_tree_chunk* fd;
982  struct malloc_tree_chunk* bk;
983 
984  struct malloc_tree_chunk* child[2];
985  struct malloc_tree_chunk* parent;
986  bindex_t index;
987 };
988 
989 typedef struct malloc_tree_chunk tchunk;
990 typedef struct malloc_tree_chunk* tchunkptr;
991 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
992 
993 /* A little helper macro for trees */
994 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
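/*
  Editorial sketch (not part of the original sources, and not referenced
  elsewhere): the leftmost-path walk described in the comment above, used
  to locate the smallest chunk held in a tree of free chunks.
*/
static tchunkptr example_smallest_in_tree(tchunkptr t) {
  tchunkptr best = t;
  tchunkptr u = t;
  /* Follow the left child, or the right child when the left is null. */
  while ((u = leftmost_child(u)) != 0) {
    if (chunksize(u) < chunksize(best))
      best = u;
  }
  return best; /* the smallest chunk lies somewhere along this path */
}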
995 
996 /* ----------------------------- Segments -------------------------------- */
997 
998 /*
999  Each malloc space may include non-contiguous segments, held in a
1000  list headed by an embedded malloc_segment record representing the
1001  top-most space. Segments also include flags holding properties of
1002  the space. Large chunks that are directly allocated by mmap are not
1003  included in this list. They are instead independently created and
1004  destroyed without otherwise keeping track of them.
1005 
1006  Segment management mainly comes into play for spaces allocated by
1007  MMAP. Any call to MMAP might or might not return memory that is
1008  adjacent to an existing segment. MORECORE normally contiguously
1009  extends the current space, so this space is almost always adjacent,
1010  which is simpler and faster to deal with. (This is why MORECORE is
1011  used preferentially to MMAP when both are available -- see
1012  sys_alloc.) When allocating using MMAP, we don't use any of the
1013  hinting mechanisms (inconsistently) supported in various
1014  implementations of unix mmap, or distinguish reserving from
1015  committing memory. Instead, we just ask for space, and exploit
1016  contiguity when we get it. It is probably possible to do
1017  better than this on some systems, but no general scheme seems
1018  to be significantly better.
1019 
1020  Management entails a simpler variant of the consolidation scheme
1021  used for chunks to reduce fragmentation -- new adjacent memory is
1022  normally prepended or appended to an existing segment. However,
1023  there are limitations compared to chunk consolidation that mostly
1024  reflect the fact that segment processing is relatively infrequent
1025  (occurring only when getting memory from system) and that we
1026  don't expect to have huge numbers of segments:
1027 
1028  * Segments are not indexed, so traversal requires linear scans. (It
1029  would be possible to index these, but is not worth the extra
1030  overhead and complexity for most programs on most platforms.)
1031  * New segments are only appended to old ones when holding top-most
1032  memory; if they cannot be prepended to others, they are held in
1033  different segments.
1034 
1035  Except for the top-most segment of an mstate, each segment record
1036  is kept at the tail of its segment. Segments are added by pushing
1037  segment records onto the list headed by &mstate.seg for the
1038  containing mstate.
1039 
1040  Segment flags control allocation/merge/deallocation policies:
1041  * If EXTERN_BIT set, then we did not allocate this segment,
1042  and so should not try to deallocate or merge with others.
1043  (This currently holds only for the initial segment passed
1044  into create_mspace_with_base.)
1045  * If USE_MMAP_BIT set, the segment may be merged with
1046  other surrounding mmapped segments and trimmed/de-allocated
1047  using munmap.
1048  * If neither bit is set, then the segment was obtained using
1049  MORECORE so can be merged with surrounding MORECORE'd segments
1050  and deallocated/trimmed using MORECORE with negative arguments.
1051 */
1052 
1053 struct malloc_segment {
1054  char* base; /* base address */
1055  size_t size; /* allocated size */
1056  struct malloc_segment* next; /* ptr to next segment */
1057  flag_t sflags; /* mmap and extern flag */
1058 };
1059 
1060 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1061 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1062 
1063 typedef struct malloc_segment msegment;
1064 typedef struct malloc_segment* msegmentptr;
1065 
1066 /* ---------------------------- malloc_state ----------------------------- */
1067 
1068 /*
1069  A malloc_state holds all of the bookkeeping for a space.
1070  The main fields are:
1071 
1072  Top
1073  The topmost chunk of the currently active segment. Its size is
1074  cached in topsize. The actual size of topmost space is
1075  topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1076  fenceposts and segment records if necessary when getting more
1077  space from the system. The size at which to autotrim top is
1078  cached from mparams in trim_check, except that it is disabled if
1079  an autotrim fails.
1080 
1081  Designated victim (dv)
1082  This is the preferred chunk for servicing small requests that
1083  don't have exact fits. It is normally the chunk split off most
1084  recently to service another small request. Its size is cached in
1085  dvsize. The link fields of this chunk are not maintained since it
1086  is not kept in a bin.
1087 
1088  SmallBins
1089  An array of bin headers for free chunks. These bins hold chunks
1090  with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1091  chunks of all the same size, spaced 8 bytes apart. To simplify
1092  use in double-linked lists, each bin header acts as a malloc_chunk
1093  pointing to the real first node, if it exists (else pointing to
1094  itself). This avoids special-casing for headers. But to avoid
1095  waste, we allocate only the fd/bk pointers of bins, and then use
1096  repositioning tricks to treat these as the fields of a chunk.
1097 
1098  TreeBins
1099  Treebins are pointers to the roots of trees holding a range of
1100  sizes. There are 2 equally spaced treebins for each power of two
1101  from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
1102  larger.
1103 
1104  Bin maps
1105  There is one bit map for small bins ("smallmap") and one for
1106  treebins ("treemap"). Each bin sets its bit when non-empty, and
1107  clears the bit when empty. Bit operations are then used to avoid
1108  bin-by-bin searching -- nearly all "search" is done without ever
1109  looking at bins that won't be selected. The bit maps
1110  conservatively use 32 bits per map word, even on 64-bit systems.
1111  For a good description of some of the bit-based techniques used
1112  here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1113  supplement at http://hackersdelight.org/). Many of these are
1114  intended to reduce the branchiness of paths through malloc etc, as
1115  well as to reduce the number of memory locations read or written.
1116 
1117  Segments
1118  A list of segments headed by an embedded malloc_segment record
1119  representing the initial space.
1120 
1121  Address check support
1122  The least_addr field is the least address ever obtained from
1123  MORECORE or MMAP. Attempted frees and reallocs of any address less
1124  than this are trapped (unless INSECURE is defined).
1125 
1126  Magic tag
1127  A cross-check field that should always hold same value as mparams.magic.
1128 
1129  Max allowed footprint
1130  The maximum allowed bytes to allocate from system (zero means no limit)
1131 
1132  Flags
1133  Bits recording whether to use MMAP, locks, or contiguous MORECORE
1134 
1135  Statistics
1136  Each space keeps track of current and maximum system memory
1137  obtained via MORECORE or MMAP.
1138 
1139  Trim support
1140  Fields holding the amount of unused topmost memory that should trigger
1141  trimming, and a counter to force periodic scanning to release unused
1142  non-topmost segments.
1143 
1144  Locking
1145  If USE_LOCKS is defined, the "mutex" lock is acquired and released
1146  around every public call using this mspace.
1147 
1148  Extension support
1149  A void* pointer and a size_t field that can be used to help implement
1150  extensions to this malloc.
1151 */
1152 
1153 /* Bin types, widths and sizes */
1154 #define NSMALLBINS (32U)
1155 #define NTREEBINS (32U)
1156 #define SMALLBIN_SHIFT (3U)
1157 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1158 #define TREEBIN_SHIFT (8U)
1159 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1160 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1161 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
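/*
  Editorial note (not part of the original sources): with these values,
  smallbin i corresponds to chunks of exactly 8*i bytes (SMALLBIN_WIDTH
  is 8), and MIN_LARGE_SIZE is 256, so any free chunk of 256 bytes or
  more lives in a treebin instead.  Assuming MALLOC_ALIGNMENT == 16 and
  FOOTERS == 0, MAX_SMALL_REQUEST == 255 - 15 - 8 == 232: the largest
  user request that still pads to a small chunk.
*/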
1162 
1163 struct malloc_state {
1164  binmap_t smallmap;
1165  binmap_t treemap;
1166  size_t dvsize;
1167  size_t topsize;
1168  char* least_addr;
1169  mchunkptr dv;
1170  mchunkptr top;
1171  size_t trim_check;
1172  size_t release_checks;
1173  size_t magic;
1174  mchunkptr smallbins[(NSMALLBINS+1)*2];
1175  tbinptr treebins[NTREEBINS];
1176  size_t footprint;
1177  size_t max_footprint;
1178  size_t footprint_limit; /* zero means no limit */
1179  flag_t mflags;
1180 #if USE_LOCKS
1181  MLOCK_T mutex; /* locate lock among fields that rarely change */
1182 #endif /* USE_LOCKS */
1183  msegment seg;
1184  void* extp; /* Unused but available for extensions */
1185  size_t exts;
1186 };
1187 
1188 typedef struct malloc_state* mstate;
1189 
1190 /* ------------- Global malloc_state and malloc_params ------------------- */
1191 
1192 /*
1193  malloc_params holds global properties, including those that can be
1194  dynamically set using mallopt. There is a single instance, mparams,
1195  initialized in init_mparams. Note that the non-zeroness of "magic"
1196  also serves as an initialization flag.
1197 */
1198 
1199 struct malloc_params {
1200  size_t magic;
1201  size_t page_size;
1202  size_t granularity;
1203  size_t mmap_threshold;
1204  size_t trim_threshold;
1205  flag_t default_mflags;
1206 };
1207 
1208 static struct malloc_params mparams;
1209 
1210 /* Ensure mparams initialized */
1211 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1212 
1213 #if !ONLY_MSPACES
1214 
1215 /* The global malloc_state used for all non-"mspace" calls */
1216 static struct malloc_state _gm_;
1217 #define gm (&_gm_)
1218 #define is_global(M) ((M) == &_gm_)
1219 
1220 #endif /* !ONLY_MSPACES */
1221 
1222 #define is_initialized(M) ((M)->top != 0)
1223 
1224 /* -------------------------- system alloc setup ------------------------- */
1225 
1226 /* Operations on mflags */
1227 
1228 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1229 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1230 #if USE_LOCKS
1231 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1232 #else
1233 #define disable_lock(M)
1234 #endif
1235 
1236 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1237 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1238 #if HAVE_MMAP
1239 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1240 #else
1241 #define disable_mmap(M)
1242 #endif
1243 
1244 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1245 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1246 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1247 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1248 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1249 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1250 #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1251 
1252 #define set_lock(M,L)\
1253  ((M)->mflags = (L)?\
1254  ((M)->mflags | USE_LOCK_BIT) :\
1255  ((M)->mflags & ~USE_LOCK_BIT))
1256 
1257 /* page-align a size */
1258 #define page_align(S)\
1259  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1260 
1261 /* granularity-align a size */
1262 #define granularity_align(S)\
1263  (((S) + (mparams.granularity - SIZE_T_ONE))\
1264  & ~(mparams.granularity - SIZE_T_ONE))
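/*
  Editorial note (not part of the original sources): with a 4096-byte
  page, page_align(5000) == 8192 and page_align(4096) == 4096.
  granularity_align() works the same way but uses mparams.granularity,
  which is typically the 64K allocation granularity on Windows and the
  page size elsewhere.
*/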
1265 
1266 
1267 /* For mmap, use granularity alignment on windows, else page-align */
1268 #ifdef WIN32
1269 #define mmap_align(S) granularity_align(S)
1270 #else
1271 #define mmap_align(S) page_align(S)
1272 #endif
1273 
1274 /* For sys_alloc, enough padding to ensure the request can be malloc'ed on success */
1275 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1276 
1277 #define is_page_aligned(S)\
1278  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1279 #define is_granularity_aligned(S)\
1280  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1281 
1282 /* True if segment S holds address A */
1283 #define segment_holds(S, A)\
1284  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1285 
1286 /* Return segment holding given address */
1287 static msegmentptr segment_holding(mstate m, char* addr) {
1288  msegmentptr sp = &m->seg;
1289  for (;;) {
1290  if (addr >= sp->base && addr < sp->base + sp->size)
1291  return sp;
1292  if ((sp = sp->next) == 0)
1293  return 0;
1294  }
1295 }
1296 
1297 /* Return true if segment contains a segment link */
1298 static int has_segment_link(mstate m, msegmentptr ss) {
1299  msegmentptr sp = &m->seg;
1300  for (;;) {
1301  if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1302  return 1;
1303  if ((sp = sp->next) == 0)
1304  return 0;
1305  }
1306 }
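/*
  Editorial sketch (not part of the original sources, and not referenced
  elsewhere): segment_holding() maps an arbitrary address back to the
  segment that contains it, e.g. to test whether an address belongs to
  this mspace at all.
*/
static int example_owns_address(mstate m, void* p) {
  return segment_holding(m, (char*)p) != 0; /* nonzero if some segment holds p */
}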
1307 
1308 #ifndef MORECORE_CANNOT_TRIM
1309 #define should_trim(M,s) ((s) > (M)->trim_check)
1310 #else /* MORECORE_CANNOT_TRIM */
1311 #define should_trim(M,s) (0)
1312 #endif /* MORECORE_CANNOT_TRIM */
1313 
1314 /*
1315  TOP_FOOT_SIZE is padding at the end of a segment, including space
1316  that may be needed to place segment records and fenceposts when new
1317  noncontiguous segments are added.
1318 */
1319 #define TOP_FOOT_SIZE\
1320  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
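/*
  Editorial note (not part of the original sources): on a typical 64-bit
  build (MALLOC_ALIGNMENT == 16, FOOTERS == 0, 32-byte struct
  malloc_segment), this works out to 0 + pad_request(32) + MIN_CHUNK_SIZE
  == 48 + 32 == 80 bytes reserved at the top of each segment.
*/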
1321 
1322 
1323 /* ------------------------------- Hooks -------------------------------- */
1324 
1325 /*
1326  PREACTION should be defined to return 0 on success, and nonzero on
1327  failure. If you are not using locking, you can redefine these to do
1328  anything you like.
1329 */
1330 
1331 #if USE_LOCKS
1332 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1333 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1334 #else /* USE_LOCKS */
1335 
1336 #ifndef PREACTION
1337 #define PREACTION(M) (0)
1338 #endif /* PREACTION */
1339 
1340 #ifndef POSTACTION
1341 #define POSTACTION(M)
1342 #endif /* POSTACTION */
1343 
1344 #endif /* USE_LOCKS */
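/*
  Editorial sketch (not part of the original sources, and not referenced
  elsewhere): public entry points bracket their work with
  PREACTION/POSTACTION roughly like this; the body stands in for the
  real allocation logic.
*/
static void example_locked_operation(mstate m) {
  if (!PREACTION(m)) { /* 0 means the mspace lock (if any) is now held */
    /* ... operate on m while holding its lock ... */
    POSTACTION(m);     /* release the lock; a no-op when locking is off */
  }
}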
1345 
1346 /*
1347  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1348  USAGE_ERROR_ACTION is triggered on detected bad frees and
1349  reallocs. The argument p is an address that might have triggered the
1350  fault. It is ignored by the two predefined actions, but might be
1351  useful in custom actions that try to help diagnose errors.
1352 */
1353 
1354 #if PROCEED_ON_ERROR
1355 
1356 /* A count of the number of corruption errors causing resets */
1357 int malloc_corruption_error_count;
1358 
1359 /* default corruption action */
1360 static void reset_on_error(mstate m);
1361 
1362 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1363 #define USAGE_ERROR_ACTION(m, p)
1364 
1365 #else /* PROCEED_ON_ERROR */
1366 
1367 #ifndef CORRUPTION_ERROR_ACTION
1368 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1369 #endif /* CORRUPTION_ERROR_ACTION */
1370 
1371 #ifndef USAGE_ERROR_ACTION
1372 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1373 #endif /* USAGE_ERROR_ACTION */
1374 
1375 #endif /* PROCEED_ON_ERROR */
1376 
1377 
1378 /* -------------------------- Debugging setup ---------------------------- */
1379 
1380 #if ! DEBUG
1381 
1382 #define check_free_chunk(M,P)
1383 #define check_inuse_chunk(M,P)
1384 #define check_malloced_chunk(M,P,N)
1385 #define check_mmapped_chunk(M,P)
1386 #define check_malloc_state(M)
1387 #define check_top_chunk(M,P)
1388 
1389 #else /* DEBUG */
1390 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
1391 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1392 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
1393 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1394 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1395 #define check_malloc_state(M) do_check_malloc_state(M)
1396 
1397 static void do_check_any_chunk(mstate m, mchunkptr p);
1398 static void do_check_top_chunk(mstate m, mchunkptr p);
1399 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1400 static void do_check_inuse_chunk(mstate m, mchunkptr p);
1401 static void do_check_free_chunk(mstate m, mchunkptr p);
1402 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1403 static void do_check_tree(mstate m, tchunkptr t);
1404 static void do_check_treebin(mstate m, bindex_t i);
1405 static void do_check_smallbin(mstate m, bindex_t i);
1406 static void do_check_malloc_state(mstate m);
1407 static int bin_find(mstate m, mchunkptr x);
1408 static size_t traverse_and_check(mstate m);
1409 #endif /* DEBUG */
1410 
1411 /* ---------------------------- Indexing Bins ---------------------------- */
1412 
1413 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1414 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1415 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1416 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1417 
1418 /* addressing by index. See above about smallbin repositioning */
1419 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1420 #define treebin_at(M,i) (&((M)->treebins[i]))
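/*
  Editorial note (not part of the original sources): small_index() and
  small_index2size() are plain shifts by SMALLBIN_SHIFT, e.g.

     small_index(32) -> 4      small_index2size(4) -> 32
     small_index(48) -> 6      small_index2size(6) -> 48
     is_small(256)   -> 0      (256 >> 3 == 32, which is not < NSMALLBINS)

  smallbin_at() doubles the index because only the fd/bk pair of each bin
  header is actually stored (see the SmallBins note above).
*/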
1421 
1422 /* assign tree index for size S to variable I. Use x86 asm if possible */
1423 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1424 #define compute_tree_index(S, I)\
1425 {\
1426  unsigned int X = S >> TREEBIN_SHIFT;\
1427  if (X == 0)\
1428  I = 0;\
1429  else if (X > 0xFFFF)\
1430  I = NTREEBINS-1;\
1431  else {\
1432  unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1433  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1434  }\
1435 }
1436 
1437 #elif defined (__INTEL_COMPILER)
1438 #define compute_tree_index(S, I)\
1439 {\
1440  size_t X = S >> TREEBIN_SHIFT;\
1441  if (X == 0)\
1442  I = 0;\
1443  else if (X > 0xFFFF)\
1444  I = NTREEBINS-1;\
1445  else {\
1446  unsigned int K = _bit_scan_reverse (X); \
1447  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1448  }\
1449 }
1450 
1451 #elif defined(_MSC_VER) && _MSC_VER>=1300
1452 #define compute_tree_index(S, I)\
1453 {\
1454  size_t X = S >> TREEBIN_SHIFT;\
1455  if (X == 0)\
1456  I = 0;\
1457  else if (X > 0xFFFF)\
1458  I = NTREEBINS-1;\
1459  else {\
1460  unsigned int K;\
1461  _BitScanReverse((DWORD *) &K, (DWORD) X);\
1462  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1463  }\
1464 }
1465 
1466 #else /* GNUC */
1467 #define compute_tree_index(S, I)\
1468 {\
1469  size_t X = S >> TREEBIN_SHIFT;\
1470  if (X == 0)\
1471  I = 0;\
1472  else if (X > 0xFFFF)\
1473  I = NTREEBINS-1;\
1474  else {\
1475  unsigned int Y = (unsigned int)X;\
1476  unsigned int N = ((Y - 0x100) >> 16) & 8;\
1477  unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1478  N += K;\
1479  N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1480  K = 14 - N + ((Y <<= K) >> 15);\
1481  I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1482  }\
1483 }
1484 #endif /* GNUC */
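/*
  Editorial note (not part of the original sources): all four variants
  above compute the same mapping.  For example, with S == 0x300 (768):
  X == 3, K == 1, so I == (1 << 1) + ((0x300 >> 8) & 1) == 3, and
  minsize_for_tree_index(3) (defined below) is exactly 0x300.  Treebin 3
  therefore holds chunks in the range [0x300, 0x400).
*/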
1485 
1486 /* Bit representing maximum resolved size in a treebin at i */
1487 #define bit_for_tree_index(i) \
1488  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1489 
1490 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
1491 #define leftshift_for_tree_index(i) \
1492  ((i == NTREEBINS-1)? 0 : \
1493  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1494 
1495 /* The size of the smallest chunk held in bin with index i */
1496 #define minsize_for_tree_index(i) \
1497  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1498  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
1499 
1500 
1501 /* ------------------------ Operations on bin maps ----------------------- */
1502 
1503 /* bit corresponding to given index */
1504 #define idx2bit(i) ((binmap_t)(1) << (i))
1505 
1506 /* Mark/Clear bits with given index */
1507 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1508 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1509 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1510 
1511 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1512 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1513 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1514 
1515 /* isolate the least set bit of a bitmap */
1516 #define least_bit(x) ((x) & -(x))
1517 
1518 /* mask with all bits to left of least bit of x on */
1519 #define left_bits(x) ((x<<1) | -(x<<1))
1520 
1521 /* mask with all bits to left of or equal to least bit of x on */
1522 #define same_or_left_bits(x) ((x) | -(x))
1523 
1524 /* index corresponding to given bit. Use compiler intrinsics where possible */
1525 
1526 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1527 #define compute_bit2idx(X, I)\
1528 {\
1529  unsigned int J;\
1530  J = __builtin_ctz(X); \
1531  I = (bindex_t)J;\
1532 }
1533 
1534 #elif defined (__INTEL_COMPILER)
1535 #define compute_bit2idx(X, I)\
1536 {\
1537  unsigned int J;\
1538  J = _bit_scan_forward (X); \
1539  I = (bindex_t)J;\
1540 }
1541 
1542 #elif defined(_MSC_VER) && _MSC_VER>=1300
1543 #define compute_bit2idx(X, I)\
1544 {\
1545  unsigned int J;\
1546  _BitScanForward((DWORD *) &J, X);\
1547  I = (bindex_t)J;\
1548 }
1549 
1550 #elif USE_BUILTIN_FFS
1551 #define compute_bit2idx(X, I) I = ffs(X)-1
1552 
1553 #else
1554 #define compute_bit2idx(X, I)\
1555 {\
1556  unsigned int Y = X - 1;\
1557  unsigned int K = Y >> (16-4) & 16;\
1558  unsigned int N = K; Y >>= K;\
1559  N += K = Y >> (8-3) & 8; Y >>= K;\
1560  N += K = Y >> (4-2) & 4; Y >>= K;\
1561  N += K = Y >> (2-1) & 2; Y >>= K;\
1562  N += K = Y >> (1-0) & 1; Y >>= K;\
1563  I = (bindex_t)(N + Y);\
1564 }
1565 #endif /* GNUC */
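/*
  Illustrative sketch (not part of the original source): the bitmap tricks
  above, used to find the next non-empty bin at or above a given index.
  The helper name is hypothetical.
*/
#if 0 /* example only -- never compiled */
static void binmap_example(void) {
  binmap_t map = idx2bit(3) | idx2bit(9) | idx2bit(12);  /* bins 3, 9, 12 non-empty */
  binmap_t candidates = left_bits(idx2bit(5)) & map;     /* keep only bins above 5  */
  binmap_t leastbit = least_bit(candidates);             /* isolates bin 9's bit    */
  bindex_t i;
  compute_bit2idx(leastbit, i);
  assert(i == 9);                                        /* bin 9 is the next one   */
}
#endif /* example only */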
1566 
1567 
1568 /* ----------------------- Runtime Check Support ------------------------- */
1569 
1570 /*
1571  For security, the main invariant is that malloc/free/etc never
1572  writes to a static address other than malloc_state, unless static
1573  malloc_state itself has been corrupted, which cannot occur via
1574  malloc (because of these checks). In essence this means that we
1575  believe all pointers, sizes, maps etc held in malloc_state, but
1576  check all of those linked or offset from other embedded data
1577  structures. These checks are interspersed with main code in a way
1578  that tends to minimize their run-time cost.
1579 
1580  When FOOTERS is defined, in addition to range checking, we also
1581  verify footer fields of inuse chunks, which can be used to guarantee
1582  that the mstate controlling malloc/free is intact. This is a
1583  streamlined version of the approach described by William Robertson
1584  et al in "Run-time Detection of Heap-based Overflows" LISA'03
1585  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1586  of an inuse chunk holds the xor of its mstate and a random seed,
1587  that is checked upon calls to free() and realloc(). This is
1588  (probabilistically) unguessable from outside the program, but can be
1589  computed by any code successfully malloc'ing any chunk, so does not
1590  itself provide protection against code that has already broken
1591  security through some other means. Unlike Robertson et al, we
1592  always dynamically check addresses of all offset chunks (previous,
1593  next, etc). This turns out to be cheaper than relying on hashes.
1594 */
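/*
  Illustrative sketch (not part of the original source) of the footer scheme
  described above: storing (mstate ^ magic) rather than the raw mstate
  pointer means a forged footer must match a random seed chosen at init
  time.  All values below are made up; the helper name is hypothetical.
*/
#if 0 /* example only -- never compiled */
static void footer_xor_example(void) {
  size_t magic       = 0x5a5a5a5a5a5a5aa8UL;   /* stands in for mparams.magic  */
  size_t mstate_addr = 0x00007f0000001000UL;   /* stands in for an mstate      */
  size_t footer      = mstate_addr ^ magic;    /* what mark_inuse_foot stores  */
  assert((footer ^ magic) == mstate_addr);     /* what get_mstate_for recovers */
}
#endif /* example only */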
1595 
1596 #if !INSECURE
1597 /* Check if address a is at least as high as any from MORECORE or MMAP */
1598 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1599 /* Check if address of next chunk n is higher than base chunk p */
1600 #define ok_next(p, n) ((char*)(p) < (char*)(n))
1601 /* Check if p has inuse status */
1602 #define ok_inuse(p) is_inuse(p)
1603 /* Check if p has its pinuse bit on */
1604 #define ok_pinuse(p) pinuse(p)
1605 
1606 #else /* !INSECURE */
1607 #define ok_address(M, a) (1)
1608 #define ok_next(b, n) (1)
1609 #define ok_inuse(p) (1)
1610 #define ok_pinuse(p) (1)
1611 #endif /* !INSECURE */
1612 
1613 #if (FOOTERS && !INSECURE)
1614 /* Check if (alleged) mstate m has expected magic field */
1615 static inline int
1616 ok_magic (const mstate m)
1617 {
1618  return (m->magic == mparams.magic);
1619 }
1620 #else /* (FOOTERS && !INSECURE) */
1621 #define ok_magic(M) (1)
1622 #endif /* (FOOTERS && !INSECURE) */
1623 
1624 /* In gcc, use __builtin_expect to minimize impact of checks */
1625 #if !INSECURE
1626 #if defined(__GNUC__) && __GNUC__ >= 3
1627 #define RTCHECK(e) __builtin_expect(e, 1)
1628 #else /* GNUC */
1629 #define RTCHECK(e) (e)
1630 #endif /* GNUC */
1631 #else /* !INSECURE */
1632 #define RTCHECK(e) (1)
1633 #endif /* !INSECURE */
1634 
1635 /* macros to set up inuse chunks with or without footers */
1636 
1637 #if !FOOTERS
1638 
1639 #define mark_inuse_foot(M,p,s)
1640 
1641 /* Macros for setting head/foot of non-mmapped chunks */
1642 
1643 /* Set cinuse bit and pinuse bit of next chunk */
1644 #define set_inuse(M,p,s)\
1645  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1646  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1647 
1648 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1649 #define set_inuse_and_pinuse(M,p,s)\
1650  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1651  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1652 
1653 /* Set size, cinuse and pinuse bit of this chunk */
1654 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1655  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1656 
1657 #else /* FOOTERS */
1658 
1659 /* Set foot of inuse chunk to be xor of mstate and seed */
1660 #define mark_inuse_foot(M,p,s)\
1661  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1662 
1663 #define get_mstate_for(p)\
1664  ((mstate)(((mchunkptr)((char*)(p) +\
1665  (chunksize(p))))->prev_foot ^ mparams.magic))
1666 
1667 #define set_inuse(M,p,s)\
1668  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1669  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1670  mark_inuse_foot(M,p,s))
1671 
1672 #define set_inuse_and_pinuse(M,p,s)\
1673  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1674  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1675  mark_inuse_foot(M,p,s))
1676 
1677 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1678  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1679  mark_inuse_foot(M, p, s))
1680 
1681 #endif /* !FOOTERS */
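/*
  Illustrative sketch (not part of the original source): what the head word
  set by the macros above actually encodes.  Chunk sizes are multiples of
  the alignment, so the low bits are free to carry the CINUSE/PINUSE flags.
  The helper name is hypothetical.
*/
#if 0 /* example only -- never compiled */
static void head_encoding_example(void) {
  size_t s    = 48;                          /* chunk size, an alignment multiple */
  size_t head = s | PINUSE_BIT | CINUSE_BIT; /* in use, previous also in use      */
  assert((head & ~INUSE_BITS) == s);         /* masking the status bits recovers s */
  assert((head & CINUSE_BIT) != 0);
  assert((head & PINUSE_BIT) != 0);
}
#endif /* example only */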
1682 
1683 /* ---------------------------- setting mparams -------------------------- */
1684 
1685 #if LOCK_AT_FORK
1686 static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1687 static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1688 static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1689 #endif /* LOCK_AT_FORK */
1690 
1691 /* Initialize mparams */
1692 static int init_mparams(void) {
1693 #ifdef NEED_GLOBAL_LOCK_INIT
1694  if (malloc_global_mutex_status <= 0)
1695  init_malloc_global_mutex();
1696 #endif
1697 
1698  ACQUIRE_MALLOC_GLOBAL_LOCK();
1699  if (mparams.magic == 0) {
1700  size_t magic;
1701  size_t psize;
1702  size_t gsize;
1703 
1704 #ifndef WIN32
1705  psize = malloc_getpagesize;
1706  gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1707 #else /* WIN32 */
1708  {
1709  SYSTEM_INFO system_info;
1710  GetSystemInfo(&system_info);
1711  psize = system_info.dwPageSize;
1712  gsize = ((DEFAULT_GRANULARITY != 0)?
1713  DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1714  }
1715 #endif /* WIN32 */
1716 
1717  /* Sanity-check configuration:
1718  size_t must be unsigned and as wide as pointer type.
1719  ints must be at least 4 bytes.
1720  alignment must be at least 8.
1721  Alignment, min chunk size, and page size must all be powers of 2.
1722  */
1723  if ((sizeof(size_t) != sizeof(char*)) ||
1724  (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1725  (sizeof(int) < 4) ||
1726  (MALLOC_ALIGNMENT < (size_t)8U) ||
1728  ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1729  ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1730  ((psize & (psize-SIZE_T_ONE)) != 0))
1731  DLM_ABORT;
1732  mparams.granularity = gsize;
1733  mparams.page_size = psize;
1734  mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1735  mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1736 #if MORECORE_CONTIGUOUS
1737  mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1738 #else /* MORECORE_CONTIGUOUS */
1739  mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1740 #endif /* MORECORE_CONTIGUOUS */
1741 
1742 #if !ONLY_MSPACES
1743  /* Set up lock for main malloc area */
1744  gm->mflags = mparams.default_mflags;
1745  (void)INITIAL_LOCK(&gm->mutex);
1746 #endif
1747 #if LOCK_AT_FORK
1748  pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1749 #endif
1750 
1751  {
1752 #ifndef DLM_MAGIC_CONSTANT
1753 #if USE_DEV_RANDOM
1754  int fd;
1755  unsigned char buf[sizeof(size_t)];
1756  /* Try to use /dev/urandom, else fall back on using time */
1757  if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1758  read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1759  magic = *((size_t *) buf);
1760  close(fd);
1761  }
1762  else
1763 #endif /* USE_DEV_RANDOM */
1764 #ifdef WIN32
1765  magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1766 #elif defined(LACKS_TIME_H)
1767  magic = (size_t)&magic ^ (size_t)0x55555555U;
1768 #else
1769  magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1770 #endif
1771  magic |= (size_t)8U; /* ensure nonzero */
1772  magic &= ~(size_t)7U; /* improve chances of fault for bad values */
1773 #else
1774  magic = DLM_MAGIC_CONSTANT;
1775 #endif
1776  /* Until memory-ordering primitives are commonly available, use a volatile write */
1777  (*(volatile size_t *)(&(mparams.magic))) = magic;
1778  }
1779  }
1780 
1781  RELEASE_MALLOC_GLOBAL_LOCK();
1782  return 1;
1783 }
1784 
1785 /* support for mallopt */
1786 static int change_mparam(int param_number, int value) {
1787  size_t val;
1788  ensure_initialization(); /* Ensure initialization/params */
1789  val = (value == -1)? MAX_SIZE_T : (size_t)value;
1790  switch(param_number) {
1791  case M_TRIM_THRESHOLD:
1792  mparams.trim_threshold = val;
1793  return 1;
1794  case M_GRANULARITY:
1795  if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1796  mparams.granularity = val;
1797  return 1;
1798  }
1799  else
1800  return 0;
1801  case M_MMAP_THRESHOLD:
1802  mparams.mmap_threshold = val;
1803  return 1;
1804  default:
1805  return 0;
1806  }
1807 }
1808 
1809 #if DEBUG
1810 /* ------------------------- Debugging Support --------------------------- */
1811 
1812 /* Check properties of any chunk, whether free, inuse, mmapped etc */
1813 static void do_check_any_chunk(mstate m, mchunkptr p) {
1814  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1815  assert(ok_address(m, p));
1816 }
1817 
1818 /* Check properties of top chunk */
1819 static void do_check_top_chunk(mstate m, mchunkptr p) {
1820  msegmentptr sp = segment_holding(m, (char*)p);
1821  size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1822  assert(sp != 0);
1823  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1824  assert(ok_address(m, p));
1825  assert(sz == m->topsize);
1826  assert(sz > 0);
1827  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1828  assert(pinuse(p));
1829  assert(!pinuse(chunk_plus_offset(p, sz)));
1830 }
1831 
1832 /* Check properties of (inuse) mmapped chunks */
1833 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1834  size_t sz = chunksize(p);
1835  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1836  assert(is_mmapped(p));
1837  assert(use_mmap(m));
1838  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1839  assert(ok_address(m, p));
1840  assert(!is_small(sz));
1841  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1842  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1843  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1844 }
1845 
1846 /* Check properties of inuse chunks */
1847 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1848  do_check_any_chunk(m, p);
1849  assert(is_inuse(p));
1850  assert(next_pinuse(p));
1851  /* If not pinuse and not mmapped, previous chunk has OK offset */
1852  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1853  if (is_mmapped(p))
1854  do_check_mmapped_chunk(m, p);
1855 }
1856 
1857 /* Check properties of free chunks */
1858 static void do_check_free_chunk(mstate m, mchunkptr p) {
1859  size_t sz = chunksize(p);
1860  mchunkptr next = chunk_plus_offset(p, sz);
1861  do_check_any_chunk(m, p);
1862  assert(!is_inuse(p));
1863  assert(!next_pinuse(p));
1864  assert (!is_mmapped(p));
1865  if (p != m->dv && p != m->top) {
1866  if (sz >= MIN_CHUNK_SIZE) {
1867  assert((sz & CHUNK_ALIGN_MASK) == 0);
1868  assert(is_aligned(chunk2mem(p)));
1869  assert(next->prev_foot == sz);
1870  assert(pinuse(p));
1871  assert (next == m->top || is_inuse(next));
1872  assert(p->fd->bk == p);
1873  assert(p->bk->fd == p);
1874  }
1875  else /* markers are always of size SIZE_T_SIZE */
1876  assert(sz == SIZE_T_SIZE);
1877  }
1878 }
1879 
1880 /* Check properties of malloced chunks at the point they are malloced */
1881 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1882  if (mem != 0) {
1883  mchunkptr p = mem2chunk(mem);
1884  size_t sz = p->head & ~INUSE_BITS;
1885  do_check_inuse_chunk(m, p);
1886  assert((sz & CHUNK_ALIGN_MASK) == 0);
1887  assert(sz >= MIN_CHUNK_SIZE);
1888  assert(sz >= s);
1889  /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1890  assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1891  }
1892 }
1893 
1894 /* Check a tree and its subtrees. */
1895 static void do_check_tree(mstate m, tchunkptr t) {
1896  tchunkptr head = 0;
1897  tchunkptr u = t;
1898  bindex_t tindex = t->index;
1899  size_t tsize = chunksize(t);
1900  bindex_t idx;
1901  compute_tree_index(tsize, idx);
1902  assert(tindex == idx);
1903  assert(tsize >= MIN_LARGE_SIZE);
1904  assert(tsize >= minsize_for_tree_index(idx));
1905  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1906 
1907  do { /* traverse through chain of same-sized nodes */
1908  do_check_any_chunk(m, ((mchunkptr)u));
1909  assert(u->index == tindex);
1910  assert(chunksize(u) == tsize);
1911  assert(!is_inuse(u));
1912  assert(!next_pinuse(u));
1913  assert(u->fd->bk == u);
1914  assert(u->bk->fd == u);
1915  if (u->parent == 0) {
1916  assert(u->child[0] == 0);
1917  assert(u->child[1] == 0);
1918  }
1919  else {
1920  assert(head == 0); /* only one node on chain has parent */
1921  head = u;
1922  assert(u->parent != u);
1923  assert (u->parent->child[0] == u ||
1924  u->parent->child[1] == u ||
1925  *((tbinptr*)(u->parent)) == u);
1926  if (u->child[0] != 0) {
1927  assert(u->child[0]->parent == u);
1928  assert(u->child[0] != u);
1929  do_check_tree(m, u->child[0]);
1930  }
1931  if (u->child[1] != 0) {
1932  assert(u->child[1]->parent == u);
1933  assert(u->child[1] != u);
1934  do_check_tree(m, u->child[1]);
1935  }
1936  if (u->child[0] != 0 && u->child[1] != 0) {
1937  assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1938  }
1939  }
1940  u = u->fd;
1941  } while (u != t);
1942  assert(head != 0);
1943 }
1944 
1945 /* Check all the chunks in a treebin. */
1946 static void do_check_treebin(mstate m, bindex_t i) {
1947  tbinptr* tb = treebin_at(m, i);
1948  tchunkptr t = *tb;
1949  int empty = (m->treemap & (1U << i)) == 0;
1950  if (t == 0)
1951  assert(empty);
1952  if (!empty)
1953  do_check_tree(m, t);
1954 }
1955 
1956 /* Check all the chunks in a smallbin. */
1957 static void do_check_smallbin(mstate m, bindex_t i) {
1958  sbinptr b = smallbin_at(m, i);
1959  mchunkptr p = b->bk;
1960  unsigned int empty = (m->smallmap & (1U << i)) == 0;
1961  if (p == b)
1962  assert(empty);
1963  if (!empty) {
1964  for (; p != b; p = p->bk) {
1965  size_t size = chunksize(p);
1966  mchunkptr q;
1967  /* each chunk claims to be free */
1968  do_check_free_chunk(m, p);
1969  /* chunk belongs in bin */
1970  assert(small_index(size) == i);
1971  assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1972  /* chunk is followed by an inuse chunk */
1973  q = next_chunk(p);
1974  if (q->head != FENCEPOST_HEAD)
1975  do_check_inuse_chunk(m, q);
1976  }
1977  }
1978 }
1979 
1980 /* Find x in a bin. Used in other check functions. */
1981 static int bin_find(mstate m, mchunkptr x) {
1982  size_t size = chunksize(x);
1983  if (is_small(size)) {
1984  bindex_t sidx = small_index(size);
1985  sbinptr b = smallbin_at(m, sidx);
1986  if (smallmap_is_marked(m, sidx)) {
1987  mchunkptr p = b;
1988  do {
1989  if (p == x)
1990  return 1;
1991  } while ((p = p->fd) != b);
1992  }
1993  }
1994  else {
1995  bindex_t tidx;
1996  compute_tree_index(size, tidx);
1997  if (treemap_is_marked(m, tidx)) {
1998  tchunkptr t = *treebin_at(m, tidx);
1999  size_t sizebits = size << leftshift_for_tree_index(tidx);
2000  while (t != 0 && chunksize(t) != size) {
2001  t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
2002  sizebits <<= 1;
2003  }
2004  if (t != 0) {
2005  tchunkptr u = t;
2006  do {
2007  if (u == (tchunkptr)x)
2008  return 1;
2009  } while ((u = u->fd) != t);
2010  }
2011  }
2012  }
2013  return 0;
2014 }
2015 
2016 /* Traverse each chunk and check it; return total */
2017 static size_t traverse_and_check(mstate m) {
2018  size_t sum = 0;
2019  if (is_initialized(m)) {
2020  msegmentptr s = &m->seg;
2021  sum += m->topsize + TOP_FOOT_SIZE;
2022  while (s != 0) {
2023  mchunkptr q = align_as_chunk(s->base);
2024  mchunkptr lastq = 0;
2025  assert(pinuse(q));
2026  while (segment_holds(s, q) &&
2027  q != m->top && q->head != FENCEPOST_HEAD) {
2028  sum += chunksize(q);
2029  if (is_inuse(q)) {
2030  assert(!bin_find(m, q));
2031  do_check_inuse_chunk(m, q);
2032  }
2033  else {
2034  assert(q == m->dv || bin_find(m, q));
2035  assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2036  do_check_free_chunk(m, q);
2037  }
2038  lastq = q;
2039  q = next_chunk(q);
2040  }
2041  s = s->next;
2042  }
2043  }
2044  return sum;
2045 }
2046 
2047 
2048 /* Check all properties of malloc_state. */
2049 static void do_check_malloc_state(mstate m) {
2050  bindex_t i;
2051  size_t total;
2052  /* check bins */
2053  for (i = 0; i < NSMALLBINS; ++i)
2054  do_check_smallbin(m, i);
2055  for (i = 0; i < NTREEBINS; ++i)
2056  do_check_treebin(m, i);
2057 
2058  if (m->dvsize != 0) { /* check dv chunk */
2059  do_check_any_chunk(m, m->dv);
2060  assert(m->dvsize == chunksize(m->dv));
2061  assert(m->dvsize >= MIN_CHUNK_SIZE);
2062  assert(bin_find(m, m->dv) == 0);
2063  }
2064 
2065  if (m->top != 0) { /* check top chunk */
2066  do_check_top_chunk(m, m->top);
2067  /*assert(m->topsize == chunksize(m->top)); redundant */
2068  assert(m->topsize > 0);
2069  assert(bin_find(m, m->top) == 0);
2070  }
2071 
2072  total = traverse_and_check(m);
2073  assert(total <= m->footprint);
2074  assert(m->footprint <= m->max_footprint);
2075 }
2076 #endif /* DEBUG */
2077 
2078 /* ----------------------------- statistics ------------------------------ */
2079 
2080 #if !NO_MALLINFO
2081 static struct dlmallinfo internal_mallinfo(mstate m) {
2082  struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2083  ensure_initialization();
2084  if (!PREACTION(m)) {
2085  check_malloc_state(m);
2086  if (is_initialized(m)) {
2087  size_t nfree = SIZE_T_ONE; /* top always free */
2088  size_t mfree = m->topsize + TOP_FOOT_SIZE;
2089  size_t sum = mfree;
2090  msegmentptr s = &m->seg;
2091  while (s != 0) {
2092  mchunkptr q = align_as_chunk(s->base);
2093  while (segment_holds(s, q) &&
2094  q != m->top && q->head != FENCEPOST_HEAD) {
2095  size_t sz = chunksize(q);
2096  sum += sz;
2097  if (!is_inuse(q)) {
2098  mfree += sz;
2099  ++nfree;
2100  }
2101  q = next_chunk(q);
2102  }
2103  s = s->next;
2104  }
2105 
2106  nm.arena = sum;
2107  nm.ordblks = nfree;
2108  nm.hblkhd = m->footprint - sum;
2109  nm.usmblks = m->max_footprint;
2110  nm.uordblks = m->footprint - mfree;
2111  nm.fordblks = mfree;
2112  nm.keepcost = m->topsize;
2113  }
2114 
2115  POSTACTION(m);
2116  }
2117  return nm;
2118 }
2119 #endif /* !NO_MALLINFO */
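/*
  Illustrative sketch (not part of the original source): how the fields
  computed above relate to each other for the global space.  This assumes
  the default (!ONLY_MSPACES, !NO_MALLINFO) configuration; the helper name
  is hypothetical.
*/
#if 0 /* example only -- never compiled */
static void mallinfo_example(void) {
  struct dlmallinfo mi = internal_mallinfo(gm);
  /* uordblks + fordblks and arena + hblkhd both equal the current footprint */
  assert(mi.uordblks + mi.fordblks == mi.arena + mi.hblkhd);
  assert(mi.keepcost <= mi.fordblks);  /* top space is part of the free total */
}
#endif /* example only */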
2120 
2121 #if !NO_MALLOC_STATS
2122 static void internal_malloc_stats(mstate m) {
2123  ensure_initialization();
2124  if (!PREACTION(m)) {
2125  size_t maxfp = 0;
2126  size_t fp = 0;
2127  size_t used = 0;
2128  check_malloc_state(m);
2129  if (is_initialized(m)) {
2130  msegmentptr s = &m->seg;
2131  maxfp = m->max_footprint;
2132  fp = m->footprint;
2133  used = fp - (m->topsize + TOP_FOOT_SIZE);
2134 
2135  while (s != 0) {
2136  mchunkptr q = align_as_chunk(s->base);
2137  while (segment_holds(s, q) &&
2138  q != m->top && q->head != FENCEPOST_HEAD) {
2139  if (!is_inuse(q))
2140  used -= chunksize(q);
2141  q = next_chunk(q);
2142  }
2143  s = s->next;
2144  }
2145  }
2146  POSTACTION(m); /* drop lock */
2147  fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2148  fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2149  fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2150  }
2151 }
2152 #endif /* NO_MALLOC_STATS */
2153 
2154 /* ----------------------- Operations on smallbins ----------------------- */
2155 
2156 /*
2157  Various forms of linking and unlinking are defined as macros, even
2158  the ones for trees, which are very long but have very short typical
2159  paths. This is ugly but reduces reliance on the inlining support of
2160  compilers.
2161 */
2162 
2163 /* Link a free chunk into a smallbin */
2164 #define insert_small_chunk(M, P, S) {\
2165  bindex_t I = small_index(S);\
2166  mchunkptr B = smallbin_at(M, I);\
2167  mchunkptr F = B;\
2168  assert(S >= MIN_CHUNK_SIZE);\
2169  if (!smallmap_is_marked(M, I))\
2170  mark_smallmap(M, I);\
2171  else if (RTCHECK(ok_address(M, B->fd)))\
2172  F = B->fd;\
2173  else {\
2174  CORRUPTION_ERROR_ACTION(M);\
2175  }\
2176  B->fd = P;\
2177  F->bk = P;\
2178  P->fd = F;\
2179  P->bk = B;\
2180 }
2181 
2182 /* Unlink a chunk from a smallbin */
2183 #define unlink_small_chunk(M, P, S) {\
2184  mchunkptr F = P->fd;\
2185  mchunkptr B = P->bk;\
2186  bindex_t I = small_index(S);\
2187  assert(P != B);\
2188  assert(P != F);\
2189  assert(chunksize(P) == small_index2size(I));\
2190  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2191  if (B == F) {\
2192  clear_smallmap(M, I);\
2193  }\
2194  else if (RTCHECK(B == smallbin_at(M,I) ||\
2195  (ok_address(M, B) && B->fd == P))) {\
2196  F->bk = B;\
2197  B->fd = F;\
2198  }\
2199  else {\
2200  CORRUPTION_ERROR_ACTION(M);\
2201  }\
2202  }\
2203  else {\
2204  CORRUPTION_ERROR_ACTION(M);\
2205  }\
2206 }
2207 
2208 /* Unlink the first chunk from a smallbin */
2209 #define unlink_first_small_chunk(M, B, P, I) {\
2210  mchunkptr F = P->fd;\
2211  assert(P != B);\
2212  assert(P != F);\
2213  assert(chunksize(P) == small_index2size(I));\
2214  if (B == F) {\
2215  clear_smallmap(M, I);\
2216  }\
2217  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2218  F->bk = B;\
2219  B->fd = F;\
2220  }\
2221  else {\
2222  CORRUPTION_ERROR_ACTION(M);\
2223  }\
2224 }
2225 
2226 /* Replace dv node, binning the old one */
2227 /* Used only when dvsize known to be small */
2228 #define replace_dv(M, P, S) {\
2229  size_t DVS = M->dvsize;\
2230  assert(is_small(DVS));\
2231  if (DVS != 0) {\
2232  mchunkptr DV = M->dv;\
2233  insert_small_chunk(M, DV, DVS);\
2234  }\
2235  M->dvsize = S;\
2236  M->dv = P;\
2237 }
2238 
2239 /* ------------------------- Operations on trees ------------------------- */
2240 
2241 /* Insert chunk into tree */
2242 #define insert_large_chunk(M, X, S) {\
2243  tbinptr* H;\
2244  bindex_t I;\
2245  compute_tree_index(S, I);\
2246  H = treebin_at(M, I);\
2247  X->index = I;\
2248  X->child[0] = X->child[1] = 0;\
2249  if (!treemap_is_marked(M, I)) {\
2250  mark_treemap(M, I);\
2251  *H = X;\
2252  X->parent = (tchunkptr)H;\
2253  X->fd = X->bk = X;\
2254  }\
2255  else {\
2256  tchunkptr T = *H;\
2257  size_t K = S << leftshift_for_tree_index(I);\
2258  for (;;) {\
2259  if (chunksize(T) != S) {\
2260  tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2261  K <<= 1;\
2262  if (*C != 0)\
2263  T = *C;\
2264  else if (RTCHECK(ok_address(M, C))) {\
2265  *C = X;\
2266  X->parent = T;\
2267  X->fd = X->bk = X;\
2268  break;\
2269  }\
2270  else {\
2271  CORRUPTION_ERROR_ACTION(M);\
2272  break;\
2273  }\
2274  }\
2275  else {\
2276  tchunkptr F = T->fd;\
2277  if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2278  T->fd = F->bk = X;\
2279  X->fd = F;\
2280  X->bk = T;\
2281  X->parent = 0;\
2282  break;\
2283  }\
2284  else {\
2285  CORRUPTION_ERROR_ACTION(M);\
2286  break;\
2287  }\
2288  }\
2289  }\
2290  }\
2291 }
2292 
2293 /*
2294  Unlink steps:
2295 
2296  1. If x is a chained node, unlink it from its same-sized fd/bk links
2297  and choose its bk node as its replacement.
2298  2. If x was the last node of its size, but not a leaf node, it must
2299  be replaced with a leaf node (not merely one with an open left or
2300  right), to make sure that lefts and rights of descendants
2301  correspond properly to bit masks. We use the rightmost descendant
2302  of x. We could use any other leaf, but this is easy to locate and
2303  tends to counteract removal of leftmosts elsewhere, and so keeps
2304  paths shorter than minimally guaranteed. This doesn't loop much
2305  because on average a node in a tree is near the bottom.
2306  3. If x is the base of a chain (i.e., has parent links) relink
2307  x's parent and children to x's replacement (or null if none).
2308 */
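/*
  Illustrative sketch (not part of the original source): step 2 above written
  as a plain function.  It walks to the rightmost descendant (preferring
  child[1]), mirroring the R/RP/CP loop in unlink_large_chunk below; the
  function name is hypothetical.
*/
#if 0 /* example only -- never compiled */
static tchunkptr rightmost_descendant(tchunkptr x) {
  tchunkptr r = (x->child[1] != 0) ? x->child[1] : x->child[0];
  if (r == 0)
    return 0;                                   /* x is already a leaf          */
  while (r->child[1] != 0 || r->child[0] != 0)  /* keep descending, right first */
    r = (r->child[1] != 0) ? r->child[1] : r->child[0];
  return r;                                     /* leaf used as x's replacement */
}
#endif /* example only */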
2309 
2310 #define unlink_large_chunk(M, X) {\
2311  tchunkptr XP = X->parent;\
2312  tchunkptr R;\
2313  if (X->bk != X) {\
2314  tchunkptr F = X->fd;\
2315  R = X->bk;\
2316  if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2317  F->bk = R;\
2318  R->fd = F;\
2319  }\
2320  else {\
2321  CORRUPTION_ERROR_ACTION(M);\
2322  }\
2323  }\
2324  else {\
2325  tchunkptr* RP;\
2326  if (((R = *(RP = &(X->child[1]))) != 0) ||\
2327  ((R = *(RP = &(X->child[0]))) != 0)) {\
2328  tchunkptr* CP;\
2329  while ((*(CP = &(R->child[1])) != 0) ||\
2330  (*(CP = &(R->child[0])) != 0)) {\
2331  R = *(RP = CP);\
2332  }\
2333  if (RTCHECK(ok_address(M, RP)))\
2334  *RP = 0;\
2335  else {\
2336  CORRUPTION_ERROR_ACTION(M);\
2337  }\
2338  }\
2339  }\
2340  if (XP != 0) {\
2341  tbinptr* H = treebin_at(M, X->index);\
2342  if (X == *H) {\
2343  if ((*H = R) == 0) \
2344  clear_treemap(M, X->index);\
2345  }\
2346  else if (RTCHECK(ok_address(M, XP))) {\
2347  if (XP->child[0] == X) \
2348  XP->child[0] = R;\
2349  else \
2350  XP->child[1] = R;\
2351  }\
2352  else\
2353  CORRUPTION_ERROR_ACTION(M);\
2354  if (R != 0) {\
2355  if (RTCHECK(ok_address(M, R))) {\
2356  tchunkptr C0, C1;\
2357  R->parent = XP;\
2358  if ((C0 = X->child[0]) != 0) {\
2359  if (RTCHECK(ok_address(M, C0))) {\
2360  R->child[0] = C0;\
2361  C0->parent = R;\
2362  }\
2363  else\
2364  CORRUPTION_ERROR_ACTION(M);\
2365  }\
2366  if ((C1 = X->child[1]) != 0) {\
2367  if (RTCHECK(ok_address(M, C1))) {\
2368  R->child[1] = C1;\
2369  C1->parent = R;\
2370  }\
2371  else\
2372  CORRUPTION_ERROR_ACTION(M);\
2373  }\
2374  }\
2375  else\
2376  CORRUPTION_ERROR_ACTION(M);\
2377  }\
2378  }\
2379 }
2380 
2381 /* Relays to large vs small bin operations */
2382 
2383 #define insert_chunk(M, P, S)\
2384  if (is_small(S)) insert_small_chunk(M, P, S)\
2385  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2386 
2387 #define unlink_chunk(M, P, S)\
2388  if (is_small(S)) unlink_small_chunk(M, P, S)\
2389  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2390 
2391 
2392 /* Relays to internal calls to malloc/free from realloc, memalign etc */
2393 
2394 #if ONLY_MSPACES
2395 #define internal_malloc(m, b) mspace_malloc(m, b)
2396 #define internal_free(m, mem) mspace_free(m,mem);
2397 #else /* ONLY_MSPACES */
2398 #if MSPACES
2399 #define internal_malloc(m, b)\
2400  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2401 #define internal_free(m, mem)\
2402  if (m == gm) dlfree(mem); else mspace_free(m,mem);
2403 #else /* MSPACES */
2404 #define internal_malloc(m, b) dlmalloc(b)
2405 #define internal_free(m, mem) dlfree(mem)
2406 #endif /* MSPACES */
2407 #endif /* ONLY_MSPACES */
2408 
2409 /* ----------------------- Direct-mmapping chunks ----------------------- */
2410 
2411 /*
2412  Directly mmapped chunks are set up with an offset to the start of
2413  the mmapped region stored in the prev_foot field of the chunk. This
2414  allows reconstruction of the required argument to MUNMAP when freed,
2415  and also allows adjustment of the returned chunk to meet alignment
2416  requirements (especially in memalign).
2417 */
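/*
  Illustrative sketch (not part of the original source): how the offset kept
  in prev_foot lets free() reconstruct the munmap arguments for a directly
  mmapped chunk.  Addresses and sizes are made up; the helper name is
  hypothetical.
*/
#if 0 /* example only -- never compiled */
static void mmap_offset_example(void) {
  char  *mm     = (char *)0x7f0000000000UL;  /* hypothetical CALL_DIRECT_MMAP result */
  size_t offset = 16;                        /* align_offset(chunk2mem(mm))          */
  char  *chunk  = mm + offset;               /* where the mchunkptr is placed        */
  /* prev_foot stores 'offset'; at free time the mapping base is recovered as: */
  assert(chunk - offset == mm);
  /* ...and the length handed to MUNMAP is chunksize + offset + MMAP_FOOT_PAD. */
}
#endif /* example only */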
2418 
2419 /* Malloc using mmap */
2420 static void* mmap_alloc(mstate m, size_t nb) {
2421  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2422  if (m->footprint_limit != 0) {
2423  size_t fp = m->footprint + mmsize;
2424  if (fp <= m->footprint || fp > m->footprint_limit)
2425  return 0;
2426  }
2427  if (mmsize > nb) { /* Check for wrap around 0 */
2428  char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2429  if (mm != CMFAIL) {
2430  size_t offset = align_offset(chunk2mem(mm));
2431  size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2432  mchunkptr p = (mchunkptr)(mm + offset);
2433  p->prev_foot = offset;
2434  p->head = psize;
2435  mark_inuse_foot(m, p, psize);
2436  chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2437  chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2438 
2439  if (m->least_addr == 0 || mm < m->least_addr)
2440  m->least_addr = mm;
2441  if ((m->footprint += mmsize) > m->max_footprint)
2442  m->max_footprint = m->footprint;
2443  assert(is_aligned(chunk2mem(p)));
2444  check_mmapped_chunk(m, p);
2445  return chunk2mem(p);
2446  }
2447  }
2448  return 0;
2449 }
2450 
2451 /* Realloc using mmap */
2452 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2453  size_t oldsize = chunksize(oldp);
2454  (void)flags; /* placate people compiling -Wunused */
2455  if (is_small(nb)) /* Can't shrink mmap regions below small size */
2456  return 0;
2457  /* Keep old chunk if big enough but not too big */
2458  if (oldsize >= nb + SIZE_T_SIZE &&
2459  (oldsize - nb) <= (mparams.granularity << 1))
2460  return oldp;
2461  else {
2462  size_t offset = oldp->prev_foot;
2463  size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2464  size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2465  char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2466  oldmmsize, newmmsize, flags);
2467  if (cp != CMFAIL) {
2468  mchunkptr newp = (mchunkptr)(cp + offset);
2469  size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2470  newp->head = psize;
2471  mark_inuse_foot(m, newp, psize);
2472  chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2473  chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2474 
2475  if (cp < m->least_addr)
2476  m->least_addr = cp;
2477  if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2478  m->max_footprint = m->footprint;
2479  check_mmapped_chunk(m, newp);
2480  return newp;
2481  }
2482  }
2483  return 0;
2484 }
2485 
2486 
2487 /* -------------------------- mspace management -------------------------- */
2488 
2489 /* Initialize top chunk and its size */
2490 static void init_top(mstate m, mchunkptr p, size_t psize) {
2491  /* Ensure alignment */
2492  size_t offset = align_offset(chunk2mem(p));
2493  p = (mchunkptr)((char*)p + offset);
2494  psize -= offset;
2495 
2496  m->top = p;
2497  m->topsize = psize;
2498  p->head = psize | PINUSE_BIT;
2499  /* set size of fake trailing chunk holding overhead space only once */
2500  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2501  m->trim_check = mparams.trim_threshold; /* reset on each update */
2502 }
2503 
2504 /* Initialize bins for a new mstate that is otherwise zeroed out */
2505 static void init_bins(mstate m) {
2506  /* Establish circular links for smallbins */
2507  bindex_t i;
2508  for (i = 0; i < NSMALLBINS; ++i) {
2509  sbinptr bin = smallbin_at(m,i);
2510  bin->fd = bin->bk = bin;
2511  }
2512 }
2513 
2514 #if PROCEED_ON_ERROR
2515 
2516 /* default corruption action */
2517 static void reset_on_error(mstate m) {
2518  int i;
2519  ++malloc_corruption_error_count;
2520  /* Reinitialize fields to forget about all memory */
2521  m->smallmap = m->treemap = 0;
2522  m->dvsize = m->topsize = 0;
2523  m->seg.base = 0;
2524  m->seg.size = 0;
2525  m->seg.next = 0;
2526  m->top = m->dv = 0;
2527  for (i = 0; i < NTREEBINS; ++i)
2528  *treebin_at(m, i) = 0;
2529  init_bins(m);
2530 }
2531 #endif /* PROCEED_ON_ERROR */
2532 
2533 /* Allocate chunk and prepend remainder with chunk in successor base. */
2534 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2535  size_t nb) {
2536  mchunkptr p = align_as_chunk(newbase);
2537  mchunkptr oldfirst = align_as_chunk(oldbase);
2538  size_t psize = (char*)oldfirst - (char*)p;
2539  mchunkptr q = chunk_plus_offset(p, nb);
2540  size_t qsize = psize - nb;
2541  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2542 
2543  assert((char*)oldfirst > (char*)q);
2544  assert(pinuse(oldfirst));
2545  assert(qsize >= MIN_CHUNK_SIZE);
2546 
2547  /* consolidate remainder with first chunk of old base */
2548  if (oldfirst == m->top) {
2549  size_t tsize = m->topsize += qsize;
2550  m->top = q;
2551  q->head = tsize | PINUSE_BIT;
2552  check_top_chunk(m, q);
2553  }
2554  else if (oldfirst == m->dv) {
2555  size_t dsize = m->dvsize += qsize;
2556  m->dv = q;
2557  set_size_and_pinuse_of_free_chunk(q, dsize);
2558  }
2559  else {
2560  if (!is_inuse(oldfirst)) {
2561  size_t nsize = chunksize(oldfirst);
2562  unlink_chunk(m, oldfirst, nsize);
2563  oldfirst = chunk_plus_offset(oldfirst, nsize);
2564  qsize += nsize;
2565  }
2566  set_free_with_pinuse(q, qsize, oldfirst);
2567  insert_chunk(m, q, qsize);
2568  check_free_chunk(m, q);
2569  }
2570 
2571  check_malloced_chunk(m, chunk2mem(p), nb);
2572  return chunk2mem(p);
2573 }
2574 
2575 /* Add a segment to hold a new noncontiguous region */
2576 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2577  /* Determine locations and sizes of segment, fenceposts, old top */
2578  char* old_top = (char*)m->top;
2579  msegmentptr oldsp = segment_holding(m, old_top);
2580  char* old_end = oldsp->base + oldsp->size;
2581  size_t ssize = pad_request(sizeof(struct malloc_segment));
2582  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2583  size_t offset = align_offset(chunk2mem(rawsp));
2584  char* asp = rawsp + offset;
2585  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2586  mchunkptr sp = (mchunkptr)csp;
2587  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2588  mchunkptr tnext = chunk_plus_offset(sp, ssize);
2589  mchunkptr p = tnext;
2590  int nfences = 0;
2591 
2592  /* reset top to new space */
2593  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2594 
2595  /* Set up segment record */
2596  assert(is_aligned(ss));
2597  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2598  *ss = m->seg; /* Push current record */
2599  m->seg.base = tbase;
2600  m->seg.size = tsize;
2601  m->seg.sflags = mmapped;
2602  m->seg.next = ss;
2603 
2604  /* Insert trailing fenceposts */
2605  for (;;) {
2606  mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2607  p->head = FENCEPOST_HEAD;
2608  ++nfences;
2609  if ((char*)(&(nextp->head)) < old_end)
2610  p = nextp;
2611  else
2612  break;
2613  }
2614  assert(nfences >= 2);
2615 
2616  /* Insert the rest of old top into a bin as an ordinary free chunk */
2617  if (csp != old_top) {
2618  mchunkptr q = (mchunkptr)old_top;
2619  size_t psize = csp - old_top;
2620  mchunkptr tn = chunk_plus_offset(q, psize);
2621  set_free_with_pinuse(q, psize, tn);
2622  insert_chunk(m, q, psize);
2623  }
2624 
2625  check_top_chunk(m, m->top);
2626 }
2627 
2628 /* -------------------------- System allocation -------------------------- */
2629 
2630 /* Get memory from system using MORECORE or MMAP */
2631 static void* sys_alloc(mstate m, size_t nb) {
2632  char* tbase = CMFAIL;
2633  size_t tsize = 0;
2634  flag_t mmap_flag = 0;
2635  size_t asize; /* allocation size */
2636 
2637  ensure_initialization();
2638 
2639  if (use_noexpand(m))
2640  return 0;
2641 
2642  /* Directly map large chunks, but only if already initialized */
2643  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2644  void* mem = mmap_alloc(m, nb);
2645  if (mem != 0)
2646  return mem;
2647  }
2648 
2649  asize = granularity_align(nb + SYS_ALLOC_PADDING);
2650  if (asize <= nb)
2651  return 0; /* wraparound */
2652  if (m->footprint_limit != 0) {
2653  size_t fp = m->footprint + asize;
2654  if (fp <= m->footprint || fp > m->footprint_limit)
2655  return 0;
2656  }
2657 
2658  /*
2659  Try getting memory in any of three ways (in most-preferred to
2660  least-preferred order):
2661  1. A call to MORECORE that can normally contiguously extend memory.
2662  (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
2663  or main space is mmapped or a previous contiguous call failed)
2664  2. A call to MMAP new space (disabled if not HAVE_MMAP).
2665  Note that under the default settings, if MORECORE is unable to
2666  fulfill a request, and HAVE_MMAP is true, then mmap is
2667  used as a noncontiguous system allocator. This is a useful backup
2668  strategy for systems with holes in address spaces -- in this case
2669  sbrk cannot contiguously expand the heap, but mmap may be able to
2670  find space.
2671  3. A call to MORECORE that cannot usually contiguously extend memory.
2672  (disabled if not HAVE_MORECORE)
2673 
2674  In all cases, we need to request enough bytes from system to ensure
2675  we can malloc nb bytes upon success, so pad with enough space for
2676  top_foot, plus alignment-pad to make sure we don't lose bytes if
2677  not on boundary, and round this up to a granularity unit. (A brief
2678  padding sketch follows this function.) */
2679 
2680  if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
2681  char* br = CMFAIL;
2682  size_t ssize = asize; /* sbrk call size */
2683  msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2684  ACQUIRE_MALLOC_GLOBAL_LOCK();
2685 
2686  if (ss == 0) { /* First time through or recovery */
2687  char* base = (char*)CALL_MORECORE(0);
2688  if (base != CMFAIL) {
2689  size_t fp;
2690  /* Adjust to end on a page boundary */
2691  if (!is_page_aligned(base))
2692  ssize += (page_align((size_t)base) - (size_t)base);
2693  fp = m->footprint + ssize; /* recheck limits */
2694  if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2695  (m->footprint_limit == 0 ||
2696  (fp > m->footprint && fp <= m->footprint_limit)) &&
2697  (br = (char*)(CALL_MORECORE(ssize))) == base) {
2698  tbase = base;
2699  tsize = ssize;
2700  }
2701  }
2702  }
2703  else {
2704  /* Subtract out existing available top space from MORECORE request. */
2705  ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2706  /* Use mem here only if it did contiguously extend old space */
2707  if (ssize < HALF_MAX_SIZE_T &&
2708  (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2709  tbase = br;
2710  tsize = ssize;
2711  }
2712  }
2713 
2714  if (tbase == CMFAIL) { /* Cope with partial failure */
2715  if (br != CMFAIL) { /* Try to use/extend the space we did get */
2716  if (ssize < HALF_MAX_SIZE_T &&
2717  ssize < nb + SYS_ALLOC_PADDING) {
2718  size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2719  if (esize < HALF_MAX_SIZE_T) {
2720  char* end = (char*)CALL_MORECORE(esize);
2721  if (end != CMFAIL)
2722  ssize += esize;
2723  else { /* Can't use; try to release */
2724  (void) CALL_MORECORE(-ssize);
2725  br = CMFAIL;
2726  }
2727  }
2728  }
2729  }
2730  if (br != CMFAIL) { /* Use the space we did get */
2731  tbase = br;
2732  tsize = ssize;
2733  }
2734  else
2735  disable_contiguous(m); /* Don't try contiguous path in the future */
2736  }
2737 
2738  RELEASE_MALLOC_GLOBAL_LOCK();
2739  }
2740 
2741  if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2742  char* mp = (char*)(CALL_MMAP(asize));
2743  if (mp != CMFAIL) {
2744  tbase = mp;
2745  tsize = asize;
2746  mmap_flag = USE_MMAP_BIT;
2747  }
2748  }
2749 
2750  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2751  if (asize < HALF_MAX_SIZE_T) {
2752  char* br = CMFAIL;
2753  char* end = CMFAIL;
2754  ACQUIRE_MALLOC_GLOBAL_LOCK();
2755  br = (char*)(CALL_MORECORE(asize));
2756  end = (char*)(CALL_MORECORE(0));
2757  RELEASE_MALLOC_GLOBAL_LOCK();
2758  if (br != CMFAIL && end != CMFAIL && br < end) {
2759  size_t ssize = end - br;
2760  if (ssize > nb + TOP_FOOT_SIZE) {
2761  tbase = br;
2762  tsize = ssize;
2763  }
2764  }
2765  }
2766  }
2767 
2768  if (tbase != CMFAIL) {
2769 
2770  if ((m->footprint += tsize) > m->max_footprint)
2771  m->max_footprint = m->footprint;
2772 
2773  if (!is_initialized(m)) { /* first-time initialization */
2774  if (m->least_addr == 0 || tbase < m->least_addr)
2775  m->least_addr = tbase;
2776  m->seg.base = tbase;
2777  m->seg.size = tsize;
2778  m->seg.sflags = mmap_flag;
2779  m->magic = mparams.magic;
2780  m->release_checks = MAX_RELEASE_CHECK_RATE;
2781  init_bins(m);
2782 #if !ONLY_MSPACES
2783  if (is_global(m))
2784  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2785  else
2786 #endif
2787  {
2788  /* Offset top by embedded malloc_state */
2789  mchunkptr mn = next_chunk(mem2chunk(m));
2790  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2791  }
2792  }
2793 
2794  else {
2795  /* Try to merge with an existing segment */
2796  msegmentptr sp = &m->seg;
2797  /* Only consider most recent segment if traversal suppressed */
2798  while (sp != 0 && tbase != sp->base + sp->size)
2799  sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2800  if (sp != 0 &&
2801  !is_extern_segment(sp) &&
2802  (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2803  segment_holds(sp, m->top)) { /* append */
2804  sp->size += tsize;
2805  init_top(m, m->top, m->topsize + tsize);
2806  }
2807  else {
2808  if (tbase < m->least_addr)
2809  m->least_addr = tbase;
2810  sp = &m->seg;
2811  while (sp != 0 && sp->base != tbase + tsize)
2812  sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2813  if (sp != 0 &&
2814  !is_extern_segment(sp) &&
2815  (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2816  char* oldbase = sp->base;
2817  sp->base = tbase;
2818  sp->size += tsize;
2819  return prepend_alloc(m, tbase, oldbase, nb);
2820  }
2821  else
2822  add_segment(m, tbase, tsize, mmap_flag);
2823  }
2824  }
2825 
2826  if (nb < m->topsize) { /* Allocate from new or extended top space */
2827  size_t rsize = m->topsize -= nb;
2828  mchunkptr p = m->top;
2829  mchunkptr r = m->top = chunk_plus_offset(p, nb);
2830  r->head = rsize | PINUSE_BIT;
2831  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2832  check_top_chunk(m, m->top);
2833  check_malloced_chunk(m, chunk2mem(p), nb);
2834  return chunk2mem(p);
2835  }
2836  }
2837 
2838  MALLOC_FAILURE_ACTION;
2839  return 0;
2840 }
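/*
  Illustrative sketch (not part of the original source), referenced from the
  strategy comment in sys_alloc above: every system request is padded with
  SYS_ALLOC_PADDING and rounded up to the granularity, so the space obtained
  always covers nb plus the top foot.  Assumes mparams has been initialized;
  the request size and helper name are hypothetical.
*/
#if 0 /* example only -- never compiled */
static void sys_alloc_padding_example(void) {
  size_t nb    = 100 * 1024;                                 /* hypothetical request */
  size_t asize = granularity_align(nb + SYS_ALLOC_PADDING);
  assert(asize >= nb + TOP_FOOT_SIZE);                       /* room for top_foot    */
  assert((asize & (mparams.granularity - SIZE_T_ONE)) == 0); /* whole granularity units */
}
#endif /* example only */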
2841 
2842 /* ----------------------- system deallocation -------------------------- */
2843 
2844 /* Unmap and unlink any mmapped segments that don't contain used chunks */
2845 static size_t release_unused_segments(mstate m) {
2846  size_t released = 0;
2847  int nsegs = 0;
2848  msegmentptr pred = &m->seg;
2849  msegmentptr sp = pred->next;
2850  while (sp != 0) {
2851  char* base = sp->base;
2852  size_t size = sp->size;
2853  msegmentptr next = sp->next;
2854  ++nsegs;
2855  if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2856  mchunkptr p = align_as_chunk(base);
2857  size_t psize = chunksize(p);
2858  /* Can unmap if first chunk holds entire segment and not pinned */
2859  if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2860  tchunkptr tp = (tchunkptr)p;
2861  assert(segment_holds(sp, (char*)sp));
2862  if (p == m->dv) {
2863  m->dv = 0;
2864  m->dvsize = 0;
2865  }
2866  else {
2867  unlink_large_chunk(m, tp);
2868  }
2869  if (CALL_MUNMAP(base, size) == 0) {
2870  released += size;
2871  m->footprint -= size;
2872  /* unlink obsoleted record */
2873  sp = pred;
2874  sp->next = next;
2875  }
2876  else { /* back out if cannot unmap */
2877  insert_large_chunk(m, tp, psize);
2878  }
2879  }
2880  }
2881  if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2882  break;
2883  pred = sp;
2884  sp = next;
2885  }
2886  /* Reset check counter */
2887  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2888  (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2889  return released;
2890 }
2891 
2892 static int sys_trim(mstate m, size_t pad) {
2893  size_t released = 0;
2895  if (pad < MAX_REQUEST && is_initialized(m)) {
2896  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2897 
2898  if (m->topsize > pad) {
2899  /* Shrink top space in granularity-size units, keeping at least one */
2900  size_t unit = mparams.granularity;
2901  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2902  SIZE_T_ONE) * unit;
2903  msegmentptr sp = segment_holding(m, (char*)m->top);
2904 
2905  if (!is_extern_segment(sp)) {
2906  if (is_mmapped_segment(sp)) {
2907  if (HAVE_MMAP &&
2908  sp->size >= extra &&
2909  !has_segment_link(m, sp)) { /* can't shrink if pinned */
2910  size_t newsize = sp->size - extra;
2911  (void)newsize; /* placate people compiling -Wunused-variable */
2912  /* Prefer mremap, fall back to munmap */
2913  if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2914  (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2915  released = extra;
2916  }
2917  }
2918  }
2919  else if (HAVE_MORECORE) {
2920  if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2921  extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2922  ACQUIRE_MALLOC_GLOBAL_LOCK();
2923  {
2924  /* Make sure end of memory is where we last set it. */
2925  char* old_br = (char*)(CALL_MORECORE(0));
2926  if (old_br == sp->base + sp->size) {
2927  char* rel_br = (char*)(CALL_MORECORE(-extra));
2928  char* new_br = (char*)(CALL_MORECORE(0));
2929  if (rel_br != CMFAIL && new_br < old_br)
2930  released = old_br - new_br;
2931  }
2932  }
2933  RELEASE_MALLOC_GLOBAL_LOCK();
2934  }
2935  }
2936 
2937  if (released != 0) {
2938  sp->size -= released;
2939  m->footprint -= released;
2940  init_top(m, m->top, m->topsize - released);
2941  check_top_chunk(m, m->top);
2942  }
2943  }
2944 
2945  /* Unmap any unused mmapped segments */
2946  if (HAVE_MMAP)
2947  released += release_unused_segments(m);
2948 
2949  /* On failure, disable autotrim to avoid repeated failed future calls */
2950  if (released == 0 && m->topsize > m->trim_check)
2951  m->trim_check = MAX_SIZE_T;
2952  }
2953 
2954  return (released != 0)? 1 : 0;
2955 }
2956 
2957 /* Consolidate and bin a chunk. Differs from exported versions
2958  of free mainly in that the chunk need not be marked as inuse.
2959 */
2960 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2961  mchunkptr next = chunk_plus_offset(p, psize);
2962  if (!pinuse(p)) {
2963  mchunkptr prev;
2964  size_t prevsize = p->prev_foot;
2965  if (is_mmapped(p)) {
2966  psize += prevsize + MMAP_FOOT_PAD;
2967  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2968  m->footprint -= psize;
2969  return;
2970  }
2971  prev = chunk_minus_offset(p, prevsize);
2972  psize += prevsize;
2973  p = prev;
2974  if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2975  if (p != m->dv) {
2976  unlink_chunk(m, p, prevsize);
2977  }
2978  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2979  m->dvsize = psize;
2980  set_free_with_pinuse(p, psize, next);
2981  return;
2982  }
2983  }
2984  else {
2985  CORRUPTION_ERROR_ACTION(m);
2986  return;
2987  }
2988  }
2989  if (RTCHECK(ok_address(m, next))) {
2990  if (!cinuse(next)) { /* consolidate forward */
2991  if (next == m->top) {
2992  size_t tsize = m->topsize += psize;
2993  m->top = p;
2994  p->head = tsize | PINUSE_BIT;
2995  if (p == m->dv) {
2996  m->dv = 0;
2997  m->dvsize = 0;
2998  }
2999  return;
3000  }
3001  else if (next == m->dv) {
3002  size_t dsize = m->dvsize += psize;
3003  m->dv = p;
3004  set_size_and_pinuse_of_free_chunk(p, dsize);
3005  return;
3006  }
3007  else {
3008  size_t nsize = chunksize(next);
3009  psize += nsize;
3010  unlink_chunk(m, next, nsize);
3011  set_free_with_pinuse(p, psize, next);
3012  if (p == m->dv) {
3013  m->dvsize = psize;
3014  return;
3015  }
3016  }
3017  }
3018  else {
3019  set_free_with_pinuse(p, psize, next);
3020  }
3021  insert_chunk(m, p, psize);
3022  }
3023  else {
3024  CORRUPTION_ERROR_ACTION(m);
3025  }
3026 }
3027 
3028 /* ---------------------------- malloc --------------------------- */
3029 
3030 /* allocate a large request from the best fitting chunk in a treebin */
3031 static void* tmalloc_large(mstate m, size_t nb) {
3032  tchunkptr v = 0;
3033  size_t rsize = -nb; /* Unsigned negation */
3034  tchunkptr t;
3035  bindex_t idx;
3036  compute_tree_index(nb, idx);
3037  if ((t = *treebin_at(m, idx)) != 0) {
3038  /* Traverse tree for this bin looking for node with size == nb */
3039  size_t sizebits = nb << leftshift_for_tree_index(idx);
3040  tchunkptr rst = 0; /* The deepest untaken right subtree */
3041  for (;;) {
3042  tchunkptr rt;
3043  size_t trem = chunksize(t) - nb;
3044  if (trem < rsize) {
3045  v = t;
3046  if ((rsize = trem) == 0)
3047  break;
3048  }
3049  rt = t->child[1];
3050  t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3051  if (rt != 0 && rt != t)
3052  rst = rt;
3053  if (t == 0) {
3054  t = rst; /* set t to least subtree holding sizes > nb */
3055  break;
3056  }
3057  sizebits <<= 1;
3058  }
3059  }
3060  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3061  binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3062  if (leftbits != 0) {
3063  bindex_t i;
3064  binmap_t leastbit = least_bit(leftbits);
3065  compute_bit2idx(leastbit, i);
3066  t = *treebin_at(m, i);
3067  }
3068  }
3069 
3070  while (t != 0) { /* find smallest of tree or subtree */
3071  size_t trem = chunksize(t) - nb;
3072  if (trem < rsize) {
3073  rsize = trem;
3074  v = t;
3075  }
3076  t = leftmost_child(t);
3077  }
3078 
3079  /* If dv is a better fit, return 0 so malloc will use it */
3080  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3081  if (RTCHECK(ok_address(m, v))) { /* split */
3082  mchunkptr r = chunk_plus_offset(v, nb);
3083  assert(chunksize(v) == rsize + nb);
3084  if (RTCHECK(ok_next(v, r))) {
3085  unlink_large_chunk(m, v);
3086  if (rsize < MIN_CHUNK_SIZE)
3087  set_inuse_and_pinuse(m, v, (rsize + nb));
3088  else {
3089  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3090  set_size_and_pinuse_of_free_chunk(r, rsize);
3091  insert_chunk(m, r, rsize);
3092  }
3093  return chunk2mem(v);
3094  }
3095  }
3096  CORRUPTION_ERROR_ACTION(m);
3097  }
3098  return 0;
3099 }
3100 
3101 /* allocate a small request from the best fitting chunk in a treebin */
3102 static void* tmalloc_small(mstate m, size_t nb) {
3103  tchunkptr t, v;
3104  size_t rsize;
3105  bindex_t i;
3106  binmap_t leastbit = least_bit(m->treemap);
3107  compute_bit2idx(leastbit, i);
3108  v = t = *treebin_at(m, i);
3109  rsize = chunksize(t) - nb;
3110 
3111  while ((t = leftmost_child(t)) != 0) {
3112  size_t trem = chunksize(t) - nb;
3113  if (trem < rsize) {
3114  rsize = trem;
3115  v = t;
3116  }
3117  }
3118 
3119  if (RTCHECK(ok_address(m, v))) {
3120  mchunkptr r = chunk_plus_offset(v, nb);
3121  assert(chunksize(v) == rsize + nb);
3122  if (RTCHECK(ok_next(v, r))) {
3123  unlink_large_chunk(m, v);
3124  if (rsize < MIN_CHUNK_SIZE)
3125  set_inuse_and_pinuse(m, v, (rsize + nb));
3126  else {
3127  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3128  set_size_and_pinuse_of_free_chunk(r, rsize);
3129  replace_dv(m, r, rsize);
3130  }
3131  return chunk2mem(v);
3132  }
3133  }
3134 
3135  CORRUPTION_ERROR_ACTION(m);
3136  return 0;
3137 }
3138 
3139 #if !ONLY_MSPACES
3140 
3141 void* dlmalloc(size_t bytes) {
3142  /*
3143  Basic algorithm:
3144  If a small request (< 256 bytes minus per-chunk overhead):
3145  1. If one exists, use a remainderless chunk in associated smallbin.
3146  (Remainderless means that there are too few excess bytes to
3147  represent as a chunk.)
3148  2. If it is big enough, use the dv chunk, which is normally the
3149  chunk adjacent to the one used for the most recent small request.
3150  3. If one exists, split the smallest available chunk in a bin,
3151  saving remainder in dv.
3152  4. If it is big enough, use the top chunk.
3153  5. If available, get memory from system and use it
3154  Otherwise, for a large request:
3155  1. Find the smallest available binned chunk that fits, and use it
3156  if it is better fitting than dv chunk, splitting if necessary.
3157  2. If better fitting than any binned chunk, use the dv chunk.
3158  3. If it is big enough, use the top chunk.
3159  4. If request size >= mmap threshold, try to directly mmap this chunk.
3160  5. If available, get memory from system and use it
3161 
3162  The ugly gotos here ensure that postaction occurs along all paths.
3163  (A brief usage sketch follows this function.) */
3164 
3165 #if USE_LOCKS
3166  ensure_initialization(); /* initialize in sys_alloc if not using locks */
3167 #endif
3168 
3169  if (!PREACTION(gm)) {
3170  void* mem;
3171  size_t nb;
3172  if (bytes <= MAX_SMALL_REQUEST) {
3173  bindex_t idx;
3174  binmap_t smallbits;
3175  nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3176  idx = small_index(nb);
3177  smallbits = gm->smallmap >> idx;
3178 
3179  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3180  mchunkptr b, p;
3181  idx += ~smallbits & 1; /* Uses next bin if idx empty */
3182  b = smallbin_at(gm, idx);
3183  p = b->fd;
3184  assert(chunksize(p) == small_index2size(idx));
3185  unlink_first_small_chunk(gm, b, p, idx);
3186  set_inuse_and_pinuse(gm, p, small_index2size(idx));
3187  mem = chunk2mem(p);
3188  check_malloced_chunk(gm, mem, nb);
3189  goto postaction;
3190  }
3191 
3192  else if (nb > gm->dvsize) {
3193  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3194  mchunkptr b, p, r;
3195  size_t rsize;
3196  bindex_t i;
3197  binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3198  binmap_t leastbit = least_bit(leftbits);
3199  compute_bit2idx(leastbit, i);
3200  b = smallbin_at(gm, i);
3201  p = b->fd;
3202  assert(chunksize(p) == small_index2size(i));
3203  unlink_first_small_chunk(gm, b, p, i);
3204  rsize = small_index2size(i) - nb;
3205  /* Fit here cannot be remainderless if 4-byte sizes */
3206  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3207  set_inuse_and_pinuse(gm, p, small_index2size(i));
3208  else {
3209  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3210  r = chunk_plus_offset(p, nb);
3211  set_size_and_pinuse_of_free_chunk(r, rsize);
3212  replace_dv(gm, r, rsize);
3213  }
3214  mem = chunk2mem(p);
3215  check_malloced_chunk(gm, mem, nb);
3216  goto postaction;
3217  }
3218 
3219  else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3220  check_malloced_chunk(gm, mem, nb);
3221  goto postaction;
3222  }
3223  }
3224  }
3225  else if (bytes >= MAX_REQUEST)
3226  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3227  else {
3228  nb = pad_request(bytes);
3229  if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3230  check_malloced_chunk(gm, mem, nb);
3231  goto postaction;
3232  }
3233  }
3234 
3235  if (nb <= gm->dvsize) {
3236  size_t rsize = gm->dvsize - nb;
3237  mchunkptr p = gm->dv;
3238  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3239  mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3240  gm->dvsize = rsize;
3241  set_size_and_pinuse_of_free_chunk(r, rsize);
3242  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3243  }
3244  else { /* exhaust dv */
3245  size_t dvs = gm->dvsize;
3246  gm->dvsize = 0;
3247  gm->dv = 0;
3248  set_inuse_and_pinuse(gm, p, dvs);
3249  }
3250  mem = chunk2mem(p);
3251  check_malloced_chunk(gm, mem, nb);
3252  goto postaction;
3253  }
3254 
3255  else if (nb < gm->topsize) { /* Split top */
3256  size_t rsize = gm->topsize -= nb;
3257  mchunkptr p = gm->top;
3258  mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3259  r->head = rsize | PINUSE_BIT;
3260  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3261  mem = chunk2mem(p);
3262  check_top_chunk(gm, gm->top);
3263  check_malloced_chunk(gm, mem, nb);
3264  goto postaction;
3265  }
3266 
3267  mem = sys_alloc(gm, nb);
3268 
3269  postaction:
3270  POSTACTION(gm);
3271  return mem;
3272  }
3273 
3274  return 0;
3275 }
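/*
  Minimal usage sketch (not part of the original source) for the algorithm
  comment above: callers only see dlmalloc/dlfree, whether the request was
  served from a smallbin, the dv chunk, a treebin, top, or a direct mmap.
  The threshold remark assumes the default mmap_threshold; the helper name
  is hypothetical.
*/
#if 0 /* example only -- never compiled */
static void dlmalloc_usage_example(void) {
  void *small = dlmalloc(64);          /* typically a smallbin or dv fit            */
  void *large = dlmalloc(512 * 1024);  /* typically above the default mmap threshold */
  if (small != 0)
    memset(small, 0, 64);              /* memory is uninitialized, unlike dlcalloc   */
  dlfree(small);                       /* dlfree(0) is a harmless no-op              */
  dlfree(large);
}
#endif /* example only */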
3276 
3277 /* ---------------------------- free --------------------------- */
3278 
3279 void dlfree(void* mem) {
3280  /*
3281  Consolidate freed chunks with preceding or succeeding bordering
3282  free chunks, if they exist, and then place in a bin. Intermixed
3283  with special cases for top, dv, mmapped chunks, and usage errors.
3284  */
3285 
3286  if (mem != 0) {
3287  mchunkptr p = mem2chunk(mem);
3288 #if FOOTERS
3289  mstate fm = get_mstate_for(p);
3290  if (!ok_magic(fm)) {
3291  USAGE_ERROR_ACTION(fm, p);
3292  return;
3293  }
3294 #else /* FOOTERS */
3295 #define fm gm
3296 #endif /* FOOTERS */
3297  if (!PREACTION(fm)) {
3298  check_inuse_chunk(fm, p);
3299  if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3300  size_t psize = chunksize(p);
3301  mchunkptr next = chunk_plus_offset(p, psize);
3302  if (!pinuse(p)) {
3303  size_t prevsize = p->prev_foot;
3304  if (is_mmapped(p)) {
3305  psize += prevsize + MMAP_FOOT_PAD;
3306  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3307  fm->footprint -= psize;
3308  goto postaction;
3309  }
3310  else {
3311  mchunkptr prev = chunk_minus_offset(p, prevsize);
3312  psize += prevsize;
3313  p = prev;
3314  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3315  if (p != fm->dv) {
3316  unlink_chunk(fm, p, prevsize);
3317  }
3318  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3319  fm->dvsize = psize;
3320  set_free_with_pinuse(p, psize, next);
3321  goto postaction;
3322  }
3323  }
3324  else
3325  goto erroraction;
3326  }
3327  }
3328 
3329  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3330  if (!cinuse(next)) { /* consolidate forward */
3331  if (next == fm->top) {
3332  size_t tsize = fm->topsize += psize;
3333  fm->top = p;
3334  p->head = tsize | PINUSE_BIT;
3335  if (p == fm->dv) {
3336  fm->dv = 0;
3337  fm->dvsize = 0;
3338  }
3339  if (should_trim(fm, tsize))
3340  sys_trim(fm, 0);
3341  goto postaction;
3342  }
3343  else if (next == fm->dv) {
3344  size_t dsize = fm->dvsize += psize;
3345  fm->dv = p;
3346  set_size_and_pinuse_of_free_chunk(p, dsize);
3347  goto postaction;
3348  }
3349  else {
3350  size_t nsize = chunksize(next);
3351  psize += nsize;
3352  unlink_chunk(fm, next, nsize);
3353  set_size_and_pinuse_of_free_chunk(p, psize);
3354  if (p == fm->dv) {
3355  fm->dvsize = psize;
3356  goto postaction;
3357  }
3358  }
3359  }
3360  else
3361  set_free_with_pinuse(p, psize, next);
3362 
3363  if (is_small(psize)) {
3364  insert_small_chunk(fm, p, psize);
3365  check_free_chunk(fm, p);
3366  }
3367  else {
3368  tchunkptr tp = (tchunkptr)p;
3369  insert_large_chunk(fm, tp, psize);
3370  check_free_chunk(fm, p);
3371  if (--fm->release_checks == 0)
3372  release_unused_segments(fm);
3373  }
3374  goto postaction;
3375  }
3376  }
3377  erroraction:
3378  USAGE_ERROR_ACTION(fm, p);
3379  postaction:
3380  POSTACTION(fm);
3381  }
3382  }
3383 #if !FOOTERS
3384 #undef fm
3385 #endif /* FOOTERS */
3386 }
3387 
3388 void* dlcalloc(size_t n_elements, size_t elem_size) {
3389  void* mem;
3390  size_t req = 0;
3391  if (n_elements != 0) {
3392  req = n_elements * elem_size;
3393  if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3394  (req / n_elements != elem_size))
3395  req = MAX_SIZE_T; /* force downstream failure on overflow */
3396  }
3397  mem = dlmalloc(req);
3398  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3399  memset(mem, 0, req);
3400  return mem;
3401 }
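/*
 * Editor's sketch (illustrative only, not part of the original source): the
 * overflow guard in dlcalloc() above skips the division whenever both operands
 * fit in 16 bits, since such a product cannot overflow a size_t of at least
 * 32 bits.  The same check in isolation, with a concrete overflow case:
 *
 *   size_t req = n_elements * elem_size;
 *   if (((n_elements | elem_size) & ~(size_t)0xffff) &&   // either operand >= 2^16?
 *       (req / n_elements != elem_size))                  // then verify by division
 *     req = MAX_SIZE_T;                                    // force downstream failure
 *
 *   // e.g. with a 32-bit size_t, n_elements = 0x20000 and elem_size = 0x20000
 *   // wrap to req == 0; req / n_elements != elem_size catches it.
 */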
3402 
3403 #endif /* !ONLY_MSPACES */
3404 
3405 /* ------------ Internal support for realloc, memalign, etc -------------- */
3406 
3407 /* Try to realloc; only in-place unless can_move true */
3408 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
3409  int can_move) {
3410  mchunkptr newp = 0;
3411  size_t oldsize = chunksize(p);
3412  mchunkptr next = chunk_plus_offset(p, oldsize);
3413  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3414  ok_next(p, next) && ok_pinuse(next))) {
3415  if (is_mmapped(p)) {
3416  newp = mmap_resize(m, p, nb, can_move);
3417  }
3418  else if (oldsize >= nb) { /* already big enough */
3419  size_t rsize = oldsize - nb;
3420  if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3421  mchunkptr r = chunk_plus_offset(p, nb);
3422  set_inuse(m, p, nb);
3423  set_inuse(m, r, rsize);
3424  dispose_chunk(m, r, rsize);
3425  }
3426  newp = p;
3427  }
3428  else if (next == m->top) { /* extend into top */
3429  if (oldsize + m->topsize > nb) {
3430  size_t newsize = oldsize + m->topsize;
3431  size_t newtopsize = newsize - nb;
3432  mchunkptr newtop = chunk_plus_offset(p, nb);
3433  set_inuse(m, p, nb);
3434  newtop->head = newtopsize |PINUSE_BIT;
3435  m->top = newtop;
3436  m->topsize = newtopsize;
3437  newp = p;
3438  }
3439  }
3440  else if (next == m->dv) { /* extend into dv */
3441  size_t dvs = m->dvsize;
3442  if (oldsize + dvs >= nb) {
3443  size_t dsize = oldsize + dvs - nb;
3444  if (dsize >= MIN_CHUNK_SIZE) {
3445  mchunkptr r = chunk_plus_offset(p, nb);
3446  mchunkptr n = chunk_plus_offset(r, dsize);
3447  set_inuse(m, p, nb);
3448  set_size_and_pinuse_of_free_chunk(r, dsize);
3449  clear_pinuse(n);
3450  m->dvsize = dsize;
3451  m->dv = r;
3452  }
3453  else { /* exhaust dv */
3454  size_t newsize = oldsize + dvs;
3455  set_inuse(m, p, newsize);
3456  m->dvsize = 0;
3457  m->dv = 0;
3458  }
3459  newp = p;
3460  }
3461  }
3462  else if (!cinuse(next)) { /* extend into next free chunk */
3463  size_t nextsize = chunksize(next);
3464  if (oldsize + nextsize >= nb) {
3465  size_t rsize = oldsize + nextsize - nb;
3466  unlink_chunk(m, next, nextsize);
3467  if (rsize < MIN_CHUNK_SIZE) {
3468  size_t newsize = oldsize + nextsize;
3469  set_inuse(m, p, newsize);
3470  }
3471  else {
3472  mchunkptr r = chunk_plus_offset(p, nb);
3473  set_inuse(m, p, nb);
3474  set_inuse(m, r, rsize);
3475  dispose_chunk(m, r, rsize);
3476  }
3477  newp = p;
3478  }
3479  }
3480  }
3481  else {
3482  CORRUPTION_ERROR_ACTION(m);
3483  }
3484  return newp;
3485 }
3486 
3487 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3488  void* mem = 0;
3489  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3490  alignment = MIN_CHUNK_SIZE;
3491  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3492  size_t a = MALLOC_ALIGNMENT << 1;
3493  while (a < alignment) a <<= 1;
3494  alignment = a;
3495  }
3496  if (bytes >= MAX_REQUEST - alignment) {
3497  if (m != 0) { /* Test isn't needed but avoids compiler warning */
3498  MALLOC_FAILURE_ACTION;
3499  }
3500  }
3501  else {
3502  size_t nb = request2size(bytes);
3503  size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3504  mem = internal_malloc(m, req);
3505  if (mem != 0) {
3506  mchunkptr p = mem2chunk(mem);
3507  if (PREACTION(m))
3508  return 0;
3509  if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
3510  /*
3511  Find an aligned spot inside chunk. Since we need to give
3512  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3513  the first calculation places us at a spot with less than
3514  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3515  We've allocated enough total room so that this is always
3516  possible.
3517  */
3518  char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3519  SIZE_T_ONE)) &
3520  -alignment));
3521  char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3522  br : br+alignment;
3523  mchunkptr newp = (mchunkptr)pos;
3524  size_t leadsize = pos - (char*)(p);
3525  size_t newsize = chunksize(p) - leadsize;
3526 
3527  if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3528  newp->prev_foot = p->prev_foot + leadsize;
3529  newp->head = newsize;
3530  }
3531  else { /* Otherwise, give back leader, use the rest */
3532  set_inuse(m, newp, newsize);
3533  set_inuse(m, p, leadsize);
3534  dispose_chunk(m, p, leadsize);
3535  }
3536  p = newp;
3537  }
3538 
3539  /* Give back spare room at the end */
3540  if (!is_mmapped(p)) {
3541  size_t size = chunksize(p);
3542  if (size > nb + MIN_CHUNK_SIZE) {
3543  size_t remainder_size = size - nb;
3544  mchunkptr remainder = chunk_plus_offset(p, nb);
3545  set_inuse(m, p, nb);
3546  set_inuse(m, remainder, remainder_size);
3547  dispose_chunk(m, remainder, remainder_size);
3548  }
3549  }
3550 
3551  mem = chunk2mem(p);
3552  assert (chunksize(p) >= nb);
3553  assert(((size_t)mem & (alignment - 1)) == 0);
3554  check_inuse_chunk(m, p);
3555  POSTACTION(m);
3556  }
3557  }
3558  return mem;
3559 }
3560 
3561 /*
3562  Common support for independent_X routines, handling
3563  all of the combinations that can result.
3564  The opts arg has:
3565  bit 0 set if all elements are same size (using sizes[0])
3566  bit 1 set if elements should be zeroed
3567 */
3568 static void** ialloc(mstate m,
3569  size_t n_elements,
3570  size_t* sizes,
3571  int opts,
3572  void* chunks[]) {
3573 
3574  size_t element_size; /* chunksize of each element, if all same */
3575  size_t contents_size; /* total size of elements */
3576  size_t array_size; /* request size of pointer array */
3577  void* mem; /* malloced aggregate space */
3578  mchunkptr p; /* corresponding chunk */
3579  size_t remainder_size; /* remaining bytes while splitting */
3580  void** marray; /* either "chunks" or malloced ptr array */
3581  mchunkptr array_chunk; /* chunk for malloced ptr array */
3582  flag_t was_enabled; /* to disable mmap */
3583  size_t size;
3584  size_t i;
3585 
3586  ensure_initialization();
3587  /* compute array length, if needed */
3588  if (chunks != 0) {
3589  if (n_elements == 0)
3590  return chunks; /* nothing to do */
3591  marray = chunks;
3592  array_size = 0;
3593  }
3594  else {
3595  /* if empty req, must still return chunk representing empty array */
3596  if (n_elements == 0)
3597  return (void**)internal_malloc(m, 0);
3598  marray = 0;
3599  array_size = request2size(n_elements * (sizeof(void*)));
3600  }
3601 
3602  /* compute total element size */
3603  if (opts & 0x1) { /* all-same-size */
3604  element_size = request2size(*sizes);
3605  contents_size = n_elements * element_size;
3606  }
3607  else { /* add up all the sizes */
3608  element_size = 0;
3609  contents_size = 0;
3610  for (i = 0; i != n_elements; ++i)
3611  contents_size += request2size(sizes[i]);
3612  }
3613 
3614  size = contents_size + array_size;
3615 
3616  /*
3617  Allocate the aggregate chunk. First disable direct-mmapping so
3618  malloc won't use it, since we would not be able to later
3619  free/realloc space internal to a segregated mmap region.
3620  */
3621  was_enabled = use_mmap(m);
3622  disable_mmap(m);
3623  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3624  if (was_enabled)
3625  enable_mmap(m);
3626  if (mem == 0)
3627  return 0;
3628 
3629  if (PREACTION(m)) return 0;
3630  p = mem2chunk(mem);
3631  remainder_size = chunksize(p);
3632 
3633  assert(!is_mmapped(p));
3634 
3635  if (opts & 0x2) { /* optionally clear the elements */
3636  memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3637  }
3638 
3639  /* If not provided, allocate the pointer array as final part of chunk */
3640  if (marray == 0) {
3641  size_t array_chunk_size;
3642  array_chunk = chunk_plus_offset(p, contents_size);
3643  array_chunk_size = remainder_size - contents_size;
3644  marray = (void**) (chunk2mem(array_chunk));
3645  set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3646  remainder_size = contents_size;
3647  }
3648 
3649  /* split out elements */
3650  for (i = 0; ; ++i) {
3651  marray[i] = chunk2mem(p);
3652  if (i != n_elements-1) {
3653  if (element_size != 0)
3654  size = element_size;
3655  else
3656  size = request2size(sizes[i]);
3657  remainder_size -= size;
3658  set_size_and_pinuse_of_inuse_chunk(m, p, size);
3659  p = chunk_plus_offset(p, size);
3660  }
3661  else { /* the final element absorbs any overallocation slop */
3662  set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3663  break;
3664  }
3665  }
3666 
3667 #if DEBUG
3668  if (marray != chunks) {
3669  /* final element must have exactly exhausted chunk */
3670  if (element_size != 0) {
3671  assert(remainder_size == element_size);
3672  }
3673  else {
3674  assert(remainder_size == request2size(sizes[i]));
3675  }
3676  check_inuse_chunk(m, mem2chunk(marray));
3677  }
3678  for (i = 0; i != n_elements; ++i)
3679  check_inuse_chunk(m, mem2chunk(marray[i]));
3680 
3681 #endif /* DEBUG */
3682 
3683  POSTACTION(m);
3684  return marray;
3685 }
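/*
 * Editor's sketch (illustrative only, not part of the original source):
 * ialloc() above backs dlindependent_calloc()/dlindependent_comalloc(),
 * defined later in this file.  With opts bit 0 clear, each element takes its
 * own size from sizes[]; the struct name below is hypothetical:
 *
 *   size_t sizes[3] = { sizeof (struct hdr), 100, 3000 };
 *   void **chunks = dlindependent_comalloc (3, sizes, 0);
 *   if (chunks != 0) {
 *     struct hdr *h = chunks[0];   // three individually freeable objects
 *     dlfree (chunks[1]);          // ...carved from one contiguous request
 *     dlfree (chunks[2]);
 *     dlfree (chunks[0]);
 *     dlfree (chunks);             // pointer array was malloced too (chunks arg was 0)
 *   }
 */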
3686 
3687 /* Try to free all pointers in the given array.
3688  Note: this could be made faster by delaying consolidation,
3689  at the price of disabling some user integrity checks. We
3690  still optimize some consolidations by combining adjacent
3691  chunks before freeing, which will occur often if allocated
3692  with ialloc or the array is sorted.
3693 */
3694 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3695  size_t unfreed = 0;
3696  if (!PREACTION(m)) {
3697  void** a;
3698  void** fence = &(array[nelem]);
3699  for (a = array; a != fence; ++a) {
3700  void* mem = *a;
3701  if (mem != 0) {
3702  mchunkptr p = mem2chunk(mem);
3703  size_t psize = chunksize(p);
3704 #if FOOTERS
3705  if (get_mstate_for(p) != m) {
3706  ++unfreed;
3707  continue;
3708  }
3709 #endif
3710  check_inuse_chunk(m, p);
3711  *a = 0;
3712  if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3713  void ** b = a + 1; /* try to merge with next chunk */
3714  mchunkptr next = next_chunk(p);
3715  if (b != fence && *b == chunk2mem(next)) {
3716  size_t newsize = chunksize(next) + psize;
3717  set_inuse(m, p, newsize);
3718  *b = chunk2mem(p);
3719  }
3720  else
3721  dispose_chunk(m, p, psize);
3722  }
3723  else {
3724  CORRUPTION_ERROR_ACTION(m);
3725  break;
3726  }
3727  }
3728  }
3729  if (should_trim(m, m->topsize))
3730  sys_trim(m, 0);
3731  POSTACTION(m);
3732  }
3733  return unfreed;
3734 }
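/*
 * Editor's sketch (illustrative only, not part of the original source):
 * internal_bulk_free() backs dlbulk_free()/mspace_bulk_free().  Each freed
 * slot is cleared to 0, and pointers that happen to be adjacent in memory
 * (common when they were allocated back to back or via ialloc) are merged
 * by the "*b == chunk2mem(next)" test above before being binned:
 *
 *   void *ptrs[4];
 *   int i;
 *   for (i = 0; i < 4; i++)
 *     ptrs[i] = dlmalloc (64);
 *   size_t unfreed = dlbulk_free (ptrs, 4);  // 0 on success; counts pointers it could not free
 */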
3735 
3736 /* Traversal */
3737 #if MALLOC_INSPECT_ALL
3738 static void internal_inspect_all(mstate m,
3739  void(*handler)(void *start,
3740  void *end,
3741  size_t used_bytes,
3742  void* callback_arg),
3743  void* arg) {
3744  if (is_initialized(m)) {
3745  mchunkptr top = m->top;
3746  msegmentptr s;
3747  for (s = &m->seg; s != 0; s = s->next) {
3748  mchunkptr q = align_as_chunk(s->base);
3749  while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3750  mchunkptr next = next_chunk(q);
3751  size_t sz = chunksize(q);
3752  size_t used;
3753  void* start;
3754  if (is_inuse(q)) {
3755  used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3756  start = chunk2mem(q);
3757  }
3758  else {
3759  used = 0;
3760  if (is_small(sz)) { /* offset by possible bookkeeping */
3761  start = (void*)((char*)q + sizeof(struct malloc_chunk));
3762  }
3763  else {
3764  start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3765  }
3766  }
3767  if (start < (void*)next) /* skip if all space is bookkeeping */
3768  handler(start, next, used, arg);
3769  if (q == top)
3770  break;
3771  q = next;
3772  }
3773  }
3774  }
3775 }
3776 #endif /* MALLOC_INSPECT_ALL */
3777 
3778 /* ------------------ Exported realloc, memalign, etc -------------------- */
3779 
3780 #if !ONLY_MSPACES
3781 
3782 void* dlrealloc(void* oldmem, size_t bytes) {
3783  void* mem = 0;
3784  if (oldmem == 0) {
3785  mem = dlmalloc(bytes);
3786  }
3787  else if (bytes >= MAX_REQUEST) {
3788  MALLOC_FAILURE_ACTION;
3789  }
3790 #ifdef REALLOC_ZERO_BYTES_FREES
3791  else if (bytes == 0) {
3792  dlfree(oldmem);
3793  }
3794 #endif /* REALLOC_ZERO_BYTES_FREES */
3795  else {
3796  size_t nb = request2size(bytes);
3797  mchunkptr oldp = mem2chunk(oldmem);
3798 #if ! FOOTERS
3799  mstate m = gm;
3800 #else /* FOOTERS */
3801  mstate m = get_mstate_for(oldp);
3802  if (!ok_magic(m)) {
3803  USAGE_ERROR_ACTION(m, oldmem);
3804  return 0;
3805  }
3806 #endif /* FOOTERS */
3807  if (!PREACTION(m)) {
3808  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3809  POSTACTION(m);
3810  if (newp != 0) {
3811  check_inuse_chunk(m, newp);
3812  mem = chunk2mem(newp);
3813  }
3814  else {
3815  mem = internal_malloc(m, bytes);
3816  if (mem != 0) {
3817  size_t oc = chunksize(oldp) - overhead_for(oldp);
3818  memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3819  internal_free(m, oldmem);
3820  }
3821  }
3822  }
3823  }
3824  return mem;
3825 }
3826 
3827 void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3828  void* mem = 0;
3829  if (oldmem != 0) {
3830  if (bytes >= MAX_REQUEST) {
3831  MALLOC_FAILURE_ACTION;
3832  }
3833  else {
3834  size_t nb = request2size(bytes);
3835  mchunkptr oldp = mem2chunk(oldmem);
3836 #if ! FOOTERS
3837  mstate m = gm;
3838 #else /* FOOTERS */
3839  mstate m = get_mstate_for(oldp);
3840  if (!ok_magic(m)) {
3841  USAGE_ERROR_ACTION(m, oldmem);
3842  return 0;
3843  }
3844 #endif /* FOOTERS */
3845  if (!PREACTION(m)) {
3846  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3847  POSTACTION(m);
3848  if (newp == oldp) {
3849  check_inuse_chunk(m, newp);
3850  mem = oldmem;
3851  }
3852  }
3853  }
3854  }
3855  return mem;
3856 }
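/*
 * Editor's sketch (illustrative only, not part of the original source):
 * dlrealloc_in_place() never moves or frees the block; it returns the
 * original pointer only when try_realloc_chunk() could resize it in place
 * (shrink, extend into top, the dv, or a free neighbor), and 0 otherwise,
 * leaving the old block intact:
 *
 *   void *p = dlmalloc (100);
 *   if (dlrealloc_in_place (p, 400) == 0) {
 *     void *q = dlrealloc (p, 400);   // fall back to a realloc that may move
 *     if (q != 0)
 *       p = q;
 *   }
 */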
3857 
3858 void* dlmemalign(size_t alignment, size_t bytes) {
3859  if (alignment <= MALLOC_ALIGNMENT) {
3860  return dlmalloc(bytes);
3861  }
3862  return internal_memalign(gm, alignment, bytes);
3863 }
3864 
3865 int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3866  void* mem = 0;
3867  if (alignment == MALLOC_ALIGNMENT)
3868  mem = dlmalloc(bytes);
3869  else {
3870  size_t d = alignment / sizeof(void*);
3871  size_t r = alignment % sizeof(void*);
3872  if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3873  return EINVAL;
3874  else if (bytes <= MAX_REQUEST - alignment) {
3875  if (alignment < MIN_CHUNK_SIZE)
3876  alignment = MIN_CHUNK_SIZE;
3877  mem = internal_memalign(gm, alignment, bytes);
3878  }
3879  }
3880  if (mem == 0)
3881  return ENOMEM;
3882  else {
3883  *pp = mem;
3884  return 0;
3885  }
3886 }
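/*
 * Editor's sketch (illustrative only, not part of the original source):
 * the EINVAL test above enforces the POSIX rule that alignment be a
 * power-of-two multiple of sizeof(void*).  Assuming sizeof(void*) == 8:
 *
 *   void *p;
 *   dlposix_memalign (&p, 64, 1000);   // d = 8, power of two: accepted
 *   dlposix_memalign (&p, 24, 1000);   // d = 3, d & (d-1) != 0: EINVAL
 *   dlposix_memalign (&p, 4, 1000);    // r = 4 % 8 != 0: EINVAL
 */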
3887 
3888 void* dlvalloc(size_t bytes) {
3889  size_t pagesz;
3890  ensure_initialization();
3891  pagesz = mparams.page_size;
3892  return dlmemalign(pagesz, bytes);
3893 }
3894 
3895 void* dlpvalloc(size_t bytes) {
3896  size_t pagesz;
3897  ensure_initialization();
3898  pagesz = mparams.page_size;
3899  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3900 }
3901 
3902 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3903  void* chunks[]) {
3904  size_t sz = elem_size; /* serves as 1-element array */
3905  return ialloc(gm, n_elements, &sz, 3, chunks);
3906 }
3907 
3908 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3909  void* chunks[]) {
3910  return ialloc(gm, n_elements, sizes, 0, chunks);
3911 }
3912 
3913 size_t dlbulk_free(void* array[], size_t nelem) {
3914  return internal_bulk_free(gm, array, nelem);
3915 }
3916 
3917 #if MALLOC_INSPECT_ALL
3918 void dlmalloc_inspect_all(void(*handler)(void *start,
3919  void *end,
3920  size_t used_bytes,
3921  void* callback_arg),
3922  void* arg) {
3923  ensure_initialization();
3924  if (!PREACTION(gm)) {
3925  internal_inspect_all(gm, handler, arg);
3926  POSTACTION(gm);
3927  }
3928 }
3929 #endif /* MALLOC_INSPECT_ALL */
3930 
3931 int dlmalloc_trim(size_t pad) {
3932  int result = 0;
3933  ensure_initialization();
3934  if (!PREACTION(gm)) {
3935  result = sys_trim(gm, pad);
3936  POSTACTION(gm);
3937  }
3938  return result;
3939 }
3940 
3941 size_t dlmalloc_footprint(void) {
3942  return gm->footprint;
3943 }
3944 
3945 size_t dlmalloc_max_footprint(void) {
3946  return gm->max_footprint;
3947 }
3948 
3949 size_t dlmalloc_footprint_limit(void) {
3950  size_t maf = gm->footprint_limit;
3951  return maf == 0 ? MAX_SIZE_T : maf;
3952 }
3953 
3954 size_t dlmalloc_set_footprint_limit(size_t bytes) {
3955  size_t result; /* invert sense of 0 */
3956  if (bytes == 0)
3957  result = granularity_align(1); /* Use minimal size */
3958  if (bytes == MAX_SIZE_T)
3959  result = 0; /* disable */
3960  else
3961  result = granularity_align(bytes);
3962  return gm->footprint_limit = result;
3963 }
3964 
3965 #if !NO_MALLINFO
3966 struct dlmallinfo dlmallinfo(void) {
3967  return internal_mallinfo(gm);
3968 }
3969 #endif /* NO_MALLINFO */
3970 
3971 #if !NO_MALLOC_STATS
3972 void dlmalloc_stats() {
3973  internal_malloc_stats(gm);
3974 }
3975 #endif /* NO_MALLOC_STATS */
3976 
3977 int dlmallopt(int param_number, int value) {
3978  return change_mparam(param_number, value);
3979 }
3980 
3981 size_t dlmalloc_usable_size(void* mem) {
3982  if (mem != 0) {
3983  mchunkptr p = mem2chunk(mem);
3984  if (is_inuse(p))
3985  return chunksize(p) - overhead_for(p);
3986  }
3987  return 0;
3988 }
3989 
3990 #endif /* !ONLY_MSPACES */
3991 
3992 /* ----------------------------- user mspaces ---------------------------- */
3993 
3994 #if MSPACES
3995 
3996 static mstate init_user_mstate(char* tbase, size_t tsize) {
3997  size_t msize = pad_request(sizeof(struct malloc_state));
3998  mchunkptr mn;
3999  mchunkptr msp = align_as_chunk(tbase);
4000  mstate m = (mstate)(chunk2mem(msp));
4001  memset(m, 0, msize);
4002  (void)INITIAL_LOCK(&m->mutex);
4003  msp->head = (msize|INUSE_BITS);
4004  m->seg.base = m->least_addr = tbase;
4005  m->seg.size = m->footprint = m->max_footprint = tsize;
4006  m->magic = mparams.magic;
4007  m->release_checks = MAX_RELEASE_CHECK_RATE;
4008  m->mflags = mparams.default_mflags;
4009  m->extp = 0;
4010  m->exts = 0;
4011  disable_contiguous(m);
4012  init_bins(m);
4013  mn = next_chunk(mem2chunk(m));
4014  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4015  check_top_chunk(m, m->top);
4016  return m;
4017 }
4018 
4019 mspace create_mspace(size_t capacity, int locked) {
4020  mstate m = 0;
4021  size_t msize;
4022  ensure_initialization();
4023  msize = pad_request(sizeof(struct malloc_state));
4024  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4025  size_t rs = ((capacity == 0)? mparams.granularity :
4026  (capacity + TOP_FOOT_SIZE + msize));
4027  size_t tsize = granularity_align(rs);
4028  char* tbase = (char*)(CALL_MMAP(tsize));
4029  if (tbase != CMFAIL) {
4030  m = init_user_mstate(tbase, tsize);
4031  m->seg.sflags = USE_MMAP_BIT;
4032  set_lock(m, locked);
4033  }
4034  }
4035  return (mspace)m;
4036 }
4037 
4038 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4039  mstate m = 0;
4040  size_t msize;
4041  ensure_initialization();
4042  msize = pad_request(sizeof(struct malloc_state));
4043  if (capacity > msize + TOP_FOOT_SIZE &&
4044  capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4045  m = init_user_mstate((char*)base, capacity);
4046  m->seg.sflags = EXTERN_BIT;
4047  set_lock(m, locked);
4048  }
4049  return (mspace)m;
4050 }
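/*
 * Editor's sketch (illustrative only, not part of the original source):
 * an application can run a private heap over memory it already owns.  The
 * buffer helper and size below are hypothetical; note that destroy_mspace()
 * only unmaps segments it mmapped itself, so an EXTERN_BIT base segment is
 * left for the caller to release:
 *
 *   char *base = app_get_region (1 << 20);                   // hypothetical 1 MB region
 *   mspace h = create_mspace_with_base (base, 1 << 20, 1);   // locked != 0: use the mspace lock
 *   void *obj = mspace_malloc (h, 128);
 *   mspace_free (h, obj);
 *   destroy_mspace (h);
 */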
4051 
4052 int mspace_track_large_chunks(mspace msp, int enable) {
4053  int ret = 0;
4054  mstate ms = (mstate)msp;
4055  if (!PREACTION(ms)) {
4056  if (!use_mmap(ms)) {
4057  ret = 1;
4058  }
4059  if (!enable) {
4060  enable_mmap(ms);
4061  } else {
4062  disable_mmap(ms);
4063  }
4064  POSTACTION(ms);
4065  }
4066  return ret;
4067 }
4068 
4069 size_t destroy_mspace(mspace msp) {
4070  size_t freed = 0;
4071  mstate ms = (mstate)msp;
4072  if (ok_magic(ms)) {
4073  msegmentptr sp = &ms->seg;
4074  (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4075  while (sp != 0) {
4076  char* base = sp->base;
4077  size_t size = sp->size;
4078  flag_t flag = sp->sflags;
4079  (void)base; /* placate people compiling -Wunused-variable */
4080  sp = sp->next;
4081  if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4082  CALL_MUNMAP(base, size) == 0)
4083  freed += size;
4084  }
4085  }
4086  else {
4087  USAGE_ERROR_ACTION(ms,ms);
4088  }
4089  return freed;
4090 }
4091 
4092 void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
4093 {
4094  mstate ms;
4095  msegment *this_seg;
4096 
4097  ms = (mstate)msp;
4098  this_seg = &ms->seg;
4099 
4100  *addrp = this_seg->base;
4101  *sizep = this_seg->size;
4102 }
4103 
4104 int mspace_is_heap_object (mspace msp, void *p)
4105 {
4106  msegment *this_seg;
4107  char *pp, *base;
4108  mstate ms;
4109 
4110  ms = (mstate)msp;
4111 
4112  this_seg = &ms->seg;
4113  pp = (char *) p;
4114 
4115  while (this_seg)
4116  {
4117  base = this_seg->base;
4118  if (pp >= base && pp < (base + this_seg->size))
4119  return 1;
4120  this_seg = this_seg->next;
4121  }
4122 
4123  if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
4124  return 1;
4125 
4126  return 0;
4127 }
4128 
4129 void *mspace_least_addr (mspace msp)
4130 {
4131  mstate ms = (mstate) msp;
4132  return (void *) ms->least_addr;
4133 }
4134 
4135 void mspace_disable_expand (mspace msp)
4136 {
4137  mstate ms = (mstate)msp;
4138 
4139  disable_expand (ms);
4140 }
4141 
4142 int mspace_enable_disable_trace (mspace msp, int enable)
4143 {
4144  mstate ms = (mstate)msp;
4145  int was_enabled = 0;
4146 
4147  if (use_trace(ms))
4148  was_enabled = 1;
4149 
4150  if (enable)
4151  enable_trace (ms);
4152  else
4153  disable_trace (ms);
4154 
4155  return (was_enabled);
4156 }
4157 
4158 int mspace_is_traced (mspace msp)
4159 {
4160  mstate ms = (mstate)msp;
4161 
4162  if (use_trace(ms))
4163  return 1;
4164  return 0;
4165 }
4166 
4167 void* mspace_get_aligned (mspace msp,
4168  unsigned long n_user_data_bytes,
4169  unsigned long align,
4170  unsigned long align_offset) {
4171  char *rv;
4172  unsigned long searchp;
4173  unsigned *wwp; /* "where's Waldo" pointer */
4174  mstate ms = (mstate)msp;
4175 
4176  /*
4177  * Allocate space for the "Where's Waldo?" pointer, which records the
4178  * offset back to the base of the dlmalloc object
4179  */
4180  n_user_data_bytes += sizeof(unsigned);
4181 
4182  /*
4183  * Alignment requests less than the size of an mmx vector are ignored
4184  */
4185  if (align < sizeof (uword)) {
4186  rv = mspace_malloc (msp, n_user_data_bytes);
4187  if (rv == 0)
4188  return rv;
4189 
4190  if (use_trace(ms)) {
4191  mchunkptr p = mem2chunk(rv);
4192  size_t psize = chunksize(p);
4193 
4194  mheap_get_trace ((unsigned long)rv + sizeof (unsigned), psize);
4195  }
4196 
4197  wwp = (unsigned *)rv;
4198  *wwp = 0;
4199  rv += sizeof (unsigned);
4200 
4201  return rv;
4202  }
4203 
4204  /*
4205  * Alignment requests greater than 4K must be at offset zero,
4206  * and must be freed using mspace_put_no_offset - or never freed -
4207  * since the "Where's Waldo?" pointer would waste too much space.
4208  *
4209  * Waldo is the address of the chunk of memory returned by mspace_malloc,
4210  * which we need later to call mspace_free...
4211  */
4212  if (align > 4<<10 || align_offset == ~0UL) {
4213  n_user_data_bytes -= sizeof(unsigned);
4214  assert(align_offset == 0);
4215  rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
4216 
4217  /* Trace the allocation */
4218  if (rv && use_trace(ms)) {
4219  mchunkptr p = mem2chunk(rv);
4220  size_t psize = chunksize(p);
4221  mheap_get_trace ((unsigned long)rv, psize);
4222  }
4223  return rv;
4224  }
4225 
4226  align = clib_max (align, MALLOC_ALIGNMENT);
4227  align = max_pow2 (align);
4228 
4229  /* Correct align offset to be smaller than alignment. */
4230  align_offset &= (align - 1);
4231 
4232  n_user_data_bytes += align;
4233  rv = mspace_malloc (msp, n_user_data_bytes);
4234 
4235  if (rv == 0)
4236  return rv;
4237 
4238  /* Honor the alignment request */
4239  searchp = (unsigned long)(rv + sizeof (unsigned));
4240 
4241 #if 0 /* this is the idea... */
4242  while ((searchp + align_offset) % align)
4243  searchp++;
4244 #endif
4245 
4246  {
4247  unsigned long where_now, delta;
4248 
4249  where_now = (searchp + align_offset) % align;
4250  delta = align - where_now;
4251 
4252  searchp += delta;
4253  }
4254 
4255  wwp = (unsigned *)(searchp - sizeof(unsigned));
4256  *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
4257  assert (*wwp < align);
4258 
4259  if (use_trace(ms)) {
4260  mchunkptr p = mem2chunk(rv);
4261  size_t psize = chunksize(p);
4262  mheap_get_trace (searchp, psize);
4263  }
4264  return (void *) searchp;
4265 }
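/*
 * Editor's sketch (illustrative only, not part of the original source):
 * for the general case above, the "Where's Waldo" word sits just below the
 * aligned pointer handed back to the caller and stores the distance from
 * rv (the start of the real dlmalloc object) to that word, so mspace_put()
 * below can recover rv and free it:
 *
 *   rv                      searchp   ((searchp + align_offset) % align == 0)
 *   |                       |
 *   v                       v
 *   +----- padding ---+-----+--------- user data ---------+
 *                     | wwp |
 *                     +-----+
 *           *wwp == searchp - (rv + sizeof (unsigned))
 *
 *   void *p = mspace_get_aligned (msp, 256, 64, 0);   // 256 bytes at 64-byte alignment
 *   mspace_put (msp, p);                              // walks back via *wwp, then mspace_free()s
 */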
4266 
4267 void mspace_put (mspace msp, void *p_arg)
4268 {
4269  char *object_header;
4270  unsigned *wwp;
4271  mstate ms = (mstate)msp;
4272 
4273  /* Find the object header delta */
4274  wwp = (unsigned *)p_arg;
4275  wwp --;
4276 
4277  /* Recover the dlmalloc object pointer */
4278  object_header = (char *)wwp;
4279  object_header -= *wwp;
4280 
4281  /* Tracing (if enabled) */
4282  if (use_trace(ms))
4283  {
4284  mchunkptr p = mem2chunk(object_header);
4285  size_t psize = chunksize(p);
4286 
4287  mheap_put_trace ((unsigned long)p_arg, psize);
4288  }
4289 
4290 #if CLIB_DEBUG > 0
4291  /* Poison the object */
4292  {
4293  size_t psize = mspace_usable_size (object_header);
4294  memset (object_header, 0x13, psize);
4295  }
4296 #endif
4297 
4298  /* And free it... */
4299  mspace_free (msp, object_header);
4300 }
4301 
4302 void mspace_put_no_offset (mspace msp, void *p_arg)
4303 {
4304  mstate ms = (mstate)msp;
4305 
4306  if (use_trace(ms))
4307  {
4308  mchunkptr p = mem2chunk(p_arg);
4309  size_t psize = chunksize(p);
4310 
4311  mheap_put_trace ((unsigned long)p_arg, psize);
4312  }
4313  mspace_free (msp, p_arg);
4314 }
4315 
4316 size_t mspace_usable_size_with_delta (const void *p)
4317 {
4318  size_t usable_size;
4319  char *object_header;
4320  unsigned *wwp;
4321 
4322  /* Find the object header delta */
4323  wwp = (unsigned *)p;
4324  wwp --;
4325 
4326  /* Recover the dlmalloc object pointer */
4327  object_header = (char *)wwp;
4328  object_header -= *wwp;
4329 
4330  usable_size = mspace_usable_size (object_header);
4331  /* account for the offset and the size of the offset... */
4332  usable_size -= (*wwp + sizeof (*wwp));
4333  return usable_size;
4334 }
4335 
4336 /*
4337  mspace versions of routines are near-clones of the global
4338  versions. This is not so nice but better than the alternatives.
4339 */
4340 
4341 void* mspace_malloc(mspace msp, size_t bytes) {
4342  mstate ms = (mstate)msp;
4343  if (!ok_magic(ms)) {
4344  USAGE_ERROR_ACTION(ms,ms);
4345  return 0;
4346  }
4347  if (!PREACTION(ms)) {
4348  void* mem;
4349  size_t nb;
4350  if (bytes <= MAX_SMALL_REQUEST) {
4351  bindex_t idx;
4352  binmap_t smallbits;
4353  nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4354  idx = small_index(nb);
4355  smallbits = ms->smallmap >> idx;
4356 
4357  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4358  mchunkptr b, p;
4359  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4360  b = smallbin_at(ms, idx);
4361  p = b->fd;
4362  assert(chunksize(p) == small_index2size(idx));
4363  unlink_first_small_chunk(ms, b, p, idx);
4364  set_inuse_and_pinuse(ms, p, small_index2size(idx));
4365  mem = chunk2mem(p);
4366  check_malloced_chunk(ms, mem, nb);
4367  goto postaction;
4368  }
4369 
4370  else if (nb > ms->dvsize) {
4371  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4372  mchunkptr b, p, r;
4373  size_t rsize;
4374  bindex_t i;
4375  binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4376  binmap_t leastbit = least_bit(leftbits);
4377  compute_bit2idx(leastbit, i);
4378  b = smallbin_at(ms, i);
4379  p = b->fd;
4380  assert(chunksize(p) == small_index2size(i));
4381  unlink_first_small_chunk(ms, b, p, i);
4382  rsize = small_index2size(i) - nb;
4383  /* Fit here cannot be remainderless if 4-byte sizes */
4384  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4385  set_inuse_and_pinuse(ms, p, small_index2size(i));
4386  else {
4387  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4388  r = chunk_plus_offset(p, nb);
4389  set_size_and_pinuse_of_free_chunk(r, rsize);
4390  replace_dv(ms, r, rsize);
4391  }
4392  mem = chunk2mem(p);
4393  check_malloced_chunk(ms, mem, nb);
4394  goto postaction;
4395  }
4396 
4397  else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4398  check_malloced_chunk(ms, mem, nb);
4399  goto postaction;
4400  }
4401  }
4402  }
4403  else if (bytes >= MAX_REQUEST)
4404  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4405  else {
4406  nb = pad_request(bytes);
4407  if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4408  check_malloced_chunk(ms, mem, nb);
4409  goto postaction;
4410  }
4411  }
4412 
4413  if (nb <= ms->dvsize) {
4414  size_t rsize = ms->dvsize - nb;
4415  mchunkptr p = ms->dv;
4416  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4417  mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4418  ms->dvsize = rsize;
4419  set_size_and_pinuse_of_free_chunk(r, rsize);
4420  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4421  }
4422  else { /* exhaust dv */
4423  size_t dvs = ms->dvsize;
4424  ms->dvsize = 0;
4425  ms->dv = 0;
4426  set_inuse_and_pinuse(ms, p, dvs);
4427  }
4428  mem = chunk2mem(p);
4429  check_malloced_chunk(ms, mem, nb);
4430  goto postaction;
4431  }
4432 
4433  else if (nb < ms->topsize) { /* Split top */
4434  size_t rsize = ms->topsize -= nb;
4435  mchunkptr p = ms->top;
4436  mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4437  r->head = rsize | PINUSE_BIT;
4438  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4439  mem = chunk2mem(p);
4440  check_top_chunk(ms, ms->top);
4441  check_malloced_chunk(ms, mem, nb);
4442  goto postaction;
4443  }
4444 
4445  mem = sys_alloc(ms, nb);
4446 
4447  postaction:
4448  POSTACTION(ms);
4449  return mem;
4450  }
4451 
4452  return 0;
4453 }
4454 
4455 void mspace_free(mspace msp, void* mem) {
4456  if (mem != 0) {
4457  mchunkptr p = mem2chunk(mem);
4458 #if FOOTERS
4459  mstate fm = get_mstate_for(p);
4460  (void)msp; /* placate people compiling -Wunused */
4461 #else /* FOOTERS */
4462  mstate fm = (mstate)msp;
4463 #endif /* FOOTERS */
4464  if (!ok_magic(fm)) {
4465  USAGE_ERROR_ACTION(fm, p);
4466  return;
4467  }
4468  if (!PREACTION(fm)) {
4469  check_inuse_chunk(fm, p);
4470  if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4471  size_t psize = chunksize(p);
4472  mchunkptr next = chunk_plus_offset(p, psize);
4473  if (!pinuse(p)) {
4474  size_t prevsize = p->prev_foot;
4475  if (is_mmapped(p)) {
4476  psize += prevsize + MMAP_FOOT_PAD;
4477  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4478  fm->footprint -= psize;
4479  goto postaction;
4480  }
4481  else {
4482  mchunkptr prev = chunk_minus_offset(p, prevsize);
4483  psize += prevsize;
4484  p = prev;
4485  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4486  if (p != fm->dv) {
4487  unlink_chunk(fm, p, prevsize);
4488  }
4489  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4490  fm->dvsize = psize;
4491  set_free_with_pinuse(p, psize, next);
4492  goto postaction;
4493  }
4494  }
4495  else
4496  goto erroraction;
4497  }
4498  }
4499 
4500  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4501  if (!cinuse(next)) { /* consolidate forward */
4502  if (next == fm->top) {
4503  size_t tsize = fm->topsize += psize;
4504  fm->top = p;
4505  p->head = tsize | PINUSE_BIT;
4506  if (p == fm->dv) {
4507  fm->dv = 0;
4508  fm->dvsize = 0;
4509  }
4510  if (should_trim(fm, tsize))
4511  sys_trim(fm, 0);
4512  goto postaction;
4513  }
4514  else if (next == fm->dv) {
4515  size_t dsize = fm->dvsize += psize;
4516  fm->dv = p;
4517  set_size_and_pinuse_of_free_chunk(p, dsize);
4518  goto postaction;
4519  }
4520  else {
4521  size_t nsize = chunksize(next);
4522  psize += nsize;
4523  unlink_chunk(fm, next, nsize);
4524  set_size_and_pinuse_of_free_chunk(p, psize);
4525  if (p == fm->dv) {
4526  fm->dvsize = psize;
4527  goto postaction;
4528  }
4529  }
4530  }
4531  else
4532  set_free_with_pinuse(p, psize, next);
4533 
4534  if (is_small(psize)) {
4535  insert_small_chunk(fm, p, psize);
4536  check_free_chunk(fm, p);
4537  }
4538  else {
4539  tchunkptr tp = (tchunkptr)p;
4540  insert_large_chunk(fm, tp, psize);
4541  check_free_chunk(fm, p);
4542  if (--fm->release_checks == 0)
4543  release_unused_segments(fm);
4544  }
4545  goto postaction;
4546  }
4547  }
4548  erroraction:
4549  USAGE_ERROR_ACTION(fm, p);
4550  postaction:
4551  POSTACTION(fm);
4552  }
4553  }
4554 }
4555 
4556 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4557  void* mem;
4558  size_t req = 0;
4559  mstate ms = (mstate)msp;
4560  if (!ok_magic(ms)) {
4561  USAGE_ERROR_ACTION(ms,ms);
4562  return 0;
4563  }
4564  if (n_elements != 0) {
4565  req = n_elements * elem_size;
4566  if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4567  (req / n_elements != elem_size))
4568  req = MAX_SIZE_T; /* force downstream failure on overflow */
4569  }
4570  mem = internal_malloc(ms, req);
4571  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4572  memset(mem, 0, req);
4573  return mem;
4574 }
4575 
4576 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4577  void* mem = 0;
4578  if (oldmem == 0) {
4579  mem = mspace_malloc(msp, bytes);
4580  }
4581  else if (bytes >= MAX_REQUEST) {
4582  MALLOC_FAILURE_ACTION;
4583  }
4584 #ifdef REALLOC_ZERO_BYTES_FREES
4585  else if (bytes == 0) {
4586  mspace_free(msp, oldmem);
4587  }
4588 #endif /* REALLOC_ZERO_BYTES_FREES */
4589  else {
4590  size_t nb = request2size(bytes);
4591  mchunkptr oldp = mem2chunk(oldmem);
4592 #if ! FOOTERS
4593  mstate m = (mstate)msp;
4594 #else /* FOOTERS */
4595  mstate m = get_mstate_for(oldp);
4596  if (!ok_magic(m)) {
4597  USAGE_ERROR_ACTION(m, oldmem);
4598  return 0;
4599  }
4600 #endif /* FOOTERS */
4601  if (!PREACTION(m)) {
4602  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4603  POSTACTION(m);
4604  if (newp != 0) {
4605  check_inuse_chunk(m, newp);
4606  mem = chunk2mem(newp);
4607  }
4608  else {
4609  mem = mspace_malloc(m, bytes);
4610  if (mem != 0) {
4611  size_t oc = chunksize(oldp) - overhead_for(oldp);
4612  memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4613  mspace_free(m, oldmem);
4614  }
4615  }
4616  }
4617  }
4618  return mem;
4619 }
4620 
4621 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4622  void* mem = 0;
4623  if (oldmem != 0) {
4624  if (bytes >= MAX_REQUEST) {
4625  MALLOC_FAILURE_ACTION;
4626  }
4627  else {
4628  size_t nb = request2size(bytes);
4629  mchunkptr oldp = mem2chunk(oldmem);
4630 #if ! FOOTERS
4631  mstate m = (mstate)msp;
4632 #else /* FOOTERS */
4633  mstate m = get_mstate_for(oldp);
4634  (void)msp; /* placate people compiling -Wunused */
4635  if (!ok_magic(m)) {
4636  USAGE_ERROR_ACTION(m, oldmem);
4637  return 0;
4638  }
4639 #endif /* FOOTERS */
4640  if (!PREACTION(m)) {
4641  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4642  POSTACTION(m);
4643  if (newp == oldp) {
4644  check_inuse_chunk(m, newp);
4645  mem = oldmem;
4646  }
4647  }
4648  }
4649  }
4650  return mem;
4651 }
4652 
4653 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4654  mstate ms = (mstate)msp;
4655  if (!ok_magic(ms)) {
4656  USAGE_ERROR_ACTION(ms,ms);
4657  return 0;
4658  }
4659  if (alignment <= MALLOC_ALIGNMENT)
4660  return mspace_malloc(msp, bytes);
4661  return internal_memalign(ms, alignment, bytes);
4662 }
4663 
4664 void** mspace_independent_calloc(mspace msp, size_t n_elements,
4665  size_t elem_size, void* chunks[]) {
4666  size_t sz = elem_size; /* serves as 1-element array */
4667  mstate ms = (mstate)msp;
4668  if (!ok_magic(ms)) {
4669  USAGE_ERROR_ACTION(ms,ms);
4670  return 0;
4671  }
4672  return ialloc(ms, n_elements, &sz, 3, chunks);
4673 }
4674 
4675 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4676  size_t sizes[], void* chunks[]) {
4677  mstate ms = (mstate)msp;
4678  if (!ok_magic(ms)) {
4679  USAGE_ERROR_ACTION(ms,ms);
4680  return 0;
4681  }
4682  return ialloc(ms, n_elements, sizes, 0, chunks);
4683 }
4684 
4685 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4686  return internal_bulk_free((mstate)msp, array, nelem);
4687 }
4688 
4689 #if MALLOC_INSPECT_ALL
4690 void mspace_inspect_all(mspace msp,
4691  void(*handler)(void *start,
4692  void *end,
4693  size_t used_bytes,
4694  void* callback_arg),
4695  void* arg) {
4696  mstate ms = (mstate)msp;
4697  if (ok_magic(ms)) {
4698  if (!PREACTION(ms)) {
4699  internal_inspect_all(ms, handler, arg);
4700  POSTACTION(ms);
4701  }
4702  }
4703  else {
4704  USAGE_ERROR_ACTION(ms,ms);
4705  }
4706 }
4707 #endif /* MALLOC_INSPECT_ALL */
4708 
4709 int mspace_trim(mspace msp, size_t pad) {
4710  int result = 0;
4711  mstate ms = (mstate)msp;
4712  if (ok_magic(ms)) {
4713  if (!PREACTION(ms)) {
4714  result = sys_trim(ms, pad);
4715  POSTACTION(ms);
4716  }
4717  }
4718  else {
4719  USAGE_ERROR_ACTION(ms,ms);
4720  }
4721  return result;
4722 }
4723 
4724 #if !NO_MALLOC_STATS
4725 void mspace_malloc_stats(mspace msp) {
4726  mstate ms = (mstate)msp;
4727  if (ok_magic(ms)) {
4728  internal_malloc_stats(ms);
4729  }
4730  else {
4731  USAGE_ERROR_ACTION(ms,ms);
4732  }
4733 }
4734 #endif /* NO_MALLOC_STATS */
4735 
4736 size_t mspace_footprint(mspace msp) {
4737  size_t result = 0;
4738  mstate ms = (mstate)msp;
4739  if (ok_magic(ms)) {
4740  result = ms->footprint;
4741  }
4742  else {
4743  USAGE_ERROR_ACTION(ms,ms);
4744  }
4745  return result;
4746 }
4747 
4748 size_t mspace_max_footprint(mspace msp) {
4749  size_t result = 0;
4750  mstate ms = (mstate)msp;
4751  if (ok_magic(ms)) {
4752  result = ms->max_footprint;
4753  }
4754  else {
4755  USAGE_ERROR_ACTION(ms,ms);
4756  }
4757  return result;
4758 }
4759 
4760 size_t mspace_footprint_limit(mspace msp) {
4761  size_t result = 0;
4762  mstate ms = (mstate)msp;
4763  if (ok_magic(ms)) {
4764  size_t maf = ms->footprint_limit;
4765  result = (maf == 0) ? MAX_SIZE_T : maf;
4766  }
4767  else {
4768  USAGE_ERROR_ACTION(ms,ms);
4769  }
4770  return result;
4771 }
4772 
4773 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4774  size_t result = 0;
4775  mstate ms = (mstate)msp;
4776  if (ok_magic(ms)) {
4777  if (bytes == 0)
4778  result = granularity_align(1); /* Use minimal size */
4779  if (bytes == MAX_SIZE_T)
4780  result = 0; /* disable */
4781  else
4782  result = granularity_align(bytes);
4783  ms->footprint_limit = result;
4784  }
4785  else {
4786  USAGE_ERROR_ACTION(ms,ms);
4787  }
4788  return result;
4789 }
4790 
4791 #if !NO_MALLINFO
4792 struct dlmallinfo mspace_mallinfo(mspace msp) {
4793  mstate ms = (mstate)msp;
4794  if (!ok_magic(ms)) {
4795  USAGE_ERROR_ACTION(ms,ms);
4796  }
4797  return internal_mallinfo(ms);
4798 }
4799 #endif /* NO_MALLINFO */
4800 
4801 size_t mspace_usable_size(const void* mem) {
4802  if (mem != 0) {
4803  mchunkptr p = mem2chunk(mem);
4804  if (is_inuse(p))
4805  return chunksize(p) - overhead_for(p);
4806  }
4807  return 0;
4808 }
4809 
4810 int mspace_mallopt(int param_number, int value) {
4811  return change_mparam(param_number, value);
4812 }
4813 
4814 #endif /* MSPACES */
4815 
4816 
4817 /* -------------------- Alternative MORECORE functions ------------------- */
4818 
4819 /*
4820  Guidelines for creating a custom version of MORECORE:
4821 
4822  * For best performance, MORECORE should allocate in multiples of pagesize.
4823  * MORECORE may allocate more memory than requested. (Or even less,
4824  but this will usually result in a malloc failure.)
4825  * MORECORE must not allocate memory when given argument zero, but
4826  instead return one past the end address of memory from previous
4827  nonzero call.
4828  * For best performance, consecutive calls to MORECORE with positive
4829  arguments should return increasing addresses, indicating that
4830  space has been contiguously extended.
4831  * Even though consecutive calls to MORECORE need not return contiguous
4832  addresses, it must be OK for malloc'ed chunks to span multiple
4833  regions in those cases where they do happen to be contiguous.
4834  * MORECORE need not handle negative arguments -- it may instead
4835  just return MFAIL when given negative arguments.
4836  Negative arguments are always multiples of pagesize. MORECORE
4837  must not misinterpret negative args as large positive unsigned
4838  args. You can suppress all such calls from even occurring by defining
4839  MORECORE_CANNOT_TRIM.
4840 
4841  As an example alternative MORECORE, here is a custom allocator
4842  kindly contributed for pre-OSX macOS. It uses virtually but not
4843  necessarily physically contiguous non-paged memory (locked in,
4844  present and won't get swapped out). You can use it by uncommenting
4845  this section, adding some #includes, and setting up the appropriate
4846  defines above:
4847 
4848  #define MORECORE osMoreCore
4849 
4850  There is also a shutdown routine that should somehow be called for
4851  cleanup upon program exit.
4852 
4853  #define MAX_POOL_ENTRIES 100
4854  #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4855  static int next_os_pool;
4856  void *our_os_pools[MAX_POOL_ENTRIES];
4857 
4858  void *osMoreCore(int size)
4859  {
4860  void *ptr = 0;
4861  static void *sbrk_top = 0;
4862 
4863  if (size > 0)
4864  {
4865  if (size < MINIMUM_MORECORE_SIZE)
4866  size = MINIMUM_MORECORE_SIZE;
4867  if (CurrentExecutionLevel() == kTaskLevel)
4868  ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4869  if (ptr == 0)
4870  {
4871  return (void *) MFAIL;
4872  }
4873  // save ptrs so they can be freed during cleanup
4874  our_os_pools[next_os_pool] = ptr;
4875  next_os_pool++;
4876  ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4877  sbrk_top = (char *) ptr + size;
4878  return ptr;
4879  }
4880  else if (size < 0)
4881  {
4882  // we don't currently support shrink behavior
4883  return (void *) MFAIL;
4884  }
4885  else
4886  {
4887  return sbrk_top;
4888  }
4889  }
4890 
4891  // cleanup any allocated memory pools
4892  // called as last thing before shutting down driver
4893 
4894  void osCleanupMem(void)
4895  {
4896  void **ptr;
4897 
4898  for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4899  if (*ptr)
4900  {
4901  PoolDeallocate(*ptr);
4902  *ptr = 0;
4903  }
4904  }
4905 
4906 */
4907 
4908 
4909 /* -----------------------------------------------------------------------
4910 History:
4911  v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4912  * fix bad comparison in dlposix_memalign
4913  * don't reuse adjusted asize in sys_alloc
4914  * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4915  * reduce compiler warnings -- thanks to all who reported/suggested these
4916 
4917  v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4918  * Always perform unlink checks unless INSECURE
4919  * Add posix_memalign.
4920  * Improve realloc to expand in more cases; expose realloc_in_place.
4921  Thanks to Peter Buhr for the suggestion.
4922  * Add footprint_limit, inspect_all, bulk_free. Thanks
4923  to Barry Hayes and others for the suggestions.
4924  * Internal refactorings to avoid calls while holding locks
4925  * Use non-reentrant locks by default. Thanks to Roland McGrath
4926  for the suggestion.
4927  * Small fixes to mspace_destroy, reset_on_error.
4928  * Various configuration extensions/changes. Thanks
4929  to all who contributed these.
4930 
4931  V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4932  * Update Creative Commons URL
4933 
4934  V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4935  * Use zeros instead of prev foot for is_mmapped
4936  * Add mspace_track_large_chunks; thanks to Jean Brouwers
4937  * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4938  * Fix insufficient sys_alloc padding when using 16byte alignment
4939  * Fix bad error check in mspace_footprint
4940  * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4941  * Reentrant spin locks; thanks to Earl Chew and others
4942  * Win32 improvements; thanks to Niall Douglas and Earl Chew
4943  * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4944  * Extension hook in malloc_state
4945  * Various small adjustments to reduce warnings on some compilers
4946  * Various configuration extensions/changes for more platforms. Thanks
4947  to all who contributed these.
4948 
4949  V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4950  * Add max_footprint functions
4951  * Ensure all appropriate literals are size_t
4952  * Fix conditional compilation problem for some #define settings
4953  * Avoid concatenating segments with the one provided
4954  in create_mspace_with_base
4955  * Rename some variables to avoid compiler shadowing warnings
4956  * Use explicit lock initialization.
4957  * Better handling of sbrk interference.
4958  * Simplify and fix segment insertion, trimming and mspace_destroy
4959  * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4960  * Thanks especially to Dennis Flanagan for help on these.
4961 
4962  V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4963  * Fix memalign brace error.
4964 
4965  V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4966  * Fix improper #endif nesting in C++
4967  * Add explicit casts needed for C++
4968 
4969  V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4970  * Use trees for large bins
4971  * Support mspaces
4972  * Use segments to unify sbrk-based and mmap-based system allocation,
4973  removing need for emulation on most platforms without sbrk.
4974  * Default safety checks
4975  * Optional footer checks. Thanks to William Robertson for the idea.
4976  * Internal code refactoring
4977  * Incorporate suggestions and platform-specific changes.
4978  Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4979  Aaron Bachmann, Emery Berger, and others.
4980  * Speed up non-fastbin processing enough to remove fastbins.
4981  * Remove useless cfree() to avoid conflicts with other apps.
4982  * Remove internal memcpy, memset. Compilers handle builtins better.
4983  * Remove some options that no one ever used and rename others.
4984 
4985  V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4986  * Fix malloc_state bitmap array misdeclaration
4987 
4988  V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4989  * Allow tuning of FIRST_SORTED_BIN_SIZE
4990  * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4991  * Better detection and support for non-contiguousness of MORECORE.
4992  Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
4993  * Bypass most of malloc if no frees. Thanks To Emery Berger.
4994  * Fix freeing of old top non-contiguous chunk in sysmalloc.
4995  * Raised default trim and map thresholds to 256K.
4996  * Fix mmap-related #defines. Thanks to Lubos Lunak.
4997  * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
4998  * Branch-free bin calculation
4999  * Default trim and mmap thresholds now 256K.
5000 
5001  V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5002  * Introduce independent_comalloc and independent_calloc.
5003  Thanks to Michael Pachos for motivation and help.
5004  * Make optional .h file available
5005  * Allow > 2GB requests on 32bit systems.
5006  * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5007  Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5008  and Anonymous.
5009  * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5010  helping test this.)
5011  * memalign: check alignment arg
5012  * realloc: don't try to shift chunks backwards, since this
5013  leads to more fragmentation in some programs and doesn't
5014  seem to help in any others.
5015  * Collect all cases in malloc requiring system memory into sysmalloc
5016  * Use mmap as backup to sbrk
5017  * Place all internal state in malloc_state
5018  * Introduce fastbins (although similar to 2.5.1)
5019  * Many minor tunings and cosmetic improvements
5020  * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5021  * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5022  Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5023  * Include errno.h to support default failure action.
5024 
5025  V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5026  * return null for negative arguments
5027  * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5028  * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5029  (e.g. WIN32 platforms)
5030  * Cleanup header file inclusion for WIN32 platforms
5031  * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5032  * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5033  memory allocation routines
5034  * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5035  * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5036  usage of 'assert' in non-WIN32 code
5037  * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5038  avoid infinite loop
5039  * Always call 'fREe()' rather than 'free()'
5040 
5041  V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5042  * Fixed ordering problem with boundary-stamping
5043 
5044  V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5045  * Added pvalloc, as recommended by H.J. Liu
5046  * Added 64bit pointer support mainly from Wolfram Gloger
5047  * Added anonymously donated WIN32 sbrk emulation
5048  * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5049  * malloc_extend_top: fix mask error that caused wastage after
5050  foreign sbrks
5051  * Add linux mremap support code from HJ Liu
5052 
5053  V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5054  * Integrated most documentation with the code.
5055  * Add support for mmap, with help from
5056  Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5057  * Use last_remainder in more cases.
5058  * Pack bins using idea from colin@nyx10.cs.du.edu
5059  * Use ordered bins instead of best-fit threshold
5060  * Eliminate block-local decls to simplify tracing and debugging.
5061  * Support another case of realloc via move into top
5062  * Fix error occurring when initial sbrk_base not word-aligned.
5063  * Rely on page size for units instead of SBRK_UNIT to
5064  avoid surprises about sbrk alignment conventions.
5065  * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5066  (raymond@es.ele.tue.nl) for the suggestion.
5067  * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5068  * More precautions for cases where other routines call sbrk,
5069  courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5070  * Added macros etc., allowing use in linux libc from
5071  H.J. Lu (hjl@gnu.ai.mit.edu)
5072  * Inverted this history list
5073 
5074  V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5075  * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5076  * Removed all preallocation code since under current scheme
5077  the work required to undo bad preallocations exceeds
5078  the work saved in good cases for most test programs.
5079  * No longer use return list or unconsolidated bins since
5080  no scheme using them consistently outperforms those that don't
5081  given above changes.
5082  * Use best fit for very large chunks to prevent some worst-cases.
5083  * Added some support for debugging
5084 
5085  V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5086  * Removed footers when chunks are in use. Thanks to
5087  Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5088 
5089  V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5090  * Added malloc_trim, with help from Wolfram Gloger
5091  (wmglo@Dent.MED.Uni-Muenchen.DE).
5092 
5093  V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5094 
5095  V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5096  * realloc: try to expand in both directions
5097  * malloc: swap order of clean-bin strategy;
5098  * realloc: only conditionally expand backwards
5099  * Try not to scavenge used bins
5100  * Use bin counts as a guide to preallocation
5101  * Occasionally bin return list chunks in first scan
5102  * Add a few optimizations from colin@nyx10.cs.du.edu
5103 
5104  V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5105  * faster bin computation & slightly different binning
5106  * merged all consolidations to one part of malloc proper
5107  (eliminating old malloc_find_space & malloc_clean_bin)
5108  * Scan 2 returns chunks (not just 1)
5109  * Propagate failure in realloc if malloc returns 0
5110  * Add stuff to allow compilation on non-ANSI compilers
5111  from kpv@research.att.com
5112 
5113  V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5114  * removed potential for odd address access in prev_chunk
5115  * removed dependency on getpagesize.h
5116  * misc cosmetics and a bit more internal documentation
5117  * anticosmetics: mangled names in macros to evade debugger strangeness
5118  * tested on sparc, hp-700, dec-mips, rs6000
5119  with gcc & native cc (hp, dec only) allowing
5120  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5121 
5122  Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5123  * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5124  structure of old version, but most details differ.)
5125 
5126 */
Definition: dlmalloc.c:1247
static void init_bins(mstate m)
Definition: dlmalloc.c:2505
#define mmap_align(S)
Definition: dlmalloc.c:1271
void * dlmalloc(size_t bytes)
Definition: dlmalloc.c:3141
static void internal_malloc_stats(mstate m)
Definition: dlmalloc.c:2122
void * dlvalloc(size_t bytes)
Definition: dlmalloc.c:3888
size_t prev_foot
Definition: dlmalloc.c:979
int dlmalloc_trim(size_t pad)
Definition: dlmalloc.c:3931
struct malloc_segment * next
Definition: dlmalloc.c:1056
DLMALLOC_EXPORT void * mspace_memalign(mspace msp, size_t alignment, size_t bytes)
#define insert_small_chunk(M, P, S)
Definition: dlmalloc.c:2164
size_t magic
Definition: dlmalloc.c:1173
#define POSTACTION(M)
Definition: dlmalloc.c:1341
#define ok_inuse(p)
Definition: dlmalloc.c:1602
DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp)
#define SIZE_T_SIZE
Definition: dlmalloc.c:178
uword size
struct malloc_chunk * mchunkptr
Definition: dlmalloc.c:776
size_t prev_foot
Definition: dlmalloc.c:769
DLMALLOC_EXPORT void mheap_put_trace(uword offset, uword size)
Definition: mem_dlmalloc.c:150
static msegmentptr segment_holding(mstate m, char *addr)
Definition: dlmalloc.c:1287
#define gm
Definition: dlmalloc.c:1217
#define is_inuse(p)
Definition: dlmalloc.c:843
MALLINFO_FIELD_TYPE usmblks
Definition: dlmalloc.h:805
DLMALLOC_EXPORT void mspace_disable_expand(mspace msp)
#define left_bits(x)
Definition: dlmalloc.c:1519
#define compute_tree_index(S, I)
Definition: dlmalloc.c:1467
#define DEFAULT_TRIM_THRESHOLD
Definition: dlmalloc.h:695
#define MMAP_FOOT_PAD
Definition: dlmalloc.c:795
size_t trim_threshold
Definition: dlmalloc.c:1204
#define next_pinuse(p)
Definition: dlmalloc.c:861
size_t page_size
Definition: dlmalloc.c:1201
#define use_trace(M)
Definition: dlmalloc.c:1248
#define calloc_must_clear(p)
Definition: dlmalloc.c:883
#define MAX_SMALL_REQUEST
Definition: dlmalloc.c:1161
binmap_t smallmap
Definition: dlmalloc.c:1164
char * least_addr
Definition: dlmalloc.c:1168
#define USE_NONCONTIGUOUS_BIT
Definition: dlmalloc.c:346
static int init_mparams(void)
Definition: dlmalloc.c:1692
static void * prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
Definition: dlmalloc.c:2534
void * dlrealloc(void *oldmem, size_t bytes)
Definition: dlmalloc.c:3782
DLMALLOC_EXPORT int mspace_is_traced(mspace msp)
static void * tmalloc_small(mstate m, size_t nb)
Definition: dlmalloc.c:3102
u8 len
Definition: ip_types.api:90
static void * mmap_alloc(mstate m, size_t nb)
Definition: dlmalloc.c:2420
#define use_mmap(M)
Definition: dlmalloc.c:1236
struct malloc_tree_chunk * tbinptr
Definition: dlmalloc.c:991
size_t magic
Definition: dlmalloc.c:1200
struct malloc_chunk * fd
Definition: dlmalloc.c:771
svmdb_client_t * c
#define small_index(s)
Definition: dlmalloc.c:1414
DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked)
struct malloc_tree_chunk * tchunkptr
Definition: dlmalloc.c:990
#define ACQUIRE_MALLOC_GLOBAL_LOCK()
Definition: dlmalloc.c:392
#define chunk_minus_offset(p, s)
Definition: dlmalloc.c:854
DLMALLOC_EXPORT void mspace_get_address_and_size(mspace msp, char **addrp, size_t *sizep)
DLMALLOC_EXPORT int mspace_mallopt(int, int)
DLMALLOC_EXPORT void ** mspace_independent_comalloc(mspace msp, size_t n_elements, size_t sizes[], void *chunks[])
size_t dlmalloc_max_footprint(void)
Definition: dlmalloc.c:3945
#define FORCEINLINE
Definition: dlmalloc.h:844
#define prev_chunk(p)
Definition: dlmalloc.c:858
#define SIZE_T_BITSIZE
Definition: dlmalloc.c:179
MALLINFO_FIELD_TYPE keepcost
Definition: dlmalloc.h:809
#define check_inuse_chunk(M, P)
Definition: dlmalloc.c:1383
#define CHUNK_OVERHEAD
Definition: dlmalloc.c:789
#define overhead_for(p)
Definition: dlmalloc.c:876
DLMALLOC_EXPORT int mspace_enable_disable_trace(mspace msp, int enable)
#define least_bit(x)
Definition: dlmalloc.c:1516
static uword max_pow2(uword x)
Definition: clib.h:227
#define set_lock(M, L)
Definition: dlmalloc.c:1252
binmap_t treemap
Definition: dlmalloc.c:1165
#define set_free_with_pinuse(p, s, n)
Definition: dlmalloc.c:872
static struct dlmallinfo internal_mallinfo(mstate m)
Definition: dlmalloc.c:2081
#define SYS_ALLOC_PADDING
Definition: dlmalloc.c:1275
unsigned int bindex_t
Definition: dlmalloc.c:778
#define set_inuse(M, p, s)
Definition: dlmalloc.c:1644
#define use_noncontiguous(M)
Definition: dlmalloc.c:1244
#define MAX_REQUEST
Definition: dlmalloc.c:808
#define MAX_RELEASE_CHECK_RATE
Definition: dlmalloc.h:730
#define HALF_MAX_SIZE_T
Definition: dlmalloc.c:190
u8 value
Definition: qos.api:53
#define align_offset(A)
Definition: dlmalloc.c:199
#define HAVE_MORECORE
Definition: dlmalloc.h:673
DLMALLOC_EXPORT void mspace_free(mspace msp, void *mem)
#define PINUSE_BIT
Definition: dlmalloc.c:830
#define CALL_MUNMAP(a, s)
Definition: dlmalloc.c:329
#define USAGE_ERROR_ACTION(m, p)
Definition: dlmalloc.c:1372
#define MORECORE_CONTIGUOUS
Definition: dlmalloc.h:679
#define RELEASE_MALLOC_GLOBAL_LOCK()
Definition: dlmalloc.c:393
#define SIZE_T_ONE
Definition: dlmalloc.c:184
struct malloc_tree_chunk * bk
Definition: dlmalloc.c:982
#define INUSE_BITS
Definition: dlmalloc.c:833
#define set_size_and_pinuse_of_free_chunk(p, s)
Definition: dlmalloc.c:868
size_t topsize
Definition: dlmalloc.c:1167
#define check_free_chunk(M, P)
Definition: dlmalloc.c:1382
#define CORRUPTION_ERROR_ACTION(m)
Definition: dlmalloc.c:1368
size_t granularity
Definition: dlmalloc.c:1202
#define M_MMAP_THRESHOLD
Definition: dlmalloc.h:763
#define ensure_initialization()
Definition: dlmalloc.c:1211
#define SIX_SIZE_T_SIZES
Definition: dlmalloc.c:189
#define is_small(s)
Definition: dlmalloc.c:1413
#define disable_contiguous(M)
Definition: dlmalloc.c:1245
#define pinuse(p)
Definition: dlmalloc.c:841
#define mark_inuse_foot(M, p, s)
Definition: dlmalloc.c:1639
size_t dvsize
Definition: dlmalloc.c:1166
#define clib_max(x, y)
Definition: clib.h:295
struct malloc_tree_chunk * child[2]
Definition: dlmalloc.c:984
template key/value backing page structure
Definition: bihash_doc.h:44
DLMALLOC_EXPORT size_t mspace_usable_size(const void *mem)
#define request2size(req)
Definition: dlmalloc.c:816
static void ** ialloc(mstate m, size_t n_elements, size_t *sizes, int opts, void *chunks[])
Definition: dlmalloc.c:3568
#define MIN_CHUNK_SIZE
Definition: dlmalloc.c:798
#define align_as_chunk(A)
Definition: dlmalloc.c:805
mchunkptr dv
Definition: dlmalloc.c:1169
size_t footprint
Definition: dlmalloc.c:1176
flag_t mflags
Definition: dlmalloc.c:1179
#define is_mmapped(p)
Definition: dlmalloc.c:844
u64 uword
Definition: types.h:112
void * dlrealloc_in_place(void *oldmem, size_t bytes)
Definition: dlmalloc.c:3827
DLMALLOC_EXPORT void * mspace_get_aligned(mspace msp, unsigned long n_user_data_bytes, unsigned long align, unsigned long align_offset)
#define smallbin_at(M, i)
Definition: dlmalloc.c:1419
static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, int can_move)
Definition: dlmalloc.c:3408
bindex_t index
Definition: dlmalloc.c:986
#define next_chunk(p)
Definition: dlmalloc.c:857
#define CALL_MORECORE(S)
Define CALL_MORECORE.
Definition: dlmalloc.c:297
struct clib_bihash_value offset
template key/value backing page structure
#define ok_pinuse(p)
Definition: dlmalloc.c:1604
static void * tmalloc_large(mstate m, size_t nb)
Definition: dlmalloc.c:3031
void * mem
DLMALLOC_EXPORT size_t destroy_mspace(mspace msp)
#define HAVE_MMAP
Definition: dlmalloc.h:655
#define set_inuse_and_pinuse(M, p, s)
Definition: dlmalloc.c:1649
#define compute_bit2idx(X, I)
Definition: dlmalloc.c:1554
#define disable_mmap(M)
Definition: dlmalloc.c:1241
#define CALL_MREMAP(addr, osz, nsz, mv)
Define CALL_MREMAP.
Definition: dlmalloc.c:342
DLMALLOC_EXPORT size_t mspace_footprint(mspace msp)
#define CMFAIL
Definition: dlmalloc.c:214
#define ok_next(p, n)
Definition: dlmalloc.c:1600
DLMALLOC_EXPORT void mheap_get_trace(uword offset, uword size)
Definition: mem_dlmalloc.c:68
#define use_noexpand(M)
Definition: dlmalloc.c:1246
DLMALLOC_EXPORT void mspace_put(mspace msp, void *p)
#define is_extern_segment(S)
Definition: dlmalloc.c:1061
static void * internal_memalign(mstate m, size_t alignment, size_t bytes)
Definition: dlmalloc.c:3487
MALLINFO_FIELD_TYPE fordblks
Definition: dlmalloc.h:808
DLMALLOC_EXPORT size_t mspace_usable_size_with_delta(const void *p)
void ** dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
Definition: dlmalloc.c:3908
static void add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
Definition: dlmalloc.c:2576
#define ok_address(M, a)
Definition: dlmalloc.c:1598
int dlmallopt(int param_number, int value)
Definition: dlmalloc.c:3977
size_t dlmalloc_set_footprint_limit(size_t bytes)
Definition: dlmalloc.c:3954
#define treebin_at(M, i)
Definition: dlmalloc.c:1420
DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable)
#define chunksize(p)
Definition: dlmalloc.c:846
#define minsize_for_tree_index(i)
Definition: dlmalloc.c:1496
#define FOUR_SIZE_T_SIZES
Definition: dlmalloc.c:188
#define set_size_and_pinuse_of_inuse_chunk(M, p, s)
Definition: dlmalloc.c:1654
#define granularity_align(S)
Definition: dlmalloc.c:1262
static int change_mparam(int param_number, int value)
Definition: dlmalloc.c:1786
int dlposix_memalign(void **pp, size_t alignment, size_t bytes)
Definition: dlmalloc.c:3865
DLMALLOC_EXPORT void * mspace_least_addr(mspace msp)
static int has_segment_link(mstate m, msegmentptr ss)
Definition: dlmalloc.c:1298
#define page_align(S)
Definition: dlmalloc.c:1258
size_t dlmalloc_footprint_limit(void)
Definition: dlmalloc.c:3949
static size_t internal_bulk_free(mstate m, void *array[], size_t nelem)
Definition: dlmalloc.c:3694
#define should_trim(M, s)
Definition: dlmalloc.c:1309
DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp)
#define MCHUNK_SIZE
Definition: dlmalloc.c:784
void * dlcalloc(size_t n_elements, size_t elem_size)
Definition: dlmalloc.c:3388
#define EXTERN_BIT
Definition: dlmalloc.c:355
void dlfree(void *mem)
Definition: dlmalloc.c:3279
#define MALLOC_ALIGNMENT
Definition: dlmalloc.h:633
#define PREACTION(M)
Definition: dlmalloc.c:1337
size_t mmap_threshold
Definition: dlmalloc.c:1203