FD.io VPP  v21.01.1
Vector Packet Processing
dlmalloc.c
1 /*
2  This is a version (aka dlmalloc) of malloc/free/realloc written by
3  Doug Lea and released to the public domain, as explained at
4  http://creativecommons.org/publicdomain/zero/1.0/ Send questions,
5  comments, complaints, performance data, etc to dl@cs.oswego.edu
6 */
7 
8 #include <vppinfra/dlmalloc.h>
9 #include <vppinfra/sanitizer.h>
10 
11 /*------------------------------ internal #includes ---------------------- */
12 
13 #ifdef _MSC_VER
14 #pragma warning( disable : 4146 ) /* no "unsigned" warnings */
15 #endif /* _MSC_VER */
16 #if !NO_MALLOC_STATS
17 #include <stdio.h> /* for printing in malloc_stats */
18 #endif /* NO_MALLOC_STATS */
19 #ifndef LACKS_ERRNO_H
20 #include <errno.h> /* for MALLOC_FAILURE_ACTION */
21 #endif /* LACKS_ERRNO_H */
22 #ifdef DEBUG
23 #if DLM_ABORT_ON_ASSERT_FAILURE
24 #undef assert
25 #define assert(x) if(!(x)) DLM_ABORT
26 #else /* DLM_ABORT_ON_ASSERT_FAILURE */
27 #include <assert.h>
28 #endif /* DLM_ABORT_ON_ASSERT_FAILURE */
29 #else /* DEBUG */
30 #ifndef assert
31 #define assert(x)
32 #endif
33 #define DEBUG 0
34 #endif /* DEBUG */
35 #if !defined(WIN32) && !defined(LACKS_TIME_H)
36 #include <time.h> /* for magic initialization */
37 #endif /* WIN32 */
38 #ifndef LACKS_STDLIB_H
39 #include <stdlib.h> /* for abort() */
40 #endif /* LACKS_STDLIB_H */
41 #ifndef LACKS_STRING_H
42 #include <string.h> /* for memset etc */
43 #endif /* LACKS_STRING_H */
44 #if USE_BUILTIN_FFS
45 #ifndef LACKS_STRINGS_H
46 #include <strings.h> /* for ffs */
47 #endif /* LACKS_STRINGS_H */
48 #endif /* USE_BUILTIN_FFS */
49 #if HAVE_MMAP
50 #ifndef LACKS_SYS_MMAN_H
51 /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */
52 #if (defined(linux) && !defined(__USE_GNU))
53 #define __USE_GNU 1
54 #include <sys/mman.h> /* for mmap */
55 #undef __USE_GNU
56 #else
57 #include <sys/mman.h> /* for mmap */
58 #endif /* linux */
59 #endif /* LACKS_SYS_MMAN_H */
60 #ifndef LACKS_FCNTL_H
61 #include <fcntl.h>
62 #endif /* LACKS_FCNTL_H */
63 #endif /* HAVE_MMAP */
64 #ifndef LACKS_UNISTD_H
65 #include <unistd.h> /* for sbrk, sysconf */
66 #else /* LACKS_UNISTD_H */
67 #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
68 extern void* sbrk(ptrdiff_t);
69 #endif /* FreeBSD etc */
70 #endif /* LACKS_UNISTD_H */
71 
72 /* Declarations for locking */
73 #if USE_LOCKS
74 #ifndef WIN32
75 #if defined (__SVR4) && defined (__sun) /* solaris */
76 #include <thread.h>
77 #elif !defined(LACKS_SCHED_H)
78 #include <sched.h>
79 #endif /* solaris or LACKS_SCHED_H */
80 #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS
81 #include <pthread.h>
82 #endif /* USE_RECURSIVE_LOCKS ... */
83 #elif defined(_MSC_VER)
84 #ifndef _M_AMD64
85 /* These are already defined on AMD64 builds */
86 #ifdef __cplusplus
87 extern "C" {
88 #endif /* __cplusplus */
89 LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp);
90 LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value);
91 #ifdef __cplusplus
92 }
93 #endif /* __cplusplus */
94 #endif /* _M_AMD64 */
95 #pragma intrinsic (_InterlockedCompareExchange)
96 #pragma intrinsic (_InterlockedExchange)
97 #define interlockedcompareexchange _InterlockedCompareExchange
98 #define interlockedexchange _InterlockedExchange
99 #elif defined(WIN32) && defined(__GNUC__)
100 #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b)
101 #define interlockedexchange __sync_lock_test_and_set
102 #endif /* Win32 */
103 #else /* USE_LOCKS */
104 #endif /* USE_LOCKS */
105 
106 #ifndef LOCK_AT_FORK
107 #define LOCK_AT_FORK 0
108 #endif
109 
110 /* Declarations for bit scanning on win32 */
111 #if defined(_MSC_VER) && _MSC_VER>=1300
112 #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */
113 #ifdef __cplusplus
114 extern "C" {
115 #endif /* __cplusplus */
116 unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
117 unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
118 #ifdef __cplusplus
119 }
120 #endif /* __cplusplus */
121 
122 #define BitScanForward _BitScanForward
123 #define BitScanReverse _BitScanReverse
124 #pragma intrinsic(_BitScanForward)
125 #pragma intrinsic(_BitScanReverse)
126 #endif /* BitScanForward */
127 #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */
128 
129 #ifndef WIN32
130 #ifndef malloc_getpagesize
131 # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
132 # ifndef _SC_PAGE_SIZE
133 # define _SC_PAGE_SIZE _SC_PAGESIZE
134 # endif
135 # endif
136 # ifdef _SC_PAGE_SIZE
137 # define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
138 # else
139 # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
140  extern size_t getpagesize();
141 # define malloc_getpagesize getpagesize()
142 # else
143 # ifdef WIN32 /* use supplied emulation of getpagesize */
144 # define malloc_getpagesize getpagesize()
145 # else
146 # ifndef LACKS_SYS_PARAM_H
147 # include <sys/param.h>
148 # endif
149 # ifdef EXEC_PAGESIZE
150 # define malloc_getpagesize EXEC_PAGESIZE
151 # else
152 # ifdef NBPG
153 # ifndef CLSIZE
154 # define malloc_getpagesize NBPG
155 # else
156 # define malloc_getpagesize (NBPG * CLSIZE)
157 # endif
158 # else
159 # ifdef NBPC
160 # define malloc_getpagesize NBPC
161 # else
162 # ifdef PAGESIZE
163 # define malloc_getpagesize PAGESIZE
164 # else /* just guess */
165 # define malloc_getpagesize ((size_t)4096U)
166 # endif
167 # endif
168 # endif
169 # endif
170 # endif
171 # endif
172 # endif
173 #endif
174 #endif
175 
176 /* ------------------- size_t and alignment properties -------------------- */
177 
178 /* The byte and bit size of a size_t */
179 #define SIZE_T_SIZE (sizeof(size_t))
180 #define SIZE_T_BITSIZE (sizeof(size_t) << 3)
181 
182 /* Some constants coerced to size_t */
183 /* Annoying but necessary to avoid errors on some platforms */
184 #define SIZE_T_ZERO ((size_t)0)
185 #define SIZE_T_ONE ((size_t)1)
186 #define SIZE_T_TWO ((size_t)2)
187 #define SIZE_T_FOUR ((size_t)4)
188 #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
189 #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
190 #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
191 #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
192 
193 /* The bit mask value corresponding to MALLOC_ALIGNMENT */
194 #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
195 
196 /* True if address a has acceptable alignment */
197 #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
198 
199 /* the number of bytes to offset an address to align it */
200 #define align_offset(A)\
201  ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
202  ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
203 
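/*
  Illustrative sketch (editor addition, not part of the original file):
  align_offset yields the number of bytes to add to an address so the
  result satisfies MALLOC_ALIGNMENT; the hypothetical helper below just
  applies it and checks the result with is_aligned.
*/
static void* example_align_up(void* p) {
  char* cp = (char*)p;
  cp += align_offset(cp);      /* 0 if p was already aligned */
  assert(is_aligned(cp));      /* CHUNK_ALIGN_MASK bits are now clear */
  return cp;
}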
204 /* -------------------------- MMAP preliminaries ------------------------- */
205 
206 /*
207  If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
208  checks to fail so compiler optimizer can delete code rather than
209  using so many "#if"s.
210 */
211 
212 
213 /* MORECORE and MMAP must return MFAIL on failure */
214 #define MFAIL ((void*)(MAX_SIZE_T))
215 #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
216 
217 #if HAVE_MMAP
218 
219 #ifndef WIN32
220 #define MUNMAP_DEFAULT(a, s) munmap((a), (s))
221 #define MMAP_PROT (PROT_READ|PROT_WRITE)
222 #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
223 #define MAP_ANONYMOUS MAP_ANON
224 #endif /* MAP_ANON */
225 #ifdef MAP_ANONYMOUS
226 #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
227 #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
228 #else /* MAP_ANONYMOUS */
229 /*
230  Nearly all versions of mmap support MAP_ANONYMOUS, so the following
231  is unlikely to be needed, but is supplied just in case.
232 */
233 #define MMAP_FLAGS (MAP_PRIVATE)
234 static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
235 #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \
236  (dev_zero_fd = open("/dev/zero", O_RDWR), \
237  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
238  mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
239 #endif /* MAP_ANONYMOUS */
240 
241 #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s)
242 
243 #else /* WIN32 */
244 
245 /* Win32 MMAP via VirtualAlloc */
246 static FORCEINLINE void* win32mmap(size_t size) {
247  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE);
248  return (ptr != 0)? ptr: MFAIL;
249 }
250 
251 /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
252 static FORCEINLINE void* win32direct_mmap(size_t size) {
253  void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN,
254  PAGE_READWRITE);
255  return (ptr != 0)? ptr: MFAIL;
256 }
257 
258 /* This function supports releasing coalesced segments */
259 static FORCEINLINE int win32munmap(void* ptr, size_t size) {
260  MEMORY_BASIC_INFORMATION minfo;
261  char* cptr = (char*)ptr;
262  while (size) {
263  if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
264  return -1;
265  if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
266  minfo.State != MEM_COMMIT || minfo.RegionSize > size)
267  return -1;
268  if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
269  return -1;
270  cptr += minfo.RegionSize;
271  size -= minfo.RegionSize;
272  }
273  return 0;
274 }
275 
276 #define MMAP_DEFAULT(s) win32mmap(s)
277 #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s))
278 #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s)
279 #endif /* WIN32 */
280 #endif /* HAVE_MMAP */
281 
282 #if HAVE_MREMAP
283 #ifndef WIN32
284 #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
285 #endif /* WIN32 */
286 #endif /* HAVE_MREMAP */
287 
288 /**
289  * Define CALL_MORECORE
290  */
291 #if HAVE_MORECORE
292  #ifdef MORECORE
293  #define CALL_MORECORE(S) MORECORE(S)
294  #else /* MORECORE */
295  #define CALL_MORECORE(S) MORECORE_DEFAULT(S)
296  #endif /* MORECORE */
297 #else /* HAVE_MORECORE */
298  #define CALL_MORECORE(S) MFAIL
299 #endif /* HAVE_MORECORE */
300 
301 /**
302  * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP
303  */
304 #if HAVE_MMAP
305  #define USE_MMAP_BIT (SIZE_T_ONE)
306 
307  #ifdef MMAP
308  #define CALL_MMAP(s) MMAP(s)
309  #else /* MMAP */
310  #define CALL_MMAP(s) MMAP_DEFAULT(s)
311  #endif /* MMAP */
312  #ifdef MUNMAP
313  #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
314  #else /* MUNMAP */
315  #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s))
316  #endif /* MUNMAP */
317  #ifdef DIRECT_MMAP
318  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
319  #else /* DIRECT_MMAP */
320  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s)
321  #endif /* DIRECT_MMAP */
322 #else /* HAVE_MMAP */
323  #define USE_MMAP_BIT (SIZE_T_ZERO)
324 
325  #define MMAP(s) MFAIL
326  #define MUNMAP(a, s) (-1)
327  #define DIRECT_MMAP(s) MFAIL
328  #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s)
329  #define CALL_MMAP(s) MMAP(s)
330  #define CALL_MUNMAP(a, s) MUNMAP((a), (s))
331 #endif /* HAVE_MMAP */
332 
333 /**
334  * Define CALL_MREMAP
335  */
336 #if HAVE_MMAP && HAVE_MREMAP
337  #ifdef MREMAP
338  #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv))
339  #else /* MREMAP */
340  #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv))
341  #endif /* MREMAP */
342 #else /* HAVE_MMAP && HAVE_MREMAP */
343  #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
344 #endif /* HAVE_MMAP && HAVE_MREMAP */
345 
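/*
  Illustrative sketch (editor addition): the CALL_* wrappers above let a
  build substitute its own MMAP/MUNMAP while the rest of the code stays
  unchanged.  A round trip through them looks like this; MFAIL signals
  failure, and when HAVE_MMAP is 0 the call collapses to MFAIL at
  compile time.
*/
static int example_mmap_roundtrip(size_t n) {
  void* p = CALL_MMAP(n);      /* MMAP_DEFAULT, or a user-supplied MMAP */
  if (p == MFAIL)
    return -1;                 /* system refused the request */
  return CALL_MUNMAP(p, n);    /* 0 on success */
}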
346 /* mstate bit set if contiguous morecore disabled or failed */
347 #define USE_NONCONTIGUOUS_BIT (4U)
348 
349 /* mstate bit set if no expansion allowed */
350 #define USE_NOEXPAND_BIT (8U)
351 
352 /* trace allocations if set */
353 #define USE_TRACE_BIT (16U)
354 
355 /* segment bit set in create_mspace_with_base */
356 #define EXTERN_BIT (8U)
357 
358 
359 /* --------------------------- Lock preliminaries ------------------------ */
360 
361 /*
362  When locks are defined, there is one global lock, plus
363  one per-mspace lock.
364 
365  The global lock ensures that mparams.magic and other unique
366  mparams values are initialized only once. It also protects
367  sequences of calls to MORECORE. In many cases sys_alloc requires
368  two calls, that should not be interleaved with calls by other
369  threads. This does not protect against direct calls to MORECORE
370  by other threads not using this lock, so there is still code to
371  cope as best we can with interference.
372 
373  Per-mspace locks surround calls to malloc, free, etc.
374  By default, locks are simple non-reentrant mutexes.
375 
376  Because lock-protected regions generally have bounded times, it is
377  OK to use the supplied simple spinlocks. Spinlocks are likely to
378  improve performance for lightly contended applications, but worsen
379  performance under heavy contention.
380 
381  If USE_LOCKS is > 1, the definitions of lock routines here are
382  bypassed, in which case you will need to define the type MLOCK_T,
383  and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK
384  and TRY_LOCK. You must also declare a
385  static MLOCK_T malloc_global_mutex = { initialization values };.
386 
387 */
388 
389 #if !USE_LOCKS
390 #define USE_LOCK_BIT (0U)
391 #define INITIAL_LOCK(l) (0)
392 #define DESTROY_LOCK(l) (0)
393 #define ACQUIRE_MALLOC_GLOBAL_LOCK()
394 #define RELEASE_MALLOC_GLOBAL_LOCK()
395 
396 #else
397 #if USE_LOCKS > 1
398 /* ----------------------- User-defined locks ------------------------ */
399 /* Define your own lock implementation here */
400 /* #define INITIAL_LOCK(lk) ... */
401 /* #define DESTROY_LOCK(lk) ... */
402 /* #define ACQUIRE_LOCK(lk) ... */
403 /* #define RELEASE_LOCK(lk) ... */
404 /* #define TRY_LOCK(lk) ... */
405 /* static MLOCK_T malloc_global_mutex = ... */
406 
407 #elif USE_SPIN_LOCKS
408 
409 /* First, define CAS_LOCK and CLEAR_LOCK on ints */
410 /* Note CAS_LOCK defined to return 0 on success */
411 
412 #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
413 #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1)
414 #define CLEAR_LOCK(sl) __sync_lock_release(sl)
415 
416 #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)))
417 /* Custom spin locks for older gcc on x86 */
418 static FORCEINLINE int x86_cas_lock(int *sl) {
419  int ret;
420  int val = 1;
421  int cmp = 0;
422  __asm__ __volatile__ ("lock; cmpxchgl %1, %2"
423  : "=a" (ret)
424  : "r" (val), "m" (*(sl)), "0"(cmp)
425  : "memory", "cc");
426  return ret;
427 }
428 
429 static FORCEINLINE void x86_clear_lock(int* sl) {
430  assert(*sl != 0);
431  int prev = 0;
432  int ret;
433  __asm__ __volatile__ ("lock; xchgl %0, %1"
434  : "=r" (ret)
435  : "m" (*(sl)), "0"(prev)
436  : "memory");
437 }
438 
439 #define CAS_LOCK(sl) x86_cas_lock(sl)
440 #define CLEAR_LOCK(sl) x86_clear_lock(sl)
441 
442 #else /* Win32 MSC */
443 #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1)
444 #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0)
445 
446 #endif /* ... gcc spins locks ... */
447 
448 /* How to yield for a spin lock */
449 #define SPINS_PER_YIELD 63
450 #if defined(_MSC_VER)
451 #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */
452 #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE)
453 #elif defined (__SVR4) && defined (__sun) /* solaris */
454 #define SPIN_LOCK_YIELD thr_yield();
455 #elif !defined(LACKS_SCHED_H)
456 #define SPIN_LOCK_YIELD sched_yield();
457 #else
458 #define SPIN_LOCK_YIELD
459 #endif /* ... yield ... */
460 
461 #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0
462 /* Plain spin locks use single word (embedded in malloc_states) */
464 static int spin_acquire_lock(int *sl) {
465  int spins = 0;
466  while (*(volatile int *)sl != 0 || CAS_LOCK(sl)) {
467  if ((++spins & SPINS_PER_YIELD) == 0) {
468  SPIN_LOCK_YIELD;
469  }
470  }
471  return 0;
472 }
473 
474 #define MLOCK_T int
475 #define TRY_LOCK(sl) !CAS_LOCK(sl)
476 #define RELEASE_LOCK(sl) CLEAR_LOCK(sl)
477 #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0)
478 #define INITIAL_LOCK(sl) (*sl = 0)
479 #define DESTROY_LOCK(sl) (0)
480 static MLOCK_T malloc_global_mutex = 0;
481 
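/*
  Illustrative sketch (editor addition): the pattern these spin-lock
  macros are written for -- hold the lock only around short, bounded
  work, since ACQUIRE_LOCK spins (yielding every SPINS_PER_YIELD tries)
  rather than blocking.
*/
static void example_locked_increment(MLOCK_T *lk, int *counter) {
  ACQUIRE_LOCK(lk);            /* spin until the CAS on *lk succeeds */
  ++*counter;                  /* bounded critical section */
  RELEASE_LOCK(lk);            /* clear the lock word */
}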
482 #else /* USE_RECURSIVE_LOCKS */
483 /* types for lock owners */
484 #ifdef WIN32
485 #define THREAD_ID_T DWORD
486 #define CURRENT_THREAD GetCurrentThreadId()
487 #define EQ_OWNER(X,Y) ((X) == (Y))
488 #else
489 /*
490  Note: the following assume that pthread_t is a type that can be
491  initialized to (casted) zero. If this is not the case, you will need to
492  somehow redefine these or not use spin locks.
493 */
494 #define THREAD_ID_T pthread_t
495 #define CURRENT_THREAD pthread_self()
496 #define EQ_OWNER(X,Y) pthread_equal(X, Y)
497 #endif
498 
499 struct malloc_recursive_lock {
500  int sl;
501  unsigned int c;
502  THREAD_ID_T threadid;
503 };
504 
505 #define MLOCK_T struct malloc_recursive_lock
506 static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0};
507 
508 static FORCEINLINE void recursive_release_lock(MLOCK_T *lk) {
509  assert(lk->sl != 0);
510  if (--lk->c == 0) {
511  CLEAR_LOCK(&lk->sl);
512  }
513 }
514 
515 static FORCEINLINE int recursive_acquire_lock(MLOCK_T *lk) {
516  THREAD_ID_T mythreadid = CURRENT_THREAD;
517  int spins = 0;
518  for (;;) {
519  if (*((volatile int *)(&lk->sl)) == 0) {
520  if (!CAS_LOCK(&lk->sl)) {
521  lk->threadid = mythreadid;
522  lk->c = 1;
523  return 0;
524  }
525  }
526  else if (EQ_OWNER(lk->threadid, mythreadid)) {
527  ++lk->c;
528  return 0;
529  }
530  if ((++spins & SPINS_PER_YIELD) == 0) {
531  SPIN_LOCK_YIELD;
532  }
533  }
534 }
535 
536 static FORCEINLINE int recursive_try_lock(MLOCK_T *lk) {
537  THREAD_ID_T mythreadid = CURRENT_THREAD;
538  if (*((volatile int *)(&lk->sl)) == 0) {
539  if (!CAS_LOCK(&lk->sl)) {
540  lk->threadid = mythreadid;
541  lk->c = 1;
542  return 1;
543  }
544  }
545  else if (EQ_OWNER(lk->threadid, mythreadid)) {
546  ++lk->c;
547  return 1;
548  }
549  return 0;
550 }
551 
552 #define RELEASE_LOCK(lk) recursive_release_lock(lk)
553 #define TRY_LOCK(lk) recursive_try_lock(lk)
554 #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk)
555 #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0)
556 #define DESTROY_LOCK(lk) (0)
557 #endif /* USE_RECURSIVE_LOCKS */
558 
559 #elif defined(WIN32) /* Win32 critical sections */
560 #define MLOCK_T CRITICAL_SECTION
561 #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0)
562 #define RELEASE_LOCK(lk) LeaveCriticalSection(lk)
563 #define TRY_LOCK(lk) TryEnterCriticalSection(lk)
564 #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000))
565 #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0)
566 #define NEED_GLOBAL_LOCK_INIT
567 
568 static MLOCK_T malloc_global_mutex;
569 static volatile LONG malloc_global_mutex_status;
570 
571 /* Use spin loop to initialize global lock */
572 static void init_malloc_global_mutex() {
573  for (;;) {
574  long stat = malloc_global_mutex_status;
575  if (stat > 0)
576  return;
577  /* transition to < 0 while initializing, then to > 0 */
578  if (stat == 0 &&
579  interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) {
580  InitializeCriticalSection(&malloc_global_mutex);
581  interlockedexchange(&malloc_global_mutex_status, (LONG)1);
582  return;
583  }
584  SleepEx(0, FALSE);
585  }
586 }
587 
588 #else /* pthreads-based locks */
589 #define MLOCK_T pthread_mutex_t
590 #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
591 #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
592 #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk))
593 #define INITIAL_LOCK(lk) pthread_init_lock(lk)
594 #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
595 
596 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE)
597 /* Cope with old-style linux recursive lock initialization by adding */
598 /* skipped internal declaration from pthread.h */
599 extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr,
600  int __kind));
601 #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP
602 #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y)
603 #endif /* USE_RECURSIVE_LOCKS ... */
604 
605 static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER;
606 
607 static int pthread_init_lock (MLOCK_T *lk) {
608  pthread_mutexattr_t attr;
609  if (pthread_mutexattr_init(&attr)) return 1;
610 #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0
611  if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1;
612 #endif
613  if (pthread_mutex_init(lk, &attr)) return 1;
614  if (pthread_mutexattr_destroy(&attr)) return 1;
615  return 0;
616 }
617 
618 #endif /* ... lock types ... */
619 
620 /* Common code for all lock types */
621 #define USE_LOCK_BIT (2U)
622 
623 #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK
624 #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex);
625 #endif
626 
627 #ifndef RELEASE_MALLOC_GLOBAL_LOCK
628 #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex);
629 #endif
630 
631 #endif /* USE_LOCKS */
632 
633 /* ----------------------- Chunk representations ------------------------ */
634 
635 /*
636  (The following includes lightly edited explanations by Colin Plumb.)
637 
638  The malloc_chunk declaration below is misleading (but accurate and
639  necessary). It declares a "view" into memory allowing access to
640  necessary fields at known offsets from a given base.
641 
642  Chunks of memory are maintained using a `boundary tag' method as
643  originally described by Knuth. (See the paper by Paul Wilson
644  ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
645  techniques.) Sizes of free chunks are stored both in the front of
646  each chunk and at the end. This makes consolidating fragmented
647  chunks into bigger chunks fast. The head fields also hold bits
648  representing whether chunks are free or in use.
649 
650  Here are some pictures to make it clearer. They are "exploded" to
651  show that the state of a chunk can be thought of as extending from
652  the high 31 bits of the head field of its header through the
653  prev_foot and PINUSE_BIT bit of the following chunk header.
654 
655  A chunk that's in use looks like:
656 
657  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
658  | Size of previous chunk (if P = 0) |
659  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
660  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
661  | Size of this chunk 1| +-+
662  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
663  | |
664  +- -+
665  | |
666  +- -+
667  | :
668  +- size - sizeof(size_t) available payload bytes -+
669  : |
670  chunk-> +- -+
671  | |
672  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
673  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
674  | Size of next chunk (may or may not be in use) | +-+
675  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
676 
677  And if it's free, it looks like this:
678 
679  chunk-> +- -+
680  | User payload (must be in use, or we would have merged!) |
681  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
682  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
683  | Size of this chunk 0| +-+
684  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
685  | Next pointer |
686  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
687  | Prev pointer |
688  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
689  | :
690  +- size - sizeof(struct chunk) unused bytes -+
691  : |
692  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
693  | Size of this chunk |
694  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
695  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
696  | Size of next chunk (must be in use, or we would have merged)| +-+
697  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
698  | :
699  +- User payload -+
700  : |
701  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
702  |0|
703  +-+
704  Note that since we always merge adjacent free chunks, the chunks
705  adjacent to a free chunk must be in use.
706 
707  Given a pointer to a chunk (which can be derived trivially from the
708  payload pointer) we can, in O(1) time, find out whether the adjacent
709  chunks are free, and if so, unlink them from the lists that they
710  are on and merge them with the current chunk.
711 
712  Chunks always begin on even word boundaries, so the mem portion
713  (which is returned to the user) is also on an even word boundary, and
714  thus at least double-word aligned.
715 
716  The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
717  chunk size (which is always a multiple of two words), is an in-use
718  bit for the *previous* chunk. If that bit is *clear*, then the
719  word before the current chunk size contains the previous chunk
720  size, and can be used to find the front of the previous chunk.
721  The very first chunk allocated always has this bit set, preventing
722  access to non-existent (or non-owned) memory. If pinuse is set for
723  any given chunk, then you CANNOT determine the size of the
724  previous chunk, and might even get a memory addressing fault when
725  trying to do so.
726 
727  The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
728  the chunk size redundantly records whether the current chunk is
729  inuse (unless the chunk is mmapped). This redundancy enables usage
730  checks within free and realloc, and reduces indirection when freeing
731  and consolidating chunks.
732 
733  Each freshly allocated chunk must have both cinuse and pinuse set.
734  That is, each allocated chunk borders either a previously allocated
735  and still in-use chunk, or the base of its memory arena. This is
736  ensured by making all allocations from the `lowest' part of any
737  found chunk. Further, no free chunk physically borders another one,
738  so each free chunk is known to be preceded and followed by either
739  inuse chunks or the ends of memory.
740 
741  Note that the `foot' of the current chunk is actually represented
742  as the prev_foot of the NEXT chunk. This makes it easier to
743  deal with alignments etc but can be very confusing when trying
744  to extend or adapt this code.
745 
746  The exceptions to all this are
747 
748  1. The special chunk `top' is the top-most available chunk (i.e.,
749  the one bordering the end of available memory). It is treated
750  specially. Top is never included in any bin, is used only if
751  no other chunk is available, and is released back to the
752  system if it is very large (see M_TRIM_THRESHOLD). In effect,
753  the top chunk is treated as larger (and thus less well
754  fitting) than any other available chunk. The top chunk
755  doesn't update its trailing size field since there is no next
756  contiguous chunk that would have to index off it. However,
757  space is still allocated for it (TOP_FOOT_SIZE) to enable
758  separation or merging when space is extended.
759 
760  2. Chunks allocated via mmap have both cinuse and pinuse bits
761  cleared in their head fields. Because they are allocated
762  one-by-one, each must carry its own prev_foot field, which is
763  also used to hold the offset this chunk has within its mmapped
764  region, which is needed to preserve alignment. Each mmapped
765  chunk is trailed by the first two fields of a fake next-chunk
766  for sake of usage checks.
767 
768 */
769 
770 struct malloc_chunk {
771  size_t prev_foot; /* Size of previous chunk (if free). */
772  size_t head; /* Size and inuse bits. */
773  struct malloc_chunk* fd; /* double links -- used only if free. */
774  struct malloc_chunk* bk;
775 };
776 
777 typedef struct malloc_chunk mchunk;
778 typedef struct malloc_chunk* mchunkptr;
779 typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */
780 typedef unsigned int bindex_t; /* Described below */
781 typedef unsigned int binmap_t; /* Described below */
782 typedef unsigned int flag_t; /* The type of various bit flag sets */
783 
784 /* ------------------- Chunks sizes and alignments ----------------------- */
785 
786 #define MCHUNK_SIZE (sizeof(mchunk))
787 
788 #if FOOTERS
789 #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
790 #else /* FOOTERS */
791 #define CHUNK_OVERHEAD (SIZE_T_SIZE)
792 #endif /* FOOTERS */
793 
794 /* MMapped chunks need a second word of overhead ... */
795 #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
796 /* ... and additional padding for fake next-chunk at foot */
797 #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
798 
799 /* The smallest size we can malloc is an aligned minimal chunk */
800 #define MIN_CHUNK_SIZE\
801  ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
802 
803 /* conversion from malloc headers to user pointers, and back */
804 #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
805 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
806 /* chunk associated with aligned address A */
807 #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
808 
809 /* Bounds on request (not chunk) sizes. */
810 #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
811 #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
812 
813 /* pad request bytes into a usable size */
814 #define pad_request(req) \
815  (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
816 
817 /* pad request, checking for minimum (but not maximum) */
818 #define request2size(req) \
819  (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
820 
821 
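/*
  Worked example (editor addition, assuming 64-bit size_t, FOOTERS unset
  and MALLOC_ALIGNMENT == 16, so CHUNK_OVERHEAD == 8 and
  MIN_CHUNK_SIZE == 32):
    request2size(1)  == MIN_CHUNK_SIZE == 32
    request2size(24) == (24 + 8 + 15) & ~15 == 32
    request2size(25) == (25 + 8 + 15) & ~15 == 48
  i.e. every request gets room for the size/flag head word and is rounded
  up to the alignment unit, never below MIN_CHUNK_SIZE.
*/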
822 /* ------------------ Operations on head and foot fields ----------------- */
823 
824 /*
825  The head field of a chunk is or'ed with PINUSE_BIT when previous
826  adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in
827  use, unless mmapped, in which case both bits are cleared.
828 
829  FLAG4_BIT is not used by this malloc, but might be useful in extensions.
830 */
831 
832 #define PINUSE_BIT (SIZE_T_ONE)
833 #define CINUSE_BIT (SIZE_T_TWO)
834 #define FLAG4_BIT (SIZE_T_FOUR)
835 #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
836 #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT)
837 
838 /* Head value for fenceposts */
839 #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
840 
841 /* extraction of fields from head words */
842 #define cinuse(p) ((p)->head & CINUSE_BIT)
843 #define pinuse(p) ((p)->head & PINUSE_BIT)
844 #define flag4inuse(p) ((p)->head & FLAG4_BIT)
845 #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT)
846 #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0)
847 
848 #define chunksize(p) ((p)->head & ~(FLAG_BITS))
849 
850 #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
851 #define set_flag4(p) ((p)->head |= FLAG4_BIT)
852 #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT)
853 
854 /* Treat space at ptr +/- offset as a chunk */
855 #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
856 #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
857 
858 /* Ptr to next or previous physical malloc_chunk. */
859 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS)))
860 #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
861 
862 /* extract next chunk's pinuse bit */
863 #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
864 
865 /* Get/set size at footer */
866 #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
867 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
868 
869 /* Set size, pinuse bit, and foot */
870 #define set_size_and_pinuse_of_free_chunk(p, s)\
871  ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
872 
873 /* Set size, pinuse bit, foot, and clear next pinuse */
874 #define set_free_with_pinuse(p, s, n)\
875  (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
876 
877 /* Get the internal overhead associated with chunk p */
878 #define overhead_for(p)\
879  (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
880 
881 /* Return true if malloced space is not necessarily cleared */
882 #if MMAP_CLEARS
883 #define calloc_must_clear(p) (!is_mmapped(p))
884 #else /* MMAP_CLEARS */
885 #define calloc_must_clear(p) (1)
886 #endif /* MMAP_CLEARS */
887 
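/*
  Illustrative sketch (editor addition): reading a chunk's state from its
  head word with the accessors above.
*/
static int example_chunk_state(mchunkptr p) {
  if (is_mmapped(p))           /* both CINUSE and PINUSE clear */
    return 2;                  /* individually mmapped chunk */
  if (cinuse(p))
    return 1;                  /* allocated chunk */
  return 0;                    /* free chunk; its size is also held in the
                                  prev_foot of the following chunk */
}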
888 /* ---------------------- Overlaid data structures ----------------------- */
889 
890 /*
891  When chunks are not in use, they are treated as nodes of either
892  lists or trees.
893 
894  "Small" chunks are stored in circular doubly-linked lists, and look
895  like this:
896 
897  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
898  | Size of previous chunk |
899  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
900  `head:' | Size of chunk, in bytes |P|
901  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
902  | Forward pointer to next chunk in list |
903  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
904  | Back pointer to previous chunk in list |
905  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
906  | Unused space (may be 0 bytes long) .
907  . .
908  . |
909 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
910  `foot:' | Size of chunk, in bytes |
911  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
912 
913  Larger chunks are kept in a form of bitwise digital trees (aka
914  tries) keyed on chunksizes. Because malloc_tree_chunks are only for
915  free chunks greater than 256 bytes, their size doesn't impose any
916  constraints on user chunk sizes. Each node looks like:
917 
918  chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
919  | Size of previous chunk |
920  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
921  `head:' | Size of chunk, in bytes |P|
922  mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
923  | Forward pointer to next chunk of same size |
924  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
925  | Back pointer to previous chunk of same size |
926  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
927  | Pointer to left child (child[0]) |
928  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
929  | Pointer to right child (child[1]) |
930  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
931  | Pointer to parent |
932  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
933  | bin index of this chunk |
934  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
935  | Unused space .
936  . |
937 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
938  `foot:' | Size of chunk, in bytes |
939  +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
940 
941  Each tree holding treenodes is a tree of unique chunk sizes. Chunks
942  of the same size are arranged in a circularly-linked list, with only
943  the oldest chunk (the next to be used, in our FIFO ordering)
944  actually in the tree. (Tree members are distinguished by a non-null
945  parent pointer.) If a chunk with the same size as an existing node
946  is inserted, it is linked off the existing node using pointers that
947  work in the same way as fd/bk pointers of small chunks.
948 
949  Each tree contains a power of 2 sized range of chunk sizes (the
950  smallest is 0x100 <= x < 0x180), which is divided in half at each
951  tree level, with the chunks in the smaller half of the range (0x100
952  <= x < 0x140 for the top node) in the left subtree and the larger
953  half (0x140 <= x < 0x180) in the right subtree. This is, of course,
954  done by inspecting individual bits.
955 
956  Using these rules, each node's left subtree contains all smaller
957  sizes than its right subtree. However, the node at the root of each
958  subtree has no particular ordering relationship to either. (The
959  dividing line between the subtree sizes is based on trie relation.)
960  If we remove the last chunk of a given size from the interior of the
961  tree, we need to replace it with a leaf node. The tree ordering
962  rules permit a node to be replaced by any leaf below it.
963 
964  The smallest chunk in a tree (a common operation in a best-fit
965  allocator) can be found by walking a path to the leftmost leaf in
966  the tree. Unlike a usual binary tree, where we follow left child
967  pointers until we reach a null, here we follow the right child
968  pointer any time the left one is null, until we reach a leaf with
969  both child pointers null. The smallest chunk in the tree will be
970  somewhere along that path.
971 
972  The worst case number of steps to add, find, or remove a node is
973  bounded by the number of bits differentiating chunks within
974  bins. Under current bin calculations, this ranges from 6 up to 21
975  (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
976  is of course much better.
977 */
978 
979 struct malloc_tree_chunk {
980  /* The first four fields must be compatible with malloc_chunk */
981  size_t prev_foot;
982  size_t head;
983  struct malloc_tree_chunk* fd;
984  struct malloc_tree_chunk* bk;
985 
986  struct malloc_tree_chunk* child[2];
987  struct malloc_tree_chunk* parent;
988  bindex_t index;
989 };
990 
991 typedef struct malloc_tree_chunk tchunk;
992 typedef struct malloc_tree_chunk* tchunkptr;
993 typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */
994 
995 /* A little helper macro for trees */
996 #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
997 
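/*
  Illustrative sketch (editor addition): the "leftmost path" walk
  described above.  Follow child[0] when present, otherwise child[1];
  the smallest chunk in the tree lies somewhere on that path, so a
  best-fit candidate can be found by tracking the minimum along it.
*/
static tchunkptr example_smallest_in_tree(tchunkptr t) {
  tchunkptr best = t;
  while ((t = leftmost_child(t)) != 0) {
    if (chunksize(t) < chunksize(best))
      best = t;
  }
  return best;
}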
998 /* ----------------------------- Segments -------------------------------- */
999 
1000 /*
1001  Each malloc space may include non-contiguous segments, held in a
1002  list headed by an embedded malloc_segment record representing the
1003  top-most space. Segments also include flags holding properties of
1004  the space. Large chunks that are directly allocated by mmap are not
1005  included in this list. They are instead independently created and
1006  destroyed without otherwise keeping track of them.
1007 
1008  Segment management mainly comes into play for spaces allocated by
1009  MMAP. Any call to MMAP might or might not return memory that is
1010  adjacent to an existing segment. MORECORE normally contiguously
1011  extends the current space, so this space is almost always adjacent,
1012  which is simpler and faster to deal with. (This is why MORECORE is
1013  used preferentially to MMAP when both are available -- see
1014  sys_alloc.) When allocating using MMAP, we don't use any of the
1015  hinting mechanisms (inconsistently) supported in various
1016  implementations of unix mmap, or distinguish reserving from
1017  committing memory. Instead, we just ask for space, and exploit
1018  contiguity when we get it. It is probably possible to do
1019  better than this on some systems, but no general scheme seems
1020  to be significantly better.
1021 
1022  Management entails a simpler variant of the consolidation scheme
1023  used for chunks to reduce fragmentation -- new adjacent memory is
1024  normally prepended or appended to an existing segment. However,
1025  there are limitations compared to chunk consolidation that mostly
1026  reflect the fact that segment processing is relatively infrequent
1027  (occurring only when getting memory from system) and that we
1028  don't expect to have huge numbers of segments:
1029 
1030  * Segments are not indexed, so traversal requires linear scans. (It
1031  would be possible to index these, but is not worth the extra
1032  overhead and complexity for most programs on most platforms.)
1033  * New segments are only appended to old ones when holding top-most
1034  memory; if they cannot be prepended to others, they are held in
1035  different segments.
1036 
1037  Except for the top-most segment of an mstate, each segment record
1038  is kept at the tail of its segment. Segments are added by pushing
1039  segment records onto the list headed by &mstate.seg for the
1040  containing mstate.
1041 
1042  Segment flags control allocation/merge/deallocation policies:
1043  * If EXTERN_BIT set, then we did not allocate this segment,
1044  and so should not try to deallocate or merge with others.
1045  (This currently holds only for the initial segment passed
1046  into create_mspace_with_base.)
1047  * If USE_MMAP_BIT set, the segment may be merged with
1048  other surrounding mmapped segments and trimmed/de-allocated
1049  using munmap.
1050  * If neither bit is set, then the segment was obtained using
1051  MORECORE so can be merged with surrounding MORECORE'd segments
1052  and deallocated/trimmed using MORECORE with negative arguments.
1053 */
1054 
1055 struct malloc_segment {
1056  char* base; /* base address */
1057  size_t size; /* allocated size */
1058  struct malloc_segment* next; /* ptr to next segment */
1059  flag_t sflags; /* mmap and extern flag */
1060 };
1061 
1062 #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT)
1063 #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
1064 
1065 typedef struct malloc_segment msegment;
1066 typedef struct malloc_segment* msegmentptr;
1067 
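/*
  Illustrative sketch (editor addition): segments form a plain singly
  linked list, so anything that needs a total (or a lookup, as in
  segment_holding further below) does a linear scan.
*/
static size_t example_segment_bytes(msegmentptr sp) {
  size_t total = 0;
  for (; sp != 0; sp = sp->next)
    total += sp->size;
  return total;
}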
1068 /* ---------------------------- malloc_state ----------------------------- */
1069 
1070 /*
1071  A malloc_state holds all of the bookkeeping for a space.
1072  The main fields are:
1073 
1074  Top
1075  The topmost chunk of the currently active segment. Its size is
1076  cached in topsize. The actual size of topmost space is
1077  topsize+TOP_FOOT_SIZE, which includes space reserved for adding
1078  fenceposts and segment records if necessary when getting more
1079  space from the system. The size at which to autotrim top is
1080  cached from mparams in trim_check, except that it is disabled if
1081  an autotrim fails.
1082 
1083  Designated victim (dv)
1084  This is the preferred chunk for servicing small requests that
1085  don't have exact fits. It is normally the chunk split off most
1086  recently to service another small request. Its size is cached in
1087  dvsize. The link fields of this chunk are not maintained since it
1088  is not kept in a bin.
1089 
1090  SmallBins
1091  An array of bin headers for free chunks. These bins hold chunks
1092  with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
1093  chunks of all the same size, spaced 8 bytes apart. To simplify
1094  use in double-linked lists, each bin header acts as a malloc_chunk
1095  pointing to the real first node, if it exists (else pointing to
1096  itself). This avoids special-casing for headers. But to avoid
1097  waste, we allocate only the fd/bk pointers of bins, and then use
1098  repositioning tricks to treat these as the fields of a chunk.
1099 
1100  TreeBins
1101  Treebins are pointers to the roots of trees holding a range of
1102  sizes. There are 2 equally spaced treebins for each power of two
1103  from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
1104  larger.
1105 
1106  Bin maps
1107  There is one bit map for small bins ("smallmap") and one for
1108  treebins ("treemap"). Each bin sets its bit when non-empty, and
1109  clears the bit when empty. Bit operations are then used to avoid
1110  bin-by-bin searching -- nearly all "search" is done without ever
1111  looking at bins that won't be selected. The bit maps
1112  conservatively use 32 bits per map word, even on a 64-bit system.
1113  For a good description of some of the bit-based techniques used
1114  here, see Henry S. Warren Jr's book "Hacker's Delight" (and
1115  supplement at http://hackersdelight.org/). Many of these are
1116  intended to reduce the branchiness of paths through malloc etc, as
1117  well as to reduce the number of memory locations read or written.
1118 
1119  Segments
1120  A list of segments headed by an embedded malloc_segment record
1121  representing the initial space.
1122 
1123  Address check support
1124  The least_addr field is the least address ever obtained from
1125  MORECORE or MMAP. Attempted frees and reallocs of any address less
1126  than this are trapped (unless INSECURE is defined).
1127 
1128  Magic tag
1129  A cross-check field that should always hold same value as mparams.magic.
1130 
1131  Max allowed footprint
1132  The maximum allowed bytes to allocate from system (zero means no limit)
1133 
1134  Flags
1135  Bits recording whether to use MMAP, locks, or contiguous MORECORE
1136 
1137  Statistics
1138  Each space keeps track of current and maximum system memory
1139  obtained via MORECORE or MMAP.
1140 
1141  Trim support
1142  Fields holding the amount of unused topmost memory that should trigger
1143  trimming, and a counter to force periodic scanning to release unused
1144  non-topmost segments.
1145 
1146  Locking
1147  If USE_LOCKS is defined, the "mutex" lock is acquired and released
1148  around every public call using this mspace.
1149 
1150  Extension support
1151  A void* pointer and a size_t field that can be used to help implement
1152  extensions to this malloc.
1153 */
1154 
1155 /* Bin types, widths and sizes */
1156 #define NSMALLBINS (32U)
1157 #define NTREEBINS (32U)
1158 #define SMALLBIN_SHIFT (3U)
1159 #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
1160 #define TREEBIN_SHIFT (8U)
1161 #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
1162 #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
1163 #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
1164 
1165 struct malloc_state {
1166  binmap_t smallmap;
1167  binmap_t treemap;
1168  size_t dvsize;
1169  size_t topsize;
1170  char* least_addr;
1171  mchunkptr dv;
1172  mchunkptr top;
1173  size_t trim_check;
1174  size_t release_checks;
1175  size_t magic;
1176  mchunkptr smallbins[(NSMALLBINS+1)*2];
1177  tbinptr treebins[NTREEBINS];
1178  size_t footprint;
1179  size_t max_footprint;
1180  size_t footprint_limit; /* zero means no limit */
1181  flag_t mflags;
1182 #if USE_LOCKS
1183  MLOCK_T mutex; /* locate lock among fields that rarely change */
1184 #endif /* USE_LOCKS */
1185  msegment seg;
1186  void* extp; /* Unused but available for extensions */
1187  size_t exts;
1188 };
1189 
1190 typedef struct malloc_state* mstate;
1191 
1192 /* ------------- Global malloc_state and malloc_params ------------------- */
1193 
1194 /*
1195  malloc_params holds global properties, including those that can be
1196  dynamically set using mallopt. There is a single instance, mparams,
1197  initialized in init_mparams. Note that the non-zeroness of "magic"
1198  also serves as an initialization flag.
1199 */
1200 
1201 struct malloc_params {
1202  size_t magic;
1203  size_t page_size;
1204  size_t granularity;
1205  size_t mmap_threshold;
1206  size_t trim_threshold;
1207  flag_t default_mflags;
1208 };
1209 
1210 static struct malloc_params mparams;
1211 
1212 /* Ensure mparams initialized */
1213 #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams())
1214 
1215 #if !ONLY_MSPACES
1216 
1217 /* The global malloc_state used for all non-"mspace" calls */
1218 static struct malloc_state _gm_;
1219 #define gm (&_gm_)
1220 #define is_global(M) ((M) == &_gm_)
1221 
1222 #endif /* !ONLY_MSPACES */
1223 
1224 #define is_initialized(M) ((M)->top != 0)
1225 
1226 /* -------------------------- system alloc setup ------------------------- */
1227 
1228 /* Operations on mflags */
1229 
1230 #define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
1231 #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
1232 #if USE_LOCKS
1233 #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
1234 #else
1235 #define disable_lock(M)
1236 #endif
1237 
1238 #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
1239 #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
1240 #if HAVE_MMAP
1241 #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
1242 #else
1243 #define disable_mmap(M)
1244 #endif
1245 
1246 #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
1247 #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
1248 #define use_noexpand(M) ((M)->mflags & USE_NOEXPAND_BIT)
1249 #define disable_expand(M) ((M)->mflags |= USE_NOEXPAND_BIT)
1250 #define use_trace(M) ((M)->mflags & USE_TRACE_BIT)
1251 #define enable_trace(M) ((M)->mflags |= USE_TRACE_BIT)
1252 #define disable_trace(M) ((M)->mflags &= ~USE_TRACE_BIT)
1253 
1254 #define set_lock(M,L)\
1255  ((M)->mflags = (L)?\
1256  ((M)->mflags | USE_LOCK_BIT) :\
1257  ((M)->mflags & ~USE_LOCK_BIT))
1258 
1259 /* page-align a size */
1260 #define page_align(S)\
1261  (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
1262 
1263 /* granularity-align a size */
1264 #define granularity_align(S)\
1265  (((S) + (mparams.granularity - SIZE_T_ONE))\
1266  & ~(mparams.granularity - SIZE_T_ONE))
1267 
1268 
1269 /* For mmap, use granularity alignment on windows, else page-align */
1270 #ifdef WIN32
1271 #define mmap_align(S) granularity_align(S)
1272 #else
1273 #define mmap_align(S) page_align(S)
1274 #endif
1275 
1276 /* For sys_alloc, enough padding to ensure can malloc request on success */
1277 #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT)
1278 
1279 #define is_page_aligned(S)\
1280  (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
1281 #define is_granularity_aligned(S)\
1282  (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
1283 
1284 /* True if segment S holds address A */
1285 #define segment_holds(S, A)\
1286  ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
1287 
1288 /* Return segment holding given address */
1290 static msegmentptr segment_holding(mstate m, char* addr) {
1291  msegmentptr sp = &m->seg;
1292  for (;;) {
1293  if (addr >= sp->base && addr < sp->base + sp->size)
1294  return sp;
1295  if ((sp = sp->next) == 0)
1296  return 0;
1297  }
1298 }
1299 
1300 /* Return true if segment contains a segment link */
1302 static int has_segment_link(mstate m, msegmentptr ss) {
1303  msegmentptr sp = &m->seg;
1304  for (;;) {
1305  if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size)
1306  return 1;
1307  if ((sp = sp->next) == 0)
1308  return 0;
1309  }
1310 }
1311 
1312 #ifndef MORECORE_CANNOT_TRIM
1313 #define should_trim(M,s) ((s) > (M)->trim_check)
1314 #else /* MORECORE_CANNOT_TRIM */
1315 #define should_trim(M,s) (0)
1316 #endif /* MORECORE_CANNOT_TRIM */
1317 
1318 /*
1319  TOP_FOOT_SIZE is padding at the end of a segment, including space
1320  that may be needed to place segment records and fenceposts when new
1321  noncontiguous segments are added.
1322 */
1323 #define TOP_FOOT_SIZE\
1324  (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
1325 
1326 
1327 /* ------------------------------- Hooks -------------------------------- */
1328 
1329 /*
1330  PREACTION should be defined to return 0 on success, and nonzero on
1331  failure. If you are not using locking, you can redefine these to do
1332  anything you like.
1333 */
1334 
1335 #if USE_LOCKS
1336 #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
1337 #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
1338 #else /* USE_LOCKS */
1339 
1340 #ifndef PREACTION
1341 #define PREACTION(M) (0)
1342 #endif /* PREACTION */
1343 
1344 #ifndef POSTACTION
1345 #define POSTACTION(M)
1346 #endif /* POSTACTION */
1347 
1348 #endif /* USE_LOCKS */
1349 
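/*
  Illustrative sketch (editor addition): how the hooks above are used.
  Every public entry point brackets its body with PREACTION/POSTACTION,
  which take and release the per-mspace lock only when locking is
  enabled for that mspace.
*/
static void example_public_op(mstate m) {
  if (!PREACTION(m)) {         /* 0 once the lock is held (or not needed) */
    /* ... body of a malloc/free/realloc-style operation on m ... */
    POSTACTION(m);             /* drop the lock if it was taken */
  }
}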
1350 /*
1351  CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
1352  USAGE_ERROR_ACTION is triggered on detected bad frees and
1353  reallocs. The argument p is an address that might have triggered the
1354  fault. It is ignored by the two predefined actions, but might be
1355  useful in custom actions that try to help diagnose errors.
1356 */
1357 
1358 #if PROCEED_ON_ERROR
1359 
1360 /* A count of the number of corruption errors causing resets */
1361 int malloc_corruption_error_count;
1362 
1363 /* default corruption action */
1364 static void reset_on_error(mstate m);
1365 
1366 #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
1367 #define USAGE_ERROR_ACTION(m, p)
1368 
1369 #else /* PROCEED_ON_ERROR */
1370 
1371 #ifndef CORRUPTION_ERROR_ACTION
1372 #define CORRUPTION_ERROR_ACTION(m) DLM_ABORT
1373 #endif /* CORRUPTION_ERROR_ACTION */
1374 
1375 #ifndef USAGE_ERROR_ACTION
1376 #define USAGE_ERROR_ACTION(m,p) DLM_ABORT
1377 #endif /* USAGE_ERROR_ACTION */
1378 
1379 #endif /* PROCEED_ON_ERROR */
1380 
1381 
1382 /* -------------------------- Debugging setup ---------------------------- */
1383 
1384 #if ! DEBUG
1385 
1386 #define check_free_chunk(M,P)
1387 #define check_inuse_chunk(M,P)
1388 #define check_malloced_chunk(M,P,N)
1389 #define check_mmapped_chunk(M,P)
1390 #define check_malloc_state(M)
1391 #define check_top_chunk(M,P)
1392 
1393 #else /* DEBUG */
1394 #define check_free_chunk(M,P) do_check_free_chunk(M,P)
1395 #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
1396 #define check_top_chunk(M,P) do_check_top_chunk(M,P)
1397 #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
1398 #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
1399 #define check_malloc_state(M) do_check_malloc_state(M)
1400 
1401 static void do_check_any_chunk(mstate m, mchunkptr p);
1402 static void do_check_top_chunk(mstate m, mchunkptr p);
1403 static void do_check_mmapped_chunk(mstate m, mchunkptr p);
1404 static void do_check_inuse_chunk(mstate m, mchunkptr p);
1405 static void do_check_free_chunk(mstate m, mchunkptr p);
1406 static void do_check_malloced_chunk(mstate m, void* mem, size_t s);
1407 static void do_check_tree(mstate m, tchunkptr t);
1408 static void do_check_treebin(mstate m, bindex_t i);
1409 static void do_check_smallbin(mstate m, bindex_t i);
1410 static void do_check_malloc_state(mstate m);
1411 static int bin_find(mstate m, mchunkptr x);
1412 static size_t traverse_and_check(mstate m);
1413 #endif /* DEBUG */
1414 
1415 /* ---------------------------- Indexing Bins ---------------------------- */
1416 
1417 #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
1418 #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT)
1419 #define small_index2size(i) ((i) << SMALLBIN_SHIFT)
1420 #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
1421 
1422 /* addressing by index. See above about smallbin repositioning */
1423 #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
1424 #define treebin_at(M,i) (&((M)->treebins[i]))
1425 
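/*
  Worked example (editor addition, SMALLBIN_SHIFT == 3): a padded size of
  32 gives small_index(32) == 4 and small_index2size(4) == 32, so smallbin
  4 holds exactly the 32-byte chunks; sizes of MIN_LARGE_SIZE (256) and up
  fail is_small and go to the treebins instead.
*/
static bindex_t example_smallbin_index(size_t padded_size) {
  return is_small(padded_size) ? small_index(padded_size) : (bindex_t)0;
}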
1426 /* assign tree index for size S to variable I. Use x86 asm if possible */
1427 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1428 #define compute_tree_index(S, I)\
1429 {\
1430  unsigned int X = S >> TREEBIN_SHIFT;\
1431  if (X == 0)\
1432  I = 0;\
1433  else if (X > 0xFFFF)\
1434  I = NTREEBINS-1;\
1435  else {\
1436  unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \
1437  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1438  }\
1439 }
1440 
1441 #elif defined (__INTEL_COMPILER)
1442 #define compute_tree_index(S, I)\
1443 {\
1444  size_t X = S >> TREEBIN_SHIFT;\
1445  if (X == 0)\
1446  I = 0;\
1447  else if (X > 0xFFFF)\
1448  I = NTREEBINS-1;\
1449  else {\
1450  unsigned int K = _bit_scan_reverse (X); \
1451  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1452  }\
1453 }
1454 
1455 #elif defined(_MSC_VER) && _MSC_VER>=1300
1456 #define compute_tree_index(S, I)\
1457 {\
1458  size_t X = S >> TREEBIN_SHIFT;\
1459  if (X == 0)\
1460  I = 0;\
1461  else if (X > 0xFFFF)\
1462  I = NTREEBINS-1;\
1463  else {\
1464  unsigned int K;\
1465  _BitScanReverse((DWORD *) &K, (DWORD) X);\
1466  I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
1467  }\
1468 }
1469 
1470 #else /* GNUC */
1471 #define compute_tree_index(S, I)\
1472 {\
1473  size_t X = S >> TREEBIN_SHIFT;\
1474  if (X == 0)\
1475  I = 0;\
1476  else if (X > 0xFFFF)\
1477  I = NTREEBINS-1;\
1478  else {\
1479  unsigned int Y = (unsigned int)X;\
1480  unsigned int N = ((Y - 0x100) >> 16) & 8;\
1481  unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
1482  N += K;\
1483  N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
1484  K = 14 - N + ((Y <<= K) >> 15);\
1485  I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
1486  }\
1487 }
1488 #endif /* GNUC */
1489 
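/*
  Illustrative sketch (editor addition): compute_tree_index is a statement
  macro that assigns the bin index into its second argument.  For example
  a 256-byte chunk maps to treebin 0, 1024 bytes to treebin 4, and 1536
  bytes to treebin 5 (each pair of bins covers one power-of-two range).
*/
static bindex_t example_treebin_index(size_t s) {
  bindex_t i;
  compute_tree_index(s, i);
  return i;
}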
1490 /* Bit representing maximum resolved size in a treebin at i */
1491 #define bit_for_tree_index(i) \
1492  (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
1493 
1494 /* Shift placing maximum resolved bit in a treebin at i as sign bit */
1495 #define leftshift_for_tree_index(i) \
1496  ((i == NTREEBINS-1)? 0 : \
1497  ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
1498 
1499 /* The size of the smallest chunk held in bin with index i */
1500 #define minsize_for_tree_index(i) \
1501  ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
1502  (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
1503 
1504 
1505 /* ------------------------ Operations on bin maps ----------------------- */
1506 
1507 /* bit corresponding to given index */
1508 #define idx2bit(i) ((binmap_t)(1) << (i))
1509 
1510 /* Mark/Clear bits with given index */
1511 #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
1512 #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
1513 #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
1514 
1515 #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
1516 #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
1517 #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
1518 
1519 /* isolate the least set bit of a bitmap */
1520 #define least_bit(x) ((x) & -(x))
1521 
1522 /* mask with all bits to left of least bit of x on */
1523 #define left_bits(x) ((x<<1) | -(x<<1))
1524 
1525 /* mask with all bits to left of or equal to least bit of x on */
1526 #define same_or_left_bits(x) ((x) | -(x))
1527 
1528 /* index corresponding to given bit. Use x86 asm if possible */
1529 
1530 #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))
1531 #define compute_bit2idx(X, I)\
1532 {\
1533  unsigned int J;\
1534  J = __builtin_ctz(X); \
1535  I = (bindex_t)J;\
1536 }
1537 
1538 #elif defined (__INTEL_COMPILER)
1539 #define compute_bit2idx(X, I)\
1540 {\
1541  unsigned int J;\
1542  J = _bit_scan_forward (X); \
1543  I = (bindex_t)J;\
1544 }
1545 
1546 #elif defined(_MSC_VER) && _MSC_VER>=1300
1547 #define compute_bit2idx(X, I)\
1548 {\
1549  unsigned int J;\
1550  _BitScanForward((DWORD *) &J, X);\
1551  I = (bindex_t)J;\
1552 }
1553 
1554 #elif USE_BUILTIN_FFS
1555 #define compute_bit2idx(X, I) I = ffs(X)-1
1556 
1557 #else
1558 #define compute_bit2idx(X, I)\
1559 {\
1560  unsigned int Y = X - 1;\
1561  unsigned int K = Y >> (16-4) & 16;\
1562  unsigned int N = K; Y >>= K;\
1563  N += K = Y >> (8-3) & 8; Y >>= K;\
1564  N += K = Y >> (4-2) & 4; Y >>= K;\
1565  N += K = Y >> (2-1) & 2; Y >>= K;\
1566  N += K = Y >> (1-0) & 1; Y >>= K;\
1567  I = (bindex_t)(N + Y);\
1568 }
1569 #endif /* GNUC */
1570 
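/* Illustrative sketch, not part of the allocator: how the bitmap helpers
   above are combined (mirroring their use in tmalloc_large and dlmalloc
   below) to find the lowest non-empty bin with index strictly greater than
   idx.  Returns -1 if no such bin is marked.  The example_ helper name is
   not used elsewhere. */
static int example_next_nonempty_bin(binmap_t map, bindex_t idx) {
  binmap_t above = left_bits(idx2bit(idx)) & map; /* marked bins above idx */
  if (above == 0)
    return -1;
  else {
    binmap_t leastbit = least_bit(above); /* lowest such bin, as a single bit */
    bindex_t i;
    compute_bit2idx(leastbit, i);         /* e.g. map == 0x208, idx == 4 -> 9 */
    return (int)i;
  }
}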
1571 
1572 /* ----------------------- Runtime Check Support ------------------------- */
1573 
1574 /*
1575  For security, the main invariant is that malloc/free/etc never
1576  writes to a static address other than malloc_state, unless static
1577  malloc_state itself has been corrupted, which cannot occur via
1578  malloc (because of these checks). In essence this means that we
1579  believe all pointers, sizes, maps etc held in malloc_state, but
1580  check all of those linked or offsetted from other embedded data
1581  structures. These checks are interspersed with main code in a way
1582  that tends to minimize their run-time cost.
1583 
1584  When FOOTERS is defined, in addition to range checking, we also
1585  verify footer fields of inuse chunks, which can be used to guarantee
1586  that the mstate controlling malloc/free is intact. This is a
1587  streamlined version of the approach described by William Robertson
1588  et al in "Run-time Detection of Heap-based Overflows" LISA'03
1589  http://www.usenix.org/events/lisa03/tech/robertson.html The footer
1590  of an inuse chunk holds the xor of its mstate and a random seed,
1591  that is checked upon calls to free() and realloc(). This is
1592  (probabilistically) unguessable from outside the program, but can be
1593  computed by any code successfully malloc'ing any chunk, so does not
1594  itself provide protection against code that has already broken
1595  security through some other means. Unlike Robertson et al, we
1596  always dynamically check addresses of all offset chunks (previous,
1597  next, etc). This turns out to be cheaper than relying on hashes.
1598 */
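/* Illustrative sketch, not part of the allocator: when FOOTERS is defined,
   the footer of an inuse chunk p of size s holds (mstate ^ mparams.magic),
   so the owning mstate can be recovered and re-verified on free() and
   realloc().  This mirrors mark_inuse_foot/get_mstate_for defined below;
   the example_ helper name is not used elsewhere. */
static int example_footer_is_consistent(mstate m, mchunkptr p, size_t s) {
  size_t foot = ((mchunkptr)((char*)p + s))->prev_foot; /* written by mark_inuse_foot */
  mstate owner = (mstate)(foot ^ mparams.magic);        /* recovered as in get_mstate_for */
  return owner == m && owner->magic == mparams.magic;   /* checked as in ok_magic */
}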
1599 
1600 #if !INSECURE
1601 /* Check if address a is at least as high as any from MORECORE or MMAP */
1602 #define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
1603 /* Check if address of next chunk n is higher than base chunk p */
1604 #define ok_next(p, n) ((char*)(p) < (char*)(n))
1605 /* Check if p has inuse status */
1606 #define ok_inuse(p) is_inuse(p)
1607 /* Check if p has its pinuse bit on */
1608 #define ok_pinuse(p) pinuse(p)
1609 
1610 #else /* !INSECURE */
1611 #define ok_address(M, a) (1)
1612 #define ok_next(b, n) (1)
1613 #define ok_inuse(p) (1)
1614 #define ok_pinuse(p) (1)
1615 #endif /* !INSECURE */
1616 
1617 #if (FOOTERS && !INSECURE)
1618 /* Check if (alleged) mstate m has expected magic field */
1620 static inline int
1621 ok_magic (const mstate m)
1622 {
1623  return (m->magic == mparams.magic);
1624 }
1625 #else /* (FOOTERS && !INSECURE) */
1626 #define ok_magic(M) (1)
1627 #endif /* (FOOTERS && !INSECURE) */
1628 
1629 /* In gcc, use __builtin_expect to minimize impact of checks */
1630 #if !INSECURE
1631 #if defined(__GNUC__) && __GNUC__ >= 3
1632 #define RTCHECK(e) __builtin_expect(e, 1)
1633 #else /* GNUC */
1634 #define RTCHECK(e) (e)
1635 #endif /* GNUC */
1636 #else /* !INSECURE */
1637 #define RTCHECK(e) (1)
1638 #endif /* !INSECURE */
1639 
1640 /* macros to set up inuse chunks with or without footers */
1641 
1642 #if !FOOTERS
1643 
1644 #define mark_inuse_foot(M,p,s)
1645 
1646 /* Macros for setting head/foot of non-mmapped chunks */
1647 
1648 /* Set cinuse bit and pinuse bit of next chunk */
1649 #define set_inuse(M,p,s)\
1650  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1651  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1652 
1653 /* Set cinuse and pinuse of this chunk and pinuse of next chunk */
1654 #define set_inuse_and_pinuse(M,p,s)\
1655  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1656  ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
1657 
1658 /* Set size, cinuse and pinuse bit of this chunk */
1659 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1660  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
1661 
1662 #else /* FOOTERS */
1663 
1664 /* Set foot of inuse chunk to be xor of mstate and seed */
1665 #define mark_inuse_foot(M,p,s)\
1666  (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
1667 
1668 #define get_mstate_for(p)\
1669  ((mstate)(((mchunkptr)((char*)(p) +\
1670  (chunksize(p))))->prev_foot ^ mparams.magic))
1671 
1672 #define set_inuse(M,p,s)\
1673  ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
1674  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
1675  mark_inuse_foot(M,p,s))
1676 
1677 #define set_inuse_and_pinuse(M,p,s)\
1678  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1679  (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
1680  mark_inuse_foot(M,p,s))
1681 
1682 #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
1683  ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
1684  mark_inuse_foot(M, p, s))
1685 
1686 #endif /* !FOOTERS */
1687 
1688 /* ---------------------------- setting mparams -------------------------- */
1689 
1690 #if LOCK_AT_FORK
1691 static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); }
1692 static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); }
1693 static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); }
1694 #endif /* LOCK_AT_FORK */
1695 
1696 /* Initialize mparams */
1697 static int init_mparams(void) {
1698 #ifdef NEED_GLOBAL_LOCK_INIT
1699  if (malloc_global_mutex_status <= 0)
1700  init_malloc_global_mutex();
1701 #endif
1702 
1703  ACQUIRE_MALLOC_GLOBAL_LOCK();
1704  if (mparams.magic == 0) {
1705  size_t magic;
1706  size_t psize;
1707  size_t gsize;
1708 
1709 #ifndef WIN32
1710  psize = malloc_getpagesize;
1711  gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize);
1712 #else /* WIN32 */
1713  {
1714  SYSTEM_INFO system_info;
1715  GetSystemInfo(&system_info);
1716  psize = system_info.dwPageSize;
1717  gsize = ((DEFAULT_GRANULARITY != 0)?
1718  DEFAULT_GRANULARITY : system_info.dwAllocationGranularity);
1719  }
1720 #endif /* WIN32 */
1721 
1722  /* Sanity-check configuration:
1723  size_t must be unsigned and as wide as pointer type.
1724  ints must be at least 4 bytes.
1725  alignment must be at least 8.
1726  Alignment, min chunk size, and page size must all be powers of 2.
1727  */
1728  if ((sizeof(size_t) != sizeof(char*)) ||
1729  (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
1730  (sizeof(int) < 4) ||
1731  (MALLOC_ALIGNMENT < (size_t)8U) ||
1732  ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) ||
1733  ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) ||
1734  ((gsize & (gsize-SIZE_T_ONE)) != 0) ||
1735  ((psize & (psize-SIZE_T_ONE)) != 0))
1736  DLM_ABORT;
1737  mparams.granularity = gsize;
1738  mparams.page_size = psize;
1739  mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
1740  mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
1741 #if MORECORE_CONTIGUOUS
1742  mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT;
1743 #else /* MORECORE_CONTIGUOUS */
1744  mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT;
1745 #endif /* MORECORE_CONTIGUOUS */
1746 
1747 #if !ONLY_MSPACES
1748  /* Set up lock for main malloc area */
1749  gm->mflags = mparams.default_mflags;
1750  (void)INITIAL_LOCK(&gm->mutex);
1751 #endif
1752 #if LOCK_AT_FORK
1753  pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child);
1754 #endif
1755 
1756  {
1757 #ifndef DLM_MAGIC_CONSTANT
1758 #if USE_DEV_RANDOM
1759  int fd;
1760  unsigned char buf[sizeof(size_t)];
1761  /* Try to use /dev/urandom, else fall back on using time */
1762  if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
1763  read(fd, buf, sizeof(buf)) == sizeof(buf)) {
1764  magic = *((size_t *) buf);
1765  close(fd);
1766  }
1767  else
1768 #endif /* USE_DEV_RANDOM */
1769 #ifdef WIN32
1770  magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U);
1771 #elif defined(LACKS_TIME_H)
1772  magic = (size_t)&magic ^ (size_t)0x55555555U;
1773 #else
1774  magic = (size_t)(time(0) ^ (size_t)0x55555555U);
1775 #endif
1776  magic |= (size_t)8U; /* ensure nonzero */
1777  magic &= ~(size_t)7U; /* improve chances of fault for bad values */
1778 #else
1779  magic = DLM_MAGIC_CONSTANT;
1780 #endif
1781  /* Until memory ordering modes are commonly available, use a volatile write */
1782  (*(volatile size_t *)(&(mparams.magic))) = magic;
1783  }
1784  }
1785 
1786  RELEASE_MALLOC_GLOBAL_LOCK();
1787  return 1;
1788 }
1789 
1790 /* support for mallopt */
1791 static int change_mparam(int param_number, int value) {
1792  size_t val;
1793  ensure_initialization();
1794  val = (value == -1)? MAX_SIZE_T : (size_t)value;
1795  switch(param_number) {
1796  case M_TRIM_THRESHOLD:
1797  mparams.trim_threshold = val;
1798  return 1;
1799  case M_GRANULARITY:
1800  if (val >= mparams.page_size && ((val & (val-1)) == 0)) {
1801  mparams.granularity = val;
1802  return 1;
1803  }
1804  else
1805  return 0;
1806  case M_MMAP_THRESHOLD:
1807  mparams.mmap_threshold = val;
1808  return 1;
1809  default:
1810  return 0;
1811  }
1812 }
1813 
1814 #if DEBUG
1815 /* ------------------------- Debugging Support --------------------------- */
1816 
1817 /* Check properties of any chunk, whether free, inuse, mmapped etc */
1818 static void do_check_any_chunk(mstate m, mchunkptr p) {
1819  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1820  assert(ok_address(m, p));
1821 }
1822 
1823 /* Check properties of top chunk */
1824 static void do_check_top_chunk(mstate m, mchunkptr p) {
1825  msegmentptr sp = segment_holding(m, (char*)p);
1826  size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */
1827  assert(sp != 0);
1828  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1829  assert(ok_address(m, p));
1830  assert(sz == m->topsize);
1831  assert(sz > 0);
1832  assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE);
1833  assert(pinuse(p));
1834  assert(!pinuse(chunk_plus_offset(p, sz)));
1835 }
1836 
1837 /* Check properties of (inuse) mmapped chunks */
1838 static void do_check_mmapped_chunk(mstate m, mchunkptr p) {
1839  size_t sz = chunksize(p);
1840  size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD);
1841  assert(is_mmapped(p));
1842  assert(use_mmap(m));
1843  assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
1844  assert(ok_address(m, p));
1845  assert(!is_small(sz));
1846  assert((len & (mparams.page_size-SIZE_T_ONE)) == 0);
1847  assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
1848  assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0);
1849 }
1850 
1851 /* Check properties of inuse chunks */
1852 static void do_check_inuse_chunk(mstate m, mchunkptr p) {
1853  do_check_any_chunk(m, p);
1854  assert(is_inuse(p));
1855  assert(next_pinuse(p));
1856  /* If not pinuse and not mmapped, previous chunk has OK offset */
1857  assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
1858  if (is_mmapped(p))
1859  do_check_mmapped_chunk(m, p);
1860 }
1861 
1862 /* Check properties of free chunks */
1863 static void do_check_free_chunk(mstate m, mchunkptr p) {
1864  size_t sz = chunksize(p);
1865  mchunkptr next = chunk_plus_offset(p, sz);
1866  do_check_any_chunk(m, p);
1867  assert(!is_inuse(p));
1868  assert(!next_pinuse(p));
1869  assert (!is_mmapped(p));
1870  if (p != m->dv && p != m->top) {
1871  if (sz >= MIN_CHUNK_SIZE) {
1872  assert((sz & CHUNK_ALIGN_MASK) == 0);
1873  assert(is_aligned(chunk2mem(p)));
1874  assert(next->prev_foot == sz);
1875  assert(pinuse(p));
1876  assert (next == m->top || is_inuse(next));
1877  assert(p->fd->bk == p);
1878  assert(p->bk->fd == p);
1879  }
1880  else /* markers are always of size SIZE_T_SIZE */
1881  assert(sz == SIZE_T_SIZE);
1882  }
1883 }
1884 
1885 /* Check properties of malloced chunks at the point they are malloced */
1886 static void do_check_malloced_chunk(mstate m, void* mem, size_t s) {
1887  if (mem != 0) {
1888  mchunkptr p = mem2chunk(mem);
1889  size_t sz = p->head & ~INUSE_BITS;
1890  do_check_inuse_chunk(m, p);
1891  assert((sz & CHUNK_ALIGN_MASK) == 0);
1892  assert(sz >= MIN_CHUNK_SIZE);
1893  assert(sz >= s);
1894  /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
1895  assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
1896  }
1897 }
1898 
1899 /* Check a tree and its subtrees. */
1900 static void do_check_tree(mstate m, tchunkptr t) {
1901  tchunkptr head = 0;
1902  tchunkptr u = t;
1903  bindex_t tindex = t->index;
1904  size_t tsize = chunksize(t);
1905  bindex_t idx;
1906  compute_tree_index(tsize, idx);
1907  assert(tindex == idx);
1908  assert(tsize >= MIN_LARGE_SIZE);
1909  assert(tsize >= minsize_for_tree_index(idx));
1910  assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1))));
1911 
1912  do { /* traverse through chain of same-sized nodes */
1913  do_check_any_chunk(m, ((mchunkptr)u));
1914  assert(u->index == tindex);
1915  assert(chunksize(u) == tsize);
1916  assert(!is_inuse(u));
1917  assert(!next_pinuse(u));
1918  assert(u->fd->bk == u);
1919  assert(u->bk->fd == u);
1920  if (u->parent == 0) {
1921  assert(u->child[0] == 0);
1922  assert(u->child[1] == 0);
1923  }
1924  else {
1925  assert(head == 0); /* only one node on chain has parent */
1926  head = u;
1927  assert(u->parent != u);
1928  assert (u->parent->child[0] == u ||
1929  u->parent->child[1] == u ||
1930  *((tbinptr*)(u->parent)) == u);
1931  if (u->child[0] != 0) {
1932  assert(u->child[0]->parent == u);
1933  assert(u->child[0] != u);
1934  do_check_tree(m, u->child[0]);
1935  }
1936  if (u->child[1] != 0) {
1937  assert(u->child[1]->parent == u);
1938  assert(u->child[1] != u);
1939  do_check_tree(m, u->child[1]);
1940  }
1941  if (u->child[0] != 0 && u->child[1] != 0) {
1942  assert(chunksize(u->child[0]) < chunksize(u->child[1]));
1943  }
1944  }
1945  u = u->fd;
1946  } while (u != t);
1947  assert(head != 0);
1948 }
1949 
1950 /* Check all the chunks in a treebin. */
1951 static void do_check_treebin(mstate m, bindex_t i) {
1952  tbinptr* tb = treebin_at(m, i);
1953  tchunkptr t = *tb;
1954  int empty = (m->treemap & (1U << i)) == 0;
1955  if (t == 0)
1956  assert(empty);
1957  if (!empty)
1958  do_check_tree(m, t);
1959 }
1960 
1961 /* Check all the chunks in a smallbin. */
1962 static void do_check_smallbin(mstate m, bindex_t i) {
1963  sbinptr b = smallbin_at(m, i);
1964  mchunkptr p = b->bk;
1965  unsigned int empty = (m->smallmap & (1U << i)) == 0;
1966  if (p == b)
1967  assert(empty);
1968  if (!empty) {
1969  for (; p != b; p = p->bk) {
1970  size_t size = chunksize(p);
1971  mchunkptr q;
1972  /* each chunk claims to be free */
1973  do_check_free_chunk(m, p);
1974  /* chunk belongs in bin */
1975  assert(small_index(size) == i);
1976  assert(p->bk == b || chunksize(p->bk) == chunksize(p));
1977  /* chunk is followed by an inuse chunk */
1978  q = next_chunk(p);
1979  if (q->head != FENCEPOST_HEAD)
1980  do_check_inuse_chunk(m, q);
1981  }
1982  }
1983 }
1984 
1985 /* Find x in a bin. Used in other check functions. */
1986 static int bin_find(mstate m, mchunkptr x) {
1987  size_t size = chunksize(x);
1988  if (is_small(size)) {
1989  bindex_t sidx = small_index(size);
1990  sbinptr b = smallbin_at(m, sidx);
1991  if (smallmap_is_marked(m, sidx)) {
1992  mchunkptr p = b;
1993  do {
1994  if (p == x)
1995  return 1;
1996  } while ((p = p->fd) != b);
1997  }
1998  }
1999  else {
2000  bindex_t tidx;
2001  compute_tree_index(size, tidx);
2002  if (treemap_is_marked(m, tidx)) {
2003  tchunkptr t = *treebin_at(m, tidx);
2004  size_t sizebits = size << leftshift_for_tree_index(tidx);
2005  while (t != 0 && chunksize(t) != size) {
2006  t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
2007  sizebits <<= 1;
2008  }
2009  if (t != 0) {
2010  tchunkptr u = t;
2011  do {
2012  if (u == (tchunkptr)x)
2013  return 1;
2014  } while ((u = u->fd) != t);
2015  }
2016  }
2017  }
2018  return 0;
2019 }
2020 
2021 /* Traverse each chunk and check it; return total */
2022 static size_t traverse_and_check(mstate m) {
2023  size_t sum = 0;
2024  if (is_initialized(m)) {
2025  msegmentptr s = &m->seg;
2026  sum += m->topsize + TOP_FOOT_SIZE;
2027  while (s != 0) {
2028  mchunkptr q = align_as_chunk(s->base);
2029  mchunkptr lastq = 0;
2030  assert(pinuse(q));
2031  while (segment_holds(s, q) &&
2032  q != m->top && q->head != FENCEPOST_HEAD) {
2033  sum += chunksize(q);
2034  if (is_inuse(q)) {
2035  assert(!bin_find(m, q));
2036  do_check_inuse_chunk(m, q);
2037  }
2038  else {
2039  assert(q == m->dv || bin_find(m, q));
2040  assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */
2041  do_check_free_chunk(m, q);
2042  }
2043  lastq = q;
2044  q = next_chunk(q);
2045  }
2046  s = s->next;
2047  }
2048  }
2049  return sum;
2050 }
2051 
2052 
2053 /* Check all properties of malloc_state. */
2054 static void do_check_malloc_state(mstate m) {
2055  bindex_t i;
2056  size_t total;
2057  /* check bins */
2058  for (i = 0; i < NSMALLBINS; ++i)
2059  do_check_smallbin(m, i);
2060  for (i = 0; i < NTREEBINS; ++i)
2061  do_check_treebin(m, i);
2062 
2063  if (m->dvsize != 0) { /* check dv chunk */
2064  do_check_any_chunk(m, m->dv);
2065  assert(m->dvsize == chunksize(m->dv));
2066  assert(m->dvsize >= MIN_CHUNK_SIZE);
2067  assert(bin_find(m, m->dv) == 0);
2068  }
2069 
2070  if (m->top != 0) { /* check top chunk */
2071  do_check_top_chunk(m, m->top);
2072  /*assert(m->topsize == chunksize(m->top)); redundant */
2073  assert(m->topsize > 0);
2074  assert(bin_find(m, m->top) == 0);
2075  }
2076 
2077  total = traverse_and_check(m);
2078  assert(total <= m->footprint);
2079  assert(m->footprint <= m->max_footprint);
2080 }
2081 #endif /* DEBUG */
2082 
2083 /* ----------------------------- statistics ------------------------------ */
2084 
2085 #if !NO_MALLINFO
2087 static struct dlmallinfo internal_mallinfo(mstate m) {
2088  struct dlmallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2089  ensure_initialization();
2090  if (!PREACTION(m)) {
2091  check_malloc_state(m);
2092  if (is_initialized(m)) {
2093  size_t nfree = SIZE_T_ONE; /* top always free */
2094  size_t mfree = m->topsize + TOP_FOOT_SIZE;
2095  size_t sum = mfree;
2096  msegmentptr s = &m->seg;
2097  while (s != 0) {
2098  mchunkptr q = align_as_chunk(s->base);
2099  while (segment_holds(s, q) &&
2100  q != m->top && q->head != FENCEPOST_HEAD) {
2101  size_t sz = chunksize(q);
2102  sum += sz;
2103  if (!is_inuse(q)) {
2104  mfree += sz;
2105  ++nfree;
2106  }
2107  q = next_chunk(q);
2108  }
2109  s = s->next;
2110  }
2111 
2112  nm.arena = sum;
2113  nm.ordblks = nfree;
2114  nm.hblkhd = m->footprint - sum;
2115  nm.usmblks = m->max_footprint;
2116  nm.uordblks = m->footprint - mfree;
2117  nm.fordblks = mfree;
2118  nm.keepcost = m->topsize;
2119  }
2120 
2121  POSTACTION(m);
2122  }
2123  return nm;
2124 }
2125 #endif /* !NO_MALLINFO */
2126 
2127 #if !NO_MALLOC_STATS
2128 static void internal_malloc_stats(mstate m) {
2129  ensure_initialization();
2130  if (!PREACTION(m)) {
2131  size_t maxfp = 0;
2132  size_t fp = 0;
2133  size_t used = 0;
2134  check_malloc_state(m);
2135  if (is_initialized(m)) {
2136  msegmentptr s = &m->seg;
2137  maxfp = m->max_footprint;
2138  fp = m->footprint;
2139  used = fp - (m->topsize + TOP_FOOT_SIZE);
2140 
2141  while (s != 0) {
2142  mchunkptr q = align_as_chunk(s->base);
2143  while (segment_holds(s, q) &&
2144  q != m->top && q->head != FENCEPOST_HEAD) {
2145  if (!is_inuse(q))
2146  used -= chunksize(q);
2147  q = next_chunk(q);
2148  }
2149  s = s->next;
2150  }
2151  }
2152  POSTACTION(m); /* drop lock */
2153  fprintf(stderr, "max system bytes = %10lu\n", (unsigned long)(maxfp));
2154  fprintf(stderr, "system bytes = %10lu\n", (unsigned long)(fp));
2155  fprintf(stderr, "in use bytes = %10lu\n", (unsigned long)(used));
2156  }
2157 }
2158 #endif /* NO_MALLOC_STATS */
2159 
2160 /* ----------------------- Operations on smallbins ----------------------- */
2161 
2162 /*
2163  Various forms of linking and unlinking are defined as macros. Even
2164  the ones for trees, which are very long but have very short typical
2165  paths. This is ugly but reduces reliance on inlining support of
2166  compilers.
2167 */
2168 
2169 /* Link a free chunk into a smallbin */
2170 #define insert_small_chunk(M, P, S) {\
2171  bindex_t I = small_index(S);\
2172  mchunkptr B = smallbin_at(M, I);\
2173  mchunkptr F = B;\
2174  assert(S >= MIN_CHUNK_SIZE);\
2175  if (!smallmap_is_marked(M, I))\
2176  mark_smallmap(M, I);\
2177  else if (RTCHECK(ok_address(M, B->fd)))\
2178  F = B->fd;\
2179  else {\
2180  CORRUPTION_ERROR_ACTION(M);\
2181  }\
2182  B->fd = P;\
2183  F->bk = P;\
2184  P->fd = F;\
2185  P->bk = B;\
2186 }
2187 
2188 /* Unlink a chunk from a smallbin */
2189 #define unlink_small_chunk(M, P, S) {\
2190  mchunkptr F = P->fd;\
2191  mchunkptr B = P->bk;\
2192  bindex_t I = small_index(S);\
2193  assert(P != B);\
2194  assert(P != F);\
2195  assert(chunksize(P) == small_index2size(I));\
2196  if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \
2197  if (B == F) {\
2198  clear_smallmap(M, I);\
2199  }\
2200  else if (RTCHECK(B == smallbin_at(M,I) ||\
2201  (ok_address(M, B) && B->fd == P))) {\
2202  F->bk = B;\
2203  B->fd = F;\
2204  }\
2205  else {\
2206  CORRUPTION_ERROR_ACTION(M);\
2207  }\
2208  }\
2209  else {\
2210  CORRUPTION_ERROR_ACTION(M);\
2211  }\
2212 }
2213 
2214 /* Unlink the first chunk from a smallbin */
2215 #define unlink_first_small_chunk(M, B, P, I) {\
2216  mchunkptr F = P->fd;\
2217  assert(P != B);\
2218  assert(P != F);\
2219  assert(chunksize(P) == small_index2size(I));\
2220  if (B == F) {\
2221  clear_smallmap(M, I);\
2222  }\
2223  else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\
2224  F->bk = B;\
2225  B->fd = F;\
2226  }\
2227  else {\
2228  CORRUPTION_ERROR_ACTION(M);\
2229  }\
2230 }
2231 
2232 /* Replace dv node, binning the old one */
2233 /* Used only when dvsize known to be small */
2234 #define replace_dv(M, P, S) {\
2235  size_t DVS = M->dvsize;\
2236  assert(is_small(DVS));\
2237  if (DVS != 0) {\
2238  mchunkptr DV = M->dv;\
2239  insert_small_chunk(M, DV, DVS);\
2240  }\
2241  M->dvsize = S;\
2242  M->dv = P;\
2243 }
2244 
2245 /* ------------------------- Operations on trees ------------------------- */
2246 
2247 /* Insert chunk into tree */
2248 #define insert_large_chunk(M, X, S) {\
2249  tbinptr* H;\
2250  bindex_t I;\
2251  compute_tree_index(S, I);\
2252  H = treebin_at(M, I);\
2253  X->index = I;\
2254  X->child[0] = X->child[1] = 0;\
2255  if (!treemap_is_marked(M, I)) {\
2256  mark_treemap(M, I);\
2257  *H = X;\
2258  X->parent = (tchunkptr)H;\
2259  X->fd = X->bk = X;\
2260  }\
2261  else {\
2262  tchunkptr T = *H;\
2263  size_t K = S << leftshift_for_tree_index(I);\
2264  for (;;) {\
2265  if (chunksize(T) != S) {\
2266  tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
2267  K <<= 1;\
2268  if (*C != 0)\
2269  T = *C;\
2270  else if (RTCHECK(ok_address(M, C))) {\
2271  *C = X;\
2272  X->parent = T;\
2273  X->fd = X->bk = X;\
2274  break;\
2275  }\
2276  else {\
2277  CORRUPTION_ERROR_ACTION(M);\
2278  break;\
2279  }\
2280  }\
2281  else {\
2282  tchunkptr F = T->fd;\
2283  if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
2284  T->fd = F->bk = X;\
2285  X->fd = F;\
2286  X->bk = T;\
2287  X->parent = 0;\
2288  break;\
2289  }\
2290  else {\
2291  CORRUPTION_ERROR_ACTION(M);\
2292  break;\
2293  }\
2294  }\
2295  }\
2296  }\
2297 }
2298 
2299 /*
2300  Unlink steps:
2301 
2302  1. If x is a chained node, unlink it from its same-sized fd/bk links
2303  and choose its bk node as its replacement.
2304  2. If x was the last node of its size, but not a leaf node, it must
2305  be replaced with a leaf node (not merely one with an open left or
2306  right), to make sure that lefts and rights of descendents
2307  correspond properly to bit masks. We use the rightmost descendent
2308  of x. We could use any other leaf, but this is easy to locate and
2309  tends to counteract removal of leftmosts elsewhere, and so keeps
2310  paths shorter than minimally guaranteed. This doesn't loop much
2311  because on average a node in a tree is near the bottom.
2312  3. If x is the base of a chain (i.e., has parent links) relink
2313  x's parent and children to x's replacement (or null if none).
2314 */
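/* Illustrative sketch, not part of the allocator, of step 2 above: when x is
   the last chunk of its size and not a leaf, its replacement is x's rightmost
   descendent, found by walking down and preferring right children until a
   leaf is reached.  The corruption checks performed by unlink_large_chunk are
   omitted; the example_ helper name is not used elsewhere. */
static tchunkptr example_rightmost_descendent(tchunkptr x) {
  tchunkptr r = (x->child[1] != 0)? x->child[1] : x->child[0];
  if (r != 0) {
    while (r->child[1] != 0 || r->child[0] != 0)
      r = (r->child[1] != 0)? r->child[1] : r->child[0];
  }
  return r; /* 0 if x itself is a leaf */
}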
2315 
2316 #define unlink_large_chunk(M, X) {\
2317  tchunkptr XP = X->parent;\
2318  tchunkptr R;\
2319  if (X->bk != X) {\
2320  tchunkptr F = X->fd;\
2321  R = X->bk;\
2322  if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\
2323  F->bk = R;\
2324  R->fd = F;\
2325  }\
2326  else {\
2327  CORRUPTION_ERROR_ACTION(M);\
2328  }\
2329  }\
2330  else {\
2331  tchunkptr* RP;\
2332  if (((R = *(RP = &(X->child[1]))) != 0) ||\
2333  ((R = *(RP = &(X->child[0]))) != 0)) {\
2334  tchunkptr* CP;\
2335  while ((*(CP = &(R->child[1])) != 0) ||\
2336  (*(CP = &(R->child[0])) != 0)) {\
2337  R = *(RP = CP);\
2338  }\
2339  if (RTCHECK(ok_address(M, RP)))\
2340  *RP = 0;\
2341  else {\
2342  CORRUPTION_ERROR_ACTION(M);\
2343  }\
2344  }\
2345  }\
2346  if (XP != 0) {\
2347  tbinptr* H = treebin_at(M, X->index);\
2348  if (X == *H) {\
2349  if ((*H = R) == 0) \
2350  clear_treemap(M, X->index);\
2351  }\
2352  else if (RTCHECK(ok_address(M, XP))) {\
2353  if (XP->child[0] == X) \
2354  XP->child[0] = R;\
2355  else \
2356  XP->child[1] = R;\
2357  }\
2358  else\
2359  CORRUPTION_ERROR_ACTION(M);\
2360  if (R != 0) {\
2361  if (RTCHECK(ok_address(M, R))) {\
2362  tchunkptr C0, C1;\
2363  R->parent = XP;\
2364  if ((C0 = X->child[0]) != 0) {\
2365  if (RTCHECK(ok_address(M, C0))) {\
2366  R->child[0] = C0;\
2367  C0->parent = R;\
2368  }\
2369  else\
2370  CORRUPTION_ERROR_ACTION(M);\
2371  }\
2372  if ((C1 = X->child[1]) != 0) {\
2373  if (RTCHECK(ok_address(M, C1))) {\
2374  R->child[1] = C1;\
2375  C1->parent = R;\
2376  }\
2377  else\
2378  CORRUPTION_ERROR_ACTION(M);\
2379  }\
2380  }\
2381  else\
2382  CORRUPTION_ERROR_ACTION(M);\
2383  }\
2384  }\
2385 }
2386 
2387 /* Relays to large vs small bin operations */
2388 
2389 #define insert_chunk(M, P, S)\
2390  if (is_small(S)) insert_small_chunk(M, P, S)\
2391  else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
2392 
2393 #define unlink_chunk(M, P, S)\
2394  if (is_small(S)) unlink_small_chunk(M, P, S)\
2395  else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
2396 
2397 
2398 /* Relays to internal calls to malloc/free from realloc, memalign etc */
2399 
2400 #if ONLY_MSPACES
2401 #define internal_malloc(m, b) mspace_malloc(m, b)
2402 #define internal_free(m, mem) mspace_free(m,mem);
2403 #else /* ONLY_MSPACES */
2404 #if MSPACES
2405 #define internal_malloc(m, b)\
2406  ((m == gm)? dlmalloc(b) : mspace_malloc(m, b))
2407 #define internal_free(m, mem)\
2408  if (m == gm) dlfree(mem); else mspace_free(m,mem);
2409 #else /* MSPACES */
2410 #define internal_malloc(m, b) dlmalloc(b)
2411 #define internal_free(m, mem) dlfree(mem)
2412 #endif /* MSPACES */
2413 #endif /* ONLY_MSPACES */
2414 
2415 /* ----------------------- Direct-mmapping chunks ----------------------- */
2416 
2417 /*
2418  Directly mmapped chunks are set up with an offset to the start of
2419  the mmapped region stored in the prev_foot field of the chunk. This
2420  allows reconstruction of the required argument to MUNMAP when freed,
2421  and also allows adjustment of the returned chunk to meet alignment
2422  requirements (especially in memalign).
2423 */
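/* Illustrative sketch, not part of the allocator: for a directly mmapped
   chunk p, prev_foot stores the distance from the start of the mapping to p,
   so the (address, length) pair for MUNMAP can be rebuilt when the chunk is
   freed.  This mirrors the arithmetic in mmap_alloc below and in the free
   paths; the example_ helper name is not used elsewhere. */
static void example_mmapped_region(mchunkptr p, char** region_base, size_t* region_size) {
  size_t offset = p->prev_foot;                         /* alignment offset saved by mmap_alloc */
  *region_base = (char*)p - offset;                     /* start of the mmapped region */
  *region_size = chunksize(p) + offset + MMAP_FOOT_PAD; /* full length handed to MUNMAP */
}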
2424 
2425 /* Malloc using mmap */
2426 static void* mmap_alloc(mstate m, size_t nb) {
2427  size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2428  if (m->footprint_limit != 0) {
2429  size_t fp = m->footprint + mmsize;
2430  if (fp <= m->footprint || fp > m->footprint_limit)
2431  return 0;
2432  }
2433  if (mmsize > nb) { /* Check for wrap around 0 */
2434  char* mm = (char*)(CALL_DIRECT_MMAP(mmsize));
2435  if (mm != CMFAIL) {
2436  size_t offset = align_offset(chunk2mem(mm));
2437  size_t psize = mmsize - offset - MMAP_FOOT_PAD;
2438  mchunkptr p = (mchunkptr)(mm + offset);
2439  p->prev_foot = offset;
2440  p->head = psize;
2441  mark_inuse_foot(m, p, psize);
2442  chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
2443  chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0;
2444 
2445  if (m->least_addr == 0 || mm < m->least_addr)
2446  m->least_addr = mm;
2447  if ((m->footprint += mmsize) > m->max_footprint)
2448  m->max_footprint = m->footprint;
2449  assert(is_aligned(chunk2mem(p)));
2450  check_mmapped_chunk(m, p);
2451  return chunk2mem(p);
2452  }
2453  }
2454  return 0;
2455 }
2456 
2457 /* Realloc using mmap */
2458 static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) {
2459  size_t oldsize = chunksize(oldp);
2460  (void)flags; /* placate people compiling -Wunused */
2461  if (is_small(nb)) /* Can't shrink mmap regions below small size */
2462  return 0;
2463  /* Keep old chunk if big enough but not too big */
2464  if (oldsize >= nb + SIZE_T_SIZE &&
2465  (oldsize - nb) <= (mparams.granularity << 1))
2466  return oldp;
2467  else {
2468  size_t offset = oldp->prev_foot;
2469  size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
2470  size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2471  char* cp = (char*)CALL_MREMAP((char*)oldp - offset,
2472  oldmmsize, newmmsize, flags);
2473  if (cp != CMFAIL) {
2474  mchunkptr newp = (mchunkptr)(cp + offset);
2475  size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
2476  newp->head = psize;
2477  mark_inuse_foot(m, newp, psize);
2478  chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
2479  chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0;
2480 
2481  if (cp < m->least_addr)
2482  m->least_addr = cp;
2483  if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
2484  m->max_footprint = m->footprint;
2485  check_mmapped_chunk(m, newp);
2486  return newp;
2487  }
2488  }
2489  return 0;
2490 }
2491 
2492 
2493 /* -------------------------- mspace management -------------------------- */
2494 
2495 /* Initialize top chunk and its size */
2497 static void init_top(mstate m, mchunkptr p, size_t psize) {
2498  /* Ensure alignment */
2499  size_t offset = align_offset(chunk2mem(p));
2500  p = (mchunkptr)((char*)p + offset);
2501  psize -= offset;
2502 
2503  m->top = p;
2504  m->topsize = psize;
2505  p->head = psize | PINUSE_BIT;
2506  /* set size of fake trailing chunk holding overhead space only once */
2507  chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
2508  m->trim_check = mparams.trim_threshold; /* reset on each update */
2509 }
2510 
2511 /* Initialize bins for a new mstate that is otherwise zeroed out */
2512 static void init_bins(mstate m) {
2513  /* Establish circular links for smallbins */
2514  bindex_t i;
2515  for (i = 0; i < NSMALLBINS; ++i) {
2516  sbinptr bin = smallbin_at(m,i);
2517  bin->fd = bin->bk = bin;
2518  }
2519 }
2520 
2521 #if PROCEED_ON_ERROR
2522 
2523 /* default corruption action */
2524 static void reset_on_error(mstate m) {
2525  int i;
2526  ++malloc_corruption_error_count;
2527  /* Reinitialize fields to forget about all memory */
2528  m->smallmap = m->treemap = 0;
2529  m->dvsize = m->topsize = 0;
2530  m->seg.base = 0;
2531  m->seg.size = 0;
2532  m->seg.next = 0;
2533  m->top = m->dv = 0;
2534  for (i = 0; i < NTREEBINS; ++i)
2535  *treebin_at(m, i) = 0;
2536  init_bins(m);
2537 }
2538 #endif /* PROCEED_ON_ERROR */
2539 
2540 /* Allocate chunk and prepend remainder with chunk in successor base. */
2542 static void* prepend_alloc(mstate m, char* newbase, char* oldbase,
2543  size_t nb) {
2544  mchunkptr p = align_as_chunk(newbase);
2545  mchunkptr oldfirst = align_as_chunk(oldbase);
2546  size_t psize = (char*)oldfirst - (char*)p;
2547  mchunkptr q = chunk_plus_offset(p, nb);
2548  size_t qsize = psize - nb;
2549  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2550 
2551  assert((char*)oldfirst > (char*)q);
2552  assert(pinuse(oldfirst));
2553  assert(qsize >= MIN_CHUNK_SIZE);
2554 
2555  /* consolidate remainder with first chunk of old base */
2556  if (oldfirst == m->top) {
2557  size_t tsize = m->topsize += qsize;
2558  m->top = q;
2559  q->head = tsize | PINUSE_BIT;
2560  check_top_chunk(m, q);
2561  }
2562  else if (oldfirst == m->dv) {
2563  size_t dsize = m->dvsize += qsize;
2564  m->dv = q;
2565  set_size_and_pinuse_of_free_chunk(q, dsize);
2566  }
2567  else {
2568  if (!is_inuse(oldfirst)) {
2569  size_t nsize = chunksize(oldfirst);
2570  unlink_chunk(m, oldfirst, nsize);
2571  oldfirst = chunk_plus_offset(oldfirst, nsize);
2572  qsize += nsize;
2573  }
2574  set_free_with_pinuse(q, qsize, oldfirst);
2575  insert_chunk(m, q, qsize);
2576  check_free_chunk(m, q);
2577  }
2578 
2579  check_malloced_chunk(m, chunk2mem(p), nb);
2580  return chunk2mem(p);
2581 }
2582 
2583 /* Add a segment to hold a new noncontiguous region */
2585 static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) {
2586  /* Determine locations and sizes of segment, fenceposts, old top */
2587  char* old_top = (char*)m->top;
2588  msegmentptr oldsp = segment_holding(m, old_top);
2589  char* old_end = oldsp->base + oldsp->size;
2590  size_t ssize = pad_request(sizeof(struct malloc_segment));
2591  char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
2592  size_t offset = align_offset(chunk2mem(rawsp));
2593  char* asp = rawsp + offset;
2594  char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp;
2595  mchunkptr sp = (mchunkptr)csp;
2596  msegmentptr ss = (msegmentptr)(chunk2mem(sp));
2597  mchunkptr tnext = chunk_plus_offset(sp, ssize);
2598  mchunkptr p = tnext;
2599  int nfences = 0;
2600 
2601  /* reset top to new space */
2602  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2603 
2604  /* Set up segment record */
2605  assert(is_aligned(ss));
2606  set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
2607  *ss = m->seg; /* Push current record */
2608  m->seg.base = tbase;
2609  m->seg.size = tsize;
2610  m->seg.sflags = mmapped;
2611  m->seg.next = ss;
2612 
2613  /* Insert trailing fenceposts */
2614  for (;;) {
2615  mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
2616  p->head = FENCEPOST_HEAD;
2617  ++nfences;
2618  if ((char*)(&(nextp->head)) < old_end)
2619  p = nextp;
2620  else
2621  break;
2622  }
2623  assert(nfences >= 2);
2624 
2625  /* Insert the rest of old top into a bin as an ordinary free chunk */
2626  if (csp != old_top) {
2627  mchunkptr q = (mchunkptr)old_top;
2628  size_t psize = csp - old_top;
2629  mchunkptr tn = chunk_plus_offset(q, psize);
2630  set_free_with_pinuse(q, psize, tn);
2631  insert_chunk(m, q, psize);
2632  }
2633 
2634  check_top_chunk(m, m->top);
2635 }
2636 
2637 /* -------------------------- System allocation -------------------------- */
2638 
2639 /* Get memory from system using MORECORE or MMAP */
2641 static void* sys_alloc(mstate m, size_t nb) {
2642  char* tbase = CMFAIL;
2643  size_t tsize = 0;
2644  flag_t mmap_flag = 0;
2645  size_t asize; /* allocation size */
2646 
2647  ensure_initialization();
2648 
2649  if (use_noexpand(m))
2650  return 0;
2651 
2652  /* Directly map large chunks, but only if already initialized */
2653  if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) {
2654  void* mem = mmap_alloc(m, nb);
2655  if (mem != 0)
2656  return mem;
2657  }
2658 
2659  asize = granularity_align(nb + SYS_ALLOC_PADDING);
2660  if (asize <= nb)
2661  return 0; /* wraparound */
2662  if (m->footprint_limit != 0) {
2663  size_t fp = m->footprint + asize;
2664  if (fp <= m->footprint || fp > m->footprint_limit)
2665  return 0;
2666  }
2667 
2668  /*
2669  Try getting memory in any of three ways (in most-preferred to
2670  least-preferred order):
2671  1. A call to MORECORE that can normally contiguously extend memory.
2672  (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
2673  main space is mmapped or a previous contiguous call failed)
2674  2. A call to MMAP new space (disabled if not HAVE_MMAP).
2675  Note that under the default settings, if MORECORE is unable to
2676  fulfill a request, and HAVE_MMAP is true, then mmap is
2677  used as a noncontiguous system allocator. This is a useful backup
2678  strategy for systems with holes in address spaces -- in this case
2679  sbrk cannot contiguously expand the heap, but mmap may be able to
2680  find space.
2681  3. A call to MORECORE that cannot usually contiguously extend memory.
2682  (disabled if not HAVE_MORECORE)
2683 
2684  In all cases, we need to request enough bytes from system to ensure
2685  we can malloc nb bytes upon success, so pad with enough space for
2686  top_foot, plus alignment-pad to make sure we don't lose bytes if
2687  not on boundary, and round this up to a granularity unit.
2688  */
2689 
2691  char* br = CMFAIL;
2692  size_t ssize = asize; /* sbrk call size */
2693  msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top);
2694  ACQUIRE_MALLOC_GLOBAL_LOCK();
2695 
2696  if (ss == 0) { /* First time through or recovery */
2697  char* base = (char*)CALL_MORECORE(0);
2698  if (base != CMFAIL) {
2699  size_t fp;
2700  /* Adjust to end on a page boundary */
2701  if (!is_page_aligned(base))
2702  ssize += (page_align((size_t)base) - (size_t)base);
2703  fp = m->footprint + ssize; /* recheck limits */
2704  if (ssize > nb && ssize < HALF_MAX_SIZE_T &&
2705  (m->footprint_limit == 0 ||
2706  (fp > m->footprint && fp <= m->footprint_limit)) &&
2707  (br = (char*)(CALL_MORECORE(ssize))) == base) {
2708  tbase = base;
2709  tsize = ssize;
2710  }
2711  }
2712  }
2713  else {
2714  /* Subtract out existing available top space from MORECORE request. */
2715  ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING);
2716  /* Use mem here only if it did contiguously extend old space */
2717  if (ssize < HALF_MAX_SIZE_T &&
2718  (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) {
2719  tbase = br;
2720  tsize = ssize;
2721  }
2722  }
2723 
2724  if (tbase == CMFAIL) { /* Cope with partial failure */
2725  if (br != CMFAIL) { /* Try to use/extend the space we did get */
2726  if (ssize < HALF_MAX_SIZE_T &&
2727  ssize < nb + SYS_ALLOC_PADDING) {
2728  size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize);
2729  if (esize < HALF_MAX_SIZE_T) {
2730  char* end = (char*)CALL_MORECORE(esize);
2731  if (end != CMFAIL)
2732  ssize += esize;
2733  else { /* Can't use; try to release */
2734  (void) CALL_MORECORE(-ssize);
2735  br = CMFAIL;
2736  }
2737  }
2738  }
2739  }
2740  if (br != CMFAIL) { /* Use the space we did get */
2741  tbase = br;
2742  tsize = ssize;
2743  }
2744  else
2745  disable_contiguous(m); /* Don't try contiguous path in the future */
2746  }
2747 
2748  RELEASE_MALLOC_GLOBAL_LOCK();
2749  }
2750 
2751  if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
2752  char* mp = (char*)(CALL_MMAP(asize));
2753  if (mp != CMFAIL) {
2754  tbase = mp;
2755  tsize = asize;
2756  mmap_flag = USE_MMAP_BIT;
2757  }
2758  }
2759 
2760  if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
2761  if (asize < HALF_MAX_SIZE_T) {
2762  char* br = CMFAIL;
2763  char* end = CMFAIL;
2764  ACQUIRE_MALLOC_GLOBAL_LOCK();
2765  br = (char*)(CALL_MORECORE(asize));
2766  end = (char*)(CALL_MORECORE(0));
2767  RELEASE_MALLOC_GLOBAL_LOCK();
2768  if (br != CMFAIL && end != CMFAIL && br < end) {
2769  size_t ssize = end - br;
2770  if (ssize > nb + TOP_FOOT_SIZE) {
2771  tbase = br;
2772  tsize = ssize;
2773  }
2774  }
2775  }
2776  }
2777 
2778  if (tbase != CMFAIL) {
2779 
2780  if ((m->footprint += tsize) > m->max_footprint)
2781  m->max_footprint = m->footprint;
2782 
2783  if (!is_initialized(m)) { /* first-time initialization */
2784  if (m->least_addr == 0 || tbase < m->least_addr)
2785  m->least_addr = tbase;
2786  m->seg.base = tbase;
2787  m->seg.size = tsize;
2788  m->seg.sflags = mmap_flag;
2789  m->magic = mparams.magic;
2790  m->release_checks = MAX_RELEASE_CHECK_RATE;
2791  init_bins(m);
2792 #if !ONLY_MSPACES
2793  if (is_global(m))
2794  init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE);
2795  else
2796 #endif
2797  {
2798  /* Offset top by embedded malloc_state */
2799  mchunkptr mn = next_chunk(mem2chunk(m));
2800  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) -TOP_FOOT_SIZE);
2801  }
2802  }
2803 
2804  else {
2805  /* Try to merge with an existing segment */
2806  msegmentptr sp = &m->seg;
2807  /* Only consider most recent segment if traversal suppressed */
2808  while (sp != 0 && tbase != sp->base + sp->size)
2809  sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2810  if (sp != 0 &&
2811  !is_extern_segment(sp) &&
2812  (sp->sflags & USE_MMAP_BIT) == mmap_flag &&
2813  segment_holds(sp, m->top)) { /* append */
2814  sp->size += tsize;
2815  init_top(m, m->top, m->topsize + tsize);
2816  }
2817  else {
2818  if (tbase < m->least_addr)
2819  m->least_addr = tbase;
2820  sp = &m->seg;
2821  while (sp != 0 && sp->base != tbase + tsize)
2822  sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next;
2823  if (sp != 0 &&
2824  !is_extern_segment(sp) &&
2825  (sp->sflags & USE_MMAP_BIT) == mmap_flag) {
2826  char* oldbase = sp->base;
2827  sp->base = tbase;
2828  sp->size += tsize;
2829  return prepend_alloc(m, tbase, oldbase, nb);
2830  }
2831  else
2832  add_segment(m, tbase, tsize, mmap_flag);
2833  }
2834  }
2835 
2836  if (nb < m->topsize) { /* Allocate from new or extended top space */
2837  size_t rsize = m->topsize -= nb;
2838  mchunkptr p = m->top;
2839  mchunkptr r = m->top = chunk_plus_offset(p, nb);
2840  r->head = rsize | PINUSE_BIT;
2841  set_size_and_pinuse_of_inuse_chunk(m, p, nb);
2842  check_top_chunk(m, m->top);
2843  check_malloced_chunk(m, chunk2mem(p), nb);
2844  return chunk2mem(p);
2845  }
2846  }
2847 
2848  MALLOC_FAILURE_ACTION;
2849  return 0;
2850 }
2851 
2852 /* ----------------------- system deallocation -------------------------- */
2853 
2854 /* Unmap and unlink any mmapped segments that don't contain used chunks */
2856 static size_t release_unused_segments(mstate m) {
2857  size_t released = 0;
2858  int nsegs = 0;
2859  msegmentptr pred = &m->seg;
2860  msegmentptr sp = pred->next;
2861  while (sp != 0) {
2862  char* base = sp->base;
2863  size_t size = sp->size;
2864  msegmentptr next = sp->next;
2865  ++nsegs;
2866  if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
2867  mchunkptr p = align_as_chunk(base);
2868  size_t psize = chunksize(p);
2869  /* Can unmap if first chunk holds entire segment and not pinned */
2870  if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) {
2871  tchunkptr tp = (tchunkptr)p;
2872  assert(segment_holds(sp, (char*)sp));
2873  if (p == m->dv) {
2874  m->dv = 0;
2875  m->dvsize = 0;
2876  }
2877  else {
2878  unlink_large_chunk(m, tp);
2879  }
2880  if (CALL_MUNMAP(base, size) == 0) {
2881  released += size;
2882  m->footprint -= size;
2883  /* unlink obsoleted record */
2884  sp = pred;
2885  sp->next = next;
2886  }
2887  else { /* back out if cannot unmap */
2888  insert_large_chunk(m, tp, psize);
2889  }
2890  }
2891  }
2892  if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */
2893  break;
2894  pred = sp;
2895  sp = next;
2896  }
2897  /* Reset check counter */
2898  m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)?
2899  (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE);
2900  return released;
2901 }
2902 
2904 static int sys_trim(mstate m, size_t pad) {
2905  size_t released = 0;
2906  ensure_initialization();
2907  if (pad < MAX_REQUEST && is_initialized(m)) {
2908  pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
2909 
2910  if (m->topsize > pad) {
2911  /* Shrink top space in granularity-size units, keeping at least one */
2912  size_t unit = mparams.granularity;
2913  size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
2914  SIZE_T_ONE) * unit;
2915  msegmentptr sp = segment_holding(m, (char*)m->top);
2916 
2917  if (!is_extern_segment(sp)) {
2918  if (is_mmapped_segment(sp)) {
2919  if (HAVE_MMAP &&
2920  sp->size >= extra &&
2921  !has_segment_link(m, sp)) { /* can't shrink if pinned */
2922  size_t newsize = sp->size - extra;
2923  (void)newsize; /* placate people compiling -Wunused-variable */
2924  /* Prefer mremap, fall back to munmap */
2925  if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) ||
2926  (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
2927  released = extra;
2928  }
2929  }
2930  }
2931  else if (HAVE_MORECORE) {
2932  if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
2933  extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
2934  ACQUIRE_MALLOC_GLOBAL_LOCK();
2935  {
2936  /* Make sure end of memory is where we last set it. */
2937  char* old_br = (char*)(CALL_MORECORE(0));
2938  if (old_br == sp->base + sp->size) {
2939  char* rel_br = (char*)(CALL_MORECORE(-extra));
2940  char* new_br = (char*)(CALL_MORECORE(0));
2941  if (rel_br != CMFAIL && new_br < old_br)
2942  released = old_br - new_br;
2943  }
2944  }
2945  RELEASE_MALLOC_GLOBAL_LOCK();
2946  }
2947  }
2948 
2949  if (released != 0) {
2950  sp->size -= released;
2951  m->footprint -= released;
2952  init_top(m, m->top, m->topsize - released);
2953  check_top_chunk(m, m->top);
2954  }
2955  }
2956 
2957  /* Unmap any unused mmapped segments */
2958  if (HAVE_MMAP)
2959  released += release_unused_segments(m);
2960 
2961  /* On failure, disable autotrim to avoid repeated failed future calls */
2962  if (released == 0 && m->topsize > m->trim_check)
2963  m->trim_check = MAX_SIZE_T;
2964  }
2965 
2966  return (released != 0)? 1 : 0;
2967 }
2968 
2969 /* Consolidate and bin a chunk. Differs from exported versions
2970  of free mainly in that the chunk need not be marked as inuse.
2971 */
2973 static void dispose_chunk(mstate m, mchunkptr p, size_t psize) {
2974  mchunkptr next = chunk_plus_offset(p, psize);
2975  if (!pinuse(p)) {
2976  mchunkptr prev;
2977  size_t prevsize = p->prev_foot;
2978  if (is_mmapped(p)) {
2979  psize += prevsize + MMAP_FOOT_PAD;
2980  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
2981  m->footprint -= psize;
2982  return;
2983  }
2984  prev = chunk_minus_offset(p, prevsize);
2985  psize += prevsize;
2986  p = prev;
2987  if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */
2988  if (p != m->dv) {
2989  unlink_chunk(m, p, prevsize);
2990  }
2991  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
2992  m->dvsize = psize;
2993  set_free_with_pinuse(p, psize, next);
2994  return;
2995  }
2996  }
2997  else {
2998  CORRUPTION_ERROR_ACTION(m);
2999  return;
3000  }
3001  }
3002  if (RTCHECK(ok_address(m, next))) {
3003  if (!cinuse(next)) { /* consolidate forward */
3004  if (next == m->top) {
3005  size_t tsize = m->topsize += psize;
3006  m->top = p;
3007  p->head = tsize | PINUSE_BIT;
3008  if (p == m->dv) {
3009  m->dv = 0;
3010  m->dvsize = 0;
3011  }
3012  return;
3013  }
3014  else if (next == m->dv) {
3015  size_t dsize = m->dvsize += psize;
3016  m->dv = p;
3017  set_size_and_pinuse_of_free_chunk(p, dsize);
3018  return;
3019  }
3020  else {
3021  size_t nsize = chunksize(next);
3022  psize += nsize;
3023  unlink_chunk(m, next, nsize);
3024  set_size_and_pinuse_of_free_chunk(p, psize);
3025  if (p == m->dv) {
3026  m->dvsize = psize;
3027  return;
3028  }
3029  }
3030  }
3031  else {
3032  set_free_with_pinuse(p, psize, next);
3033  }
3034  insert_chunk(m, p, psize);
3035  }
3036  else {
3037  CORRUPTION_ERROR_ACTION(m);
3038  }
3039 }
3040 
3041 /* ---------------------------- malloc --------------------------- */
3042 
3043 /* allocate a large request from the best fitting chunk in a treebin */
3045 static void* tmalloc_large(mstate m, size_t nb) {
3046  tchunkptr v = 0;
3047  size_t rsize = -nb; /* Unsigned negation */
3048  tchunkptr t;
3049  bindex_t idx;
3050  compute_tree_index(nb, idx);
3051  if ((t = *treebin_at(m, idx)) != 0) {
3052  /* Traverse tree for this bin looking for node with size == nb */
3053  size_t sizebits = nb << leftshift_for_tree_index(idx);
3054  tchunkptr rst = 0; /* The deepest untaken right subtree */
3055  for (;;) {
3056  tchunkptr rt;
3057  size_t trem = chunksize(t) - nb;
3058  if (trem < rsize) {
3059  v = t;
3060  if ((rsize = trem) == 0)
3061  break;
3062  }
3063  rt = t->child[1];
3064  t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1];
3065  if (rt != 0 && rt != t)
3066  rst = rt;
3067  if (t == 0) {
3068  t = rst; /* set t to least subtree holding sizes > nb */
3069  break;
3070  }
3071  sizebits <<= 1;
3072  }
3073  }
3074  if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3075  binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3076  if (leftbits != 0) {
3077  bindex_t i;
3078  binmap_t leastbit = least_bit(leftbits);
3079  compute_bit2idx(leastbit, i);
3080  t = *treebin_at(m, i);
3081  }
3082  }
3083 
3084  while (t != 0) { /* find smallest of tree or subtree */
3085  size_t trem = chunksize(t) - nb;
3086  if (trem < rsize) {
3087  rsize = trem;
3088  v = t;
3089  }
3090  t = leftmost_child(t);
3091  }
3092 
3093  /* If dv is a better fit, return 0 so malloc will use it */
3094  if (v != 0 && rsize < (size_t)(m->dvsize - nb)) {
3095  if (RTCHECK(ok_address(m, v))) { /* split */
3096  mchunkptr r = chunk_plus_offset(v, nb);
3097  assert(chunksize(v) == rsize + nb);
3098  if (RTCHECK(ok_next(v, r))) {
3099  unlink_large_chunk(m, v);
3100  if (rsize < MIN_CHUNK_SIZE)
3101  set_inuse_and_pinuse(m, v, (rsize + nb));
3102  else {
3103  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3104  set_size_and_pinuse_of_free_chunk(r, rsize);
3105  insert_chunk(m, r, rsize);
3106  }
3107  return chunk2mem(v);
3108  }
3109  }
3110  CORRUPTION_ERROR_ACTION(m);
3111  }
3112  return 0;
3113 }
3114 
3115 /* allocate a small request from the best fitting chunk in a treebin */
3117 static void* tmalloc_small(mstate m, size_t nb) {
3118  tchunkptr t, v;
3119  size_t rsize;
3120  bindex_t i;
3121  binmap_t leastbit = least_bit(m->treemap);
3122  compute_bit2idx(leastbit, i);
3123  v = t = *treebin_at(m, i);
3124  rsize = chunksize(t) - nb;
3125 
3126  while ((t = leftmost_child(t)) != 0) {
3127  size_t trem = chunksize(t) - nb;
3128  if (trem < rsize) {
3129  rsize = trem;
3130  v = t;
3131  }
3132  }
3133 
3134  if (RTCHECK(ok_address(m, v))) {
3135  mchunkptr r = chunk_plus_offset(v, nb);
3136  assert(chunksize(v) == rsize + nb);
3137  if (RTCHECK(ok_next(v, r))) {
3138  unlink_large_chunk(m, v);
3139  if (rsize < MIN_CHUNK_SIZE)
3140  set_inuse_and_pinuse(m, v, (rsize + nb));
3141  else {
3142  set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3143  set_size_and_pinuse_of_free_chunk(r, rsize);
3144  replace_dv(m, r, rsize);
3145  }
3146  return chunk2mem(v);
3147  }
3148  }
3149 
3150  CORRUPTION_ERROR_ACTION(m);
3151  return 0;
3152 }
3153 
3154 #if !ONLY_MSPACES
3155 
3156 void* dlmalloc(size_t bytes) {
3157  /*
3158  Basic algorithm:
3159  If a small request (< 256 bytes minus per-chunk overhead):
3160  1. If one exists, use a remainderless chunk in associated smallbin.
3161  (Remainderless means that there are too few excess bytes to
3162  represent as a chunk.)
3163  2. If it is big enough, use the dv chunk, which is normally the
3164  chunk adjacent to the one used for the most recent small request.
3165  3. If one exists, split the smallest available chunk in a bin,
3166  saving remainder in dv.
3167  4. If it is big enough, use the top chunk.
3168  5. If available, get memory from system and use it
3169  Otherwise, for a large request:
3170  1. Find the smallest available binned chunk that fits, and use it
3171  if it is better fitting than dv chunk, splitting if necessary.
3172  2. If better fitting than any binned chunk, use the dv chunk.
3173  3. If it is big enough, use the top chunk.
3174  4. If request size >= mmap threshold, try to directly mmap this chunk.
3175  5. If available, get memory from system and use it
3176 
3177  The ugly goto's here ensure that postaction occurs along all paths.
3178  */
3179 
3180 #if USE_LOCKS
3181  ensure_initialization(); /* initialize in sys_alloc if not using locks */
3182 #endif
3183 
3184  if (!PREACTION(gm)) {
3185  void* mem;
3186  size_t nb;
3187  if (bytes <= MAX_SMALL_REQUEST) {
3188  bindex_t idx;
3189  binmap_t smallbits;
3190  nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
3191  idx = small_index(nb);
3192  smallbits = gm->smallmap >> idx;
3193 
3194  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
3195  mchunkptr b, p;
3196  idx += ~smallbits & 1; /* Uses next bin if idx empty */
3197  b = smallbin_at(gm, idx);
3198  p = b->fd;
3199  assert(chunksize(p) == small_index2size(idx));
3200  unlink_first_small_chunk(gm, b, p, idx);
3201  set_inuse_and_pinuse(gm, p, small_index2size(idx));
3202  mem = chunk2mem(p);
3203  check_malloced_chunk(gm, mem, nb);
3204  goto postaction;
3205  }
3206 
3207  else if (nb > gm->dvsize) {
3208  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
3209  mchunkptr b, p, r;
3210  size_t rsize;
3211  bindex_t i;
3212  binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
3213  binmap_t leastbit = least_bit(leftbits);
3214  compute_bit2idx(leastbit, i);
3215  b = smallbin_at(gm, i);
3216  p = b->fd;
3217  assert(chunksize(p) == small_index2size(i));
3218  unlink_first_small_chunk(gm, b, p, i);
3219  rsize = small_index2size(i) - nb;
3220  /* Fit here cannot be remainderless with 4-byte sizes */
3221  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
3222  set_inuse_and_pinuse(gm, p, small_index2size(i));
3223  else {
3224  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3225  r = chunk_plus_offset(p, nb);
3226  set_size_and_pinuse_of_free_chunk(r, rsize);
3227  replace_dv(gm, r, rsize);
3228  }
3229  mem = chunk2mem(p);
3230  check_malloced_chunk(gm, mem, nb);
3231  goto postaction;
3232  }
3233 
3234  else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) {
3235  check_malloced_chunk(gm, mem, nb);
3236  goto postaction;
3237  }
3238  }
3239  }
3240  else if (bytes >= MAX_REQUEST)
3241  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
3242  else {
3243  nb = pad_request(bytes);
3244  if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
3245  check_malloced_chunk(gm, mem, nb);
3246  goto postaction;
3247  }
3248  }
3249 
3250  if (nb <= gm->dvsize) {
3251  size_t rsize = gm->dvsize - nb;
3252  mchunkptr p = gm->dv;
3253  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
3254  mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
3255  gm->dvsize = rsize;
3256  set_size_and_pinuse_of_free_chunk(r, rsize);
3257  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3258  }
3259  else { /* exhaust dv */
3260  size_t dvs = gm->dvsize;
3261  gm->dvsize = 0;
3262  gm->dv = 0;
3263  set_inuse_and_pinuse(gm, p, dvs);
3264  }
3265  mem = chunk2mem(p);
3266  check_malloced_chunk(gm, mem, nb);
3267  goto postaction;
3268  }
3269 
3270  else if (nb < gm->topsize) { /* Split top */
3271  size_t rsize = gm->topsize -= nb;
3272  mchunkptr p = gm->top;
3273  mchunkptr r = gm->top = chunk_plus_offset(p, nb);
3274  r->head = rsize | PINUSE_BIT;
3275  set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
3276  mem = chunk2mem(p);
3277  check_top_chunk(gm, gm->top);
3278  check_malloced_chunk(gm, mem, nb);
3279  goto postaction;
3280  }
3281 
3282  mem = sys_alloc(gm, nb);
3283 
3284  postaction:
3285  POSTACTION(gm);
3286  return mem;
3287  }
3288 
3289  return 0;
3290 }
3291 
3292 /* ---------------------------- free --------------------------- */
3293 
3294 void dlfree(void* mem) {
3295  /*
3296  Consolidate freed chunks with preceding or succeeding bordering
3297  free chunks, if they exist, and then place in a bin. Intermixed
3298  with special cases for top, dv, mmapped chunks, and usage errors.
3299  */
3300 
3301  if (mem != 0) {
3302  mchunkptr p = mem2chunk(mem);
3303 #if FOOTERS
3304  mstate fm = get_mstate_for(p);
3305  if (!ok_magic(fm)) {
3306  USAGE_ERROR_ACTION(fm, p);
3307  return;
3308  }
3309 #else /* FOOTERS */
3310 #define fm gm
3311 #endif /* FOOTERS */
3312  if (!PREACTION(fm)) {
3313  check_inuse_chunk(fm, p);
3314  if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
3315  size_t psize = chunksize(p);
3316  mchunkptr next = chunk_plus_offset(p, psize);
3317  if (!pinuse(p)) {
3318  size_t prevsize = p->prev_foot;
3319  if (is_mmapped(p)) {
3320  psize += prevsize + MMAP_FOOT_PAD;
3321  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
3322  fm->footprint -= psize;
3323  goto postaction;
3324  }
3325  else {
3326  mchunkptr prev = chunk_minus_offset(p, prevsize);
3327  psize += prevsize;
3328  p = prev;
3329  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
3330  if (p != fm->dv) {
3331  unlink_chunk(fm, p, prevsize);
3332  }
3333  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
3334  fm->dvsize = psize;
3335  set_free_with_pinuse(p, psize, next);
3336  goto postaction;
3337  }
3338  }
3339  else
3340  goto erroraction;
3341  }
3342  }
3343 
3344  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
3345  if (!cinuse(next)) { /* consolidate forward */
3346  if (next == fm->top) {
3347  size_t tsize = fm->topsize += psize;
3348  fm->top = p;
3349  p->head = tsize | PINUSE_BIT;
3350  if (p == fm->dv) {
3351  fm->dv = 0;
3352  fm->dvsize = 0;
3353  }
3354  if (should_trim(fm, tsize))
3355  sys_trim(fm, 0);
3356  goto postaction;
3357  }
3358  else if (next == fm->dv) {
3359  size_t dsize = fm->dvsize += psize;
3360  fm->dv = p;
3361  set_size_and_pinuse_of_free_chunk(p, dsize);
3362  goto postaction;
3363  }
3364  else {
3365  size_t nsize = chunksize(next);
3366  psize += nsize;
3367  unlink_chunk(fm, next, nsize);
3368  set_size_and_pinuse_of_free_chunk(p, psize);
3369  if (p == fm->dv) {
3370  fm->dvsize = psize;
3371  goto postaction;
3372  }
3373  }
3374  }
3375  else
3376  set_free_with_pinuse(p, psize, next);
3377 
3378  if (is_small(psize)) {
3379  insert_small_chunk(fm, p, psize);
3380  check_free_chunk(fm, p);
3381  }
3382  else {
3383  tchunkptr tp = (tchunkptr)p;
3384  insert_large_chunk(fm, tp, psize);
3385  check_free_chunk(fm, p);
3386  if (--fm->release_checks == 0)
3387  release_unused_segments(fm);
3388  }
3389  goto postaction;
3390  }
3391  }
3392  erroraction:
3393  USAGE_ERROR_ACTION(fm, p);
3394  postaction:
3395  POSTACTION(fm);
3396  }
3397  }
3398 #if !FOOTERS
3399 #undef fm
3400 #endif /* FOOTERS */
3401 }
3402 
3403 void* dlcalloc(size_t n_elements, size_t elem_size) {
3404  void* mem;
3405  size_t req = 0;
3406  if (n_elements != 0) {
3407  req = n_elements * elem_size;
3408  if (((n_elements | elem_size) & ~(size_t)0xffff) &&
3409  (req / n_elements != elem_size))
3410  req = MAX_SIZE_T; /* force downstream failure on overflow */
3411  }
3412  mem = dlmalloc(req);
3413  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
3414  memset(mem, 0, req);
3415  return mem;
3416 }
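/*
   Usage sketch (not part of dlmalloc.c): a worked example of the
   multiplication-overflow guard in dlcalloc above.  When n_elements *
   elem_size wraps, forcing the request to MAX_SIZE_T makes the downstream
   dlmalloc call fail instead of handing back an undersized block.  The
   helper name calloc_req_or_max and the standalone main() are hypothetical,
   written only to mirror the test performed above.

   #include <stdint.h>
   #include <stdio.h>

   static size_t calloc_req_or_max(size_t n_elements, size_t elem_size) {
     size_t req = n_elements * elem_size;
     // Same screen as dlcalloc: the division is skipped when both operands
     // fit in 16 bits, because the product cannot wrap in that case.
     if (((n_elements | elem_size) & ~(size_t)0xffff) &&
         (req / n_elements != elem_size))
       req = SIZE_MAX;                            // stands in for MAX_SIZE_T
     return req;
   }

   int main(void) {
     size_t huge = (size_t)1 << (sizeof(size_t) * 8 - 4);
     printf("%zx\n", calloc_req_or_max(huge, 32));  // wraps -> SIZE_MAX
     printf("%zx\n", calloc_req_or_max(100, 16));   // no wrap -> 640
     return 0;
   }
*/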
3417 
3418 #endif /* !ONLY_MSPACES */
3419 
3420 /* ------------ Internal support for realloc, memalign, etc -------------- */
3421 
3422 /* Try to realloc; only in-place unless can_move true */
3423 static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb,
3424  int can_move) {
3425  mchunkptr newp = 0;
3426  size_t oldsize = chunksize(p);
3427  mchunkptr next = chunk_plus_offset(p, oldsize);
3428  if (RTCHECK(ok_address(m, p) && ok_inuse(p) &&
3429  ok_next(p, next) && ok_pinuse(next))) {
3430  if (is_mmapped(p)) {
3431  newp = mmap_resize(m, p, nb, can_move);
3432  }
3433  else if (oldsize >= nb) { /* already big enough */
3434  size_t rsize = oldsize - nb;
3435  if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */
3436  mchunkptr r = chunk_plus_offset(p, nb);
3437  set_inuse(m, p, nb);
3438  set_inuse(m, r, rsize);
3439  dispose_chunk(m, r, rsize);
3440  }
3441  newp = p;
3442  }
3443  else if (next == m->top) { /* extend into top */
3444  if (oldsize + m->topsize > nb) {
3445  size_t newsize = oldsize + m->topsize;
3446  size_t newtopsize = newsize - nb;
3447  mchunkptr newtop = chunk_plus_offset(p, nb);
3448  set_inuse(m, p, nb);
3449  newtop->head = newtopsize |PINUSE_BIT;
3450  m->top = newtop;
3451  m->topsize = newtopsize;
3452  newp = p;
3453  }
3454  }
3455  else if (next == m->dv) { /* extend into dv */
3456  size_t dvs = m->dvsize;
3457  if (oldsize + dvs >= nb) {
3458  size_t dsize = oldsize + dvs - nb;
3459  if (dsize >= MIN_CHUNK_SIZE) {
3460  mchunkptr r = chunk_plus_offset(p, nb);
3461  mchunkptr n = chunk_plus_offset(r, dsize);
3462  set_inuse(m, p, nb);
3463  set_size_and_pinuse_of_free_chunk(r, dsize);
3464  clear_pinuse(n);
3465  m->dvsize = dsize;
3466  m->dv = r;
3467  }
3468  else { /* exhaust dv */
3469  size_t newsize = oldsize + dvs;
3470  set_inuse(m, p, newsize);
3471  m->dvsize = 0;
3472  m->dv = 0;
3473  }
3474  newp = p;
3475  }
3476  }
3477  else if (!cinuse(next)) { /* extend into next free chunk */
3478  size_t nextsize = chunksize(next);
3479  if (oldsize + nextsize >= nb) {
3480  size_t rsize = oldsize + nextsize - nb;
3481  unlink_chunk(m, next, nextsize);
3482  if (rsize < MIN_CHUNK_SIZE) {
3483  size_t newsize = oldsize + nextsize;
3484  set_inuse(m, p, newsize);
3485  }
3486  else {
3487  mchunkptr r = chunk_plus_offset(p, nb);
3488  set_inuse(m, p, nb);
3489  set_inuse(m, r, rsize);
3490  dispose_chunk(m, r, rsize);
3491  }
3492  newp = p;
3493  }
3494  }
3495  }
3496  else {
3497  USAGE_ERROR_ACTION(m, chunk2mem(p));
3498  }
3499  return newp;
3500 }
3501 
3503 static void* internal_memalign(mstate m, size_t alignment, size_t bytes) {
3504  void* mem = 0;
3505  if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
3506  alignment = MIN_CHUNK_SIZE;
3507  if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */
3508  size_t a = MALLOC_ALIGNMENT << 1;
3509  while (a < alignment) a <<= 1;
3510  alignment = a;
3511  }
3512  if (bytes >= MAX_REQUEST - alignment) {
3513  if (m != 0) { /* Test isn't needed but avoids compiler warning */
3514  MALLOC_FAILURE_ACTION;
3515  }
3516  }
3517  else {
3518  size_t nb = request2size(bytes);
3519  size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
3520  mem = internal_malloc(m, req);
3521  if (mem != 0) {
3522  mchunkptr p = mem2chunk(mem);
3523  if (PREACTION(m))
3524  return 0;
3525  if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */
3526  /*
3527  Find an aligned spot inside chunk. Since we need to give
3528  back leading space in a chunk of at least MIN_CHUNK_SIZE, if
3529  the first calculation places us at a spot with less than
3530  MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
3531  We've allocated enough total room so that this is always
3532  possible.
3533  */
3534  char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment -
3535  SIZE_T_ONE)) &
3536  -alignment));
3537  char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)?
3538  br : br+alignment;
3539  mchunkptr newp = (mchunkptr)pos;
3540  size_t leadsize = pos - (char*)(p);
3541  size_t newsize = chunksize(p) - leadsize;
3542 
3543  if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
3544  newp->prev_foot = p->prev_foot + leadsize;
3545  newp->head = newsize;
3546  }
3547  else { /* Otherwise, give back leader, use the rest */
3548  set_inuse(m, newp, newsize);
3549  set_inuse(m, p, leadsize);
3550  dispose_chunk(m, p, leadsize);
3551  }
3552  p = newp;
3553  }
3554 
3555  /* Give back spare room at the end */
3556  if (!is_mmapped(p)) {
3557  size_t size = chunksize(p);
3558  if (size > nb + MIN_CHUNK_SIZE) {
3559  size_t remainder_size = size - nb;
3560  mchunkptr remainder = chunk_plus_offset(p, nb);
3561  set_inuse(m, p, nb);
3562  set_inuse(m, remainder, remainder_size);
3563  dispose_chunk(m, remainder, remainder_size);
3564  }
3565  }
3566 
3567  mem = chunk2mem(p);
3568  assert (chunksize(p) >= nb);
3569  assert(((size_t)mem & (alignment - 1)) == 0);
3570  check_inuse_chunk(m, p);
3571  POSTACTION(m);
3572  }
3573  }
3574  return mem;
3575 }
3576 
3577 /*
3578  Common support for independent_X routines, handling
3579  all of the combinations that can result.
3580  The opts arg has:
3581  bit 0 set if all elements are same size (using sizes[0])
3582  bit 1 set if elements should be zeroed
3583 */
3584 static void** ialloc(mstate m,
3585  size_t n_elements,
3586  size_t* sizes,
3587  int opts,
3588  void* chunks[]) {
3589 
3590  size_t element_size; /* chunksize of each element, if all same */
3591  size_t contents_size; /* total size of elements */
3592  size_t array_size; /* request size of pointer array */
3593  void* mem; /* malloced aggregate space */
3594  mchunkptr p; /* corresponding chunk */
3595  size_t remainder_size; /* remaining bytes while splitting */
3596  void** marray; /* either "chunks" or malloced ptr array */
3597  mchunkptr array_chunk; /* chunk for malloced ptr array */
3598  flag_t was_enabled; /* to disable mmap */
3599  size_t size;
3600  size_t i;
3601 
3602  ensure_initialization();
3603  /* compute array length, if needed */
3604  if (chunks != 0) {
3605  if (n_elements == 0)
3606  return chunks; /* nothing to do */
3607  marray = chunks;
3608  array_size = 0;
3609  }
3610  else {
3611  /* if empty req, must still return chunk representing empty array */
3612  if (n_elements == 0)
3613  return (void**)internal_malloc(m, 0);
3614  marray = 0;
3615  array_size = request2size(n_elements * (sizeof(void*)));
3616  }
3617 
3618  /* compute total element size */
3619  if (opts & 0x1) { /* all-same-size */
3620  element_size = request2size(*sizes);
3621  contents_size = n_elements * element_size;
3622  }
3623  else { /* add up all the sizes */
3624  element_size = 0;
3625  contents_size = 0;
3626  for (i = 0; i != n_elements; ++i)
3627  contents_size += request2size(sizes[i]);
3628  }
3629 
3630  size = contents_size + array_size;
3631 
3632  /*
3633  Allocate the aggregate chunk. First disable direct-mmapping so
3634  malloc won't use it, since we would not be able to later
3635  free/realloc space internal to a segregated mmap region.
3636  */
3637  was_enabled = use_mmap(m);
3638  disable_mmap(m);
3639  mem = internal_malloc(m, size - CHUNK_OVERHEAD);
3640  if (was_enabled)
3641  enable_mmap(m);
3642  if (mem == 0)
3643  return 0;
3644 
3645  if (PREACTION(m)) return 0;
3646  p = mem2chunk(mem);
3647  remainder_size = chunksize(p);
3648 
3649  assert(!is_mmapped(p));
3650 
3651  if (opts & 0x2) { /* optionally clear the elements */
3652  memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size);
3653  }
3654 
3655  /* If not provided, allocate the pointer array as final part of chunk */
3656  if (marray == 0) {
3657  size_t array_chunk_size;
3658  array_chunk = chunk_plus_offset(p, contents_size);
3659  array_chunk_size = remainder_size - contents_size;
3660  marray = (void**) (chunk2mem(array_chunk));
3661  set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
3662  remainder_size = contents_size;
3663  }
3664 
3665  /* split out elements */
3666  for (i = 0; ; ++i) {
3667  marray[i] = chunk2mem(p);
3668  if (i != n_elements-1) {
3669  if (element_size != 0)
3670  size = element_size;
3671  else
3672  size = request2size(sizes[i]);
3673  remainder_size -= size;
3674  set_size_and_pinuse_of_inuse_chunk(m, p, size);
3675  p = chunk_plus_offset(p, size);
3676  }
3677  else { /* the final element absorbs any overallocation slop */
3678  set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
3679  break;
3680  }
3681  }
3682 
3683 #if DEBUG
3684  if (marray != chunks) {
3685  /* final element must have exactly exhausted chunk */
3686  if (element_size != 0) {
3687  assert(remainder_size == element_size);
3688  }
3689  else {
3690  assert(remainder_size == request2size(sizes[i]));
3691  }
3692  check_inuse_chunk(m, mem2chunk(marray));
3693  }
3694  for (i = 0; i != n_elements; ++i)
3695  check_inuse_chunk(m, mem2chunk(marray[i]));
3696 
3697 #endif /* DEBUG */
3698 
3699  POSTACTION(m);
3700  return marray;
3701 }
3702 
3703 /* Try to free all pointers in the given array.
3704  Note: this could be made faster, by delaying consolidation,
3705  at the price of disabling some user integrity checks. We
3706  still optimize some consolidations by combining adjacent
3707  chunks before freeing, which will occur often if allocated
3708  with ialloc or the array is sorted.
3709 */
3710 static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) {
3711  size_t unfreed = 0;
3712  if (!PREACTION(m)) {
3713  void** a;
3714  void** fence = &(array[nelem]);
3715  for (a = array; a != fence; ++a) {
3716  void* mem = *a;
3717  if (mem != 0) {
3718  mchunkptr p = mem2chunk(mem);
3719  size_t psize = chunksize(p);
3720 #if FOOTERS
3721  if (get_mstate_for(p) != m) {
3722  ++unfreed;
3723  continue;
3724  }
3725 #endif
3726  check_inuse_chunk(m, p);
3727  *a = 0;
3728  if (RTCHECK(ok_address(m, p) && ok_inuse(p))) {
3729  void ** b = a + 1; /* try to merge with next chunk */
3730  mchunkptr next = next_chunk(p);
3731  if (b != fence && *b == chunk2mem(next)) {
3732  size_t newsize = chunksize(next) + psize;
3733  set_inuse(m, p, newsize);
3734  *b = chunk2mem(p);
3735  }
3736  else
3737  dispose_chunk(m, p, psize);
3738  }
3739  else {
3740  CORRUPTION_ERROR_ACTION(m);
3741  break;
3742  }
3743  }
3744  }
3745  if (should_trim(m, m->topsize))
3746  sys_trim(m, 0);
3747  POSTACTION(m);
3748  }
3749  return unfreed;
3750 }
3751 
3752 /* Traversal */
3753 #if MALLOC_INSPECT_ALL
3754 static void internal_inspect_all(mstate m,
3755  void(*handler)(void *start,
3756  void *end,
3757  size_t used_bytes,
3758  void* callback_arg),
3759  void* arg) {
3760  if (is_initialized(m)) {
3761  mchunkptr top = m->top;
3762  msegmentptr s;
3763  for (s = &m->seg; s != 0; s = s->next) {
3764  mchunkptr q = align_as_chunk(s->base);
3765  while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) {
3766  mchunkptr next = next_chunk(q);
3767  size_t sz = chunksize(q);
3768  size_t used;
3769  void* start;
3770  if (is_inuse(q)) {
3771  used = sz - CHUNK_OVERHEAD; /* must not be mmapped */
3772  start = chunk2mem(q);
3773  }
3774  else {
3775  used = 0;
3776  if (is_small(sz)) { /* offset by possible bookkeeping */
3777  start = (void*)((char*)q + sizeof(struct malloc_chunk));
3778  }
3779  else {
3780  start = (void*)((char*)q + sizeof(struct malloc_tree_chunk));
3781  }
3782  }
3783  if (start < (void*)next) /* skip if all space is bookkeeping */
3784  handler(start, next, used, arg);
3785  if (q == top)
3786  break;
3787  q = next;
3788  }
3789  }
3790  }
3791 }
3792 #endif /* MALLOC_INSPECT_ALL */
3793 
3794 /* ------------------ Exported realloc, memalign, etc -------------------- */
3795 
3796 #if !ONLY_MSPACES
3797 
3798 void* dlrealloc(void* oldmem, size_t bytes) {
3799  void* mem = 0;
3800  if (oldmem == 0) {
3801  mem = dlmalloc(bytes);
3802  }
3803  else if (bytes >= MAX_REQUEST) {
3804  MALLOC_FAILURE_ACTION;
3805  }
3806 #ifdef REALLOC_ZERO_BYTES_FREES
3807  else if (bytes == 0) {
3808  dlfree(oldmem);
3809  }
3810 #endif /* REALLOC_ZERO_BYTES_FREES */
3811  else {
3812  size_t nb = request2size(bytes);
3813  mchunkptr oldp = mem2chunk(oldmem);
3814 #if ! FOOTERS
3815  mstate m = gm;
3816 #else /* FOOTERS */
3817  mstate m = get_mstate_for(oldp);
3818  if (!ok_magic(m)) {
3819  USAGE_ERROR_ACTION(m, oldmem);
3820  return 0;
3821  }
3822 #endif /* FOOTERS */
3823  if (!PREACTION(m)) {
3824  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
3825  POSTACTION(m);
3826  if (newp != 0) {
3827  check_inuse_chunk(m, newp);
3828  mem = chunk2mem(newp);
3829  }
3830  else {
3831  mem = internal_malloc(m, bytes);
3832  if (mem != 0) {
3833  size_t oc = chunksize(oldp) - overhead_for(oldp);
3834  memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
3835  internal_free(m, oldmem);
3836  }
3837  }
3838  }
3839  }
3840  return mem;
3841 }
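/*
   Usage sketch (not part of dlmalloc.c): the usual grow-a-buffer pattern on
   top of dlrealloc above.  Keeping the old pointer until the call succeeds
   matters because a failed dlrealloc leaves the original block untouched.
   The grow_buffer() helper is hypothetical.

   static void *grow_buffer(void *buf, size_t new_size) {
     void *bigger = dlrealloc(buf, new_size);
     if (bigger == 0)
       return buf;    // allocation failed; the old block is still valid
     return bigger;   // may equal buf if the chunk was extended in place
   }
*/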
3842 
3843 void* dlrealloc_in_place(void* oldmem, size_t bytes) {
3844  void* mem = 0;
3845  if (oldmem != 0) {
3846  if (bytes >= MAX_REQUEST) {
3847  MALLOC_FAILURE_ACTION;
3848  }
3849  else {
3850  size_t nb = request2size(bytes);
3851  mchunkptr oldp = mem2chunk(oldmem);
3852 #if ! FOOTERS
3853  mstate m = gm;
3854 #else /* FOOTERS */
3855  mstate m = get_mstate_for(oldp);
3856  if (!ok_magic(m)) {
3857  USAGE_ERROR_ACTION(m, oldmem);
3858  return 0;
3859  }
3860 #endif /* FOOTERS */
3861  if (!PREACTION(m)) {
3862  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
3863  POSTACTION(m);
3864  if (newp == oldp) {
3865  check_inuse_chunk(m, newp);
3866  mem = oldmem;
3867  }
3868  }
3869  }
3870  }
3871  return mem;
3872 }
3873 
3874 void* dlmemalign(size_t alignment, size_t bytes) {
3875  if (alignment <= MALLOC_ALIGNMENT) {
3876  return dlmalloc(bytes);
3877  }
3878  return internal_memalign(gm, alignment, bytes);
3879 }
3880 
3881 int dlposix_memalign(void** pp, size_t alignment, size_t bytes) {
3882  void* mem = 0;
3883  if (alignment == MALLOC_ALIGNMENT)
3884  mem = dlmalloc(bytes);
3885  else {
3886  size_t d = alignment / sizeof(void*);
3887  size_t r = alignment % sizeof(void*);
3888  if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0)
3889  return EINVAL;
3890  else if (bytes <= MAX_REQUEST - alignment) {
3891  if (alignment < MIN_CHUNK_SIZE)
3892  alignment = MIN_CHUNK_SIZE;
3893  mem = internal_memalign(gm, alignment, bytes);
3894  }
3895  }
3896  if (mem == 0)
3897  return ENOMEM;
3898  else {
3899  *pp = mem;
3900  return 0;
3901  }
3902 }
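/*
   Usage sketch (not part of dlmalloc.c): dlposix_memalign above follows the
   POSIX contract -- the alignment must be a power of two and a multiple of
   sizeof(void*), the block is returned through *pp, and the result is 0,
   EINVAL or ENOMEM rather than a pointer.  The demo function name is
   hypothetical.

   static void posix_memalign_demo(void) {
     void *p = 0;
     int rc = dlposix_memalign(&p, 64, 1000);  // 64: power of two, multiple of sizeof(void*)
     if (rc == 0)
       dlfree(p);                              // p was 64-byte aligned
     rc = dlposix_memalign(&p, 48, 1000);      // 48 is not a power of two
     // rc == EINVAL here; p is left unchanged
   }
*/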
3903 
3904 void* dlvalloc(size_t bytes) {
3905  size_t pagesz;
3906  ensure_initialization();
3907  pagesz = mparams.page_size;
3908  return dlmemalign(pagesz, bytes);
3909 }
3910 
3911 void* dlpvalloc(size_t bytes) {
3912  size_t pagesz;
3913  ensure_initialization();
3914  pagesz = mparams.page_size;
3915  return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
3916 }
3917 
3918 void** dlindependent_calloc(size_t n_elements, size_t elem_size,
3919  void* chunks[]) {
3920  size_t sz = elem_size; /* serves as 1-element array */
3921  return ialloc(gm, n_elements, &sz, 3, chunks);
3922 }
3923 
3924 void** dlindependent_comalloc(size_t n_elements, size_t sizes[],
3925  void* chunks[]) {
3926  return ialloc(gm, n_elements, sizes, 0, chunks);
3927 }
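/*
   Usage sketch (not part of dlmalloc.c): dlindependent_comalloc above
   carves several independently freeable chunks out of one aggregate
   allocation (see ialloc).  Passing a null "chunks" argument makes the
   returned pointer array itself a malloced chunk, so it must be freed too.
   The struct layout and function name below are hypothetical.

   struct header { int refcount; };

   static void comalloc_demo(void) {
     size_t sizes[3] = { sizeof(struct header), 1024, 4096 };
     void **parts = dlindependent_comalloc(3, sizes, 0);
     if (parts != 0) {
       struct header *h = (struct header *) parts[0];
       h->refcount = 1;
       dlfree(parts[1]);   // elements may be freed individually, in any order
       dlfree(parts[2]);
       dlfree(parts[0]);
       dlfree(parts);      // finally release the pointer array itself
     }
   }
*/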
3928 
3929 size_t dlbulk_free(void* array[], size_t nelem) {
3930  return internal_bulk_free(gm, array, nelem);
3931 }
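/*
   Usage sketch (not part of dlmalloc.c): dlbulk_free above releases a whole
   array of pointers under a single lock acquisition; internal_bulk_free
   clears each slot it frees and returns the number of pointers it could
   not free.  The demo function name is hypothetical.

   static void bulk_free_demo(void) {
     void *ptrs[4];
     size_t i, unfreed;
     for (i = 0; i < 4; ++i)
       ptrs[i] = dlmalloc(64);
     unfreed = dlbulk_free(ptrs, 4);
     // unfreed == 0 on success, and every ptrs[i] slot is now null
     (void) unfreed;
   }
*/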
3932 
3933 #if MALLOC_INSPECT_ALL
3934 void dlmalloc_inspect_all(void(*handler)(void *start,
3935  void *end,
3936  size_t used_bytes,
3937  void* callback_arg),
3938  void* arg) {
3939  ensure_initialization();
3940  if (!PREACTION(gm)) {
3941  internal_inspect_all(gm, handler, arg);
3942  POSTACTION(gm);
3943  }
3944 }
3945 #endif /* MALLOC_INSPECT_ALL */
3946 
3947 int dlmalloc_trim(size_t pad) {
3948  int result = 0;
3949  ensure_initialization();
3950  if (!PREACTION(gm)) {
3951  result = sys_trim(gm, pad);
3952  POSTACTION(gm);
3953  }
3954  return result;
3955 }
3956 
3957 size_t dlmalloc_footprint(void) {
3958  return gm->footprint;
3959 }
3960 
3961 size_t dlmalloc_max_footprint(void) {
3962  return gm->max_footprint;
3963 }
3964 
3965 size_t dlmalloc_footprint_limit(void) {
3966  size_t maf = gm->footprint_limit;
3967  return maf == 0 ? MAX_SIZE_T : maf;
3968 }
3969 
3970 size_t dlmalloc_set_footprint_limit(size_t bytes) {
3971  size_t result; /* invert sense of 0 */
3972  if (bytes == 0)
3973  result = granularity_align(1); /* Use minimal size */
3974  if (bytes == MAX_SIZE_T)
3975  result = 0; /* disable */
3976  else
3977  result = granularity_align(bytes);
3978  return gm->footprint_limit = result;
3979 }
3980 
3981 #if !NO_MALLINFO
3982 struct dlmallinfo dlmallinfo(void) {
3983  return internal_mallinfo(gm);
3984 }
3985 #endif /* NO_MALLINFO */
3986 
3987 #if !NO_MALLOC_STATS
3988 void dlmalloc_stats() {
3989  internal_malloc_stats(gm);
3990 }
3991 #endif /* NO_MALLOC_STATS */
3992 
3993 int dlmallopt(int param_number, int value) {
3994  return change_mparam(param_number, value);
3995 }
3996 
3997 size_t dlmalloc_usable_size(void* mem) {
3998  if (mem != 0) {
3999  mchunkptr p = mem2chunk(mem);
4000  if (is_inuse(p))
4001  return chunksize(p) - overhead_for(p);
4002  }
4003  return 0;
4004 }
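/*
   Usage sketch (not part of dlmalloc.c): dlmalloc_usable_size above reports
   the bytes actually available in a block, which may exceed the requested
   size because requests are padded up to chunk granularity.  The demo
   function name is hypothetical.

   static void usable_size_demo(void) {
     void *p = dlmalloc(10);
     if (p != 0) {
       size_t n = dlmalloc_usable_size(p);   // n >= 10
       memset(p, 0, n);                      // writing all n bytes is legal
       dlfree(p);
     }
   }
*/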
4005 
4006 #endif /* !ONLY_MSPACES */
4007 
4008 /* ----------------------------- user mspaces ---------------------------- */
4009 
4010 #if MSPACES
4011 
4012 static mstate init_user_mstate(char* tbase, size_t tsize) {
4013  size_t msize = pad_request(sizeof(struct malloc_state));
4014  mchunkptr mn;
4015  mchunkptr msp = align_as_chunk(tbase);
4016  mstate m = (mstate)(chunk2mem(msp));
4017  memset(m, 0, msize);
4018  (void)INITIAL_LOCK(&m->mutex);
4019  msp->head = (msize|INUSE_BITS);
4020  m->seg.base = m->least_addr = tbase;
4021  m->seg.size = m->footprint = m->max_footprint = tsize;
4022  m->magic = mparams.magic;
4023  m->release_checks = MAX_RELEASE_CHECK_RATE;
4024  m->mflags = mparams.default_mflags;
4025  m->extp = 0;
4026  m->exts = 0;
4027  disable_contiguous(m);
4028  init_bins(m);
4029  mn = next_chunk(mem2chunk(m));
4030  init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4031  check_top_chunk(m, m->top);
4032  return m;
4033 }
4034 
4035 mspace create_mspace(size_t capacity, int locked) {
4036  mstate m = 0;
4037  size_t msize;
4038  ensure_initialization();
4039  msize = pad_request(sizeof(struct malloc_state));
4040  if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4041  size_t rs = ((capacity == 0)? mparams.granularity :
4042  (capacity + TOP_FOOT_SIZE + msize));
4043  size_t tsize = granularity_align(rs);
4044  char* tbase = (char*)(CALL_MMAP(tsize));
4045  if (tbase != CMFAIL) {
4046  m = init_user_mstate(tbase, tsize);
4047  m->seg.sflags = USE_MMAP_BIT;
4048  set_lock(m, locked);
4049  }
4050  }
4051  return (mspace)m;
4052 }
4053 
4054 mspace create_mspace_with_base(void* base, size_t capacity, int locked) {
4055  mstate m = 0;
4056  size_t msize;
4057  ensure_initialization();
4058  msize = pad_request(sizeof(struct malloc_state));
4059  if (capacity > msize + TOP_FOOT_SIZE &&
4060  capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) {
4061  m = init_user_mstate((char*)base, capacity);
4062  m->seg.sflags = EXTERN_BIT;
4063  set_lock(m, locked);
4064  }
4065  return (mspace)m;
4066 }
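/*
   Usage sketch (not part of dlmalloc.c): a private heap built with
   create_mspace_with_base above.  The backing buffer and its size are
   arbitrary values chosen for the example; the capacity only has to exceed
   the bookkeeping overhead (msize + TOP_FOOT_SIZE).

   static char heap_base[256 << 10];              // 256 KiB, caller-owned

   static void mspace_demo(void) {
     mspace msp = create_mspace_with_base(heap_base, sizeof(heap_base), 1);
     if (msp != 0) {
       void *obj = mspace_malloc(msp, 128);
       if (obj != 0)
         mspace_free(msp, obj);
       destroy_mspace(msp);   // EXTERN_BIT segments are not unmapped here
     }
   }
*/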
4067 
4068 int mspace_track_large_chunks(mspace msp, int enable) {
4069  int ret = 0;
4070  mstate ms = (mstate)msp;
4071  if (!PREACTION(ms)) {
4072  if (!use_mmap(ms)) {
4073  ret = 1;
4074  }
4075  if (!enable) {
4076  enable_mmap(ms);
4077  } else {
4078  disable_mmap(ms);
4079  }
4080  POSTACTION(ms);
4081  }
4082  return ret;
4083 }
4084 
4086 size_t destroy_mspace(mspace msp) {
4087  size_t freed = 0;
4088  mstate ms = (mstate)msp;
4089  if (ok_magic(ms)) {
4090  msegmentptr sp = &ms->seg;
4091  (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */
4092  while (sp != 0) {
4093  char* base = sp->base;
4094  size_t size = sp->size;
4095  flag_t flag = sp->sflags;
4096  (void)base; /* placate people compiling -Wunused-variable */
4097  sp = sp->next;
4098  if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) &&
4099  CALL_MUNMAP(base, size) == 0)
4100  freed += size;
4101  }
4102  }
4103  else {
4104  USAGE_ERROR_ACTION(ms,ms);
4105  }
4106  return freed;
4107 }
4108 
4109 void mspace_get_address_and_size (mspace msp, char **addrp, size_t *sizep)
4110 {
4111  mstate ms;
4112  msegment *this_seg;
4113 
4114  ms = (mstate)msp;
4115  this_seg = &ms->seg;
4116 
4117  *addrp = this_seg->base;
4118  *sizep = this_seg->size;
4119 }
4120 
4121 CLIB_NOSANITIZE_ADDR __clib_export
4122 int mspace_is_heap_object (mspace msp, void *p)
4123 {
4124  msegment *this_seg;
4125  char *pp, *base;
4126  mstate ms;
4127 
4128  ms = (mstate)msp;
4129 
4130  this_seg = &ms->seg;
4131  pp = (char *) p;
4132 
4133  while (this_seg)
4134  {
4135  base = this_seg->base;
4136  if (pp >= base && pp < (base + this_seg->size))
4137  return 1;
4138  this_seg = this_seg->next;
4139  }
4140 
4141  if (pp > ms->least_addr && pp <= ms->least_addr + ms->footprint)
4142  return 1;
4143 
4144  return 0;
4145 }
4146 
4148 void *mspace_least_addr (mspace msp)
4149 {
4150  mstate ms = (mstate) msp;
4151  return (void *) ms->least_addr;
4152 }
4153 
4154 void mspace_disable_expand (mspace msp)
4155 {
4156  mstate ms = (mstate)msp;
4157 
4158  disable_expand (ms);
4159 }
4160 
4162 int mspace_enable_disable_trace (mspace msp, int enable)
4163 {
4164  mstate ms = (mstate)msp;
4165  int was_enabled = 0;
4166 
4167  if (use_trace(ms))
4168  was_enabled = 1;
4169 
4170  if (enable)
4171  enable_trace (ms);
4172  else
4173  disable_trace (ms);
4174 
4175  return (was_enabled);
4176 }
4177 
4179 int mspace_is_traced (mspace msp)
4180 {
4181  mstate ms = (mstate)msp;
4182 
4183  if (use_trace(ms))
4184  return 1;
4185  return 0;
4186 }
4187 
4188 CLIB_NOSANITIZE_ADDR __clib_export
4189 void* mspace_get_aligned (mspace msp,
4190  unsigned long n_user_data_bytes,
4191  unsigned long align,
4192  unsigned long align_offset) {
4193  char *rv;
4194  unsigned long searchp;
4195  unsigned *wwp; /* "where's Waldo" pointer */
4196  mstate ms = (mstate)msp;
4197 
4198  /*
4199  * Allocate space for the "Where's Waldo?" pointer
4200  * the base of the dlmalloc object
4201  */
4202  n_user_data_bytes += sizeof(unsigned);
4203  align = align < MALLOC_ALIGNMENT ? MALLOC_ALIGNMENT : align;
4204 
4205  /*
4206  * Alignment requests greater than 4K must be at offset zero,
4207  * and must be freed using mspace_free_no_offset - or never freed -
4208  * since the "Where's Waldo?" pointer would waste too much space.
4209  *
4210  * Waldo is the address of the chunk of memory returned by mspace_malloc,
4211  * which we need later to call mspace_free...
4212  */
4213  if (align > 4<<10 || align_offset == ~0UL) {
4214  n_user_data_bytes -= sizeof(unsigned);
4215  assert(align_offset == 0);
4216  rv = internal_memalign(ms, (size_t)align, n_user_data_bytes);
4217 
4218  /* Trace the allocation */
4219  if (rv && use_trace(ms)) {
4220  mchunkptr p = mem2chunk(rv);
4221  size_t psize = chunksize(p);
4222  mheap_get_trace ((unsigned long)rv, psize);
4223  }
4224  return rv;
4225  }
4226 
4227  align = clib_max (align, MALLOC_ALIGNMENT);
4228  align = max_pow2 (align);
4229 
4230  /* Correct align offset to be smaller than alignment. */
4231  align_offset &= (align - 1);
4232 
4233  n_user_data_bytes += align;
4234  rv = mspace_malloc (msp, n_user_data_bytes);
4235 
4236  if (rv == 0)
4237  return rv;
4238 
4239  /* Honor the alignment request */
4240  searchp = (unsigned long)(rv + sizeof (unsigned));
4241 
4242 #if 0 /* this is the idea... */
4243  while ((searchp + align_offset) % align)
4244  searchp++;
4245 #endif
4246 
4247  {
4248  unsigned long where_now, delta;
4249 
4250  where_now = (searchp + align_offset) % align;
4251  delta = align - where_now;
4252 
4253  searchp += delta;
4254  }
4255 
4256  wwp = (unsigned *)(searchp - sizeof(unsigned));
4257  *wwp = (searchp - (((unsigned long) rv) + sizeof (*wwp)));
4258  assert (*wwp < align);
4259 
4260  if (use_trace(ms)) {
4261  mchunkptr p = mem2chunk(rv);
4262  size_t psize = chunksize(p);
4263  mheap_get_trace (searchp, psize);
4264  }
4265  return (void *) searchp;
4266 }
4267 
4268 CLIB_NOSANITIZE_ADDR __clib_export
4269 void mspace_put (mspace msp, void *p_arg)
4270 {
4271  char *object_header;
4272  unsigned *wwp;
4273  mstate ms = (mstate)msp;
4274 
4275  /* Find the object header delta */
4276  wwp = (unsigned *)p_arg;
4277  wwp --;
4278 
4279  /* Recover the dlmalloc object pointer */
4280  object_header = (char *)wwp;
4281  object_header -= *wwp;
4282 
4283  /* Tracing (if enabled) */
4284  if (use_trace(ms))
4285  {
4286  mchunkptr p = mem2chunk(object_header);
4287  size_t psize = chunksize(p);
4288 
4289  mheap_put_trace ((unsigned long)p_arg, psize);
4290  }
4291 
4292 #if CLIB_DEBUG > 0 && !defined(CLIB_SANITIZE_ADDR)
4293  /* Poison the object */
4294  {
4295  size_t psize = mspace_usable_size (object_header);
4296  memset (object_header, 0x13, psize);
4297  }
4298 #endif
4299 
4300  /* And free it... */
4301  mspace_free (msp, object_header);
4302 }
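/*
   Usage sketch (not part of dlmalloc.c): the VPP-specific pair above.
   mspace_get_aligned stores the "Where's Waldo" offset in the word just
   below the returned pointer, so that pointer must be handed back to
   mspace_put (not mspace_free).  The msp handle is assumed to come from
   create_mspace or create_mspace_with_base; the demo name is hypothetical.

   static void aligned_demo(mspace msp) {
     // 512 bytes of user data, aligned to a 64-byte boundary, offset 0
     void *obj = mspace_get_aligned(msp, 512, 64, 0);
     if (obj != 0) {
       // ((unsigned long) obj & 63) == 0 holds here
       mspace_put(msp, obj);
     }
   }
*/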
4303 
4304 void mspace_put_no_offset (mspace msp, void *p_arg)
4305 {
4306  mstate ms = (mstate)msp;
4307 
4308  if (use_trace(ms))
4309  {
4310  mchunkptr p = mem2chunk(p_arg);
4311  size_t psize = chunksize(p);
4312 
4313  mheap_put_trace ((unsigned long)p_arg, psize);
4314  }
4315  mspace_free (msp, p_arg);
4316 }
4317 
4318 CLIB_NOSANITIZE_ADDR __clib_export
4319 size_t mspace_usable_size_with_delta (const void *p)
4320 {
4321  size_t usable_size;
4322  char *object_header;
4323  unsigned *wwp;
4324 
4325  /* Find the object header delta */
4326  wwp = (unsigned *)p;
4327  wwp --;
4328 
4329  /* Recover the dlmalloc object pointer */
4330  object_header = (char *)wwp;
4331  object_header -= *wwp;
4332 
4333  usable_size = mspace_usable_size (object_header);
4334  /* account for the offset and the size of the offset... */
4335  usable_size -= (*wwp + sizeof (*wwp));
4336  return usable_size;
4337 }
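/*
   Usage sketch (not part of dlmalloc.c): for a pointer produced by
   mspace_get_aligned, mspace_usable_size_with_delta above subtracts the
   stored offset plus its header word, so it reports only the bytes usable
   from the aligned pointer onward.  The demo function name is hypothetical.

   static void usable_with_delta_demo(mspace msp) {
     void *obj = mspace_get_aligned(msp, 256, 32, 0);
     if (obj != 0) {
       size_t n = mspace_usable_size_with_delta(obj);  // n >= 256
       memset(obj, 0, n);
       mspace_put(msp, obj);
     }
   }
*/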
4338 
4339 /*
4340  mspace versions of routines are near-clones of the global
4341  versions. This is not so nice but better than the alternatives.
4342 */
4343 
4345 void* mspace_malloc(mspace msp, size_t bytes) {
4346  mstate ms = (mstate)msp;
4347  if (!ok_magic(ms)) {
4348  USAGE_ERROR_ACTION(ms,ms);
4349  return 0;
4350  }
4351  if (!PREACTION(ms)) {
4352  void* mem;
4353  size_t nb;
4354  if (bytes <= MAX_SMALL_REQUEST) {
4355  bindex_t idx;
4356  binmap_t smallbits;
4357  nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes);
4358  idx = small_index(nb);
4359  smallbits = ms->smallmap >> idx;
4360 
4361  if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4362  mchunkptr b, p;
4363  idx += ~smallbits & 1; /* Uses next bin if idx empty */
4364  b = smallbin_at(ms, idx);
4365  p = b->fd;
4366  assert(chunksize(p) == small_index2size(idx));
4367  unlink_first_small_chunk(ms, b, p, idx);
4368  set_inuse_and_pinuse(ms, p, small_index2size(idx));
4369  mem = chunk2mem(p);
4370  check_malloced_chunk(ms, mem, nb);
4371  goto postaction;
4372  }
4373 
4374  else if (nb > ms->dvsize) {
4375  if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4376  mchunkptr b, p, r;
4377  size_t rsize;
4378  bindex_t i;
4379  binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx));
4380  binmap_t leastbit = least_bit(leftbits);
4381  compute_bit2idx(leastbit, i);
4382  b = smallbin_at(ms, i);
4383  p = b->fd;
4384  assert(chunksize(p) == small_index2size(i));
4385  unlink_first_small_chunk(ms, b, p, i);
4386  rsize = small_index2size(i) - nb;
4387  /* Fit here cannot be remainderless if 4byte sizes */
4388  if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4389  set_inuse_and_pinuse(ms, p, small_index2size(i));
4390  else {
4391  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4392  r = chunk_plus_offset(p, nb);
4393  set_size_and_pinuse_of_free_chunk(r, rsize);
4394  replace_dv(ms, r, rsize);
4395  }
4396  mem = chunk2mem(p);
4397  check_malloced_chunk(ms, mem, nb);
4398  goto postaction;
4399  }
4400 
4401  else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) {
4402  check_malloced_chunk(ms, mem, nb);
4403  goto postaction;
4404  }
4405  }
4406  }
4407  else if (bytes >= MAX_REQUEST)
4408  nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4409  else {
4410  nb = pad_request(bytes);
4411  if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4412  check_malloced_chunk(ms, mem, nb);
4413  goto postaction;
4414  }
4415  }
4416 
4417  if (nb <= ms->dvsize) {
4418  size_t rsize = ms->dvsize - nb;
4419  mchunkptr p = ms->dv;
4420  if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4421  mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4422  ms->dvsize = rsize;
4423  set_size_and_pinuse_of_free_chunk(r, rsize);
4424  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4425  }
4426  else { /* exhaust dv */
4427  size_t dvs = ms->dvsize;
4428  ms->dvsize = 0;
4429  ms->dv = 0;
4430  set_inuse_and_pinuse(ms, p, dvs);
4431  }
4432  mem = chunk2mem(p);
4433  check_malloced_chunk(ms, mem, nb);
4434  goto postaction;
4435  }
4436 
4437  else if (nb < ms->topsize) { /* Split top */
4438  size_t rsize = ms->topsize -= nb;
4439  mchunkptr p = ms->top;
4440  mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4441  r->head = rsize | PINUSE_BIT;
4442  set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4443  mem = chunk2mem(p);
4444  check_top_chunk(ms, ms->top);
4445  check_malloced_chunk(ms, mem, nb);
4446  goto postaction;
4447  }
4448 
4449  mem = sys_alloc(ms, nb);
4450 
4451  postaction:
4452  POSTACTION(ms);
4453  return mem;
4454  }
4455 
4456  return 0;
4457 }
4458 
4460 void mspace_free(mspace msp, void* mem) {
4461  if (mem != 0) {
4462  mchunkptr p = mem2chunk(mem);
4463 #if FOOTERS
4464  mstate fm = get_mstate_for(p);
4465  (void)msp; /* placate people compiling -Wunused */
4466 #else /* FOOTERS */
4467  mstate fm = (mstate)msp;
4468 #endif /* FOOTERS */
4469  if (!ok_magic(fm)) {
4470  USAGE_ERROR_ACTION(fm, p);
4471  return;
4472  }
4473  if (!PREACTION(fm)) {
4474  check_inuse_chunk(fm, p);
4475  if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) {
4476  size_t psize = chunksize(p);
4477  mchunkptr next = chunk_plus_offset(p, psize);
4478  if (!pinuse(p)) {
4479  size_t prevsize = p->prev_foot;
4480  if (is_mmapped(p)) {
4481  psize += prevsize + MMAP_FOOT_PAD;
4482  if (CALL_MUNMAP((char*)p - prevsize, psize) == 0)
4483  fm->footprint -= psize;
4484  goto postaction;
4485  }
4486  else {
4487  mchunkptr prev = chunk_minus_offset(p, prevsize);
4488  psize += prevsize;
4489  p = prev;
4490  if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4491  if (p != fm->dv) {
4492  unlink_chunk(fm, p, prevsize);
4493  }
4494  else if ((next->head & INUSE_BITS) == INUSE_BITS) {
4495  fm->dvsize = psize;
4496  set_free_with_pinuse(p, psize, next);
4497  goto postaction;
4498  }
4499  }
4500  else
4501  goto erroraction;
4502  }
4503  }
4504 
4505  if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4506  if (!cinuse(next)) { /* consolidate forward */
4507  if (next == fm->top) {
4508  size_t tsize = fm->topsize += psize;
4509  fm->top = p;
4510  p->head = tsize | PINUSE_BIT;
4511  if (p == fm->dv) {
4512  fm->dv = 0;
4513  fm->dvsize = 0;
4514  }
4515  if (should_trim(fm, tsize))
4516  sys_trim(fm, 0);
4517  goto postaction;
4518  }
4519  else if (next == fm->dv) {
4520  size_t dsize = fm->dvsize += psize;
4521  fm->dv = p;
4522  set_size_and_pinuse_of_free_chunk(p, dsize);
4523  goto postaction;
4524  }
4525  else {
4526  size_t nsize = chunksize(next);
4527  psize += nsize;
4528  unlink_chunk(fm, next, nsize);
4529  set_size_and_pinuse_of_free_chunk(p, psize);
4530  if (p == fm->dv) {
4531  fm->dvsize = psize;
4532  goto postaction;
4533  }
4534  }
4535  }
4536  else
4537  set_free_with_pinuse(p, psize, next);
4538 
4539  if (is_small(psize)) {
4540  insert_small_chunk(fm, p, psize);
4541  check_free_chunk(fm, p);
4542  }
4543  else {
4544  tchunkptr tp = (tchunkptr)p;
4545  insert_large_chunk(fm, tp, psize);
4546  check_free_chunk(fm, p);
4547  if (--fm->release_checks == 0)
4548  release_unused_segments(fm);
4549  }
4550  goto postaction;
4551  }
4552  }
4553  erroraction:
4554  USAGE_ERROR_ACTION(fm, p);
4555  postaction:
4556  POSTACTION(fm);
4557  }
4558  }
4559 }
4560 
4561 void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) {
4562  void* mem;
4563  size_t req = 0;
4564  mstate ms = (mstate)msp;
4565  if (!ok_magic(ms)) {
4566  USAGE_ERROR_ACTION(ms,ms);
4567  return 0;
4568  }
4569  if (n_elements != 0) {
4570  req = n_elements * elem_size;
4571  if (((n_elements | elem_size) & ~(size_t)0xffff) &&
4572  (req / n_elements != elem_size))
4573  req = MAX_SIZE_T; /* force downstream failure on overflow */
4574  }
4575  mem = internal_malloc(ms, req);
4576  if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4577  memset(mem, 0, req);
4578  return mem;
4579 }
4580 
4581 void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) {
4582  void* mem = 0;
4583  if (oldmem == 0) {
4584  mem = mspace_malloc(msp, bytes);
4585  }
4586  else if (bytes >= MAX_REQUEST) {
4587  MALLOC_FAILURE_ACTION;
4588  }
4589 #ifdef REALLOC_ZERO_BYTES_FREES
4590  else if (bytes == 0) {
4591  mspace_free(msp, oldmem);
4592  }
4593 #endif /* REALLOC_ZERO_BYTES_FREES */
4594  else {
4595  size_t nb = request2size(bytes);
4596  mchunkptr oldp = mem2chunk(oldmem);
4597 #if ! FOOTERS
4598  mstate m = (mstate)msp;
4599 #else /* FOOTERS */
4600  mstate m = get_mstate_for(oldp);
4601  if (!ok_magic(m)) {
4602  USAGE_ERROR_ACTION(m, oldmem);
4603  return 0;
4604  }
4605 #endif /* FOOTERS */
4606  if (!PREACTION(m)) {
4607  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1);
4608  POSTACTION(m);
4609  if (newp != 0) {
4610  check_inuse_chunk(m, newp);
4611  mem = chunk2mem(newp);
4612  }
4613  else {
4614  mem = mspace_malloc(m, bytes);
4615  if (mem != 0) {
4616  size_t oc = chunksize(oldp) - overhead_for(oldp);
4617  memcpy(mem, oldmem, (oc < bytes)? oc : bytes);
4618  mspace_free(m, oldmem);
4619  }
4620  }
4621  }
4622  }
4623  return mem;
4624 }
4625 
4626 void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) {
4627  void* mem = 0;
4628  if (oldmem != 0) {
4629  if (bytes >= MAX_REQUEST) {
4630  MALLOC_FAILURE_ACTION;
4631  }
4632  else {
4633  size_t nb = request2size(bytes);
4634  mchunkptr oldp = mem2chunk(oldmem);
4635 #if ! FOOTERS
4636  mstate m = (mstate)msp;
4637 #else /* FOOTERS */
4638  mstate m = get_mstate_for(oldp);
4639  (void)msp; /* placate people compiling -Wunused */
4640  if (!ok_magic(m)) {
4641  USAGE_ERROR_ACTION(m, oldmem);
4642  return 0;
4643  }
4644 #endif /* FOOTERS */
4645  if (!PREACTION(m)) {
4646  mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0);
4647  POSTACTION(m);
4648  if (newp == oldp) {
4649  check_inuse_chunk(m, newp);
4650  mem = oldmem;
4651  }
4652  }
4653  }
4654  }
4655  return mem;
4656 }
4657 
4658 void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) {
4659  mstate ms = (mstate)msp;
4660  if (!ok_magic(ms)) {
4661  USAGE_ERROR_ACTION(ms,ms);
4662  return 0;
4663  }
4664  if (alignment <= MALLOC_ALIGNMENT)
4665  return mspace_malloc(msp, bytes);
4666  return internal_memalign(ms, alignment, bytes);
4667 }
4668 
4669 void** mspace_independent_calloc(mspace msp, size_t n_elements,
4670  size_t elem_size, void* chunks[]) {
4671  size_t sz = elem_size; /* serves as 1-element array */
4672  mstate ms = (mstate)msp;
4673  if (!ok_magic(ms)) {
4674  USAGE_ERROR_ACTION(ms,ms);
4675  return 0;
4676  }
4677  return ialloc(ms, n_elements, &sz, 3, chunks);
4678 }
4679 
4680 void** mspace_independent_comalloc(mspace msp, size_t n_elements,
4681  size_t sizes[], void* chunks[]) {
4682  mstate ms = (mstate)msp;
4683  if (!ok_magic(ms)) {
4684  USAGE_ERROR_ACTION(ms,ms);
4685  return 0;
4686  }
4687  return ialloc(ms, n_elements, sizes, 0, chunks);
4688 }
4689 
4690 size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) {
4691  return internal_bulk_free((mstate)msp, array, nelem);
4692 }
4693 
4694 #if MALLOC_INSPECT_ALL
4695 void mspace_inspect_all(mspace msp,
4696  void(*handler)(void *start,
4697  void *end,
4698  size_t used_bytes,
4699  void* callback_arg),
4700  void* arg) {
4701  mstate ms = (mstate)msp;
4702  if (ok_magic(ms)) {
4703  if (!PREACTION(ms)) {
4704  internal_inspect_all(ms, handler, arg);
4705  POSTACTION(ms);
4706  }
4707  }
4708  else {
4709  USAGE_ERROR_ACTION(ms,ms);
4710  }
4711 }
4712 #endif /* MALLOC_INSPECT_ALL */
4713 
4714 int mspace_trim(mspace msp, size_t pad) {
4715  int result = 0;
4716  mstate ms = (mstate)msp;
4717  if (ok_magic(ms)) {
4718  if (!PREACTION(ms)) {
4719  result = sys_trim(ms, pad);
4720  POSTACTION(ms);
4721  }
4722  }
4723  else {
4724  USAGE_ERROR_ACTION(ms,ms);
4725  }
4726  return result;
4727 }
4728 
4729 #if !NO_MALLOC_STATS
4730 void mspace_malloc_stats(mspace msp) {
4731  mstate ms = (mstate)msp;
4732  if (ok_magic(ms)) {
4733  internal_malloc_stats(ms);
4734  }
4735  else {
4736  USAGE_ERROR_ACTION(ms,ms);
4737  }
4738 }
4739 #endif /* NO_MALLOC_STATS */
4740 
4741 size_t mspace_footprint(mspace msp) {
4742  size_t result = 0;
4743  mstate ms = (mstate)msp;
4744  if (ok_magic(ms)) {
4745  result = ms->footprint;
4746  }
4747  else {
4748  USAGE_ERROR_ACTION(ms,ms);
4749  }
4750  return result;
4751 }
4752 
4753 size_t mspace_max_footprint(mspace msp) {
4754  size_t result = 0;
4755  mstate ms = (mstate)msp;
4756  if (ok_magic(ms)) {
4757  result = ms->max_footprint;
4758  }
4759  else {
4760  USAGE_ERROR_ACTION(ms,ms);
4761  }
4762  return result;
4763 }
4764 
4765 size_t mspace_footprint_limit(mspace msp) {
4766  size_t result = 0;
4767  mstate ms = (mstate)msp;
4768  if (ok_magic(ms)) {
4769  size_t maf = ms->footprint_limit;
4770  result = (maf == 0) ? MAX_SIZE_T : maf;
4771  }
4772  else {
4773  USAGE_ERROR_ACTION(ms,ms);
4774  }
4775  return result;
4776 }
4777 
4778 size_t mspace_set_footprint_limit(mspace msp, size_t bytes) {
4779  size_t result = 0;
4780  mstate ms = (mstate)msp;
4781  if (ok_magic(ms)) {
4782  if (bytes == 0)
4783  result = granularity_align(1); /* Use minimal size */
4784  if (bytes == MAX_SIZE_T)
4785  result = 0; /* disable */
4786  else
4787  result = granularity_align(bytes);
4788  ms->footprint_limit = result;
4789  }
4790  else {
4791  USAGE_ERROR_ACTION(ms,ms);
4792  }
4793  return result;
4794 }
4795 
4796 #if !NO_MALLINFO
4798 struct dlmallinfo mspace_mallinfo(mspace msp) {
4799  mstate ms = (mstate)msp;
4800  if (!ok_magic(ms)) {
4801  USAGE_ERROR_ACTION(ms,ms);
4802  }
4803  return internal_mallinfo(ms);
4804 }
4805 #endif /* NO_MALLINFO */
4806 
4808 size_t mspace_usable_size(const void* mem) {
4809  if (mem != 0) {
4810  mchunkptr p = mem2chunk(mem);
4811  if (is_inuse(p))
4812  return chunksize(p) - overhead_for(p);
4813  }
4814  return 0;
4815 }
4816 
4817 int mspace_mallopt(int param_number, int value) {
4818  return change_mparam(param_number, value);
4819 }
4820 
4821 #endif /* MSPACES */
4822 
4823 
4824 /* -------------------- Alternative MORECORE functions ------------------- */
4825 
4826 /*
4827  Guidelines for creating a custom version of MORECORE:
4828 
4829  * For best performance, MORECORE should allocate in multiples of pagesize.
4830  * MORECORE may allocate more memory than requested. (Or even less,
4831  but this will usually result in a malloc failure.)
4832  * MORECORE must not allocate memory when given argument zero, but
4833  instead return one past the end address of memory from previous
4834  nonzero call.
4835  * For best performance, consecutive calls to MORECORE with positive
4836  arguments should return increasing addresses, indicating that
4837  space has been contiguously extended.
4838  * Even though consecutive calls to MORECORE need not return contiguous
4839  addresses, it must be OK for malloc'ed chunks to span multiple
4840  regions in those cases where they do happen to be contiguous.
4841  * MORECORE need not handle negative arguments -- it may instead
4842  just return MFAIL when given negative arguments.
4843  Negative arguments are always multiples of pagesize. MORECORE
4844  must not misinterpret negative args as large positive unsigned
4845  args. You can suppress all such calls from even occurring by defining
4846  MORECORE_CANNOT_TRIM.
4847 
4848  As an example alternative MORECORE, here is a custom allocator
4849  kindly contributed for pre-OSX macOS. It uses virtually but not
4850  necessarily physically contiguous non-paged memory (locked in,
4851  present and won't get swapped out). You can use it by uncommenting
4852  this section, adding some #includes, and setting up the appropriate
4853  defines above:
4854 
4855  #define MORECORE osMoreCore
4856 
4857  There is also a shutdown routine that should somehow be called for
4858  cleanup upon program exit.
4859 
4860  #define MAX_POOL_ENTRIES 100
4861  #define MINIMUM_MORECORE_SIZE (64 * 1024U)
4862  static int next_os_pool;
4863  void *our_os_pools[MAX_POOL_ENTRIES];
4864 
4865  void *osMoreCore(int size)
4866  {
4867  void *ptr = 0;
4868  static void *sbrk_top = 0;
4869 
4870  if (size > 0)
4871  {
4872  if (size < MINIMUM_MORECORE_SIZE)
4873  size = MINIMUM_MORECORE_SIZE;
4874  if (CurrentExecutionLevel() == kTaskLevel)
4875  ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
4876  if (ptr == 0)
4877  {
4878  return (void *) MFAIL;
4879  }
4880  // save ptrs so they can be freed during cleanup
4881  our_os_pools[next_os_pool] = ptr;
4882  next_os_pool++;
4883  ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
4884  sbrk_top = (char *) ptr + size;
4885  return ptr;
4886  }
4887  else if (size < 0)
4888  {
4889  // we don't currently support shrink behavior
4890  return (void *) MFAIL;
4891  }
4892  else
4893  {
4894  return sbrk_top;
4895  }
4896  }
4897 
4898  // cleanup any allocated memory pools
4899  // called as last thing before shutting down driver
4900 
4901  void osCleanupMem(void)
4902  {
4903  void **ptr;
4904 
4905  for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
4906  if (*ptr)
4907  {
4908  PoolDeallocate(*ptr);
4909  *ptr = 0;
4910  }
4911  }
4912 
4913 */
4914 
4915 
4916 /* -----------------------------------------------------------------------
4917 History:
4918  v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea
4919  * fix bad comparison in dlposix_memalign
4920  * don't reuse adjusted asize in sys_alloc
4921  * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion
4922  * reduce compiler warnings -- thanks to all who reported/suggested these
4923 
4924  v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee)
4925  * Always perform unlink checks unless INSECURE
4926  * Add posix_memalign.
4927  * Improve realloc to expand in more cases; expose realloc_in_place.
4928  Thanks to Peter Buhr for the suggestion.
4929  * Add footprint_limit, inspect_all, bulk_free. Thanks
4930  to Barry Hayes and others for the suggestions.
4931  * Internal refactorings to avoid calls while holding locks
4932  * Use non-reentrant locks by default. Thanks to Roland McGrath
4933  for the suggestion.
4934  * Small fixes to mspace_destroy, reset_on_error.
4935  * Various configuration extensions/changes. Thanks
4936  to all who contributed these.
4937 
4938  V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu)
4939  * Update Creative Commons URL
4940 
4941  V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee)
4942  * Use zeros instead of prev foot for is_mmapped
4943  * Add mspace_track_large_chunks; thanks to Jean Brouwers
4944  * Fix set_inuse in internal_realloc; thanks to Jean Brouwers
4945  * Fix insufficient sys_alloc padding when using 16byte alignment
4946  * Fix bad error check in mspace_footprint
4947  * Adaptations for ptmalloc; thanks to Wolfram Gloger.
4948  * Reentrant spin locks; thanks to Earl Chew and others
4949  * Win32 improvements; thanks to Niall Douglas and Earl Chew
4950  * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options
4951  * Extension hook in malloc_state
4952  * Various small adjustments to reduce warnings on some compilers
4953  * Various configuration extensions/changes for more platforms. Thanks
4954  to all who contributed these.
4955 
4956  V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
4957  * Add max_footprint functions
4958  * Ensure all appropriate literals are size_t
4959  * Fix conditional compilation problem for some #define settings
4960  * Avoid concatenating segments with the one provided
4961  in create_mspace_with_base
4962  * Rename some variables to avoid compiler shadowing warnings
4963  * Use explicit lock initialization.
4964  * Better handling of sbrk interference.
4965  * Simplify and fix segment insertion, trimming and mspace_destroy
4966  * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
4967  * Thanks especially to Dennis Flanagan for help on these.
4968 
4969  V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
4970  * Fix memalign brace error.
4971 
4972  V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
4973  * Fix improper #endif nesting in C++
4974  * Add explicit casts needed for C++
4975 
4976  V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
4977  * Use trees for large bins
4978  * Support mspaces
4979  * Use segments to unify sbrk-based and mmap-based system allocation,
4980  removing need for emulation on most platforms without sbrk.
4981  * Default safety checks
4982  * Optional footer checks. Thanks to William Robertson for the idea.
4983  * Internal code refactoring
4984  * Incorporate suggestions and platform-specific changes.
4985  Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
4986  Aaron Bachmann, Emery Berger, and others.
4987  * Speed up non-fastbin processing enough to remove fastbins.
4988  * Remove useless cfree() to avoid conflicts with other apps.
4989  * Remove internal memcpy, memset. Compilers handle builtins better.
4990  * Remove some options that no one ever used and rename others.
4991 
4992  V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4993  * Fix malloc_state bitmap array misdeclaration
4994 
4995  V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
4996  * Allow tuning of FIRST_SORTED_BIN_SIZE
4997  * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
4998  * Better detection and support for non-contiguousness of MORECORE.
4999  Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
5000  * Bypass most of malloc if no frees. Thanks To Emery Berger.
5001  * Fix freeing of old top non-contiguous chunk in sysmalloc.
5002  * Raised default trim and map thresholds to 256K.
5003  * Fix mmap-related #defines. Thanks to Lubos Lunak.
5004  * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5005  * Branch-free bin calculation
5006  * Default trim and mmap thresholds now 256K.
5007 
5008  V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5009  * Introduce independent_comalloc and independent_calloc.
5010  Thanks to Michael Pachos for motivation and help.
5011  * Make optional .h file available
5012  * Allow > 2GB requests on 32bit systems.
5013  * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5014  Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5015  and Anonymous.
5016  * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5017  helping test this.)
5018  * memalign: check alignment arg
5019  * realloc: don't try to shift chunks backwards, since this
5020  leads to more fragmentation in some programs and doesn't
5021  seem to help in any others.
5022  * Collect all cases in malloc requiring system memory into sysmalloc
5023  * Use mmap as backup to sbrk
5024  * Place all internal state in malloc_state
5025  * Introduce fastbins (although similar to 2.5.1)
5026  * Many minor tunings and cosmetic improvements
5027  * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5028  * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5029  Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5030  * Include errno.h to support default failure action.
5031 
5032  V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5033  * return null for negative arguments
5034  * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5035  * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5036  (e.g. WIN32 platforms)
5037  * Cleanup header file inclusion for WIN32 platforms
5038  * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5039  * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5040  memory allocation routines
5041  * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5042  * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5043  usage of 'assert' in non-WIN32 code
5044  * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5045  avoid infinite loop
5046  * Always call 'fREe()' rather than 'free()'
5047 
5048  V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5049  * Fixed ordering problem with boundary-stamping
5050 
5051  V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5052  * Added pvalloc, as recommended by H.J. Liu
5053  * Added 64bit pointer support mainly from Wolfram Gloger
5054  * Added anonymously donated WIN32 sbrk emulation
5055  * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5056  * malloc_extend_top: fix mask error that caused wastage after
5057  foreign sbrks
5058  * Add linux mremap support code from HJ Liu
5059 
5060  V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5061  * Integrated most documentation with the code.
5062  * Add support for mmap, with help from
5063  Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5064  * Use last_remainder in more cases.
5065  * Pack bins using idea from colin@nyx10.cs.du.edu
5066  * Use ordered bins instead of best-fit threshold
5067  * Eliminate block-local decls to simplify tracing and debugging.
5068  * Support another case of realloc via move into top
5069  * Fix error occurring when initial sbrk_base not word-aligned.
5070  * Rely on page size for units instead of SBRK_UNIT to
5071  avoid surprises about sbrk alignment conventions.
5072  * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5073  (raymond@es.ele.tue.nl) for the suggestion.
5074  * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5075  * More precautions for cases where other routines call sbrk,
5076  courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5077  * Added macros etc., allowing use in linux libc from
5078  H.J. Lu (hjl@gnu.ai.mit.edu)
5079  * Inverted this history list
5080 
5081  V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5082  * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5083  * Removed all preallocation code since under current scheme
5084  the work required to undo bad preallocations exceeds
5085  the work saved in good cases for most test programs.
5086  * No longer use return list or unconsolidated bins since
5087  no scheme using them consistently outperforms those that don't
5088  given above changes.
5089  * Use best fit for very large chunks to prevent some worst-cases.
5090  * Added some support for debugging
5091 
5092  V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5093  * Removed footers when chunks are in use. Thanks to
5094  Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5095 
5096  V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5097  * Added malloc_trim, with help from Wolfram Gloger
5098  (wmglo@Dent.MED.Uni-Muenchen.DE).
5099 
5100  V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5101 
5102  V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5103  * realloc: try to expand in both directions
5104  * malloc: swap order of clean-bin strategy;
5105  * realloc: only conditionally expand backwards
5106  * Try not to scavenge used bins
5107  * Use bin counts as a guide to preallocation
5108  * Occasionally bin return list chunks in first scan
5109  * Add a few optimizations from colin@nyx10.cs.du.edu
5110 
5111  V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5112  * faster bin computation & slightly different binning
5113  * merged all consolidations to one part of malloc proper
5114  (eliminating old malloc_find_space & malloc_clean_bin)
5115  * Scan 2 returns chunks (not just 1)
5116  * Propagate failure in realloc if malloc returns 0
5117  * Add stuff to allow compilation on non-ANSI compilers
5118  from kpv@research.att.com
5119 
5120  V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5121  * removed potential for odd address access in prev_chunk
5122  * removed dependency on getpagesize.h
5123  * misc cosmetics and a bit more internal documentation
5124  * anticosmetics: mangled names in macros to evade debugger strangeness
5125  * tested on sparc, hp-700, dec-mips, rs6000
5126  with gcc & native cc (hp, dec only) allowing
5127  Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5128 
5129  Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5130  * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5131  structure of old version, but most details differ.)
5132 
5133 */
Definition: dlmalloc.c:1205