1 // SPDX-License-Identifier: GPL-2.0+
3 * This code is based on a version (aka dlmalloc) of malloc/free/realloc written
4 * by Doug Lea and released to the public domain, as explained at
* http://creativecommons.org/publicdomain/zero/1.0/
7 * The original code is available at http://gee.cs.oswego.edu/pub/misc/
8 * as file malloc-2.6.6.c.
11 #if CONFIG_IS_ENABLED(UNIT_TEST)
17 #include <asm/global_data.h>
21 #include <valgrind/memcheck.h>
25 static void malloc_update_mallinfo (void);
26 void malloc_stats (void);
33 DECLARE_GLOBAL_DATA_PTR;
36 Emulation of sbrk for WIN32
37 All code within the ifdef WIN32 is untested by me.
39 Thanks to Martin Fong and others for supplying this.
45 #define AlignPage(add) (((add) + (malloc_getpagesize-1)) & \
46 ~(malloc_getpagesize-1))
47 #define AlignPage64K(add) (((add) + (0x10000 - 1)) & ~(0x10000 - 1))
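/*
 * A quick illustration (ours, not from the original): both macros use the
 * standard power-of-two round-up, (x + a - 1) & ~(a - 1). With 4K pages,
 * AlignPage(0x1234) == 0x2000 and AlignPage64K(0x12345) == 0x20000.
 */
static inline unsigned long example_round_up(unsigned long x, unsigned long a)
{
	/* 'a' must be a power of two, as for AlignPage/AlignPage64K */
	return (x + a - 1) & ~(a - 1);
}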
/* reserve 64MB to ensure large contiguous space */
50 #define RESERVED_SIZE (1024*1024*64)
51 #define NEXT_SIZE (2048*1024)
52 #define TOP_MEMORY ((unsigned long)2*1024*1024*1024)
55 typedef struct GmListElement GmListElement;
63 static GmListElement* head = 0;
64 static unsigned int gNextAddress = 0;
65 static unsigned int gAddressBase = 0;
66 static unsigned int gAllocatedSize = 0;
69 GmListElement* makeGmListElement (void* bas)
72 this = (GmListElement*)(void*)LocalAlloc (0, sizeof (GmListElement));
86 assert ( (head == NULL) || (head->base == (void*)gAddressBase));
87 if (gAddressBase && (gNextAddress - gAddressBase))
89 rval = VirtualFree ((void*)gAddressBase,
90 gNextAddress - gAddressBase,
96 GmListElement* next = head->next;
97 rval = VirtualFree (head->base, 0, MEM_RELEASE);
105 void* findRegion (void* start_address, unsigned long size)
107 MEMORY_BASIC_INFORMATION info;
108 if (size >= TOP_MEMORY) return NULL;
110 while ((unsigned long)start_address + size < TOP_MEMORY)
112 VirtualQuery (start_address, &info, sizeof (info));
113 if ((info.State == MEM_FREE) && (info.RegionSize >= size))
114 return start_address;
117 /* Requested region is not available so see if the */
118 /* next region is available. Set 'start_address' */
119 /* to the next region and call 'VirtualQuery()' */
122 start_address = (char*)info.BaseAddress + info.RegionSize;
124 /* Make sure we start looking for the next region */
125 /* on the *next* 64K boundary. Otherwise, even if */
126 /* the new region is free according to */
127 /* 'VirtualQuery()', the subsequent call to */
128 /* 'VirtualAlloc()' (which follows the call to */
129 /* this routine in 'wsbrk()') will round *down* */
130 /* the requested address to a 64K boundary which */
131 /* we already know is an address in the */
132 /* unavailable region. Thus, the subsequent call */
133 /* to 'VirtualAlloc()' will fail and bring us back */
134 /* here, causing us to go into an infinite loop. */
start_address =
	(void *) AlignPage64K((unsigned long) start_address);
145 void* wsbrk (long size)
150 if (gAddressBase == 0)
152 gAllocatedSize = max (RESERVED_SIZE, AlignPage (size));
153 gNextAddress = gAddressBase =
154 (unsigned int)VirtualAlloc (NULL, gAllocatedSize,
155 MEM_RESERVE, PAGE_NOACCESS);
} else if (AlignPage (gNextAddress + size) > (gAddressBase +
			gAllocatedSize))
159 long new_size = max (NEXT_SIZE, AlignPage (size));
160 void* new_address = (void*)(gAddressBase+gAllocatedSize);
163 new_address = findRegion (new_address, new_size);
168 gAddressBase = gNextAddress =
169 (unsigned int)VirtualAlloc (new_address, new_size,
170 MEM_RESERVE, PAGE_NOACCESS);
171 /* repeat in case of race condition */
172 /* The region that we found has been snagged */
173 /* by another thread */
175 while (gAddressBase == 0);
177 assert (new_address == (void*)gAddressBase);
179 gAllocatedSize = new_size;
181 if (!makeGmListElement ((void*)gAddressBase))
184 if ((size + gNextAddress) > AlignPage (gNextAddress))
187 res = VirtualAlloc ((void*)AlignPage (gNextAddress),
188 (size + gNextAddress -
189 AlignPage (gNextAddress)),
190 MEM_COMMIT, PAGE_READWRITE);
194 tmp = (void*)gNextAddress;
195 gNextAddress = (unsigned int)tmp + size;
200 unsigned int alignedGoal = AlignPage (gNextAddress + size);
201 /* Trim by releasing the virtual memory */
202 if (alignedGoal >= gAddressBase)
204 VirtualFree ((void*)alignedGoal, gNextAddress - alignedGoal,
206 gNextAddress = gNextAddress + size;
207 return (void*)gNextAddress;
211 VirtualFree ((void*)gAddressBase, gNextAddress - gAddressBase,
213 gNextAddress = gAddressBase;
219 return (void*)gNextAddress;
234 INTERNAL_SIZE_T prev_size; /* Size of previous chunk (if free). */
235 INTERNAL_SIZE_T size; /* Size in bytes, including overhead. */
236 struct malloc_chunk* fd; /* double links -- used only if free. */
237 struct malloc_chunk* bk;
238 } __attribute__((__may_alias__)) ;
240 typedef struct malloc_chunk* mchunkptr;
244 malloc_chunk details:
246 (The following includes lightly edited explanations by Colin Plumb.)
248 Chunks of memory are maintained using a `boundary tag' method as
249 described in e.g., Knuth or Standish. (See the paper by Paul
250 Wilson ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a
251 survey of such techniques.) Sizes of free chunks are stored both
252 in the front of each chunk and at the end. This makes
253 consolidating fragmented chunks into bigger chunks very fast. The
size fields also hold bits representing whether chunks are free or
in use.
257 An allocated chunk looks like this:
260 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
261 | Size of previous chunk, if allocated | |
262 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
263 | Size of chunk, in bytes |P|
264 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
265 | User data starts here... .
267 . (malloc_usable_space() bytes) .
269 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
271 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
274 Where "chunk" is the front of the chunk for the purpose of most of
275 the malloc code, but "mem" is the pointer that is returned to the
276 user. "Nextchunk" is the beginning of the next contiguous chunk.
Chunks always begin on even word boundaries, so the mem portion
279 (which is returned to the user) is also on an even word boundary, and
280 thus double-word aligned.
282 Free chunks are stored in circular doubly-linked lists, and look like this:
284 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
285 | Size of previous chunk |
286 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
287 `head:' | Size of chunk, in bytes |P|
288 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
289 | Forward pointer to next chunk in list |
290 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
291 | Back pointer to previous chunk in list |
292 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
293 | Unused space (may be 0 bytes long) .
297 nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
298 `foot:' | Size of chunk, in bytes |
299 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
301 The P (PREV_INUSE) bit, stored in the unused low-order bit of the
302 chunk size (which is always a multiple of two words), is an in-use
303 bit for the *previous* chunk. If that bit is *clear*, then the
304 word before the current chunk size contains the previous chunk
305 size, and can be used to find the front of the previous chunk.
306 (The very first chunk allocated always has this bit set,
307 preventing access to non-existent (or non-owned) memory.)
309 Note that the `foot' of the current chunk is actually represented
310 as the prev_size of the NEXT chunk. (This makes it easier to
311 deal with alignments etc).
313 The two exceptions to all this are
315 1. The special chunk `top', which doesn't bother using the
316 trailing size field since there is no
317 next contiguous chunk that would have to index off it. (After
318 initialization, `top' is forced to always exist. If it would
become less than MINSIZE bytes long, it is replenished via
malloc_extend_top.
322 2. Chunks allocated via mmap, which have the second-lowest-order
323 bit (IS_MMAPPED) set in their size fields. Because they are
324 never merged or traversed from any other chunk, they have no
325 foot size or inuse information.
327 Available chunks are kept in any of several places (all declared below):
329 * `av': An array of chunks serving as bin headers for consolidated
330 chunks. Each bin is doubly linked. The bins are approximately
331 proportionally (log) spaced. There are a lot of these bins
332 (128). This may look excessive, but works very well in
333 practice. All procedures maintain the invariant that no
334 consolidated chunk physically borders another one. Chunks in
335 bins are kept in size order, with ties going to the
336 approximately least recently used chunk.
338 The chunks in each bin are maintained in decreasing sorted order by
339 size. This is irrelevant for the small bins, which all contain
340 the same-sized chunks, but facilitates best-fit allocation for
341 larger chunks. (These lists are just sequential. Keeping them in
342 order almost never requires enough traversal to warrant using
343 fancier ordered data structures.) Chunks of the same size are
344 linked with the most recently freed at the front, and allocations
345 are taken from the back. This results in LRU or FIFO allocation
346 order, which tends to give each chunk an equal opportunity to be
347 consolidated with adjacent freed chunks, resulting in larger free
348 chunks and less fragmentation.
350 * `top': The top-most available chunk (i.e., the one bordering the
351 end of available memory) is treated specially. It is never
352 included in any bin, is used only if no other chunk is
353 available, and is released back to the system if it is very
354 large (see M_TRIM_THRESHOLD).
356 * `last_remainder': A bin holding only the remainder of the
357 most recently split (non-top) chunk. This bin is checked
358 before other non-fitting chunks, so as to provide better
359 locality for runs of sequentially allocated chunks.
361 * Implicitly, through the host system's memory mapping tables.
362 If supported, requests greater than a threshold are usually
363 serviced via calls to mmap, and then later released via munmap.
367 /* sizes, alignments */
369 #define SIZE_SZ (sizeof(INTERNAL_SIZE_T))
370 #define MALLOC_ALIGNMENT (SIZE_SZ + SIZE_SZ)
371 #define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
372 #define MINSIZE (sizeof(struct malloc_chunk))
374 /* conversion from malloc headers to user pointers, and back */
376 #define chunk2mem(p) ((Void_t*)((char*)(p) + 2*SIZE_SZ))
377 #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - 2*SIZE_SZ))
379 /* pad request bytes into a usable size */
381 #define request2size(req) \
382 (((long)((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) < \
383 (long)(MINSIZE + MALLOC_ALIGN_MASK)) ? MINSIZE : \
384 (((req) + (SIZE_SZ + MALLOC_ALIGN_MASK)) & ~(MALLOC_ALIGN_MASK)))
386 /* Check if m has acceptable alignment */
388 #define aligned_OK(m) (((unsigned long)((m)) & (MALLOC_ALIGN_MASK)) == 0)
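/*
 * A minimal sketch (ours) of the request-to-chunk mapping: request2size()
 * adds one SIZE_SZ word of overhead, rounds up to MALLOC_ALIGNMENT, and
 * never returns less than MINSIZE; chunk2mem()/mem2chunk() then translate
 * between the chunk header and the user pointer by 2*SIZE_SZ.
 */
static inline int example_chunk_roundtrip(size_t req)
{
	INTERNAL_SIZE_T nb = request2size(req);	/* padded chunk size */
	struct malloc_chunk c;			/* stand-in chunk */
	mchunkptr p = &c;

	return mem2chunk(chunk2mem(p)) == p &&	/* header <-> mem round trip */
	       nb >= MINSIZE &&			/* floor at MINSIZE */
	       (nb & MALLOC_ALIGN_MASK) == 0;	/* size is aligned */
}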
394 Physical chunk operations
398 /* size field is or'ed with PREV_INUSE when previous adjacent chunk in use */
400 #define PREV_INUSE 0x1
402 /* size field is or'ed with IS_MMAPPED if the chunk was obtained with mmap() */
404 #define IS_MMAPPED 0x2
406 /* Bits to mask off when extracting size */
408 #define SIZE_BITS (PREV_INUSE|IS_MMAPPED)
411 /* Ptr to next physical malloc_chunk. */
413 #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->size & ~PREV_INUSE) ))
415 /* Ptr to previous physical malloc_chunk */
417 #define prev_chunk(p)\
418 ((mchunkptr)( ((char*)(p)) - ((p)->prev_size) ))
421 /* Treat space at ptr + offset as a chunk */
423 #define chunk_at_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
429 Dealing with use bits
432 /* extract p's inuse bit */
#define inuse(p)\
((((mchunkptr)(((char*)(p))+((p)->size & ~PREV_INUSE)))->size) & PREV_INUSE)
437 /* extract inuse bit of previous chunk */
439 #define prev_inuse(p) ((p)->size & PREV_INUSE)
441 /* check for mmap()'ed chunk */
443 #define chunk_is_mmapped(p) ((p)->size & IS_MMAPPED)
445 /* set/clear chunk as in use without otherwise disturbing */
447 #define set_inuse(p)\
448 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size |= PREV_INUSE
450 #define clear_inuse(p)\
451 ((mchunkptr)(((char*)(p)) + ((p)->size & ~PREV_INUSE)))->size &= ~(PREV_INUSE)
453 /* check/set/clear inuse bits in known places */
455 #define inuse_bit_at_offset(p, s)\
456 (((mchunkptr)(((char*)(p)) + (s)))->size & PREV_INUSE)
458 #define set_inuse_bit_at_offset(p, s)\
459 (((mchunkptr)(((char*)(p)) + (s)))->size |= PREV_INUSE)
461 #define clear_inuse_bit_at_offset(p, s)\
462 (((mchunkptr)(((char*)(p)) + (s)))->size &= ~(PREV_INUSE))
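/*
 * Sketch (ours) tying the use bits together: a chunk's own in-use state
 * is the PREV_INUSE bit of the *following* chunk, which is what inuse(p)
 * reads. Backward traversal is only legal when prev_inuse(p) is clear,
 * because only a free predecessor keeps a valid prev_size footer.
 */
static inline mchunkptr example_coalesce_candidate(mchunkptr p)
{
	if (inuse(p))
		return NULL;		/* p is allocated; nothing to merge */
	if (!prev_inuse(p))
		return prev_chunk(p);	/* free predecessor is mergeable */
	return p;
}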
468 Dealing with size fields
471 /* Get size, ignoring use bits */
473 #define chunksize(p) ((p)->size & ~(SIZE_BITS))
475 /* Set size at head, without disturbing its use bit */
477 #define set_head_size(p, s) ((p)->size = (((p)->size & PREV_INUSE) | (s)))
479 /* Set size/use ignoring previous bits in header */
481 #define set_head(p, s) ((p)->size = (s))
483 /* Set size at footer (only when chunk is not in use) */
485 #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_size = (s))
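/*
 * Sketch (ours): when a free chunk of size s is created (e.g., the tail
 * of a split), its size goes both into its head and, via set_foot(),
 * into the prev_size slot of the chunk that follows, so a neighbor can
 * find this chunk's front when coalescing backward.
 */
static inline void example_tag_free_chunk(mchunkptr p, INTERNAL_SIZE_T s)
{
	set_head(p, s | PREV_INUSE);	/* previous neighbor still in use */
	set_foot(p, s);			/* mirror the size at the boundary */
}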
494 The bins, `av_' are an array of pairs of pointers serving as the
495 heads of (initially empty) doubly-linked lists of chunks, laid out
496 in a way so that each pair can be treated as if it were in a
497 malloc_chunk. (This way, the fd/bk offsets for linking bin heads
498 and chunks are the same).
500 Bins for sizes < 512 bytes contain chunks of all the same size, spaced
501 8 bytes apart. Larger bins are approximately logarithmically
502 spaced. (See the table below.) The `av_' array is never mentioned
503 directly in the code, but instead via bin access macros.
512 2 bins of size 262144
513 1 bin of size what's left
515 There is actually a little bit of slop in the numbers in bin_index
516 for the sake of speed. This makes no difference elsewhere.
518 The special chunks `top' and `last_remainder' get their own bins,
519 (this is implemented via yet more trickery with the av_ array),
520 although `top' is never properly linked to its bin since it is
521 always handled specially.
525 #define NAV 128 /* number of bins */
527 typedef struct malloc_chunk* mbinptr;
531 #define bin_at(i) ((mbinptr)((char*)&(av_[2*(i) + 2]) - 2*SIZE_SZ))
532 #define next_bin(b) ((mbinptr)((char*)(b) + 2 * sizeof(mbinptr)))
533 #define prev_bin(b) ((mbinptr)((char*)(b) - 2 * sizeof(mbinptr)))
536 The first 2 bins are never indexed. The corresponding av_ cells are instead
537 used for bookkeeping. This is not to save space, but to simplify
538 indexing, maintain locality, and avoid some initialization tests.
541 #define top (av_[2]) /* The topmost chunk */
542 #define last_remainder (bin_at(1)) /* remainder from last split */
546 Because top initially points to its own bin with initial
547 zero size, thus forcing extension on the first malloc request,
548 we avoid having any special code in malloc to check whether
it even exists yet. But we still need to check for it in malloc_extend_top.
552 #define initial_top ((mchunkptr)(bin_at(0)))
554 /* Helper macro to initialize bins */
556 #define IAV(i) bin_at(i), bin_at(i)
558 static mbinptr av_[NAV * 2 + 2] = {
560 IAV(0), IAV(1), IAV(2), IAV(3), IAV(4), IAV(5), IAV(6), IAV(7),
561 IAV(8), IAV(9), IAV(10), IAV(11), IAV(12), IAV(13), IAV(14), IAV(15),
562 IAV(16), IAV(17), IAV(18), IAV(19), IAV(20), IAV(21), IAV(22), IAV(23),
563 IAV(24), IAV(25), IAV(26), IAV(27), IAV(28), IAV(29), IAV(30), IAV(31),
564 IAV(32), IAV(33), IAV(34), IAV(35), IAV(36), IAV(37), IAV(38), IAV(39),
565 IAV(40), IAV(41), IAV(42), IAV(43), IAV(44), IAV(45), IAV(46), IAV(47),
566 IAV(48), IAV(49), IAV(50), IAV(51), IAV(52), IAV(53), IAV(54), IAV(55),
567 IAV(56), IAV(57), IAV(58), IAV(59), IAV(60), IAV(61), IAV(62), IAV(63),
568 IAV(64), IAV(65), IAV(66), IAV(67), IAV(68), IAV(69), IAV(70), IAV(71),
569 IAV(72), IAV(73), IAV(74), IAV(75), IAV(76), IAV(77), IAV(78), IAV(79),
570 IAV(80), IAV(81), IAV(82), IAV(83), IAV(84), IAV(85), IAV(86), IAV(87),
571 IAV(88), IAV(89), IAV(90), IAV(91), IAV(92), IAV(93), IAV(94), IAV(95),
572 IAV(96), IAV(97), IAV(98), IAV(99), IAV(100), IAV(101), IAV(102), IAV(103),
573 IAV(104), IAV(105), IAV(106), IAV(107), IAV(108), IAV(109), IAV(110), IAV(111),
574 IAV(112), IAV(113), IAV(114), IAV(115), IAV(116), IAV(117), IAV(118), IAV(119),
IAV(120), IAV(121), IAV(122), IAV(123), IAV(124), IAV(125), IAV(126), IAV(127)
};
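/*
 * Sketch (ours) of the bin_at() overlay, assuming the usual malloc_chunk
 * layout with no unexpected padding: bin_at(i) backs up 2*SIZE_SZ from
 * av_[2*i + 2], so the resulting pseudo-chunk's fd and bk fields land
 * exactly on the two av_ cells of bin i.
 */
static inline int example_bin_overlay(int i)
{
	mbinptr b = bin_at(i);

	return (void *)&b->fd == (void *)&av_[2 * i + 2] &&
	       (void *)&b->bk == (void *)&av_[2 * i + 3];
}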
578 #ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
579 static void malloc_init(void);
582 ulong mem_malloc_start = 0;
583 ulong mem_malloc_end = 0;
584 ulong mem_malloc_brk = 0;
586 static bool malloc_testing; /* enable test mode */
587 static int malloc_max_allocs; /* return NULL after this many calls to malloc() */
589 void *sbrk(ptrdiff_t increment)
591 ulong old = mem_malloc_brk;
592 ulong new = old + increment;
* if we are giving memory back, make sure we clear it out since
596 * we set MORECORE_CLEARS to 1
599 memset((void *)new, 0, -increment);
601 if ((new < mem_malloc_start) || (new > mem_malloc_end))
602 return (void *)MORECORE_FAILURE;
mem_malloc_brk = new;

return (void *)old;
609 void mem_malloc_init(ulong start, ulong size)
611 mem_malloc_start = start;
612 mem_malloc_end = start + size;
613 mem_malloc_brk = start;
615 #ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
619 debug("using memory %#lx-%#lx for malloc()\n", mem_malloc_start,
621 #if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
622 memset((void *)mem_malloc_start, 0x0, size);
626 /* field-extraction macros */
628 #define first(b) ((b)->fd)
629 #define last(b) ((b)->bk)
635 #define bin_index(sz) \
636 (((((unsigned long)(sz)) >> 9) == 0) ? (((unsigned long)(sz)) >> 3): \
637 ((((unsigned long)(sz)) >> 9) <= 4) ? 56 + (((unsigned long)(sz)) >> 6): \
638 ((((unsigned long)(sz)) >> 9) <= 20) ? 91 + (((unsigned long)(sz)) >> 9): \
639 ((((unsigned long)(sz)) >> 9) <= 84) ? 110 + (((unsigned long)(sz)) >> 12): \
640 ((((unsigned long)(sz)) >> 9) <= 340) ? 119 + (((unsigned long)(sz)) >> 15): \
((((unsigned long)(sz)) >> 9) <= 1364) ? 124 + (((unsigned long)(sz)) >> 18): \
					  126)
644 bins for chunks < 512 are all spaced 8 bytes apart, and hold
645 identically sized chunks. This is exploited in malloc.
648 #define MAX_SMALLBIN 63
649 #define MAX_SMALLBIN_SIZE 512
650 #define SMALLBIN_WIDTH 8
652 #define smallbin_index(sz) (((unsigned long)(sz)) >> 3)
655 Requests are `small' if both the corresponding and the next bin are small
658 #define is_small_request(nb) (nb < MAX_SMALLBIN_SIZE - SMALLBIN_WIDTH)
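/*
 * Worked examples (ours): a 40-byte chunk is small, so it lands in bin
 * smallbin_index(40) == 40 >> 3 == 5; a 600-byte chunk takes the second
 * bin_index() range, 56 + (600 >> 6) == 65.
 */
static inline void example_bin_indexing(void)
{
	assert(smallbin_index(40) == 5);
	assert(bin_index(600) == 65);
	assert(is_small_request(request2size(32)));
}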
663 To help compensate for the large number of bins, a one-level index
664 structure is used for bin-by-bin searching. `binblocks' is a
665 one-word bitvector recording whether groups of BINBLOCKWIDTH bins
666 have any (possibly) non-empty bins, so they can be skipped over
all at once during traversals. The bits are NOT always
668 cleared as soon as all bins in a block are empty, but instead only
669 when all are noticed to be empty during traversal in malloc.
672 #define BINBLOCKWIDTH 4 /* bins per block */
674 #define binblocks_r ((INTERNAL_SIZE_T)av_[1]) /* bitvector of nonempty blocks */
675 #define binblocks_w (av_[1])
677 /* bin<->block macros */
679 #define idx2binblock(ix) ((unsigned)1 << (ix / BINBLOCKWIDTH))
680 #define mark_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r | idx2binblock(ii)))
681 #define clear_binblock(ii) (binblocks_w = (mbinptr)(binblocks_r & ~(idx2binblock(ii))))
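/*
 * Sketch (ours, not for use on a live heap): one binblocks bit covers
 * BINBLOCKWIDTH consecutive bins. frontlink() sets a block's bit when it
 * links a chunk into one of its bins; malloc clears it only lazily, once
 * a traversal has seen every bin of the block empty.
 */
static inline int example_binblocks(int idx)
{
	mark_binblock(idx);	/* some bin in idx's block may be nonempty */
	if (!(binblocks_r & idx2binblock(idx)))
		return 0;	/* cannot happen right after the mark */
	clear_binblock(idx);	/* all bins of the block seen empty */
	return 1;
}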
687 /* Other static bookkeeping data */
689 /* variables holding tunable values */
691 static unsigned long trim_threshold = DEFAULT_TRIM_THRESHOLD;
692 static unsigned long top_pad = DEFAULT_TOP_PAD;
693 static unsigned int n_mmaps_max = DEFAULT_MMAP_MAX;
694 static unsigned long mmap_threshold = DEFAULT_MMAP_THRESHOLD;
696 /* The first value returned from sbrk */
697 static char* sbrk_base = (char*)(-1);
699 /* The maximum memory obtained from system via sbrk */
700 static unsigned long max_sbrked_mem = 0;
702 /* The maximum via either sbrk or mmap */
703 static unsigned long max_total_mem = 0;
705 /* internal working copy of mallinfo */
706 static struct mallinfo current_mallinfo = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
708 /* The total memory obtained from system via sbrk */
709 #define sbrked_mem (current_mallinfo.arena)
714 static unsigned int n_mmaps = 0;
716 static unsigned long mmapped_mem = 0;
718 static unsigned int max_n_mmaps = 0;
719 static unsigned long max_mmapped_mem = 0;
722 #ifdef CONFIG_SYS_MALLOC_DEFAULT_TO_INIT
723 static void malloc_init(void)
727 debug("bins (av_ array) are at %p\n", (void *)av_);
729 av_[0] = NULL; av_[1] = NULL;
730 for (i = 2, j = 2; i < NAV * 2 + 2; i += 2, j++) {
731 av_[i] = bin_at(j - 2);
732 av_[i + 1] = bin_at(j - 2);
/* Just print the first few bins so that
 * we can see they are all right.
738 debug("av_[%d]=%lx av_[%d]=%lx\n",
740 i + 1, (ulong)av_[i + 1]);
743 /* Init the static bookkeeping as well */
744 sbrk_base = (char *)(-1);
memset((void *)&current_mallinfo, 0, sizeof(struct mallinfo));
761 These routines make a number of assertions about the states
762 of data structures that should be true at all times. If any
763 are not true, it's very likely that a user program has somehow
764 trashed memory. (It's also possible that there is a coding error
765 in malloc. In which case, please report it!)
769 static void do_check_chunk(mchunkptr p)
774 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
776 /* No checkable chunk is mmapped */
777 assert(!chunk_is_mmapped(p));
779 /* Check for legal address ... */
780 assert((char*)p >= sbrk_base);
782 assert((char*)p + sz <= (char*)top);
784 assert((char*)p + sz <= sbrk_base + sbrked_mem);
790 static void do_check_free_chunk(mchunkptr p)
795 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
796 mchunkptr next = chunk_at_offset(p, sz);
800 /* Check whether it claims to be free ... */
803 /* Unless a special marker, must have OK fields */
804 if ((long)sz >= (long)MINSIZE)
806 assert((sz & MALLOC_ALIGN_MASK) == 0);
807 assert(aligned_OK(chunk2mem(p)));
808 /* ... matching footer field */
809 assert(next->prev_size == sz);
810 /* ... and is fully consolidated */
811 assert(prev_inuse(p));
812 assert (next == top || inuse(next));
814 /* ... and has minimally sane links */
815 assert(p->fd->bk == p);
816 assert(p->bk->fd == p);
818 else /* markers are always of size SIZE_SZ */
819 assert(sz == SIZE_SZ);
823 static void do_check_inuse_chunk(mchunkptr p)
828 mchunkptr next = next_chunk(p);
831 /* Check whether it claims to be in use ... */
834 /* ... and is surrounded by OK chunks.
835 Since more things can be checked with free chunks than inuse ones,
836 if an inuse chunk borders them and debug is on, it's worth doing them.
840 mchunkptr prv = prev_chunk(p);
841 assert(next_chunk(prv) == p);
842 do_check_free_chunk(prv);
846 assert(prev_inuse(next));
847 assert(chunksize(next) >= MINSIZE);
849 else if (!inuse(next))
850 do_check_free_chunk(next);
855 static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
860 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
863 do_check_inuse_chunk(p);
866 assert((long)sz >= (long)MINSIZE);
867 assert((sz & MALLOC_ALIGN_MASK) == 0);
869 assert(room < (long)MINSIZE);
871 /* ... and alignment */
872 assert(aligned_OK(chunk2mem(p)));
875 /* ... and was allocated at front of an available chunk */
876 assert(prev_inuse(p));
881 #define check_free_chunk(P) do_check_free_chunk(P)
882 #define check_inuse_chunk(P) do_check_inuse_chunk(P)
883 #define check_chunk(P) do_check_chunk(P)
884 #define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
886 #define check_free_chunk(P)
887 #define check_inuse_chunk(P)
888 #define check_chunk(P)
889 #define check_malloced_chunk(P,N)
895 Macro-based internal utilities
900 Linking chunks in bin lists.
901 Call these only with variables, not arbitrary expressions, as arguments.
905 Place chunk p of size s in its bin, in size order,
906 putting it ahead of others of same size.
910 #define frontlink(P, S, IDX, BK, FD) \
912 if (S < MAX_SMALLBIN_SIZE) \
914 IDX = smallbin_index(S); \
915 mark_binblock(IDX); \
920 FD->bk = BK->fd = P; \
924 IDX = bin_index(S); \
927 if (FD == BK) mark_binblock(IDX); \
930 while (FD != BK && S < chunksize(FD)) FD = FD->fd; \
935 FD->bk = BK->fd = P; \
940 /* take a chunk off a list */
942 #define unlink(P, BK, FD) \
950 /* Place p as the last remainder */
952 #define link_last_remainder(P) \
954 last_remainder->fd = last_remainder->bk = P; \
955 P->fd = P->bk = last_remainder; \
958 /* Clear the last_remainder bin */
960 #define clear_last_remainder \
961 (last_remainder->fd = last_remainder->bk = last_remainder)
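/*
 * Sketch (ours, illustration only, not for a live heap): after a split,
 * the tail chunk is parked in the one-chunk last_remainder bin so the
 * next inexact-fit request is served from it first; taking or rebinning
 * it empties the bin again.
 */
static inline void example_last_remainder(mchunkptr remainder)
{
	link_last_remainder(remainder);	/* park the split-off tail */
	clear_last_remainder;		/* and empty the bin again */
}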
967 /* Routines dealing with mmap(). */
972 static mchunkptr mmap_chunk(size_t size)
977 size_t page_mask = malloc_getpagesize - 1;
980 #ifndef MAP_ANONYMOUS
984 if(n_mmaps >= n_mmaps_max) return 0; /* too many regions */
986 /* For mmapped chunks, the overhead is one SIZE_SZ unit larger, because
987 * there is no following chunk whose prev_size field could be used.
989 size = (size + SIZE_SZ + page_mask) & ~page_mask;
992 p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE,
993 MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
994 #else /* !MAP_ANONYMOUS */
997 fd = open("/dev/zero", O_RDWR);
1000 p = (mchunkptr)mmap(0, size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0);
1003 if(p == (mchunkptr)-1) return 0;
1006 if (n_mmaps > max_n_mmaps) max_n_mmaps = n_mmaps;
1008 /* We demand that eight bytes into a page must be 8-byte aligned. */
1009 assert(aligned_OK(chunk2mem(p)));
1011 /* The offset to the start of the mmapped region is stored
1012 * in the prev_size field of the chunk; normally it is zero,
1013 * but that can be changed in memalign().
1016 set_head(p, size|IS_MMAPPED);
1018 mmapped_mem += size;
1019 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1020 max_mmapped_mem = mmapped_mem;
1021 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1022 max_total_mem = mmapped_mem + sbrked_mem;
1027 static void munmap_chunk(mchunkptr p)
1032 INTERNAL_SIZE_T size = chunksize(p);
1035 assert (chunk_is_mmapped(p));
1036 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1037 assert((n_mmaps > 0));
1038 assert(((p->prev_size + size) & (malloc_getpagesize-1)) == 0);
1041 mmapped_mem -= (size + p->prev_size);
1043 ret = munmap((char *)p - p->prev_size, size + p->prev_size);
1045 /* munmap returns non-zero on failure */
1052 static mchunkptr mremap_chunk(mchunkptr p, size_t new_size)
1057 size_t page_mask = malloc_getpagesize - 1;
1058 INTERNAL_SIZE_T offset = p->prev_size;
1059 INTERNAL_SIZE_T size = chunksize(p);
1062 assert (chunk_is_mmapped(p));
1063 assert(! ((char*)p >= sbrk_base && (char*)p < sbrk_base + sbrked_mem));
1064 assert((n_mmaps > 0));
1065 assert(((size + offset) & (malloc_getpagesize-1)) == 0);
1067 /* Note the extra SIZE_SZ overhead as in mmap_chunk(). */
1068 new_size = (new_size + offset + SIZE_SZ + page_mask) & ~page_mask;
1070 cp = (char *)mremap((char *)p - offset, size + offset, new_size, 1);
1072 if (cp == (char *)-1) return 0;
1074 p = (mchunkptr)(cp + offset);
1076 assert(aligned_OK(chunk2mem(p)));
1078 assert((p->prev_size == offset));
1079 set_head(p, (new_size - offset)|IS_MMAPPED);
1081 mmapped_mem -= size + offset;
1082 mmapped_mem += new_size;
1083 if ((unsigned long)mmapped_mem > (unsigned long)max_mmapped_mem)
1084 max_mmapped_mem = mmapped_mem;
1085 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1086 max_total_mem = mmapped_mem + sbrked_mem;
1090 #endif /* HAVE_MREMAP */
1092 #endif /* HAVE_MMAP */
1095 Extend the top-most chunk by obtaining memory from system.
1096 Main interface to sbrk (but see also malloc_trim).
1100 static void malloc_extend_top(INTERNAL_SIZE_T nb)
1105 char* brk; /* return value from sbrk */
1106 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of sbrked space */
1107 INTERNAL_SIZE_T correction; /* bytes for 2nd sbrk call */
1108 char* new_brk; /* return of 2nd sbrk call */
1109 INTERNAL_SIZE_T top_size; /* new size of top chunk */
1111 mchunkptr old_top = top; /* Record state of old top */
1112 INTERNAL_SIZE_T old_top_size = chunksize(old_top);
1113 char* old_end = (char*)(chunk_at_offset(old_top, old_top_size));
1115 /* Pad request with top_pad plus minimal overhead */
1117 INTERNAL_SIZE_T sbrk_size = nb + top_pad + MINSIZE;
1118 unsigned long pagesz = malloc_getpagesize;
1120 /* If not the first time through, round to preserve page boundary */
1121 /* Otherwise, we need to correct to a page size below anyway. */
1122 /* (We also correct below if an intervening foreign sbrk call.) */
1124 if (sbrk_base != (char*)(-1))
1125 sbrk_size = (sbrk_size + (pagesz - 1)) & ~(pagesz - 1);
1127 brk = (char*)(MORECORE (sbrk_size));
1129 /* Fail if sbrk failed or if a foreign sbrk call killed our space */
1130 if (brk == (char*)(MORECORE_FAILURE) ||
1131 (brk < old_end && old_top != initial_top))
1134 sbrked_mem += sbrk_size;
1136 if (brk == old_end) /* can just add bytes to current top */
1138 top_size = sbrk_size + old_top_size;
1139 set_head(top, top_size | PREV_INUSE);
if (sbrk_base == (char*)(-1)) /* First time through. Record base */
	sbrk_base = brk;
1145 else /* Someone else called sbrk(). Count those bytes as sbrked_mem. */
1146 sbrked_mem += brk - (char*)old_end;
1148 /* Guarantee alignment of first new chunk made from this space */
1149 front_misalign = (unsigned long)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1150 if (front_misalign > 0)
1152 correction = (MALLOC_ALIGNMENT) - front_misalign;
1158 /* Guarantee the next brk will be at a page boundary */
1160 correction += ((((unsigned long)(brk + sbrk_size))+(pagesz-1)) &
1161 ~(pagesz - 1)) - ((unsigned long)(brk + sbrk_size));
1163 /* Allocate correction */
1164 new_brk = (char*)(MORECORE (correction));
1165 if (new_brk == (char*)(MORECORE_FAILURE)) return;
1167 sbrked_mem += correction;
1169 top = (mchunkptr)brk;
1170 top_size = new_brk - brk + correction;
1171 set_head(top, top_size | PREV_INUSE);
1173 if (old_top != initial_top)
1176 /* There must have been an intervening foreign sbrk call. */
1177 /* A double fencepost is necessary to prevent consolidation */
1179 /* If not enough space to do this, then user did something very wrong */
1180 if (old_top_size < MINSIZE)
1182 set_head(top, PREV_INUSE); /* will force null return from malloc */
1186 /* Also keep size a multiple of MALLOC_ALIGNMENT */
1187 old_top_size = (old_top_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1188 set_head_size(old_top, old_top_size);
chunk_at_offset(old_top, old_top_size          )->size =
	SIZE_SZ|PREV_INUSE;
chunk_at_offset(old_top, old_top_size + SIZE_SZ)->size =
	SIZE_SZ|PREV_INUSE;
1193 /* If possible, release the rest. */
1194 if (old_top_size >= MINSIZE)
1195 fREe(chunk2mem(old_top));
1199 if ((unsigned long)sbrked_mem > (unsigned long)max_sbrked_mem)
1200 max_sbrked_mem = sbrked_mem;
1201 if ((unsigned long)(mmapped_mem + sbrked_mem) > (unsigned long)max_total_mem)
1202 max_total_mem = mmapped_mem + sbrked_mem;
1204 /* We always land on a page boundary */
1205 assert(((unsigned long)((char*)top + top_size) & (pagesz - 1)) == 0);
1211 /* Main public routines */
1217 The requested size is first converted into a usable form, `nb'.
1218 This currently means to add 4 bytes overhead plus possibly more to
1219 obtain 8-byte alignment and/or to obtain a size of at least
1220 MINSIZE (currently 16 bytes), the smallest allocatable size.
1221 (All fits are considered `exact' if they are within MINSIZE bytes.)
From there, the first of the following steps that succeeds is taken:
1225 1. The bin corresponding to the request size is scanned, and if
1226 a chunk of exactly the right size is found, it is taken.
1228 2. The most recently remaindered chunk is used if it is big
1229 enough. This is a form of (roving) first fit, used only in
1230 the absence of exact fits. Runs of consecutive requests use
1231 the remainder of the chunk used for the previous such request
1232 whenever possible. This limited use of a first-fit style
1233 allocation strategy tends to give contiguous chunks
1234 coextensive lifetimes, which improves locality and can reduce
1235 fragmentation in the long run.
1237 3. Other bins are scanned in increasing size order, using a
1238 chunk big enough to fulfill the request, and splitting off
1239 any remainder. This search is strictly by best-fit; i.e.,
1240 the smallest (with ties going to approximately the least
1241 recently used) chunk that fits is selected.
1243 4. If large enough, the chunk bordering the end of memory
1244 (`top') is split off. (This use of `top' is in accord with
1245 the best-fit search rule. In effect, `top' is treated as
1246 larger (and thus less well fitting) than any other available
1247 chunk since it can be extended to be as large as necessary
(up to system limitations).)
1250 5. If the request size meets the mmap threshold and the
1251 system supports mmap, and there are few enough currently
1252 allocated mmapped regions, and a call to mmap succeeds,
1253 the request is allocated via direct memory mapping.
1255 6. Otherwise, the top of memory is extended by
1256 obtaining more space from the system (normally using sbrk,
1257 but definable to anything else via the MORECORE macro).
1258 Memory is gathered from the system (in system page-sized
1259 units) in a way that allows chunks obtained across different
1260 sbrk calls to be consolidated, but does not require
1261 contiguous memory. Thus, it should be safe to intersperse
1262 mallocs with other sbrk calls.
All allocations are made from the `lowest' part of any found
1266 chunk. (The implementation invariant is that prev_inuse is
1267 always true of any allocated chunk; i.e., that each allocated
1268 chunk borders either a previously allocated and still in-use chunk,
1269 or the base of its memory arena.)
1274 Void_t* mALLOc(size_t bytes)
1279 mchunkptr victim; /* inspected/selected chunk */
1280 INTERNAL_SIZE_T victim_size; /* its size */
1281 int idx; /* index for bin traversal */
1282 mbinptr bin; /* associated bin */
1283 mchunkptr remainder; /* remainder from a split */
1284 long remainder_size; /* its size */
1285 int remainder_index; /* its bin index */
1286 unsigned long block; /* block traverser bit */
1287 int startidx; /* first bin of a traversed block */
1288 mchunkptr fwd; /* misc temp for linking */
1289 mchunkptr bck; /* misc temp for linking */
1290 mbinptr q; /* misc temp */
1294 #if CONFIG_IS_ENABLED(SYS_MALLOC_F)
1295 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT))
1296 return malloc_simple(bytes);
1299 if (CONFIG_IS_ENABLED(UNIT_TEST) && malloc_testing) {
1300 if (--malloc_max_allocs < 0)
1304 /* check if mem_malloc_init() was run */
1305 if ((mem_malloc_start == 0) && (mem_malloc_end == 0)) {
1306 /* not initialized yet */
1310 if ((long)bytes < 0) return NULL;
1312 nb = request2size(bytes); /* padded request size; */
1314 /* Check for exact match in a bin */
1316 if (is_small_request(nb)) /* Faster version for small requests */
1318 idx = smallbin_index(nb);
1320 /* No traversal or size check necessary for small bins. */
1325 /* Also scan the next one, since it would have a remainder < MINSIZE */
1333 victim_size = chunksize(victim);
1334 unlink(victim, bck, fwd);
1335 set_inuse_bit_at_offset(victim, victim_size);
1336 check_malloced_chunk(victim, nb);
1337 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1338 return chunk2mem(victim);
1341 idx += 2; /* Set for bin scan below. We've already scanned 2 bins. */
1346 idx = bin_index(nb);
1349 for (victim = last(bin); victim != bin; victim = victim->bk)
1351 victim_size = chunksize(victim);
1352 remainder_size = victim_size - nb;
1354 if (remainder_size >= (long)MINSIZE) /* too big */
1356 --idx; /* adjust to rescan below after checking last remainder */
1360 else if (remainder_size >= 0) /* exact fit */
1362 unlink(victim, bck, fwd);
1363 set_inuse_bit_at_offset(victim, victim_size);
1364 check_malloced_chunk(victim, nb);
1365 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1366 return chunk2mem(victim);
1374 /* Try to use the last split-off remainder */
1376 if ( (victim = last_remainder->fd) != last_remainder)
1378 victim_size = chunksize(victim);
1379 remainder_size = victim_size - nb;
1381 if (remainder_size >= (long)MINSIZE) /* re-split */
1383 remainder = chunk_at_offset(victim, nb);
1384 set_head(victim, nb | PREV_INUSE);
1385 link_last_remainder(remainder);
1386 set_head(remainder, remainder_size | PREV_INUSE);
1387 set_foot(remainder, remainder_size);
1388 check_malloced_chunk(victim, nb);
1389 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1390 return chunk2mem(victim);
1393 clear_last_remainder;
1395 if (remainder_size >= 0) /* exhaust */
1397 set_inuse_bit_at_offset(victim, victim_size);
1398 check_malloced_chunk(victim, nb);
1399 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1400 return chunk2mem(victim);
1403 /* Else place in bin */
1405 frontlink(victim, victim_size, remainder_index, bck, fwd);
1409 If there are any possibly nonempty big-enough blocks,
1410 search for best fitting chunk by scanning bins in blockwidth units.
1413 if ( (block = idx2binblock(idx)) <= binblocks_r)
1416 /* Get to the first marked block */
1418 if ( (block & binblocks_r) == 0)
1420 /* force to an even block boundary */
1421 idx = (idx & ~(BINBLOCKWIDTH - 1)) + BINBLOCKWIDTH;
1423 while ((block & binblocks_r) == 0)
1425 idx += BINBLOCKWIDTH;
1430 /* For each possibly nonempty block ... */
1433 startidx = idx; /* (track incomplete blocks) */
1434 q = bin = bin_at(idx);
1436 /* For each bin in this block ... */
1439 /* Find and use first big enough chunk ... */
1441 for (victim = last(bin); victim != bin; victim = victim->bk)
1443 victim_size = chunksize(victim);
1444 remainder_size = victim_size - nb;
1446 if (remainder_size >= (long)MINSIZE) /* split */
1448 remainder = chunk_at_offset(victim, nb);
1449 set_head(victim, nb | PREV_INUSE);
1450 unlink(victim, bck, fwd);
1451 link_last_remainder(remainder);
1452 set_head(remainder, remainder_size | PREV_INUSE);
1453 set_foot(remainder, remainder_size);
1454 check_malloced_chunk(victim, nb);
1455 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1456 return chunk2mem(victim);
1459 else if (remainder_size >= 0) /* take */
1461 set_inuse_bit_at_offset(victim, victim_size);
1462 unlink(victim, bck, fwd);
1463 check_malloced_chunk(victim, nb);
1464 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1465 return chunk2mem(victim);
1470 bin = next_bin(bin);
1472 } while ((++idx & (BINBLOCKWIDTH - 1)) != 0);
1474 /* Clear out the block bit. */
1476 do /* Possibly backtrack to try to clear a partial block */
1478 if ((startidx & (BINBLOCKWIDTH - 1)) == 0)
1480 av_[1] = (mbinptr)(binblocks_r & ~block);
1485 } while (first(q) == q);
1487 /* Get to the next possibly nonempty block */
1489 if ( (block <<= 1) <= binblocks_r && (block != 0) )
1491 while ((block & binblocks_r) == 0)
1493 idx += BINBLOCKWIDTH;
1503 /* Try to use top chunk */
1505 /* Require that there be a remainder, ensuring top always exists */
1506 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1510 /* If big and would otherwise need to extend, try to use mmap instead */
if ((unsigned long)nb >= (unsigned long)mmap_threshold &&
    (victim = mmap_chunk(nb)))
{
	VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
	return chunk2mem(victim);
}
1518 malloc_extend_top(nb);
1519 if ( (remainder_size = chunksize(top) - nb) < (long)MINSIZE)
1520 return NULL; /* propagate failure */
1524 set_head(victim, nb | PREV_INUSE);
1525 top = chunk_at_offset(victim, nb);
1526 set_head(top, remainder_size | PREV_INUSE);
1527 check_malloced_chunk(victim, nb);
1528 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(victim), bytes, SIZE_SZ, false);
1529 return chunk2mem(victim);
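/*
 * Illustration (ours, not part of the allocator), assuming the usual
 * declarations from malloc.h: because step 1 takes an exact binned fit
 * and free chunks coalesce, a free() immediately followed by a malloc()
 * of the same size typically hands back the same chunk.
 */
static inline int example_exact_fit_reuse(void)
{
	Void_t *a = mALLOc(40);
	Void_t *b;

	fREe(a);
	b = mALLOc(40);	/* usually the same chunk; not guaranteed */
	return a == b;
}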
1542 1. free(0) has no effect.
2. If the chunk was allocated via mmap, it is released via munmap().
1546 3. If a returned chunk borders the current high end of memory,
1547 it is consolidated into the top, and if the total unused
1548 topmost memory exceeds the trim threshold, malloc_trim is
1551 4. Other chunks are consolidated as they arrive, and
1552 placed in corresponding bins. (This includes the case of
1553 consolidating with the current `last_remainder').
1559 void fREe(Void_t* mem)
1564 mchunkptr p; /* chunk corresponding to mem */
1565 INTERNAL_SIZE_T hd; /* its head field */
1566 INTERNAL_SIZE_T sz; /* its size */
1567 int idx; /* its bin index */
1568 mchunkptr next; /* next contiguous chunk */
1569 INTERNAL_SIZE_T nextsz; /* its size */
1570 INTERNAL_SIZE_T prevsz; /* size of previous contiguous chunk */
1571 mchunkptr bck; /* misc temp for linking */
1572 mchunkptr fwd; /* misc temp for linking */
1573 int islr; /* track whether merging with last_remainder */
1575 #if CONFIG_IS_ENABLED(SYS_MALLOC_F)
1576 /* free() is a no-op - all the memory will be freed on relocation */
1577 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1578 VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
1583 if (mem == NULL) /* free(0) has no effect */
1590 if (hd & IS_MMAPPED) /* release mmapped memory. */
1597 check_inuse_chunk(p);
1599 sz = hd & ~PREV_INUSE;
1600 next = chunk_at_offset(p, sz);
1601 nextsz = chunksize(next);
1602 VALGRIND_FREELIKE_BLOCK(mem, SIZE_SZ);
1604 if (next == top) /* merge with top */
1608 if (!(hd & PREV_INUSE)) /* consolidate backward */
1610 prevsz = p->prev_size;
1611 p = chunk_at_offset(p, -((long) prevsz));
1613 unlink(p, bck, fwd);
1616 set_head(p, sz | PREV_INUSE);
1618 if ((unsigned long)(sz) >= (unsigned long)trim_threshold)
1619 malloc_trim(top_pad);
1623 set_head(next, nextsz); /* clear inuse bit */
1627 if (!(hd & PREV_INUSE)) /* consolidate backward */
1629 prevsz = p->prev_size;
1630 p = chunk_at_offset(p, -((long) prevsz));
1633 if (p->fd == last_remainder) /* keep as last_remainder */
1636 unlink(p, bck, fwd);
1639 if (!(inuse_bit_at_offset(next, nextsz))) /* consolidate forward */
1643 if (!islr && next->fd == last_remainder) /* re-insert last_remainder */
1646 link_last_remainder(p);
1649 unlink(next, bck, fwd);
1653 set_head(p, sz | PREV_INUSE);
1656 frontlink(p, sz, idx, bck, fwd);
1667 Chunks that were obtained via mmap cannot be extended or shrunk
1668 unless HAVE_MREMAP is defined, in which case mremap is used.
1669 Otherwise, if their reallocation is for additional space, they are
1670 copied. If for less, they are just left alone.
1672 Otherwise, if the reallocation is for additional space, and the
1673 chunk can be extended, it is, else a malloc-copy-free sequence is
1674 taken. There are several different ways that a chunk could be
1675 extended. All are tried:
1677 * Extending forward into following adjacent free chunk.
1678 * Shifting backwards, joining preceding adjacent space
1679 * Both shifting backwards and extending forward.
1680 * Extending into newly sbrked space
1682 Unless the #define REALLOC_ZERO_BYTES_FREES is set, realloc with a
1683 size argument of zero (re)allocates a minimum-sized chunk.
1685 If the reallocation is for less space, and the new request is for
a `small' (<512 bytes) size, then the newly unused space is lopped
off and freed.
1689 The old unix realloc convention of allowing the last-free'd chunk
1690 to be used as an argument to realloc is no longer supported.
1691 I don't know of any programs still relying on this feature,
1692 and allowing it would also allow too many other incorrect
1693 usages of realloc to be sensible.
1700 Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1705 INTERNAL_SIZE_T nb; /* padded request size */
1707 mchunkptr oldp; /* chunk corresponding to oldmem */
1708 INTERNAL_SIZE_T oldsize; /* its size */
1710 mchunkptr newp; /* chunk to return */
1711 INTERNAL_SIZE_T newsize; /* its size */
1712 Void_t* newmem; /* corresponding user mem */
1714 mchunkptr next; /* next contiguous chunk after oldp */
1715 INTERNAL_SIZE_T nextsize; /* its size */
1717 mchunkptr prev; /* previous contiguous chunk before oldp */
1718 INTERNAL_SIZE_T prevsize; /* its size */
1720 mchunkptr remainder; /* holds split off extra space from newp */
1721 INTERNAL_SIZE_T remainder_size; /* its size */
1723 mchunkptr bck; /* misc temp for linking */
1724 mchunkptr fwd; /* misc temp for linking */
1726 #ifdef REALLOC_ZERO_BYTES_FREES
1733 if ((long)bytes < 0) return NULL;
1735 /* realloc of null is supposed to be same as malloc */
1736 if (oldmem == NULL) return mALLOc(bytes);
1738 #if CONFIG_IS_ENABLED(SYS_MALLOC_F)
1739 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1740 /* This is harder to support and should not be needed */
1741 panic("pre-reloc realloc() is not supported");
1745 newp = oldp = mem2chunk(oldmem);
1746 newsize = oldsize = chunksize(oldp);
1749 nb = request2size(bytes);
1752 if (chunk_is_mmapped(oldp))
1755 newp = mremap_chunk(oldp, nb);
1756 if(newp) return chunk2mem(newp);
1758 /* Note the extra SIZE_SZ overhead. */
1759 if(oldsize - SIZE_SZ >= nb) return oldmem; /* do nothing */
1760 /* Must alloc, copy, free. */
1761 newmem = mALLOc(bytes);
if (newmem == NULL)
	return NULL; /* propagate failure */
1764 MALLOC_COPY(newmem, oldmem, oldsize - 2*SIZE_SZ);
1770 check_inuse_chunk(oldp);
1772 if ((long)(oldsize) < (long)(nb))
1775 /* Try expanding forward */
1777 next = chunk_at_offset(oldp, oldsize);
1778 if (next == top || !inuse(next))
1780 nextsize = chunksize(next);
1782 /* Forward into top only if a remainder */
1785 if ((long)(nextsize + newsize) >= (long)(nb + MINSIZE))
1787 newsize += nextsize;
1788 top = chunk_at_offset(oldp, nb);
1789 set_head(top, (newsize - nb) | PREV_INUSE);
1790 set_head_size(oldp, nb);
1791 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
1792 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
1793 return chunk2mem(oldp);
1797 /* Forward into next chunk */
1798 else if (((long)(nextsize + newsize) >= (long)(nb)))
1800 unlink(next, bck, fwd);
1801 newsize += nextsize;
1802 VALGRIND_RESIZEINPLACE_BLOCK(chunk2mem(oldp), 0, bytes, SIZE_SZ);
1803 VALGRIND_MAKE_MEM_DEFINED(chunk2mem(oldp), bytes);
1813 /* Try shifting backwards. */
1815 if (!prev_inuse(oldp))
1817 prev = prev_chunk(oldp);
1818 prevsize = chunksize(prev);
1820 /* try forward + backward first to save a later consolidation */
1827 if ((long)(nextsize + prevsize + newsize) >= (long)(nb + MINSIZE))
1829 unlink(prev, bck, fwd);
1831 newsize += prevsize + nextsize;
1832 newmem = chunk2mem(newp);
1833 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
1834 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1835 top = chunk_at_offset(newp, nb);
1836 set_head(top, (newsize - nb) | PREV_INUSE);
1837 set_head_size(newp, nb);
1838 VALGRIND_FREELIKE_BLOCK(oldmem, SIZE_SZ);
1843 /* into next chunk */
1844 else if (((long)(nextsize + prevsize + newsize) >= (long)(nb)))
1846 unlink(next, bck, fwd);
1847 unlink(prev, bck, fwd);
1849 newsize += nextsize + prevsize;
1850 newmem = chunk2mem(newp);
1851 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
1852 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1858 if (prev != NULL && (long)(prevsize + newsize) >= (long)nb)
1860 unlink(prev, bck, fwd);
1862 newsize += prevsize;
1863 newmem = chunk2mem(newp);
1864 VALGRIND_MALLOCLIKE_BLOCK(newmem, bytes, SIZE_SZ, false);
1865 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1872 newmem = mALLOc (bytes);
1874 if (newmem == NULL) /* propagate failure */
1877 /* Avoid copy if newp is next chunk after oldp. */
1878 /* (This can only happen when new chunk is sbrk'ed.) */
1880 if ( (newp = mem2chunk(newmem)) == next_chunk(oldp))
1882 newsize += chunksize(newp);
1887 /* Otherwise copy, free, and exit */
1888 MALLOC_COPY(newmem, oldmem, oldsize - SIZE_SZ);
1892 VALGRIND_RESIZEINPLACE_BLOCK(oldmem, 0, bytes, SIZE_SZ);
1893 VALGRIND_MAKE_MEM_DEFINED(oldmem, bytes);
1897 split: /* split off extra room in old or expanded chunk */
1899 if (newsize - nb >= MINSIZE) /* split off remainder */
1901 remainder = chunk_at_offset(newp, nb);
1902 remainder_size = newsize - nb;
1903 set_head_size(newp, nb);
1904 set_head(remainder, remainder_size | PREV_INUSE);
1905 set_inuse_bit_at_offset(remainder, remainder_size);
1906 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
1908 fREe(chunk2mem(remainder)); /* let free() deal with it */
1912 set_head_size(newp, newsize);
1913 set_inuse_bit_at_offset(newp, newsize);
1916 check_inuse_chunk(newp);
1917 return chunk2mem(newp);
1927 memalign requests more than enough space from malloc, finds a spot
1928 within that chunk that meets the alignment request, and then
1929 possibly frees the leading and trailing space.
1931 The alignment argument must be a power of two. This property is not
1932 checked by memalign, so misuse may result in random runtime errors.
1934 8-byte alignment is guaranteed by normal malloc calls, so don't
1935 bother calling memalign with an argument of 8 or less.
1937 Overreliance on memalign is a sure way to fragment space.
1943 Void_t* mEMALIGn(size_t alignment, size_t bytes)
1948 INTERNAL_SIZE_T nb; /* padded request size */
1949 char* m; /* memory returned by malloc call */
1950 mchunkptr p; /* corresponding chunk */
1951 char* brk; /* alignment point within p */
1952 mchunkptr newp; /* chunk to return */
1953 INTERNAL_SIZE_T newsize; /* its size */
INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
1955 mchunkptr remainder; /* spare room at end to split off */
1956 long remainder_size; /* its size */
1958 if ((long)bytes < 0) return NULL;
1960 #if CONFIG_IS_ENABLED(SYS_MALLOC_F)
1961 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
1962 return memalign_simple(alignment, bytes);
1966 /* If need less alignment than we give anyway, just relay to malloc */
1968 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
1970 /* Otherwise, ensure that it is at least a minimum chunk size */
1972 if (alignment < MINSIZE) alignment = MINSIZE;
1974 /* Call malloc with worst case padding to hit alignment. */
1976 nb = request2size(bytes);
1977 m = (char*)(mALLOc(nb + alignment + MINSIZE));
1980 * The attempt to over-allocate (with a size large enough to guarantee the
1981 * ability to find an aligned region within allocated memory) failed.
1983 * Try again, this time only allocating exactly the size the user wants. If
1984 * the allocation now succeeds and just happens to be aligned, we can still
1985 * fulfill the user's request.
1988 size_t extra, extra2;
1990 * Use bytes not nb, since mALLOc internally calls request2size too, and
1991 * each call increases the size to allocate, to account for the header.
1993 m = (char*)(mALLOc(bytes));
1994 /* Aligned -> return it */
if ((((unsigned long)(m)) % alignment) == 0)
	return m;
1998 * Otherwise, try again, requesting enough extra space to be able to
1999 * acquire alignment.
2002 /* Add in extra bytes to match misalignment of unexpanded allocation */
2003 extra = alignment - (((unsigned long)(m)) % alignment);
2004 m = (char*)(mALLOc(bytes + extra));
2006 * m might not be the same as before. Validate that the previous value of
2007 * extra still works for the current value of m.
* If (!m), extra2 == alignment > extra, so the check below fails and
* we fall through to the NULL check.
2011 extra2 = alignment - (((unsigned long)(m)) % alignment);
2012 if (extra2 > extra) {
2017 /* Fall through to original NULL check and chunk splitting logic */
2020 if (m == NULL) return NULL; /* propagate failure */
2024 if ((((unsigned long)(m)) % alignment) == 0) /* aligned */
2027 if(chunk_is_mmapped(p))
2028 return chunk2mem(p); /* nothing more to do */
2031 else /* misaligned */
2034 Find an aligned spot inside chunk.
2035 Since we need to give back leading space in a chunk of at
2036 least MINSIZE, if the first calculation places us at
2037 a spot with less than MINSIZE leader, we can move to the
2038 next aligned spot -- we've allocated enough total room so that
2039 this is always possible.
2042 brk = (char*)mem2chunk(((unsigned long)(m + alignment - 1)) & -((signed) alignment));
2043 if ((long)(brk - (char*)(p)) < MINSIZE) brk = brk + alignment;
2045 newp = (mchunkptr)brk;
2046 leadsize = brk - (char*)(p);
2047 newsize = chunksize(p) - leadsize;
2050 if(chunk_is_mmapped(p))
2052 newp->prev_size = p->prev_size + leadsize;
2053 set_head(newp, newsize|IS_MMAPPED);
2054 return chunk2mem(newp);
2058 /* give back leader, use the rest */
2060 set_head(newp, newsize | PREV_INUSE);
2061 set_inuse_bit_at_offset(newp, newsize);
2062 set_head_size(p, leadsize);
2065 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(p), bytes, SIZE_SZ, false);
2067 assert (newsize >= nb && (((unsigned long)(chunk2mem(p))) % alignment) == 0);
2070 /* Also give back spare room at the end */
2072 remainder_size = chunksize(p) - nb;
2074 if (remainder_size >= (long)MINSIZE)
2076 remainder = chunk_at_offset(p, nb);
2077 set_head(remainder, remainder_size | PREV_INUSE);
2078 set_head_size(p, nb);
2079 VALGRIND_MALLOCLIKE_BLOCK(chunk2mem(remainder), remainder_size, SIZE_SZ,
2081 fREe(chunk2mem(remainder));
2084 check_inuse_chunk(p);
2085 return chunk2mem(p);
2093 valloc just invokes memalign with alignment argument equal
2094 to the page size of the system (or as near to this as can
2095 be figured out from all the includes/defines above.)
2099 Void_t* vALLOc(size_t bytes)
2104 return mEMALIGn (malloc_getpagesize, bytes);
2108 pvalloc just invokes valloc for the nearest pagesize
that will accommodate the request
2114 Void_t* pvALLOc(size_t bytes)
2119 size_t pagesize = malloc_getpagesize;
2120 return mEMALIGn (pagesize, (bytes + pagesize - 1) & ~(pagesize - 1));
2125 calloc calls malloc, then zeroes out the allocated chunk.
2130 Void_t* cALLOc(size_t n, size_t elem_size)
2136 INTERNAL_SIZE_T csz;
2138 INTERNAL_SIZE_T sz = n * elem_size;
2141 /* check if expand_top called, in which case don't need to clear */
2142 #if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
2144 mchunkptr oldtop = top;
2145 INTERNAL_SIZE_T oldtopsize = chunksize(top);
2148 Void_t* mem = mALLOc (sz);
2150 if ((long)n < 0) return NULL;
2156 #if CONFIG_IS_ENABLED(SYS_MALLOC_F)
2157 if (!(gd->flags & GD_FLG_FULL_MALLOC_INIT)) {
2164 /* Two optional cases in which clearing not necessary */
2168 if (chunk_is_mmapped(p)) return mem;
2173 #if CONFIG_IS_ENABLED(SYS_MALLOC_CLEAR_ON_INIT)
2175 if (p == oldtop && csz > oldtopsize)
2177 /* clear only the bytes from non-freshly-sbrked memory */
2183 MALLOC_ZERO(mem, csz - SIZE_SZ);
2184 VALGRIND_MAKE_MEM_DEFINED(mem, sz);
2191 cfree just calls free. It is needed/defined on some systems
2192 that pair it with calloc, presumably for odd historical reasons.
2196 #if !defined(INTERNAL_LINUX_C_LIB) || !defined(__ELF__)
2198 void cfree(Void_t *mem)
2211 Malloc_trim gives memory back to the system (via negative
2212 arguments to sbrk) if there is unused memory at the `high' end of
2213 the malloc pool. You can call this after freeing large blocks of
2214 memory to potentially reduce the system-level memory requirements
2215 of a program. However, it cannot guarantee to reduce memory. Under
2216 some allocation patterns, some large free blocks of memory will be
locked between two used chunks, so they cannot be given back to
the system.
2220 The `pad' argument to malloc_trim represents the amount of free
2221 trailing space to leave untrimmed. If this argument is zero,
2222 only the minimum amount of memory to maintain internal data
2223 structures will be left (one page or less). Non-zero arguments
2224 can be supplied to maintain enough trailing space to service
future expected allocations without having to re-obtain memory
from the system.
2228 Malloc_trim returns 1 if it actually released any memory, else 0.
2233 int malloc_trim(size_t pad)
2238 long top_size; /* Amount of top-most memory */
2239 long extra; /* Amount to release */
2240 char* current_brk; /* address returned by pre-check sbrk call */
2241 char* new_brk; /* address returned by negative sbrk call */
2243 unsigned long pagesz = malloc_getpagesize;
2245 top_size = chunksize(top);
2246 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
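  /*
    Editorial worked example (assuming a hypothetical 4096-byte page and
    a MINSIZE of 16): with top_size = 20000 and pad = 0,

      extra = ((20000 - 0 - 16 + 4095) / 4096 - 1) * 4096
	    = (5 - 1) * 4096 = 16384

    so four whole pages can be released while top keeps 3616 bytes,
    i.e. at most one page, as the documentation above promises.
  */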
  if (extra < (long)pagesz)  /* Not enough memory to release */
    return 0;

  else
  {
    /* Test to make sure no one else called sbrk */
    current_brk = (char*)(MORECORE (0));
    if (current_brk != (char*)(top) + top_size)
      return 0;     /* Apparently we don't own memory; must fail */

    else
    {
      new_brk = (char*)(MORECORE (-extra));

      if (new_brk == (char*)(MORECORE_FAILURE)) /* sbrk failed? */
      {
	/* Try to figure out what we have */
	current_brk = (char*)(MORECORE (0));
	top_size = current_brk - (char*)top;
	if (top_size >= (long)MINSIZE) /* if not, we are very very dead! */
	{
	  sbrked_mem = current_brk - sbrk_base;
	  set_head(top, top_size | PREV_INUSE);
	}
	check_chunk(top);
	return 0;
      }

      else
      {
	/* Success. Adjust top accordingly. */
	set_head(top, (top_size - extra) | PREV_INUSE);
	sbrked_mem -= extra;
	check_chunk(top);
	return 1;
      }
    }
  }
}
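/*
  Editorial usage sketch (not part of the original code): after freeing a
  large buffer, trailing pages can be handed back while keeping some
  headroom; the names and the 16 KiB pad are illustrative only.
*/
#if 0	/* illustrative sketch, not compiled */
	fREe(big_buffer);	/* hypothetical large, recently-freed buffer */
	if (malloc_trim(16 * 1024))
		printf("trailing free memory returned to the system\n");
#endif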
/*

  malloc_usable_size:

    This routine tells you how many bytes you can actually use in an
    allocated chunk, which may be more than you requested (although
    often not). You can use this many bytes without worrying about
    overwriting other allocated objects. Not a particularly great
    programming practice, but still sometimes useful.

*/
#if __STD_C
size_t malloc_usable_size(Void_t* mem)
#else
size_t malloc_usable_size(mem) Void_t* mem;
#endif
{
  mchunkptr p;
  if (mem == NULL)
    return 0;
  else
  {
    p = mem2chunk(mem);
    if(!chunk_is_mmapped(p))
    {
      if (!inuse(p)) return 0;
      check_inuse_chunk(p);
      return chunksize(p) - SIZE_SZ;
    }
    return chunksize(p) - 2*SIZE_SZ;
  }
}
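/*
  Editorial usage sketch (not part of the original code): the returned
  figure may exceed the request because of alignment and bookkeeping, so
  writing up to that many bytes is safe; the sizes are illustrative only.
*/
#if 0	/* illustrative sketch, not compiled */
static void usable_size_example(void)
{
  char *p = mALLOc(13);

  if (p != NULL)
  {
    size_t n = malloc_usable_size(p);	/* often > 13, e.g. 20 */
    memset(p, 0xff, n);			/* safe: all n bytes belong to p */
    fREe(p);
  }
}
#endif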
/* Utility to update current_mallinfo for malloc_stats and mallinfo() */

#ifdef DEBUG
static void malloc_update_mallinfo(void)
{
  int i;
  mbinptr b;
  mchunkptr p;
#ifdef DEBUG
  mchunkptr q;
#endif

  INTERNAL_SIZE_T avail = chunksize(top);
  int   navail = ((long)(avail) >= (long)MINSIZE)? 1 : 0;

  for (i = 1; i < NAV; ++i)
  {
    b = bin_at(i);
    for (p = last(b); p != b; p = p->bk)
    {
#ifdef DEBUG
      check_free_chunk(p);
      for (q = next_chunk(p);
	   q < top && inuse(q) && (long)(chunksize(q)) >= (long)MINSIZE;
	   q = next_chunk(q))
	check_inuse_chunk(q);
#endif
      avail += chunksize(p);
      navail++;
    }
  }

  current_mallinfo.ordblks = navail;
  current_mallinfo.uordblks = sbrked_mem - avail;
  current_mallinfo.fordblks = avail;
  current_mallinfo.hblks = n_mmaps;
  current_mallinfo.hblkhd = mmapped_mem;
  current_mallinfo.keepcost = chunksize(top);
}
#endif /* DEBUG */
/*

  malloc_stats:

    Prints the amount of space obtained from the system (both
    via sbrk and mmap), the maximum amount (which may be more than
    current if malloc_trim and/or munmap got called), the maximum
    number of simultaneous mmap regions used, and the current number
    of bytes allocated via malloc (or realloc, etc) but not yet
    freed. (Note that this is the number of bytes allocated, not the
    number requested. It will be larger than the number requested
    because of alignment and bookkeeping overhead.)

*/
#ifdef DEBUG
void malloc_stats(void)
{
  malloc_update_mallinfo();
  printf("max system bytes = %10u\n",
	 (unsigned int)(max_total_mem));
  printf("system bytes = %10u\n",
	 (unsigned int)(sbrked_mem + mmapped_mem));
  printf("in use bytes = %10u\n",
	 (unsigned int)(current_mallinfo.uordblks + mmapped_mem));
#if HAVE_MMAP
  printf("max mmap regions = %10u\n",
	 (unsigned int)max_n_mmaps);
#endif
}
#endif /* DEBUG */
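/*
  Editorial note: with DEBUG enabled the output has the form below (the
  figures are purely illustrative; %10u right-aligns each count):

    max system bytes =     266240
    system bytes =     266240
    in use bytes =     172032
*/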
/*
  mallinfo returns a copy of updated current mallinfo.
*/

#ifdef DEBUG
struct mallinfo mALLINFo(void)
{
  malloc_update_mallinfo();
  return current_mallinfo;
}
#endif /* DEBUG */
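/*
  Editorial usage sketch (not part of the original code): sampling the
  allocator state when DEBUG is enabled; the function name is
  illustrative only.
*/
#if 0	/* illustrative sketch, not compiled */
static void mallinfo_example(void)
{
  struct mallinfo mi = mALLINFo();

  printf("free chunks %d, free bytes %d, top reserve %d\n",
	 mi.ordblks, mi.fordblks, mi.keepcost);
}
#endif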
/*

  mallopt:

    mallopt is the general SVID/XPG interface to tunable parameters.
    The format is to provide a (parameter-number, parameter-value) pair.
    mallopt then sets the corresponding parameter to the argument
    value if it can (i.e., so long as the value is meaningful),
    and returns 1 if successful else 0.

    See descriptions of tunable parameters above.

*/
#if __STD_C
int mALLOPt(int param_number, int value)
#else
int mALLOPt(param_number, value) int param_number; int value;
#endif
{
  switch(param_number)
  {
    case M_TRIM_THRESHOLD:
      trim_threshold = value; return 1;
    case M_TOP_PAD:
      top_pad = value; return 1;
    case M_MMAP_THRESHOLD:
      mmap_threshold = value; return 1;
    case M_MMAP_MAX:
#if HAVE_MMAP
      n_mmaps_max = value; return 1;
#else
      if (value != 0) return 0; else n_mmaps_max = value; return 1;
#endif

    default:
      return 0;
  }
}
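/*
  Editorial usage sketch (not part of the original code): raising the trim
  threshold makes free() keep more memory cached rather than returning it
  to the system; the 128 KiB figure is illustrative only.
*/
#if 0	/* illustrative sketch, not compiled */
	if (!mALLOPt(M_TRIM_THRESHOLD, 128 * 1024))
		printf("mallopt: parameter not recognised\n");
#endif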
int initf_malloc(void)
{
#if CONFIG_IS_ENABLED(SYS_MALLOC_F)
	assert(gd->malloc_base);	/* Set up by crt0.S */
	gd->malloc_limit = CONFIG_VAL(SYS_MALLOC_F_LEN);
	gd->malloc_ptr = 0;
#endif

	return 0;
}
#if CONFIG_IS_ENABLED(UNIT_TEST)
void malloc_enable_testing(int max_allocs)
{
	malloc_testing = true;
	malloc_max_allocs = max_allocs;
}

void malloc_disable_testing(void)
{
	malloc_testing = false;
}
#endif /* UNIT_TEST */
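/*
  Editorial usage sketch (not part of the original code): a unit test can
  force allocation failures to exercise out-of-memory paths. The count of
  3 and the function under test are illustrative only.
*/
#if 0	/* illustrative sketch, not compiled */
	malloc_enable_testing(3);	/* allow 3 more mallocs, then fail */
	run_code_under_test();		/* hypothetical function being tested */
	malloc_disable_testing();	/* restore normal behaviour */
#endif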
/*

History:

    V2.6.6 Sun Dec  5 07:42:19 1999  Doug Lea  (dl at gee)
      * return null for negative arguments
      * Added several WIN32 cleanups from Martin C. Fong <mcfong@yahoo.com>
	* Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
	  (e.g. WIN32 platforms)
	* Clean up header file inclusion for WIN32 platforms
	* Clean up code to avoid Microsoft Visual C++ compiler complaints
	* Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
	  memory allocation routines
	* Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
	* Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
	  usage of 'assert' in non-WIN32 code
	* Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
	  avoid infinite loop
      * Always call 'fREe()' rather than 'free()'

    V2.6.5 Wed Jun 17 15:57:31 1998  Doug Lea  (dl at gee)
      * Fixed ordering problem with boundary-stamping

    V2.6.3 Sun May 19 08:17:58 1996  Doug Lea  (dl at gee)
      * Added pvalloc, as recommended by H.J. Liu
      * Added 64bit pointer support mainly from Wolfram Gloger
      * Added anonymously donated WIN32 sbrk emulation
      * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
      * malloc_extend_top: fix mask error that caused wastage after
	foreign sbrks
      * Add linux mremap support code from HJ Liu

    V2.6.2 Tue Dec  5 06:52:55 1995  Doug Lea  (dl at gee)
      * Integrated most documentation with the code.
      * Add support for mmap, with help from
	Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Use last_remainder in more cases.
      * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
      * Eliminate block-local decls to simplify tracing and debugging.
      * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
      * Rely on page size for units instead of SBRK_UNIT to
	avoid surprises about sbrk alignment conventions.
      * Add mallinfo, mallopt. Thanks to Raymond Nijssen
	(raymond@es.ele.tue.nl) for the suggestion.
      * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
      * More precautions for cases where other routines call sbrk,
	courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
      * Added macros etc., allowing use in linux libc from
	H.J. Lu (hjl@gnu.ai.mit.edu)
      * Inverted this history list

    V2.6.1 Sat Dec  2 14:10:57 1995  Doug Lea  (dl at gee)
      * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
      * Removed all preallocation code since under current scheme
	the work required to undo bad preallocations exceeds
	the work saved in good cases for most test programs.
      * No longer use return list or unconsolidated bins since
	no scheme using them consistently outperforms those that don't
	given above changes.
      * Use best fit for very large chunks to prevent some worst-cases.
      * Added some support for debugging

    V2.6.0 Sat Nov  4 07:05:23 1995  Doug Lea  (dl at gee)
      * Removed footers when chunks are in use. Thanks to
	Paul Wilson (wilson@cs.texas.edu) for the suggestion.

    V2.5.4 Wed Nov  1 07:54:51 1995  Doug Lea  (dl at gee)
      * Added malloc_trim, with help from Wolfram Gloger
	(wmglo@Dent.MED.Uni-Muenchen.DE).

    V2.5.3 Tue Apr 26 10:16:01 1994  Doug Lea  (dl at g)

    V2.5.2 Tue Apr  5 16:20:40 1994  Doug Lea  (dl at g)
      * realloc: try to expand in both directions
      * malloc: swap order of clean-bin strategy;
      * realloc: only conditionally expand backwards
      * Try not to scavenge used bins
      * Use bin counts as a guide to preallocation
      * Occasionally bin return list chunks in first scan
      * Add a few optimizations from colin@nyx10.cs.du.edu

    V2.5.1 Sat Aug 14 15:40:43 1993  Doug Lea  (dl at g)
      * faster bin computation & slightly different binning
      * merged all consolidations to one part of malloc proper
	(eliminating old malloc_find_space & malloc_clean_bin)
      * Scan 2 returns chunks (not just 1)
      * Propagate failure in realloc if malloc returns 0
      * Add stuff to allow compilation on non-ANSI compilers
	from kpv@research.att.com

    V2.5 Sat Aug  7 07:41:59 1993  Doug Lea  (dl at g.oswego.edu)
      * removed potential for odd address access in prev_chunk
      * removed dependency on getpagesize.h
      * misc cosmetics and a bit more internal documentation
      * anticosmetics: mangled names in macros to evade debugger strangeness
      * tested on sparc, hp-700, dec-mips, rs6000
	with gcc & native cc (hp, dec only) allowing
	Detlefs & Zorn comparison study (in SIGPLAN Notices.)

    Trial version Fri Aug 28 13:14:29 1992  Doug Lea  (dl at g.oswego.edu)
      * Based loosely on libg++-1.2X malloc. (It retains some of the overall
	structure of old version, but most details differ.)

*/