YAP 7.1.0
dlmalloc.c
1#include "Yap.h"
2
3#if USE_DL_MALLOC
4
5#include "YapHeap.h"
6#if HAVE_STRING_H
7#include <string.h>
8#endif
9#include "alloc.h"
10#include "dlmalloc.h"
11
12static struct malloc_chunk *
13ChunkPtrAdjust (struct malloc_chunk *ptr)
14{
15 return (struct malloc_chunk *) ((char *) (ptr) + LOCAL_HDiff);
16}
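/*
  Illustrative note (an assumption, not stated in this file): LOCAL_HDiff
  presumably records the displacement applied when the YAP heap area is
  relocated, so a chunk pointer recorded before the move can be rebased:

    struct malloc_chunk *saved = old_ptr;                // old_ptr: hypothetical pointer taken before the move
    struct malloc_chunk *cur   = ChunkPtrAdjust(saved);  // same chunk at its new address
*/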
17
18
19
20/*
21 This is a version (aka dlmalloc) of malloc/free/realloc written by
22 Doug Lea and released to the public domain. Use, modify, and
23 redistribute this code without permission or acknowledgement in any
24 way you wish. Send questions, comments, complaints, performance
25 data, etc to dl@cs.oswego.edu
26
27* VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
28
29 Note: There may be an updated version of this malloc obtainable at
30 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
31 Check before installing!
32
33* Quickstart
34
35 This library is all in one file to simplify the most common usage:
36 ftp it, compile it (-O), and link it into another program. All
37 of the compile-time options default to reasonable values for use on
38 most Unix platforms. Compile -DWIN32 for reasonable defaults on Windows.
39 You might later want to step through various compile-time and dynamic
40 tuning options.
41
42 For convenience, an include file for code using this malloc is at:
43 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.7.1.h
44 You don't really need this .h file unless you call functions not
45 defined in your system include files. The .h file contains only the
46 excerpts from this file needed for using this malloc on ANSI C/C++
47 systems, so long as you haven't changed compile-time options about
48 naming and tuning parameters. If you do, then you can create your
49 own malloc.h that does include all settings by cutting at the point
50 indicated below.
51
52* Why use this malloc?
53
54 This is not the fastest, most space-conserving, most portable, or
55 most tunable malloc ever written. However it is among the fastest
56 while also being among the most space-conserving, portable and tunable.
57 Consistent balance across these factors results in a good general-purpose
58 allocator for malloc-intensive programs.
59
60 The main properties of the algorithms are:
61 * For large (>= 512 bytes) requests, it is a pure best-fit allocator,
62 with ties normally decided via FIFO (i.e. least recently used).
63 * For small (<= 64 bytes by default) requests, it is a caching
64 allocator, that maintains pools of quickly recycled chunks.
65 * In between, and for combinations of large and small requests, it does
66 the best it can trying to meet both goals at once.
67 * For very large requests (>= 128KB by default), it relies on system
68 memory mapping facilities, if supported.
69
70 For a longer but slightly out of date high-level description, see
71 http://gee.cs.oswego.edu/dl/html/malloc.html
72
73 You may already, by default, be using a C library containing a malloc
74 that is based on some version of this malloc (for example, on
75 Linux). You might still want to use the one in this file in order to
76 customize settings or to avoid overheads associated with library
77 versions.
78
79* Contents, described in more detail in "description of public routines" below.
80
81 Standard (ANSI/SVID/...) functions:
82 malloc(size_t n);
83 calloc(size_t n_elements, size_t element_size);
84 free(Void_t* p);
85 realloc(Void_t* p, size_t n);
86 memalign(size_t alignment, size_t n);
87 valloc(size_t n);
88 mallinfo()
89 mallopt(int parameter_number, int parameter_value)
90
91 Additional functions:
92 independent_calloc(size_t n_elements, size_t size, Void_t* chunks[]);
93 independent_comalloc(size_t n_elements, size_t sizes[], Void_t* chunks[]);
94 pvalloc(size_t n);
95 cfree(Void_t* p);
96 malloc_trim(size_t pad);
97 malloc_usable_size(Void_t* p);
98 malloc_stats();
99
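  Illustrative usage sketch (added for clarity, not part of the original
  text; it just exercises the standard entry points listed above under
  their default public names):

    void *p = malloc(100);              /* at least 100 usable bytes      */
    p = realloc(p, 200);                /* grow in place or move the data */
    void *q = calloc(10, sizeof(int));  /* zero-filled array              */
    free(p);
    free(q);
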
100* Vital statistics:
101
102 Supported pointer representation: 4 or 8 bytes
103 Supported size_t representation: 4 or 8 bytes
104 Note that size_t is allowed to be 4 bytes even if pointers are 8.
105 You can adjust this by defining INTERNAL_SIZE_T
106
107 Alignment: 2 * sizeof(size_t) (default)
108 (i.e., 8-byte alignment with 4-byte size_t). This suffices for
109 nearly all current machines and C compilers. However, you can
110 define MALLOC_ALIGNMENT to be wider than this if necessary.
111
112 Minimum overhead per allocated chunk: 4 or 8 bytes
113 Each malloced chunk has a hidden word of overhead holding size
114 and status information.
115
116 Minimum allocated size: 4-byte ptrs: 16 bytes (including 4 overhead)
117 8-byte ptrs: 24/32 bytes (including 4/8 overhead)
118
119 When a chunk is freed, 12 (for 4-byte ptrs) or 20 (for 8-byte
120 ptrs but 4-byte size) or 24 (for 8/8) additional bytes are
121 needed; 4 (8) for a trailing size field and 8 (16) bytes for
122 free list pointers. Thus, the minimum allocatable size is
123 16/24/32 bytes.
124
125 Even a request for zero bytes (i.e., malloc(0)) returns a
126 pointer to something of the minimum allocatable size.
127
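  Worked example (illustrative; assumes 4-byte size_t and the default
  8-byte alignment):

    request2size(1)  -> 16   padded up to the 16-byte minimum chunk
    request2size(25) -> 32   25 + 4 bytes overhead, rounded up to an 8-byte multiple
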
128 The maximum overhead wastage (i.e., number of extra bytes
129 allocated than were requested in malloc) is less than or equal
130 to the minimum size, except for requests >= mmap_threshold that
131 are serviced via mmap(), where the worst case wastage is 2 *
132 sizeof(size_t) bytes plus the remainder from a system page (the
133 minimal mmap unit); typically 4096 or 8192 bytes.
134
135 Maximum allocated size: 4-byte size_t: 2^32 minus about two pages
136 8-byte size_t: 2^64 minus about two pages
137
138 It is assumed that (possibly signed) size_t values suffice to
139 represent chunk sizes. `Possibly signed' is due to the fact
140 that `size_t' may be defined on a system as either a signed or
141 an unsigned type. The ISO C standard says that it must be
142 unsigned, but a few systems are known not to adhere to this.
143 Additionally, even when size_t is unsigned, sbrk (which is by
144 default used to obtain memory from system) accepts signed
145 arguments, and may not be able to handle size_t-wide arguments
146 with negative sign bit. Generally, values that would
147 appear as negative after accounting for overhead and alignment
148 are supported only via mmap(), which does not have this
149 limitation.
150
151 Requests for sizes outside the allowed range will perform an optional
152 failure action and then return null. (Requests may also
153 fail because a system is out of memory.)
154
155 Thread-safety: NOT thread-safe unless USE_MALLOC_LOCK defined
156
157 When USE_MALLOC_LOCK is defined, wrappers are created to
158 surround every public call with either a pthread mutex or
159 a win32 spinlock (depending on WIN32). This is not
160 especially fast, and can be a major bottleneck.
161 It is designed only to provide minimal protection
162 in concurrent environments, and to provide a basis for
163 extensions. If you are using malloc in a concurrent program,
164 you would be far better off obtaining ptmalloc, which is
165 derived from a version of this malloc, and is well-tuned for
166 concurrent programs. (See http://www.malloc.de) Note that
167 even when USE_MALLOC_LOCK is defined, you can guarantee
168 full thread-safety only if no threads acquire memory through
169 direct calls to MORECORE or other system-level allocators.
170
171 Compliance: I believe it is compliant with the 1997 Single Unix Specification
172 (See http://www.opennc.org). Also SVID/XPG, ANSI C, and probably
173 others as well.
174
175*/
176
177/* vsc: emulation of sbrk with YAP contiguous memory management */
178
179void
180Yap_add_memory_hole(ADDR start, ADDR end)
181{
182 if (Yap_NOfMemoryHoles == MAX_DLMALLOC_HOLES) {
183 Yap_Error(SYSTEM_ERROR_OPERATING_SYSTEM, 0L, "Unexpected Too Much Memory Fragmentation: please contact YAP maintainers");
184 return;
185 }
186 Yap_MemoryHoles[Yap_NOfMemoryHoles].start = start;
187 Yap_MemoryHoles[Yap_NOfMemoryHoles].end = end;
188 Yap_HoleSize += (UInt)(end-start); /* a hole runs from start up to end */
189 Yap_NOfMemoryHoles++;
190}
191
192static void *
193yapsbrk(long size)
194{
195 ADDR newHeapTop = HeapTop, oldHeapTop = HeapTop;
196 newHeapTop = HeapTop+size;
197 while (Yap_NOfMemoryHoles && newHeapTop > Yap_MemoryHoles[0].start) {
198 UInt i;
199
200 HeapTop = oldHeapTop = Yap_MemoryHoles[0].end;
201 newHeapTop = oldHeapTop+size;
202 Yap_NOfMemoryHoles--;
203 for (i=0; i < Yap_NOfMemoryHoles; i++) {
204 Yap_MemoryHoles[i].start = Yap_MemoryHoles[i+1].start;
205 Yap_MemoryHoles[i].end = Yap_MemoryHoles[i+1].end;
206 }
207 }
208 if (newHeapTop > HeapLim - MinHeapGap) {
209 if (HeapTop + size < HeapLim) {
210 /* small allocations, we can wait */
211 HeapTop += size;
212 UNLOCK(HeapTopLock);
213 Yap_signal(YAP_CDOVF_SIGNAL);
214 } else {
215 if (size > GLOBAL_SizeOfOverflow)
216 GLOBAL_SizeOfOverflow = size;
217 /* big allocations, the caller must handle the problem */
218 UNLOCK(HeapUsedLock);
219 UNLOCK(HeapTopLock);
220 return (void *)MORECORE_FAILURE;
221 }
222 }
223 HeapTop = newHeapTop;
224 UNLOCK(HeapTopLock);
225 return oldHeapTop;
226}
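/*
  Sketch of how this emulation is presumably wired into dlmalloc (an
  assumption about the surrounding headers, not shown in this file):
  MORECORE is expected to resolve to yapsbrk, so every MORECORE(size)
  call below draws memory from YAP's contiguous heap.

    #define MORECORE(size) yapsbrk(size)

    void *base = yapsbrk(4096);                 // grow the pool by (roughly) one page
    if (base == (void *)MORECORE_FAILURE) {
      // out of heap: the caller must recover, e.g. by expanding the YAP heap
    }
*/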
227
228/*
229 Compute index for size. We expect this to be inlined when
230 compiled with optimization, else not, which works out well.
231*/
232static int largebin_index(unsigned int sz) {
233 unsigned int x = sz >> SMALLBIN_WIDTH;
234 unsigned int m; /* bit position of highest set bit of m */
235
236 if (x >= 0x10000) return NBINS-1;
237
238 /* On intel, use BSRL instruction to find highest bit */
239#if defined(__GNUC__) && defined(i386)
240
241 __asm__("bsrl %1,%0\n\t"
242 : "=r" (m)
243 : "g" (x));
244
245#else
246 {
247 /*
248 Based on branch-free nlz algorithm in chapter 5 of Henry
249 S. Warren Jr's book "Hacker's Delight".
250 */
251
252 unsigned int n = ((x - 0x100) >> 16) & 8;
253 x <<= n;
254 m = ((x - 0x1000) >> 16) & 4;
255 n += m;
256 x <<= m;
257 m = ((x - 0x4000) >> 16) & 2;
258 n += m;
259 x = (x << m) >> 14;
260 m = 13 - n + (x & ~(x>>1));
261 }
262#endif
263
264 /* Use next 2 bits to create finer-granularity bins */
265 return NSMALLBINS + (m << 2) + ((sz >> (m + 6)) & 3);
266}
267
268#define bin_index(sz) \
269 ((in_smallbin_range(sz)) ? smallbin_index(sz) : largebin_index(sz))
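/*
  Illustrative sketch (not part of the original file): how request sizes
  are routed through bin_index, assuming the smallbin helpers declared in
  dlmalloc.h.

    unsigned int i1 = bin_index(64);    // small size -> smallbin_index(64)
    unsigned int i2 = bin_index(4096);  // large size -> largebin_index(4096)
*/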
270
271/*
272 FIRST_SORTED_BIN_SIZE is the chunk size corresponding to the
273 first bin that is maintained in sorted order. This must
274 be the smallest size corresponding to a given bin.
275
276 Normally, this should be MIN_LARGE_SIZE. But you can weaken
277 best fit guarantees to sometimes speed up malloc by increasing value.
278 Doing this means that malloc may choose a chunk that is
279 non-best-fitting by up to the width of the bin.
280
281 Some useful cutoff values:
282 512 - all bins sorted
283 2560 - leaves bins <= 64 bytes wide unsorted
284 12288 - leaves bins <= 512 bytes wide unsorted
285 65536 - leaves bins <= 4096 bytes wide unsorted
286 262144 - leaves bins <= 32768 bytes wide unsorted
287 -1 - no bins sorted (not recommended!)
288*/
289
290/*#define FIRST_SORTED_BIN_SIZE MIN_LARGE_SIZE */
291#define FIRST_SORTED_BIN_SIZE 2056
292
293/*
294 Unsorted chunks
295
296 All remainders from chunk splits, as well as all returned chunks,
297 are first placed in the "unsorted" bin. They are then placed
298 in regular bins after malloc gives them ONE chance to be used before
299 binning. So, basically, the unsorted_chunks list acts as a queue,
300 with chunks being placed on it in free (and malloc_consolidate),
301 and taken off (to be either used or placed in bins) in malloc.
302*/
303
304/* The otherwise unindexable 1-bin is used to hold unsorted chunks. */
305#define unsorted_chunks(M) (bin_at(M, 1))
306
307/*
308 Top
309
310 The top-most available chunk (i.e., the one bordering the end of
311 available memory) is treated specially. It is never included in
312 any bin, is used only if no other chunk is available, and is
313 released back to the system if it is very large (see
314 M_TRIM_THRESHOLD). Because top initially
315 points to its own bin with initial zero size, thus forcing
316 extension on the first malloc request, we avoid having any special
317 code in malloc to check whether it even exists yet. But we still
318 need to do so when getting memory from system, so we make
319 initial_top treat the bin as a legal but unusable chunk during the
320 interval between initialization and the first call to
321 sYSMALLOc. (This is somewhat delicate, since it relies on
322 the 2 preceding words to be zero during this interval as well.)
323*/
324
325/* Conveniently, the unsorted bin can be used as dummy top on first call */
326#define initial_top(M) (unsorted_chunks(M))
327
328/*
329 Binmap
330
331 To help compensate for the large number of bins, a one-level index
332 structure is used for bin-by-bin searching. `binmap' is a
333 bitvector recording whether bins are definitely empty so they can
334 be skipped over during traversals. The bits are NOT always
335 cleared as soon as bins are empty, but instead only
336 when they are noticed to be empty during traversal in malloc.
337*/
338
339#define idx2block(i) ((i) >> BINMAPSHIFT)
340#define idx2bit(i) ((1U << ((i) & ((1U << BINMAPSHIFT)-1))))
341
342#define mark_bin(m,i) ((m)->binmap[idx2block(i)] |= idx2bit(i))
343#define unmark_bin(m,i) ((m)->binmap[idx2block(i)] &= ~(idx2bit(i)))
344#define get_binmap(m,i) ((m)->binmap[idx2block(i)] & idx2bit(i))
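/*
  Illustrative sketch (not part of the original file): typical binmap
  bookkeeping around a hypothetical bin index.

    mstate av = get_malloc_state();
    unsigned int i = 40;                            // hypothetical bin index

    mark_bin(av, i);                                // bin i may now hold chunks
    int maybe_nonempty = (get_binmap(av, i) != 0);  // cheap pre-check before scanning bin_at(av, i)
    unmark_bin(av, i);                              // cleared once malloc sees the bin empty
*/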
345
346/*
347 Fastbins
348
349 An array of lists holding recently freed small chunks. Fastbins
350 are not doubly linked. It is faster to single-link them, and
351 since chunks are never removed from the middles of these lists,
352 double linking is not necessary. Also, unlike regular bins, they
353 are not even processed in FIFO order (they use faster LIFO) since
354 ordering doesn't much matter in the transient contexts in which
355 fastbins are normally used.
356
357 Chunks in fastbins keep their inuse bit set, so they cannot
358 be consolidated with other free chunks. malloc_consolidate
359 releases all chunks in fastbins and consolidates them with
360 other free chunks.
361*/
362
363/*
364 FASTBIN_CONSOLIDATION_THRESHOLD is the size of a chunk in free()
365 that triggers automatic consolidation of possibly-surrounding
366 fastbin chunks. This is a heuristic, so the exact value should not
367 matter too much. It is defined at half the default trim threshold as a
368 compromise heuristic to only attempt consolidation if it is likely
369 to lead to trimming. However, it is not dynamically tunable, since
370 consolidation reduces fragmentation surrounding large chunks even
371 if trimming is not used.
372*/
373
374#define FASTBIN_CONSOLIDATION_THRESHOLD \
375 ((unsigned long)(DEFAULT_TRIM_THRESHOLD) >> 1)
376
377/*
378 Since the lowest 2 bits in max_fast don't matter in size comparisons,
379 they are used as flags.
380*/
381
382/*
383 ANYCHUNKS_BIT held in max_fast indicates that there may be any
384 freed chunks at all. It is set true when entering a chunk into any
385 bin.
386*/
387
388#define ANYCHUNKS_BIT (1U)
389
390#define have_anychunks(M) (((M)->max_fast & ANYCHUNKS_BIT))
391#define set_anychunks(M) ((M)->max_fast |= ANYCHUNKS_BIT)
392#define clear_anychunks(M) ((M)->max_fast &= ~ANYCHUNKS_BIT)
393
394/*
395 FASTCHUNKS_BIT held in max_fast indicates that there are probably
396 some fastbin chunks. It is set true on entering a chunk into any
397 fastbin, and cleared only in malloc_consolidate.
398*/
399
400#define FASTCHUNKS_BIT (2U)
401
402#define have_fastchunks(M) (((M)->max_fast & FASTCHUNKS_BIT))
403#define set_fastchunks(M) ((M)->max_fast |= (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
404#define clear_fastchunks(M) ((M)->max_fast &= ~(FASTCHUNKS_BIT))
405
406/*
407 Set value of max_fast.
408 Use impossibly small value if 0.
409*/
410
411#define set_max_fast(M, s) \
412 (M)->max_fast = (((s) == 0)? SMALLBIN_WIDTH: request2size(s)) | \
413 ((M)->max_fast & (FASTCHUNKS_BIT|ANYCHUNKS_BIT))
414
415#define get_max_fast(M) \
416 ((M)->max_fast & ~(FASTCHUNKS_BIT | ANYCHUNKS_BIT))
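/*
  Illustrative sketch (not part of the original file): the two low bits of
  max_fast carry the status flags, so one field serves both the size cap
  and the bookkeeping checks.

    mstate av = get_malloc_state();

    set_max_fast(av, 64);                      // cap fastbins at request2size(64)
    set_fastchunks(av);                        // sets FASTCHUNKS_BIT and ANYCHUNKS_BIT

    INTERNAL_SIZE_T cap = get_max_fast(av);    // the cap with flag bits masked off
    int fast = have_fastchunks(av);            // nonzero until malloc_consolidate runs
*/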
417
418
419/*
420 morecore_properties is a status word holding dynamically discovered
421 or controlled properties of the morecore function
422*/
423
424#define MORECORE_CONTIGUOUS_BIT (1U)
425
426#define contiguous(M) \
427 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT))
428#define noncontiguous(M) \
429 (((M)->morecore_properties & MORECORE_CONTIGUOUS_BIT) == 0)
430#define set_contiguous(M) \
431 ((M)->morecore_properties |= MORECORE_CONTIGUOUS_BIT)
432#define set_noncontiguous(M) \
433 ((M)->morecore_properties &= ~MORECORE_CONTIGUOUS_BIT)
434
435
436/*
437 There is exactly one instance of this struct in this malloc.
438 If you are adapting this malloc in a way that does NOT use a static
439 malloc_state, you MUST explicitly zero-fill it before using. This
440 malloc relies on the property that malloc_state is initialized to
441 all zeroes (as is true of C statics).
442*/
443
444/* static struct malloc_state av_; */ /* never directly referenced */
445
446/*
447 All uses of av_ are via get_malloc_state().
448 At most one "call" to get_malloc_state is made per invocation of
449 the public versions of malloc and free, but other routines
450 that in turn invoke malloc and/or free may call it more than once.
451 Also, it is called in check* routines if DEBUG is set.
452*/
453
454/* #define get_malloc_state() (&(av_)) */
455#define get_malloc_state() Yap_av
456
457/*
458 Initialize a malloc_state struct.
459
460 This is called only from within malloc_consolidate, which needs to
461 be called in the same contexts anyway. It is never called directly
462 outside of malloc_consolidate because some optimizing compilers try
463 to inline it at all call points, which turns out not to be an
464 optimization at all. (Inlining it in malloc_consolidate is fine though.)
465*/
466
467#if __STD_C
468static void malloc_init_state(mstate av)
469#else
470static void malloc_init_state(av) mstate av;
471#endif
472{
473 int i;
474 mbinptr bin;
475
476 /* Establish circular links for normal bins */
477 for (i = 1; i < NBINS; ++i) {
478 bin = bin_at(av,i);
479 bin->fd = bin->bk = bin;
480 }
481
482 av->top_pad = DEFAULT_TOP_PAD;
483 av->trim_threshold = DEFAULT_TRIM_THRESHOLD;
484
485#if MORECORE_CONTIGUOUS
486 set_contiguous(av);
487#else
488 set_noncontiguous(av);
489#endif
490
491
492 set_max_fast(av, DEFAULT_MXFAST);
493
494 av->top = initial_top(av);
495 av->pagesize = malloc_getpagesize;
496}
497
498/*
499 Other internal utilities operating on mstates
500*/
501
502#if __STD_C
503static Void_t* sYSMALLOc(INTERNAL_SIZE_T, mstate);
504static int sYSTRIm(size_t, mstate);
505static void malloc_consolidate(mstate);
506static Void_t** iALLOc(size_t, size_t*, int, Void_t**);
507#else
508static Void_t* sYSMALLOc();
509static int sYSTRIm();
510static void malloc_consolidate();
511static Void_t** iALLOc();
512#endif
513
514/*
515 Debugging support
516
517 These routines make a number of assertions about the states
518 of data structures that should be true at all times. If any
519 are not true, it's very likely that a user program has somehow
520 trashed memory. (It's also possible that there is a coding error
521 in malloc. In which case, please report it!)
522*/
523
524#if ! DEBUG_DLMALLOC
525
526#define check_chunk(P)
527#define check_free_chunk(P)
528#define check_inuse_chunk(P)
529#define check_remalloced_chunk(P,N)
530#define check_malloced_chunk(P,N)
531#define check_malloc_state()
532
533#else
534#define check_chunk(P) do_check_chunk(P)
535#define check_free_chunk(P) do_check_free_chunk(P)
536#define check_inuse_chunk(P) do_check_inuse_chunk(P)
537#define check_remalloced_chunk(P,N) do_check_remalloced_chunk(P,N)
538#define check_malloced_chunk(P,N) do_check_malloced_chunk(P,N)
539#define check_malloc_state() do_check_malloc_state()
540
541/*
542 Properties of all chunks
543*/
544
545#if __STD_C
546static void do_check_chunk(mchunkptr p)
547#else
548static void do_check_chunk(p) mchunkptr p;
549#endif
550{
551 mstate av = get_malloc_state();
552#if DEBUG_DLMALLOC
553 /* min and max possible addresses assuming contiguous allocation */
554 char* max_address = (char*)(av->top) + chunksize(av->top);
555 CHUNK_SIZE_T sz = chunksize(p);
556 char* min_address = max_address - av->sbrked_mem;
557#endif
558
559 if (!chunk_is_mmapped(p)) {
560
561 /* Has legal address ... */
562 if (p != av->top) {
563 if (contiguous(av)) {
564 assert(((char*)p) >= min_address);
565 assert(((char*)p + sz) <= ((char*)(av->top)));
566 }
567 }
568 else {
569 /* top size is always at least MINSIZE */
570 assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
571 /* top predecessor always marked inuse */
572 assert(prev_inuse(p));
573 }
574
575 }
576 else {
577#if HAVE_MMAP
578 /* address is outside main heap */
579 if (contiguous(av) && av->top != initial_top(av)) {
580 assert(((char*)p) < min_address || ((char*)p) > max_address);
581 }
582 /* chunk is page-aligned */
583 assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
584 /* mem is aligned */
585 assert(aligned_OK(chunk2mem(p)));
586#else
587 /* force an appropriate assert violation if debug set */
588 assert(!chunk_is_mmapped(p));
589#endif
590 }
591}
592
593/*
594 Properties of free chunks
595*/
596
597#if __STD_C
598static void do_check_free_chunk(mchunkptr p)
599#else
600static void do_check_free_chunk(p) mchunkptr p;
601#endif
602{
603#if DEBUG_DLMALLOC
604 mstate av = get_malloc_state();
605#endif
606
607 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
608#if DEBUG_DLMALLOC
609 mchunkptr next = chunk_at_offset(p, sz);
610#endif
611
612 do_check_chunk(p);
613
614 /* Chunk must claim to be free ... */
615 assert(!inuse(p));
616 assert (!chunk_is_mmapped(p));
617
618 /* Unless a special marker, must have OK fields */
619 if ((CHUNK_SIZE_T)(sz) >= MINSIZE)
620 {
621 assert((sz & MALLOC_ALIGN_MASK) == 0);
622 assert(aligned_OK(chunk2mem(p)));
623 /* ... matching footer field */
624 assert(next->prev_size == sz);
625 /* ... and is fully consolidated */
626 assert(prev_inuse(p));
627 assert (next == av->top || inuse(next));
628
629 /* ... and has minimally sane links */
630 assert(p->fd->bk == p);
631 assert(p->bk->fd == p);
632 }
633 else /* markers are always of size SIZE_SZ */
634 assert(sz == SIZE_SZ);
635}
636
637/*
638 Properties of inuse chunks
639*/
640
641#if __STD_C
642static void do_check_inuse_chunk(mchunkptr p)
643#else
644static void do_check_inuse_chunk(p) mchunkptr p;
645#endif
646{
647 mstate av = get_malloc_state();
648 mchunkptr next;
649 do_check_chunk(p);
650
651 if (chunk_is_mmapped(p))
652 return; /* mmapped chunks have no next/prev */
653
654 /* Check whether it claims to be in use ... */
655 assert(inuse(p));
656
657 next = next_chunk(p);
658
659 /* ... and is surrounded by OK chunks.
660 Since more things can be checked with free chunks than inuse ones,
661 if an inuse chunk borders them and debug is on, it's worth doing them.
662 */
663 if (!prev_inuse(p)) {
664 /* Note that we cannot even look at prev unless it is not inuse */
665 mchunkptr prv = prev_chunk(p);
666 assert(next_chunk(prv) == p);
667 do_check_free_chunk(prv);
668 }
669
670 if (next == av->top) {
671 assert(prev_inuse(next));
672 assert(chunksize(next) >= MINSIZE);
673 }
674 else if (!inuse(next))
675 do_check_free_chunk(next);
676}
677
678/*
679 Properties of chunks recycled from fastbins
680*/
681
682#if __STD_C
683static void do_check_remalloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
684#else
685static void do_check_remalloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
686#endif
687{
688#if DEBUG_DLMALLOC
689 INTERNAL_SIZE_T sz = p->size & ~PREV_INUSE;
690#endif
691
692 do_check_inuse_chunk(p);
693
694 /* Legal size ... */
695 assert((sz & MALLOC_ALIGN_MASK) == 0);
696 assert((CHUNK_SIZE_T)(sz) >= MINSIZE);
697 /* ... and alignment */
698 assert(aligned_OK(chunk2mem(p)));
699 /* chunk is less than MINSIZE more than request */
700 assert((long)(sz) - (long)(s) >= 0);
701 assert((long)(sz) - (long)(s + MINSIZE) < 0);
702}
703
704/*
705 Properties of nonrecycled chunks at the point they are malloced
706*/
707
708#if __STD_C
709static void do_check_malloced_chunk(mchunkptr p, INTERNAL_SIZE_T s)
710#else
711static void do_check_malloced_chunk(p, s) mchunkptr p; INTERNAL_SIZE_T s;
712#endif
713{
714 /* same as recycled case ... */
715 do_check_remalloced_chunk(p, s);
716
717 /*
718 ... plus, must obey implementation invariant that prev_inuse is
719 always true of any allocated chunk; i.e., that each allocated
720 chunk borders either a previously allocated and still in-use
721 chunk, or the base of its memory arena. This is ensured
722 by making all allocations from the `lowest' part of any found
723 chunk. This does not necessarily hold however for chunks
724 recycled via fastbins.
725 */
726
727 assert(prev_inuse(p));
728}
729
730
731/*
732 Properties of malloc_state.
733
734 This may be useful for debugging malloc, as well as detecting user
735 programmer errors that somehow write into malloc_state.
736
737 If you are extending or experimenting with this malloc, you can
738 probably figure out how to hack this routine to print out or
739 display chunk addresses, sizes, bins, and other instrumentation.
740*/
741
742static void do_check_malloc_state(void)
743{
744 mstate av = get_malloc_state();
745 int i;
746 mchunkptr p;
747 mchunkptr q;
748 mbinptr b;
749 unsigned int binbit;
750 int empty;
751 unsigned int idx;
752 INTERNAL_SIZE_T size;
753 CHUNK_SIZE_T total = 0;
754 int max_fast_bin;
755
756 /* internal size_t must be no wider than pointer type */
757 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
758
759 /* alignment is a power of 2 */
760 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
761
762 /* cannot run remaining checks until fully initialized */
763 if (av->top == 0 || av->top == initial_top(av))
764 return;
765
766 /* pagesize is a power of 2 */
767 assert((av->pagesize & (av->pagesize-1)) == 0);
768
769 /* properties of fastbins */
770
771 /* max_fast is in allowed range */
772 assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
773
774 max_fast_bin = fastbin_index(av->max_fast);
775
776 for (i = 0; i < NFASTBINS; ++i) {
777 p = av->fastbins[i];
778
779 /* all bins past max_fast are empty */
780 if (i > max_fast_bin)
781 assert(p == 0);
782
783 while (p != 0) {
784 /* each chunk claims to be inuse */
785 do_check_inuse_chunk(p);
786 total += chunksize(p);
787 /* chunk belongs in this bin */
788 assert(fastbin_index(chunksize(p)) == i);
789 p = p->fd;
790 }
791 }
792
793 if (total != 0)
794 assert(have_fastchunks(av));
795 else if (!have_fastchunks(av))
796 assert(total == 0);
797
798 /* check normal bins */
799 for (i = 1; i < NBINS; ++i) {
800 b = bin_at(av,i);
801
802 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
803 if (i >= 2) {
804 binbit = get_binmap(av,i);
805 empty = last(b) == b;
806 if (!binbit)
807 assert(empty);
808 else if (!empty)
809 assert(binbit);
810 }
811
812 for (p = last(b); p != b; p = p->bk) {
813 /* each chunk claims to be free */
814 do_check_free_chunk(p);
815 size = chunksize(p);
816 total += size;
817 if (i >= 2) {
818 /* chunk belongs in bin */
819 idx = bin_index(size);
820 assert(idx == i);
821 /* lists are sorted */
822 if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
823 assert(p->bk == b ||
824 (CHUNK_SIZE_T)chunksize(p->bk) >=
825 (CHUNK_SIZE_T)chunksize(p));
826 }
827 }
828 /* chunk is followed by a legal chain of inuse chunks */
829 for (q = next_chunk(p);
830 (q != av->top && inuse(q) &&
831 (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
832 q = next_chunk(q))
833 do_check_inuse_chunk(q);
834 }
835 }
836
837 /* top chunk is OK */
838 check_chunk(av->top);
839
840 /* sanity checks for statistics */
841
842 assert(total <= (CHUNK_SIZE_T)(av->max_total_mem));
843
844 assert((CHUNK_SIZE_T)(av->sbrked_mem) <=
845 (CHUNK_SIZE_T)(av->max_sbrked_mem));
846
847 assert((CHUNK_SIZE_T)(av->max_total_mem) >=
848 (CHUNK_SIZE_T)(av->sbrked_mem));
849}
850#endif
851
852
853/* ----------- Routines dealing with system allocation -------------- */
854
855/*
856 sysmalloc handles malloc cases requiring more memory from the system.
857 On entry, it is assumed that av->top does not have enough
858 space to service request for nb bytes, thus requiring that av->top
859 be extended or replaced.
860*/
861
862#if __STD_C
863static Void_t* sYSMALLOc(INTERNAL_SIZE_T nb, mstate av)
864#else
865static Void_t* sYSMALLOc(nb, av) INTERNAL_SIZE_T nb; mstate av;
866#endif
867{
868 mchunkptr old_top; /* incoming value of av->top */
869 INTERNAL_SIZE_T old_size; /* its size */
870 char* old_end; /* its end address */
871
872 long size; /* arg to first MORECORE or mmap call */
873 char* brk; /* return value from MORECORE */
874
875 long correction; /* arg to 2nd MORECORE call */
876 char* snd_brk; /* 2nd return val */
877
878 INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
879 INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
880 char* aligned_brk; /* aligned offset into brk */
881
882 mchunkptr p; /* the allocated/returned chunk */
883 mchunkptr remainder; /* remainder from allocation */
884 CHUNK_SIZE_T remainder_size; /* its size */
885
886 CHUNK_SIZE_T sum; /* for updating stats */
887
888 size_t pagemask = av->pagesize - 1;
889
890 /*
891 If there is space available in fastbins, consolidate and retry
892 malloc from scratch rather than getting memory from system. This
893 can occur only if nb is in smallbin range so we didn't consolidate
894 upon entry to malloc. It is much easier to handle this case here
895 than in malloc proper.
896 */
897
898 if (have_fastchunks(av)) {
899 assert(in_smallbin_range(nb));
900 malloc_consolidate(av);
901 return mALLOc(nb - MALLOC_ALIGN_MASK);
902 }
903
904
905 /* Record incoming configuration of top */
906
907 old_top = av->top;
908 old_size = chunksize(old_top);
909 old_end = (char*)(chunk_at_offset(old_top, old_size));
910
911 brk = snd_brk = (char*)(MORECORE_FAILURE);
912
913 /*
914 If not the first time through, we require old_size to be
915 at least MINSIZE and to have prev_inuse set.
916 */
917
918 assert((old_top == initial_top(av) && old_size == 0) ||
919 ((CHUNK_SIZE_T) (old_size) >= MINSIZE &&
920 prev_inuse(old_top)));
921
922 /* Precondition: not enough current space to satisfy nb request */
923 assert((CHUNK_SIZE_T)(old_size) < (CHUNK_SIZE_T)(nb + MINSIZE));
924
925 /* Precondition: all fastbins are consolidated */
926 assert(!have_fastchunks(av));
927
928
929 /* Request enough space for nb + pad + overhead */
930
931 size = nb + av->top_pad + MINSIZE;
932
933 /*
934 If contiguous, we can subtract out existing space that we hope to
935 combine with new space. We add it back later only if
936 we don't actually get contiguous space.
937 */
938
939 if (contiguous(av))
940 size -= old_size;
941
942 /*
943 Round to a multiple of page size.
944 If MORECORE is not contiguous, this ensures that we only call it
945 with whole-page arguments. And if MORECORE is contiguous and
946 this is not first time through, this preserves page-alignment of
947 previous calls. Otherwise, we correct to page-align below.
948 */
949
950 size = (size + pagemask) & ~pagemask;
951
952 /*
953 Don't try to call MORECORE if argument is so big as to appear
954 negative. Note that since mmap takes size_t arg, it may succeed
955 below even if we cannot call MORECORE.
956 */
957
958 if (size > 0)
959 brk = (char*)(MORECORE(size));
960
961 /*
962 If have mmap, try using it as a backup when MORECORE fails or
963 cannot be used. This is worth doing on systems that have "holes" in
964 address space, so sbrk cannot extend to give contiguous space, but
965 space is available elsewhere. Note that we ignore mmap max count
966 and threshold limits, since the space will not be used as a
967 segregated mmap region.
968 */
969
970
971 if (brk != (char*)(MORECORE_FAILURE)) {
972 av->sbrked_mem += size;
973
974 /*
975 If MORECORE extends previous space, we can likewise extend top size.
976 */
977
978 if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
979 set_head(old_top, (size + old_size) | PREV_INUSE);
980 }
981
982 /*
983 Otherwise, make adjustments:
984
985 * If the first time through or noncontiguous, we need to call sbrk
986 just to find out where the end of memory lies.
987
988 * We need to ensure that all returned chunks from malloc will meet
989 MALLOC_ALIGNMENT
990
991 * If there was an intervening foreign sbrk, we need to adjust sbrk
992 request size to account for fact that we will not be able to
993 combine new space with existing space in old_top.
994
995 * Almost all systems internally allocate whole pages at a time, in
996 which case we might as well use the whole last page of request.
997 So we allocate enough more memory to hit a page boundary now,
998 which in turn causes future contiguous calls to page-align.
999 */
1000
1001 else {
1002 front_misalign = 0;
1003 end_misalign = 0;
1004 correction = 0;
1005 aligned_brk = brk;
1006
1007 /*
1008 If MORECORE returns an address lower than we have seen before,
1009 we know it isn't really contiguous. This and some subsequent
1010 checks help cope with non-conforming MORECORE functions and
1011 the presence of "foreign" calls to MORECORE from outside of
1012 malloc or by other threads. We cannot guarantee to detect
1013 these in all cases, but cope with the ones we do detect.
1014 */
1015 if (contiguous(av) && old_size != 0 && brk < old_end) {
1016 set_noncontiguous(av);
1017 }
1018
1019 /* handle contiguous cases */
1020 if (contiguous(av)) {
1021
1022 /*
1023 We can tolerate forward non-contiguities here (usually due
1024 to foreign calls) but treat them as part of our space for
1025 stats reporting.
1026 */
1027 if (old_size != 0)
1028 av->sbrked_mem += brk - old_end;
1029
1030 /* Guarantee alignment of first new chunk made from this space */
1031
1032 front_misalign = (INTERNAL_SIZE_T)chunk2mem(brk) & MALLOC_ALIGN_MASK;
1033 if (front_misalign > 0) {
1034
1035 /*
1036 Skip over some bytes to arrive at an aligned position.
1037 We don't need to specially mark these wasted front bytes.
1038 They will never be accessed anyway because
1039 prev_inuse of av->top (and any chunk created from its start)
1040 is always true after initialization.
1041 */
1042
1043 correction = MALLOC_ALIGNMENT - front_misalign;
1044 aligned_brk += correction;
1045 }
1046
1047 /*
1048 If this isn't adjacent to existing space, then we will not
1049 be able to merge with old_top space, so must add to 2nd request.
1050 */
1051
1052 correction += old_size;
1053
1054 /* Extend the end address to hit a page boundary */
1055 end_misalign = (INTERNAL_SIZE_T)(brk + size + correction);
1056 correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
1057
1058 assert(correction >= 0);
1059 snd_brk = (char*)(MORECORE(correction));
1060
1061 if (snd_brk == (char*)(MORECORE_FAILURE)) {
1062 /*
1063 If can't allocate correction, try to at least find out current
1064 brk. It might be enough to proceed without failing.
1065 */
1066 correction = 0;
1067 snd_brk = (char*)(MORECORE(0));
1068 }
1069 else if (snd_brk < brk) {
1070 /*
1071 If the second call gives noncontiguous space even though
1072 it says it won't, the only course of action is to ignore
1073 results of second call, and conservatively estimate where
1074 the first call left us. Also set noncontiguous, so this
1075 won't happen again, leaving at most one hole.
1076
1077 Note that this check is intrinsically incomplete. Because
1078 MORECORE is allowed to give more space than we ask for,
1079 there is no reliable way to detect a noncontiguity
1080 producing a forward gap for the second call.
1081 */
1082 snd_brk = brk + size;
1083 correction = 0;
1084 set_noncontiguous(av);
1085 }
1086
1087 }
1088
1089 /* handle non-contiguous cases */
1090 else {
1091 /* MORECORE/mmap must correctly align */
1092 assert(aligned_OK(chunk2mem(brk)));
1093
1094 /* Find out current end of memory */
1095 if (snd_brk == (char*)(MORECORE_FAILURE)) {
1096 snd_brk = (char*)(MORECORE(0));
1097 av->sbrked_mem += snd_brk - brk - size;
1098 }
1099 }
1100
1101 /* Adjust top based on results of second sbrk */
1102 if (snd_brk != (char*)(MORECORE_FAILURE)) {
1103 av->top = (mchunkptr)aligned_brk;
1104 set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
1105 av->sbrked_mem += correction;
1106
1107 /*
1108 If not the first time through, we either have a
1109 gap due to foreign sbrk or a non-contiguous region. Insert a
1110 double fencepost at old_top to prevent consolidation with space
1111 we don't own. These fenceposts are artificial chunks that are
1112 marked as inuse and are in any case too small to use. We need
1113 two to make sizes and alignments work out.
1114 */
1115
1116 if (old_size != 0) {
1117 /*
1118 Shrink old_top to insert fenceposts, keeping size a
1119 multiple of MALLOC_ALIGNMENT. We know there is at least
1120 enough space in old_top to do this.
1121 */
1122 old_size = (old_size - 3*SIZE_SZ) & ~MALLOC_ALIGN_MASK;
1123 set_head(old_top, old_size | PREV_INUSE);
1124
1125 /*
1126 Note that the following assignments completely overwrite
1127 old_top when old_size was previously MINSIZE. This is
1128 intentional. We need the fencepost, even if old_top otherwise gets
1129 lost.
1130 */
1131 chunk_at_offset(old_top, old_size )->size =
1132 SIZE_SZ|PREV_INUSE;
1133
1134 chunk_at_offset(old_top, old_size + SIZE_SZ)->size =
1135 SIZE_SZ|PREV_INUSE;
1136
1137 /*
1138 If possible, release the rest, suppressing trimming.
1139 */
1140 if (old_size >= MINSIZE) {
1141 INTERNAL_SIZE_T tt = av->trim_threshold;
1142 av->trim_threshold = (INTERNAL_SIZE_T)(-1);
1143 fREe(chunk2mem(old_top));
1144 av->trim_threshold = tt;
1145 }
1146 }
1147 }
1148 }
1149
1150 /* Update statistics */
1151 sum = av->sbrked_mem;
1152 if (sum > (CHUNK_SIZE_T)(av->max_sbrked_mem))
1153 av->max_sbrked_mem = sum;
1154
1155 sum += av->mmapped_mem;
1156 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
1157 av->max_total_mem = sum;
1158
1159 check_malloc_state();
1160
1161 /* finally, do the allocation */
1162
1163 p = av->top;
1164 size = chunksize(p);
1165
1166 /* check that one of the above allocation paths succeeded */
1167 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
1168
1169 remainder_size = size - nb;
1170 remainder = chunk_at_offset(p, nb);
1171 av->top = remainder;
1172 set_head(p, nb | PREV_INUSE);
1173 set_head(remainder, remainder_size | PREV_INUSE);
1174 check_malloced_chunk(p, nb);
1175 return chunk2mem(p);
1176 }
1177
1178 }
1179
1180 /* catch all failure paths */
1181 MALLOC_FAILURE_ACTION;
1182 return 0;
1183}
1184
1185
1186
1187/*
1188 sYSTRIm is an inverse of sorts to sYSMALLOc. It gives memory back
1189 to the system (via negative arguments to sbrk) if there is unused
1190 memory at the `high' end of the malloc pool. It is called
1191 automatically by free() when top space exceeds the trim
1192 threshold. It is also called by the public malloc_trim routine. It
1193 returns 1 if it actually released any memory, else 0.
1194*/
1195
1196#if __STD_C
1197static int sYSTRIm(size_t pad, mstate av)
1198#else
1199static int sYSTRIm(pad, av) size_t pad; mstate av;
1200#endif
1201{
1202 long top_size; /* Amount of top-most memory */
1203 long extra; /* Amount to release */
1204 long released; /* Amount actually released */
1205 char* current_brk; /* address returned by pre-check sbrk call */
1206 char* new_brk; /* address returned by post-check sbrk call */
1207 size_t pagesz;
1208
1209 pagesz = av->pagesize;
1210 top_size = chunksize(av->top);
1211
1212 /* Release in pagesize units, keeping at least one page */
1213 extra = ((top_size - pad - MINSIZE + (pagesz-1)) / pagesz - 1) * pagesz;
1214
1215 if (extra > 0) {
1216
1217 /*
1218 Only proceed if end of memory is where we last set it.
1219 This avoids problems if there were foreign sbrk calls.
1220 */
1221 current_brk = (char*)(MORECORE(0));
1222 if (current_brk == (char*)(av->top) + top_size) {
1223
1224 /*
1225 Attempt to release memory. We ignore MORECORE return value,
1226 and instead call again to find out where new end of memory is.
1227 This avoids problems if first call releases less than we asked,
1228 or if failure somehow altered the brk value. (We could still
1229 encounter problems if it altered brk in some very bad way,
1230 but the only thing we can do is adjust anyway, which will cause
1231 some downstream failure.)
1232 */
1233
1234 MORECORE(-extra);
1235 new_brk = (char*)(MORECORE(0));
1236
1237 if (new_brk != (char*)MORECORE_FAILURE) {
1238 released = (long)(current_brk - new_brk);
1239
1240 if (released != 0) {
1241 /* Success. Adjust top. */
1242 av->sbrked_mem -= released;
1243 set_head(av->top, (top_size - released) | PREV_INUSE);
1244 check_malloc_state();
1245 return 1;
1246 }
1247 }
1248 }
1249 }
1250 return 0;
1251}
1252
1253/*
1254 ------------------------------ malloc ------------------------------
1255*/
1256
1257
1258#if __STD_C
1259Void_t* mALLOc(size_t bytes)
1260#else
1261 Void_t* mALLOc(bytes) size_t bytes;
1262#endif
1263{
1264 mstate av = get_malloc_state();
1265
1266 INTERNAL_SIZE_T nb; /* normalized request size */
1267 unsigned int idx; /* associated bin index */
1268 mbinptr bin; /* associated bin */
1269 mfastbinptr* fb; /* associated fastbin */
1270
1271 mchunkptr victim; /* inspected/selected chunk */
1272 INTERNAL_SIZE_T size; /* its size */
1273 int victim_index; /* its bin index */
1274
1275 mchunkptr remainder; /* remainder from a split */
1276 CHUNK_SIZE_T remainder_size; /* its size */
1277
1278 unsigned int block; /* bit map traverser */
1279 unsigned int bit; /* bit map traverser */
1280 unsigned int map; /* current word of binmap */
1281
1282 mchunkptr fwd; /* misc temp for linking */
1283 mchunkptr bck; /* misc temp for linking */
1284
1285 /*
1286 Convert request size to internal form by adding SIZE_SZ bytes
1287 overhead plus possibly more to obtain necessary alignment and/or
1288 to obtain a size of at least MINSIZE, the smallest allocatable
1289 size. Also, checked_request2size traps (returning 0) request sizes
1290 that are so large that they wrap around zero when padded and
1291 aligned.
1292 */
1293
1294 checked_request2size(bytes, nb);
1295
1296 /*
1297 Bypass search if no frees yet
1298 */
1299 if (!have_anychunks(av)) {
1300 if (av->max_fast == 0) /* initialization check */
1301 malloc_consolidate(av);
1302 goto use_top;
1303 }
1304
1305 /*
1306 If the size qualifies as a fastbin, first check corresponding bin.
1307 */
1308
1309 if ((CHUNK_SIZE_T)(nb) <= (CHUNK_SIZE_T)(av->max_fast)) {
1310 fb = &(av->fastbins[(fastbin_index(nb))]);
1311 if ( (victim = *fb) != 0) {
1312 *fb = victim->fd;
1313 check_remalloced_chunk(victim, nb);
1314 return chunk2mem(victim);
1315 }
1316 }
1317
1318 /*
1319 If a small request, check regular bin. Since these "smallbins"
1320 hold one size each, no searching within bins is necessary.
1321 (For a large request, we need to wait until unsorted chunks are
1322 processed to find best fit. But for small ones, fits are exact
1323 anyway, so we can check now, which is faster.)
1324 */
1325
1326 if (in_smallbin_range(nb)) {
1327 idx = smallbin_index(nb);
1328 bin = bin_at(av,idx);
1329
1330 if ( (victim = last(bin)) != bin) {
1331 bck = victim->bk;
1332 set_inuse_bit_at_offset(victim, nb);
1333 bin->bk = bck;
1334 bck->fd = bin;
1335
1336 check_malloced_chunk(victim, nb);
1337 return chunk2mem(victim);
1338 }
1339 }
1340
1341 /*
1342 If this is a large request, consolidate fastbins before continuing.
1343 While it might look excessive to kill all fastbins before
1344 even seeing if there is space available, this avoids
1345 fragmentation problems normally associated with fastbins.
1346 Also, in practice, programs tend to have runs of either small or
1347 large requests, but less often mixtures, so consolidation is not
1348 invoked all that often in most programs. And the programs in
1349 which it is called frequently otherwise tend to fragment.
1350 */
1351
1352 else {
1353 idx = largebin_index(nb);
1354 if (have_fastchunks(av))
1355 malloc_consolidate(av);
1356 }
1357
1358 /*
1359 Process recently freed or remaindered chunks, taking one only if
1360 it is an exact fit, or, if this is a small request, the chunk is the
1361 remainder from the most recent non-exact fit. Place other traversed chunks in
1362 bins. Note that this step is the only place in any routine where
1363 chunks are placed in bins.
1364 */
1365
1366 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
1367 bck = victim->bk;
1368 size = chunksize(victim);
1369
1370 /*
1371 If a small request, try to use last remainder if it is the
1372 only chunk in unsorted bin. This helps promote locality for
1373 runs of consecutive small requests. This is the only
1374 exception to best-fit, and applies only when there is
1375 no exact fit for a small chunk.
1376 */
1377
1378 if (in_smallbin_range(nb) &&
1379 bck == unsorted_chunks(av) &&
1380 victim == av->last_remainder &&
1381 (CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
1382
1383 /* split and reattach remainder */
1384 remainder_size = size - nb;
1385 remainder = chunk_at_offset(victim, nb);
1386 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
1387 av->last_remainder = remainder;
1388 remainder->bk = remainder->fd = unsorted_chunks(av);
1389
1390 set_head(victim, nb | PREV_INUSE);
1391 set_head(remainder, remainder_size | PREV_INUSE);
1392 set_foot(remainder, remainder_size);
1393
1394 check_malloced_chunk(victim, nb);
1395 return chunk2mem(victim);
1396 }
1397
1398 /* remove from unsorted list */
1399 unsorted_chunks(av)->bk = bck;
1400 bck->fd = unsorted_chunks(av);
1401
1402 /* Take now instead of binning if exact fit */
1403
1404 if (size == nb) {
1405 set_inuse_bit_at_offset(victim, size);
1406 check_malloced_chunk(victim, nb);
1407 return chunk2mem(victim);
1408 }
1409
1410 /* place chunk in bin */
1411
1412 if (in_smallbin_range(size)) {
1413 victim_index = smallbin_index(size);
1414 bck = bin_at(av, victim_index);
1415 fwd = bck->fd;
1416 }
1417 else {
1418 victim_index = largebin_index(size);
1419 bck = bin_at(av, victim_index);
1420 fwd = bck->fd;
1421
1422 if (fwd != bck) {
1423 /* if smaller than smallest, place first */
1424 if ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(bck->bk->size)) {
1425 fwd = bck;
1426 bck = bck->bk;
1427 }
1428 else if ((CHUNK_SIZE_T)(size) >=
1429 (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
1430
1431 /* maintain large bins in sorted order */
1432 size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
1433 while ((CHUNK_SIZE_T)(size) < (CHUNK_SIZE_T)(fwd->size))
1434 fwd = fwd->fd;
1435 bck = fwd->bk;
1436 }
1437 }
1438 }
1439
1440 mark_bin(av, victim_index);
1441 victim->bk = bck;
1442 victim->fd = fwd;
1443 fwd->bk = victim;
1444 bck->fd = victim;
1445 }
1446
1447 /*
1448 If a large request, scan through the chunks of current bin to
1449 find one that fits. (This will be the smallest that fits unless
1450 FIRST_SORTED_BIN_SIZE has been changed from default.) This is
1451 the only step where an unbounded number of chunks might be
1452 scanned without doing anything useful with them. However the
1453 lists tend to be short.
1454 */
1455
1456 if (!in_smallbin_range(nb)) {
1457 bin = bin_at(av, idx);
1458
1459 for (victim = last(bin); victim != bin; victim = victim->bk) {
1460 size = chunksize(victim);
1461
1462 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb)) {
1463 remainder_size = size - nb;
1464 dl_unlink(victim, bck, fwd);
1465
1466 /* Exhaust */
1467 if (remainder_size < MINSIZE) {
1468 set_inuse_bit_at_offset(victim, size);
1469 check_malloced_chunk(victim, nb);
1470 return chunk2mem(victim);
1471 }
1472 /* Split */
1473 else {
1474 remainder = chunk_at_offset(victim, nb);
1475 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
1476 remainder->bk = remainder->fd = unsorted_chunks(av);
1477 set_head(victim, nb | PREV_INUSE);
1478 set_head(remainder, remainder_size | PREV_INUSE);
1479 set_foot(remainder, remainder_size);
1480 check_malloced_chunk(victim, nb);
1481 return chunk2mem(victim);
1482 }
1483 }
1484 }
1485 }
1486
1487 /*
1488 Search for a chunk by scanning bins, starting with next largest
1489 bin. This search is strictly by best-fit; i.e., the smallest
1490 (with ties going to approximately the least recently used) chunk
1491 that fits is selected.
1492
1493 The bitmap avoids needing to check that most blocks are nonempty.
1494 */
1495
1496 ++idx;
1497 bin = bin_at(av,idx);
1498 block = idx2block(idx);
1499 map = av->binmap[block];
1500 bit = idx2bit(idx);
1501
1502 for (;;) {
1503
1504 /* Skip rest of block if there are no more set bits in this block. */
1505 if (bit > map || bit == 0) {
1506 do {
1507 if (++block >= BINMAPSIZE) /* out of bins */
1508 goto use_top;
1509 } while ( (map = av->binmap[block]) == 0);
1510
1511 bin = bin_at(av, (block << BINMAPSHIFT));
1512 bit = 1;
1513 }
1514
1515 /* Advance to bin with set bit. There must be one. */
1516 while ((bit & map) == 0) {
1517 bin = next_bin(bin);
1518 bit <<= 1;
1519 assert(bit != 0);
1520 }
1521
1522 /* Inspect the bin. It is likely to be non-empty */
1523 victim = last(bin);
1524
1525 /* If a false alarm (empty bin), clear the bit. */
1526 if (victim == bin) {
1527 av->binmap[block] = map &= ~bit; /* Write through */
1528 bin = next_bin(bin);
1529 bit <<= 1;
1530 }
1531
1532 else {
1533 size = chunksize(victim);
1534
1535 /* We know the first chunk in this bin is big enough to use. */
1536 assert((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb));
1537
1538 remainder_size = size - nb;
1539
1540 /* dl_unlink */
1541 bck = victim->bk;
1542 bin->bk = bck;
1543 bck->fd = bin;
1544
1545 /* Exhaust */
1546 if (remainder_size < MINSIZE) {
1547 set_inuse_bit_at_offset(victim, size);
1548 check_malloced_chunk(victim, nb);
1549 return chunk2mem(victim);
1550 }
1551
1552 /* Split */
1553 else {
1554 remainder = chunk_at_offset(victim, nb);
1555
1556 unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
1557 remainder->bk = remainder->fd = unsorted_chunks(av);
1558 /* advertise as last remainder */
1559 if (in_smallbin_range(nb))
1560 av->last_remainder = remainder;
1561
1562 set_head(victim, nb | PREV_INUSE);
1563 set_head(remainder, remainder_size | PREV_INUSE);
1564 set_foot(remainder, remainder_size);
1565 check_malloced_chunk(victim, nb);
1566 return chunk2mem(victim);
1567 }
1568 }
1569 }
1570
1571 use_top:
1572 /*
1573 If large enough, split off the chunk bordering the end of memory
1574 (held in av->top). Note that this is in accord with the best-fit
1575 search rule. In effect, av->top is treated as larger (and thus
1576 less well fitting) than any other available chunk since it can
1577 be extended to be as large as necessary (up to system
1578 limitations).
1579
1580 We require that av->top always exists (i.e., has size >=
1581 MINSIZE) after initialization, so if it would otherwise be
1582 exhausted by the current request, it is replenished. (The main
1583 reason for ensuring it exists is that we may need MINSIZE space
1584 to put in fenceposts in sysmalloc.)
1585 */
1586
1587 victim = av->top;
1588 size = chunksize(victim);
1589
1590 if ((CHUNK_SIZE_T)(size) >= (CHUNK_SIZE_T)(nb + MINSIZE)) {
1591 remainder_size = size - nb;
1592 remainder = chunk_at_offset(victim, nb);
1593 av->top = remainder;
1594 set_head(victim, nb | PREV_INUSE);
1595 set_head(remainder, remainder_size | PREV_INUSE);
1596
1597 check_malloced_chunk(victim, nb);
1598 return chunk2mem(victim);
1599 }
1600
1601 /*
1602 If no space in top, relay to handle system-dependent cases
1603 */
1604 return sYSMALLOc(nb, av);
1605}
1606
1607/*
1608 ------------------------------ free ------------------------------
1609*/
1610
1611#if __STD_C
1612void fREe(Void_t* mem)
1613#else
1614void fREe(mem) Void_t* mem;
1615#endif
1616{
1617 mstate av = get_malloc_state();
1618
1619 mchunkptr p; /* chunk corresponding to mem */
1620 INTERNAL_SIZE_T size; /* its size */
1621 mfastbinptr* fb; /* associated fastbin */
1622 mchunkptr nextchunk; /* next contiguous chunk */
1623 INTERNAL_SIZE_T nextsize; /* its size */
1624 int nextinuse; /* true if nextchunk is used */
1625 INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
1626 mchunkptr bck; /* misc temp for linking */
1627 mchunkptr fwd; /* misc temp for linking */
1628
1629 /* free(0) has no effect */
1630 if (mem != 0) {
1631 p = mem2chunk(mem);
1632 size = chunksize(p);
1633
1634 check_inuse_chunk(p);
1635
1636 /*
1637 If eligible, place chunk on a fastbin so it can be found
1638 and used quickly in malloc.
1639 */
1640
1641 if ((CHUNK_SIZE_T)(size) <= (CHUNK_SIZE_T)(av->max_fast)
1642
1643#if TRIM_FASTBINS
1644 /*
1645 If TRIM_FASTBINS set, don't place chunks
1646 bordering top into fastbins
1647 */
1648 && (chunk_at_offset(p, size) != av->top)
1649#endif
1650 ) {
1651
1652 set_fastchunks(av);
1653 fb = &(av->fastbins[fastbin_index(size)]);
1654 p->fd = *fb;
1655 *fb = p;
1656 }
1657
1658 /*
1659 Consolidate other non-mmapped chunks as they arrive.
1660 */
1661
1662 else if (!chunk_is_mmapped(p)) {
1663 set_anychunks(av);
1664
1665 nextchunk = chunk_at_offset(p, size);
1666 nextsize = chunksize(nextchunk);
1667
1668 /* consolidate backward */
1669 if (!prev_inuse(p)) {
1670 prevsize = p->prev_size;
1671 size += prevsize;
1672 p = chunk_at_offset(p, -((long) prevsize));
1673 dl_unlink(p, bck, fwd);
1674 }
1675
1676 if (nextchunk != av->top) {
1677 /* get and clear inuse bit */
1678 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
1679 set_head(nextchunk, nextsize);
1680
1681 /* consolidate forward */
1682 if (!nextinuse) {
1683 dl_unlink(nextchunk, bck, fwd);
1684 size += nextsize;
1685 }
1686
1687 /*
1688 Place the chunk in unsorted chunk list. Chunks are
1689 not placed into regular bins until after they have
1690 been given one chance to be used in malloc.
1691 */
1692
1693 bck = unsorted_chunks(av);
1694 fwd = bck->fd;
1695 p->bk = bck;
1696 p->fd = fwd;
1697 bck->fd = p;
1698 fwd->bk = p;
1699
1700 set_head(p, size | PREV_INUSE);
1701 set_foot(p, size);
1702
1703 check_free_chunk(p);
1704 }
1705
1706 /*
1707 If the chunk borders the current high end of memory,
1708 consolidate into top
1709 */
1710
1711 else {
1712 size += nextsize;
1713 set_head(p, size | PREV_INUSE);
1714 av->top = p;
1715 check_chunk(p);
1716 }
1717
1718 /*
1719 If freeing a large space, consolidate possibly-surrounding
1720 chunks. Then, if the total unused topmost memory exceeds trim
1721 threshold, ask malloc_trim to reduce top.
1722
1723 Unless max_fast is 0, we don't know if there are fastbins
1724 bordering top, so we cannot tell for sure whether threshold
1725 has been reached unless fastbins are consolidated. But we
1726 don't want to consolidate on each free. As a compromise,
1727 consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
1728 is reached.
1729 */
1730
1731 if ((CHUNK_SIZE_T)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
1732 if (have_fastchunks(av))
1733 malloc_consolidate(av);
1734
1735#ifndef MORECORE_CANNOT_TRIM
1736 if ((CHUNK_SIZE_T)(chunksize(av->top)) >=
1737 (CHUNK_SIZE_T)(av->trim_threshold))
1738 sYSTRIm(av->top_pad, av);
1739#endif
1740 }
1741
1742 }
1743 /*
1744 If the chunk was allocated via mmap, release via munmap()
1745 Note that if HAVE_MMAP is false but chunk_is_mmapped is
1746 true, then the user must have overwritten memory. There's nothing
1747 we can do to catch this error unless DEBUG is set, in which case
1748 check_inuse_chunk (above) will have triggered an error.
1749 */
1750
1751 else {
1752 }
1753 }
1754}
1755
1756/*
1757 ------------------------- malloc_consolidate -------------------------
1758
1759 malloc_consolidate is a specialized version of free() that tears
1760 down chunks held in fastbins. Free itself cannot be used for this
1761 purpose since, among other things, it might place chunks back onto
1762 fastbins. So, instead, we need to use a minor variant of the same
1763 code.
1764
1765 Also, because this routine needs to be called the first time through
1766 malloc anyway, it turns out to be the perfect place to trigger
1767 initialization code.
1768*/
1769
1770#if __STD_C
1771static void malloc_consolidate(mstate av)
1772#else
1773static void malloc_consolidate(av) mstate av;
1774#endif
1775{
1776 mfastbinptr* fb; /* current fastbin being consolidated */
1777 mfastbinptr* maxfb; /* last fastbin (for loop control) */
1778 mchunkptr p; /* current chunk being consolidated */
1779 mchunkptr nextp; /* next chunk to consolidate */
1780 mchunkptr unsorted_bin; /* bin header */
1781 mchunkptr first_unsorted; /* chunk to link to */
1782
1783 /* These have same use as in free() */
1784 mchunkptr nextchunk;
1785 INTERNAL_SIZE_T size;
1786 INTERNAL_SIZE_T nextsize;
1787 INTERNAL_SIZE_T prevsize;
1788 int nextinuse;
1789 mchunkptr bck;
1790 mchunkptr fwd;
1791
1792 /*
1793 If max_fast is 0, we know that av hasn't
1794 yet been initialized, in which case do so below
1795 */
1796
1797 if (av->max_fast != 0) {
1798 clear_fastchunks(av);
1799
1800 unsorted_bin = unsorted_chunks(av);
1801
1802 /*
1803 Remove each chunk from fast bin and consolidate it, placing it
1804 then in unsorted bin. Among other reasons for doing this,
1805 placing in unsorted bin avoids needing to calculate actual bins
1806 until malloc is sure that chunks aren't immediately going to be
1807 reused anyway.
1808 */
1809
1810 maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
1811 fb = &(av->fastbins[0]);
1812 do {
1813 if ( (p = *fb) != 0) {
1814 *fb = 0;
1815
1816 do {
1817 check_inuse_chunk(p);
1818 nextp = p->fd;
1819
1820 /* Slightly streamlined version of consolidation code in free() */
1821 size = p->size & ~PREV_INUSE;
1822 nextchunk = chunk_at_offset(p, size);
1823 nextsize = chunksize(nextchunk);
1824
1825 if (!prev_inuse(p)) {
1826 prevsize = p->prev_size;
1827 size += prevsize;
1828 p = chunk_at_offset(p, -((long) prevsize));
1829 dl_unlink(p, bck, fwd);
1830 }
1831
1832 if (nextchunk != av->top) {
1833 nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
1834 set_head(nextchunk, nextsize);
1835
1836 if (!nextinuse) {
1837 size += nextsize;
1838 dl_unlink(nextchunk, bck, fwd);
1839 }
1840
1841 first_unsorted = unsorted_bin->fd;
1842 unsorted_bin->fd = p;
1843 first_unsorted->bk = p;
1844
1845 set_head(p, size | PREV_INUSE);
1846 p->bk = unsorted_bin;
1847 p->fd = first_unsorted;
1848 set_foot(p, size);
1849 }
1850
1851 else {
1852 size += nextsize;
1853 set_head(p, size | PREV_INUSE);
1854 av->top = p;
1855 }
1856
1857 } while ( (p = nextp) != 0);
1858
1859 }
1860 } while (fb++ != maxfb);
1861 }
1862 else {
1863 malloc_init_state(av);
1864 check_malloc_state();
1865 }
1866}
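/*
  Note: the max_fast == 0 test above doubles as the lazy-initialization
  hook mentioned in the header comment.  iALLOc, vALLOc and pVALLOc
  below check av->max_fast == 0 and call malloc_consolidate(av) before
  doing any real work, while mTRIm and mALLOPt call it unconditionally;
  either way the first call ends up in malloc_init_state(av).
*/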
1867
1868/*
1869 ------------------------------ realloc ------------------------------
1870*/
1871
1872
1873#if __STD_C
1874Void_t* rEALLOc(Void_t* oldmem, size_t bytes)
1875#else
1876Void_t* rEALLOc(oldmem, bytes) Void_t* oldmem; size_t bytes;
1877#endif
1878{
1879 mstate av = get_malloc_state();
1880
1881 INTERNAL_SIZE_T nb; /* padded request size */
1882
1883 mchunkptr oldp; /* chunk corresponding to oldmem */
1884 INTERNAL_SIZE_T oldsize; /* its size */
1885
1886 mchunkptr newp; /* chunk to return */
1887 INTERNAL_SIZE_T newsize; /* its size */
1888 Void_t* newmem; /* corresponding user mem */
1889
1890 mchunkptr next; /* next contiguous chunk after oldp */
1891
1892 mchunkptr remainder; /* extra space at end of newp */
1893 CHUNK_SIZE_T remainder_size; /* its size */
1894
1895 mchunkptr bck; /* misc temp for linking */
1896 mchunkptr fwd; /* misc temp for linking */
1897
1898 CHUNK_SIZE_T copysize; /* bytes to copy */
1899 unsigned int ncopies; /* INTERNAL_SIZE_T words to copy */
1900 INTERNAL_SIZE_T* s; /* copy source */
1901 INTERNAL_SIZE_T* d; /* copy destination */
1902
1903
1904#ifdef REALLOC_ZERO_BYTES_FREES
1905 if (bytes == 0) {
1906 fREe(oldmem);
1907 return 0;
1908 }
1909#endif
1910
1911 /* realloc of null is supposed to be same as malloc */
1912 if (oldmem == 0) return mALLOc(bytes);
1913
1914 checked_request2size(bytes, nb);
1915
1916 oldp = mem2chunk(oldmem);
1917 oldsize = chunksize(oldp);
1918
1919 check_inuse_chunk(oldp);
1920
1921 if (!chunk_is_mmapped(oldp)) {
1922
1923 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb)) {
1924 /* already big enough; split below */
1925 newp = oldp;
1926 newsize = oldsize;
1927 }
1928
1929 else {
1930 next = chunk_at_offset(oldp, oldsize);
1931
1932 /* Try to expand forward into top */
1933 if (next == av->top &&
1934 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
1935 (CHUNK_SIZE_T)(nb + MINSIZE)) {
1936 set_head_size(oldp, nb);
1937 av->top = chunk_at_offset(oldp, nb);
1938 set_head(av->top, (newsize - nb) | PREV_INUSE);
1939 return chunk2mem(oldp);
1940 }
1941
1942 /* Try to expand forward into next chunk; split off remainder below */
1943 else if (next != av->top &&
1944 !inuse(next) &&
1945 (CHUNK_SIZE_T)(newsize = oldsize + chunksize(next)) >=
1946 (CHUNK_SIZE_T)(nb)) {
1947 newp = oldp;
1948 dl_unlink(next, bck, fwd);
1949 }
1950
1951 /* allocate, copy, free */
1952 else {
1953 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
1954 if (newmem == 0)
1955 return 0; /* propagate failure */
1956
1957 newp = mem2chunk(newmem);
1958 newsize = chunksize(newp);
1959
1960 /*
1961 Avoid copy if newp is next chunk after oldp.
1962 */
1963 if (newp == next) {
1964 newsize += oldsize;
1965 newp = oldp;
1966 }
1967 else {
1968 /*
1969 Unroll copy of <= 36 bytes (72 if 8byte sizes)
1970 We know that contents have an odd number of
1971 INTERNAL_SIZE_T-sized words; minimally 3.
1972 */
1973
1974 copysize = oldsize - SIZE_SZ;
1975 s = (INTERNAL_SIZE_T*)(oldmem);
1976 d = (INTERNAL_SIZE_T*)(newmem);
1977 ncopies = copysize / sizeof(INTERNAL_SIZE_T);
1978 assert(ncopies >= 3);
1979
1980 if (ncopies > 9)
1981 memcpy(d, s, copysize);
1982
1983 else {
1984 *(d+0) = *(s+0);
1985 *(d+1) = *(s+1);
1986 *(d+2) = *(s+2);
1987 if (ncopies > 4) {
1988 *(d+3) = *(s+3);
1989 *(d+4) = *(s+4);
1990 if (ncopies > 6) {
1991 *(d+5) = *(s+5);
1992 *(d+6) = *(s+6);
1993 if (ncopies > 8) {
1994 *(d+7) = *(s+7);
1995 *(d+8) = *(s+8);
1996 }
1997 }
1998 }
1999 }
2000
2001 fREe(oldmem);
2002 check_inuse_chunk(newp);
2003 return chunk2mem(newp);
2004 }
2005 }
2006 }
2007
2008 /* If possible, free extra space in old or extended chunk */
2009
2010 assert((CHUNK_SIZE_T)(newsize) >= (CHUNK_SIZE_T)(nb));
2011
2012 remainder_size = newsize - nb;
2013
2014 if (remainder_size < MINSIZE) { /* not enough extra to split off */
2015 set_head_size(newp, newsize);
2016 set_inuse_bit_at_offset(newp, newsize);
2017 }
2018 else { /* split remainder */
2019 remainder = chunk_at_offset(newp, nb);
2020 set_head_size(newp, nb);
2021 set_head(remainder, remainder_size | PREV_INUSE);
2022 /* Mark remainder as inuse so free() won't complain */
2023 set_inuse_bit_at_offset(remainder, remainder_size);
2024 fREe(chunk2mem(remainder));
2025 }
2026
2027 check_inuse_chunk(newp);
2028 return chunk2mem(newp);
2029 }
2030
2031 /*
2032 Handle mmap cases
2033 */
2034
2035 else {
2036#if HAVE_MMAP
2037
2038#if HAVE_MREMAP
2039 INTERNAL_SIZE_T offset = oldp->prev_size;
2040 size_t pagemask = av->pagesize - 1;
2041 char *cp;
2042 CHUNK_SIZE_T sum;
2043
2044 /* Note the extra SIZE_SZ overhead */
2045 newsize = (nb + offset + SIZE_SZ + pagemask) & ~pagemask;
2046
2047 /* don't need to remap if still within same page */
2048 if (oldsize == newsize - offset)
2049 return oldmem;
2050
2051 cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
2052
2053 if (cp != (char*)MORECORE_FAILURE) {
2054
2055 newp = (mchunkptr)(cp + offset);
2056 set_head(newp, (newsize - offset)|IS_MMAPPED);
2057
2058 assert(aligned_OK(chunk2mem(newp)));
2059 assert((newp->prev_size == offset));
2060
2061 /* update statistics */
2062 sum = av->mmapped_mem += newsize - oldsize;
2063 if (sum > (CHUNK_SIZE_T)(av->max_mmapped_mem))
2064 av->max_mmapped_mem = sum;
2065 sum += av->sbrked_mem;
2066 if (sum > (CHUNK_SIZE_T)(av->max_total_mem))
2067 av->max_total_mem = sum;
2068
2069 return chunk2mem(newp);
2070 }
2071#endif
2072
2073 /* Note the extra SIZE_SZ overhead. */
2074 if ((CHUNK_SIZE_T)(oldsize) >= (CHUNK_SIZE_T)(nb + SIZE_SZ))
2075 newmem = oldmem; /* do nothing */
2076 else {
2077 /* Must alloc, copy, free. */
2078 newmem = mALLOc(nb - MALLOC_ALIGN_MASK);
2079 if (newmem != 0) {
2080 memcpy(newmem, oldmem, oldsize - 2*SIZE_SZ);
2081 fREe(oldmem);
2082 }
2083 }
2084 return newmem;
2085
2086#else
2087 /* If !HAVE_MMAP, but chunk_is_mmapped, user must have overwritten mem */
2088 check_malloc_state();
2089 MALLOC_FAILURE_ACTION;
2090 return 0;
2091#endif
2092 }
2093}
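/*
  A minimal usage sketch, not part of the allocator, using the standard
  public names the routines are normally exported as.  On failure
  rEALLOc returns 0 and leaves the old block intact, so callers should
  keep the old pointer until the result has been checked; whether
  realloc(p, 0) frees p depends on REALLOC_ZERO_BYTES_FREES:

    char *buf = malloc(1024);
    char *tmp = realloc(buf, 2048);
    if (tmp == 0) {
      // allocation failed; buf is still valid and must still be freed
      free(buf);
    } else {
      buf = tmp;  // contents preserved; the block may or may not have moved
    }
*/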
2094
2095/*
2096 ------------------------------ memalign ------------------------------
2097*/
2098
2099#if __STD_C
2100Void_t* mEMALIGn(size_t alignment, size_t bytes)
2101#else
2102Void_t* mEMALIGn(alignment, bytes) size_t alignment; size_t bytes;
2103#endif
2104{
2105 INTERNAL_SIZE_T nb; /* padded request size */
2106 char* m; /* memory returned by malloc call */
2107 mchunkptr p; /* corresponding chunk */
2108 char* brk; /* alignment point within p */
2109 mchunkptr newp; /* chunk to return */
2110 INTERNAL_SIZE_T newsize; /* its size */
2111 INTERNAL_SIZE_T leadsize; /* leading space before alignment point */
2112 mchunkptr remainder; /* spare room at end to split off */
2113 CHUNK_SIZE_T remainder_size; /* its size */
2114 INTERNAL_SIZE_T size;
2115
2116 /* If the caller needs less alignment than we give anyway, just relay to malloc */
2117
2118 if (alignment <= MALLOC_ALIGNMENT) return mALLOc(bytes);
2119
2120 /* Otherwise, ensure that it is at least a minimum chunk size */
2121
2122 if (alignment < MINSIZE) alignment = MINSIZE;
2123
2124 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
2125 if ((alignment & (alignment - 1)) != 0) {
2126 size_t a = MALLOC_ALIGNMENT * 2;
2127 while ((CHUNK_SIZE_T)a < (CHUNK_SIZE_T)alignment) a <<= 1;
2128 alignment = a;
2129 }
2130
2131 checked_request2size(bytes, nb);
2132
2133 /*
2134 Strategy: find a spot within that chunk that meets the alignment
2135 request, and then possibly free the leading and trailing space.
2136 */
2137
2138
2139 /* Call malloc with worst case padding to hit alignment. */
2140
2141 m = (char*)(mALLOc(nb + alignment + MINSIZE));
2142
2143 if (m == 0) return 0; /* propagate failure */
2144
2145 p = mem2chunk(m);
2146
2147 if ((((PTR_UINT)(m)) % alignment) != 0) { /* misaligned */
2148
2149 /*
2150 Find an aligned spot inside chunk. Since we need to give back
2151 leading space in a chunk of at least MINSIZE, if the first
2152 calculation places us at a spot with less than MINSIZE leader,
2153 we can move to the next aligned spot -- we've allocated enough
2154 total room so that this is always possible.
2155 */
2156
2157 brk = (char*)mem2chunk((PTR_UINT)(((PTR_UINT)(m + alignment - 1)) &
2158 -((signed long) alignment)));
2159 if ((CHUNK_SIZE_T)(brk - (char*)(p)) < MINSIZE)
2160 brk += alignment;
2161
2162 newp = (mchunkptr)brk;
2163 leadsize = brk - (char*)(p);
2164 newsize = chunksize(p) - leadsize;
2165
2166 /* For mmapped chunks, just adjust offset */
2167 if (chunk_is_mmapped(p)) {
2168 newp->prev_size = p->prev_size + leadsize;
2169 set_head(newp, newsize|IS_MMAPPED);
2170 return chunk2mem(newp);
2171 }
2172
2173 /* Otherwise, give back leader, use the rest */
2174 set_head(newp, newsize | PREV_INUSE);
2175 set_inuse_bit_at_offset(newp, newsize);
2176 set_head_size(p, leadsize);
2177 fREe(chunk2mem(p));
2178 p = newp;
2179
2180 assert (newsize >= nb &&
2181 (((PTR_UINT)(chunk2mem(p))) % alignment) == 0);
2182 }
2183
2184 /* Also give back spare room at the end */
2185 if (!chunk_is_mmapped(p)) {
2186 size = chunksize(p);
2187 if ((CHUNK_SIZE_T)(size) > (CHUNK_SIZE_T)(nb + MINSIZE)) {
2188 remainder_size = size - nb;
2189 remainder = chunk_at_offset(p, nb);
2190 set_head(remainder, remainder_size | PREV_INUSE);
2191 set_head_size(p, nb);
2192 fREe(chunk2mem(remainder));
2193 }
2194 }
2195
2196 check_inuse_chunk(p);
2197 return chunk2mem(p);
2198}
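/*
  Usage sketch, illustrative only (standard public name assumed, and
  MALLOC_ALIGNMENT assumed to be the usual 8).  Alignments no larger
  than MALLOC_ALIGNMENT are simply relayed to malloc, and a
  non-power-of-two alignment is rounded up to the next power of two:

    void *p = memalign(64, 1000);  // 64-byte aligned block of >= 1000 bytes
    void *q = memalign(48, 1000);  // 48 is not a power of two; treated as 64
    assert(((size_t)p % 64) == 0);
*/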
2199
2200/*
2201 ------------------------------ calloc ------------------------------
2202*/
2203
2204#if __STD_C
2205Void_t* cALLOc(size_t n_elements, size_t elem_size)
2206#else
2207Void_t* cALLOc(n_elements, elem_size) size_t n_elements; size_t elem_size;
2208#endif
2209{
2210 mchunkptr p;
2211 CHUNK_SIZE_T clearsize;
2212 CHUNK_SIZE_T nclears;
2213 INTERNAL_SIZE_T* d;
2214
2215 Void_t* mem = mALLOc(n_elements * elem_size);
2216
2217 if (mem != 0) {
2218 p = mem2chunk(mem);
2219
2220 if (!chunk_is_mmapped(p))
2221 {
2222 /*
2223 Unroll clear of <= 36 bytes (72 if 8byte sizes)
2224 We know that contents have an odd number of
2225 INTERNAL_SIZE_T-sized words; minimally 3.
2226 */
2227
2228 d = (INTERNAL_SIZE_T*)mem;
2229 clearsize = chunksize(p) - SIZE_SZ;
2230 nclears = clearsize / sizeof(INTERNAL_SIZE_T);
2231 assert(nclears >= 3);
2232
2233 if (nclears > 9)
2234 memset(d, 0, clearsize);
2235
2236 else {
2237 *(d+0) = 0;
2238 *(d+1) = 0;
2239 *(d+2) = 0;
2240 if (nclears > 4) {
2241 *(d+3) = 0;
2242 *(d+4) = 0;
2243 if (nclears > 6) {
2244 *(d+5) = 0;
2245 *(d+6) = 0;
2246 if (nclears > 8) {
2247 *(d+7) = 0;
2248 *(d+8) = 0;
2249 }
2250 }
2251 }
2252 }
2253 }
2254 }
2255 return mem;
2256}
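/*
  Note that the product n_elements * elem_size above is not checked for
  overflow, so a huge element count can wrap around and yield a block
  smaller than intended.  A caller-side guard, illustrative only:

    if (elem_size != 0 && n_elements > (size_t) -1 / elem_size) {
      // product would overflow size_t; treat as an allocation failure
      p = 0;
    } else {
      p = calloc(n_elements, elem_size);
    }
*/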
2257
2258/*
2259 ------------------------------ cfree ------------------------------
2260*/
2261
2262#if __STD_C
2263void cFREe(Void_t *mem)
2264#else
2265void cFREe(mem) Void_t *mem;
2266#endif
2267{
2268 fREe(mem);
2269}
2270
2271/*
2272 ------------------------- independent_calloc -------------------------
2273*/
2274
2275#if __STD_C
2276Void_t** iCALLOc(size_t n_elements, size_t elem_size, Void_t* chunks[])
2277#else
2278Void_t** iCALLOc(n_elements, elem_size, chunks) size_t n_elements; size_t elem_size; Void_t* chunks[];
2279#endif
2280{
2281 size_t sz = elem_size; /* serves as 1-element array */
2282 /* opts arg of 3 means all elements are same size, and should be cleared */
2283 return iALLOc(n_elements, &sz, 3, chunks);
2284}
2285
2286/*
2287 ------------------------- independent_comalloc -------------------------
2288*/
2289
2290#if __STD_C
2291Void_t** iCOMALLOc(size_t n_elements, size_t sizes[], Void_t* chunks[])
2292#else
2293Void_t** iCOMALLOc(n_elements, sizes, chunks) size_t n_elements; size_t sizes[]; Void_t* chunks[];
2294#endif
2295{
2296 return iALLOc(n_elements, sizes, 0, chunks);
2297}
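/*
  Usage sketch for independent_comalloc (the name iCOMALLOc is normally
  exported as), illustrative only: several arrays are carved out of one
  underlying chunk, and each element is later freed individually:

    void  *mem[2];
    size_t sizes[2];
    sizes[0] = 100 * sizeof(int);
    sizes[1] = 100 * sizeof(double);
    if (independent_comalloc(2, sizes, mem) != 0) {
      int    *counts = (int *)    mem[0];
      double *totals = (double *) mem[1];
      // ... use the arrays ...
      free(counts);
      free(totals);
    }
*/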
2298
2299
2300/*
2301 ------------------------------ ialloc ------------------------------
2302 ialloc provides common support for independent_X routines, handling all of
2303 the combinations that can result.
2304
2305 The opts arg has:
2306 bit 0 set if all elements are same size (using sizes[0])
2307 bit 1 set if elements should be zeroed
2308*/
2309
2310
2311#if __STD_C
2312static Void_t** iALLOc(size_t n_elements,
2313 size_t* sizes,
2314 int opts,
2315 Void_t* chunks[])
2316#else
2317static Void_t** iALLOc(n_elements, sizes, opts, chunks) size_t n_elements; size_t* sizes; int opts; Void_t* chunks[];
2318#endif
2319{
2320 mstate av = get_malloc_state();
2321 INTERNAL_SIZE_T element_size; /* chunksize of each element, if all same */
2322 INTERNAL_SIZE_T contents_size; /* total size of elements */
2323 INTERNAL_SIZE_T array_size; /* request size of pointer array */
2324 Void_t* mem; /* malloced aggregate space */
2325 mchunkptr p; /* corresponding chunk */
2326 INTERNAL_SIZE_T remainder_size; /* remaining bytes while splitting */
2327 Void_t** marray; /* either "chunks" or malloced ptr array */
2328 mchunkptr array_chunk; /* chunk for malloced ptr array */
2329 INTERNAL_SIZE_T size;
2330 size_t i;
2331
2332 /* Ensure initialization */
2333 if (av->max_fast == 0) malloc_consolidate(av);
2334
2335 /* compute array length, if needed */
2336 if (chunks != 0) {
2337 if (n_elements == 0)
2338 return chunks; /* nothing to do */
2339 marray = chunks;
2340 array_size = 0;
2341 }
2342 else {
2343 /* if empty req, must still return chunk representing empty array */
2344 if (n_elements == 0)
2345 return (Void_t**) mALLOc(0);
2346 marray = 0;
2347 array_size = request2size(n_elements * (sizeof(Void_t*)));
2348 }
2349
2350 /* compute total element size */
2351 if (opts & 0x1) { /* all-same-size */
2352 element_size = request2size(*sizes);
2353 contents_size = n_elements * element_size;
2354 }
2355 else { /* add up all the sizes */
2356 element_size = 0;
2357 contents_size = 0;
2358 for (i = 0; i != n_elements; ++i)
2359 contents_size += request2size(sizes[i]);
2360 }
2361
2362 /* subtract out alignment bytes from total to minimize overallocation */
2363 size = contents_size + array_size - MALLOC_ALIGN_MASK;
2364
2365 /*
2366 Allocate the aggregate chunk.  The chunk must not be mmapped
2367 (hence the assert on chunk_is_mmapped below), since we would not
2368 be able to later free/realloc space internal to a segregated
2369 mmap region.
2370 */
2371 mem = mALLOc(size);
2372 if (mem == 0)
2373 return 0;
2374
2375 p = mem2chunk(mem);
2376 assert(!chunk_is_mmapped(p));
2377 remainder_size = chunksize(p);
2378
2379 if (opts & 0x2) { /* optionally clear the elements */
2380 memset(mem, 0, remainder_size - SIZE_SZ - array_size);
2381 }
2382
2383 /* If not provided, allocate the pointer array as final part of chunk */
2384 if (marray == 0) {
2385 array_chunk = chunk_at_offset(p, contents_size);
2386 marray = (Void_t**) (chunk2mem(array_chunk));
2387 set_head(array_chunk, (remainder_size - contents_size) | PREV_INUSE);
2388 remainder_size = contents_size;
2389 }
2390
2391 /* split out elements */
2392 for (i = 0; ; ++i) {
2393 marray[i] = chunk2mem(p);
2394 if (i != n_elements-1) {
2395 if (element_size != 0)
2396 size = element_size;
2397 else
2398 size = request2size(sizes[i]);
2399 remainder_size -= size;
2400 set_head(p, size | PREV_INUSE);
2401 p = chunk_at_offset(p, size);
2402 }
2403 else { /* the final element absorbs any overallocation slop */
2404 set_head(p, remainder_size | PREV_INUSE);
2405 break;
2406 }
2407 }
2408
2409#if DEBUG_DLMALLOC
2410 if (marray != chunks) {
2411 /* final element must have exactly exhausted chunk */
2412 if (element_size != 0)
2413 assert(remainder_size == element_size);
2414 else
2415 assert(remainder_size == request2size(sizes[i]));
2416 check_inuse_chunk(mem2chunk(marray));
2417 }
2418
2419 for (i = 0; i != n_elements; ++i)
2420 check_inuse_chunk(mem2chunk(marray[i]));
2421#endif
2422
2423 return marray;
2424}
2425
2426
2427/*
2428 ------------------------------ valloc ------------------------------
2429*/
2430
2431#if __STD_C
2432Void_t* vALLOc(size_t bytes)
2433#else
2434Void_t* vALLOc(bytes) size_t bytes;
2435#endif
2436{
2437 /* Ensure initialization */
2438 mstate av = get_malloc_state();
2439 if (av->max_fast == 0) malloc_consolidate(av);
2440 return mEMALIGn(av->pagesize, bytes);
2441}
2442
2443/*
2444 ------------------------------ pvalloc ------------------------------
2445*/
2446
2447
2448#if __STD_C
2449Void_t* pVALLOc(size_t bytes)
2450#else
2451Void_t* pVALLOc(bytes) size_t bytes;
2452#endif
2453{
2454 mstate av = get_malloc_state();
2455 size_t pagesz;
2456
2457 /* Ensure initialization */
2458 if (av->max_fast == 0) malloc_consolidate(av);
2459 pagesz = av->pagesize;
2460 return mEMALIGn(pagesz, (bytes + pagesz - 1) & ~(pagesz - 1));
2461}
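/*
  The difference between the two page-aligned variants, assuming a
  4096-byte page (illustrative only):

    void *a = valloc(100);   // page-aligned, sized for a 100-byte request
    void *b = pvalloc(100);  // request first rounded up to 4096 bytes
*/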
2462
2463
2464/*
2465 ------------------------------ malloc_trim ------------------------------
2466*/
2467
2468#if __STD_C
2469int mTRIm(size_t pad)
2470#else
2471int mTRIm(pad) size_t pad;
2472#endif
2473{
2474 mstate av = get_malloc_state();
2475 /* Ensure initialization/consolidation */
2476 malloc_consolidate(av);
2477
2478#ifndef MORECORE_CANNOT_TRIM
2479 return sYSTRIm(pad, av);
2480#else
2481 return 0;
2482#endif
2483}
2484
2485
2486/*
2487 ------------------------- malloc_usable_size -------------------------
2488*/
2489
2490#if __STD_C
2491size_t mUSABLe(Void_t* mem)
2492#else
2493size_t mUSABLe(mem) Void_t* mem;
2494#endif
2495{
2496 mchunkptr p;
2497 if (mem != 0) {
2498 p = mem2chunk(mem);
2499 if (chunk_is_mmapped(p))
2500 return chunksize(p) - 2*SIZE_SZ;
2501 else if (inuse(p))
2502 return chunksize(p) - SIZE_SZ;
2503 }
2504 return 0;
2505}
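/*
  For an in-use chunk this returns the request plus whatever padding the
  allocator added, so the result is at least the original request
  (illustrative only):

    void  *p = malloc(100);
    size_t n = malloc_usable_size(p);  // n >= 100; all n bytes may be used
*/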
2506
2507/*
2508 ------------------------------ mallinfo ------------------------------
2509*/
2510
2511struct mallinfo mALLINFo()
2512{
2513 mstate av = get_malloc_state();
2514 struct mallinfo mi;
2515 int i;
2516 mbinptr b;
2517 mchunkptr p;
2518 INTERNAL_SIZE_T avail;
2519 INTERNAL_SIZE_T fastavail;
2520 int nblocks;
2521 int nfastblocks;
2522
2523 /* Ensure initialization */
2524 if (av->top == 0) malloc_consolidate(av);
2525
2526 check_malloc_state();
2527
2528 /* Account for top */
2529 avail = chunksize(av->top);
2530 nblocks = 1; /* top always exists */
2531
2532 /* traverse fastbins */
2533 nfastblocks = 0;
2534 fastavail = 0;
2535
2536 for (i = 0; i < NFASTBINS; ++i) {
2537 for (p = av->fastbins[i]; p != 0; p = p->fd) {
2538 ++nfastblocks;
2539 fastavail += chunksize(p);
2540 }
2541 }
2542
2543 avail += fastavail;
2544
2545 /* traverse regular bins */
2546 for (i = 1; i < NBINS; ++i) {
2547 b = bin_at(av, i);
2548 for (p = last(b); p != b; p = p->bk) {
2549 ++nblocks;
2550 avail += chunksize(p);
2551 }
2552 }
2553
2554 mi.smblks = nfastblocks;
2555 mi.ordblks = nblocks;
2556 mi.fordblks = avail;
2557 mi.uordblks = av->sbrked_mem - avail;
2558 mi.arena = av->sbrked_mem;
2559 mi.fsmblks = fastavail;
2560 mi.keepcost = chunksize(av->top);
2561 mi.usmblks = av->max_total_mem;
2562 /* YAP doesn't have special mmapped regions */
2563 mi.hblkhd = 0L;
2564 mi.hblks = 0L;
2565 return mi;
2566}
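/*
  A sketch of how the fields filled in above are typically read,
  illustrative only:

    struct mallinfo mi = mallinfo();
    // mi.arena    - total bytes obtained from the system (sbrked_mem)
    // mi.uordblks - bytes currently in use
    // mi.fordblks - bytes free within the arena
    // mi.keepcost - size of the top chunk, releasable via malloc_trim
    // mi.fsmblks / mi.smblks - bytes and chunk count held in fastbins
*/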
2567
2568/*
2569 ------------------------------ malloc_stats ------------------------------
2570*/
2571
2572UInt
2573Yap_givemallinfo(void)
2574{
2575 struct mallinfo mi = mALLINFo();
2576 return mi.uordblks;
2577}
2578
2579
2580void mSTATs(void)
2581{
2582 struct mallinfo mi = mALLINFo();
2583
2584
2585
2586 fprintf(stderr, "max system bytes = %10lu\n",
2587 (CHUNK_SIZE_T)(mi.usmblks));
2588 fprintf(stderr, "system bytes = %10lu\n",
2589 (CHUNK_SIZE_T)(mi.arena + mi.hblkhd));
2590 fprintf(stderr, "in use bytes = %10lu\n",
2591 (CHUNK_SIZE_T)(mi.uordblks + mi.hblkhd));
2592
2593}
2594
2595
2596/*
2597 ------------------------------ mallopt ------------------------------
2598*/
2599
2600#if __STD_C
2601int mALLOPt(int param_number, int value)
2602#else
2603int mALLOPt(param_number, value) int param_number; int value;
2604#endif
2605{
2606 mstate av = get_malloc_state();
2607 /* Ensure initialization/consolidation */
2608 malloc_consolidate(av);
2609
2610 switch(param_number) {
2611 case M_MXFAST:
2612 if (value >= 0 && value <= MAX_FAST_SIZE) {
2613 set_max_fast(av, value);
2614 return 1;
2615 }
2616 else
2617 return 0;
2618
2619 case M_TRIM_THRESHOLD:
2620 av->trim_threshold = value;
2621 return 1;
2622
2623 case M_TOP_PAD:
2624 av->top_pad = value;
2625 return 1;
2626
2627 default:
2628 return 0;
2629 }
2630}
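/*
  Tuning examples for the three parameters handled above, illustrative
  only (values are in bytes):

    mallopt(M_MXFAST, 0);                    // disable fastbins entirely
    mallopt(M_TRIM_THRESHOLD, 1024 * 1024);  // only trim top beyond 1MB
    mallopt(M_TOP_PAD, 64 * 1024);           // pad each system request by 64KB
*/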
2631
2632/*
2633 -------------------- Alternative MORECORE functions --------------------
2634*/
2635
2636
2637/*
2638 General Requirements for MORECORE.
2639
2640 The MORECORE function must have the following properties:
2641
2642 If MORECORE_CONTIGUOUS is false:
2643
2644 * MORECORE must allocate in multiples of pagesize. It will
2645 only be called with arguments that are multiples of pagesize.
2646
2647 * MORECORE(0) must return an address that is at least
2648 MALLOC_ALIGNMENT aligned. (Page-aligning always suffices.)
2649
2650 else (i.e. If MORECORE_CONTIGUOUS is true):
2651
2652 * Consecutive calls to MORECORE with positive arguments
2653 return increasing addresses, indicating that space has been
2654 contiguously extended.
2655
2656 * MORECORE need not allocate in multiples of pagesize.
2657 Calls to MORECORE need not have args of multiples of pagesize.
2658
2659 * MORECORE need not page-align.
2660
2661 In either case:
2662
2663 * MORECORE may allocate more memory than requested. (Or even less,
2664 but this will generally result in a malloc failure.)
2665
2666 * MORECORE must not allocate memory when given argument zero, but
2667 instead return one past the end address of memory from previous
2668 nonzero call. This malloc does NOT call MORECORE(0)
2669 until at least one call with positive arguments is made, so
2670 the initial value returned is not important.
2671
2672 * Even though consecutive calls to MORECORE need not return contiguous
2673 addresses, it must be OK for malloc'ed chunks to span multiple
2674 regions in those cases where they do happen to be contiguous.
2675
2676 * MORECORE need not handle negative arguments -- it may instead
2677 just return MORECORE_FAILURE when given negative arguments.
2678 Negative arguments are always multiples of pagesize. MORECORE
2679 must not misinterpret negative args as large positive unsigned
2680 args. You can suppress all such calls from even occurring by defining
2681 MORECORE_CANNOT_TRIM.
2682
2683 There is some variation across systems about the type of the
2684 argument to sbrk/MORECORE. If size_t is unsigned, then it cannot
2685 actually be size_t, because sbrk supports negative args, so it is
2686 normally the signed type of the same width as size_t (sometimes
2687 declared as "intptr_t", and sometimes "ptrdiff_t"). It doesn't much
2688 matter though. Internally, we use "long" as arguments, which should
2689 work across all reasonable possibilities.
2690
2691 Additionally, if MORECORE ever returns failure for a positive
2692 request, and HAVE_MMAP is true, then mmap is used as a noncontiguous
2693 system allocator. This is a useful backup strategy for systems with
2694 holes in address spaces -- in this case sbrk cannot contiguously
2695 expand the heap, but mmap may be able to map noncontiguous space.
2696
2697 If you'd like mmap to ALWAYS be used, you can define MORECORE to be
2698 a function that always returns MORECORE_FAILURE.
2699
2700 Malloc only has limited ability to detect failures of MORECORE
2701 to supply contiguous space when it says it can. In particular,
2702 multithreaded programs that do not use locks may result in
2703 race conditions across calls to MORECORE that result in gaps
2704 that cannot be detected as such, and subsequent corruption.
2705
2706 If you are using this malloc with something other than sbrk (or its
2707 emulation) to supply memory regions, you probably want to set
2708 MORECORE_CONTIGUOUS as false. As an example, here is a custom
2709 allocator kindly contributed for pre-OSX macOS. It uses virtually
2710 but not necessarily physically contiguous non-paged memory (locked
2711 in, present and won't get swapped out). You can use it by
2712 uncommenting this section, adding some #includes, and setting up the
2713 appropriate defines above:
2714
2715 #define MORECORE osMoreCore
2716 #define MORECORE_CONTIGUOUS 0
2717
2718 There is also a shutdown routine that should somehow be called for
2719 cleanup upon program exit.
2720
2721 #define MAX_POOL_ENTRIES 100
2722 #define MINIMUM_MORECORE_SIZE (64 * 1024)
2723 static int next_os_pool;
2724 void *our_os_pools[MAX_POOL_ENTRIES];
2725
2726 void *osMoreCore(int size)
2727 {
2728 void *ptr = 0;
2729 static void *sbrk_top = 0;
2730
2731 if (size > 0)
2732 {
2733 if (size < MINIMUM_MORECORE_SIZE)
2734 size = MINIMUM_MORECORE_SIZE;
2735 if (CurrentExecutionLevel() == kTaskLevel)
2736 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
2737 if (ptr == 0)
2738 {
2739 return (void *) MORECORE_FAILURE;
2740 }
2741 // save ptrs so they can be freed during cleanup
2742 our_os_pools[next_os_pool] = ptr;
2743 next_os_pool++;
2744 ptr = (void *) ((((CHUNK_SIZE_T) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
2745 sbrk_top = (char *) ptr + size;
2746 return ptr;
2747 }
2748 else if (size < 0)
2749 {
2750 // we don't currently support shrink behavior
2751 return (void *) MORECORE_FAILURE;
2752 }
2753 else
2754 {
2755 return sbrk_top;
2756 }
2757 }
2758
2759 // cleanup any allocated memory pools
2760 // called as last thing before shutting down driver
2761
2762 void osCleanupMem(void)
2763 {
2764 void **ptr;
2765
2766 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
2767 if (*ptr)
2768 {
2769 PoolDeallocate(*ptr);
2770 *ptr = 0;
2771 }
2772 }
2773
2774*/
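/*
  As noted above, mmap-only operation can be forced by making MORECORE
  always fail.  A minimal sketch, assuming HAVE_MMAP is enabled (the
  function name is arbitrary):

  static void *failingMoreCore(int size)
  {
    return (void *) MORECORE_FAILURE;   // never extend the heap via sbrk
  }

  #define MORECORE failingMoreCore
  #define MORECORE_CONTIGUOUS 0
*/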
2775
2776
2777
2778/* ------------------------------------------------------------
2779History:
2780 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
2781 * Fix malloc_state bitmap array misdeclaration
2782
2783 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
2784 * Allow tuning of FIRST_SORTED_BIN_SIZE
2785 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
2786 * Better detection and support for non-contiguousness of MORECORE.
2787 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
2788 * Bypass most of malloc if no frees. Thanks To Emery Berger.
2789 * Fix freeing of old top non-contiguous chunk in sysmalloc.
2790 * Raised default trim and map thresholds to 256K.
2791 * Fix mmap-related #defines. Thanks to Lubos Lunak.
2792 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
2793 * Branch-free bin calculation
2794 * Default trim and mmap thresholds now 256K.
2795
2796 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
2797 * Introduce independent_comalloc and independent_calloc.
2798 Thanks to Michael Pachos for motivation and help.
2799 * Make optional .h file available
2800 * Allow > 2GB requests on 32bit systems.
2801 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
2802 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
2803 and Anonymous.
2804 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
2805 helping test this.)
2806 * memalign: check alignment arg
2807 * realloc: don't try to shift chunks backwards, since this
2808 leads to more fragmentation in some programs and doesn't
2809 seem to help in any others.
2810 * Collect all cases in malloc requiring system memory into sYSMALLOc
2811 * Use mmap as backup to sbrk
2812 * Place all internal state in malloc_state
2813 * Introduce fastbins (although similar to 2.5.1)
2814 * Many minor tunings and cosmetic improvements
2815 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
2816 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
2817 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
2818 * Include errno.h to support default failure action.
2819
2820 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
2821 * return null for negative arguments
2822 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
2823 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
2824 (e.g. WIN32 platforms)
2825 * Cleanup header file inclusion for WIN32 platforms
2826 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
2827 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
2828 memory allocation routines
2829 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
2830 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
2831 usage of 'assert' in non-WIN32 code
2832 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
2833 avoid infinite loop
2834 * Always call 'fREe()' rather than 'free()'
2835
2836 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
2837 * Fixed ordering problem with boundary-stamping
2838
2839 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
2840 * Added pvalloc, as recommended by H.J. Liu
2841 * Added 64bit pointer support mainly from Wolfram Gloger
2842 * Added anonymously donated WIN32 sbrk emulation
2843 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
2844 * malloc_extend_top: fix mask error that caused wastage after
2845 foreign sbrks
2846 * Add linux mremap support code from HJ Liu
2847
2848 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
2849 * Integrated most documentation with the code.
2850 * Add support for mmap, with help from
2851 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2852 * Use last_remainder in more cases.
2853 * Pack bins using idea from colin@nyx10.cs.du.edu
2854 * Use ordered bins instead of best-fit threshold
2855 * Eliminate block-local decls to simplify tracing and debugging.
2856 * Support another case of realloc via move into top
2857 * Fix error occurring when initial sbrk_base not word-aligned.
2858 * Rely on page size for units instead of SBRK_UNIT to
2859 avoid surprises about sbrk alignment conventions.
2860 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
2861 (raymond@es.ele.tue.nl) for the suggestion.
2862 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
2863 * More precautions for cases where other routines call sbrk,
2864 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
2865 * Added macros etc., allowing use in linux libc from
2866 H.J. Lu (hjl@gnu.ai.mit.edu)
2867 * Inverted this history list
2868
2869 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
2870 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
2871 * Removed all preallocation code since under current scheme
2872 the work required to undo bad preallocations exceeds
2873 the work saved in good cases for most test programs.
2874 * No longer use return list or unconsolidated bins since
2875 no scheme using them consistently outperforms those that don't
2876 given above changes.
2877 * Use best fit for very large chunks to prevent some worst-cases.
2878 * Added some support for debugging
2879
2880 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
2881 * Removed footers when chunks are in use. Thanks to
2882 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
2883
2884 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
2885 * Added malloc_trim, with help from Wolfram Gloger
2886 (wmglo@Dent.MED.Uni-Muenchen.DE).
2887
2888 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
2889
2890 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
2891 * realloc: try to expand in both directions
2892 * malloc: swap order of clean-bin strategy;
2893 * realloc: only conditionally expand backwards
2894 * Try not to scavenge used bins
2895 * Use bin counts as a guide to preallocation
2896 * Occasionally bin return list chunks in first scan
2897 * Add a few optimizations from colin@nyx10.cs.du.edu
2898
2899 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
2900 * faster bin computation & slightly different binning
2901 * merged all consolidations to one part of malloc proper
2902 (eliminating old malloc_find_space & malloc_clean_bin)
2903 * Scan 2 returns chunks (not just 1)
2904 * Propagate failure in realloc if malloc returns 0
2905 * Add stuff to allow compilation on non-ANSI compilers
2906 from kpv@research.att.com
2907
2908 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
2909 * removed potential for odd address access in prev_chunk
2910 * removed dependency on getpagesize.h
2911 * misc cosmetics and a bit more internal documentation
2912 * anticosmetics: mangled names in macros to evade debugger strangeness
2913 * tested on sparc, hp-700, dec-mips, rs6000
2914 with gcc & native cc (hp, dec only) allowing
2915 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
2916
2917 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
2918 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
2919 structure of old version, but most details differ.)
2920
2921*/
2922
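/*
  YAP glue: carve the allocator's malloc_state out of the top of the
  (suitably aligned) YAP heap, so the allocator state lives inside the
  Prolog heap itself.
*/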
2923void
2924Yap_initdlmalloc(void)
2925{
2926 HeapTop = (ADDR)ALIGN_SIZE(HeapTop,16);
2927 Yap_NOfMemoryHoles = 0;
2928 Yap_av = (struct malloc_state *)HeapTop;
2929 memset((void *)Yap_av, 0, sizeof(struct malloc_state));
2930 HeapTop += sizeof(struct malloc_state);
2931 HeapTop = (ADDR)ALIGN_SIZE(HeapTop,2*SIZEOF_LONG_LONG_INT);
2932 HeapMax = HeapTop-Yap_HeapBase;
2933}
2934
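/*
  Presumably called after a saved state has been reloaded at a different
  base address: walks top, last_remainder, the fastbins and the regular
  bins, rebasing every stored chunk pointer with ChunkPtrAdjust while
  re-running the usual consistency checks.
*/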
2935void Yap_RestoreDLMalloc(void)
2936{
2937 mstate av = Yap_av;
2938 int i;
2939 mchunkptr p;
2940 mchunkptr q;
2941 mbinptr b;
2942 unsigned int binbit;
2943 int empty;
2944 unsigned int idx;
2945 INTERNAL_SIZE_T size;
2946 CHUNK_SIZE_T total = 0;
2947 int max_fast_bin;
2948
2949 /* internal size_t must be no wider than pointer type */
2950 assert(sizeof(INTERNAL_SIZE_T) <= sizeof(char*));
2951
2952 /* alignment is a power of 2 */
2953 assert((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-1)) == 0);
2954
2955 /* cannot run remaining checks until fully initialized */
2956 if (av->top == 0 || av->top == initial_top(av))
2957 return;
2958
2959 /* pagesize is a power of 2 */
2960 assert((av->pagesize & (av->pagesize-1)) == 0);
2961
2962 /* properties of fastbins */
2963
2964 /* max_fast is in allowed range */
2965 assert(get_max_fast(av) <= request2size(MAX_FAST_SIZE));
2966
2967 max_fast_bin = fastbin_index(av->max_fast);
2968
2969 if (av->top) {
2970 av->top = ChunkPtrAdjust(av->top);
2971 }
2972 if (av->last_remainder) {
2973 av->last_remainder = ChunkPtrAdjust(av->last_remainder);
2974 }
2975 for (i = 0; i < NFASTBINS; ++i) {
2976
2977 if (av->fastbins[i]) {
2978 av->fastbins[i] = ChunkPtrAdjust(av->fastbins[i]);
2979 }
2980 p = av->fastbins[i];
2981
2982 /* all bins past max_fast are empty */
2983 if (i > max_fast_bin)
2984 assert(p == 0);
2985
2986 while (p != 0) {
2987 /* each chunk claims to be inuse */
2988 check_inuse_chunk(p);
2989 total += chunksize(p);
2990 /* chunk belongs in this bin */
2991 assert(fastbin_index(chunksize(p)) == i);
2992 if (p->fd)
2993 p->fd = ChunkPtrAdjust(p->fd);
2994 if (p->bk)
2995 p->bk = ChunkPtrAdjust(p->bk);
2996 p = p->fd;
2997 }
2998 }
2999
3000 if (total != 0)
3001 assert(have_fastchunks(av));
3002 else if (!have_fastchunks(av))
3003 assert(total == 0);
3004
3005 for (i = 0; i < NBINS*2; i++) {
3006 if (av->bins[i]) {
3007 av->bins[i] = ChunkPtrAdjust(av->bins[i]);
3008 }
3009 }
3010
3011 /* check normal bins */
3012 for (i = 1; i < NBINS; ++i) {
3013 b = bin_at(av,i);
3014
3015 /* binmap is accurate (except for bin 1 == unsorted_chunks) */
3016 if (i >= 2) {
3017 binbit = get_binmap(av,i);
3018 empty = last(b) == b;
3019 if (!binbit)
3020 assert(empty);
3021 else if (!empty)
3022 assert(binbit);
3023 }
3024
3025 for (p = last(b); p != b; p = p->bk) {
3026 /* each chunk claims to be free */
3027 check_free_chunk(p);
3028 if (p->fd)
3029 p->fd = ChunkPtrAdjust(p->fd);
3030 if (p->bk)
3031 p->bk = ChunkPtrAdjust(p->bk);
3032 size = chunksize(p);
3033 total += size;
3034 if (i >= 2) {
3035 /* chunk belongs in bin */
3036 idx = bin_index(size);
3037 assert(idx == i);
3038 /* lists are sorted */
3039 if ((CHUNK_SIZE_T) size >= (CHUNK_SIZE_T)(FIRST_SORTED_BIN_SIZE)) {
3040 assert(p->bk == b ||
3041 (CHUNK_SIZE_T)chunksize(p->bk) >=
3042 (CHUNK_SIZE_T)chunksize(p));
3043 }
3044 }
3045 /* chunk is followed by a legal chain of inuse chunks */
3046 for (q = next_chunk(p);
3047 (q != av->top && inuse(q) &&
3048 (CHUNK_SIZE_T)(chunksize(q)) >= MINSIZE);
3049 q = next_chunk(q)) {
3050 check_inuse_chunk(q);
3051 }
3052 }
3053 }
3054
3055}
3056
3057
3058#endif /* USE_DL_MALLOC */
3059