1/*
2 Simple DirectMedia Layer
3 Copyright (C) 1997-2021 Sam Lantinga <slouken@libsdl.org>
4
5 This software is provided 'as-is', without any express or implied
6 warranty. In no event will the authors be held liable for any damages
7 arising from the use of this software.
8
9 Permission is granted to anyone to use this software for any purpose,
10 including commercial applications, and to alter it and redistribute it
11 freely, subject to the following restrictions:
12
13 1. The origin of this software must not be misrepresented; you must not
14 claim that you wrote the original software. If you use this software
15 in a product, an acknowledgment in the product documentation would be
16 appreciated but is not required.
17 2. Altered source versions must be plainly marked as such, and must not be
18 misrepresented as being the original software.
19 3. This notice may not be removed or altered from any source distribution.
20*/
21
22#if defined(__clang_analyzer__) && !defined(SDL_DISABLE_ANALYZE_MACROS)
23#define SDL_DISABLE_ANALYZE_MACROS 1
24#endif
25
26#include "../SDL_internal.h"
27
28/* This file contains portable memory management functions for SDL */
29#include "SDL_stdinc.h"
30#include "SDL_atomic.h"
31#include "SDL_error.h"
32
33#ifndef HAVE_MALLOC
34#define LACKS_SYS_TYPES_H
35#define LACKS_STDIO_H
36#define LACKS_STRINGS_H
37#define LACKS_STRING_H
38#define LACKS_STDLIB_H
39#define ABORT
40#define USE_LOCKS 1
41#define USE_DL_PREFIX
42
43/*
44 This is a version (aka dlmalloc) of malloc/free/realloc written by
45 Doug Lea and released to the public domain, as explained at
46 http://creativecommons.org/licenses/publicdomain. Send questions,
47 comments, complaints, performance data, etc to dl@cs.oswego.edu
48
49* Version 2.8.3 Thu Sep 22 11:16:15 2005 Doug Lea (dl at gee)
50
51 Note: There may be an updated version of this malloc obtainable at
52 ftp://gee.cs.oswego.edu/pub/misc/malloc.c
53 Check before installing!
54
55* Quickstart
56
57 This library is all in one file to simplify the most common usage:
58 ftp it, compile it (-O3), and link it into another program. All of
59 the compile-time options default to reasonable values for use on
60 most platforms. You might later want to step through various
61 compile-time and dynamic tuning options.
62
63 For convenience, an include file for code using this malloc is at:
64 ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.3.h
65 You don't really need this .h file unless you call functions not
66 defined in your system include files. The .h file contains only the
67 excerpts from this file needed for using this malloc on ANSI C/C++
68 systems, so long as you haven't changed compile-time options about
69 naming and tuning parameters. If you do, then you can create your
70 own malloc.h that does include all settings by cutting at the point
  indicated below. Note that your C library may already provide a
  malloc based on some version of this malloc (for example, on Linux).
  You might still want to use the one
74 in this file to customize settings or to avoid overheads associated
75 with library versions.
76
77* Vital statistics:
78
79 Supported pointer/size_t representation: 4 or 8 bytes
80 size_t MUST be an unsigned type of the same width as
81 pointers. (If you are using an ancient system that declares
82 size_t as a signed type, or need it to be a different width
83 than pointers, you can use a previous release of this malloc
84 (e.g. 2.7.2) supporting these.)
85
86 Alignment: 8 bytes (default)
87 This suffices for nearly all current machines and C compilers.
88 However, you can define MALLOC_ALIGNMENT to be wider than this
  if necessary (up to 128 bytes), at the expense of using more space.
90
  Minimum overhead per allocated chunk: 4 or 8 bytes (if 4-byte sizes)
                                        8 or 16 bytes (if 8-byte sizes)
93 Each malloced chunk has a hidden word of overhead holding size
94 and status information, and additional cross-check word
95 if FOOTERS is defined.
96
97 Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead)
98 8-byte ptrs: 32 bytes (including overhead)
99
100 Even a request for zero bytes (i.e., malloc(0)) returns a
101 pointer to something of the minimum allocatable size.
  The maximum overhead wastage (i.e., number of extra bytes
  allocated beyond those requested in malloc) is less than or equal
104 to the minimum size, except for requests >= mmap_threshold that
105 are serviced via mmap(), where the worst case wastage is about
106 32 bytes plus the remainder from a system page (the minimal
107 mmap unit); typically 4096 or 8192 bytes.
108
109 Security: static-safe; optionally more or less
110 The "security" of malloc refers to the ability of malicious
111 code to accentuate the effects of errors (for example, freeing
112 space that is not currently malloc'ed or overwriting past the
113 ends of chunks) in code that calls malloc. This malloc
114 guarantees not to modify any memory locations below the base of
115 heap, i.e., static variables, even in the presence of usage
116 errors. The routines additionally detect most improper frees
117 and reallocs. All this holds as long as the static bookkeeping
118 for malloc itself is not corrupted by some other means. This
119 is only one aspect of security -- these checks do not, and
120 cannot, detect all possible programming errors.
121
122 If FOOTERS is defined nonzero, then each allocated chunk
123 carries an additional check word to verify that it was malloced
124 from its space. These check words are the same within each
125 execution of a program using malloc, but differ across
126 executions, so externally crafted fake chunks cannot be
127 freed. This improves security by rejecting frees/reallocs that
128 could corrupt heap memory, in addition to the checks preventing
129 writes to statics that are always on. This may further improve
130 security at the expense of time and space overhead. (Note that
131 FOOTERS may also be worth using with MSPACES.)
132
133 By default detected errors cause the program to abort (calling
134 "abort()"). You can override this to instead proceed past
135 errors by defining PROCEED_ON_ERROR. In this case, a bad free
136 has no effect, and a malloc that encounters a bad address
137 caused by user overwrites will ignore the bad address by
138 dropping pointers and indices to all known memory. This may
139 be appropriate for programs that should continue if at all
140 possible in the face of programming errors, although they may
141 run out of memory because dropped memory is never reclaimed.
142
143 If you don't like either of these options, you can define
144 CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything
  else. And if you are sure that your program using malloc has
146 no errors or vulnerabilities, you can define INSECURE to 1,
147 which might (or might not) provide a small performance improvement.
148
149 Thread-safety: NOT thread-safe unless USE_LOCKS defined
150 When USE_LOCKS is defined, each public call to malloc, free,
151 etc is surrounded with either a pthread mutex or a win32
152 spinlock (depending on WIN32). This is not especially fast, and
153 can be a major bottleneck. It is designed only to provide
154 minimal protection in concurrent environments, and to provide a
155 basis for extensions. If you are using malloc in a concurrent
156 program, consider instead using ptmalloc, which is derived from
157 a version of this malloc. (See http://www.malloc.de).
158
159 System requirements: Any combination of MORECORE and/or MMAP/MUNMAP
160 This malloc can use unix sbrk or any emulation (invoked using
161 the CALL_MORECORE macro) and/or mmap/munmap or any emulation
162 (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system
163 memory. On most unix systems, it tends to work best if both
164 MORECORE and MMAP are enabled. On Win32, it uses emulations
165 based on VirtualAlloc. It also uses common C library functions
166 like memset.
167
168 Compliance: I believe it is compliant with the Single Unix Specification
169 (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably
170 others as well.
171
172* Overview of algorithms
173
174 This is not the fastest, most space-conserving, most portable, or
175 most tunable malloc ever written. However it is among the fastest
176 while also being among the most space-conserving, portable and
177 tunable. Consistent balance across these factors results in a good
178 general-purpose allocator for malloc-intensive programs.
179
180 In most ways, this malloc is a best-fit allocator. Generally, it
181 chooses the best-fitting existing chunk for a request, with ties
182 broken in approximately least-recently-used order. (This strategy
183 normally maintains low fragmentation.) However, for requests less
184 than 256bytes, it deviates from best-fit when there is not an
185 exactly fitting available chunk by preferring to use space adjacent
186 to that used for the previous small request, as well as by breaking
187 ties in approximately most-recently-used order. (These enhance
188 locality of series of small allocations.) And for very large requests
189 (>= 256Kb by default), it relies on system memory mapping
190 facilities, if supported. (This helps avoid carrying around and
191 possibly fragmenting memory used only for large chunks.)
192
193 All operations (except malloc_stats and mallinfo) have execution
194 times that are bounded by a constant factor of the number of bits in
195 a size_t, not counting any clearing in calloc or copying in realloc,
196 or actions surrounding MORECORE and MMAP that have times
197 proportional to the number of non-contiguous regions returned by
198 system allocation routines, which is often just 1.
199
200 The implementation is not very modular and seriously overuses
201 macros. Perhaps someday all C compilers will do as good a job
202 inlining modular code as can now be done by brute-force expansion,
  but for now, enough of them seem not to.
204
205 Some compilers issue a lot of warnings about code that is
206 dead/unreachable only on some platforms, and also about intentional
207 uses of negation on unsigned types. All known cases of each can be
208 ignored.
209
210 For a longer but out of date high-level description, see
211 http://gee.cs.oswego.edu/dl/html/malloc.html
212
213* MSPACES
214 If MSPACES is defined, then in addition to malloc, free, etc.,
215 this file also defines mspace_malloc, mspace_free, etc. These
216 are versions of malloc routines that take an "mspace" argument
217 obtained using create_mspace, to control all internal bookkeeping.
218 If ONLY_MSPACES is defined, only these versions are compiled.
219 So if you would like to use this allocator for only some allocations,
220 and your system malloc for others, you can compile with
221 ONLY_MSPACES and then do something like...
222 static mspace mymspace = create_mspace(0,0); // for example
223 #define mymalloc(bytes) mspace_malloc(mymspace, bytes)
224
225 (Note: If you only need one instance of an mspace, you can instead
226 use "USE_DL_PREFIX" to relabel the global malloc.)
227
228 You can similarly create thread-local allocators by storing
229 mspaces as thread-locals. For example:
230 static __thread mspace tlms = 0;
231 void* tlmalloc(size_t bytes) {
232 if (tlms == 0) tlms = create_mspace(0, 0);
233 return mspace_malloc(tlms, bytes);
234 }
235 void tlfree(void* mem) { mspace_free(tlms, mem); }
236
237 Unless FOOTERS is defined, each mspace is completely independent.
238 You cannot allocate from one and free to another (although
239 conformance is only weakly checked, so usage errors are not always
240 caught). If FOOTERS is defined, then each chunk carries around a tag
241 indicating its originating mspace, and frees are directed to their
242 originating spaces.
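
  For example, when FOOTERS is defined (and ONLY_MSPACES is not, so the
  plain free entry point exists), cross-space freeing can be sketched as:

    mspace ms = create_mspace(0, 0);
    void* p = mspace_malloc(ms, 100);
    free(p);  // the footer tag routes this back to ms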
243
244 ------------------------- Compile-time options ---------------------------
245
246Be careful in setting #define values for numerical constants of type
247size_t. On some systems, literal values are not automatically extended
to size_t precision unless they are explicitly cast.
249
250WIN32 default: defined if _WIN32 defined
251 Defining WIN32 sets up defaults for MS environment and compilers.
252 Otherwise defaults are for unix.
253
254MALLOC_ALIGNMENT default: (size_t)8
255 Controls the minimum alignment for malloc'ed chunks. It must be a
256 power of two and at least 8, even on machines for which smaller
257 alignments would suffice. It may be defined as larger than this
258 though. Note however that code and data structures are optimized for
259 the case of 8-byte alignment.
260
261MSPACES default: 0 (false)
262 If true, compile in support for independent allocation spaces.
263 This is only supported if HAVE_MMAP is true.
264
265ONLY_MSPACES default: 0 (false)
266 If true, only compile in mspace versions, not regular versions.
267
268USE_LOCKS default: 0 (false)
269 Causes each call to each public routine to be surrounded with
270 pthread or WIN32 mutex lock/unlock. (If set true, this can be
271 overridden on a per-mspace basis for mspace versions.)
272
273FOOTERS default: 0
274 If true, provide extra checking and dispatching by placing
275 information in the footers of allocated chunks. This adds
276 space and time overhead.
277
278INSECURE default: 0
279 If true, omit checks for usage errors and heap space overwrites.
280
281USE_DL_PREFIX default: NOT defined
282 Causes compiler to prefix all public routines with the string 'dl'.
283 This can be useful when you only want to use this malloc in one part
284 of a program, using your regular system malloc elsewhere.
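
  For example (a sketch; with USE_DL_PREFIX defined, the routines in this
  file are named dlmalloc, dlfree, etc., while malloc/free still refer to
  the system allocator):

    void* buf = dlmalloc(1024);    // from this allocator
    char* str = strdup("hello");   // from the system malloc
    dlfree(buf);
    free(str);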
285
286ABORT default: defined as abort()
287 Defines how to abort on failed checks. On most systems, a failed
288 check cannot die with an "assert" or even print an informative
289 message, because the underlying print routines in turn call malloc,
290 which will fail again. Generally, the best policy is to simply call
291 abort(). It's not very useful to do more than this because many
292 errors due to overwriting will show up as address faults (null, odd
293 addresses etc) rather than malloc-triggered checks, so will also
294 abort. Also, most compilers know that abort() does not return, so
295 can better optimize code conditionally calling it.
296
297PROCEED_ON_ERROR default: defined as 0 (false)
  Controls whether detected bad addresses are bypassed rather than
  causing an abort. If set, detected bad arguments to free and
300 realloc are ignored. And all bookkeeping information is zeroed out
301 upon a detected overwrite of freed heap space, thus losing the
302 ability to ever return it from malloc again, but enabling the
303 application to proceed. If PROCEED_ON_ERROR is defined, the
304 static variable malloc_corruption_error_count is compiled in
305 and can be examined to see if errors have occurred. This option
306 generates slower code than the default abort policy.
307
308DEBUG default: NOT defined
309 The DEBUG setting is mainly intended for people trying to modify
310 this code or diagnose problems when porting to new platforms.
311 However, it may also be able to better isolate user errors than just
312 using runtime checks. The assertions in the check routines spell
313 out in more detail the assumptions and invariants underlying the
314 algorithms. The checking is fairly extensive, and will slow down
315 execution noticeably. Calling malloc_stats or mallinfo with DEBUG
316 set will attempt to check every non-mmapped allocated and free chunk
317 in the course of computing the summaries.
318
319ABORT_ON_ASSERT_FAILURE default: defined as 1 (true)
320 Debugging assertion failures can be nearly impossible if your
321 version of the assert macro causes malloc to be called, which will
322 lead to a cascade of further failures, blowing the runtime stack.
  ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
324 which will usually make debugging easier.
325
326MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32
  The action to take before "return 0" when malloc fails because no
  memory is available.
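
  For example, a sketch of routing failures to a handler of your own
  (the handler name here is hypothetical):

    #define MALLOC_FAILURE_ACTION  my_out_of_memory_hook();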
329
330HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES
331 True if this system supports sbrk or an emulation of it.
332
333MORECORE default: sbrk
334 The name of the sbrk-style system routine to call to obtain more
335 memory. See below for guidance on writing custom MORECORE
336 functions. The type of the argument to sbrk/MORECORE varies across
337 systems. It cannot be size_t, because it supports negative
338 arguments, so it is normally the signed type of the same width as
339 size_t (sometimes declared as "intptr_t"). It doesn't much matter
340 though. Internally, we only call it with arguments less than half
341 the max value of a size_t, which should work across all reasonable
342 possibilities, although sometimes generating compiler warnings. See
343 near the end of this file for guidelines for creating a custom
344 version of MORECORE.
345
346MORECORE_CONTIGUOUS default: 1 (true)
347 If true, take advantage of fact that consecutive calls to MORECORE
348 with positive arguments always return contiguous increasing
349 addresses. This is true of unix sbrk. It does not hurt too much to
350 set it true anyway, since malloc copes with non-contiguities.
351 Setting it false when definitely non-contiguous saves time
352 and possibly wasted space it would take to discover this though.
353
354MORECORE_CANNOT_TRIM default: NOT defined
355 True if MORECORE cannot release space back to the system when given
356 negative arguments. This is generally necessary only if you are
357 using a hand-crafted MORECORE function that cannot handle negative
358 arguments.
359
360HAVE_MMAP default: 1 (true)
361 True if this system supports mmap or an emulation of it. If so, and
362 HAVE_MORECORE is not true, MMAP is used for all system
363 allocation. If set and HAVE_MORECORE is true as well, MMAP is
364 primarily used to directly allocate very large blocks. It is also
365 used as a backup strategy in cases where MORECORE fails to provide
366 space from system. Note: A single call to MUNMAP is assumed to be
  able to unmap memory that may have been allocated using multiple calls
368 to MMAP, so long as they are adjacent.
369
370HAVE_MREMAP default: 1 on linux, else 0
  If true, realloc() uses mremap() to re-allocate large blocks and
372 extend or shrink allocation spaces.
373
374MMAP_CLEARS default: 1 on unix
375 True if mmap clears memory so calloc doesn't need to. This is true
376 for standard unix mmap using /dev/zero.
377
378USE_BUILTIN_FFS default: 0 (i.e., not used)
379 Causes malloc to use the builtin ffs() function to compute indices.
380 Some compilers may recognize and intrinsify ffs to be faster than the
381 supplied C version. Also, the case of x86 using gcc is special-cased
382 to an asm instruction, so is already as fast as it can be, and so
383 this setting has no effect. (On most x86s, the asm version is only
384 slightly faster than the C version.)
385
386malloc_getpagesize default: derive from system includes, or 4096.
387 The system page size. To the extent possible, this malloc manages
388 memory from the system in page-size units. This may be (and
389 usually is) a function rather than a constant. This is ignored
  if WIN32, where page size is determined using GetSystemInfo during
391 initialization.
392
393USE_DEV_RANDOM default: 0 (i.e., not used)
394 Causes malloc to use /dev/random to initialize secure magic seed for
395 stamping footers. Otherwise, the current time is used.
396
397NO_MALLINFO default: 0
398 If defined, don't compile "mallinfo". This can be a simple way
399 of dealing with mismatches between system declarations and
400 those in this file.
401
402MALLINFO_FIELD_TYPE default: size_t
403 The type of the fields in the mallinfo struct. This was originally
404 defined as "int" in SVID etc, but is more usefully defined as
  size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
406
407REALLOC_ZERO_BYTES_FREES default: not defined
408 This should be set if a call to realloc with zero bytes should
409 be the same as a call to free. Some people think it should. Otherwise,
410 since this malloc returns a unique pointer for malloc(0), so does
411 realloc(p, 0).
412
413LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H
414LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H
415LACKS_STDLIB_H default: NOT defined unless on WIN32
416 Define these if your system does not have these header files.
417 You might need to manually insert some of the declarations they provide.
418
419DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS,
420 system_info.dwAllocationGranularity in WIN32,
421 otherwise 64K.
422 Also settable using mallopt(M_GRANULARITY, x)
423 The unit for allocating and deallocating memory from the system. On
424 most systems with contiguous MORECORE, there is no reason to
425 make this more than a page. However, systems with MMAP tend to
426 either require or encourage larger granularities. You can increase
  this value to prevent system allocation functions from being called so
428 often, especially if they are slow. The value must be at least one
429 page and must be a power of two. Setting to 0 causes initialization
430 to either page size or win32 region size. (Note: In previous
431 versions of malloc, the equivalent of this option was called
432 "TOP_PAD")
433
434DEFAULT_TRIM_THRESHOLD default: 2MB
435 Also settable using mallopt(M_TRIM_THRESHOLD, x)
436 The maximum amount of unused top-most memory to keep before
437 releasing via malloc_trim in free(). Automatic trimming is mainly
438 useful in long-lived programs using contiguous MORECORE. Because
439 trimming via sbrk can be slow on some systems, and can sometimes be
440 wasteful (in cases where programs immediately afterward allocate
441 more large chunks) the value should be high enough so that your
442 overall system performance would improve by releasing this much
443 memory. As a rough guide, you might set to a value close to the
444 average size of a process (program) running on your system.
445 Releasing this much memory would allow such a process to run in
446 memory. Generally, it is worth tuning trim thresholds when a
447 program undergoes phases where several large chunks are allocated
448 and released in ways that can reuse each other's storage, perhaps
449 mixed with phases where there are no such chunks at all. The trim
450 value must be greater than page size to have any useful effect. To
451 disable trimming completely, you can set to MAX_SIZE_T. Note that the trick
452 some people use of mallocing a huge space and then freeing it at
453 program startup, in an attempt to reserve system memory, doesn't
454 have the intended effect under automatic trimming, since that memory
455 will immediately be returned to the system.
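
  For example, a program that repeatedly allocates and frees large
  buffers might raise the threshold at compile time (a sketch):

    #define DEFAULT_TRIM_THRESHOLD ((size_t)8U * (size_t)1024U * (size_t)1024U)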
456
457DEFAULT_MMAP_THRESHOLD default: 256K
458 Also settable using mallopt(M_MMAP_THRESHOLD, x)
459 The request size threshold for using MMAP to directly service a
460 request. Requests of at least this size that cannot be allocated
461 using already-existing space will be serviced via mmap. (If enough
462 normal freed space already exists it is used instead.) Using mmap
463 segregates relatively large chunks of memory so that they can be
464 individually obtained and released from the host system. A request
465 serviced through mmap is never reused by any other request (at least
466 not directly; the system may just so happen to remap successive
467 requests to the same locations). Segregating space in this way has
468 the benefits that: Mmapped space can always be individually released
469 back to the system, which helps keep the system level memory demands
470 of a long-lived program low. Also, mapped memory doesn't become
471 `locked' between other chunks, as can happen with normally allocated
472 chunks, which means that even trimming via malloc_trim would not
473 release them. However, it has the disadvantage that the space
474 cannot be reclaimed, consolidated, and then used to service later
475 requests, as happens with normal chunks. The advantages of mmap
476 nearly always outweigh disadvantages for "large" chunks, but the
477 value of "large" may vary across systems. The default is an
478 empirically derived value that works well in most systems. You can
479 disable mmap by setting to MAX_SIZE_T.
480
481*/
482
483#ifndef WIN32
484#ifdef _WIN32
485#define WIN32 1
486#endif /* _WIN32 */
487#endif /* WIN32 */
488
489#ifdef WIN32
490#define WIN32_LEAN_AND_MEAN
491#include <windows.h>
492#define HAVE_MMAP 1
493#define HAVE_MORECORE 0
494#define LACKS_UNISTD_H
495#define LACKS_SYS_PARAM_H
496#define LACKS_SYS_MMAN_H
497#define LACKS_STRING_H
498#define LACKS_STRINGS_H
499#define LACKS_SYS_TYPES_H
500#define LACKS_ERRNO_H
501#define LACKS_FCNTL_H
502#define MALLOC_FAILURE_ACTION
503#define MMAP_CLEARS 0 /* WINCE and some others apparently don't clear */
504#endif /* WIN32 */
505
506#ifdef __OS2__
507#define INCL_DOS
508#include <os2.h>
509#define HAVE_MMAP 1
510#define HAVE_MORECORE 0
511#define LACKS_SYS_MMAN_H
512#endif /* __OS2__ */
513
514#if defined(DARWIN) || defined(_DARWIN)
515/* Mac OSX docs advise not to use sbrk; it seems better to use mmap */
516#ifndef HAVE_MORECORE
517#define HAVE_MORECORE 0
518#define HAVE_MMAP 1
519#endif /* HAVE_MORECORE */
520#endif /* DARWIN */
521
522#ifndef LACKS_SYS_TYPES_H
523#include <sys/types.h> /* For size_t */
524#endif /* LACKS_SYS_TYPES_H */
525
526/* The maximum possible size_t value has all bits set */
527#define MAX_SIZE_T (~(size_t)0)
528
529#ifndef ONLY_MSPACES
530#define ONLY_MSPACES 0
531#endif /* ONLY_MSPACES */
532#ifndef MSPACES
533#if ONLY_MSPACES
534#define MSPACES 1
535#else /* ONLY_MSPACES */
536#define MSPACES 0
537#endif /* ONLY_MSPACES */
538#endif /* MSPACES */
539#ifndef MALLOC_ALIGNMENT
540#define MALLOC_ALIGNMENT ((size_t)8U)
541#endif /* MALLOC_ALIGNMENT */
542#ifndef FOOTERS
543#define FOOTERS 0
544#endif /* FOOTERS */
545#ifndef ABORT
546#define ABORT abort()
547#endif /* ABORT */
548#ifndef ABORT_ON_ASSERT_FAILURE
549#define ABORT_ON_ASSERT_FAILURE 1
550#endif /* ABORT_ON_ASSERT_FAILURE */
551#ifndef PROCEED_ON_ERROR
552#define PROCEED_ON_ERROR 0
553#endif /* PROCEED_ON_ERROR */
554#ifndef USE_LOCKS
555#define USE_LOCKS 0
556#endif /* USE_LOCKS */
557#ifndef INSECURE
558#define INSECURE 0
559#endif /* INSECURE */
560#ifndef HAVE_MMAP
561#define HAVE_MMAP 1
562#endif /* HAVE_MMAP */
563#ifndef MMAP_CLEARS
564#define MMAP_CLEARS 1
565#endif /* MMAP_CLEARS */
566#ifndef HAVE_MREMAP
567#ifdef linux
568#define HAVE_MREMAP 1
569#else /* linux */
570#define HAVE_MREMAP 0
571#endif /* linux */
572#endif /* HAVE_MREMAP */
573#ifndef MALLOC_FAILURE_ACTION
574#define MALLOC_FAILURE_ACTION errno = ENOMEM;
575#endif /* MALLOC_FAILURE_ACTION */
576#ifndef HAVE_MORECORE
577#if ONLY_MSPACES
578#define HAVE_MORECORE 0
579#else /* ONLY_MSPACES */
580#define HAVE_MORECORE 1
581#endif /* ONLY_MSPACES */
582#endif /* HAVE_MORECORE */
583#if !HAVE_MORECORE
584#define MORECORE_CONTIGUOUS 0
585#else /* !HAVE_MORECORE */
586#ifndef MORECORE
587#define MORECORE sbrk
588#endif /* MORECORE */
589#ifndef MORECORE_CONTIGUOUS
590#define MORECORE_CONTIGUOUS 1
591#endif /* MORECORE_CONTIGUOUS */
592#endif /* HAVE_MORECORE */
593#ifndef DEFAULT_GRANULARITY
594#if MORECORE_CONTIGUOUS
595#define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */
596#else /* MORECORE_CONTIGUOUS */
597#define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U)
598#endif /* MORECORE_CONTIGUOUS */
599#endif /* DEFAULT_GRANULARITY */
600#ifndef DEFAULT_TRIM_THRESHOLD
601#ifndef MORECORE_CANNOT_TRIM
602#define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U)
603#else /* MORECORE_CANNOT_TRIM */
604#define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T
605#endif /* MORECORE_CANNOT_TRIM */
606#endif /* DEFAULT_TRIM_THRESHOLD */
607#ifndef DEFAULT_MMAP_THRESHOLD
608#if HAVE_MMAP
609#define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U)
610#else /* HAVE_MMAP */
611#define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T
612#endif /* HAVE_MMAP */
613#endif /* DEFAULT_MMAP_THRESHOLD */
614#ifndef USE_BUILTIN_FFS
615#define USE_BUILTIN_FFS 0
616#endif /* USE_BUILTIN_FFS */
617#ifndef USE_DEV_RANDOM
618#define USE_DEV_RANDOM 0
619#endif /* USE_DEV_RANDOM */
620#ifndef NO_MALLINFO
621#define NO_MALLINFO 0
622#endif /* NO_MALLINFO */
623#ifndef MALLINFO_FIELD_TYPE
624#define MALLINFO_FIELD_TYPE size_t
625#endif /* MALLINFO_FIELD_TYPE */
626
627#ifndef memset
628#define memset SDL_memset
629#endif
630#ifndef memcpy
631#define memcpy SDL_memcpy
632#endif
633
634/*
635 mallopt tuning options. SVID/XPG defines four standard parameter
636 numbers for mallopt, normally defined in malloc.h. None of these
637 are used in this malloc, so setting them has no effect. But this
638 malloc does support the following options.
639*/
640
641#define M_TRIM_THRESHOLD (-1)
642#define M_GRANULARITY (-2)
643#define M_MMAP_THRESHOLD (-3)
644
645/* ------------------------ Mallinfo declarations ------------------------ */
646
647#if !NO_MALLINFO
648/*
649 This version of malloc supports the standard SVID/XPG mallinfo
650 routine that returns a struct containing usage properties and
651 statistics. It should work on any system that has a
652 /usr/include/malloc.h defining struct mallinfo. The main
653 declaration needed is the mallinfo struct that is returned (by-copy)
  by mallinfo(). The mallinfo struct contains a bunch of fields that
  are not even meaningful in this version of malloc. These fields
  are instead filled by mallinfo() with other numbers that might be of
657 interest.
658
659 HAVE_USR_INCLUDE_MALLOC_H should be set if you have a
660 /usr/include/malloc.h file that includes a declaration of struct
661 mallinfo. If so, it is included; else a compliant version is
662 declared below. These must be precisely the same for mallinfo() to
663 work. The original SVID version of this struct, defined on most
664 systems with mallinfo, declares all fields as ints. But some others
665 define as unsigned long. If your system defines the fields using a
666 type of different width than listed here, you MUST #include your
667 system version and #define HAVE_USR_INCLUDE_MALLOC_H.
668*/
669
670/* #define HAVE_USR_INCLUDE_MALLOC_H */
671
672#ifdef HAVE_USR_INCLUDE_MALLOC_H
673#include "/usr/include/malloc.h"
674#else /* HAVE_USR_INCLUDE_MALLOC_H */
675
676struct mallinfo
677{
678 MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */
679 MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */
680 MALLINFO_FIELD_TYPE smblks; /* always 0 */
681 MALLINFO_FIELD_TYPE hblks; /* always 0 */
682 MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */
683 MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */
684 MALLINFO_FIELD_TYPE fsmblks; /* always 0 */
685 MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
686 MALLINFO_FIELD_TYPE fordblks; /* total free space */
687 MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
688};
689
690#endif /* HAVE_USR_INCLUDE_MALLOC_H */
691#endif /* NO_MALLINFO */
692
693#ifdef __cplusplus
694extern "C"
695{
696#endif /* __cplusplus */
697
698#if !ONLY_MSPACES
699
700/* ------------------- Declarations of public routines ------------------- */
701
702#ifndef USE_DL_PREFIX
703#define dlcalloc calloc
704#define dlfree free
705#define dlmalloc malloc
706#define dlmemalign memalign
707#define dlrealloc realloc
708#define dlvalloc valloc
709#define dlpvalloc pvalloc
710#define dlmallinfo mallinfo
711#define dlmallopt mallopt
712#define dlmalloc_trim malloc_trim
713#define dlmalloc_stats malloc_stats
714#define dlmalloc_usable_size malloc_usable_size
715#define dlmalloc_footprint malloc_footprint
716#define dlmalloc_max_footprint malloc_max_footprint
717#define dlindependent_calloc independent_calloc
718#define dlindependent_comalloc independent_comalloc
719#endif /* USE_DL_PREFIX */
720
721
722/*
723 malloc(size_t n)
724 Returns a pointer to a newly allocated chunk of at least n bytes, or
725 null if no space is available, in which case errno is set to ENOMEM
726 on ANSI C systems.
727
728 If n is zero, malloc returns a minimum-sized chunk. (The minimum
729 size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
730 systems.) Note that size_t is an unsigned type, so calls with
731 arguments that would be negative if signed are interpreted as
732 requests for huge amounts of space, which will often fail. The
733 maximum supported value of n differs across systems, but is in all
734 cases less than the maximum representable value of a size_t.
735*/
736 void *dlmalloc(size_t);
737
738/*
739 free(void* p)
740 Releases the chunk of memory pointed to by p, that had been previously
741 allocated using malloc or a related routine such as realloc.
742 It has no effect if p is null. If p was not malloced or already
743 freed, free(p) will by default cause the current program to abort.
744*/
745 void dlfree(void *);
746
747/*
748 calloc(size_t n_elements, size_t element_size);
749 Returns a pointer to n_elements * element_size bytes, with all locations
750 set to zero.
751*/
752 void *dlcalloc(size_t, size_t);
753
754/*
755 realloc(void* p, size_t n)
756 Returns a pointer to a chunk of size n that contains the same data
757 as does chunk p up to the minimum of (n, p's size) bytes, or null
758 if no space is available.
759
760 The returned pointer may or may not be the same as p. The algorithm
761 prefers extending p in most cases when possible, otherwise it
762 employs the equivalent of a malloc-copy-free sequence.
763
764 If p is null, realloc is equivalent to malloc.
765
766 If space is not available, realloc returns null, errno is set (if on
767 ANSI) and p is NOT freed.
768
769 if n is for fewer bytes than already held by p, the newly unused
770 space is lopped off and freed if possible. realloc with a size
771 argument of zero (re)allocates a minimum-sized chunk.
772
773 The old unix realloc convention of allowing the last-free'd chunk
774 to be used as an argument to realloc is not supported.
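
  For example, a sketch of growing a buffer without leaking it when
  realloc fails:

    void* q = realloc(p, newsize);
    if (q == 0) {
      // p is still valid and still owned by the caller
    } else {
      p = q;
    }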
775*/
776
777 void *dlrealloc(void *, size_t);
778
779/*
780 memalign(size_t alignment, size_t n);
781 Returns a pointer to a newly allocated chunk of n bytes, aligned
782 in accord with the alignment argument.
783
784 The alignment argument should be a power of two. If the argument is
785 not a power of two, the nearest greater power is used.
786 8-byte alignment is guaranteed by normal malloc calls, so don't
787 bother calling memalign with an argument of 8 or less.
788
789 Overreliance on memalign is a sure way to fragment space.
790*/
791 void *dlmemalign(size_t, size_t);
792
793/*
794 valloc(size_t n);
795 Equivalent to memalign(pagesize, n), where pagesize is the page
796 size of the system. If the pagesize is unknown, 4096 is used.
797*/
798 void *dlvalloc(size_t);
799
800/*
801 mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters. The format is to provide a
803 (parameter-number, parameter-value) pair. mallopt then sets the
804 corresponding parameter to the argument value if it can (i.e., so
805 long as the value is meaningful), and returns 1 if successful else
806 0. SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h. None of these are used in this malloc,
808 so setting them has no effect. But this malloc also supports other
809 options in mallopt. See below for details. Briefly, supported
810 parameters are as follows (listed defaults are for "typical"
811 configurations).
812
813 Symbol param # default allowed param values
814 M_TRIM_THRESHOLD -1 2*1024*1024 any (MAX_SIZE_T disables)
815 M_GRANULARITY -2 page size any power of 2 >= page size
816 M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support)
817*/
818 int dlmallopt(int, int);
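
/*
  For example, a sketch of run-time tuning using the parameters above
  (dlmallopt and the M_* constants are the names declared in this file):

    dlmallopt(M_TRIM_THRESHOLD, 1024 * 1024);  // trim when >1MB is unused at the top
    dlmallopt(M_MMAP_THRESHOLD, 512 * 1024);   // mmap requests of 512KB and up
*/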
819
820/*
821 malloc_footprint();
822 Returns the number of bytes obtained from the system. The total
823 number of bytes allocated by malloc, realloc etc., is less than this
824 value. Unlike mallinfo, this function returns only a precomputed
825 result, so can be called frequently to monitor memory consumption.
826 Even if locks are otherwise defined, this function does not use them,
827 so results might not be up to date.
828*/
829 size_t dlmalloc_footprint(void);
830
831/*
832 malloc_max_footprint();
833 Returns the maximum number of bytes obtained from the system. This
834 value will be greater than current footprint if deallocated space
835 has been reclaimed by the system. The peak number of bytes allocated
836 by malloc, realloc etc., is less than this value. Unlike mallinfo,
837 this function returns only a precomputed result, so can be called
838 frequently to monitor memory consumption. Even if locks are
839 otherwise defined, this function does not use them, so results might
840 not be up to date.
841*/
842 size_t dlmalloc_max_footprint(void);
843
844#if !NO_MALLINFO
845/*
846 mallinfo()
847 Returns (by copy) a struct containing various summary statistics:
848
849 arena: current total non-mmapped bytes allocated from system
850 ordblks: the number of free chunks
851 smblks: always zero.
852 hblks: current number of mmapped regions
853 hblkhd: total bytes held in mmapped regions
854 usmblks: the maximum total allocated space. This will be greater
855 than current total if trimming has occurred.
856 fsmblks: always zero
857 uordblks: current total allocated space (normal or mmapped)
858 fordblks: total free space
859 keepcost: the maximum number of bytes that could ideally be released
860 back to system via malloc_trim. ("ideally" means that
861 it ignores page restrictions etc.)
862
863 Because these fields are ints, but internal bookkeeping may
864 be kept as longs, the reported values may wrap around zero and
865 thus be inaccurate.
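
  For example, a sketch of reading a few fields (the field type defaults
  to size_t, so values are cast for printing):

    struct mallinfo mi = mallinfo();
    printf("allocated: %lu  free: %lu\n",
           (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);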
866*/
867 struct mallinfo dlmallinfo(void);
868#endif /* NO_MALLINFO */
869
870/*
871 independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);
872
873 independent_calloc is similar to calloc, but instead of returning a
874 single cleared space, it returns an array of pointers to n_elements
875 independent elements that can hold contents of size elem_size, each
876 of which starts out cleared, and can be independently freed,
877 realloc'ed etc. The elements are guaranteed to be adjacently
878 allocated (this is not guaranteed to occur with multiple callocs or
879 mallocs), which may also improve cache locality in some
880 applications.
881
882 The "chunks" argument is optional (i.e., may be null, which is
883 probably the most typical usage). If it is null, the returned array
884 is itself dynamically allocated and should also be freed when it is
885 no longer needed. Otherwise, the chunks array must be of at least
886 n_elements in length. It is filled in with the pointers to the
887 chunks.
888
889 In either case, independent_calloc returns this pointer array, or
890 null if the allocation failed. If n_elements is zero and "chunks"
891 is null, it returns a chunk representing an array with zero elements
892 (which should be freed if not wanted).
893
894 Each element must be individually freed when it is no longer
895 needed. If you'd like to instead be able to free all at once, you
896 should instead use regular calloc and assign pointers into this
897 space to represent elements. (In this case though, you cannot
898 independently free elements.)
899
900 independent_calloc simplifies and speeds up implementations of many
901 kinds of pools. It may also be useful when constructing large data
902 structures that initially have a fixed number of fixed-sized nodes,
903 but the number is not known at compile time, and some of the nodes
904 may later need to be freed. For example:
905
906 struct Node { int item; struct Node* next; };
907
908 struct Node* build_list() {
909 struct Node** pool;
910 int n = read_number_of_nodes_needed();
911 if (n <= 0) return 0;
    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (int i = 0; i < n-1; ++i)
917 pool[i]->next = pool[i+1];
918 free(pool); // Can now free the array (or not, if it is needed later)
919 return first;
920 }
921*/
922 void **dlindependent_calloc(size_t, size_t, void **);
923
924/*
925 independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);
926
927 independent_comalloc allocates, all at once, a set of n_elements
928 chunks with sizes indicated in the "sizes" array. It returns
929 an array of pointers to these elements, each of which can be
930 independently freed, realloc'ed etc. The elements are guaranteed to
931 be adjacently allocated (this is not guaranteed to occur with
932 multiple callocs or mallocs), which may also improve cache locality
933 in some applications.
934
935 The "chunks" argument is optional (i.e., may be null). If it is null
936 the returned array is itself dynamically allocated and should also
937 be freed when it is no longer needed. Otherwise, the chunks array
938 must be of at least n_elements in length. It is filled in with the
939 pointers to the chunks.
940
941 In either case, independent_comalloc returns this pointer array, or
942 null if the allocation failed. If n_elements is zero and chunks is
943 null, it returns a chunk representing an array with zero elements
944 (which should be freed if not wanted).
945
946 Each element must be individually freed when it is no longer
947 needed. If you'd like to instead be able to free all at once, you
948 should instead use a single regular malloc, and assign pointers at
949 particular offsets in the aggregate space. (In this case though, you
950 cannot independently free elements.)
951
  independent_comalloc differs from independent_calloc in that each
953 element may have a different size, and also that it does not
954 automatically clear elements.
955
956 independent_comalloc can be used to speed up allocation in cases
957 where several structs or objects must always be allocated at the
958 same time. For example:
959
  struct Head { ... };
  struct Foot { ... };
962
963 void send_message(char* msg) {
964 int msglen = strlen(msg);
965 size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
966 void* chunks[3];
967 if (independent_comalloc(3, sizes, chunks) == 0)
968 die();
969 struct Head* head = (struct Head*)(chunks[0]);
970 char* body = (char*)(chunks[1]);
971 struct Foot* foot = (struct Foot*)(chunks[2]);
972 // ...
973 }
974
975 In general though, independent_comalloc is worth using only for
976 larger values of n_elements. For small values, you probably won't
977 detect enough difference from series of malloc calls to bother.
978
979 Overuse of independent_comalloc can increase overall memory usage,
980 since it cannot reuse existing noncontiguous small chunks that
981 might be available for some of the elements.
982*/
983 void **dlindependent_comalloc(size_t, size_t *, void **);
984
985
986/*
987 pvalloc(size_t n);
988 Equivalent to valloc(minimum-page-that-holds(n)), that is,
989 round up n to nearest pagesize.
990 */
991 void *dlpvalloc(size_t);
992
993/*
994 malloc_trim(size_t pad);
995
996 If possible, gives memory back to the system (via negative arguments
997 to sbrk) if there is unused memory at the `high' end of the malloc
998 pool or in unused MMAP segments. You can call this after freeing
999 large blocks of memory to potentially reduce the system-level memory
1000 requirements of a program. However, it cannot guarantee to reduce
1001 memory. Under some allocation patterns, some large free blocks of
1002 memory will be locked between two used chunks, so they cannot be
1003 given back to the system.
1004
1005 The `pad' argument to malloc_trim represents the amount of free
1006 trailing space to leave untrimmed. If this argument is zero, only
1007 the minimum amount of memory to maintain internal data structures
1008 will be left. Non-zero arguments can be supplied to maintain enough
1009 trailing space to service future expected allocations without having
1010 to re-obtain memory from the system.
1011
1012 Malloc_trim returns 1 if it actually released any memory, else 0.
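
  For example, a sketch (big_buffer stands for some large allocation the
  program has just finished with):

    free(big_buffer);
    int released = malloc_trim(64 * 1024);  // keep 64KB of slack
    // released == 1 if any memory went back to the system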
1013*/
1014 int dlmalloc_trim(size_t);
1015
1016/*
1017 malloc_usable_size(void* p);
1018
1019 Returns the number of bytes you can actually use in
1020 an allocated chunk, which may be more than you requested (although
1021 often not) due to alignment and minimum size constraints.
1022 You can use this many bytes without worrying about
1023 overwriting other allocated objects. This is not a particularly great
1024 programming practice. malloc_usable_size can be more useful in
1025 debugging and assertions, for example:
1026
1027 p = malloc(n);
1028 assert(malloc_usable_size(p) >= 256);
1029*/
1030 size_t dlmalloc_usable_size(void *);
1031
1032/*
1033 malloc_stats();
1034 Prints on stderr the amount of space obtained from the system (both
1035 via sbrk and mmap), the maximum amount (which may be more than
1036 current if malloc_trim and/or munmap got called), and the current
1037 number of bytes allocated via malloc (or realloc, etc) but not yet
1038 freed. Note that this is the number of bytes allocated, not the
1039 number requested. It will be larger than the number requested
1040 because of alignment and bookkeeping overhead. Because it includes
1041 alignment wastage as being in use, this figure may be greater than
1042 zero even when no user-level chunks are allocated.
1043
1044 The reported current and maximum system memory can be inaccurate if
1045 a program makes other calls to system memory allocation functions
1046 (normally sbrk) outside of malloc.
1047
1048 malloc_stats prints only the most commonly interesting statistics.
1049 More information can be obtained by calling mallinfo.
1050*/
1051 void dlmalloc_stats(void);
1052
1053#endif /* ONLY_MSPACES */
1054
1055#if MSPACES
1056
1057/*
1058 mspace is an opaque type representing an independent
1059 region of space that supports mspace_malloc, etc.
1060*/
1061 typedef void *mspace;
1062
1063/*
1064 create_mspace creates and returns a new independent space with the
1065 given initial capacity, or, if 0, the default granularity size. It
1066 returns null if there is no system memory available to create the
1067 space. If argument locked is non-zero, the space uses a separate
1068 lock to control access. The capacity of the space will grow
1069 dynamically as needed to service mspace_malloc requests. You can
1070 control the sizes of incremental increases of this space by
1071 compiling with a different DEFAULT_GRANULARITY or dynamically
1072 setting with mallopt(M_GRANULARITY, value).
1073*/
1074 mspace create_mspace(size_t capacity, int locked);
1075
1076/*
1077 destroy_mspace destroys the given space, and attempts to return all
1078 of its memory back to the system, returning the total number of
1079 bytes freed. After destruction, the results of access to all memory
1080 used by the space become undefined.
1081*/
1082 size_t destroy_mspace(mspace msp);
1083
1084/*
1085 create_mspace_with_base uses the memory supplied as the initial base
1086 of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
1087 space is used for bookkeeping, so the capacity must be at least this
1088 large. (Otherwise 0 is returned.) When this initial space is
1089 exhausted, additional memory will be obtained from the system.
1090 Destroying this space will deallocate all additionally allocated
1091 space (if possible) but not the initial base.
1092*/
1093 mspace create_mspace_with_base(void *base, size_t capacity, int locked);
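
/*
  For example, a sketch of carving an mspace out of a static buffer:

    static char arena[1 << 20];                  // 1MB backing store
    mspace ms = create_mspace_with_base(arena, sizeof(arena), 0);
    void* p = mspace_malloc(ms, 128);
    mspace_free(ms, p);
    destroy_mspace(ms);                          // arena itself is not freed
*/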
1094
1095/*
1096 mspace_malloc behaves as malloc, but operates within
1097 the given space.
1098*/
1099 void *mspace_malloc(mspace msp, size_t bytes);
1100
1101/*
1102 mspace_free behaves as free, but operates within
1103 the given space.
1104
1105 If compiled with FOOTERS==1, mspace_free is not actually needed.
1106 free may be called instead of mspace_free because freed chunks from
1107 any space are handled by their originating spaces.
1108*/
1109 void mspace_free(mspace msp, void *mem);
1110
1111/*
1112 mspace_realloc behaves as realloc, but operates within
1113 the given space.
1114
1115 If compiled with FOOTERS==1, mspace_realloc is not actually
1116 needed. realloc may be called instead of mspace_realloc because
1117 realloced chunks from any space are handled by their originating
1118 spaces.
1119*/
1120 void *mspace_realloc(mspace msp, void *mem, size_t newsize);
1121
1122/*
1123 mspace_calloc behaves as calloc, but operates within
1124 the given space.
1125*/
1126 void *mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
1127
1128/*
1129 mspace_memalign behaves as memalign, but operates within
1130 the given space.
1131*/
1132 void *mspace_memalign(mspace msp, size_t alignment, size_t bytes);
1133
1134/*
1135 mspace_independent_calloc behaves as independent_calloc, but
1136 operates within the given space.
1137*/
1138 void **mspace_independent_calloc(mspace msp, size_t n_elements,
1139 size_t elem_size, void *chunks[]);
1140
1141/*
1142 mspace_independent_comalloc behaves as independent_comalloc, but
1143 operates within the given space.
1144*/
1145 void **mspace_independent_comalloc(mspace msp, size_t n_elements,
1146 size_t sizes[], void *chunks[]);
1147
1148/*
1149 mspace_footprint() returns the number of bytes obtained from the
1150 system for this space.
1151*/
1152 size_t mspace_footprint(mspace msp);
1153
1154/*
1155 mspace_max_footprint() returns the peak number of bytes obtained from the
1156 system for this space.
1157*/
1158 size_t mspace_max_footprint(mspace msp);
1159
1160
1161#if !NO_MALLINFO
1162/*
1163 mspace_mallinfo behaves as mallinfo, but reports properties of
1164 the given space.
1165*/
1166 struct mallinfo mspace_mallinfo(mspace msp);
1167#endif /* NO_MALLINFO */
1168
1169/*
1170 mspace_malloc_stats behaves as malloc_stats, but reports
1171 properties of the given space.
1172*/
1173 void mspace_malloc_stats(mspace msp);
1174
1175/*
1176 mspace_trim behaves as malloc_trim, but
1177 operates within the given space.
1178*/
1179 int mspace_trim(mspace msp, size_t pad);
1180
1181/*
1182 An alias for mallopt.
1183*/
1184 int mspace_mallopt(int, int);
1185
1186#endif /* MSPACES */
1187
1188#ifdef __cplusplus
1189}; /* end of extern "C" */
1190#endif /* __cplusplus */
1191
1192/*
1193 ========================================================================
1194 To make a fully customizable malloc.h header file, cut everything
1195 above this line, put into file malloc.h, edit to suit, and #include it
1196 on the next line, as well as in programs that use this malloc.
1197 ========================================================================
1198*/
1199
1200/* #include "malloc.h" */
1201
1202/*------------------------------ internal #includes ---------------------- */
1203
1204#ifdef _MSC_VER
1205#pragma warning( disable : 4146 ) /* no "unsigned" warnings */
1206#endif /* _MSC_VER */
1207
1208#ifndef LACKS_STDIO_H
1209#include <stdio.h> /* for printing in malloc_stats */
1210#endif
1211
1212#ifndef LACKS_ERRNO_H
1213#include <errno.h> /* for MALLOC_FAILURE_ACTION */
1214#endif /* LACKS_ERRNO_H */
1215#if FOOTERS
1216#include <time.h> /* for magic initialization */
1217#endif /* FOOTERS */
1218#ifndef LACKS_STDLIB_H
1219#include <stdlib.h> /* for abort() */
1220#endif /* LACKS_STDLIB_H */
1221#ifdef DEBUG
1222#if ABORT_ON_ASSERT_FAILURE
1223#define assert(x) if(!(x)) ABORT
1224#else /* ABORT_ON_ASSERT_FAILURE */
1225#include <assert.h>
1226#endif /* ABORT_ON_ASSERT_FAILURE */
1227#else /* DEBUG */
1228#define assert(x)
1229#endif /* DEBUG */
1230#ifndef LACKS_STRING_H
1231#include <string.h> /* for memset etc */
1232#endif /* LACKS_STRING_H */
1233#if USE_BUILTIN_FFS
1234#ifndef LACKS_STRINGS_H
1235#include <strings.h> /* for ffs */
1236#endif /* LACKS_STRINGS_H */
1237#endif /* USE_BUILTIN_FFS */
1238#if HAVE_MMAP
1239#ifndef LACKS_SYS_MMAN_H
1240#include <sys/mman.h> /* for mmap */
1241#endif /* LACKS_SYS_MMAN_H */
1242#ifndef LACKS_FCNTL_H
1243#include <fcntl.h>
1244#endif /* LACKS_FCNTL_H */
1245#endif /* HAVE_MMAP */
1246#if HAVE_MORECORE
1247#ifndef LACKS_UNISTD_H
1248#include <unistd.h> /* for sbrk */
1249#else /* LACKS_UNISTD_H */
1250#if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__)
1251extern void *sbrk(ptrdiff_t);
1252#endif /* FreeBSD etc */
1253#endif /* LACKS_UNISTD_H */
#endif /* HAVE_MORECORE */
1255
1256#ifndef WIN32
1257#ifndef malloc_getpagesize
1258# ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */
1259# ifndef _SC_PAGE_SIZE
1260# define _SC_PAGE_SIZE _SC_PAGESIZE
1261# endif
1262# endif
1263# ifdef _SC_PAGE_SIZE
1264# define malloc_getpagesize sysconf(_SC_PAGE_SIZE)
1265# else
1266# if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE)
1267extern size_t getpagesize();
1268# define malloc_getpagesize getpagesize()
1269# else
1270# ifdef WIN32 /* use supplied emulation of getpagesize */
1271# define malloc_getpagesize getpagesize()
1272# else
1273# ifndef LACKS_SYS_PARAM_H
1274# include <sys/param.h>
1275# endif
1276# ifdef EXEC_PAGESIZE
1277# define malloc_getpagesize EXEC_PAGESIZE
1278# else
1279# ifdef NBPG
1280# ifndef CLSIZE
1281# define malloc_getpagesize NBPG
1282# else
1283# define malloc_getpagesize (NBPG * CLSIZE)
1284# endif
1285# else
1286# ifdef NBPC
1287# define malloc_getpagesize NBPC
1288# else
1289# ifdef PAGESIZE
1290# define malloc_getpagesize PAGESIZE
1291# else /* just guess */
1292# define malloc_getpagesize ((size_t)4096U)
1293# endif
1294# endif
1295# endif
1296# endif
1297# endif
1298# endif
1299# endif
1300#endif
1301#endif
1302
1303/* ------------------- size_t and alignment properties -------------------- */
1304
1305/* The byte and bit size of a size_t */
1306#define SIZE_T_SIZE (sizeof(size_t))
1307#define SIZE_T_BITSIZE (sizeof(size_t) << 3)
1308
1309/* Some constants coerced to size_t */
/* Annoying but necessary to avoid errors on some platforms */
1311#define SIZE_T_ZERO ((size_t)0)
1312#define SIZE_T_ONE ((size_t)1)
1313#define SIZE_T_TWO ((size_t)2)
1314#define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1)
1315#define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2)
1316#define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES)
1317#define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U)
1318
1319/* The bit mask value corresponding to MALLOC_ALIGNMENT */
1320#define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE)
1321
1322/* True if address a has acceptable alignment */
1323#define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0)
1324
1325/* the number of bytes to offset an address to align it */
1326#define align_offset(A)\
1327 ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\
1328 ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK))
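
/*
  For example, with the default MALLOC_ALIGNMENT of 8 (so CHUNK_ALIGN_MASK
  is 7): an address ending in 0xc has (A & CHUNK_ALIGN_MASK) == 4, so
  align_offset yields (8 - 4) & 7 == 4 and A+4 is 8-byte aligned; an
  already aligned address yields 0.
*/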
1329
1330/* -------------------------- MMAP preliminaries ------------------------- */
1331
1332/*
1333 If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and
1334 checks to fail so compiler optimizer can delete code rather than
1335 using so many "#if"s.
1336*/
1337
1338
1339/* MORECORE and MMAP must return MFAIL on failure */
1340#define MFAIL ((void*)(MAX_SIZE_T))
1341#define CMFAIL ((char*)(MFAIL)) /* defined for convenience */
1342
1343#if !HAVE_MMAP
1344#define IS_MMAPPED_BIT (SIZE_T_ZERO)
1345#define USE_MMAP_BIT (SIZE_T_ZERO)
1346#define CALL_MMAP(s) MFAIL
1347#define CALL_MUNMAP(a, s) (-1)
1348#define DIRECT_MMAP(s) MFAIL
1349
1350#else /* HAVE_MMAP */
1351#define IS_MMAPPED_BIT (SIZE_T_ONE)
1352#define USE_MMAP_BIT (SIZE_T_ONE)
1353
1354#if !defined(WIN32) && !defined (__OS2__)
1355#define CALL_MUNMAP(a, s) munmap((a), (s))
1356#define MMAP_PROT (PROT_READ|PROT_WRITE)
1357#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
1358#define MAP_ANONYMOUS MAP_ANON
1359#endif /* MAP_ANON */
1360#ifdef MAP_ANONYMOUS
1361#define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS)
1362#define CALL_MMAP(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0)
1363#else /* MAP_ANONYMOUS */
1364/*
1365 Nearly all versions of mmap support MAP_ANONYMOUS, so the following
1366 is unlikely to be needed, but is supplied just in case.
1367*/
1368#define MMAP_FLAGS (MAP_PRIVATE)
1369static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */
1370#define CALL_MMAP(s) ((dev_zero_fd < 0) ? \
1371 (dev_zero_fd = open("/dev/zero", O_RDWR), \
1372 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \
1373 mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0))
1374#endif /* MAP_ANONYMOUS */
1375
1376#define DIRECT_MMAP(s) CALL_MMAP(s)
1377
1378#elif defined(__OS2__)
1379
1380/* OS/2 MMAP via DosAllocMem */
1381static void* os2mmap(size_t size) {
1382 void* ptr;
1383 if (DosAllocMem(&ptr, size, OBJ_ANY|PAG_COMMIT|PAG_READ|PAG_WRITE) &&
1384 DosAllocMem(&ptr, size, PAG_COMMIT|PAG_READ|PAG_WRITE))
1385 return MFAIL;
1386 return ptr;
1387}
1388
1389#define os2direct_mmap(n) os2mmap(n)
1390
1391/* This function supports releasing coalesced segments */
1392static int os2munmap(void* ptr, size_t size) {
1393 while (size) {
1394 ULONG ulSize = size;
1395 ULONG ulFlags = 0;
1396 if (DosQueryMem(ptr, &ulSize, &ulFlags) != 0)
1397 return -1;
1398 if ((ulFlags & PAG_BASE) == 0 ||(ulFlags & PAG_COMMIT) == 0 ||
1399 ulSize > size)
1400 return -1;
1401 if (DosFreeMem(ptr) != 0)
1402 return -1;
1403 ptr = ( void * ) ( ( char * ) ptr + ulSize );
1404 size -= ulSize;
1405 }
1406 return 0;
1407}
1408
1409#define CALL_MMAP(s) os2mmap(s)
1410#define CALL_MUNMAP(a, s) os2munmap((a), (s))
1411#define DIRECT_MMAP(s) os2direct_mmap(s)
1412
1413#else /* WIN32 */
1414
1415/* Win32 MMAP via VirtualAlloc */
1416static void *
1417win32mmap(size_t size)
1418{
1419 void *ptr =
1420 VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
1421 return (ptr != 0) ? ptr : MFAIL;
1422}
1423
1424/* For direct MMAP, use MEM_TOP_DOWN to minimize interference */
1425static void *
1426win32direct_mmap(size_t size)
1427{
1428 void *ptr = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT | MEM_TOP_DOWN,
1429 PAGE_READWRITE);
1430 return (ptr != 0) ? ptr : MFAIL;
1431}
1432
1433/* This function supports releasing coalesced segments */
1434static int
1435win32munmap(void *ptr, size_t size)
1436{
1437 MEMORY_BASIC_INFORMATION minfo;
1438 char *cptr = ptr;
1439 while (size) {
1440 if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0)
1441 return -1;
1442 if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr ||
1443 minfo.State != MEM_COMMIT || minfo.RegionSize > size)
1444 return -1;
1445 if (VirtualFree(cptr, 0, MEM_RELEASE) == 0)
1446 return -1;
1447 cptr += minfo.RegionSize;
1448 size -= minfo.RegionSize;
1449 }
1450 return 0;
1451}
1452
1453#define CALL_MMAP(s) win32mmap(s)
1454#define CALL_MUNMAP(a, s) win32munmap((a), (s))
1455#define DIRECT_MMAP(s) win32direct_mmap(s)
1456#endif /* WIN32 */
1457#endif /* HAVE_MMAP */
1458
1459#if HAVE_MMAP && HAVE_MREMAP
1460#define CALL_MREMAP(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv))
1461#else /* HAVE_MMAP && HAVE_MREMAP */
1462#define CALL_MREMAP(addr, osz, nsz, mv) MFAIL
1463#endif /* HAVE_MMAP && HAVE_MREMAP */
1464
1465#if HAVE_MORECORE
1466#define CALL_MORECORE(S) MORECORE(S)
1467#else /* HAVE_MORECORE */
1468#define CALL_MORECORE(S) MFAIL
1469#endif /* HAVE_MORECORE */
1470
1471/* mstate bit set if contiguous morecore disabled or failed */
1472#define USE_NONCONTIGUOUS_BIT (4U)
1473
1474/* segment bit set in create_mspace_with_base */
1475#define EXTERN_BIT (8U)
1476
1477
1478/* --------------------------- Lock preliminaries ------------------------ */
1479
1480#if USE_LOCKS
1481
1482/*
1483 When locks are defined, there are up to two global locks:
1484
1485 * If HAVE_MORECORE, morecore_mutex protects sequences of calls to
1486    MORECORE. In many cases sys_alloc requires two calls that should
1487 not be interleaved with calls by other threads. This does not
1488 protect against direct calls to MORECORE by other threads not
1489    using this lock, so there is still code to cope as best we can with
1490 interference.
1491
1492 * magic_init_mutex ensures that mparams.magic and other
1493 unique mparams values are initialized only once.
1494*/
1495
1496#if !defined(WIN32) && !defined(__OS2__)
1497/* By default use posix locks */
1498#include <pthread.h>
1499#define MLOCK_T pthread_mutex_t
1500#define INITIAL_LOCK(l) pthread_mutex_init(l, NULL)
1501#define ACQUIRE_LOCK(l) pthread_mutex_lock(l)
1502#define RELEASE_LOCK(l) pthread_mutex_unlock(l)
1503
1504#if HAVE_MORECORE
1505static MLOCK_T morecore_mutex = PTHREAD_MUTEX_INITIALIZER;
1506#endif /* HAVE_MORECORE */
1507
1508static MLOCK_T magic_init_mutex = PTHREAD_MUTEX_INITIALIZER;
1509
1510#elif defined(__OS2__)
1511#define MLOCK_T HMTX
1512#define INITIAL_LOCK(l) DosCreateMutexSem(0, l, 0, FALSE)
1513#define ACQUIRE_LOCK(l) DosRequestMutexSem(*l, SEM_INDEFINITE_WAIT)
1514#define RELEASE_LOCK(l) DosReleaseMutexSem(*l)
1515#if HAVE_MORECORE
1516static MLOCK_T morecore_mutex;
1517#endif /* HAVE_MORECORE */
1518static MLOCK_T magic_init_mutex;
1519
1520#else /* WIN32 */
1521/*
1522 Because lock-protected regions have bounded times, and there
1523 are no recursive lock calls, we can use simple spinlocks.
1524*/
1525
1526#define MLOCK_T long
1527static int
1528win32_acquire_lock(MLOCK_T * sl)
1529{
1530 for (;;) {
1531#ifdef InterlockedCompareExchangePointer
1532 if (!InterlockedCompareExchange(sl, 1, 0))
1533 return 0;
1534#else /* Use older void* version */
1535 if (!InterlockedCompareExchange((void **) sl, (void *) 1, (void *) 0))
1536 return 0;
1537#endif /* InterlockedCompareExchangePointer */
1538 Sleep(0);
1539 }
1540}
1541
1542static void
1543win32_release_lock(MLOCK_T * sl)
1544{
1545 InterlockedExchange(sl, 0);
1546}
1547
1548#define INITIAL_LOCK(l) *(l)=0
1549#define ACQUIRE_LOCK(l) win32_acquire_lock(l)
1550#define RELEASE_LOCK(l) win32_release_lock(l)
1551#if HAVE_MORECORE
1552static MLOCK_T morecore_mutex;
1553#endif /* HAVE_MORECORE */
1554static MLOCK_T magic_init_mutex;
1555#endif /* WIN32 */
1556
1557#define USE_LOCK_BIT (2U)
1558#else /* USE_LOCKS */
1559#define USE_LOCK_BIT (0U)
1560#define INITIAL_LOCK(l)
1561#endif /* USE_LOCKS */
1562
1563#if USE_LOCKS && HAVE_MORECORE
1564#define ACQUIRE_MORECORE_LOCK() ACQUIRE_LOCK(&morecore_mutex);
1565#define RELEASE_MORECORE_LOCK() RELEASE_LOCK(&morecore_mutex);
1566#else /* USE_LOCKS && HAVE_MORECORE */
1567#define ACQUIRE_MORECORE_LOCK()
1568#define RELEASE_MORECORE_LOCK()
1569#endif /* USE_LOCKS && HAVE_MORECORE */
1570
1571#if USE_LOCKS
1572#define ACQUIRE_MAGIC_INIT_LOCK() ACQUIRE_LOCK(&magic_init_mutex);
1573#define RELEASE_MAGIC_INIT_LOCK() RELEASE_LOCK(&magic_init_mutex);
1574#else /* USE_LOCKS */
1575#define ACQUIRE_MAGIC_INIT_LOCK()
1576#define RELEASE_MAGIC_INIT_LOCK()
1577#endif /* USE_LOCKS */
1578
1579
1580/* ----------------------- Chunk representations ------------------------ */
1581
1582/*
1583 (The following includes lightly edited explanations by Colin Plumb.)
1584
1585 The malloc_chunk declaration below is misleading (but accurate and
1586 necessary). It declares a "view" into memory allowing access to
1587 necessary fields at known offsets from a given base.
1588
1589 Chunks of memory are maintained using a `boundary tag' method as
1590 originally described by Knuth. (See the paper by Paul Wilson
1591 ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such
1592 techniques.) Sizes of free chunks are stored both in the front of
1593 each chunk and at the end. This makes consolidating fragmented
1594 chunks into bigger chunks fast. The head fields also hold bits
1595 representing whether chunks are free or in use.
1596
1597 Here are some pictures to make it clearer. They are "exploded" to
1598 show that the state of a chunk can be thought of as extending from
1599 the high 31 bits of the head field of its header through the
1600 prev_foot and PINUSE_BIT bit of the following chunk header.
1601
1602 A chunk that's in use looks like:
1603
1604 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1605 | Size of previous chunk (if P = 1) |
1606 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1607 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1608 | Size of this chunk 1| +-+
1609 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1610 | |
1611 +- -+
1612 | |
1613 +- -+
1614 | :
1615 +- size - sizeof(size_t) available payload bytes -+
1616 : |
1617 chunk-> +- -+
1618 | |
1619 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1620 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1|
1621 | Size of next chunk (may or may not be in use) | +-+
1622 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1623
1624 And if it's free, it looks like this:
1625
1626 chunk-> +- -+
1627 | User payload (must be in use, or we would have merged!) |
1628 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1629 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P|
1630 | Size of this chunk 0| +-+
1631 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1632 | Next pointer |
1633 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1634 | Prev pointer |
1635 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1636 | :
1637 +- size - sizeof(struct chunk) unused bytes -+
1638 : |
1639 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1640 | Size of this chunk |
1641 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1642 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0|
1643 | Size of next chunk (must be in use, or we would have merged)| +-+
1644 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1645 | :
1646 +- User payload -+
1647 : |
1648 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1649 |0|
1650 +-+
1651 Note that since we always merge adjacent free chunks, the chunks
1652 adjacent to a free chunk must be in use.
1653
1654 Given a pointer to a chunk (which can be derived trivially from the
1655 payload pointer) we can, in O(1) time, find out whether the adjacent
1656 chunks are free, and if so, unlink them from the lists that they
1657 are on and merge them with the current chunk.
1658
1659 Chunks always begin on even word boundaries, so the mem portion
1660 (which is returned to the user) is also on an even word boundary, and
1661 thus at least double-word aligned.
1662
1663 The P (PINUSE_BIT) bit, stored in the unused low-order bit of the
1664 chunk size (which is always a multiple of two words), is an in-use
1665 bit for the *previous* chunk. If that bit is *clear*, then the
1666 word before the current chunk size contains the previous chunk
1667 size, and can be used to find the front of the previous chunk.
1668 The very first chunk allocated always has this bit set, preventing
1669 access to non-existent (or non-owned) memory. If pinuse is set for
1670 any given chunk, then you CANNOT determine the size of the
1671 previous chunk, and might even get a memory addressing fault when
1672 trying to do so.
1673
1674 The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of
1675 the chunk size redundantly records whether the current chunk is
1676 inuse. This redundancy enables usage checks within free and realloc,
1677 and reduces indirection when freeing and consolidating chunks.
1678
1679 Each freshly allocated chunk must have both cinuse and pinuse set.
1680 That is, each allocated chunk borders either a previously allocated
1681 and still in-use chunk, or the base of its memory arena. This is
1682  ensured by making all allocations from the `lowest' part of any
1683 found chunk. Further, no free chunk physically borders another one,
1684 so each free chunk is known to be preceded and followed by either
1685 inuse chunks or the ends of memory.
1686
1687 Note that the `foot' of the current chunk is actually represented
1688 as the prev_foot of the NEXT chunk. This makes it easier to
1689 deal with alignments etc but can be very confusing when trying
1690 to extend or adapt this code.
1691
1692 The exceptions to all this are
1693
1694 1. The special chunk `top' is the top-most available chunk (i.e.,
1695 the one bordering the end of available memory). It is treated
1696 specially. Top is never included in any bin, is used only if
1697 no other chunk is available, and is released back to the
1698 system if it is very large (see M_TRIM_THRESHOLD). In effect,
1699 the top chunk is treated as larger (and thus less well
1700 fitting) than any other available chunk. The top chunk
1701 doesn't update its trailing size field since there is no next
1702 contiguous chunk that would have to index off it. However,
1703 space is still allocated for it (TOP_FOOT_SIZE) to enable
1704 separation or merging when space is extended.
1705
1706     2. Chunks allocated via mmap, which have the lowest-order bit
1707 (IS_MMAPPED_BIT) set in their prev_foot fields, and do not set
1708 PINUSE_BIT in their head fields. Because they are allocated
1709 one-by-one, each must carry its own prev_foot field, which is
1710 also used to hold the offset this chunk has within its mmapped
1711 region, which is needed to preserve alignment. Each mmapped
1712 chunk is trailed by the first two fields of a fake next-chunk
1713 for sake of usage checks.
1714
1715*/
1716
1717struct malloc_chunk
1718{
1719 size_t prev_foot; /* Size of previous chunk (if free). */
1720 size_t head; /* Size and inuse bits. */
1721 struct malloc_chunk *fd; /* double links -- used only if free. */
1722 struct malloc_chunk *bk;
1723};
1724
1725typedef struct malloc_chunk mchunk;
1726typedef struct malloc_chunk *mchunkptr;
1727typedef struct malloc_chunk *sbinptr; /* The type of bins of chunks */
1728typedef size_t bindex_t; /* Described below */
1729typedef unsigned int binmap_t; /* Described below */
1730typedef unsigned int flag_t; /* The type of various bit flag sets */
1731
1732/* ------------------- Chunks sizes and alignments ----------------------- */
1733
1734#define MCHUNK_SIZE (sizeof(mchunk))
1735
1736#if FOOTERS
1737#define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1738#else /* FOOTERS */
1739#define CHUNK_OVERHEAD (SIZE_T_SIZE)
1740#endif /* FOOTERS */
1741
1742/* MMapped chunks need a second word of overhead ... */
1743#define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES)
1744/* ... and additional padding for fake next-chunk at foot */
1745#define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES)
1746
1747/* The smallest size we can malloc is an aligned minimal chunk */
1748#define MIN_CHUNK_SIZE\
1749 ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1750
1751/* conversion from malloc headers to user pointers, and back */
1752#define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES))
1753#define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES))
1754/* chunk associated with aligned address A */
1755#define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A)))
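/*
  For example, on a build with 4-byte size_t, TWO_SIZE_T_SIZES is 8, so
  chunk2mem(p) is (char*)p + 8: user memory begins just past the prev_foot
  and head words, and mem2chunk inverts this by subtracting 8 from the
  pointer that was handed out.
*/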
1756
1757/* Bounds on request (not chunk) sizes. */
1758#define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2)
1759#define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE)
1760
1761/* pad request bytes into a usable size */
1762#define pad_request(req) \
1763 (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK)
1764
1765/* pad request, checking for minimum (but not maximum) */
1766#define request2size(req) \
1767 (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req))
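/*
  A sketch of the arithmetic, assuming a 32-bit build without FOOTERS
  (CHUNK_OVERHEAD == 4, MALLOC_ALIGNMENT == 8, MIN_CHUNK_SIZE == 16):
    request2size(1)  == 16                      (below MIN_REQUEST)
    request2size(13) == (13 + 4 + 7) & ~7 == 24
    request2size(20) == (20 + 4 + 7) & ~7 == 24
  so every in-use chunk size is a multiple of 8 and covers the request
  plus overhead.
*/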
1768
1769
1770/* ------------------ Operations on head and foot fields ----------------- */
1771
1772/*
1773  The head field of a chunk is or'ed with PINUSE_BIT when the previous
1774  adjacent chunk is in use, and or'ed with CINUSE_BIT if this chunk is in
1775  use. If the chunk was obtained with mmap, the prev_foot field has
1776  IS_MMAPPED_BIT set and additionally holds the offset of the base of
1777  the chunk from the base of its mmapped region.
1778*/
1779
1780#define PINUSE_BIT (SIZE_T_ONE)
1781#define CINUSE_BIT (SIZE_T_TWO)
1782#define INUSE_BITS (PINUSE_BIT|CINUSE_BIT)
1783
1784/* Head value for fenceposts */
1785#define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE)
1786
1787/* extraction of fields from head words */
1788#define cinuse(p) ((p)->head & CINUSE_BIT)
1789#define pinuse(p) ((p)->head & PINUSE_BIT)
1790#define chunksize(p) ((p)->head & ~(INUSE_BITS))
1791
1792#define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT)
1793#define clear_cinuse(p) ((p)->head &= ~CINUSE_BIT)
1794
1795/* Treat space at ptr +/- offset as a chunk */
1796#define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s)))
1797#define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s)))
1798
1799/* Ptr to next or previous physical malloc_chunk. */
1800#define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~INUSE_BITS)))
1801#define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) ))
1802
1803/* extract next chunk's pinuse bit */
1804#define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT)
1805
1806/* Get/set size at footer */
1807#define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot)
1808#define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s))
1809
1810/* Set size, pinuse bit, and foot */
1811#define set_size_and_pinuse_of_free_chunk(p, s)\
1812 ((p)->head = (s|PINUSE_BIT), set_foot(p, s))
1813
1814/* Set size, pinuse bit, foot, and clear next pinuse */
1815#define set_free_with_pinuse(p, s, n)\
1816 (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s))
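/*
  For instance, freeing a 48-byte chunk p whose previous neighbor is in use
  sets p->head to 48|PINUSE_BIT == 49 and stores 48 in the prev_foot of the
  chunk 48 bytes ahead; set_free_with_pinuse additionally clears that next
  chunk's PINUSE_BIT so it can tell its predecessor is free.
*/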
1817
1818#define is_mmapped(p)\
1819 (!((p)->head & PINUSE_BIT) && ((p)->prev_foot & IS_MMAPPED_BIT))
1820
1821/* Get the internal overhead associated with chunk p */
1822#define overhead_for(p)\
1823 (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD)
1824
1825/* Return true if malloced space is not necessarily cleared */
1826#if MMAP_CLEARS
1827#define calloc_must_clear(p) (!is_mmapped(p))
1828#else /* MMAP_CLEARS */
1829#define calloc_must_clear(p) (1)
1830#endif /* MMAP_CLEARS */
1831
1832/* ---------------------- Overlaid data structures ----------------------- */
1833
1834/*
1835 When chunks are not in use, they are treated as nodes of either
1836 lists or trees.
1837
1838 "Small" chunks are stored in circular doubly-linked lists, and look
1839 like this:
1840
1841 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1842 | Size of previous chunk |
1843 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1844 `head:' | Size of chunk, in bytes |P|
1845 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1846 | Forward pointer to next chunk in list |
1847 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1848 | Back pointer to previous chunk in list |
1849 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1850 | Unused space (may be 0 bytes long) .
1851 . .
1852 . |
1853nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1854 `foot:' | Size of chunk, in bytes |
1855 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1856
1857 Larger chunks are kept in a form of bitwise digital trees (aka
1858 tries) keyed on chunksizes. Because malloc_tree_chunks are only for
1859 free chunks greater than 256 bytes, their size doesn't impose any
1860 constraints on user chunk sizes. Each node looks like:
1861
1862 chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1863 | Size of previous chunk |
1864 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1865 `head:' | Size of chunk, in bytes |P|
1866 mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1867 | Forward pointer to next chunk of same size |
1868 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1869 | Back pointer to previous chunk of same size |
1870 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1871 | Pointer to left child (child[0]) |
1872 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1873 | Pointer to right child (child[1]) |
1874 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1875 | Pointer to parent |
1876 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1877 | bin index of this chunk |
1878 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1879 | Unused space .
1880 . |
1881nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1882 `foot:' | Size of chunk, in bytes |
1883 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
1884
1885 Each tree holding treenodes is a tree of unique chunk sizes. Chunks
1886 of the same size are arranged in a circularly-linked list, with only
1887 the oldest chunk (the next to be used, in our FIFO ordering)
1888 actually in the tree. (Tree members are distinguished by a non-null
1889  parent pointer.) If a chunk with the same size as an existing node
1890 is inserted, it is linked off the existing node using pointers that
1891 work in the same way as fd/bk pointers of small chunks.
1892
1893 Each tree contains a power of 2 sized range of chunk sizes (the
1894  smallest is 0x100 <= x < 0x180), which is divided in half at each
1895  tree level, with the chunks in the smaller half of the range (0x100
1896  <= x < 0x140 for the top node) in the left subtree and the larger
1897 half (0x140 <= x < 0x180) in the right subtree. This is, of course,
1898 done by inspecting individual bits.
1899
1900 Using these rules, each node's left subtree contains all smaller
1901 sizes than its right subtree. However, the node at the root of each
1902 subtree has no particular ordering relationship to either. (The
1903 dividing line between the subtree sizes is based on trie relation.)
1904 If we remove the last chunk of a given size from the interior of the
1905 tree, we need to replace it with a leaf node. The tree ordering
1906 rules permit a node to be replaced by any leaf below it.
1907
1908 The smallest chunk in a tree (a common operation in a best-fit
1909 allocator) can be found by walking a path to the leftmost leaf in
1910 the tree. Unlike a usual binary tree, where we follow left child
1911 pointers until we reach a null, here we follow the right child
1912 pointer any time the left one is null, until we reach a leaf with
1913 both child pointers null. The smallest chunk in the tree will be
1914 somewhere along that path.
1915
1916 The worst case number of steps to add, find, or remove a node is
1917 bounded by the number of bits differentiating chunks within
1918 bins. Under current bin calculations, this ranges from 6 up to 21
1919 (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case
1920 is of course much better.
1921*/
1922
1923struct malloc_tree_chunk
1924{
1925 /* The first four fields must be compatible with malloc_chunk */
1926 size_t prev_foot;
1927 size_t head;
1928 struct malloc_tree_chunk *fd;
1929 struct malloc_tree_chunk *bk;
1930
1931 struct malloc_tree_chunk *child[2];
1932 struct malloc_tree_chunk *parent;
1933 bindex_t index;
1934};
1935
1936typedef struct malloc_tree_chunk tchunk;
1937typedef struct malloc_tree_chunk *tchunkptr;
1938typedef struct malloc_tree_chunk *tbinptr; /* The type of bins of trees */
1939
1940/* A little helper macro for trees */
1941#define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1])
1942
1943/* ----------------------------- Segments -------------------------------- */
1944
1945/*
1946 Each malloc space may include non-contiguous segments, held in a
1947 list headed by an embedded malloc_segment record representing the
1948 top-most space. Segments also include flags holding properties of
1949 the space. Large chunks that are directly allocated by mmap are not
1950 included in this list. They are instead independently created and
1951 destroyed without otherwise keeping track of them.
1952
1953 Segment management mainly comes into play for spaces allocated by
1954 MMAP. Any call to MMAP might or might not return memory that is
1955 adjacent to an existing segment. MORECORE normally contiguously
1956 extends the current space, so this space is almost always adjacent,
1957 which is simpler and faster to deal with. (This is why MORECORE is
1958 used preferentially to MMAP when both are available -- see
1959 sys_alloc.) When allocating using MMAP, we don't use any of the
1960 hinting mechanisms (inconsistently) supported in various
1961 implementations of unix mmap, or distinguish reserving from
1962 committing memory. Instead, we just ask for space, and exploit
1963 contiguity when we get it. It is probably possible to do
1964 better than this on some systems, but no general scheme seems
1965 to be significantly better.
1966
1967 Management entails a simpler variant of the consolidation scheme
1968 used for chunks to reduce fragmentation -- new adjacent memory is
1969 normally prepended or appended to an existing segment. However,
1970 there are limitations compared to chunk consolidation that mostly
1971 reflect the fact that segment processing is relatively infrequent
1972  (occurring only when getting memory from the system) and that we
1973 don't expect to have huge numbers of segments:
1974
1975 * Segments are not indexed, so traversal requires linear scans. (It
1976 would be possible to index these, but is not worth the extra
1977 overhead and complexity for most programs on most platforms.)
1978 * New segments are only appended to old ones when holding top-most
1979 memory; if they cannot be prepended to others, they are held in
1980 different segments.
1981
1982 Except for the top-most segment of an mstate, each segment record
1983 is kept at the tail of its segment. Segments are added by pushing
1984 segment records onto the list headed by &mstate.seg for the
1985 containing mstate.
1986
1987 Segment flags control allocation/merge/deallocation policies:
1988 * If EXTERN_BIT set, then we did not allocate this segment,
1989 and so should not try to deallocate or merge with others.
1990 (This currently holds only for the initial segment passed
1991 into create_mspace_with_base.)
1992 * If IS_MMAPPED_BIT set, the segment may be merged with
1993 other surrounding mmapped segments and trimmed/de-allocated
1994 using munmap.
1995 * If neither bit is set, then the segment was obtained using
1996 MORECORE so can be merged with surrounding MORECORE'd segments
1997 and deallocated/trimmed using MORECORE with negative arguments.
1998*/
1999
2000struct malloc_segment
2001{
2002 char *base; /* base address */
2003 size_t size; /* allocated size */
2004 struct malloc_segment *next; /* ptr to next segment */
2005 flag_t sflags; /* mmap and extern flag */
2006};
2007
2008#define is_mmapped_segment(S) ((S)->sflags & IS_MMAPPED_BIT)
2009#define is_extern_segment(S) ((S)->sflags & EXTERN_BIT)
2010
2011typedef struct malloc_segment msegment;
2012typedef struct malloc_segment *msegmentptr;
2013
2014/* ---------------------------- malloc_state ----------------------------- */
2015
2016/*
2017 A malloc_state holds all of the bookkeeping for a space.
2018 The main fields are:
2019
2020 Top
2021 The topmost chunk of the currently active segment. Its size is
2022 cached in topsize. The actual size of topmost space is
2023 topsize+TOP_FOOT_SIZE, which includes space reserved for adding
2024 fenceposts and segment records if necessary when getting more
2025 space from the system. The size at which to autotrim top is
2026 cached from mparams in trim_check, except that it is disabled if
2027 an autotrim fails.
2028
2029 Designated victim (dv)
2030 This is the preferred chunk for servicing small requests that
2031 don't have exact fits. It is normally the chunk split off most
2032 recently to service another small request. Its size is cached in
2033 dvsize. The link fields of this chunk are not maintained since it
2034 is not kept in a bin.
2035
2036 SmallBins
2037 An array of bin headers for free chunks. These bins hold chunks
2038 with sizes less than MIN_LARGE_SIZE bytes. Each bin contains
2039 chunks of all the same size, spaced 8 bytes apart. To simplify
2040 use in double-linked lists, each bin header acts as a malloc_chunk
2041 pointing to the real first node, if it exists (else pointing to
2042 itself). This avoids special-casing for headers. But to avoid
2043 waste, we allocate only the fd/bk pointers of bins, and then use
2044 repositioning tricks to treat these as the fields of a chunk.
2045
2046 TreeBins
2047 Treebins are pointers to the roots of trees holding a range of
2048 sizes. There are 2 equally spaced treebins for each power of two
2049    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2050 larger.
2051
2052 Bin maps
2053 There is one bit map for small bins ("smallmap") and one for
2054    treebins ("treemap"). Each bin sets its bit when non-empty, and
2055 clears the bit when empty. Bit operations are then used to avoid
2056 bin-by-bin searching -- nearly all "search" is done without ever
2057 looking at bins that won't be selected. The bit maps
2058    conservatively use 32 bits per map word, even on a 64-bit system.
2059 For a good description of some of the bit-based techniques used
2060 here, see Henry S. Warren Jr's book "Hacker's Delight" (and
2061 supplement at http://hackersdelight.org/). Many of these are
2062 intended to reduce the branchiness of paths through malloc etc, as
2063 well as to reduce the number of memory locations read or written.
2064
2065 Segments
2066 A list of segments headed by an embedded malloc_segment record
2067 representing the initial space.
2068
2069 Address check support
2070 The least_addr field is the least address ever obtained from
2071 MORECORE or MMAP. Attempted frees and reallocs of any address less
2072 than this are trapped (unless INSECURE is defined).
2073
2074 Magic tag
2075    A cross-check field that should always hold the same value as mparams.magic.
2076
2077 Flags
2078 Bits recording whether to use MMAP, locks, or contiguous MORECORE
2079
2080 Statistics
2081 Each space keeps track of current and maximum system memory
2082 obtained via MORECORE or MMAP.
2083
2084 Locking
2085 If USE_LOCKS is defined, the "mutex" lock is acquired and released
2086 around every public call using this mspace.
2087*/
2088
2089/* Bin types, widths and sizes */
2090#define NSMALLBINS (32U)
2091#define NTREEBINS (32U)
2092#define SMALLBIN_SHIFT (3U)
2093#define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT)
2094#define TREEBIN_SHIFT (8U)
2095#define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT)
2096#define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE)
2097#define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD)
2098
2099struct malloc_state
2100{
2101 binmap_t smallmap;
2102 binmap_t treemap;
2103 size_t dvsize;
2104 size_t topsize;
2105 char *least_addr;
2106 mchunkptr dv;
2107 mchunkptr top;
2108 size_t trim_check;
2109 size_t magic;
2110 mchunkptr smallbins[(NSMALLBINS + 1) * 2];
2111 tbinptr treebins[NTREEBINS];
2112 size_t footprint;
2113 size_t max_footprint;
2114 flag_t mflags;
2115#if USE_LOCKS
2116 MLOCK_T mutex; /* locate lock among fields that rarely change */
2117#endif /* USE_LOCKS */
2118 msegment seg;
2119};
2120
2121typedef struct malloc_state *mstate;
2122
2123/* ------------- Global malloc_state and malloc_params ------------------- */
2124
2125/*
2126 malloc_params holds global properties, including those that can be
2127 dynamically set using mallopt. There is a single instance, mparams,
2128 initialized in init_mparams.
2129*/
2130
2131struct malloc_params
2132{
2133 size_t magic;
2134 size_t page_size;
2135 size_t granularity;
2136 size_t mmap_threshold;
2137 size_t trim_threshold;
2138 flag_t default_mflags;
2139};
2140
2141static struct malloc_params mparams;
2142
2143/* The global malloc_state used for all non-"mspace" calls */
2144static struct malloc_state _gm_;
2145#define gm (&_gm_)
2146#define is_global(M) ((M) == &_gm_)
2147#define is_initialized(M) ((M)->top != 0)
2148
2149/* -------------------------- system alloc setup ------------------------- */
2150
2151/* Operations on mflags */
2152
2153#define use_lock(M) ((M)->mflags & USE_LOCK_BIT)
2154#define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT)
2155#define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT)
2156
2157#define use_mmap(M) ((M)->mflags & USE_MMAP_BIT)
2158#define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT)
2159#define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT)
2160
2161#define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT)
2162#define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT)
2163
2164#define set_lock(M,L)\
2165 ((M)->mflags = (L)?\
2166 ((M)->mflags | USE_LOCK_BIT) :\
2167 ((M)->mflags & ~USE_LOCK_BIT))
2168
2169/* page-align a size */
2170#define page_align(S)\
2171 (((S) + (mparams.page_size)) & ~(mparams.page_size - SIZE_T_ONE))
2172
2173/* granularity-align a size */
2174#define granularity_align(S)\
2175 (((S) + (mparams.granularity)) & ~(mparams.granularity - SIZE_T_ONE))
2176
2177#define is_page_aligned(S)\
2178 (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0)
2179#define is_granularity_aligned(S)\
2180 (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0)
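/*
  Example with a 4096-byte page size: page_align(1) == 4096,
  page_align(5000) == (5000 + 4096) & ~4095 == 8192, and
  is_page_aligned(8192) holds while is_page_aligned(5000) does not.
*/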
2181
2182/* True if segment S holds address A */
2183#define segment_holds(S, A)\
2184 ((char*)(A) >= S->base && (char*)(A) < S->base + S->size)
2185
2186/* Return segment holding given address */
2187static msegmentptr
2188segment_holding(mstate m, char *addr)
2189{
2190 msegmentptr sp = &m->seg;
2191 for (;;) {
2192 if (addr >= sp->base && addr < sp->base + sp->size)
2193 return sp;
2194 if ((sp = sp->next) == 0)
2195 return 0;
2196 }
2197}
2198
2199/* Return true if segment contains a segment link */
2200static int
2201has_segment_link(mstate m, msegmentptr ss)
2202{
2203 msegmentptr sp = &m->seg;
2204 for (;;) {
2205 if ((char *) sp >= ss->base && (char *) sp < ss->base + ss->size)
2206 return 1;
2207 if ((sp = sp->next) == 0)
2208 return 0;
2209 }
2210}
2211
2212#ifndef MORECORE_CANNOT_TRIM
2213#define should_trim(M,s) ((s) > (M)->trim_check)
2214#else /* MORECORE_CANNOT_TRIM */
2215#define should_trim(M,s) (0)
2216#endif /* MORECORE_CANNOT_TRIM */
2217
2218/*
2219 TOP_FOOT_SIZE is padding at the end of a segment, including space
2220 that may be needed to place segment records and fenceposts when new
2221 noncontiguous segments are added.
2222*/
2223#define TOP_FOOT_SIZE\
2224 (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE)
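/*
  As a rough illustration, on a typical 32-bit build without FOOTERS this
  works out to 0 + pad_request(16) + 16 == 40 bytes reserved at the top of
  each segment.
*/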
2225
2226
2227/* ------------------------------- Hooks -------------------------------- */
2228
2229/*
2230 PREACTION should be defined to return 0 on success, and nonzero on
2231 failure. If you are not using locking, you can redefine these to do
2232 anything you like.
2233*/
2234
2235#if USE_LOCKS
2236
2237/* Ensure locks are initialized */
2238#define GLOBALLY_INITIALIZE() (mparams.page_size == 0 && init_mparams())
2239
2240#define PREACTION(M) ((GLOBALLY_INITIALIZE() || use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0)
2241#define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); }
2242#else /* USE_LOCKS */
2243
2244#ifndef PREACTION
2245#define PREACTION(M) (0)
2246#endif /* PREACTION */
2247
2248#ifndef POSTACTION
2249#define POSTACTION(M)
2250#endif /* POSTACTION */
2251
2252#endif /* USE_LOCKS */
2253
2254/*
2255 CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses.
2256 USAGE_ERROR_ACTION is triggered on detected bad frees and
2257 reallocs. The argument p is an address that might have triggered the
2258 fault. It is ignored by the two predefined actions, but might be
2259 useful in custom actions that try to help diagnose errors.
2260*/
2261
2262#if PROCEED_ON_ERROR
2263
2264/* A count of the number of corruption errors causing resets */
2265int malloc_corruption_error_count;
2266
2267/* default corruption action */
2268static void reset_on_error(mstate m);
2269
2270#define CORRUPTION_ERROR_ACTION(m) reset_on_error(m)
2271#define USAGE_ERROR_ACTION(m, p)
2272
2273#else /* PROCEED_ON_ERROR */
2274
2275#ifndef CORRUPTION_ERROR_ACTION
2276#define CORRUPTION_ERROR_ACTION(m) ABORT
2277#endif /* CORRUPTION_ERROR_ACTION */
2278
2279#ifndef USAGE_ERROR_ACTION
2280#define USAGE_ERROR_ACTION(m,p) ABORT
2281#endif /* USAGE_ERROR_ACTION */
2282
2283#endif /* PROCEED_ON_ERROR */
2284
2285/* -------------------------- Debugging setup ---------------------------- */
2286
2287#if ! DEBUG
2288
2289#define check_free_chunk(M,P)
2290#define check_inuse_chunk(M,P)
2291#define check_malloced_chunk(M,P,N)
2292#define check_mmapped_chunk(M,P)
2293#define check_malloc_state(M)
2294#define check_top_chunk(M,P)
2295
2296#else /* DEBUG */
2297#define check_free_chunk(M,P) do_check_free_chunk(M,P)
2298#define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P)
2299#define check_top_chunk(M,P) do_check_top_chunk(M,P)
2300#define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N)
2301#define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P)
2302#define check_malloc_state(M) do_check_malloc_state(M)
2303
2304static void do_check_any_chunk(mstate m, mchunkptr p);
2305static void do_check_top_chunk(mstate m, mchunkptr p);
2306static void do_check_mmapped_chunk(mstate m, mchunkptr p);
2307static void do_check_inuse_chunk(mstate m, mchunkptr p);
2308static void do_check_free_chunk(mstate m, mchunkptr p);
2309static void do_check_malloced_chunk(mstate m, void *mem, size_t s);
2310static void do_check_tree(mstate m, tchunkptr t);
2311static void do_check_treebin(mstate m, bindex_t i);
2312static void do_check_smallbin(mstate m, bindex_t i);
2313static void do_check_malloc_state(mstate m);
2314static int bin_find(mstate m, mchunkptr x);
2315static size_t traverse_and_check(mstate m);
2316#endif /* DEBUG */
2317
2318/* ---------------------------- Indexing Bins ---------------------------- */
2319
2320#define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS)
2321#define small_index(s) ((s) >> SMALLBIN_SHIFT)
2322#define small_index2size(i) ((i) << SMALLBIN_SHIFT)
2323#define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE))
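/*
  With the default SMALLBIN_SHIFT of 3, small bins are spaced 8 bytes apart:
  small_index(16) == 2, small_index(24) == 3, ..., small_index2size(31) == 248,
  the largest smallbin size. Anything of MIN_LARGE_SIZE (256) or more fails
  is_small() and is kept in a treebin instead.
*/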
2324
2325/* addressing by index. See above about smallbin repositioning */
2326#define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1])))
2327#define treebin_at(M,i) (&((M)->treebins[i]))
2328
2329/* assign tree index for size S to variable I */
2330#if defined(__GNUC__) && defined(__i386__)
2331#define compute_tree_index(S, I)\
2332{\
2333 size_t X = S >> TREEBIN_SHIFT;\
2334 if (X == 0)\
2335 I = 0;\
2336 else if (X > 0xFFFF)\
2337 I = NTREEBINS-1;\
2338 else {\
2339 unsigned int K;\
2340 __asm__("bsrl %1,%0\n\t" : "=r" (K) : "rm" (X));\
2341 I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\
2342 }\
2343}
2344#else /* GNUC */
2345#define compute_tree_index(S, I)\
2346{\
2347 size_t X = S >> TREEBIN_SHIFT;\
2348 if (X == 0)\
2349 I = 0;\
2350 else if (X > 0xFFFF)\
2351 I = NTREEBINS-1;\
2352 else {\
2353 unsigned int Y = (unsigned int)X;\
2354 unsigned int N = ((Y - 0x100) >> 16) & 8;\
2355 unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\
2356 N += K;\
2357 N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\
2358 K = 14 - N + ((Y <<= K) >> 15);\
2359 I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\
2360 }\
2361}
2362#endif /* GNUC */
2363
2364/* Bit representing maximum resolved size in a treebin at i */
2365#define bit_for_tree_index(i) \
2366 (i == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2)
2367
2368/* Shift placing maximum resolved bit in a treebin at i as sign bit */
2369#define leftshift_for_tree_index(i) \
2370 ((i == NTREEBINS-1)? 0 : \
2371 ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2)))
2372
2373/* The size of the smallest chunk held in bin with index i */
2374#define minsize_for_tree_index(i) \
2375 ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \
2376 (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1)))
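/*
  Worked example: a free chunk of size 0x300 (768) has X == 3 after the
  TREEBIN_SHIFT, whose highest set bit is bit 1, so compute_tree_index
  yields I == (1 << 1) + 1 == 3. Consistently, minsize_for_tree_index(3)
  is (1 << 9) | (1 << 8) == 768 and minsize_for_tree_index(4) is 1024,
  so treebin 3 holds sizes in [768, 1024).
*/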
2377
2378
2379/* ------------------------ Operations on bin maps ----------------------- */
2380
2381/* bit corresponding to given index */
2382#define idx2bit(i) ((binmap_t)(1) << (i))
2383
2384/* Mark/Clear bits with given index */
2385#define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i))
2386#define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i))
2387#define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i))
2388
2389#define mark_treemap(M,i) ((M)->treemap |= idx2bit(i))
2390#define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i))
2391#define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i))
2392
2393/* index corresponding to given bit */
2394
2395#if defined(__GNUC__) && defined(__i386__)
2396#define compute_bit2idx(X, I)\
2397{\
2398 unsigned int J;\
2399 __asm__("bsfl %1,%0\n\t" : "=r" (J) : "rm" (X));\
2400 I = (bindex_t)J;\
2401}
2402
2403#else /* GNUC */
2404#if USE_BUILTIN_FFS
2405#define compute_bit2idx(X, I) I = ffs(X)-1
2406
2407#else /* USE_BUILTIN_FFS */
2408#define compute_bit2idx(X, I)\
2409{\
2410 unsigned int Y = X - 1;\
2411 unsigned int K = Y >> (16-4) & 16;\
2412 unsigned int N = K; Y >>= K;\
2413 N += K = Y >> (8-3) & 8; Y >>= K;\
2414 N += K = Y >> (4-2) & 4; Y >>= K;\
2415 N += K = Y >> (2-1) & 2; Y >>= K;\
2416 N += K = Y >> (1-0) & 1; Y >>= K;\
2417 I = (bindex_t)(N + Y);\
2418}
2419#endif /* USE_BUILTIN_FFS */
2420#endif /* GNUC */
2421
2422/* isolate the least set bit of a bitmap */
2423#define least_bit(x) ((x) & -(x))
2424
2425/* mask with all bits to left of least bit of x on */
2426#define left_bits(x) ((x<<1) | -(x<<1))
2427
2428/* mask with all bits to left of or equal to least bit of x on */
2429#define same_or_left_bits(x) ((x) | -(x))
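/*
  Example on a 32-bit binmap_t: for x == 0x28 (bits 3 and 5 set),
  least_bit(x) == 0x8, compute_bit2idx(0x8, I) leaves I == 3, and
  left_bits(0x8) == 0xFFFFFFF0, i.e. every bit strictly above bit 3.
*/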
2430
2431
2432/* ----------------------- Runtime Check Support ------------------------- */
2433
2434/*
2435 For security, the main invariant is that malloc/free/etc never
2436 writes to a static address other than malloc_state, unless static
2437 malloc_state itself has been corrupted, which cannot occur via
2438 malloc (because of these checks). In essence this means that we
2439 believe all pointers, sizes, maps etc held in malloc_state, but
2440 check all of those linked or offsetted from other embedded data
2441 structures. These checks are interspersed with main code in a way
2442 that tends to minimize their run-time cost.
2443
2444 When FOOTERS is defined, in addition to range checking, we also
2445  verify footer fields of inuse chunks, which can be used to guarantee
2446 that the mstate controlling malloc/free is intact. This is a
2447 streamlined version of the approach described by William Robertson
2448 et al in "Run-time Detection of Heap-based Overflows" LISA'03
2449 http://www.usenix.org/events/lisa03/tech/robertson.html The footer
2450 of an inuse chunk holds the xor of its mstate and a random seed,
2451 that is checked upon calls to free() and realloc(). This is
2452  (probabilistically) unguessable from outside the program, but can be
2453 computed by any code successfully malloc'ing any chunk, so does not
2454 itself provide protection against code that has already broken
2455 security through some other means. Unlike Robertson et al, we
2456 always dynamically check addresses of all offset chunks (previous,
2457 next, etc). This turns out to be cheaper than relying on hashes.
2458*/
2459
2460#if !INSECURE
2461/* Check if address a is at least as high as any from MORECORE or MMAP */
2462#define ok_address(M, a) ((char*)(a) >= (M)->least_addr)
2463/* Check if address of next chunk n is higher than base chunk p */
2464#define ok_next(p, n) ((char*)(p) < (char*)(n))
2465/* Check if p has its cinuse bit on */
2466#define ok_cinuse(p) cinuse(p)
2467/* Check if p has its pinuse bit on */
2468#define ok_pinuse(p) pinuse(p)
2469
2470#else /* !INSECURE */
2471#define ok_address(M, a) (1)
2472#define ok_next(b, n) (1)
2473#define ok_cinuse(p) (1)
2474#define ok_pinuse(p) (1)
2475#endif /* !INSECURE */
2476
2477#if (FOOTERS && !INSECURE)
2478/* Check if (alleged) mstate m has expected magic field */
2479#define ok_magic(M) ((M)->magic == mparams.magic)
2480#else /* (FOOTERS && !INSECURE) */
2481#define ok_magic(M) (1)
2482#endif /* (FOOTERS && !INSECURE) */
2483
2484
2485/* In gcc, use __builtin_expect to minimize impact of checks */
2486#if !INSECURE
2487#if defined(__GNUC__) && __GNUC__ >= 3
2488#define RTCHECK(e) __builtin_expect(e, 1)
2489#else /* GNUC */
2490#define RTCHECK(e) (e)
2491#endif /* GNUC */
2492#else /* !INSECURE */
2493#define RTCHECK(e) (1)
2494#endif /* !INSECURE */
2495
2496/* macros to set up inuse chunks with or without footers */
2497
2498#if !FOOTERS
2499
2500#define mark_inuse_foot(M,p,s)
2501
2502/* Set cinuse bit and pinuse bit of next chunk */
2503#define set_inuse(M,p,s)\
2504 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2505 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2506
2507/* Set cinuse and pinuse of this chunk and pinuse of next chunk */
2508#define set_inuse_and_pinuse(M,p,s)\
2509 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2510 ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT)
2511
2512/* Set size, cinuse and pinuse bit of this chunk */
2513#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2514 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT))
2515
2516#else /* FOOTERS */
2517
2518/* Set foot of inuse chunk to be xor of mstate and seed */
2519#define mark_inuse_foot(M,p,s)\
2520 (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic))
2521
2522#define get_mstate_for(p)\
2523 ((mstate)(((mchunkptr)((char*)(p) +\
2524 (chunksize(p))))->prev_foot ^ mparams.magic))
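/*
  The round trip works because xor is its own inverse: mark_inuse_foot
  stores ((size_t)M ^ mparams.magic) just past the chunk, and
  get_mstate_for xors that footer with mparams.magic again, recovering M
  for the validity checks made by free() and realloc().
*/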
2525
2526#define set_inuse(M,p,s)\
2527 ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\
2528 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \
2529 mark_inuse_foot(M,p,s))
2530
2531#define set_inuse_and_pinuse(M,p,s)\
2532 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2533 (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\
2534 mark_inuse_foot(M,p,s))
2535
2536#define set_size_and_pinuse_of_inuse_chunk(M, p, s)\
2537 ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\
2538 mark_inuse_foot(M, p, s))
2539
2540#endif /* !FOOTERS */
2541
2542/* ---------------------------- setting mparams -------------------------- */
2543
2544/* Initialize mparams */
2545static int
2546init_mparams(void)
2547{
2548 if (mparams.page_size == 0) {
2549 size_t s;
2550
2551 mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD;
2552 mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD;
2553#if MORECORE_CONTIGUOUS
2554 mparams.default_mflags = USE_LOCK_BIT | USE_MMAP_BIT;
2555#else /* MORECORE_CONTIGUOUS */
2556 mparams.default_mflags =
2557 USE_LOCK_BIT | USE_MMAP_BIT | USE_NONCONTIGUOUS_BIT;
2558#endif /* MORECORE_CONTIGUOUS */
2559
2560#if (FOOTERS && !INSECURE)
2561 {
2562#if USE_DEV_RANDOM
2563 int fd;
2564 unsigned char buf[sizeof(size_t)];
2565 /* Try to use /dev/urandom, else fall back on using time */
2566 if ((fd = open("/dev/urandom", O_RDONLY)) >= 0 &&
2567 read(fd, buf, sizeof(buf)) == sizeof(buf)) {
2568 s = *((size_t *) buf);
2569 close(fd);
2570 } else
2571#endif /* USE_DEV_RANDOM */
2572 s = (size_t) (time(0) ^ (size_t) 0x55555555U);
2573
2574 s |= (size_t) 8U; /* ensure nonzero */
2575 s &= ~(size_t) 7U; /* improve chances of fault for bad values */
2576
2577 }
2578#else /* (FOOTERS && !INSECURE) */
2579 s = (size_t) 0x58585858U;
2580#endif /* (FOOTERS && !INSECURE) */
2581 ACQUIRE_MAGIC_INIT_LOCK();
2582 if (mparams.magic == 0) {
2583 mparams.magic = s;
2584 /* Set up lock for main malloc area */
2585 INITIAL_LOCK(&gm->mutex);
2586 gm->mflags = mparams.default_mflags;
2587 }
2588 RELEASE_MAGIC_INIT_LOCK();
2589
2590#if !defined(WIN32) && !defined(__OS2__)
2591 mparams.page_size = malloc_getpagesize;
2592 mparams.granularity = ((DEFAULT_GRANULARITY != 0) ?
2593 DEFAULT_GRANULARITY : mparams.page_size);
2594#elif defined (__OS2__)
2595 /* if low-memory is used, os2munmap() would break
2596 if it were anything other than 64k */
2597 mparams.page_size = 4096u;
2598 mparams.granularity = 65536u;
2599#else /* WIN32 */
2600 {
2601 SYSTEM_INFO system_info;
2602 GetSystemInfo(&system_info);
2603 mparams.page_size = system_info.dwPageSize;
2604 mparams.granularity = system_info.dwAllocationGranularity;
2605 }
2606#endif /* WIN32 */
2607
2608 /* Sanity-check configuration:
2609 size_t must be unsigned and as wide as pointer type.
2610 ints must be at least 4 bytes.
2611 alignment must be at least 8.
2612 Alignment, min chunk size, and page size must all be powers of 2.
2613 */
2614 if ((sizeof(size_t) != sizeof(char *)) ||
2615 (MAX_SIZE_T < MIN_CHUNK_SIZE) ||
2616 (sizeof(int) < 4) ||
2617 (MALLOC_ALIGNMENT < (size_t) 8U) ||
2618 ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT - SIZE_T_ONE)) != 0) ||
2619 ((MCHUNK_SIZE & (MCHUNK_SIZE - SIZE_T_ONE)) != 0) ||
2620 ((mparams.granularity & (mparams.granularity - SIZE_T_ONE)) != 0)
2621 || ((mparams.page_size & (mparams.page_size - SIZE_T_ONE)) != 0))
2622 ABORT;
2623 }
2624 return 0;
2625}
2626
2627/* support for mallopt */
2628static int
2629change_mparam(int param_number, int value)
2630{
2631 size_t val = (size_t) value;
2632 init_mparams();
2633 switch (param_number) {
2634 case M_TRIM_THRESHOLD:
2635 mparams.trim_threshold = val;
2636 return 1;
2637 case M_GRANULARITY:
2638 if (val >= mparams.page_size && ((val & (val - 1)) == 0)) {
2639 mparams.granularity = val;
2640 return 1;
2641 } else
2642 return 0;
2643 case M_MMAP_THRESHOLD:
2644 mparams.mmap_threshold = val;
2645 return 1;
2646 default:
2647 return 0;
2648 }
2649}
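/*
  For example, mallopt(M_GRANULARITY, 65536) reaches here with val == 65536,
  which is accepted because it is a power of two no smaller than the page
  size; mallopt(M_GRANULARITY, 3000) would return 0.
*/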
2650
2651#if DEBUG
2652/* ------------------------- Debugging Support --------------------------- */
2653
2654/* Check properties of any chunk, whether free, inuse, mmapped etc */
2655static void
2656do_check_any_chunk(mstate m, mchunkptr p)
2657{
2658 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2659 assert(ok_address(m, p));
2660}
2661
2662/* Check properties of top chunk */
2663static void
2664do_check_top_chunk(mstate m, mchunkptr p)
2665{
2666 msegmentptr sp = segment_holding(m, (char *) p);
2667 size_t sz = chunksize(p);
2668 assert(sp != 0);
2669 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2670 assert(ok_address(m, p));
2671 assert(sz == m->topsize);
2672 assert(sz > 0);
2673 assert(sz == ((sp->base + sp->size) - (char *) p) - TOP_FOOT_SIZE);
2674 assert(pinuse(p));
2675 assert(!next_pinuse(p));
2676}
2677
2678/* Check properties of (inuse) mmapped chunks */
2679static void
2680do_check_mmapped_chunk(mstate m, mchunkptr p)
2681{
2682 size_t sz = chunksize(p);
2683 size_t len = (sz + (p->prev_foot & ~IS_MMAPPED_BIT) + MMAP_FOOT_PAD);
2684 assert(is_mmapped(p));
2685 assert(use_mmap(m));
2686 assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD));
2687 assert(ok_address(m, p));
2688 assert(!is_small(sz));
2689 assert((len & (mparams.page_size - SIZE_T_ONE)) == 0);
2690 assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD);
2691 assert(chunk_plus_offset(p, sz + SIZE_T_SIZE)->head == 0);
2692}
2693
2694/* Check properties of inuse chunks */
2695static void
2696do_check_inuse_chunk(mstate m, mchunkptr p)
2697{
2698 do_check_any_chunk(m, p);
2699 assert(cinuse(p));
2700 assert(next_pinuse(p));
2701 /* If not pinuse and not mmapped, previous chunk has OK offset */
2702 assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p);
2703 if (is_mmapped(p))
2704 do_check_mmapped_chunk(m, p);
2705}
2706
2707/* Check properties of free chunks */
2708static void
2709do_check_free_chunk(mstate m, mchunkptr p)
2710{
2711 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2712 mchunkptr next = chunk_plus_offset(p, sz);
2713 do_check_any_chunk(m, p);
2714 assert(!cinuse(p));
2715 assert(!next_pinuse(p));
2716 assert(!is_mmapped(p));
2717 if (p != m->dv && p != m->top) {
2718 if (sz >= MIN_CHUNK_SIZE) {
2719 assert((sz & CHUNK_ALIGN_MASK) == 0);
2720 assert(is_aligned(chunk2mem(p)));
2721 assert(next->prev_foot == sz);
2722 assert(pinuse(p));
2723 assert(next == m->top || cinuse(next));
2724 assert(p->fd->bk == p);
2725 assert(p->bk->fd == p);
2726 } else /* markers are always of size SIZE_T_SIZE */
2727 assert(sz == SIZE_T_SIZE);
2728 }
2729}
2730
2731/* Check properties of malloced chunks at the point they are malloced */
2732static void
2733do_check_malloced_chunk(mstate m, void *mem, size_t s)
2734{
2735 if (mem != 0) {
2736 mchunkptr p = mem2chunk(mem);
2737 size_t sz = p->head & ~(PINUSE_BIT | CINUSE_BIT);
2738 do_check_inuse_chunk(m, p);
2739 assert((sz & CHUNK_ALIGN_MASK) == 0);
2740 assert(sz >= MIN_CHUNK_SIZE);
2741 assert(sz >= s);
2742 /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */
2743 assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE));
2744 }
2745}
2746
2747/* Check a tree and its subtrees. */
2748static void
2749do_check_tree(mstate m, tchunkptr t)
2750{
2751 tchunkptr head = 0;
2752 tchunkptr u = t;
2753 bindex_t tindex = t->index;
2754 size_t tsize = chunksize(t);
2755 bindex_t idx;
2756 compute_tree_index(tsize, idx);
2757 assert(tindex == idx);
2758 assert(tsize >= MIN_LARGE_SIZE);
2759 assert(tsize >= minsize_for_tree_index(idx));
2760 assert((idx == NTREEBINS - 1)
2761 || (tsize < minsize_for_tree_index((idx + 1))));
2762
2763 do { /* traverse through chain of same-sized nodes */
2764 do_check_any_chunk(m, ((mchunkptr) u));
2765 assert(u->index == tindex);
2766 assert(chunksize(u) == tsize);
2767 assert(!cinuse(u));
2768 assert(!next_pinuse(u));
2769 assert(u->fd->bk == u);
2770 assert(u->bk->fd == u);
2771 if (u->parent == 0) {
2772 assert(u->child[0] == 0);
2773 assert(u->child[1] == 0);
2774 } else {
2775 assert(head == 0); /* only one node on chain has parent */
2776 head = u;
2777 assert(u->parent != u);
2778 assert(u->parent->child[0] == u ||
2779 u->parent->child[1] == u ||
2780 *((tbinptr *) (u->parent)) == u);
2781 if (u->child[0] != 0) {
2782 assert(u->child[0]->parent == u);
2783 assert(u->child[0] != u);
2784 do_check_tree(m, u->child[0]);
2785 }
2786 if (u->child[1] != 0) {
2787 assert(u->child[1]->parent == u);
2788 assert(u->child[1] != u);
2789 do_check_tree(m, u->child[1]);
2790 }
2791 if (u->child[0] != 0 && u->child[1] != 0) {
2792 assert(chunksize(u->child[0]) < chunksize(u->child[1]));
2793 }
2794 }
2795 u = u->fd;
2796 } while (u != t);
2797 assert(head != 0);
2798}
2799
2800/* Check all the chunks in a treebin. */
2801static void
2802do_check_treebin(mstate m, bindex_t i)
2803{
2804 tbinptr *tb = treebin_at(m, i);
2805 tchunkptr t = *tb;
2806 int empty = (m->treemap & (1U << i)) == 0;
2807 if (t == 0)
2808 assert(empty);
2809 if (!empty)
2810 do_check_tree(m, t);
2811}
2812
2813/* Check all the chunks in a smallbin. */
2814static void
2815do_check_smallbin(mstate m, bindex_t i)
2816{
2817 sbinptr b = smallbin_at(m, i);
2818 mchunkptr p = b->bk;
2819 unsigned int empty = (m->smallmap & (1U << i)) == 0;
2820 if (p == b)
2821 assert(empty);
2822 if (!empty) {
2823 for (; p != b; p = p->bk) {
2824 size_t size = chunksize(p);
2825 mchunkptr q;
2826 /* each chunk claims to be free */
2827 do_check_free_chunk(m, p);
2828 /* chunk belongs in bin */
2829 assert(small_index(size) == i);
2830 assert(p->bk == b || chunksize(p->bk) == chunksize(p));
2831 /* chunk is followed by an inuse chunk */
2832 q = next_chunk(p);
2833 if (q->head != FENCEPOST_HEAD)
2834 do_check_inuse_chunk(m, q);
2835 }
2836 }
2837}
2838
2839/* Find x in a bin. Used in other check functions. */
2840static int
2841bin_find(mstate m, mchunkptr x)
2842{
2843 size_t size = chunksize(x);
2844 if (is_small(size)) {
2845 bindex_t sidx = small_index(size);
2846 sbinptr b = smallbin_at(m, sidx);
2847 if (smallmap_is_marked(m, sidx)) {
2848 mchunkptr p = b;
2849 do {
2850 if (p == x)
2851 return 1;
2852 } while ((p = p->fd) != b);
2853 }
2854 } else {
2855 bindex_t tidx;
2856 compute_tree_index(size, tidx);
2857 if (treemap_is_marked(m, tidx)) {
2858 tchunkptr t = *treebin_at(m, tidx);
2859 size_t sizebits = size << leftshift_for_tree_index(tidx);
2860 while (t != 0 && chunksize(t) != size) {
2861 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
2862 sizebits <<= 1;
2863 }
2864 if (t != 0) {
2865 tchunkptr u = t;
2866 do {
2867 if (u == (tchunkptr) x)
2868 return 1;
2869 } while ((u = u->fd) != t);
2870 }
2871 }
2872 }
2873 return 0;
2874}
2875
2876/* Traverse each chunk and check it; return total */
2877static size_t
2878traverse_and_check(mstate m)
2879{
2880 size_t sum = 0;
2881 if (is_initialized(m)) {
2882 msegmentptr s = &m->seg;
2883 sum += m->topsize + TOP_FOOT_SIZE;
2884 while (s != 0) {
2885 mchunkptr q = align_as_chunk(s->base);
2886 mchunkptr lastq = 0;
2887 assert(pinuse(q));
2888 while (segment_holds(s, q) &&
2889 q != m->top && q->head != FENCEPOST_HEAD) {
2890 sum += chunksize(q);
2891 if (cinuse(q)) {
2892 assert(!bin_find(m, q));
2893 do_check_inuse_chunk(m, q);
2894 } else {
2895 assert(q == m->dv || bin_find(m, q));
2896 assert(lastq == 0 || cinuse(lastq)); /* Not 2 consecutive free */
2897 do_check_free_chunk(m, q);
2898 }
2899 lastq = q;
2900 q = next_chunk(q);
2901 }
2902 s = s->next;
2903 }
2904 }
2905 return sum;
2906}
2907
2908/* Check all properties of malloc_state. */
2909static void
2910do_check_malloc_state(mstate m)
2911{
2912 bindex_t i;
2913 size_t total;
2914 /* check bins */
2915 for (i = 0; i < NSMALLBINS; ++i)
2916 do_check_smallbin(m, i);
2917 for (i = 0; i < NTREEBINS; ++i)
2918 do_check_treebin(m, i);
2919
2920 if (m->dvsize != 0) { /* check dv chunk */
2921 do_check_any_chunk(m, m->dv);
2922 assert(m->dvsize == chunksize(m->dv));
2923 assert(m->dvsize >= MIN_CHUNK_SIZE);
2924 assert(bin_find(m, m->dv) == 0);
2925 }
2926
2927 if (m->top != 0) { /* check top chunk */
2928 do_check_top_chunk(m, m->top);
2929 assert(m->topsize == chunksize(m->top));
2930 assert(m->topsize > 0);
2931 assert(bin_find(m, m->top) == 0);
2932 }
2933
2934 total = traverse_and_check(m);
2935 assert(total <= m->footprint);
2936 assert(m->footprint <= m->max_footprint);
2937}
2938#endif /* DEBUG */
2939
2940/* ----------------------------- statistics ------------------------------ */
2941
2942#if !NO_MALLINFO
2943static struct mallinfo
2944internal_mallinfo(mstate m)
2945{
2946 struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2947 if (!PREACTION(m)) {
2948 check_malloc_state(m);
2949 if (is_initialized(m)) {
2950 size_t nfree = SIZE_T_ONE; /* top always free */
2951 size_t mfree = m->topsize + TOP_FOOT_SIZE;
2952 size_t sum = mfree;
2953 msegmentptr s = &m->seg;
2954 while (s != 0) {
2955 mchunkptr q = align_as_chunk(s->base);
2956 while (segment_holds(s, q) &&
2957 q != m->top && q->head != FENCEPOST_HEAD) {
2958 size_t sz = chunksize(q);
2959 sum += sz;
2960 if (!cinuse(q)) {
2961 mfree += sz;
2962 ++nfree;
2963 }
2964 q = next_chunk(q);
2965 }
2966 s = s->next;
2967 }
2968
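            /*
               Translate the traversal totals into the (SVID-style) mallinfo
               fields: arena is the byte span reached by walking all segments,
               ordblks the number of free chunks (top counts as one), hblkhd
               the part of the footprint the walk did not reach, usmblks the
               peak footprint, uordblks/fordblks the in-use/free split of the
               footprint, and keepcost the top size that a trim could return
               to the system.
            */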
2969 nm.arena = sum;
2970 nm.ordblks = nfree;
2971 nm.hblkhd = m->footprint - sum;
2972 nm.usmblks = m->max_footprint;
2973 nm.uordblks = m->footprint - mfree;
2974 nm.fordblks = mfree;
2975 nm.keepcost = m->topsize;
2976 }
2977
2978 POSTACTION(m);
2979 }
2980 return nm;
2981}
2982#endif /* !NO_MALLINFO */
2983
2984static void
2985internal_malloc_stats(mstate m)
2986{
2987 if (!PREACTION(m)) {
2988#ifndef LACKS_STDIO_H
2989 size_t maxfp = 0;
2990#endif
2991 size_t fp = 0;
2992 size_t used = 0;
2993 check_malloc_state(m);
2994 if (is_initialized(m)) {
2995 msegmentptr s = &m->seg;
2996#ifndef LACKS_STDIO_H
2997 maxfp = m->max_footprint;
2998#endif
2999 fp = m->footprint;
3000 used = fp - (m->topsize + TOP_FOOT_SIZE);
3001
3002 while (s != 0) {
3003 mchunkptr q = align_as_chunk(s->base);
3004 while (segment_holds(s, q) &&
3005 q != m->top && q->head != FENCEPOST_HEAD) {
3006 if (!cinuse(q))
3007 used -= chunksize(q);
3008 q = next_chunk(q);
3009 }
3010 s = s->next;
3011 }
3012 }
3013#ifndef LACKS_STDIO_H
3014 fprintf(stderr, "max system bytes = %10lu\n",
3015 (unsigned long) (maxfp));
3016 fprintf(stderr, "system bytes = %10lu\n", (unsigned long) (fp));
3017 fprintf(stderr, "in use bytes = %10lu\n", (unsigned long) (used));
3018#endif
3019
3020 POSTACTION(m);
3021 }
3022}
3023
3024/* ----------------------- Operations on smallbins ----------------------- */
3025
3026/*
3027 Various forms of linking and unlinking are defined as macros, even
3028 the ones for trees, which are very long but have very short typical
3029 execution paths. This is ugly, but it reduces reliance on the
3030 inlining support of compilers.
3031*/
3032
3033/* Link a free chunk into a smallbin */
3034#define insert_small_chunk(M, P, S) {\
3035 bindex_t I = small_index(S);\
3036 mchunkptr B = smallbin_at(M, I);\
3037 mchunkptr F = B;\
3038 assert(S >= MIN_CHUNK_SIZE);\
3039 if (!smallmap_is_marked(M, I))\
3040 mark_smallmap(M, I);\
3041 else if (RTCHECK(ok_address(M, B->fd)))\
3042 F = B->fd;\
3043 else {\
3044 CORRUPTION_ERROR_ACTION(M);\
3045 }\
3046 B->fd = P;\
3047 F->bk = P;\
3048 P->fd = F;\
3049 P->bk = B;\
3050}
3051
3052/* Unlink a chunk from a smallbin */
3053#define unlink_small_chunk(M, P, S) {\
3054 mchunkptr F = P->fd;\
3055 mchunkptr B = P->bk;\
3056 bindex_t I = small_index(S);\
3057 assert(P != B);\
3058 assert(P != F);\
3059 assert(chunksize(P) == small_index2size(I));\
3060 if (F == B)\
3061 clear_smallmap(M, I);\
3062 else if (RTCHECK((F == smallbin_at(M,I) || ok_address(M, F)) &&\
3063 (B == smallbin_at(M,I) || ok_address(M, B)))) {\
3064 F->bk = B;\
3065 B->fd = F;\
3066 }\
3067 else {\
3068 CORRUPTION_ERROR_ACTION(M);\
3069 }\
3070}
3071
3072/* Unlink the first chunk from a smallbin */
3073#define unlink_first_small_chunk(M, B, P, I) {\
3074 mchunkptr F = P->fd;\
3075 assert(P != B);\
3076 assert(P != F);\
3077 assert(chunksize(P) == small_index2size(I));\
3078 if (B == F)\
3079 clear_smallmap(M, I);\
3080 else if (RTCHECK(ok_address(M, F))) {\
3081 B->fd = F;\
3082 F->bk = B;\
3083 }\
3084 else {\
3085 CORRUPTION_ERROR_ACTION(M);\
3086 }\
3087}
3088
3089/* Replace dv node, binning the old one */
3090/* Used only when dvsize known to be small */
3091#define replace_dv(M, P, S) {\
3092 size_t DVS = M->dvsize;\
3093 if (DVS != 0) {\
3094 mchunkptr DV = M->dv;\
3095 assert(is_small(DVS));\
3096 insert_small_chunk(M, DV, DVS);\
3097 }\
3098 M->dvsize = S;\
3099 M->dv = P;\
3100}
3101
3102/* ------------------------- Operations on trees ------------------------- */
3103
3104/* Insert chunk into tree */
3105#define insert_large_chunk(M, X, S) {\
3106 tbinptr* H;\
3107 bindex_t I;\
3108 compute_tree_index(S, I);\
3109 H = treebin_at(M, I);\
3110 X->index = I;\
3111 X->child[0] = X->child[1] = 0;\
3112 if (!treemap_is_marked(M, I)) {\
3113 mark_treemap(M, I);\
3114 *H = X;\
3115 X->parent = (tchunkptr)H;\
3116 X->fd = X->bk = X;\
3117 }\
3118 else {\
3119 tchunkptr T = *H;\
3120 size_t K = S << leftshift_for_tree_index(I);\
3121 for (;;) {\
3122 if (chunksize(T) != S) {\
3123 tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\
3124 K <<= 1;\
3125 if (*C != 0)\
3126 T = *C;\
3127 else if (RTCHECK(ok_address(M, C))) {\
3128 *C = X;\
3129 X->parent = T;\
3130 X->fd = X->bk = X;\
3131 break;\
3132 }\
3133 else {\
3134 CORRUPTION_ERROR_ACTION(M);\
3135 break;\
3136 }\
3137 }\
3138 else {\
3139 tchunkptr F = T->fd;\
3140 if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\
3141 T->fd = F->bk = X;\
3142 X->fd = F;\
3143 X->bk = T;\
3144 X->parent = 0;\
3145 break;\
3146 }\
3147 else {\
3148 CORRUPTION_ERROR_ACTION(M);\
3149 break;\
3150 }\
3151 }\
3152 }\
3153 }\
3154}
3155
3156/*
3157 Unlink steps:
3158
3159 1. If x is a chained node, unlink it from its same-sized fd/bk links
3160 and choose its bk node as its replacement.
3161 2. If x was the last node of its size, but not a leaf node, it must
3162 be replaced with a leaf node (not merely one with an open left or
3163 right), to make sure that lefts and rights of descendants
3164 correspond properly to bit masks. We use the rightmost descendant
3165 of x. We could use any other leaf, but this is easy to locate and
3166 tends to counteract removal of leftmost nodes elsewhere, and so keeps
3167 paths shorter than minimally guaranteed. This doesn't loop much
3168 because on average a node in a tree is near the bottom.
3169 3. If x is the base of a chain (i.e., has parent links) relink
3170 x's parent and children to x's replacement (or null if none).
3171*/
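/*
  Illustrative sketch only, kept out of the build with #if 0: the
  replacement search of step 2 above written as a plain function (the
  helper name is made up for illustration).  It walks to a rightmost
  descendant of x, preferring right children, exactly as the RP/CP
  pointer chasing in unlink_large_chunk below does, but without the
  corruption checks.
*/
#if 0
static tchunkptr rightmost_descendant(tchunkptr x)
{
    /* Start from whichever child exists; if neither does, x is a leaf. */
    tchunkptr r = (x->child[1] != 0) ? x->child[1] : x->child[0];
    if (r != 0) {
        /* Descend while any child remains, preferring the right one. */
        while (r->child[1] != 0 || r->child[0] != 0)
            r = (r->child[1] != 0) ? r->child[1] : r->child[0];
    }
    return r; /* replacement leaf, or 0 if x had no children */
}
#endif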
3172
3173#define unlink_large_chunk(M, X) {\
3174 tchunkptr XP = X->parent;\
3175 tchunkptr R;\
3176 if (X->bk != X) {\
3177 tchunkptr F = X->fd;\
3178 R = X->bk;\
3179 if (RTCHECK(ok_address(M, F))) {\
3180 F->bk = R;\
3181 R->fd = F;\
3182 }\
3183 else {\
3184 CORRUPTION_ERROR_ACTION(M);\
3185 }\
3186 }\
3187 else {\
3188 tchunkptr* RP;\
3189 if (((R = *(RP = &(X->child[1]))) != 0) ||\
3190 ((R = *(RP = &(X->child[0]))) != 0)) {\
3191 tchunkptr* CP;\
3192 while ((*(CP = &(R->child[1])) != 0) ||\
3193 (*(CP = &(R->child[0])) != 0)) {\
3194 R = *(RP = CP);\
3195 }\
3196 if (RTCHECK(ok_address(M, RP)))\
3197 *RP = 0;\
3198 else {\
3199 CORRUPTION_ERROR_ACTION(M);\
3200 }\
3201 }\
3202 }\
3203 if (XP != 0) {\
3204 tbinptr* H = treebin_at(M, X->index);\
3205 if (X == *H) {\
3206 if ((*H = R) == 0) \
3207 clear_treemap(M, X->index);\
3208 }\
3209 else if (RTCHECK(ok_address(M, XP))) {\
3210 if (XP->child[0] == X) \
3211 XP->child[0] = R;\
3212 else \
3213 XP->child[1] = R;\
3214 }\
3215 else\
3216 CORRUPTION_ERROR_ACTION(M);\
3217 if (R != 0) {\
3218 if (RTCHECK(ok_address(M, R))) {\
3219 tchunkptr C0, C1;\
3220 R->parent = XP;\
3221 if ((C0 = X->child[0]) != 0) {\
3222 if (RTCHECK(ok_address(M, C0))) {\
3223 R->child[0] = C0;\
3224 C0->parent = R;\
3225 }\
3226 else\
3227 CORRUPTION_ERROR_ACTION(M);\
3228 }\
3229 if ((C1 = X->child[1]) != 0) {\
3230 if (RTCHECK(ok_address(M, C1))) {\
3231 R->child[1] = C1;\
3232 C1->parent = R;\
3233 }\
3234 else\
3235 CORRUPTION_ERROR_ACTION(M);\
3236 }\
3237 }\
3238 else\
3239 CORRUPTION_ERROR_ACTION(M);\
3240 }\
3241 }\
3242}
3243
3244/* Relays to large vs small bin operations */
3245
3246#define insert_chunk(M, P, S)\
3247 if (is_small(S)) insert_small_chunk(M, P, S)\
3248 else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); }
3249
3250#define unlink_chunk(M, P, S)\
3251 if (is_small(S)) unlink_small_chunk(M, P, S)\
3252 else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); }
3253
3254
3255/* Relays to internal calls to malloc/free from realloc, memalign etc */
3256
3257#if ONLY_MSPACES
3258#define internal_malloc(m, b) mspace_malloc(m, b)
3259#define internal_free(m, mem) mspace_free(m,mem);
3260#else /* ONLY_MSPACES */
3261#if MSPACES
3262#define internal_malloc(m, b)\
3263 (m == gm)? dlmalloc(b) : mspace_malloc(m, b)
3264#define internal_free(m, mem)\
3265 if (m == gm) dlfree(mem); else mspace_free(m,mem);
3266#else /* MSPACES */
3267#define internal_malloc(m, b) dlmalloc(b)
3268#define internal_free(m, mem) dlfree(mem)
3269#endif /* MSPACES */
3270#endif /* ONLY_MSPACES */
3271
3272/* ----------------------- Direct-mmapping chunks ----------------------- */
3273
3274/*
3275 Directly mmapped chunks are set up with an offset to the start of
3276 the mmapped region stored in the prev_foot field of the chunk. This
3277 allows reconstruction of the required argument to MUNMAP when freed,
3278 and also allows adjustment of the returned chunk to meet alignment
3279 requirements (especially in memalign). There is also enough space
3280 allocated to hold a fake next chunk of size SIZE_T_SIZE to maintain
3281 the PINUSE bit so frees can be checked.
3282*/
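/*
  A minimal sketch, disabled with #if 0 and using an illustrative helper
  name, of how that stored offset is consumed when such a chunk is
  released: recover the original mapping base and total length, then
  hand them back to MUNMAP, mirroring the mmapped branch of free below.
*/
#if 0
static void release_direct_mmapped_chunk(mstate m, mchunkptr p)
{
    size_t offset = p->prev_foot & ~IS_MMAPPED_BIT; /* distance back to the mapping base */
    size_t psize = chunksize(p);
    size_t length = psize + offset + MMAP_FOOT_PAD; /* whole mapped region, fenceposts included */
    if (CALL_MUNMAP((char *) p - offset, length) == 0)
        m->footprint -= length;
}
#endif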
3283
3284/* Malloc using mmap */
3285static void *
3286mmap_alloc(mstate m, size_t nb)
3287{
3288 size_t mmsize =
3289 granularity_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3290 if (mmsize > nb) { /* Check for wrap around 0 */
3291 char *mm = (char *) (DIRECT_MMAP(mmsize));
3292 if (mm != CMFAIL) {
3293 size_t offset = align_offset(chunk2mem(mm));
3294 size_t psize = mmsize - offset - MMAP_FOOT_PAD;
3295 mchunkptr p = (mchunkptr) (mm + offset);
3296 p->prev_foot = offset | IS_MMAPPED_BIT;
3297 (p)->head = (psize | CINUSE_BIT);
3298 mark_inuse_foot(m, p, psize);
3299 chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD;
3300 chunk_plus_offset(p, psize + SIZE_T_SIZE)->head = 0;
3301
3302 if (mm < m->least_addr)
3303 m->least_addr = mm;
3304 if ((m->footprint += mmsize) > m->max_footprint)
3305 m->max_footprint = m->footprint;
3306 assert(is_aligned(chunk2mem(p)));
3307 check_mmapped_chunk(m, p);
3308 return chunk2mem(p);
3309 }
3310 }
3311 return 0;
3312}
3313
3314/* Realloc using mmap */
3315static mchunkptr
3316mmap_resize(mstate m, mchunkptr oldp, size_t nb)
3317{
3318 size_t oldsize = chunksize(oldp);
3319 if (is_small(nb)) /* Can't shrink mmap regions below small size */
3320 return 0;
3321 /* Keep old chunk if big enough but not too big */
3322 if (oldsize >= nb + SIZE_T_SIZE &&
3323 (oldsize - nb) <= (mparams.granularity << 1))
3324 return oldp;
3325 else {
3326 size_t offset = oldp->prev_foot & ~IS_MMAPPED_BIT;
3327 size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD;
3328 size_t newmmsize = granularity_align(nb + SIX_SIZE_T_SIZES +
3329 CHUNK_ALIGN_MASK);
3330 char *cp = (char *) CALL_MREMAP((char *) oldp - offset,
3331 oldmmsize, newmmsize, 1);
3332 if (cp != CMFAIL) {
3333 mchunkptr newp = (mchunkptr) (cp + offset);
3334 size_t psize = newmmsize - offset - MMAP_FOOT_PAD;
3335 newp->head = (psize | CINUSE_BIT);
3336 mark_inuse_foot(m, newp, psize);
3337 chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD;
3338 chunk_plus_offset(newp, psize + SIZE_T_SIZE)->head = 0;
3339
3340 if (cp < m->least_addr)
3341 m->least_addr = cp;
3342 if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint)
3343 m->max_footprint = m->footprint;
3344 check_mmapped_chunk(m, newp);
3345 return newp;
3346 }
3347 }
3348 return 0;
3349}
3350
3351/* -------------------------- mspace management -------------------------- */
3352
3353/* Initialize top chunk and its size */
3354static void
3355init_top(mstate m, mchunkptr p, size_t psize)
3356{
3357 /* Ensure alignment */
3358 size_t offset = align_offset(chunk2mem(p));
3359 p = (mchunkptr) ((char *) p + offset);
3360 psize -= offset;
3361
3362 m->top = p;
3363 m->topsize = psize;
3364 p->head = psize | PINUSE_BIT;
3365 /* set size of fake trailing chunk holding overhead space only once */
3366 chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE;
3367 m->trim_check = mparams.trim_threshold; /* reset on each update */
3368}
3369
3370/* Initialize bins for a new mstate that is otherwise zeroed out */
3371static void
3372init_bins(mstate m)
3373{
3374 /* Establish circular links for smallbins */
3375 bindex_t i;
3376 for (i = 0; i < NSMALLBINS; ++i) {
3377 sbinptr bin = smallbin_at(m, i);
3378 bin->fd = bin->bk = bin;
3379 }
3380}
3381
3382#if PROCEED_ON_ERROR
3383
3384/* default corruption action */
3385static void
3386reset_on_error(mstate m)
3387{
3388 int i;
3389 ++malloc_corruption_error_count;
3390 /* Reinitialize fields to forget about all memory */
3391 m->smallbins = m->treebins = 0;
3392 m->dvsize = m->topsize = 0;
3393 m->seg.base = 0;
3394 m->seg.size = 0;
3395 m->seg.next = 0;
3396 m->top = m->dv = 0;
3397 for (i = 0; i < NTREEBINS; ++i)
3398 *treebin_at(m, i) = 0;
3399 init_bins(m);
3400}
3401#endif /* PROCEED_ON_ERROR */
3402
3403/* Allocate a chunk from newbase; consolidate the remainder with the first chunk of the old base. */
3404static void *
3405prepend_alloc(mstate m, char *newbase, char *oldbase, size_t nb)
3406{
3407 mchunkptr p = align_as_chunk(newbase);
3408 mchunkptr oldfirst = align_as_chunk(oldbase);
3409 size_t psize = (char *) oldfirst - (char *) p;
3410 mchunkptr q = chunk_plus_offset(p, nb);
3411 size_t qsize = psize - nb;
3412 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3413
3414 assert((char *) oldfirst > (char *) q);
3415 assert(pinuse(oldfirst));
3416 assert(qsize >= MIN_CHUNK_SIZE);
3417
3418 /* consolidate remainder with first chunk of old base */
3419 if (oldfirst == m->top) {
3420 size_t tsize = m->topsize += qsize;
3421 m->top = q;
3422 q->head = tsize | PINUSE_BIT;
3423 check_top_chunk(m, q);
3424 } else if (oldfirst == m->dv) {
3425 size_t dsize = m->dvsize += qsize;
3426 m->dv = q;
3427 set_size_and_pinuse_of_free_chunk(q, dsize);
3428 } else {
3429 if (!cinuse(oldfirst)) {
3430 size_t nsize = chunksize(oldfirst);
3431 unlink_chunk(m, oldfirst, nsize);
3432 oldfirst = chunk_plus_offset(oldfirst, nsize);
3433 qsize += nsize;
3434 }
3435 set_free_with_pinuse(q, qsize, oldfirst);
3436 insert_chunk(m, q, qsize);
3437 check_free_chunk(m, q);
3438 }
3439
3440 check_malloced_chunk(m, chunk2mem(p), nb);
3441 return chunk2mem(p);
3442}
3443
3444
3445/* Add a segment to hold a new noncontiguous region */
3446static void
3447add_segment(mstate m, char *tbase, size_t tsize, flag_t mmapped)
3448{
3449 /* Determine locations and sizes of segment, fenceposts, old top */
3450 char *old_top = (char *) m->top;
3451 msegmentptr oldsp = segment_holding(m, old_top);
3452 char *old_end = oldsp->base + oldsp->size;
3453 size_t ssize = pad_request(sizeof(struct malloc_segment));
3454 char *rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK);
3455 size_t offset = align_offset(chunk2mem(rawsp));
3456 char *asp = rawsp + offset;
3457 char *csp = (asp < (old_top + MIN_CHUNK_SIZE)) ? old_top : asp;
3458 mchunkptr sp = (mchunkptr) csp;
3459 msegmentptr ss = (msegmentptr) (chunk2mem(sp));
3460 mchunkptr tnext = chunk_plus_offset(sp, ssize);
3461 mchunkptr p = tnext;
3462 int nfences = 0;
3463
3464 /* reset top to new space */
3465 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3466
3467 /* Set up segment record */
3468 assert(is_aligned(ss));
3469 set_size_and_pinuse_of_inuse_chunk(m, sp, ssize);
3470 *ss = m->seg; /* Push current record */
3471 m->seg.base = tbase;
3472 m->seg.size = tsize;
3473 m->seg.sflags = mmapped;
3474 m->seg.next = ss;
3475
3476 /* Insert trailing fenceposts */
3477 for (;;) {
3478 mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE);
3479 p->head = FENCEPOST_HEAD;
3480 ++nfences;
3481 if ((char *) (&(nextp->head)) < old_end)
3482 p = nextp;
3483 else
3484 break;
3485 }
3486 assert(nfences >= 2);
3487
3488 /* Insert the rest of old top into a bin as an ordinary free chunk */
3489 if (csp != old_top) {
3490 mchunkptr q = (mchunkptr) old_top;
3491 size_t psize = csp - old_top;
3492 mchunkptr tn = chunk_plus_offset(q, psize);
3493 set_free_with_pinuse(q, psize, tn);
3494 insert_chunk(m, q, psize);
3495 }
3496
3497 check_top_chunk(m, m->top);
3498}
3499
3500/* -------------------------- System allocation -------------------------- */
3501
3502/* Get memory from system using MORECORE or MMAP */
3503static void *
3504sys_alloc(mstate m, size_t nb)
3505{
3506 char *tbase = CMFAIL;
3507 size_t tsize = 0;
3508 flag_t mmap_flag = 0;
3509
3510 init_mparams();
3511
3512 /* Directly map large chunks */
3513 if (use_mmap(m) && nb >= mparams.mmap_threshold) {
3514 void *mem = mmap_alloc(m, nb);
3515 if (mem != 0)
3516 return mem;
3517 }
3518
3519 /*
3520 Try getting memory in any of three ways (in most-preferred to
3521 least-preferred order):
3522 1. A call to MORECORE that can normally contiguously extend memory.
3523 (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE or
3524 main space is mmapped or a previous contiguous call failed)
3525 2. A call to MMAP new space (disabled if not HAVE_MMAP).
3526 Note that under the default settings, if MORECORE is unable to
3527 fulfill a request, and HAVE_MMAP is true, then mmap is
3528 used as a noncontiguous system allocator. This is a useful backup
3529 strategy for systems with holes in address spaces -- in this case
3530 sbrk cannot contiguously expand the heap, but mmap may be able to
3531 find space.
3532 3. A call to MORECORE that cannot usually contiguously extend memory.
3533 (disabled if not HAVE_MORECORE)
3534 */
3535
3536 if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) {
3537 char *br = CMFAIL;
3538 msegmentptr ss =
3539 (m->top == 0) ? 0 : segment_holding(m, (char *) m->top);
3540 size_t asize = 0;
3541 ACQUIRE_MORECORE_LOCK();
3542
3543 if (ss == 0) { /* First time through or recovery */
3544 char *base = (char *) CALL_MORECORE(0);
3545 if (base != CMFAIL) {
3546 asize =
3547 granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3548 SIZE_T_ONE);
3549 /* Adjust to end on a page boundary */
3550 if (!is_page_aligned(base))
3551 asize += (page_align((size_t) base) - (size_t) base);
3552 /* Can't call MORECORE if size is negative when treated as signed */
3553 if (asize < HALF_MAX_SIZE_T &&
3554 (br = (char *) (CALL_MORECORE(asize))) == base) {
3555 tbase = base;
3556 tsize = asize;
3557 }
3558 }
3559 } else {
3560 /* Subtract out existing available top space from MORECORE request. */
3561 asize =
3562 granularity_align(nb - m->topsize + TOP_FOOT_SIZE +
3563 MALLOC_ALIGNMENT + SIZE_T_ONE);
3564 /* Use mem here only if it did contiguously extend old space */
3565 if (asize < HALF_MAX_SIZE_T &&
3566 (br =
3567 (char *) (CALL_MORECORE(asize))) == ss->base + ss->size) {
3568 tbase = br;
3569 tsize = asize;
3570 }
3571 }
3572
3573 if (tbase == CMFAIL) { /* Cope with partial failure */
3574 if (br != CMFAIL) { /* Try to use/extend the space we did get */
3575 if (asize < HALF_MAX_SIZE_T &&
3576 asize < nb + TOP_FOOT_SIZE + SIZE_T_ONE) {
3577 size_t esize =
3578 granularity_align(nb + TOP_FOOT_SIZE +
3579 MALLOC_ALIGNMENT + SIZE_T_ONE -
3580 asize);
3581 if (esize < HALF_MAX_SIZE_T) {
3582 char *end = (char *) CALL_MORECORE(esize);
3583 if (end != CMFAIL)
3584 asize += esize;
3585 else { /* Can't use; try to release */
3586 end = (char *) CALL_MORECORE(-asize);
3587 br = CMFAIL;
3588 }
3589 }
3590 }
3591 }
3592 if (br != CMFAIL) { /* Use the space we did get */
3593 tbase = br;
3594 tsize = asize;
3595 } else
3596 disable_contiguous(m); /* Don't try contiguous path in the future */
3597 }
3598
3599 RELEASE_MORECORE_LOCK();
3600 }
3601
3602 if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */
3603 size_t req = nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT + SIZE_T_ONE;
3604 size_t rsize = granularity_align(req);
3605 if (rsize > nb) { /* Fail if wraps around zero */
3606 char *mp = (char *) (CALL_MMAP(rsize));
3607 if (mp != CMFAIL) {
3608 tbase = mp;
3609 tsize = rsize;
3610 mmap_flag = IS_MMAPPED_BIT;
3611 }
3612 }
3613 }
3614
3615 if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */
3616 size_t asize =
3617 granularity_align(nb + TOP_FOOT_SIZE + MALLOC_ALIGNMENT +
3618 SIZE_T_ONE);
3619 if (asize < HALF_MAX_SIZE_T) {
3620 char *br = CMFAIL;
3621 char *end = CMFAIL;
3622 ACQUIRE_MORECORE_LOCK();
3623 br = (char *) (CALL_MORECORE(asize));
3624 end = (char *) (CALL_MORECORE(0));
3625 RELEASE_MORECORE_LOCK();
3626 if (br != CMFAIL && end != CMFAIL && br < end) {
3627 size_t ssize = end - br;
3628 if (ssize > nb + TOP_FOOT_SIZE) {
3629 tbase = br;
3630 tsize = ssize;
3631 }
3632 }
3633 }
3634 }
3635
3636 if (tbase != CMFAIL) {
3637
3638 if ((m->footprint += tsize) > m->max_footprint)
3639 m->max_footprint = m->footprint;
3640
3641 if (!is_initialized(m)) { /* first-time initialization */
3642 m->seg.base = m->least_addr = tbase;
3643 m->seg.size = tsize;
3644 m->seg.sflags = mmap_flag;
3645 m->magic = mparams.magic;
3646 init_bins(m);
3647 if (is_global(m))
3648 init_top(m, (mchunkptr) tbase, tsize - TOP_FOOT_SIZE);
3649 else {
3650 /* Offset top by embedded malloc_state */
3651 mchunkptr mn = next_chunk(mem2chunk(m));
3652 init_top(m, mn,
3653 (size_t) ((tbase + tsize) - (char *) mn) -
3654 TOP_FOOT_SIZE);
3655 }
3656 }
3657
3658 else {
3659 /* Try to merge with an existing segment */
3660 msegmentptr sp = &m->seg;
3661 while (sp != 0 && tbase != sp->base + sp->size)
3662 sp = sp->next;
3663 if (sp != 0 && !is_extern_segment(sp) && (sp->sflags & IS_MMAPPED_BIT) == mmap_flag && segment_holds(sp, m->top)) { /* append */
3664 sp->size += tsize;
3665 init_top(m, m->top, m->topsize + tsize);
3666 } else {
3667 if (tbase < m->least_addr)
3668 m->least_addr = tbase;
3669 sp = &m->seg;
3670 while (sp != 0 && sp->base != tbase + tsize)
3671 sp = sp->next;
3672 if (sp != 0 &&
3673 !is_extern_segment(sp) &&
3674 (sp->sflags & IS_MMAPPED_BIT) == mmap_flag) {
3675 char *oldbase = sp->base;
3676 sp->base = tbase;
3677 sp->size += tsize;
3678 return prepend_alloc(m, tbase, oldbase, nb);
3679 } else
3680 add_segment(m, tbase, tsize, mmap_flag);
3681 }
3682 }
3683
3684 if (nb < m->topsize) { /* Allocate from new or extended top space */
3685 size_t rsize = m->topsize -= nb;
3686 mchunkptr p = m->top;
3687 mchunkptr r = m->top = chunk_plus_offset(p, nb);
3688 r->head = rsize | PINUSE_BIT;
3689 set_size_and_pinuse_of_inuse_chunk(m, p, nb);
3690 check_top_chunk(m, m->top);
3691 check_malloced_chunk(m, chunk2mem(p), nb);
3692 return chunk2mem(p);
3693 }
3694 }
3695
3696 MALLOC_FAILURE_ACTION;
3697 return 0;
3698}
3699
3700/* ----------------------- system deallocation -------------------------- */
3701
3702/* Unmap and unlink any mmapped segments that don't contain used chunks */
3703static size_t
3704release_unused_segments(mstate m)
3705{
3706 size_t released = 0;
3707 msegmentptr pred = &m->seg;
3708 msegmentptr sp = pred->next;
3709 while (sp != 0) {
3710 char *base = sp->base;
3711 size_t size = sp->size;
3712 msegmentptr next = sp->next;
3713 if (is_mmapped_segment(sp) && !is_extern_segment(sp)) {
3714 mchunkptr p = align_as_chunk(base);
3715 size_t psize = chunksize(p);
3716 /* Can unmap if first chunk holds entire segment and not pinned */
3717 if (!cinuse(p)
3718 && (char *) p + psize >= base + size - TOP_FOOT_SIZE) {
3719 tchunkptr tp = (tchunkptr) p;
3720 assert(segment_holds(sp, (char *) sp));
3721 if (p == m->dv) {
3722 m->dv = 0;
3723 m->dvsize = 0;
3724 } else {
3725 unlink_large_chunk(m, tp);
3726 }
3727 if (CALL_MUNMAP(base, size) == 0) {
3728 released += size;
3729 m->footprint -= size;
3730 /* unlink obsoleted record */
3731 sp = pred;
3732 sp->next = next;
3733 } else { /* back out if cannot unmap */
3734 insert_large_chunk(m, tp, psize);
3735 }
3736 }
3737 }
3738 pred = sp;
3739 sp = next;
3740 }
3741 return released;
3742}
3743
3744static int
3745sys_trim(mstate m, size_t pad)
3746{
3747 size_t released = 0;
3748 if (pad < MAX_REQUEST && is_initialized(m)) {
3749 pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */
3750
3751 if (m->topsize > pad) {
3752 /* Shrink top space in granularity-size units, keeping at least one */
3753 size_t unit = mparams.granularity;
3754 size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit -
3755 SIZE_T_ONE) * unit;
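            /*
               Worked example: with a 64K granularity unit and
               m->topsize - pad == 200K this gives
               (ceil(200K / 64K) - 1) * 64K == 3 * 64K == 192K, so top
               would shrink to pad plus the 8K sub-unit remainder; in
               general between one byte and one full unit above pad is
               always retained.
            */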
3756 msegmentptr sp = segment_holding(m, (char *) m->top);
3757
3758 if (!is_extern_segment(sp)) {
3759 if (is_mmapped_segment(sp)) {
3760 if (HAVE_MMAP && sp->size >= extra && !has_segment_link(m, sp)) { /* can't shrink if pinned */
3761 size_t newsize = sp->size - extra;
3762 /* Prefer mremap, fall back to munmap */
3763 if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) !=
3764 MFAIL)
3765 || (CALL_MUNMAP(sp->base + newsize, extra) == 0)) {
3766 released = extra;
3767 }
3768 }
3769 } else if (HAVE_MORECORE) {
3770 if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */
3771 extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit;
3772 ACQUIRE_MORECORE_LOCK();
3773 {
3774 /* Make sure end of memory is where we last set it. */
3775 char *old_br = (char *) (CALL_MORECORE(0));
3776 if (old_br == sp->base + sp->size) {
3777 char *rel_br = (char *) (CALL_MORECORE(-extra));
3778 char *new_br = (char *) (CALL_MORECORE(0));
3779 if (rel_br != CMFAIL && new_br < old_br)
3780 released = old_br - new_br;
3781 }
3782 }
3783 RELEASE_MORECORE_LOCK();
3784 }
3785 }
3786
3787 if (released != 0) {
3788 sp->size -= released;
3789 m->footprint -= released;
3790 init_top(m, m->top, m->topsize - released);
3791 check_top_chunk(m, m->top);
3792 }
3793 }
3794
3795 /* Unmap any unused mmapped segments */
3796 if (HAVE_MMAP)
3797 released += release_unused_segments(m);
3798
3799 /* On failure, disable autotrim to avoid repeated failed future calls */
3800 if (released == 0)
3801 m->trim_check = MAX_SIZE_T;
3802 }
3803
3804 return (released != 0) ? 1 : 0;
3805}
3806
3807/* ---------------------------- malloc support --------------------------- */
3808
3809/* allocate a large request from the best fitting chunk in a treebin */
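/*
  Each treebin is a bitwise digital trie keyed on chunk size: once
  compute_tree_index has selected the bin, leftshift_for_tree_index
  positions the first undecided size bit at the top of the word, and
  every descent step below reads that top bit (0 takes child[0], 1 takes
  child[1]) before shifting the next bit into place.  Chunks of equal
  size are not descended into; they hang off a tree node on its fd/bk
  ring instead.
*/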
3810static void *
3811tmalloc_large(mstate m, size_t nb)
3812{
3813 tchunkptr v = 0;
3814 size_t rsize = -nb; /* Unsigned negation */
3815 tchunkptr t;
3816 bindex_t idx;
3817 compute_tree_index(nb, idx);
3818
3819 if ((t = *treebin_at(m, idx)) != 0) {
3820 /* Traverse tree for this bin looking for node with size == nb */
3821 size_t sizebits = nb << leftshift_for_tree_index(idx);
3822 tchunkptr rst = 0; /* The deepest untaken right subtree */
3823 for (;;) {
3824 tchunkptr rt;
3825 size_t trem = chunksize(t) - nb;
3826 if (trem < rsize) {
3827 v = t;
3828 if ((rsize = trem) == 0)
3829 break;
3830 }
3831 rt = t->child[1];
3832 t = t->child[(sizebits >> (SIZE_T_BITSIZE - SIZE_T_ONE)) & 1];
3833 if (rt != 0 && rt != t)
3834 rst = rt;
3835 if (t == 0) {
3836 t = rst; /* set t to least subtree holding sizes > nb */
3837 break;
3838 }
3839 sizebits <<= 1;
3840 }
3841 }
3842
3843 if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */
3844 binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap;
3845 if (leftbits != 0) {
3846 bindex_t i;
3847 binmap_t leastbit = least_bit(leftbits);
3848 compute_bit2idx(leastbit, i);
3849 t = *treebin_at(m, i);
3850 }
3851 }
3852
3853 while (t != 0) { /* find smallest of tree or subtree */
3854 size_t trem = chunksize(t) - nb;
3855 if (trem < rsize) {
3856 rsize = trem;
3857 v = t;
3858 }
3859 t = leftmost_child(t);
3860 }
3861
3862 /* If dv is a better fit, return 0 so malloc will use it */
3863 if (v != 0 && rsize < (size_t) (m->dvsize - nb)) {
3864 if (RTCHECK(ok_address(m, v))) { /* split */
3865 mchunkptr r = chunk_plus_offset(v, nb);
3866 assert(chunksize(v) == rsize + nb);
3867 if (RTCHECK(ok_next(v, r))) {
3868 unlink_large_chunk(m, v);
3869 if (rsize < MIN_CHUNK_SIZE)
3870 set_inuse_and_pinuse(m, v, (rsize + nb));
3871 else {
3872 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3873 set_size_and_pinuse_of_free_chunk(r, rsize);
3874 insert_chunk(m, r, rsize);
3875 }
3876 return chunk2mem(v);
3877 }
3878 }
3879 CORRUPTION_ERROR_ACTION(m);
3880 }
3881 return 0;
3882}
3883
3884/* allocate a small request from the best fitting chunk in a treebin */
3885static void *
3886tmalloc_small(mstate m, size_t nb)
3887{
3888 tchunkptr t, v;
3889 size_t rsize;
3890 bindex_t i;
3891 binmap_t leastbit = least_bit(m->treemap);
3892 compute_bit2idx(leastbit, i);
3893
3894 v = t = *treebin_at(m, i);
3895 rsize = chunksize(t) - nb;
3896
3897 while ((t = leftmost_child(t)) != 0) {
3898 size_t trem = chunksize(t) - nb;
3899 if (trem < rsize) {
3900 rsize = trem;
3901 v = t;
3902 }
3903 }
3904
3905 if (RTCHECK(ok_address(m, v))) {
3906 mchunkptr r = chunk_plus_offset(v, nb);
3907 assert(chunksize(v) == rsize + nb);
3908 if (RTCHECK(ok_next(v, r))) {
3909 unlink_large_chunk(m, v);
3910 if (rsize < MIN_CHUNK_SIZE)
3911 set_inuse_and_pinuse(m, v, (rsize + nb));
3912 else {
3913 set_size_and_pinuse_of_inuse_chunk(m, v, nb);
3914 set_size_and_pinuse_of_free_chunk(r, rsize);
3915 replace_dv(m, r, rsize);
3916 }
3917 return chunk2mem(v);
3918 }
3919 }
3920
3921 CORRUPTION_ERROR_ACTION(m);
3922 return 0;
3923}
3924
3925/* --------------------------- realloc support --------------------------- */
3926
3927static void *
3928internal_realloc(mstate m, void *oldmem, size_t bytes)
3929{
3930 if (bytes >= MAX_REQUEST) {
3931 MALLOC_FAILURE_ACTION;
3932 return 0;
3933 }
3934 if (!PREACTION(m)) {
3935 mchunkptr oldp = mem2chunk(oldmem);
3936 size_t oldsize = chunksize(oldp);
3937 mchunkptr next = chunk_plus_offset(oldp, oldsize);
3938 mchunkptr newp = 0;
3939 void *extra = 0;
3940
3941 /* Try to either shrink or extend into top. Else malloc-copy-free */
3942
3943 if (RTCHECK(ok_address(m, oldp) && ok_cinuse(oldp) &&
3944 ok_next(oldp, next) && ok_pinuse(next))) {
3945 size_t nb = request2size(bytes);
3946 if (is_mmapped(oldp))
3947 newp = mmap_resize(m, oldp, nb);
3948 else if (oldsize >= nb) { /* already big enough */
3949 size_t rsize = oldsize - nb;
3950 newp = oldp;
3951 if (rsize >= MIN_CHUNK_SIZE) {
3952 mchunkptr remainder = chunk_plus_offset(newp, nb);
3953 set_inuse(m, newp, nb);
3954 set_inuse(m, remainder, rsize);
3955 extra = chunk2mem(remainder);
3956 }
3957 } else if (next == m->top && oldsize + m->topsize > nb) {
3958 /* Expand into top */
3959 size_t newsize = oldsize + m->topsize;
3960 size_t newtopsize = newsize - nb;
3961 mchunkptr newtop = chunk_plus_offset(oldp, nb);
3962 set_inuse(m, oldp, nb);
3963 newtop->head = newtopsize | PINUSE_BIT;
3964 m->top = newtop;
3965 m->topsize = newtopsize;
3966 newp = oldp;
3967 }
3968 } else {
3969 USAGE_ERROR_ACTION(m, oldmem);
3970 POSTACTION(m);
3971 return 0;
3972 }
3973
3974 POSTACTION(m);
3975
3976 if (newp != 0) {
3977 if (extra != 0) {
3978 internal_free(m, extra);
3979 }
3980 check_inuse_chunk(m, newp);
3981 return chunk2mem(newp);
3982 } else {
3983 void *newmem = internal_malloc(m, bytes);
3984 if (newmem != 0) {
3985 size_t oc = oldsize - overhead_for(oldp);
3986 memcpy(newmem, oldmem, (oc < bytes) ? oc : bytes);
3987 internal_free(m, oldmem);
3988 }
3989 return newmem;
3990 }
3991 }
3992 return 0;
3993}
3994
3995/* --------------------------- memalign support -------------------------- */
3996
3997static void *
3998internal_memalign(mstate m, size_t alignment, size_t bytes)
3999{
4000 if (alignment <= MALLOC_ALIGNMENT) /* Can just use malloc */
4001 return internal_malloc(m, bytes);
4002 if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */
4003 alignment = MIN_CHUNK_SIZE;
4004 if ((alignment & (alignment - SIZE_T_ONE)) != 0) { /* Ensure a power of 2 */
4005 size_t a = MALLOC_ALIGNMENT << 1;
4006 while (a < alignment)
4007 a <<= 1;
4008 alignment = a;
4009 }
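    /*
       Worked example: an alignment request of 48, which is not a power of
       two, is rounded up here to 64, while values that are already powers
       of two skip this block entirely (assuming the usual MALLOC_ALIGNMENT
       of 8 or 16).
    */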
4010
4011 if (bytes >= MAX_REQUEST - alignment) {
4012 if (m != 0) { /* Test isn't needed but avoids compiler warning */
4013 MALLOC_FAILURE_ACTION;
4014 }
4015 } else {
4016 size_t nb = request2size(bytes);
4017 size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD;
4018 char *mem = (char *) internal_malloc(m, req);
4019 if (mem != 0) {
4020 void *leader = 0;
4021 void *trailer = 0;
4022 mchunkptr p = mem2chunk(mem);
4023
4024 if (PREACTION(m))
4025 return 0;
4026 if ((((size_t) (mem)) % alignment) != 0) { /* misaligned */
4027 /*
4028 Find an aligned spot inside chunk. Since we need to give
4029 back leading space in a chunk of at least MIN_CHUNK_SIZE, if
4030 the first calculation places us at a spot with less than
4031 MIN_CHUNK_SIZE leader, we can move to the next aligned spot.
4032 We've allocated enough total room so that this is always
4033 possible.
4034 */
4035 char *br = (char *) mem2chunk((size_t) (((size_t) (mem +
4036 alignment -
4037 SIZE_T_ONE))
4038 & -alignment));
4039 char *pos =
4040 ((size_t) (br - (char *) (p)) >=
4041 MIN_CHUNK_SIZE) ? br : br + alignment;
4042 mchunkptr newp = (mchunkptr) pos;
4043 size_t leadsize = pos - (char *) (p);
4044 size_t newsize = chunksize(p) - leadsize;
4045
4046 if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */
4047 newp->prev_foot = p->prev_foot + leadsize;
4048 newp->head = (newsize | CINUSE_BIT);
4049 } else { /* Otherwise, give back leader, use the rest */
4050 set_inuse(m, newp, newsize);
4051 set_inuse(m, p, leadsize);
4052 leader = chunk2mem(p);
4053 }
4054 p = newp;
4055 }
4056
4057 /* Give back spare room at the end */
4058 if (!is_mmapped(p)) {
4059 size_t size = chunksize(p);
4060 if (size > nb + MIN_CHUNK_SIZE) {
4061 size_t remainder_size = size - nb;
4062 mchunkptr remainder = chunk_plus_offset(p, nb);
4063 set_inuse(m, p, nb);
4064 set_inuse(m, remainder, remainder_size);
4065 trailer = chunk2mem(remainder);
4066 }
4067 }
4068
4069 assert(chunksize(p) >= nb);
4070 assert((((size_t) (chunk2mem(p))) % alignment) == 0);
4071 check_inuse_chunk(m, p);
4072 POSTACTION(m);
4073 if (leader != 0) {
4074 internal_free(m, leader);
4075 }
4076 if (trailer != 0) {
4077 internal_free(m, trailer);
4078 }
4079 return chunk2mem(p);
4080 }
4081 }
4082 return 0;
4083}
4084
4085/* ------------------------ comalloc/coalloc support --------------------- */
4086
4087static void **
4088ialloc(mstate m, size_t n_elements, size_t * sizes, int opts, void *chunks[])
4089{
4090 /*
4091 This provides common support for independent_X routines, handling
4092 all of the combinations that can result.
4093
4094 The opts arg has:
4095 bit 0 set if all elements are same size (using sizes[0])
4096 bit 1 set if elements should be zeroed
4097 */
4098
4099 size_t element_size; /* chunksize of each element, if all same */
4100 size_t contents_size; /* total size of elements */
4101 size_t array_size; /* request size of pointer array */
4102 void *mem; /* malloced aggregate space */
4103 mchunkptr p; /* corresponding chunk */
4104 size_t remainder_size; /* remaining bytes while splitting */
4105 void **marray; /* either "chunks" or malloced ptr array */
4106 mchunkptr array_chunk; /* chunk for malloced ptr array */
4107 flag_t was_enabled; /* to disable mmap */
4108 size_t size;
4109 size_t i;
4110
4111 /* compute array length, if needed */
4112 if (chunks != 0) {
4113 if (n_elements == 0)
4114 return chunks; /* nothing to do */
4115 marray = chunks;
4116 array_size = 0;
4117 } else {
4118 /* if empty req, must still return chunk representing empty array */
4119 if (n_elements == 0)
4120 return (void **) internal_malloc(m, 0);
4121 marray = 0;
4122 array_size = request2size(n_elements * (sizeof(void *)));
4123 }
4124
4125 /* compute total element size */
4126 if (opts & 0x1) { /* all-same-size */
4127 element_size = request2size(*sizes);
4128 contents_size = n_elements * element_size;
4129 } else { /* add up all the sizes */
4130 element_size = 0;
4131 contents_size = 0;
4132 for (i = 0; i != n_elements; ++i)
4133 contents_size += request2size(sizes[i]);
4134 }
4135
4136 size = contents_size + array_size;
4137
4138 /*
4139 Allocate the aggregate chunk. First disable direct-mmapping so
4140 malloc won't use it, since we would not be able to later
4141 free/realloc space internal to a segregated mmap region.
4142 */
4143 was_enabled = use_mmap(m);
4144 disable_mmap(m);
4145 mem = internal_malloc(m, size - CHUNK_OVERHEAD);
4146 if (was_enabled)
4147 enable_mmap(m);
4148 if (mem == 0)
4149 return 0;
4150
4151 if (PREACTION(m))
4152 return 0;
4153 p = mem2chunk(mem);
4154 remainder_size = chunksize(p);
4155
4156 assert(!is_mmapped(p));
4157
4158 if (opts & 0x2) { /* optionally clear the elements */
4159 memset((size_t *) mem, 0, remainder_size - SIZE_T_SIZE - array_size);
4160 }
4161
4162 /* If not provided, allocate the pointer array as final part of chunk */
4163 if (marray == 0) {
4164 size_t array_chunk_size;
4165 array_chunk = chunk_plus_offset(p, contents_size);
4166 array_chunk_size = remainder_size - contents_size;
4167 marray = (void **) (chunk2mem(array_chunk));
4168 set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size);
4169 remainder_size = contents_size;
4170 }
4171
4172 /* split out elements */
4173 for (i = 0;; ++i) {
4174 marray[i] = chunk2mem(p);
4175 if (i != n_elements - 1) {
4176 if (element_size != 0)
4177 size = element_size;
4178 else
4179 size = request2size(sizes[i]);
4180 remainder_size -= size;
4181 set_size_and_pinuse_of_inuse_chunk(m, p, size);
4182 p = chunk_plus_offset(p, size);
4183 } else { /* the final element absorbs any overallocation slop */
4184 set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size);
4185 break;
4186 }
4187 }
4188
4189#if DEBUG
4190 if (marray != chunks) {
4191 /* final element must have exactly exhausted chunk */
4192 if (element_size != 0) {
4193 assert(remainder_size == element_size);
4194 } else {
4195 assert(remainder_size == request2size(sizes[i]));
4196 }
4197 check_inuse_chunk(m, mem2chunk(marray));
4198 }
4199 for (i = 0; i != n_elements; ++i)
4200 check_inuse_chunk(m, mem2chunk(marray[i]));
4201
4202#endif /* DEBUG */
4203
4204 POSTACTION(m);
4205 return marray;
4206}
4207
4208
4209/* -------------------------- public routines ---------------------------- */
4210
4211#if !ONLY_MSPACES
4212
4213void *
4214dlmalloc(size_t bytes)
4215{
4216 /*
4217 Basic algorithm:
4218 If a small request (< 256 bytes minus per-chunk overhead):
4219 1. If one exists, use a remainderless chunk in associated smallbin.
4220 (Remainderless means that there are too few excess bytes to
4221 represent as a chunk.)
4222 2. If it is big enough, use the dv chunk, which is normally the
4223 chunk adjacent to the one used for the most recent small request.
4224 3. If one exists, split the smallest available chunk in a bin,
4225 saving remainder in dv.
4226 4. If it is big enough, use the top chunk.
4227 5. If available, get memory from system and use it
4228 Otherwise, for a large request:
4229 1. Find the smallest available binned chunk that fits, and use it
4230 if it is better fitting than dv chunk, splitting if necessary.
4231 2. If better fitting than any binned chunk, use the dv chunk.
4232 3. If it is big enough, use the top chunk.
4233 4. If request size >= mmap threshold, try to directly mmap this chunk.
4234 5. If available, get memory from system and use it
4235
4236 The ugly gotos here ensure that POSTACTION occurs along all paths.
4237 */
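    /*
       Worked example of the small-request fast path, assuming a typical
       32-bit configuration (4-byte size_t, 8-byte alignment, FOOTERS off):
       a request for 20 bytes pads to nb == 24, giving smallbin index 3
       (bins are spaced 8 bytes apart).  If bin 3 or bin 4 is nonempty, its
       first chunk (24 or 32 bytes) is taken whole, since even a 32-byte
       chunk would leave only 8 spare bytes, too few to form a separate
       chunk.
    */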
4238
4239 if (!PREACTION(gm)) {
4240 void *mem;
4241 size_t nb;
4242 if (bytes <= MAX_SMALL_REQUEST) {
4243 bindex_t idx;
4244 binmap_t smallbits;
4245 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4246 idx = small_index(nb);
4247 smallbits = gm->smallmap >> idx;
4248
4249 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4250 mchunkptr b, p;
4251 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4252 b = smallbin_at(gm, idx);
4253 p = b->fd;
4254 assert(chunksize(p) == small_index2size(idx));
4255 unlink_first_small_chunk(gm, b, p, idx);
4256 set_inuse_and_pinuse(gm, p, small_index2size(idx));
4257 mem = chunk2mem(p);
4258 check_malloced_chunk(gm, mem, nb);
4259 goto postaction;
4260 }
4261
4262 else if (nb > gm->dvsize) {
4263 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4264 mchunkptr b, p, r;
4265 size_t rsize;
4266 bindex_t i;
4267 binmap_t leftbits =
4268 (smallbits << idx) & left_bits(idx2bit(idx));
4269 binmap_t leastbit = least_bit(leftbits);
4270 compute_bit2idx(leastbit, i);
4271 b = smallbin_at(gm, i);
4272 p = b->fd;
4273 assert(chunksize(p) == small_index2size(i));
4274 unlink_first_small_chunk(gm, b, p, i);
4275 rsize = small_index2size(i) - nb;
4276 /* With 4-byte sizes the fit here can never be remainderless */
4277 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4278 set_inuse_and_pinuse(gm, p, small_index2size(i));
4279 else {
4280 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4281 r = chunk_plus_offset(p, nb);
4282 set_size_and_pinuse_of_free_chunk(r, rsize);
4283 replace_dv(gm, r, rsize);
4284 }
4285 mem = chunk2mem(p);
4286 check_malloced_chunk(gm, mem, nb);
4287 goto postaction;
4288 }
4289
4290 else if (gm->treemap != 0
4291 && (mem = tmalloc_small(gm, nb)) != 0) {
4292 check_malloced_chunk(gm, mem, nb);
4293 goto postaction;
4294 }
4295 }
4296 } else if (bytes >= MAX_REQUEST)
4297 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4298 else {
4299 nb = pad_request(bytes);
4300 if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) {
4301 check_malloced_chunk(gm, mem, nb);
4302 goto postaction;
4303 }
4304 }
4305
4306 if (nb <= gm->dvsize) {
4307 size_t rsize = gm->dvsize - nb;
4308 mchunkptr p = gm->dv;
4309 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4310 mchunkptr r = gm->dv = chunk_plus_offset(p, nb);
4311 gm->dvsize = rsize;
4312 set_size_and_pinuse_of_free_chunk(r, rsize);
4313 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4314 } else { /* exhaust dv */
4315 size_t dvs = gm->dvsize;
4316 gm->dvsize = 0;
4317 gm->dv = 0;
4318 set_inuse_and_pinuse(gm, p, dvs);
4319 }
4320 mem = chunk2mem(p);
4321 check_malloced_chunk(gm, mem, nb);
4322 goto postaction;
4323 }
4324
4325 else if (nb < gm->topsize) { /* Split top */
4326 size_t rsize = gm->topsize -= nb;
4327 mchunkptr p = gm->top;
4328 mchunkptr r = gm->top = chunk_plus_offset(p, nb);
4329 r->head = rsize | PINUSE_BIT;
4330 set_size_and_pinuse_of_inuse_chunk(gm, p, nb);
4331 mem = chunk2mem(p);
4332 check_top_chunk(gm, gm->top);
4333 check_malloced_chunk(gm, mem, nb);
4334 goto postaction;
4335 }
4336
4337 mem = sys_alloc(gm, nb);
4338
4339 postaction:
4340 POSTACTION(gm);
4341 return mem;
4342 }
4343
4344 return 0;
4345}
4346
4347void
4348dlfree(void *mem)
4349{
4350 /*
4351 Consolidate freed chunks with preceding or succeeding bordering
4352 free chunks, if they exist, and then place in a bin. Intermixed
4353 with special cases for top, dv, mmapped chunks, and usage errors.
4354 */
4355
4356 if (mem != 0) {
4357 mchunkptr p = mem2chunk(mem);
4358#if FOOTERS
4359 mstate fm = get_mstate_for(p);
4360 if (!ok_magic(fm)) {
4361 USAGE_ERROR_ACTION(fm, p);
4362 return;
4363 }
4364#else /* FOOTERS */
4365#define fm gm
4366#endif /* FOOTERS */
4367 if (!PREACTION(fm)) {
4368 check_inuse_chunk(fm, p);
4369 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4370 size_t psize = chunksize(p);
4371 mchunkptr next = chunk_plus_offset(p, psize);
4372 if (!pinuse(p)) {
4373 size_t prevsize = p->prev_foot;
4374 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4375 prevsize &= ~IS_MMAPPED_BIT;
4376 psize += prevsize + MMAP_FOOT_PAD;
4377 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4378 fm->footprint -= psize;
4379 goto postaction;
4380 } else {
4381 mchunkptr prev = chunk_minus_offset(p, prevsize);
4382 psize += prevsize;
4383 p = prev;
4384 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4385 if (p != fm->dv) {
4386 unlink_chunk(fm, p, prevsize);
4387 } else if ((next->head & INUSE_BITS) ==
4388 INUSE_BITS) {
4389 fm->dvsize = psize;
4390 set_free_with_pinuse(p, psize, next);
4391 goto postaction;
4392 }
4393 } else
4394 goto erroraction;
4395 }
4396 }
4397
4398 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4399 if (!cinuse(next)) { /* consolidate forward */
4400 if (next == fm->top) {
4401 size_t tsize = fm->topsize += psize;
4402 fm->top = p;
4403 p->head = tsize | PINUSE_BIT;
4404 if (p == fm->dv) {
4405 fm->dv = 0;
4406 fm->dvsize = 0;
4407 }
4408 if (should_trim(fm, tsize))
4409 sys_trim(fm, 0);
4410 goto postaction;
4411 } else if (next == fm->dv) {
4412 size_t dsize = fm->dvsize += psize;
4413 fm->dv = p;
4414 set_size_and_pinuse_of_free_chunk(p, dsize);
4415 goto postaction;
4416 } else {
4417 size_t nsize = chunksize(next);
4418 psize += nsize;
4419 unlink_chunk(fm, next, nsize);
4420 set_size_and_pinuse_of_free_chunk(p, psize);
4421 if (p == fm->dv) {
4422 fm->dvsize = psize;
4423 goto postaction;
4424 }
4425 }
4426 } else
4427 set_free_with_pinuse(p, psize, next);
4428 insert_chunk(fm, p, psize);
4429 check_free_chunk(fm, p);
4430 goto postaction;
4431 }
4432 }
4433 erroraction:
4434 USAGE_ERROR_ACTION(fm, p);
4435 postaction:
4436 POSTACTION(fm);
4437 }
4438 }
4439#if !FOOTERS
4440#undef fm
4441#endif /* FOOTERS */
4442}
4443
4444void *
4445dlcalloc(size_t n_elements, size_t elem_size)
4446{
4447 void *mem;
4448 size_t req = 0;
4449 if (n_elements != 0) {
4450 req = n_elements * elem_size;
4451 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4452 (req / n_elements != elem_size))
4453 req = MAX_SIZE_T; /* force downstream failure on overflow */
4454 }
4455 mem = dlmalloc(req);
4456 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4457 memset(mem, 0, req);
4458 return mem;
4459}
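/*
  Note on the overflow check in dlcalloc above: the product is re-verified
  by division only when either operand has bits above 0xffff, because two
  factors that both fit in 16 bits cannot overflow even a 32-bit size_t.
  For example, on a 32-bit system a call like calloc(0x20000, 0x20000)
  would wrap the product to 0; the division test detects this and forces
  req to MAX_SIZE_T so the allocation fails cleanly.
*/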
4460
4461void *
4462dlrealloc(void *oldmem, size_t bytes)
4463{
4464 if (oldmem == 0)
4465 return dlmalloc(bytes);
4466#ifdef REALLOC_ZERO_BYTES_FREES
4467 if (bytes == 0) {
4468 dlfree(oldmem);
4469 return 0;
4470 }
4471#endif /* REALLOC_ZERO_BYTES_FREES */
4472 else {
4473#if ! FOOTERS
4474 mstate m = gm;
4475#else /* FOOTERS */
4476 mstate m = get_mstate_for(mem2chunk(oldmem));
4477 if (!ok_magic(m)) {
4478 USAGE_ERROR_ACTION(m, oldmem);
4479 return 0;
4480 }
4481#endif /* FOOTERS */
4482 return internal_realloc(m, oldmem, bytes);
4483 }
4484}
4485
4486void *
4487dlmemalign(size_t alignment, size_t bytes)
4488{
4489 return internal_memalign(gm, alignment, bytes);
4490}
4491
4492void **
4493dlindependent_calloc(size_t n_elements, size_t elem_size, void *chunks[])
4494{
4495 size_t sz = elem_size; /* serves as 1-element array */
4496 return ialloc(gm, n_elements, &sz, 3, chunks);
4497}
4498
4499void **
4500dlindependent_comalloc(size_t n_elements, size_t sizes[], void *chunks[])
4501{
4502 return ialloc(gm, n_elements, sizes, 0, chunks);
4503}
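/*
  A brief usage sketch for dlindependent_comalloc, disabled with #if 0;
  the struct, helper name, and element counts are made up for
  illustration.  One call produces a header plus two arrays as separate,
  independently freeable regions laid out close together.
*/
#if 0
struct example_header
{
    double *vec;
    int *tags;
};

static struct example_header *make_example(size_t n)
{
    size_t sizes[3];
    void *chunks[3];
    struct example_header *h;
    sizes[0] = sizeof(struct example_header);
    sizes[1] = n * sizeof(double);
    sizes[2] = n * sizeof(int);
    if (dlindependent_comalloc(3, sizes, chunks) == 0)
        return 0;               /* allocation failed */
    h = (struct example_header *) chunks[0];
    h->vec = (double *) chunks[1];
    h->tags = (int *) chunks[2];
    return h;                   /* each chunks[i] may later be passed to dlfree individually */
}
#endif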
4504
4505void *
4506dlvalloc(size_t bytes)
4507{
4508 size_t pagesz;
4509 init_mparams();
4510 pagesz = mparams.page_size;
4511 return dlmemalign(pagesz, bytes);
4512}
4513
4514void *
4515dlpvalloc(size_t bytes)
4516{
4517 size_t pagesz;
4518 init_mparams();
4519 pagesz = mparams.page_size;
4520 return dlmemalign(pagesz,
4521 (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE));
4522}
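/*
  The rounding in dlpvalloc is the usual power-of-two trick: with a
  4096-byte page, a request for 5000 bytes becomes
  (5000 + 4095) & ~4095 == 8192, i.e. two whole pages, while exact
  multiples of the page size pass through unchanged.
*/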
4523
4524int
4525dlmalloc_trim(size_t pad)
4526{
4527 int result = 0;
4528 if (!PREACTION(gm)) {
4529 result = sys_trim(gm, pad);
4530 POSTACTION(gm);
4531 }
4532 return result;
4533}
4534
4535size_t
4536dlmalloc_footprint(void)
4537{
4538 return gm->footprint;
4539}
4540
4541size_t
4542dlmalloc_max_footprint(void)
4543{
4544 return gm->max_footprint;
4545}
4546
4547#if !NO_MALLINFO
4548struct mallinfo
4549dlmallinfo(void)
4550{
4551 return internal_mallinfo(gm);
4552}
4553#endif /* NO_MALLINFO */
4554
4555void
4556dlmalloc_stats(void)
4557{
4558 internal_malloc_stats(gm);
4559}
4560
4561size_t
4562dlmalloc_usable_size(void *mem)
4563{
4564 if (mem != 0) {
4565 mchunkptr p = mem2chunk(mem);
4566 if (cinuse(p))
4567 return chunksize(p) - overhead_for(p);
4568 }
4569 return 0;
4570}
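/*
  Note: the value reported by dlmalloc_usable_size may exceed the size
  originally requested because of padding and bin granularity; for
  instance, with 8-byte size_t and 16-byte alignment a dlmalloc(20) can
  report 24 usable bytes.  Callers may use that extra space but should
  not rely on any particular amount being present.
*/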
4571
4572int
4573dlmallopt(int param_number, int value)
4574{
4575 return change_mparam(param_number, value);
4576}
4577
4578#endif /* !ONLY_MSPACES */
4579
4580/* ----------------------------- user mspaces ---------------------------- */
4581
4582#if MSPACES
4583
4584static mstate
4585init_user_mstate(char *tbase, size_t tsize)
4586{
4587 size_t msize = pad_request(sizeof(struct malloc_state));
4588 mchunkptr mn;
4589 mchunkptr msp = align_as_chunk(tbase);
4590 mstate m = (mstate) (chunk2mem(msp));
4591 memset(m, 0, msize);
4592 INITIAL_LOCK(&m->mutex);
4593 msp->head = (msize | PINUSE_BIT | CINUSE_BIT);
4594 m->seg.base = m->least_addr = tbase;
4595 m->seg.size = m->footprint = m->max_footprint = tsize;
4596 m->magic = mparams.magic;
4597 m->mflags = mparams.default_mflags;
4598 disable_contiguous(m);
4599 init_bins(m);
4600 mn = next_chunk(mem2chunk(m));
4601 init_top(m, mn, (size_t) ((tbase + tsize) - (char *) mn) - TOP_FOOT_SIZE);
4602 check_top_chunk(m, m->top);
4603 return m;
4604}
4605
4606mspace
4607create_mspace(size_t capacity, int locked)
4608{
4609 mstate m = 0;
4610 size_t msize = pad_request(sizeof(struct malloc_state));
4611 init_mparams(); /* Ensure pagesize etc initialized */
4612
4613 if (capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4614 size_t rs = ((capacity == 0) ? mparams.granularity :
4615 (capacity + TOP_FOOT_SIZE + msize));
4616 size_t tsize = granularity_align(rs);
4617 char *tbase = (char *) (CALL_MMAP(tsize));
4618 if (tbase != CMFAIL) {
4619 m = init_user_mstate(tbase, tsize);
4620 m->seg.sflags = IS_MMAPPED_BIT;
4621 set_lock(m, locked);
4622 }
4623 }
4624 return (mspace) m;
4625}
4626
4627mspace
4628create_mspace_with_base(void *base, size_t capacity, int locked)
4629{
4630 mstate m = 0;
4631 size_t msize = pad_request(sizeof(struct malloc_state));
4632 init_mparams(); /* Ensure pagesize etc initialized */
4633
4634 if (capacity > msize + TOP_FOOT_SIZE &&
4635 capacity < (size_t) - (msize + TOP_FOOT_SIZE + mparams.page_size)) {
4636 m = init_user_mstate((char *) base, capacity);
4637 m->seg.sflags = EXTERN_BIT;
4638 set_lock(m, locked);
4639 }
4640 return (mspace) m;
4641}
4642
4643size_t
4644destroy_mspace(mspace msp)
4645{
4646 size_t freed = 0;
4647 mstate ms = (mstate) msp;
4648 if (ok_magic(ms)) {
4649 msegmentptr sp = &ms->seg;
4650 while (sp != 0) {
4651 char *base = sp->base;
4652 size_t size = sp->size;
4653 flag_t flag = sp->sflags;
4654 sp = sp->next;
4655 if ((flag & IS_MMAPPED_BIT) && !(flag & EXTERN_BIT) &&
4656 CALL_MUNMAP(base, size) == 0)
4657 freed += size;
4658 }
4659 } else {
4660 USAGE_ERROR_ACTION(ms, ms);
4661 }
4662 return freed;
4663}
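/*
  A brief usage sketch for the mspace API, disabled with #if 0; the
  helper name, capacity, and request sizes are arbitrary illustration
  values.  A private mspace keeps its allocations entirely separate from
  the global malloc state, and destroying it releases every segment at
  once.
*/
#if 0
static void mspace_example(void)
{
    mspace msp = create_mspace(0, 0);   /* default capacity, no locking */
    if (msp != 0) {
        void *a = mspace_malloc(msp, 128);
        void *b = mspace_calloc(msp, 16, sizeof(int));
        mspace_free(msp, a);            /* individual frees work as usual */
        (void) b;                       /* ...or let destroy_mspace reclaim it */
        destroy_mspace(msp);            /* unmaps every segment of the space */
    }
}
#endif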
4664
4665/*
4666 mspace versions of routines are near-clones of the global
4667 versions. This is not so nice but better than the alternatives.
4668*/
4669
4670
4671void *
4672mspace_malloc(mspace msp, size_t bytes)
4673{
4674 mstate ms = (mstate) msp;
4675 if (!ok_magic(ms)) {
4676 USAGE_ERROR_ACTION(ms, ms);
4677 return 0;
4678 }
4679 if (!PREACTION(ms)) {
4680 void *mem;
4681 size_t nb;
4682 if (bytes <= MAX_SMALL_REQUEST) {
4683 bindex_t idx;
4684 binmap_t smallbits;
4685 nb = (bytes < MIN_REQUEST) ? MIN_CHUNK_SIZE : pad_request(bytes);
4686 idx = small_index(nb);
4687 smallbits = ms->smallmap >> idx;
4688
4689 if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */
4690 mchunkptr b, p;
4691 idx += ~smallbits & 1; /* Uses next bin if idx empty */
4692 b = smallbin_at(ms, idx);
4693 p = b->fd;
4694 assert(chunksize(p) == small_index2size(idx));
4695 unlink_first_small_chunk(ms, b, p, idx);
4696 set_inuse_and_pinuse(ms, p, small_index2size(idx));
4697 mem = chunk2mem(p);
4698 check_malloced_chunk(ms, mem, nb);
4699 goto postaction;
4700 }
4701
4702 else if (nb > ms->dvsize) {
4703 if (smallbits != 0) { /* Use chunk in next nonempty smallbin */
4704 mchunkptr b, p, r;
4705 size_t rsize;
4706 bindex_t i;
4707 binmap_t leftbits =
4708 (smallbits << idx) & left_bits(idx2bit(idx));
4709 binmap_t leastbit = least_bit(leftbits);
4710 compute_bit2idx(leastbit, i);
4711 b = smallbin_at(ms, i);
4712 p = b->fd;
4713 assert(chunksize(p) == small_index2size(i));
4714 unlink_first_small_chunk(ms, b, p, i);
4715 rsize = small_index2size(i) - nb;
4716 /* With 4-byte sizes the fit here can never be remainderless */
4717 if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE)
4718 set_inuse_and_pinuse(ms, p, small_index2size(i));
4719 else {
4720 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4721 r = chunk_plus_offset(p, nb);
4722 set_size_and_pinuse_of_free_chunk(r, rsize);
4723 replace_dv(ms, r, rsize);
4724 }
4725 mem = chunk2mem(p);
4726 check_malloced_chunk(ms, mem, nb);
4727 goto postaction;
4728 }
4729
4730 else if (ms->treemap != 0
4731 && (mem = tmalloc_small(ms, nb)) != 0) {
4732 check_malloced_chunk(ms, mem, nb);
4733 goto postaction;
4734 }
4735 }
4736 } else if (bytes >= MAX_REQUEST)
4737 nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */
4738 else {
4739 nb = pad_request(bytes);
4740 if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) {
4741 check_malloced_chunk(ms, mem, nb);
4742 goto postaction;
4743 }
4744 }
4745
4746 if (nb <= ms->dvsize) {
4747 size_t rsize = ms->dvsize - nb;
4748 mchunkptr p = ms->dv;
4749 if (rsize >= MIN_CHUNK_SIZE) { /* split dv */
4750 mchunkptr r = ms->dv = chunk_plus_offset(p, nb);
4751 ms->dvsize = rsize;
4752 set_size_and_pinuse_of_free_chunk(r, rsize);
4753 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4754 } else { /* exhaust dv */
4755 size_t dvs = ms->dvsize;
4756 ms->dvsize = 0;
4757 ms->dv = 0;
4758 set_inuse_and_pinuse(ms, p, dvs);
4759 }
4760 mem = chunk2mem(p);
4761 check_malloced_chunk(ms, mem, nb);
4762 goto postaction;
4763 }
4764
4765 else if (nb < ms->topsize) { /* Split top */
4766 size_t rsize = ms->topsize -= nb;
4767 mchunkptr p = ms->top;
4768 mchunkptr r = ms->top = chunk_plus_offset(p, nb);
4769 r->head = rsize | PINUSE_BIT;
4770 set_size_and_pinuse_of_inuse_chunk(ms, p, nb);
4771 mem = chunk2mem(p);
4772 check_top_chunk(ms, ms->top);
4773 check_malloced_chunk(ms, mem, nb);
4774 goto postaction;
4775 }
4776
4777 mem = sys_alloc(ms, nb);
4778
4779 postaction:
4780 POSTACTION(ms);
4781 return mem;
4782 }
4783
4784 return 0;
4785}
4786
4787void
4788mspace_free(mspace msp, void *mem)
4789{
4790 if (mem != 0) {
4791 mchunkptr p = mem2chunk(mem);
4792#if FOOTERS
4793 mstate fm = get_mstate_for(p);
4794#else /* FOOTERS */
4795 mstate fm = (mstate) msp;
4796#endif /* FOOTERS */
4797 if (!ok_magic(fm)) {
4798 USAGE_ERROR_ACTION(fm, p);
4799 return;
4800 }
4801 if (!PREACTION(fm)) {
4802 check_inuse_chunk(fm, p);
4803 if (RTCHECK(ok_address(fm, p) && ok_cinuse(p))) {
4804 size_t psize = chunksize(p);
4805 mchunkptr next = chunk_plus_offset(p, psize);
4806 if (!pinuse(p)) {
4807 size_t prevsize = p->prev_foot;
4808 if ((prevsize & IS_MMAPPED_BIT) != 0) {
4809 prevsize &= ~IS_MMAPPED_BIT;
4810 psize += prevsize + MMAP_FOOT_PAD;
4811 if (CALL_MUNMAP((char *) p - prevsize, psize) == 0)
4812 fm->footprint -= psize;
4813 goto postaction;
4814 } else {
4815 mchunkptr prev = chunk_minus_offset(p, prevsize);
4816 psize += prevsize;
4817 p = prev;
4818 if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */
4819 if (p != fm->dv) {
4820 unlink_chunk(fm, p, prevsize);
4821 } else if ((next->head & INUSE_BITS) ==
4822 INUSE_BITS) {
4823 fm->dvsize = psize;
4824 set_free_with_pinuse(p, psize, next);
4825 goto postaction;
4826 }
4827 } else
4828 goto erroraction;
4829 }
4830 }
4831
4832 if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) {
4833 if (!cinuse(next)) { /* consolidate forward */
4834 if (next == fm->top) {
4835 size_t tsize = fm->topsize += psize;
4836 fm->top = p;
4837 p->head = tsize | PINUSE_BIT;
4838 if (p == fm->dv) {
4839 fm->dv = 0;
4840 fm->dvsize = 0;
4841 }
4842 if (should_trim(fm, tsize))
4843 sys_trim(fm, 0);
4844 goto postaction;
4845 } else if (next == fm->dv) {
4846 size_t dsize = fm->dvsize += psize;
4847 fm->dv = p;
4848 set_size_and_pinuse_of_free_chunk(p, dsize);
4849 goto postaction;
4850 } else {
4851 size_t nsize = chunksize(next);
4852 psize += nsize;
4853 unlink_chunk(fm, next, nsize);
4854 set_size_and_pinuse_of_free_chunk(p, psize);
4855 if (p == fm->dv) {
4856 fm->dvsize = psize;
4857 goto postaction;
4858 }
4859 }
4860 } else
4861 set_free_with_pinuse(p, psize, next);
4862 insert_chunk(fm, p, psize);
4863 check_free_chunk(fm, p);
4864 goto postaction;
4865 }
4866 }
4867 erroraction:
4868 USAGE_ERROR_ACTION(fm, p);
4869 postaction:
4870 POSTACTION(fm);
4871 }
4872 }
4873}
4874
4875void *
4876mspace_calloc(mspace msp, size_t n_elements, size_t elem_size)
4877{
4878 void *mem;
4879 size_t req = 0;
4880 mstate ms = (mstate) msp;
4881 if (!ok_magic(ms)) {
4882 USAGE_ERROR_ACTION(ms, ms);
4883 return 0;
4884 }
4885 if (n_elements != 0) {
4886 req = n_elements * elem_size;
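        /* The division-based overflow check below is skipped when both
           operands fit in 16 bits, since such a product cannot overflow a
           size_t that is at least 32 bits wide. */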
4887 if (((n_elements | elem_size) & ~(size_t) 0xffff) &&
4888 (req / n_elements != elem_size))
4889 req = MAX_SIZE_T; /* force downstream failure on overflow */
4890 }
4891 mem = internal_malloc(ms, req);
4892 if (mem != 0 && calloc_must_clear(mem2chunk(mem)))
4893 memset(mem, 0, req);
4894 return mem;
4895}
4896
4897void *
4898mspace_realloc(mspace msp, void *oldmem, size_t bytes)
4899{
    if (oldmem == 0) {
        return mspace_malloc(msp, bytes);
    }
#ifdef REALLOC_ZERO_BYTES_FREES
    if (bytes == 0) {
        mspace_free(msp, oldmem);
        return 0;
    }
#endif /* REALLOC_ZERO_BYTES_FREES */
    {
4909#if FOOTERS
4910 mchunkptr p = mem2chunk(oldmem);
4911 mstate ms = get_mstate_for(p);
4912#else /* FOOTERS */
4913 mstate ms = (mstate) msp;
4914#endif /* FOOTERS */
4915 if (!ok_magic(ms)) {
4916 USAGE_ERROR_ACTION(ms, ms);
4917 return 0;
4918 }
4919 return internal_realloc(ms, oldmem, bytes);
4920 }
4921}
4922
4923void *
4924mspace_memalign(mspace msp, size_t alignment, size_t bytes)
4925{
4926 mstate ms = (mstate) msp;
4927 if (!ok_magic(ms)) {
4928 USAGE_ERROR_ACTION(ms, ms);
4929 return 0;
4930 }
4931 return internal_memalign(ms, alignment, bytes);
4932}
4933
4934void **
4935mspace_independent_calloc(mspace msp, size_t n_elements,
4936 size_t elem_size, void *chunks[])
4937{
4938 size_t sz = elem_size; /* serves as 1-element array */
4939 mstate ms = (mstate) msp;
4940 if (!ok_magic(ms)) {
4941 USAGE_ERROR_ACTION(ms, ms);
4942 return 0;
4943 }
4944 return ialloc(ms, n_elements, &sz, 3, chunks);
4945}
4946
4947void **
4948mspace_independent_comalloc(mspace msp, size_t n_elements,
4949 size_t sizes[], void *chunks[])
4950{
4951 mstate ms = (mstate) msp;
4952 if (!ok_magic(ms)) {
4953 USAGE_ERROR_ACTION(ms, ms);
4954 return 0;
4955 }
4956 return ialloc(ms, n_elements, sizes, 0, chunks);
4957}
4958
4959int
4960mspace_trim(mspace msp, size_t pad)
4961{
4962 int result = 0;
4963 mstate ms = (mstate) msp;
4964 if (ok_magic(ms)) {
4965 if (!PREACTION(ms)) {
4966 result = sys_trim(ms, pad);
4967 POSTACTION(ms);
4968 }
4969 } else {
4970 USAGE_ERROR_ACTION(ms, ms);
4971 }
4972 return result;
4973}
4974
4975void
4976mspace_malloc_stats(mspace msp)
4977{
4978 mstate ms = (mstate) msp;
4979 if (ok_magic(ms)) {
4980 internal_malloc_stats(ms);
4981 } else {
4982 USAGE_ERROR_ACTION(ms, ms);
4983 }
4984}
4985
size_t
mspace_footprint(mspace msp)
{
    size_t result = 0;
    mstate ms = (mstate) msp;
    if (ok_magic(ms)) {
        result = ms->footprint;
    } else {
        USAGE_ERROR_ACTION(ms, ms);
    }
    return result;
}
4997
4998
size_t
mspace_max_footprint(mspace msp)
{
    size_t result = 0;
    mstate ms = (mstate) msp;
    if (ok_magic(ms)) {
        result = ms->max_footprint;
    } else {
        USAGE_ERROR_ACTION(ms, ms);
    }
    return result;
}
5010
5011
5012#if !NO_MALLINFO
5013struct mallinfo
5014mspace_mallinfo(mspace msp)
5015{
5016 mstate ms = (mstate) msp;
5017 if (!ok_magic(ms)) {
5018 USAGE_ERROR_ACTION(ms, ms);
5019 }
5020 return internal_mallinfo(ms);
5021}
5022#endif /* NO_MALLINFO */
5023
5024int
5025mspace_mallopt(int param_number, int value)
5026{
5027 return change_mparam(param_number, value);
5028}
5029
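/*
   A minimal usage sketch for the mspace API above, assuming MSPACES is
   enabled and that create_mspace/destroy_mspace from earlier in this file
   are available: carve out an independent heap, allocate from it, and
   release the whole arena at once.

     mspace arena = create_mspace(0, 0);      // default capacity, no locking
     void *buf = mspace_malloc(arena, 1024);  // allocate from this arena only
     mspace_free(arena, buf);                 // optional; or skip frees and...
     destroy_mspace(arena);                   // ...reclaim everything at once
*/
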
5030#endif /* MSPACES */
5031
5032/* -------------------- Alternative MORECORE functions ------------------- */
5033
5034/*
5035 Guidelines for creating a custom version of MORECORE:
5036
5037 * For best performance, MORECORE should allocate in multiples of pagesize.
5038 * MORECORE may allocate more memory than requested. (Or even less,
5039 but this will usually result in a malloc failure.)
5040 * MORECORE must not allocate memory when given argument zero, but
5041 instead return one past the end address of memory from previous
5042 nonzero call.
5043 * For best performance, consecutive calls to MORECORE with positive
5044 arguments should return increasing addresses, indicating that
5045 space has been contiguously extended.
5046 * Even though consecutive calls to MORECORE need not return contiguous
5047 addresses, it must be OK for malloc'ed chunks to span multiple
5048 regions in those cases where they do happen to be contiguous.
5049 * MORECORE need not handle negative arguments -- it may instead
5050 just return MFAIL when given negative arguments.
5051 Negative arguments are always multiples of pagesize. MORECORE
5052 must not misinterpret negative args as large positive unsigned
5053 args. You can suppress all such calls from even occurring by defining
    MORECORE_CANNOT_TRIM. (A second minimal sketch follows the example below.)
5055
5056 As an example alternative MORECORE, here is a custom allocator
5057 kindly contributed for pre-OSX macOS. It uses virtually but not
5058 necessarily physically contiguous non-paged memory (locked in,
5059 present and won't get swapped out). You can use it by uncommenting
5060 this section, adding some #includes, and setting up the appropriate
5061 defines above:
5062
5063 #define MORECORE osMoreCore
5064
5065 There is also a shutdown routine that should somehow be called for
5066 cleanup upon program exit.
5067
5068 #define MAX_POOL_ENTRIES 100
5069 #define MINIMUM_MORECORE_SIZE (64 * 1024U)
5070 static int next_os_pool;
5071 void *our_os_pools[MAX_POOL_ENTRIES];
5072
5073 void *osMoreCore(int size)
5074 {
5075 void *ptr = 0;
5076 static void *sbrk_top = 0;
5077
5078 if (size > 0)
5079 {
5080 if (size < MINIMUM_MORECORE_SIZE)
5081 size = MINIMUM_MORECORE_SIZE;
5082 if (CurrentExecutionLevel() == kTaskLevel)
5083 ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0);
5084 if (ptr == 0)
5085 {
5086 return (void *) MFAIL;
5087 }
5088 // save ptrs so they can be freed during cleanup
5089 our_os_pools[next_os_pool] = ptr;
5090 next_os_pool++;
5091 ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK);
5092 sbrk_top = (char *) ptr + size;
5093 return ptr;
5094 }
5095 else if (size < 0)
5096 {
5097 // we don't currently support shrink behavior
5098 return (void *) MFAIL;
5099 }
5100 else
5101 {
5102 return sbrk_top;
5103 }
5104 }
5105
5106 // cleanup any allocated memory pools
5107 // called as last thing before shutting down driver
5108
5109 void osCleanupMem(void)
5110 {
5111 void **ptr;
5112
5113 for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++)
5114 if (*ptr)
5115 {
5116 PoolDeallocate(*ptr);
5117 *ptr = 0;
5118 }
5119 }
5120
5121*/
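
/*
   A second, even simpler sketch (not from the original distribution): a
   MORECORE that hands out space from a fixed static arena. The names
   static_morecore, morecore_arena and morecore_brk are illustrative only.
   Per the guidelines above, a zero argument returns one past the end of the
   space handed out so far, negative arguments simply fail (as if
   MORECORE_CANNOT_TRIM were defined), and positive requests return
   contiguous space until the arena is exhausted, after which it returns
   MFAIL.

   #define MORECORE static_morecore
   #define MORECORE_CANNOT_TRIM
   #define MORECORE_ARENA_SIZE (1024 * 1024)

   static char morecore_arena[MORECORE_ARENA_SIZE];
   static size_t morecore_brk = 0;

   static void *static_morecore(int size)
   {
       if (size == 0) {
           return morecore_arena + morecore_brk;   // one past current end
       }
       if (size < 0 || (size_t) size > MORECORE_ARENA_SIZE - morecore_brk) {
           return (void *) MFAIL;                  // cannot trim or grow further
       }
       {
           void *ptr = morecore_arena + morecore_brk;
           morecore_brk += (size_t) size;
           return ptr;
       }
   }
*/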
5122
5123
5124/* -----------------------------------------------------------------------
5125History:
5126 V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee)
5127 * Add max_footprint functions
5128 * Ensure all appropriate literals are size_t
5129 * Fix conditional compilation problem for some #define settings
5130 * Avoid concatenating segments with the one provided
5131 in create_mspace_with_base
5132 * Rename some variables to avoid compiler shadowing warnings
5133 * Use explicit lock initialization.
5134 * Better handling of sbrk interference.
5135 * Simplify and fix segment insertion, trimming and mspace_destroy
5136 * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x
5137 * Thanks especially to Dennis Flanagan for help on these.
5138
5139 V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee)
5140 * Fix memalign brace error.
5141
5142 V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee)
5143 * Fix improper #endif nesting in C++
5144 * Add explicit casts needed for C++
5145
5146 V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee)
5147 * Use trees for large bins
5148 * Support mspaces
5149 * Use segments to unify sbrk-based and mmap-based system allocation,
5150 removing need for emulation on most platforms without sbrk.
5151 * Default safety checks
5152 * Optional footer checks. Thanks to William Robertson for the idea.
5153 * Internal code refactoring
5154 * Incorporate suggestions and platform-specific changes.
5155 Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas,
5156 Aaron Bachmann, Emery Berger, and others.
5157 * Speed up non-fastbin processing enough to remove fastbins.
5158 * Remove useless cfree() to avoid conflicts with other apps.
5159 * Remove internal memcpy, memset. Compilers handle builtins better.
5160 * Remove some options that no one ever used and rename others.
5161
5162 V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5163 * Fix malloc_state bitmap array misdeclaration
5164
5165 V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee)
5166 * Allow tuning of FIRST_SORTED_BIN_SIZE
5167 * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte.
5168 * Better detection and support for non-contiguousness of MORECORE.
5169 Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger
      * Bypass most of malloc if no frees.  Thanks to Emery Berger.
      * Fix freeing of old top non-contiguous chunk in sysmalloc.
5172 * Raised default trim and map thresholds to 256K.
5173 * Fix mmap-related #defines. Thanks to Lubos Lunak.
5174 * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield.
5175 * Branch-free bin calculation
5176 * Default trim and mmap thresholds now 256K.
5177
5178 V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee)
5179 * Introduce independent_comalloc and independent_calloc.
5180 Thanks to Michael Pachos for motivation and help.
5181 * Make optional .h file available
5182 * Allow > 2GB requests on 32bit systems.
5183 * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>.
5184 Thanks also to Andreas Mueller <a.mueller at paradatec.de>,
5185 and Anonymous.
5186 * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for
5187 helping test this.)
5188 * memalign: check alignment arg
5189 * realloc: don't try to shift chunks backwards, since this
5190 leads to more fragmentation in some programs and doesn't
5191 seem to help in any others.
5192 * Collect all cases in malloc requiring system memory into sysmalloc
5193 * Use mmap as backup to sbrk
5194 * Place all internal state in malloc_state
5195 * Introduce fastbins (although similar to 2.5.1)
5196 * Many minor tunings and cosmetic improvements
5197 * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK
5198 * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS
5199 Thanks to Tony E. Bennett <tbennett@nvidia.com> and others.
5200 * Include errno.h to support default failure action.
5201
5202 V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee)
5203 * return null for negative arguments
5204 * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com>
5205 * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h'
5206 (e.g. WIN32 platforms)
5207 * Cleanup header file inclusion for WIN32 platforms
5208 * Cleanup code to avoid Microsoft Visual C++ compiler complaints
5209 * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing
5210 memory allocation routines
5211 * Set 'malloc_getpagesize' for WIN32 platforms (needs more work)
5212 * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to
5213 usage of 'assert' in non-WIN32 code
5214 * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to
5215 avoid infinite loop
5216 * Always call 'fREe()' rather than 'free()'
5217
5218 V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee)
5219 * Fixed ordering problem with boundary-stamping
5220
5221 V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee)
5222 * Added pvalloc, as recommended by H.J. Liu
5223 * Added 64bit pointer support mainly from Wolfram Gloger
5224 * Added anonymously donated WIN32 sbrk emulation
5225 * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen
5226 * malloc_extend_top: fix mask error that caused wastage after
5227 foreign sbrks
5228 * Add linux mremap support code from HJ Liu
5229
5230 V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee)
5231 * Integrated most documentation with the code.
5232 * Add support for mmap, with help from
5233 Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5234 * Use last_remainder in more cases.
5235 * Pack bins using idea from colin@nyx10.cs.du.edu
      * Use ordered bins instead of best-fit threshold
5237 * Eliminate block-local decls to simplify tracing and debugging.
5238 * Support another case of realloc via move into top
      * Fix error occurring when initial sbrk_base not word-aligned.
5240 * Rely on page size for units instead of SBRK_UNIT to
5241 avoid surprises about sbrk alignment conventions.
5242 * Add mallinfo, mallopt. Thanks to Raymond Nijssen
5243 (raymond@es.ele.tue.nl) for the suggestion.
5244 * Add `pad' argument to malloc_trim and top_pad mallopt parameter.
5245 * More precautions for cases where other routines call sbrk,
5246 courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de).
5247 * Added macros etc., allowing use in linux libc from
5248 H.J. Lu (hjl@gnu.ai.mit.edu)
5249 * Inverted this history list
5250
5251 V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee)
5252 * Re-tuned and fixed to behave more nicely with V2.6.0 changes.
5253 * Removed all preallocation code since under current scheme
5254 the work required to undo bad preallocations exceeds
5255 the work saved in good cases for most test programs.
5256 * No longer use return list or unconsolidated bins since
5257 no scheme using them consistently outperforms those that don't
5258 given above changes.
5259 * Use best fit for very large chunks to prevent some worst-cases.
5260 * Added some support for debugging
5261
5262 V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee)
5263 * Removed footers when chunks are in use. Thanks to
5264 Paul Wilson (wilson@cs.texas.edu) for the suggestion.
5265
5266 V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee)
5267 * Added malloc_trim, with help from Wolfram Gloger
5268 (wmglo@Dent.MED.Uni-Muenchen.DE).
5269
5270 V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g)
5271
5272 V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g)
5273 * realloc: try to expand in both directions
5274 * malloc: swap order of clean-bin strategy;
5275 * realloc: only conditionally expand backwards
5276 * Try not to scavenge used bins
5277 * Use bin counts as a guide to preallocation
5278 * Occasionally bin return list chunks in first scan
5279 * Add a few optimizations from colin@nyx10.cs.du.edu
5280
5281 V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g)
5282 * faster bin computation & slightly different binning
5283 * merged all consolidations to one part of malloc proper
5284 (eliminating old malloc_find_space & malloc_clean_bin)
5285 * Scan 2 returns chunks (not just 1)
5286 * Propagate failure in realloc if malloc returns 0
5287 * Add stuff to allow compilation on non-ANSI compilers
5288 from kpv@research.att.com
5289
5290 V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu)
5291 * removed potential for odd address access in prev_chunk
5292 * removed dependency on getpagesize.h
5293 * misc cosmetics and a bit more internal documentation
5294 * anticosmetics: mangled names in macros to evade debugger strangeness
5295 * tested on sparc, hp-700, dec-mips, rs6000
5296 with gcc & native cc (hp, dec only) allowing
5297 Detlefs & Zorn comparison study (in SIGPLAN Notices.)
5298
5299 Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu)
5300 * Based loosely on libg++-1.2X malloc. (It retains some of the overall
5301 structure of old version, but most details differ.)
5302
5303*/
5304
5305#endif /* !HAVE_MALLOC */
5306
5307#ifdef HAVE_MALLOC
5308#define real_malloc malloc
5309#define real_calloc calloc
5310#define real_realloc realloc
5311#define real_free free
5312#else
5313#define real_malloc dlmalloc
5314#define real_calloc dlcalloc
5315#define real_realloc dlrealloc
5316#define real_free dlfree
5317#endif
5318
5319/* Memory functions used by SDL that can be replaced by the application */
5320static struct
5321{
5322 SDL_malloc_func malloc_func;
5323 SDL_calloc_func calloc_func;
5324 SDL_realloc_func realloc_func;
5325 SDL_free_func free_func;
5326 SDL_atomic_t num_allocations;
5327} s_mem = {
5328 real_malloc, real_calloc, real_realloc, real_free, { 0 }
5329};
5330
5331void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func,
5332 SDL_calloc_func *calloc_func,
5333 SDL_realloc_func *realloc_func,
5334 SDL_free_func *free_func)
5335{
5336 if (malloc_func) {
5337 *malloc_func = s_mem.malloc_func;
5338 }
5339 if (calloc_func) {
5340 *calloc_func = s_mem.calloc_func;
5341 }
5342 if (realloc_func) {
5343 *realloc_func = s_mem.realloc_func;
5344 }
5345 if (free_func) {
5346 *free_func = s_mem.free_func;
5347 }
5348}
5349
5350int SDL_SetMemoryFunctions(SDL_malloc_func malloc_func,
5351 SDL_calloc_func calloc_func,
5352 SDL_realloc_func realloc_func,
5353 SDL_free_func free_func)
5354{
5355 if (!malloc_func) {
5356 return SDL_InvalidParamError("malloc_func");
5357 }
5358 if (!calloc_func) {
5359 return SDL_InvalidParamError("calloc_func");
5360 }
5361 if (!realloc_func) {
5362 return SDL_InvalidParamError("realloc_func");
5363 }
5364 if (!free_func) {
5365 return SDL_InvalidParamError("free_func");
5366 }
5367
5368 s_mem.malloc_func = malloc_func;
5369 s_mem.calloc_func = calloc_func;
5370 s_mem.realloc_func = realloc_func;
5371 s_mem.free_func = free_func;
5372 return 0;
5373}
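
/*
   Example (application side), a sketch only: fetch whatever allocator SDL is
   currently using with SDL_GetMemoryFunctions(), wrap each function with a
   logging version, and install the wrappers with SDL_SetMemoryFunctions().
   The logging_* names are illustrative and not part of SDL.

   static SDL_malloc_func orig_malloc;
   static SDL_calloc_func orig_calloc;
   static SDL_realloc_func orig_realloc;
   static SDL_free_func orig_free;

   static void * SDLCALL logging_malloc(size_t size)
   {
       SDL_Log("malloc(%u)", (unsigned int) size);
       return orig_malloc(size);
   }
   static void * SDLCALL logging_calloc(size_t nmemb, size_t size)
   {
       SDL_Log("calloc(%u, %u)", (unsigned int) nmemb, (unsigned int) size);
       return orig_calloc(nmemb, size);
   }
   static void * SDLCALL logging_realloc(void *mem, size_t size)
   {
       SDL_Log("realloc(%p, %u)", mem, (unsigned int) size);
       return orig_realloc(mem, size);
   }
   static void SDLCALL logging_free(void *mem)
   {
       SDL_Log("free(%p)", mem);
       orig_free(mem);
   }

   // Call once, ideally before SDL_Init(), so allocations and frees stay
   // paired with the same underlying functions:
   SDL_GetMemoryFunctions(&orig_malloc, &orig_calloc, &orig_realloc, &orig_free);
   SDL_SetMemoryFunctions(logging_malloc, logging_calloc,
                          logging_realloc, logging_free);
*/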
5374
5375int SDL_GetNumAllocations(void)
5376{
5377 return SDL_AtomicGet(&s_mem.num_allocations);
5378}
5379
5380void *SDL_malloc(size_t size)
5381{
5382 void *mem;
5383
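    /* Promote zero-byte requests to a single byte so a successful call
       always returns a unique pointer that can later be passed to
       SDL_free(). */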
5384 if (!size) {
5385 size = 1;
5386 }
5387
5388 mem = s_mem.malloc_func(size);
5389 if (mem) {
5390 SDL_AtomicIncRef(&s_mem.num_allocations);
5391 }
5392 return mem;
5393}
5394
5395void *SDL_calloc(size_t nmemb, size_t size)
5396{
5397 void *mem;
5398
5399 if (!nmemb || !size) {
5400 nmemb = 1;
5401 size = 1;
5402 }
5403
5404 mem = s_mem.calloc_func(nmemb, size);
5405 if (mem) {
5406 SDL_AtomicIncRef(&s_mem.num_allocations);
5407 }
5408 return mem;
5409}
5410
5411void *SDL_realloc(void *ptr, size_t size)
5412{
5413 void *mem;
5414
5415 if (!ptr && !size) {
5416 size = 1;
5417 }
5418
5419 mem = s_mem.realloc_func(ptr, size);
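    /* Count this as a new allocation only when there was no previous
       pointer; resizing an existing block leaves the count unchanged. */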
5420 if (mem && !ptr) {
5421 SDL_AtomicIncRef(&s_mem.num_allocations);
5422 }
5423 return mem;
5424}
5425
5426void SDL_free(void *ptr)
5427{
5428 if (!ptr) {
5429 return;
5430 }
5431
5432 s_mem.free_func(ptr);
5433 (void)SDL_AtomicDecRef(&s_mem.num_allocations);
5434}
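
/*
   Because the wrappers above track the number of outstanding allocations
   made through SDL_malloc/SDL_calloc/SDL_realloc, an application can use
   SDL_GetNumAllocations() as a rough leak check at shutdown. A sketch:

     SDL_Quit();
     if (SDL_GetNumAllocations() > 0) {
         SDL_Log("%d SDL allocations still outstanding", SDL_GetNumAllocations());
     }
*/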
5435
5436/* vi: set ts=4 sw=4 expandtab: */
5437