1 | /* |
2 | Simple DirectMedia Layer |
3 | Copyright (C) 1997-2025 Sam Lantinga <slouken@libsdl.org> |
4 | |
5 | This software is provided 'as-is', without any express or implied |
6 | warranty. In no event will the authors be held liable for any damages |
7 | arising from the use of this software. |
8 | |
9 | Permission is granted to anyone to use this software for any purpose, |
10 | including commercial applications, and to alter it and redistribute it |
11 | freely, subject to the following restrictions: |
12 | |
13 | 1. The origin of this software must not be misrepresented; you must not |
14 | claim that you wrote the original software. If you use this software |
15 | in a product, an acknowledgment in the product documentation would be |
16 | appreciated but is not required. |
17 | 2. Altered source versions must be plainly marked as such, and must not be |
18 | misrepresented as being the original software. |
19 | 3. This notice may not be removed or altered from any source distribution. |
20 | */ |
21 | #include "SDL_internal.h" |
22 | |
23 | /* This file contains portable memory management functions for SDL */ |
24 | |
25 | #ifndef HAVE_MALLOC |
26 | #define LACKS_SYS_TYPES_H |
27 | #define LACKS_STDIO_H |
28 | #define LACKS_STRINGS_H |
29 | #define LACKS_STRING_H |
30 | #define LACKS_STDLIB_H |
31 | #define ABORT |
32 | #define NO_MALLOC_STATS 1 |
33 | #define USE_LOCKS 1 |
34 | #define USE_DL_PREFIX |
35 | |
36 | /* |
37 | This is a version (aka dlmalloc) of malloc/free/realloc written by |
38 | Doug Lea and released to the public domain, as explained at |
39 | http://creativecommons.org/publicdomain/zero/1.0/ Send questions, |
40 | comments, complaints, performance data, etc to dl@cs.oswego.edu |
41 | |
42 | * Version 2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea |
43 | Note: There may be an updated version of this malloc obtainable at |
44 | ftp://gee.cs.oswego.edu/pub/misc/malloc.c |
45 | Check before installing! |
46 | |
47 | * Quickstart |
48 | |
49 | This library is all in one file to simplify the most common usage: |
50 | ftp it, compile it (-O3), and link it into another program. All of |
51 | the compile-time options default to reasonable values for use on |
52 | most platforms. You might later want to step through various |
53 | compile-time and dynamic tuning options. |
54 | |
55 | For convenience, an include file for code using this malloc is at: |
56 | ftp://gee.cs.oswego.edu/pub/misc/malloc-2.8.6.h |
57 | You don't really need this .h file unless you call functions not |
58 | defined in your system include files. The .h file contains only the |
59 | excerpts from this file needed for using this malloc on ANSI C/C++ |
60 | systems, so long as you haven't changed compile-time options about |
61 | naming and tuning parameters. If you do, then you can create your |
62 | own malloc.h that does include all settings by cutting at the point |
63 | indicated below. Note that you may already by default be using a C |
64 | library containing a malloc that is based on some version of this |
65 | malloc (for example in linux). You might still want to use the one |
66 | in this file to customize settings or to avoid overheads associated |
67 | with library versions. |
68 | |
69 | * Vital statistics: |
70 | |
71 | Supported pointer/size_t representation: 4 or 8 bytes |
72 | size_t MUST be an unsigned type of the same width as |
73 | pointers. (If you are using an ancient system that declares |
74 | size_t as a signed type, or need it to be a different width |
75 | than pointers, you can use a previous release of this malloc |
76 | (e.g. 2.7.2) supporting these.) |
77 | |
78 | Alignment: 8 bytes (minimum) |
79 | This suffices for nearly all current machines and C compilers. |
80 | However, you can define MALLOC_ALIGNMENT to be wider than this |
if necessary (up to 128 bytes), at the expense of using more space.
82 | |
Minimum overhead per allocated chunk: 4 or 8 bytes (if 4-byte sizes)
8 or 16 bytes (if 8-byte sizes)
85 | Each malloced chunk has a hidden word of overhead holding size |
86 | and status information, and additional cross-check word |
87 | if FOOTERS is defined. |
88 | |
89 | Minimum allocated size: 4-byte ptrs: 16 bytes (including overhead) |
90 | 8-byte ptrs: 32 bytes (including overhead) |
91 | |
92 | Even a request for zero bytes (i.e., malloc(0)) returns a |
93 | pointer to something of the minimum allocatable size. |
The maximum overhead wastage (i.e., the number of extra bytes
allocated beyond what was requested in malloc) is less than or equal
96 | to the minimum size, except for requests >= mmap_threshold that |
97 | are serviced via mmap(), where the worst case wastage is about |
98 | 32 bytes plus the remainder from a system page (the minimal |
99 | mmap unit); typically 4096 or 8192 bytes. |
100 | |
101 | Security: static-safe; optionally more or less |
102 | The "security" of malloc refers to the ability of malicious |
103 | code to accentuate the effects of errors (for example, freeing |
104 | space that is not currently malloc'ed or overwriting past the |
105 | ends of chunks) in code that calls malloc. This malloc |
guarantees not to modify any memory locations below the base of
the heap, i.e., static variables, even in the presence of usage
108 | errors. The routines additionally detect most improper frees |
109 | and reallocs. All this holds as long as the static bookkeeping |
110 | for malloc itself is not corrupted by some other means. This |
111 | is only one aspect of security -- these checks do not, and |
112 | cannot, detect all possible programming errors. |
113 | |
114 | If FOOTERS is defined nonzero, then each allocated chunk |
115 | carries an additional check word to verify that it was malloced |
116 | from its space. These check words are the same within each |
117 | execution of a program using malloc, but differ across |
118 | executions, so externally crafted fake chunks cannot be |
freed. This improves security by rejecting frees/reallocs that
could corrupt heap memory, in addition to the always-on checks
that prevent writes to statics. This may further improve
security at the expense of time and space overhead. (Note that
123 | FOOTERS may also be worth using with MSPACES.) |
124 | |
125 | By default detected errors cause the program to abort (calling |
126 | "abort()"). You can override this to instead proceed past |
127 | errors by defining PROCEED_ON_ERROR. In this case, a bad free |
128 | has no effect, and a malloc that encounters a bad address |
129 | caused by user overwrites will ignore the bad address by |
130 | dropping pointers and indices to all known memory. This may |
131 | be appropriate for programs that should continue if at all |
132 | possible in the face of programming errors, although they may |
133 | run out of memory because dropped memory is never reclaimed. |
134 | |
135 | If you don't like either of these options, you can define |
136 | CORRUPTION_ERROR_ACTION and USAGE_ERROR_ACTION to do anything |
else. And if you are sure that your program using malloc has
138 | no errors or vulnerabilities, you can define INSECURE to 1, |
139 | which might (or might not) provide a small performance improvement. |
140 | |
141 | It is also possible to limit the maximum total allocatable |
142 | space, using malloc_set_footprint_limit. This is not |
143 | designed as a security feature in itself (calls to set limits |
144 | are not screened or privileged), but may be useful as one |
145 | aspect of a secure implementation. |
146 | |
147 | Thread-safety: NOT thread-safe unless USE_LOCKS defined non-zero |
148 | When USE_LOCKS is defined, each public call to malloc, free, |
149 | etc is surrounded with a lock. By default, this uses a plain |
pthread mutex, win32 critical section, or a spin-lock if
151 | available for the platform and not disabled by setting |
152 | USE_SPIN_LOCKS=0. However, if USE_RECURSIVE_LOCKS is defined, |
153 | recursive versions are used instead (which are not required for |
154 | base functionality but may be needed in layered extensions). |
155 | Using a global lock is not especially fast, and can be a major |
156 | bottleneck. It is designed only to provide minimal protection |
157 | in concurrent environments, and to provide a basis for |
158 | extensions. If you are using malloc in a concurrent program, |
159 | consider instead using nedmalloc |
160 | (http://www.nedprod.com/programs/portable/nedmalloc/) or |
161 | ptmalloc (See http://www.malloc.de), which are derived from |
162 | versions of this malloc. |
163 | |
164 | System requirements: Any combination of MORECORE and/or MMAP/MUNMAP |
165 | This malloc can use unix sbrk or any emulation (invoked using |
166 | the CALL_MORECORE macro) and/or mmap/munmap or any emulation |
167 | (invoked using CALL_MMAP/CALL_MUNMAP) to get and release system |
168 | memory. On most unix systems, it tends to work best if both |
169 | MORECORE and MMAP are enabled. On Win32, it uses emulations |
170 | based on VirtualAlloc. It also uses common C library functions |
171 | like memset. |
172 | |
173 | Compliance: I believe it is compliant with the Single Unix Specification |
174 | (See http://www.unix.org). Also SVID/XPG, ANSI C, and probably |
175 | others as well. |
176 | |
177 | * Overview of algorithms |
178 | |
179 | This is not the fastest, most space-conserving, most portable, or |
180 | most tunable malloc ever written. However it is among the fastest |
181 | while also being among the most space-conserving, portable and |
182 | tunable. Consistent balance across these factors results in a good |
183 | general-purpose allocator for malloc-intensive programs. |
184 | |
185 | In most ways, this malloc is a best-fit allocator. Generally, it |
186 | chooses the best-fitting existing chunk for a request, with ties |
187 | broken in approximately least-recently-used order. (This strategy |
188 | normally maintains low fragmentation.) However, for requests less |
than 256 bytes, it deviates from best-fit when there is not an
190 | exactly fitting available chunk by preferring to use space adjacent |
191 | to that used for the previous small request, as well as by breaking |
192 | ties in approximately most-recently-used order. (These enhance |
193 | locality of series of small allocations.) And for very large requests |
194 | (>= 256Kb by default), it relies on system memory mapping |
195 | facilities, if supported. (This helps avoid carrying around and |
196 | possibly fragmenting memory used only for large chunks.) |
197 | |
198 | All operations (except malloc_stats and mallinfo) have execution |
199 | times that are bounded by a constant factor of the number of bits in |
200 | a size_t, not counting any clearing in calloc or copying in realloc, |
201 | or actions surrounding MORECORE and MMAP that have times |
202 | proportional to the number of non-contiguous regions returned by |
203 | system allocation routines, which is often just 1. In real-time |
204 | applications, you can optionally suppress segment traversals using |
205 | NO_SEGMENT_TRAVERSAL, which assures bounded execution even when |
206 | system allocators return non-contiguous spaces, at the typical |
207 | expense of carrying around more memory and increased fragmentation. |
208 | |
209 | The implementation is not very modular and seriously overuses |
210 | macros. Perhaps someday all C compilers will do as good a job |
211 | inlining modular code as can now be done by brute-force expansion, |
but for now, enough of them seem not to.
213 | |
214 | Some compilers issue a lot of warnings about code that is |
215 | dead/unreachable only on some platforms, and also about intentional |
216 | uses of negation on unsigned types. All known cases of each can be |
217 | ignored. |
218 | |
219 | For a longer but out of date high-level description, see |
220 | http://gee.cs.oswego.edu/dl/html/malloc.html |
221 | |
222 | * MSPACES |
223 | If MSPACES is defined, then in addition to malloc, free, etc., |
224 | this file also defines mspace_malloc, mspace_free, etc. These |
225 | are versions of malloc routines that take an "mspace" argument |
226 | obtained using create_mspace, to control all internal bookkeeping. |
227 | If ONLY_MSPACES is defined, only these versions are compiled. |
228 | So if you would like to use this allocator for only some allocations, |
229 | and your system malloc for others, you can compile with |
230 | ONLY_MSPACES and then do something like... |
231 | static mspace mymspace = create_mspace(0,0); // for example |
232 | #define mymalloc(bytes) mspace_malloc(mymspace, bytes) |
233 | |
234 | (Note: If you only need one instance of an mspace, you can instead |
235 | use "USE_DL_PREFIX" to relabel the global malloc.) |
236 | |
237 | You can similarly create thread-local allocators by storing |
238 | mspaces as thread-locals. For example: |
239 | static __thread mspace tlms = 0; |
240 | void* tlmalloc(size_t bytes) { |
241 | if (tlms == 0) tlms = create_mspace(0, 0); |
242 | return mspace_malloc(tlms, bytes); |
243 | } |
244 | void tlfree(void* mem) { mspace_free(tlms, mem); } |
245 | |
246 | Unless FOOTERS is defined, each mspace is completely independent. |
247 | You cannot allocate from one and free to another (although |
248 | conformance is only weakly checked, so usage errors are not always |
249 | caught). If FOOTERS is defined, then each chunk carries around a tag |
250 | indicating its originating mspace, and frees are directed to their |
251 | originating spaces. Normally, this requires use of locks. |
252 | |
253 | ------------------------- Compile-time options --------------------------- |
254 | |
255 | Be careful in setting #define values for numerical constants of type |
256 | size_t. On some systems, literal values are not automatically extended |
to size_t precision unless they are explicitly cast. You can also
258 | use the symbolic values MAX_SIZE_T, SIZE_T_ONE, etc below. |
259 | |
260 | WIN32 default: defined if _WIN32 defined |
261 | Defining WIN32 sets up defaults for MS environment and compilers. |
262 | Otherwise defaults are for unix. Beware that there seem to be some |
263 | cases where this malloc might not be a pure drop-in replacement for |
Win32 malloc: Random-looking failures from Win32 GDI APIs (e.g.,
265 | SetDIBits()) may be due to bugs in some video driver implementations |
266 | when pixel buffers are malloc()ed, and the region spans more than |
267 | one VirtualAlloc()ed region. Because dlmalloc uses a small (64Kb) |
268 | default granularity, pixel buffers may straddle virtual allocation |
269 | regions more often than when using the Microsoft allocator. You can |
270 | avoid this by using VirtualAlloc() and VirtualFree() for all pixel |
271 | buffers rather than using malloc(). If this is not possible, |
272 | recompile this malloc with a larger DEFAULT_GRANULARITY. Note: |
273 | in cases where MSC and gcc (cygwin) are known to differ on WIN32, |
274 | conditions use _MSC_VER to distinguish them. |
275 | |
276 | DLMALLOC_EXPORT default: extern |
277 | Defines how public APIs are declared. If you want to export via a |
278 | Windows DLL, you might define this as |
279 | #define DLMALLOC_EXPORT extern __declspec(dllexport) |
280 | If you want a POSIX ELF shared object, you might use |
281 | #define DLMALLOC_EXPORT extern __attribute__((visibility("default"))) |
282 | |
283 | MALLOC_ALIGNMENT default: (size_t)(2 * sizeof(void *)) |
284 | Controls the minimum alignment for malloc'ed chunks. It must be a |
285 | power of two and at least 8, even on machines for which smaller |
286 | alignments would suffice. It may be defined as larger than this |
287 | though. Note however that code and data structures are optimized for |
288 | the case of 8-byte alignment. |
289 | |
290 | MSPACES default: 0 (false) |
291 | If true, compile in support for independent allocation spaces. |
292 | This is only supported if HAVE_MMAP is true. |
293 | |
294 | ONLY_MSPACES default: 0 (false) |
295 | If true, only compile in mspace versions, not regular versions. |
296 | |
297 | USE_LOCKS default: 0 (false) |
298 | Causes each call to each public routine to be surrounded with |
299 | pthread or WIN32 mutex lock/unlock. (If set true, this can be |
300 | overridden on a per-mspace basis for mspace versions.) If set to a |
301 | non-zero value other than 1, locks are used, but their |
302 | implementation is left out, so lock functions must be supplied manually, |
303 | as described below. |
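For example, a minimal sketch of user-supplied locks built on
pthreads (just one possible choice of bodies for the lock macros
this file expects, such as MLOCK_T and ACQUIRE_LOCK; adapt to
your platform):
  #define MLOCK_T          pthread_mutex_t
  #define INITIAL_LOCK(lk) pthread_mutex_init(lk, NULL)
  #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk)
  #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk)
  #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk)
  #define TRY_LOCK(lk)     (pthread_mutex_trylock(lk) == 0)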
304 | |
305 | USE_SPIN_LOCKS default: 1 iff USE_LOCKS and spin locks available |
306 | If true, uses custom spin locks for locking. This is currently |
supported only for gcc >= 4.1, older gccs on x86 platforms, and recent
308 | MS compilers. Otherwise, posix locks or win32 critical sections are |
309 | used. |
310 | |
311 | USE_RECURSIVE_LOCKS default: not defined |
312 | If defined nonzero, uses recursive (aka reentrant) locks, otherwise |
313 | uses plain mutexes. This is not required for malloc proper, but may |
314 | be needed for layered allocators such as nedmalloc. |
315 | |
316 | LOCK_AT_FORK default: not defined |
317 | If defined nonzero, performs pthread_atfork upon initialization |
318 | to initialize child lock while holding parent lock. The implementation |
319 | assumes that pthread locks (not custom locks) are being used. In other |
320 | cases, you may need to customize the implementation. |
321 | |
322 | FOOTERS default: 0 |
323 | If true, provide extra checking and dispatching by placing |
324 | information in the footers of allocated chunks. This adds |
325 | space and time overhead. |
326 | |
327 | INSECURE default: 0 |
328 | If true, omit checks for usage errors and heap space overwrites. |
329 | |
330 | USE_DL_PREFIX default: NOT defined |
331 | Causes compiler to prefix all public routines with the string 'dl'. |
332 | This can be useful when you only want to use this malloc in one part |
333 | of a program, using your regular system malloc elsewhere. |
334 | |
335 | MALLOC_INSPECT_ALL default: NOT defined |
336 | If defined, compiles malloc_inspect_all and mspace_inspect_all, that |
337 | perform traversal of all heap space. Unless access to these |
338 | functions is otherwise restricted, you probably do not want to |
339 | include them in secure implementations. |
340 | |
341 | ABORT default: defined as abort() |
342 | Defines how to abort on failed checks. On most systems, a failed |
343 | check cannot die with an "assert" or even print an informative |
344 | message, because the underlying print routines in turn call malloc, |
345 | which will fail again. Generally, the best policy is to simply call |
346 | abort(). It's not very useful to do more than this because many |
347 | errors due to overwriting will show up as address faults (null, odd |
348 | addresses etc) rather than malloc-triggered checks, so will also |
349 | abort. Also, most compilers know that abort() does not return, so |
350 | can better optimize code conditionally calling it. |
351 | |
352 | PROCEED_ON_ERROR default: defined as 0 (false) |
Controls whether detected bad addresses are bypassed rather than
aborting. If set, detected bad arguments to free and
355 | realloc are ignored. And all bookkeeping information is zeroed out |
356 | upon a detected overwrite of freed heap space, thus losing the |
357 | ability to ever return it from malloc again, but enabling the |
358 | application to proceed. If PROCEED_ON_ERROR is defined, the |
359 | static variable malloc_corruption_error_count is compiled in |
360 | and can be examined to see if errors have occurred. This option |
361 | generates slower code than the default abort policy. |
362 | |
363 | DEBUG default: NOT defined |
364 | The DEBUG setting is mainly intended for people trying to modify |
365 | this code or diagnose problems when porting to new platforms. |
366 | However, it may also be able to better isolate user errors than just |
367 | using runtime checks. The assertions in the check routines spell |
368 | out in more detail the assumptions and invariants underlying the |
369 | algorithms. The checking is fairly extensive, and will slow down |
370 | execution noticeably. Calling malloc_stats or mallinfo with DEBUG |
371 | set will attempt to check every non-mmapped allocated and free chunk |
372 | in the course of computing the summaries. |
373 | |
374 | ABORT_ON_ASSERT_FAILURE default: defined as 1 (true) |
375 | Debugging assertion failures can be nearly impossible if your |
376 | version of the assert macro causes malloc to be called, which will |
377 | lead to a cascade of further failures, blowing the runtime stack. |
ABORT_ON_ASSERT_FAILURE causes assertion failures to call abort(),
379 | which will usually make debugging easier. |
380 | |
381 | MALLOC_FAILURE_ACTION default: sets errno to ENOMEM, or no-op on win32 |
The action to take before "return 0" when malloc fails to
return memory because none is available.
384 | |
385 | HAVE_MORECORE default: 1 (true) unless win32 or ONLY_MSPACES |
386 | True if this system supports sbrk or an emulation of it. |
387 | |
388 | MORECORE default: sbrk |
389 | The name of the sbrk-style system routine to call to obtain more |
390 | memory. See below for guidance on writing custom MORECORE |
391 | functions. The type of the argument to sbrk/MORECORE varies across |
392 | systems. It cannot be size_t, because it supports negative |
393 | arguments, so it is normally the signed type of the same width as |
394 | size_t (sometimes declared as "intptr_t"). It doesn't much matter |
395 | though. Internally, we only call it with arguments less than half |
396 | the max value of a size_t, which should work across all reasonable |
397 | possibilities, although sometimes generating compiler warnings. |
398 | |
399 | MORECORE_CONTIGUOUS default: 1 (true) if HAVE_MORECORE |
400 | If true, take advantage of fact that consecutive calls to MORECORE |
401 | with positive arguments always return contiguous increasing |
402 | addresses. This is true of unix sbrk. It does not hurt too much to |
403 | set it true anyway, since malloc copes with non-contiguities. |
Setting it false when MORECORE is definitely non-contiguous saves
the time and possibly wasted space it would otherwise take to
discover this.
406 | |
407 | MORECORE_CANNOT_TRIM default: NOT defined |
408 | True if MORECORE cannot release space back to the system when given |
409 | negative arguments. This is generally necessary only if you are |
410 | using a hand-crafted MORECORE function that cannot handle negative |
411 | arguments. |
412 | |
413 | NO_SEGMENT_TRAVERSAL default: 0 |
414 | If non-zero, suppresses traversals of memory segments |
returned by either MORECORE or CALL_MMAP. This disables merging
of contiguous segments and selective release of unused segments
to the OS, but bounds execution times.
418 | |
419 | HAVE_MMAP default: 1 (true) |
420 | True if this system supports mmap or an emulation of it. If so, and |
421 | HAVE_MORECORE is not true, MMAP is used for all system |
422 | allocation. If set and HAVE_MORECORE is true as well, MMAP is |
423 | primarily used to directly allocate very large blocks. It is also |
424 | used as a backup strategy in cases where MORECORE fails to provide |
425 | space from system. Note: A single call to MUNMAP is assumed to be |
able to unmap memory that may have been allocated using multiple calls
427 | to MMAP, so long as they are adjacent. |
428 | |
429 | HAVE_MREMAP default: 1 on linux, else 0 |
If true, realloc() uses mremap() to re-allocate large blocks and
431 | extend or shrink allocation spaces. |
432 | |
433 | MMAP_CLEARS default: 1 except on WINCE. |
434 | True if mmap clears memory so calloc doesn't need to. This is true |
435 | for standard unix mmap using /dev/zero and on WIN32 except for WINCE. |
436 | |
437 | USE_BUILTIN_FFS default: 0 (i.e., not used) |
438 | Causes malloc to use the builtin ffs() function to compute indices. |
439 | Some compilers may recognize and intrinsify ffs to be faster than the |
440 | supplied C version. Also, the case of x86 using gcc is special-cased |
441 | to an asm instruction, so is already as fast as it can be, and so |
442 | this setting has no effect. Similarly for Win32 under recent MS compilers. |
443 | (On most x86s, the asm version is only slightly faster than the C version.) |
444 | |
445 | malloc_getpagesize default: derive from system includes, or 4096. |
446 | The system page size. To the extent possible, this malloc manages |
447 | memory from the system in page-size units. This may be (and |
448 | usually is) a function rather than a constant. This is ignored |
if WIN32, where page size is determined using GetSystemInfo during
450 | initialization. |
451 | |
452 | USE_DEV_RANDOM default: 0 (i.e., not used) |
453 | Causes malloc to use /dev/random to initialize secure magic seed for |
454 | stamping footers. Otherwise, the current time is used. |
455 | |
456 | NO_MALLINFO default: 0 |
If nonzero, don't compile "mallinfo". This can be a simple way
458 | of dealing with mismatches between system declarations and |
459 | those in this file. |
460 | |
461 | MALLINFO_FIELD_TYPE default: size_t |
462 | The type of the fields in the mallinfo struct. This was originally |
463 | defined as "int" in SVID etc, but is more usefully defined as |
size_t. The value is used only if HAVE_USR_INCLUDE_MALLOC_H is not set.
465 | |
466 | NO_MALLOC_STATS default: 0 |
If nonzero, don't compile "malloc_stats". This avoids calls to
468 | fprintf and bringing in stdio dependencies you might not want. |
469 | |
470 | REALLOC_ZERO_BYTES_FREES default: not defined |
471 | This should be set if a call to realloc with zero bytes should |
be the same as a call to free. Some people think it should. Otherwise,
because this malloc returns a unique pointer for malloc(0), so does
realloc(p, 0).
475 | |
476 | LACKS_UNISTD_H, LACKS_FCNTL_H, LACKS_SYS_PARAM_H, LACKS_SYS_MMAN_H |
477 | LACKS_STRINGS_H, LACKS_STRING_H, LACKS_SYS_TYPES_H, LACKS_ERRNO_H |
LACKS_STDLIB_H, LACKS_SCHED_H, LACKS_TIME_H default: NOT defined unless on WIN32
479 | Define these if your system does not have these header files. |
480 | You might need to manually insert some of the declarations they provide. |
481 | |
482 | DEFAULT_GRANULARITY default: page size if MORECORE_CONTIGUOUS, |
483 | system_info.dwAllocationGranularity in WIN32, |
484 | otherwise 64K. |
485 | Also settable using mallopt(M_GRANULARITY, x) |
486 | The unit for allocating and deallocating memory from the system. On |
487 | most systems with contiguous MORECORE, there is no reason to |
488 | make this more than a page. However, systems with MMAP tend to |
489 | either require or encourage larger granularities. You can increase |
this value to prevent system allocation functions from being called
so often, especially if they are slow. The value must be at least one
492 | page and must be a power of two. Setting to 0 causes initialization |
493 | to either page size or win32 region size. (Note: In previous |
494 | versions of malloc, the equivalent of this option was called |
495 | "TOP_PAD") |
496 | |
497 | DEFAULT_TRIM_THRESHOLD default: 2MB |
498 | Also settable using mallopt(M_TRIM_THRESHOLD, x) |
499 | The maximum amount of unused top-most memory to keep before |
500 | releasing via malloc_trim in free(). Automatic trimming is mainly |
501 | useful in long-lived programs using contiguous MORECORE. Because |
502 | trimming via sbrk can be slow on some systems, and can sometimes be |
503 | wasteful (in cases where programs immediately afterward allocate |
504 | more large chunks) the value should be high enough so that your |
505 | overall system performance would improve by releasing this much |
506 | memory. As a rough guide, you might set to a value close to the |
507 | average size of a process (program) running on your system. |
508 | Releasing this much memory would allow such a process to run in |
509 | memory. Generally, it is worth tuning trim thresholds when a |
510 | program undergoes phases where several large chunks are allocated |
511 | and released in ways that can reuse each other's storage, perhaps |
512 | mixed with phases where there are no such chunks at all. The trim |
513 | value must be greater than page size to have any useful effect. To |
514 | disable trimming completely, you can set to MAX_SIZE_T. Note that the trick |
515 | some people use of mallocing a huge space and then freeing it at |
516 | program startup, in an attempt to reserve system memory, doesn't |
517 | have the intended effect under automatic trimming, since that memory |
518 | will immediately be returned to the system. |
519 | |
520 | DEFAULT_MMAP_THRESHOLD default: 256K |
521 | Also settable using mallopt(M_MMAP_THRESHOLD, x) |
522 | The request size threshold for using MMAP to directly service a |
523 | request. Requests of at least this size that cannot be allocated |
524 | using already-existing space will be serviced via mmap. (If enough |
525 | normal freed space already exists it is used instead.) Using mmap |
526 | segregates relatively large chunks of memory so that they can be |
527 | individually obtained and released from the host system. A request |
528 | serviced through mmap is never reused by any other request (at least |
529 | not directly; the system may just so happen to remap successive |
530 | requests to the same locations). Segregating space in this way has |
531 | the benefits that: Mmapped space can always be individually released |
532 | back to the system, which helps keep the system level memory demands |
533 | of a long-lived program low. Also, mapped memory doesn't become |
534 | `locked' between other chunks, as can happen with normally allocated |
535 | chunks, which means that even trimming via malloc_trim would not |
536 | release them. However, it has the disadvantage that the space |
537 | cannot be reclaimed, consolidated, and then used to service later |
538 | requests, as happens with normal chunks. The advantages of mmap |
539 | nearly always outweigh disadvantages for "large" chunks, but the |
540 | value of "large" may vary across systems. The default is an |
541 | empirically derived value that works well in most systems. You can |
542 | disable mmap by setting to MAX_SIZE_T. |
543 | |
544 | MAX_RELEASE_CHECK_RATE default: 4095 unless not HAVE_MMAP |
545 | The number of consolidated frees between checks to release |
546 | unused segments when freeing. When using non-contiguous segments, |
547 | especially with multiple mspaces, checking only for topmost space |
548 | doesn't always suffice to trigger trimming. To compensate for this, |
549 | free() will, with a period of MAX_RELEASE_CHECK_RATE (or the |
current number of segments, if greater), try to release unused
551 | segments to the OS when freeing chunks that result in |
552 | consolidation. The best value for this parameter is a compromise |
553 | between slowing down frees with relatively costly checks that |
554 | rarely trigger versus holding on to unused memory. To effectively |
555 | disable, set to MAX_SIZE_T. This may lead to a very slight speed |
556 | improvement at the expense of carrying around more memory. |
557 | */ |
558 | |
559 | /* Version identifier to allow people to support multiple versions */ |
560 | #ifndef DLMALLOC_VERSION |
561 | #define DLMALLOC_VERSION 20806 |
562 | #endif /* DLMALLOC_VERSION */ |
563 | |
564 | #ifndef DLMALLOC_EXPORT |
565 | #define DLMALLOC_EXPORT extern |
566 | #endif |
567 | |
568 | #ifndef WIN32 |
569 | #ifdef _WIN32 |
570 | #define WIN32 1 |
571 | #endif /* _WIN32 */ |
572 | #ifdef _WIN32_WCE |
573 | #define LACKS_FCNTL_H |
574 | #define WIN32 1 |
575 | #endif /* _WIN32_WCE */ |
576 | #endif /* WIN32 */ |
577 | #ifdef WIN32 |
578 | #define WIN32_LEAN_AND_MEAN |
579 | #include <windows.h> |
580 | #include <tchar.h> |
581 | #define HAVE_MMAP 1 |
582 | #define HAVE_MORECORE 0 |
583 | #define LACKS_UNISTD_H |
584 | #define LACKS_SYS_PARAM_H |
585 | #define LACKS_SYS_MMAN_H |
586 | #define LACKS_STRING_H |
587 | #define LACKS_STRINGS_H |
588 | #define LACKS_SYS_TYPES_H |
589 | #define LACKS_ERRNO_H |
590 | #define LACKS_SCHED_H |
591 | #ifndef MALLOC_FAILURE_ACTION |
592 | #define MALLOC_FAILURE_ACTION |
593 | #endif /* MALLOC_FAILURE_ACTION */ |
594 | #ifndef MMAP_CLEARS |
595 | #ifdef _WIN32_WCE /* WINCE reportedly does not clear */ |
596 | #define MMAP_CLEARS 0 |
597 | #else |
598 | #define MMAP_CLEARS 1 |
599 | #endif /* _WIN32_WCE */ |
600 | #endif /*MMAP_CLEARS */ |
601 | #endif /* WIN32 */ |
602 | |
603 | #if defined(DARWIN) || defined(_DARWIN) |
604 | /* Mac OSX docs advise not to use sbrk; it seems better to use mmap */ |
605 | #ifndef HAVE_MORECORE |
606 | #define HAVE_MORECORE 0 |
607 | #define HAVE_MMAP 1 |
608 | /* OSX allocators provide 16 byte alignment */ |
609 | #ifndef MALLOC_ALIGNMENT |
610 | #define MALLOC_ALIGNMENT ((size_t)16U) |
611 | #endif |
612 | #endif /* HAVE_MORECORE */ |
613 | #endif /* DARWIN */ |
614 | |
615 | #ifndef LACKS_SYS_TYPES_H |
616 | #include <sys/types.h> /* For size_t */ |
617 | #endif /* LACKS_SYS_TYPES_H */ |
618 | |
619 | /* The maximum possible size_t value has all bits set */ |
620 | #define MAX_SIZE_T (~(size_t)0) |
621 | |
622 | #ifndef USE_LOCKS /* ensure true if spin or recursive locks set */ |
623 | #define USE_LOCKS ((defined(USE_SPIN_LOCKS) && USE_SPIN_LOCKS != 0) || \ |
624 | (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0)) |
625 | #endif /* USE_LOCKS */ |
626 | |
627 | #if USE_LOCKS /* Spin locks for gcc >= 4.1, older gcc on x86, MSC >= 1310 */ |
628 | #if ((defined(__GNUC__) && \ |
629 | ((__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) || \ |
630 | defined(__i386__) || defined(__x86_64__))) || \ |
631 | (defined(_MSC_VER) && _MSC_VER>=1310)) |
632 | #ifndef USE_SPIN_LOCKS |
633 | #define USE_SPIN_LOCKS 1 |
634 | #endif /* USE_SPIN_LOCKS */ |
635 | #elif USE_SPIN_LOCKS |
636 | #error "USE_SPIN_LOCKS defined without implementation" |
637 | #endif /* ... locks available... */ |
638 | #elif !defined(USE_SPIN_LOCKS) |
639 | #define USE_SPIN_LOCKS 0 |
640 | #endif /* USE_LOCKS */ |
641 | |
642 | #ifndef ONLY_MSPACES |
643 | #define ONLY_MSPACES 0 |
644 | #endif /* ONLY_MSPACES */ |
645 | #ifndef MSPACES |
646 | #if ONLY_MSPACES |
647 | #define MSPACES 1 |
648 | #else /* ONLY_MSPACES */ |
649 | #define MSPACES 0 |
650 | #endif /* ONLY_MSPACES */ |
651 | #endif /* MSPACES */ |
652 | #ifndef MALLOC_ALIGNMENT |
653 | #define MALLOC_ALIGNMENT ((size_t)(2 * sizeof(void *))) |
654 | #endif /* MALLOC_ALIGNMENT */ |
655 | #ifndef FOOTERS |
656 | #define FOOTERS 0 |
657 | #endif /* FOOTERS */ |
658 | #ifndef ABORT |
659 | #define ABORT abort() |
660 | #endif /* ABORT */ |
661 | #ifndef ABORT_ON_ASSERT_FAILURE |
662 | #define ABORT_ON_ASSERT_FAILURE 1 |
663 | #endif /* ABORT_ON_ASSERT_FAILURE */ |
664 | #ifndef PROCEED_ON_ERROR |
665 | #define PROCEED_ON_ERROR 0 |
666 | #endif /* PROCEED_ON_ERROR */ |
667 | |
668 | #ifndef INSECURE |
669 | #define INSECURE 0 |
670 | #endif /* INSECURE */ |
671 | #ifndef MALLOC_INSPECT_ALL |
672 | #define MALLOC_INSPECT_ALL 0 |
673 | #endif /* MALLOC_INSPECT_ALL */ |
674 | #ifndef HAVE_MMAP |
675 | #define HAVE_MMAP 1 |
676 | #endif /* HAVE_MMAP */ |
677 | #ifndef MMAP_CLEARS |
678 | #define MMAP_CLEARS 1 |
679 | #endif /* MMAP_CLEARS */ |
680 | #ifndef HAVE_MREMAP |
681 | #ifdef linux |
682 | #define HAVE_MREMAP 1 |
683 | #define _GNU_SOURCE /* Turns on mremap() definition */ |
684 | #else /* linux */ |
685 | #define HAVE_MREMAP 0 |
686 | #endif /* linux */ |
687 | #endif /* HAVE_MREMAP */ |
688 | #ifndef MALLOC_FAILURE_ACTION |
689 | #define MALLOC_FAILURE_ACTION errno = ENOMEM; |
690 | #endif /* MALLOC_FAILURE_ACTION */ |
691 | #ifndef HAVE_MORECORE |
692 | #if ONLY_MSPACES |
693 | #define HAVE_MORECORE 0 |
694 | #else /* ONLY_MSPACES */ |
695 | #define HAVE_MORECORE 1 |
696 | #endif /* ONLY_MSPACES */ |
697 | #endif /* HAVE_MORECORE */ |
698 | #if !HAVE_MORECORE |
699 | #define MORECORE_CONTIGUOUS 0 |
700 | #else /* !HAVE_MORECORE */ |
701 | #define MORECORE_DEFAULT sbrk |
702 | #ifndef MORECORE_CONTIGUOUS |
703 | #define MORECORE_CONTIGUOUS 1 |
704 | #endif /* MORECORE_CONTIGUOUS */ |
705 | #endif /* HAVE_MORECORE */ |
706 | #ifndef DEFAULT_GRANULARITY |
707 | #if (MORECORE_CONTIGUOUS || defined(WIN32)) |
708 | #define DEFAULT_GRANULARITY (0) /* 0 means to compute in init_mparams */ |
709 | #else /* MORECORE_CONTIGUOUS */ |
710 | #define DEFAULT_GRANULARITY ((size_t)64U * (size_t)1024U) |
711 | #endif /* MORECORE_CONTIGUOUS */ |
712 | #endif /* DEFAULT_GRANULARITY */ |
713 | #ifndef DEFAULT_TRIM_THRESHOLD |
714 | #ifndef MORECORE_CANNOT_TRIM |
715 | #define DEFAULT_TRIM_THRESHOLD ((size_t)2U * (size_t)1024U * (size_t)1024U) |
716 | #else /* MORECORE_CANNOT_TRIM */ |
717 | #define DEFAULT_TRIM_THRESHOLD MAX_SIZE_T |
718 | #endif /* MORECORE_CANNOT_TRIM */ |
719 | #endif /* DEFAULT_TRIM_THRESHOLD */ |
720 | #ifndef DEFAULT_MMAP_THRESHOLD |
721 | #if HAVE_MMAP |
722 | #define DEFAULT_MMAP_THRESHOLD ((size_t)256U * (size_t)1024U) |
723 | #else /* HAVE_MMAP */ |
724 | #define DEFAULT_MMAP_THRESHOLD MAX_SIZE_T |
725 | #endif /* HAVE_MMAP */ |
726 | #endif /* DEFAULT_MMAP_THRESHOLD */ |
727 | #ifndef MAX_RELEASE_CHECK_RATE |
728 | #if HAVE_MMAP |
729 | #define MAX_RELEASE_CHECK_RATE 4095 |
730 | #else |
731 | #define MAX_RELEASE_CHECK_RATE MAX_SIZE_T |
732 | #endif /* HAVE_MMAP */ |
733 | #endif /* MAX_RELEASE_CHECK_RATE */ |
734 | #ifndef USE_BUILTIN_FFS |
735 | #define USE_BUILTIN_FFS 0 |
736 | #endif /* USE_BUILTIN_FFS */ |
737 | #ifndef USE_DEV_RANDOM |
738 | #define USE_DEV_RANDOM 0 |
739 | #endif /* USE_DEV_RANDOM */ |
740 | #ifndef NO_MALLINFO |
741 | #define NO_MALLINFO 0 |
742 | #endif /* NO_MALLINFO */ |
743 | #ifndef MALLINFO_FIELD_TYPE |
744 | #define MALLINFO_FIELD_TYPE size_t |
745 | #endif /* MALLINFO_FIELD_TYPE */ |
746 | #ifndef NO_MALLOC_STATS |
747 | #define NO_MALLOC_STATS 0 |
748 | #endif /* NO_MALLOC_STATS */ |
749 | #ifndef NO_SEGMENT_TRAVERSAL |
750 | #define NO_SEGMENT_TRAVERSAL 0 |
751 | #endif /* NO_SEGMENT_TRAVERSAL */ |
752 | |
753 | /* |
754 | mallopt tuning options. SVID/XPG defines four standard parameter |
755 | numbers for mallopt, normally defined in malloc.h. None of these |
756 | are used in this malloc, so setting them has no effect. But this |
757 | malloc does support the following options. |
758 | */ |
759 | |
760 | #define M_TRIM_THRESHOLD (-1) |
761 | #define M_GRANULARITY (-2) |
762 | #define M_MMAP_THRESHOLD (-3) |
763 | |
764 | /* ------------------------ Mallinfo declarations ------------------------ */ |
765 | |
766 | #if !NO_MALLINFO |
767 | /* |
768 | This version of malloc supports the standard SVID/XPG mallinfo |
769 | routine that returns a struct containing usage properties and |
770 | statistics. It should work on any system that has a |
771 | /usr/include/malloc.h defining struct mallinfo. The main |
772 | declaration needed is the mallinfo struct that is returned (by-copy) |
by mallinfo(). The mallinfo struct contains a bunch of fields that
are not even meaningful in this version of malloc. These fields
are instead filled by mallinfo() with other numbers that might be of
776 | interest. |
777 | |
778 | HAVE_USR_INCLUDE_MALLOC_H should be set if you have a |
779 | /usr/include/malloc.h file that includes a declaration of struct |
780 | mallinfo. If so, it is included; else a compliant version is |
781 | declared below. These must be precisely the same for mallinfo() to |
782 | work. The original SVID version of this struct, defined on most |
783 | systems with mallinfo, declares all fields as ints. But some others |
define them as unsigned long. If your system defines the fields using a
785 | type of different width than listed here, you MUST #include your |
786 | system version and #define HAVE_USR_INCLUDE_MALLOC_H. |
787 | */ |
788 | |
789 | /* #define HAVE_USR_INCLUDE_MALLOC_H */ |
790 | |
791 | #ifdef HAVE_USR_INCLUDE_MALLOC_H |
792 | #include "/usr/include/malloc.h" |
793 | #else /* HAVE_USR_INCLUDE_MALLOC_H */ |
794 | #ifndef STRUCT_MALLINFO_DECLARED |
795 | /* HP-UX (and others?) redefines mallinfo unless _STRUCT_MALLINFO is defined */ |
796 | #define _STRUCT_MALLINFO |
797 | #define STRUCT_MALLINFO_DECLARED 1 |
798 | struct mallinfo { |
799 | MALLINFO_FIELD_TYPE arena; /* non-mmapped space allocated from system */ |
800 | MALLINFO_FIELD_TYPE ordblks; /* number of free chunks */ |
801 | MALLINFO_FIELD_TYPE smblks; /* always 0 */ |
802 | MALLINFO_FIELD_TYPE hblks; /* always 0 */ |
803 | MALLINFO_FIELD_TYPE hblkhd; /* space in mmapped regions */ |
804 | MALLINFO_FIELD_TYPE usmblks; /* maximum total allocated space */ |
805 | MALLINFO_FIELD_TYPE fsmblks; /* always 0 */ |
806 | MALLINFO_FIELD_TYPE uordblks; /* total allocated space */ |
807 | MALLINFO_FIELD_TYPE fordblks; /* total free space */ |
808 | MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */ |
809 | }; |
810 | #endif /* STRUCT_MALLINFO_DECLARED */ |
811 | #endif /* HAVE_USR_INCLUDE_MALLOC_H */ |
812 | #endif /* NO_MALLINFO */ |
813 | |
814 | /* |
815 | Try to persuade compilers to inline. The most critical functions for |
816 | inlining are defined as macros, so these aren't used for them. |
817 | */ |
818 | |
819 | #if 0 /* SDL */ |
820 | #ifndef FORCEINLINE |
821 | #if defined(__GNUC__) |
822 | #define FORCEINLINE __inline __attribute__ ((always_inline)) |
823 | #elif defined(_MSC_VER) |
824 | #define FORCEINLINE __forceinline |
825 | #endif |
826 | #endif |
827 | #endif /* SDL */ |
828 | #ifndef NOINLINE |
829 | #if defined(__GNUC__) |
830 | #define NOINLINE __attribute__ ((noinline)) |
831 | #elif defined(_MSC_VER) |
832 | #define NOINLINE __declspec(noinline) |
833 | #else |
834 | #define NOINLINE |
835 | #endif |
836 | #endif |
837 | |
838 | #ifdef __cplusplus |
839 | extern "C" { |
840 | #if 0 /* SDL */ |
841 | #ifndef FORCEINLINE |
842 | #define FORCEINLINE inline |
843 | #endif |
844 | #endif /* SDL */ |
845 | #endif /* __cplusplus */ |
846 | #if 0 /* SDL */ |
847 | #ifndef FORCEINLINE |
848 | #define FORCEINLINE |
849 | #endif |
#endif /* SDL */
851 | |
852 | #if !ONLY_MSPACES |
853 | |
854 | /* ------------------- Declarations of public routines ------------------- */ |
855 | |
856 | #ifndef USE_DL_PREFIX |
857 | #define dlcalloc calloc |
858 | #define dlfree free |
859 | #define dlmalloc malloc |
860 | #define dlmemalign memalign |
861 | #define dlposix_memalign posix_memalign |
862 | #define dlrealloc realloc |
863 | #define dlrealloc_in_place realloc_in_place |
864 | #define dlvalloc valloc |
865 | #define dlpvalloc pvalloc |
866 | #define dlmallinfo mallinfo |
867 | #define dlmallopt mallopt |
868 | #define dlmalloc_trim malloc_trim |
869 | #define dlmalloc_stats malloc_stats |
870 | #define dlmalloc_usable_size malloc_usable_size |
871 | #define dlmalloc_footprint malloc_footprint |
872 | #define dlmalloc_max_footprint malloc_max_footprint |
873 | #define dlmalloc_footprint_limit malloc_footprint_limit |
874 | #define dlmalloc_set_footprint_limit malloc_set_footprint_limit |
875 | #define dlmalloc_inspect_all malloc_inspect_all |
876 | #define dlindependent_calloc independent_calloc |
877 | #define dlindependent_comalloc independent_comalloc |
878 | #define dlbulk_free bulk_free |
879 | #endif /* USE_DL_PREFIX */ |
880 | |
881 | /* |
882 | malloc(size_t n) |
883 | Returns a pointer to a newly allocated chunk of at least n bytes, or |
884 | null if no space is available, in which case errno is set to ENOMEM |
885 | on ANSI C systems. |
886 | |
887 | If n is zero, malloc returns a minimum-sized chunk. (The minimum |
888 | size is 16 bytes on most 32bit systems, and 32 bytes on 64bit |
889 | systems.) Note that size_t is an unsigned type, so calls with |
890 | arguments that would be negative if signed are interpreted as |
891 | requests for huge amounts of space, which will often fail. The |
892 | maximum supported value of n differs across systems, but is in all |
893 | cases less than the maximum representable value of a size_t. |
894 | */ |
895 | DLMALLOC_EXPORT void* dlmalloc(size_t); |
896 | |
897 | /* |
898 | free(void* p) |
Releases the chunk of memory pointed to by p, which had previously
been allocated using malloc or a related routine such as realloc.
901 | It has no effect if p is null. If p was not malloced or already |
902 | freed, free(p) will by default cause the current program to abort. |
903 | */ |
904 | DLMALLOC_EXPORT void dlfree(void*); |
905 | |
906 | /* |
907 | calloc(size_t n_elements, size_t element_size); |
908 | Returns a pointer to n_elements * element_size bytes, with all locations |
909 | set to zero. |
910 | */ |
911 | DLMALLOC_EXPORT void* dlcalloc(size_t, size_t); |
912 | |
913 | /* |
914 | realloc(void* p, size_t n) |
915 | Returns a pointer to a chunk of size n that contains the same data |
916 | as does chunk p up to the minimum of (n, p's size) bytes, or null |
917 | if no space is available. |
918 | |
919 | The returned pointer may or may not be the same as p. The algorithm |
920 | prefers extending p in most cases when possible, otherwise it |
921 | employs the equivalent of a malloc-copy-free sequence. |
922 | |
923 | If p is null, realloc is equivalent to malloc. |
924 | |
925 | If space is not available, realloc returns null, errno is set (if on |
926 | ANSI) and p is NOT freed. |
927 | |
If n is for fewer bytes than already held by p, the newly unused
929 | space is lopped off and freed if possible. realloc with a size |
930 | argument of zero (re)allocates a minimum-sized chunk. |
931 | |
932 | The old unix realloc convention of allowing the last-free'd chunk |
933 | to be used as an argument to realloc is not supported. |
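Because a failed realloc leaves p valid, a safe growth pattern
(a sketch, not part of this API) assigns through a temporary:
  void* q = realloc(p, n);
  if (q != 0)
    p = q;        // success; the chunk may have moved
  else
    handle_oom(); // hypothetical handler; p is still valid here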
934 | */ |
935 | DLMALLOC_EXPORT void* dlrealloc(void*, size_t); |
936 | |
937 | /* |
938 | realloc_in_place(void* p, size_t n) |
939 | Resizes the space allocated for p to size n, only if this can be |
940 | done without moving p (i.e., only if there is adjacent space |
941 | available if n is greater than p's current allocated size, or n is |
942 | less than or equal to p's size). This may be used instead of plain |
943 | realloc if an alternative allocation strategy is needed upon failure |
944 | to expand space; for example, reallocation of a buffer that must be |
945 | memory-aligned or cleared. You can use realloc_in_place to trigger |
946 | these alternatives only when needed. |
947 | |
948 | Returns p if successful; otherwise null. |
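For example, a sketch of the fallback strategy described above
(grow_aligned, the alignment of 64, and the oldsize bookkeeping
are hypothetical illustrations, not part of this API):
  void* grow_aligned(void* p, size_t oldsize, size_t newsize) {
    void* q;
    if (realloc_in_place(p, newsize) != 0)
      return p;                  // extended without moving
    q = memalign(64, newsize);   // re-establish alignment
    if (q != 0) {
      memcpy(q, p, oldsize);     // assumes newsize >= oldsize
      free(p);
    }
    return q;
  }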
949 | */ |
950 | DLMALLOC_EXPORT void* dlrealloc_in_place(void*, size_t); |
951 | |
952 | /* |
953 | memalign(size_t alignment, size_t n); |
954 | Returns a pointer to a newly allocated chunk of n bytes, aligned |
955 | in accord with the alignment argument. |
956 | |
957 | The alignment argument should be a power of two. If the argument is |
958 | not a power of two, the nearest greater power is used. |
959 | 8-byte alignment is guaranteed by normal malloc calls, so don't |
960 | bother calling memalign with an argument of 8 or less. |
961 | |
962 | Overreliance on memalign is a sure way to fragment space. |
963 | */ |
964 | DLMALLOC_EXPORT void* dlmemalign(size_t, size_t); |
965 | |
966 | /* |
967 | int posix_memalign(void** pp, size_t alignment, size_t n); |
968 | Allocates a chunk of n bytes, aligned in accord with the alignment |
969 | argument. Differs from memalign only in that it (1) assigns the |
970 | allocated memory to *pp rather than returning it, (2) fails and |
returns EINVAL if the alignment is not a power of two, and (3) fails and
972 | returns ENOMEM if memory cannot be allocated. |
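Typical usage (a sketch; note that the int result code, not errno,
signals failure):
  void* mem;
  if (posix_memalign(&mem, 64, 1024) != 0)
    mem = 0; // EINVAL or ENOMEM was returned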
973 | */ |
974 | DLMALLOC_EXPORT int dlposix_memalign(void**, size_t, size_t); |
975 | |
976 | /* |
977 | valloc(size_t n); |
978 | Equivalent to memalign(pagesize, n), where pagesize is the page |
979 | size of the system. If the pagesize is unknown, 4096 is used. |
980 | */ |
981 | DLMALLOC_EXPORT void* dlvalloc(size_t); |
982 | |
983 | /* |
984 | mallopt(int parameter_number, int parameter_value) |
Sets tunable parameters. The format is to provide a
986 | (parameter-number, parameter-value) pair. mallopt then sets the |
987 | corresponding parameter to the argument value if it can (i.e., so |
988 | long as the value is meaningful), and returns 1 if successful else |
0. To work around the fact that mallopt is specified to use int,
990 | not size_t parameters, the value -1 is specially treated as the |
991 | maximum unsigned size_t value. |
992 | |
993 | SVID/XPG/ANSI defines four standard param numbers for mallopt, |
normally defined in malloc.h. None of these are used in this malloc,
995 | so setting them has no effect. But this malloc also supports other |
996 | options in mallopt. See below for details. Briefly, supported |
997 | parameters are as follows (listed defaults are for "typical" |
998 | configurations). |
999 | |
1000 | Symbol param # default allowed param values |
1001 | M_TRIM_THRESHOLD -1 2*1024*1024 any (-1 disables) |
1002 | M_GRANULARITY -2 page size any power of 2 >= page size |
1003 | M_MMAP_THRESHOLD -3 256*1024 any (or 0 if no MMAP support) |
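For example (a sketch; recall that -1 stands for the maximum
size_t, so the first call effectively disables trimming):
  mallopt(M_TRIM_THRESHOLD, -1);         // never trim
  mallopt(M_MMAP_THRESHOLD, 1024*1024);  // mmap requests >= 1MB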
1004 | */ |
1005 | DLMALLOC_EXPORT int dlmallopt(int, int); |
1006 | |
1007 | /* |
1008 | malloc_footprint(); |
1009 | Returns the number of bytes obtained from the system. The total |
1010 | number of bytes allocated by malloc, realloc etc., is less than this |
1011 | value. Unlike mallinfo, this function returns only a precomputed |
1012 | result, so can be called frequently to monitor memory consumption. |
1013 | Even if locks are otherwise defined, this function does not use them, |
1014 | so results might not be up to date. |
1015 | */ |
1016 | DLMALLOC_EXPORT size_t dlmalloc_footprint(void); |
1017 | |
1018 | /* |
1019 | malloc_max_footprint(); |
1020 | Returns the maximum number of bytes obtained from the system. This |
1021 | value will be greater than current footprint if deallocated space |
1022 | has been reclaimed by the system. The peak number of bytes allocated |
1023 | by malloc, realloc etc., is less than this value. Unlike mallinfo, |
1024 | this function returns only a precomputed result, so can be called |
1025 | frequently to monitor memory consumption. Even if locks are |
1026 | otherwise defined, this function does not use them, so results might |
1027 | not be up to date. |
1028 | */ |
1029 | DLMALLOC_EXPORT size_t dlmalloc_max_footprint(void); |
1030 | |
1031 | /* |
1032 | malloc_footprint_limit(); |
Returns the number of bytes that the heap is allowed to obtain
from the system: the last value returned by
malloc_set_footprint_limit, or the maximum size_t value if it was
never set. The returned value reflects a permission. There is no
1037 | guarantee that this number of bytes can actually be obtained from |
1038 | the system. |
1039 | */ |
DLMALLOC_EXPORT size_t dlmalloc_footprint_limit(void);
1041 | |
1042 | /* |
1043 | malloc_set_footprint_limit(); |
1044 | Sets the maximum number of bytes to obtain from the system, causing |
1045 | failure returns from malloc and related functions upon attempts to |
1046 | exceed this value. The argument value may be subject to page |
1047 | rounding to an enforceable limit; this actual value is returned. |
1048 | Using an argument of the maximum possible size_t effectively |
1049 | disables checks. If the argument is less than or equal to the |
1050 | current malloc_footprint, then all future allocations that require |
1051 | additional system memory will fail. However, invocation cannot |
1052 | retroactively deallocate existing used memory. |
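For example, to cap the heap at roughly 64MB beyond its current
footprint (a sketch; the enforced, possibly page-rounded value is
what the call returns):
  size_t limit = malloc_set_footprint_limit(malloc_footprint() +
                                            (size_t)64*1024*1024);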
1053 | */ |
1054 | DLMALLOC_EXPORT size_t dlmalloc_set_footprint_limit(size_t bytes); |
1055 | |
1056 | #if MALLOC_INSPECT_ALL |
1057 | /* |
1058 | malloc_inspect_all(void(*handler)(void *start, |
1059 | void *end, |
1060 | size_t used_bytes, |
1061 | void* callback_arg), |
1062 | void* arg); |
1063 | Traverses the heap and calls the given handler for each managed |
1064 | region, skipping all bytes that are (or may be) used for bookkeeping |
purposes. Traversal does not include chunks that have been
1066 | directly memory mapped. Each reported region begins at the start |
1067 | address, and continues up to but not including the end address. The |
1068 | first used_bytes of the region contain allocated data. If |
1069 | used_bytes is zero, the region is unallocated. The handler is |
1070 | invoked with the given callback argument. If locks are defined, they |
1071 | are held during the entire traversal. It is a bad idea to invoke |
1072 | other malloc functions from within the handler. |
1073 | |
For example, to count the number of in-use chunks of size at least
1000 bytes, you could write:
1076 | static int count = 0; |
1077 | void count_chunks(void* start, void* end, size_t used, void* arg) { |
1078 | if (used >= 1000) ++count; |
1079 | } |
1080 | then: |
1081 | malloc_inspect_all(count_chunks, NULL); |
1082 | |
1083 | malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined. |
1084 | */ |
1085 | DLMALLOC_EXPORT void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*), |
1086 | void* arg); |
1087 | |
1088 | #endif /* MALLOC_INSPECT_ALL */ |
1089 | |
1090 | #if !NO_MALLINFO |
1091 | /* |
1092 | mallinfo() |
1093 | Returns (by copy) a struct containing various summary statistics: |
1094 | |
1095 | arena: current total non-mmapped bytes allocated from system |
1096 | ordblks: the number of free chunks |
1097 | smblks: always zero. |
1098 | hblks: current number of mmapped regions |
1099 | hblkhd: total bytes held in mmapped regions |
1100 | usmblks: the maximum total allocated space. This will be greater |
1101 | than current total if trimming has occurred. |
1102 | fsmblks: always zero |
1103 | uordblks: current total allocated space (normal or mmapped) |
1104 | fordblks: total free space |
1105 | keepcost: the maximum number of bytes that could ideally be released |
1106 | back to system via malloc_trim. ("ideally" means that |
1107 | it ignores page restrictions etc.) |
1108 | |
1109 | Because these fields are ints, but internal bookkeeping may |
1110 | be kept as longs, the reported values may wrap around zero and |
1111 | thus be inaccurate. |
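For example (a sketch; the fields are MALLINFO_FIELD_TYPE, size_t
by default in this file, so cast before printing):
  struct mallinfo mi = mallinfo();
  printf("allocated: %lu free: %lu\n",
         (unsigned long)mi.uordblks, (unsigned long)mi.fordblks);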
1112 | */ |
1113 | DLMALLOC_EXPORT struct mallinfo dlmallinfo(void); |
1114 | #endif /* NO_MALLINFO */ |
1115 | |
1116 | /* |
1117 | independent_calloc(size_t n_elements, size_t element_size, void* chunks[]); |
1118 | |
1119 | independent_calloc is similar to calloc, but instead of returning a |
1120 | single cleared space, it returns an array of pointers to n_elements |
1121 | independent elements that can hold contents of size elem_size, each |
1122 | of which starts out cleared, and can be independently freed, |
1123 | realloc'ed etc. The elements are guaranteed to be adjacently |
1124 | allocated (this is not guaranteed to occur with multiple callocs or |
1125 | mallocs), which may also improve cache locality in some |
1126 | applications. |
1127 | |
1128 | The "chunks" argument is optional (i.e., may be null, which is |
1129 | probably the most typical usage). If it is null, the returned array |
1130 | is itself dynamically allocated and should also be freed when it is |
1131 | no longer needed. Otherwise, the chunks array must be of at least |
1132 | n_elements in length. It is filled in with the pointers to the |
1133 | chunks. |
1134 | |
1135 | In either case, independent_calloc returns this pointer array, or |
1136 | null if the allocation failed. If n_elements is zero and "chunks" |
1137 | is null, it returns a chunk representing an array with zero elements |
1138 | (which should be freed if not wanted). |
1139 | |
1140 | Each element must be freed when it is no longer needed. This can be |
1141 | done all at once using bulk_free. |
1142 | |
1143 | independent_calloc simplifies and speeds up implementations of many |
1144 | kinds of pools. It may also be useful when constructing large data |
1145 | structures that initially have a fixed number of fixed-sized nodes, |
1146 | but the number is not known at compile time, and some of the nodes |
1147 | may later need to be freed. For example: |
1148 | |
1149 | struct Node { int item; struct Node* next; }; |
1150 | |
1151 | struct Node* build_list() { |
1152 | struct Node** pool; |
1153 | int n = read_number_of_nodes_needed(); |
1154 | if (n <= 0) return 0; |
1155 | pool = (struct Node**)(independent_calloc(n, sizeof(struct Node), 0); |
1156 | if (pool == 0) die(); |
1157 | // organize into a linked list... |
1158 | struct Node* first = pool[0]; |
      for (int i = 0; i < n-1; ++i)
1160 | pool[i]->next = pool[i+1]; |
1161 | free(pool); // Can now free the array (or not, if it is needed later) |
1162 | return first; |
1163 | } |
1164 | */ |
1165 | DLMALLOC_EXPORT void** dlindependent_calloc(size_t, size_t, void**); |
1166 | |
1167 | /* |
1168 | independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]); |
1169 | |
1170 | independent_comalloc allocates, all at once, a set of n_elements |
1171 | chunks with sizes indicated in the "sizes" array. It returns |
1172 | an array of pointers to these elements, each of which can be |
1173 | independently freed, realloc'ed etc. The elements are guaranteed to |
1174 | be adjacently allocated (this is not guaranteed to occur with |
1175 | multiple callocs or mallocs), which may also improve cache locality |
1176 | in some applications. |
1177 | |
1178 | The "chunks" argument is optional (i.e., may be null). If it is null |
1179 | the returned array is itself dynamically allocated and should also |
1180 | be freed when it is no longer needed. Otherwise, the chunks array |
1181 | must be of at least n_elements in length. It is filled in with the |
1182 | pointers to the chunks. |
1183 | |
1184 | In either case, independent_comalloc returns this pointer array, or |
1185 | null if the allocation failed. If n_elements is zero and chunks is |
1186 | null, it returns a chunk representing an array with zero elements |
1187 | (which should be freed if not wanted). |
1188 | |
1189 | Each element must be freed when it is no longer needed. This can be |
1190 | done all at once using bulk_free. |
1191 | |
  independent_comalloc differs from independent_calloc in that each
1193 | element may have a different size, and also that it does not |
1194 | automatically clear elements. |
1195 | |
1196 | independent_comalloc can be used to speed up allocation in cases |
1197 | where several structs or objects must always be allocated at the |
1198 | same time. For example: |
1199 | |
    struct Head { ... };
    struct Foot { ... };
1202 | |
1203 | void send_message(char* msg) { |
      size_t msglen = strlen(msg);
1205 | size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) }; |
1206 | void* chunks[3]; |
1207 | if (independent_comalloc(3, sizes, chunks) == 0) |
1208 | die(); |
1209 | struct Head* head = (struct Head*)(chunks[0]); |
1210 | char* body = (char*)(chunks[1]); |
1211 | struct Foot* foot = (struct Foot*)(chunks[2]); |
1212 | // ... |
1213 | } |
1214 | |
1215 | In general though, independent_comalloc is worth using only for |
1216 | larger values of n_elements. For small values, you probably won't |
1217 | detect enough difference from series of malloc calls to bother. |
1218 | |
1219 | Overuse of independent_comalloc can increase overall memory usage, |
1220 | since it cannot reuse existing noncontiguous small chunks that |
1221 | might be available for some of the elements. |
1222 | */ |
1223 | DLMALLOC_EXPORT void** dlindependent_comalloc(size_t, size_t*, void**); |
1224 | |
1225 | /* |
1226 | bulk_free(void* array[], size_t n_elements) |
1227 | Frees and clears (sets to null) each non-null pointer in the given |
1228 | array. This is likely to be faster than freeing them one-by-one. |
1229 | If footers are used, pointers that have been allocated in different |
1230 | mspaces are not freed or cleared, and the count of all such pointers |
1231 | is returned. For large arrays of pointers with poor locality, it |
1232 | may be worthwhile to sort this array before calling bulk_free. |
1233 | */ |
1234 | DLMALLOC_EXPORT size_t dlbulk_free(void**, size_t n_elements); |
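
/*
  A sketch pairing independent_calloc with bulk_free (reusing the
  struct Node example above). bulk_free releases the elements; the
  pointer array itself must still be freed separately:

    void** pool = independent_calloc(n, sizeof(struct Node), 0);
    if (pool != 0) {
      // ... use pool[0..n-1] ...
      bulk_free(pool, n);  // frees each element and nulls the entries
      free(pool);          // the array that held the (now null) pointers
    }
*/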
1235 | |
1236 | /* |
1237 | pvalloc(size_t n); |
  Equivalent to valloc(minimum-page-that-holds(n)); that is, it
  rounds n up to the nearest pagesize.
1240 | */ |
1241 | DLMALLOC_EXPORT void* dlpvalloc(size_t); |
1242 | |
1243 | /* |
1244 | malloc_trim(size_t pad); |
1245 | |
1246 | If possible, gives memory back to the system (via negative arguments |
1247 | to sbrk) if there is unused memory at the `high' end of the malloc |
1248 | pool or in unused MMAP segments. You can call this after freeing |
1249 | large blocks of memory to potentially reduce the system-level memory |
1250 | requirements of a program. However, it cannot guarantee to reduce |
1251 | memory. Under some allocation patterns, some large free blocks of |
1252 | memory will be locked between two used chunks, so they cannot be |
1253 | given back to the system. |
1254 | |
1255 | The `pad' argument to malloc_trim represents the amount of free |
1256 | trailing space to leave untrimmed. If this argument is zero, only |
1257 | the minimum amount of memory to maintain internal data structures |
1258 | will be left. Non-zero arguments can be supplied to maintain enough |
1259 | trailing space to service future expected allocations without having |
1260 | to re-obtain memory from the system. |
1261 | |
1262 | Malloc_trim returns 1 if it actually released any memory, else 0. |
1263 | */ |
1264 | DLMALLOC_EXPORT int dlmalloc_trim(size_t); |
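
/*
  For example, after freeing a large working set you might try to
  shrink the footprint while keeping some slack for future requests
  (a sketch with a hypothetical big_buffer; trimming is best-effort
  and may release nothing):

    free(big_buffer);
    if (malloc_trim(64 * 1024) == 0) {
      // nothing could be returned to the system
    }
*/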
1265 | |
1266 | /* |
1267 | malloc_stats(); |
1268 | Prints on stderr the amount of space obtained from the system (both |
1269 | via sbrk and mmap), the maximum amount (which may be more than |
1270 | current if malloc_trim and/or munmap got called), and the current |
1271 | number of bytes allocated via malloc (or realloc, etc) but not yet |
1272 | freed. Note that this is the number of bytes allocated, not the |
1273 | number requested. It will be larger than the number requested |
1274 | because of alignment and bookkeeping overhead. Because it includes |
1275 | alignment wastage as being in use, this figure may be greater than |
1276 | zero even when no user-level chunks are allocated. |
1277 | |
1278 | The reported current and maximum system memory can be inaccurate if |
1279 | a program makes other calls to system memory allocation functions |
1280 | (normally sbrk) outside of malloc. |
1281 | |
1282 | malloc_stats prints only the most commonly interesting statistics. |
1283 | More information can be obtained by calling mallinfo. |
1284 | */ |
1285 | DLMALLOC_EXPORT void dlmalloc_stats(void); |
1286 | |
1287 | /* |
1288 | malloc_usable_size(void* p); |
1289 | |
1290 | Returns the number of bytes you can actually use in |
1291 | an allocated chunk, which may be more than you requested (although |
1292 | often not) due to alignment and minimum size constraints. |
1293 | You can use this many bytes without worrying about |
1294 | overwriting other allocated objects. This is not a particularly great |
1295 | programming practice. malloc_usable_size can be more useful in |
1296 | debugging and assertions, for example: |
1297 | |
1298 | p = malloc(n); |
1299 | assert(malloc_usable_size(p) >= 256); |
1300 | */ |
1301 | size_t dlmalloc_usable_size(void*); |
1302 | |
1303 | #endif /* ONLY_MSPACES */ |
1304 | |
1305 | #if MSPACES |
1306 | |
1307 | /* |
1308 | mspace is an opaque type representing an independent |
1309 | region of space that supports mspace_malloc, etc. |
1310 | */ |
1311 | typedef void* mspace; |
1312 | |
1313 | /* |
1314 | create_mspace creates and returns a new independent space with the |
1315 | given initial capacity, or, if 0, the default granularity size. It |
1316 | returns null if there is no system memory available to create the |
1317 | space. If argument locked is non-zero, the space uses a separate |
1318 | lock to control access. The capacity of the space will grow |
1319 | dynamically as needed to service mspace_malloc requests. You can |
1320 | control the sizes of incremental increases of this space by |
1321 | compiling with a different DEFAULT_GRANULARITY or dynamically |
1322 | setting with mallopt(M_GRANULARITY, value). |
1323 | */ |
1324 | DLMALLOC_EXPORT mspace create_mspace(size_t capacity, int locked); |
1325 | |
1326 | /* |
1327 | destroy_mspace destroys the given space, and attempts to return all |
1328 | of its memory back to the system, returning the total number of |
1329 | bytes freed. After destruction, the results of access to all memory |
1330 | used by the space become undefined. |
1331 | */ |
1332 | DLMALLOC_EXPORT size_t destroy_mspace(mspace msp); |
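
/*
  A minimal sketch of the typical mspace lifecycle:

    mspace msp = create_mspace(0, 0);   // default capacity, no locking
    if (msp != 0) {
      void* p = mspace_malloc(msp, 128);
      // ... use p ...
      mspace_free(msp, p);
      destroy_mspace(msp);              // returns all space in one call
    }
*/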
1333 | |
1334 | /* |
1335 | create_mspace_with_base uses the memory supplied as the initial base |
1336 | of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this |
1337 | space is used for bookkeeping, so the capacity must be at least this |
1338 | large. (Otherwise 0 is returned.) When this initial space is |
1339 | exhausted, additional memory will be obtained from the system. |
1340 | Destroying this space will deallocate all additionally allocated |
1341 | space (if possible) but not the initial base. |
1342 | */ |
1343 | DLMALLOC_EXPORT mspace create_mspace_with_base(void* base, size_t capacity, int locked); |
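
/*
  A sketch of carving an mspace out of caller-supplied storage (the
  static arena and its size are illustrative):

    static char arena[64 * 1024];
    mspace msp = create_mspace_with_base(arena, sizeof(arena), 0);
    // allocations are served from arena until it is exhausted;
    // destroy_mspace will not release arena itself
*/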
1344 | |
1345 | /* |
1346 | mspace_track_large_chunks controls whether requests for large chunks |
1347 | are allocated in their own untracked mmapped regions, separate from |
1348 | others in this mspace. By default large chunks are not tracked, |
1349 | which reduces fragmentation. However, such chunks are not |
1350 | necessarily released to the system upon destroy_mspace. Enabling |
1351 | tracking by setting to true may increase fragmentation, but avoids |
1352 | leakage when relying on destroy_mspace to release all memory |
1353 | allocated using this space. The function returns the previous |
1354 | setting. |
1355 | */ |
1356 | DLMALLOC_EXPORT int mspace_track_large_chunks(mspace msp, int enable); |
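
/*
  For example, when destroy_mspace is the only cleanup, enabling
  tracking ensures large mmapped chunks are released too (a sketch):

    mspace msp = create_mspace(0, 0);
    mspace_track_large_chunks(msp, 1);
    // ... allocate, including chunks large enough to be mmapped ...
    destroy_mspace(msp);  // now also releases those large chunks
*/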
1357 | |
1358 | |
1359 | /* |
1360 | mspace_malloc behaves as malloc, but operates within |
1361 | the given space. |
1362 | */ |
1363 | DLMALLOC_EXPORT void* mspace_malloc(mspace msp, size_t bytes); |
1364 | |
1365 | /* |
1366 | mspace_free behaves as free, but operates within |
1367 | the given space. |
1368 | |
1369 | If compiled with FOOTERS==1, mspace_free is not actually needed. |
1370 | free may be called instead of mspace_free because freed chunks from |
1371 | any space are handled by their originating spaces. |
1372 | */ |
1373 | DLMALLOC_EXPORT void mspace_free(mspace msp, void* mem); |
1374 | |
1375 | /* |
1376 | mspace_realloc behaves as realloc, but operates within |
1377 | the given space. |
1378 | |
1379 | If compiled with FOOTERS==1, mspace_realloc is not actually |
1380 | needed. realloc may be called instead of mspace_realloc because |
1381 | realloced chunks from any space are handled by their originating |
1382 | spaces. |
1383 | */ |
1384 | DLMALLOC_EXPORT void* mspace_realloc(mspace msp, void* mem, size_t newsize); |
1385 | |
1386 | /* |
1387 | mspace_calloc behaves as calloc, but operates within |
1388 | the given space. |
1389 | */ |
1390 | DLMALLOC_EXPORT void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size); |
1391 | |
1392 | /* |
1393 | mspace_memalign behaves as memalign, but operates within |
1394 | the given space. |
1395 | */ |
1396 | DLMALLOC_EXPORT void* mspace_memalign(mspace msp, size_t alignment, size_t bytes); |
1397 | |
1398 | /* |
1399 | mspace_independent_calloc behaves as independent_calloc, but |
1400 | operates within the given space. |
1401 | */ |
1402 | DLMALLOC_EXPORT void** mspace_independent_calloc(mspace msp, size_t n_elements, |
1403 | size_t elem_size, void* chunks[]); |
1404 | |
1405 | /* |
1406 | mspace_independent_comalloc behaves as independent_comalloc, but |
1407 | operates within the given space. |
1408 | */ |
1409 | DLMALLOC_EXPORT void** mspace_independent_comalloc(mspace msp, size_t n_elements, |
1410 | size_t sizes[], void* chunks[]); |
1411 | |
1412 | /* |
1413 | mspace_footprint() returns the number of bytes obtained from the |
1414 | system for this space. |
1415 | */ |
1416 | DLMALLOC_EXPORT size_t mspace_footprint(mspace msp); |
1417 | |
1418 | /* |
1419 | mspace_max_footprint() returns the peak number of bytes obtained from the |
1420 | system for this space. |
1421 | */ |
1422 | DLMALLOC_EXPORT size_t mspace_max_footprint(mspace msp); |
1423 | |
1424 | |
1425 | #if !NO_MALLINFO |
1426 | /* |
1427 | mspace_mallinfo behaves as mallinfo, but reports properties of |
1428 | the given space. |
1429 | */ |
1430 | DLMALLOC_EXPORT struct mallinfo mspace_mallinfo(mspace msp); |
1431 | #endif /* NO_MALLINFO */ |
1432 | |
1433 | /* |
  mspace_usable_size(void* p) behaves the same as malloc_usable_size.
1435 | */ |
1436 | DLMALLOC_EXPORT size_t mspace_usable_size(const void* mem); |
1437 | |
1438 | /* |
1439 | mspace_malloc_stats behaves as malloc_stats, but reports |
1440 | properties of the given space. |
1441 | */ |
1442 | DLMALLOC_EXPORT void mspace_malloc_stats(mspace msp); |
1443 | |
1444 | /* |
1445 | mspace_trim behaves as malloc_trim, but |
1446 | operates within the given space. |
1447 | */ |
1448 | DLMALLOC_EXPORT int mspace_trim(mspace msp, size_t pad); |
1449 | |
1450 | /* |
1451 | An alias for mallopt. |
1452 | */ |
1453 | DLMALLOC_EXPORT int mspace_mallopt(int, int); |
1454 | |
1455 | #endif /* MSPACES */ |
1456 | |
1457 | #ifdef __cplusplus |
1458 | } /* end of extern "C" */ |
1459 | #endif /* __cplusplus */ |
1460 | |
1461 | /* |
1462 | ======================================================================== |
1463 | To make a fully customizable malloc.h header file, cut everything |
1464 | above this line, put into file malloc.h, edit to suit, and #include it |
1465 | on the next line, as well as in programs that use this malloc. |
1466 | ======================================================================== |
1467 | */ |
1468 | |
1469 | /* #include "malloc.h" */ |
1470 | |
1471 | /*------------------------------ internal #includes ---------------------- */ |
1472 | |
1473 | #ifdef _MSC_VER |
1474 | #pragma warning( disable : 4146 ) /* no "unsigned" warnings */ |
1475 | #endif /* _MSC_VER */ |
1476 | #if !NO_MALLOC_STATS |
1477 | #include <stdio.h> /* for printing in malloc_stats */ |
1478 | #endif /* NO_MALLOC_STATS */ |
1479 | #ifndef LACKS_ERRNO_H |
1480 | #include <errno.h> /* for MALLOC_FAILURE_ACTION */ |
1481 | #endif /* LACKS_ERRNO_H */ |
1482 | #ifdef DEBUG |
1483 | #if ABORT_ON_ASSERT_FAILURE |
1484 | #undef assert |
1485 | #define assert(x) if(!(x)) ABORT |
1486 | #else /* ABORT_ON_ASSERT_FAILURE */ |
1487 | #include <assert.h> |
1488 | #endif /* ABORT_ON_ASSERT_FAILURE */ |
1489 | #else /* DEBUG */ |
1490 | #ifndef assert |
1491 | #define assert(x) |
1492 | #endif |
1493 | #define DEBUG 0 |
1494 | #endif /* DEBUG */ |
1495 | #if !defined(WIN32) && !defined(LACKS_TIME_H) |
1496 | #include <time.h> /* for magic initialization */ |
1497 | #endif /* WIN32 */ |
1498 | #ifndef LACKS_STDLIB_H |
1499 | #include <stdlib.h> /* for abort() */ |
1500 | #endif /* LACKS_STDLIB_H */ |
1501 | #ifndef LACKS_STRING_H |
1502 | #include <string.h> /* for memset etc */ |
1503 | #endif /* LACKS_STRING_H */ |
1504 | #if USE_BUILTIN_FFS |
1505 | #ifndef LACKS_STRINGS_H |
1506 | #include <strings.h> /* for ffs */ |
1507 | #endif /* LACKS_STRINGS_H */ |
1508 | #endif /* USE_BUILTIN_FFS */ |
1509 | #if HAVE_MMAP |
1510 | #ifndef LACKS_SYS_MMAN_H |
1511 | /* On some versions of linux, mremap decl in mman.h needs __USE_GNU set */ |
1512 | #if (defined(linux) && !defined(__USE_GNU)) |
1513 | #define __USE_GNU 1 |
1514 | #include <sys/mman.h> /* for mmap */ |
1515 | #undef __USE_GNU |
1516 | #else |
1517 | #include <sys/mman.h> /* for mmap */ |
1518 | #endif /* linux */ |
1519 | #endif /* LACKS_SYS_MMAN_H */ |
1520 | #ifndef LACKS_FCNTL_H |
1521 | #include <fcntl.h> |
1522 | #endif /* LACKS_FCNTL_H */ |
1523 | #endif /* HAVE_MMAP */ |
1524 | #ifndef LACKS_UNISTD_H |
1525 | #include <unistd.h> /* for sbrk, sysconf */ |
1526 | #else /* LACKS_UNISTD_H */ |
1527 | #if !defined(__FreeBSD__) && !defined(__OpenBSD__) && !defined(__NetBSD__) |
1528 | extern void* sbrk(ptrdiff_t); |
1529 | #endif /* FreeBSD etc */ |
1530 | #endif /* LACKS_UNISTD_H */ |
1531 | |
1532 | /* Declarations for locking */ |
1533 | #if USE_LOCKS |
1534 | #ifndef WIN32 |
1535 | #if defined (__SVR4) && defined (__sun) /* solaris */ |
1536 | #include <thread.h> |
1537 | #elif !defined(LACKS_SCHED_H) |
1538 | #include <sched.h> |
1539 | #endif /* solaris or LACKS_SCHED_H */ |
1540 | #if (defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0) || !USE_SPIN_LOCKS |
1541 | #include <pthread.h> |
1542 | #endif /* USE_RECURSIVE_LOCKS ... */ |
1543 | #elif defined(_MSC_VER) |
1544 | #ifndef _M_AMD64 |
1545 | /* These are already defined on AMD64 builds */ |
1546 | #ifdef __cplusplus |
1547 | extern "C" { |
1548 | #endif /* __cplusplus */ |
1549 | LONG __cdecl _InterlockedCompareExchange(LONG volatile *Dest, LONG Exchange, LONG Comp); |
1550 | LONG __cdecl _InterlockedExchange(LONG volatile *Target, LONG Value); |
1551 | #ifdef __cplusplus |
1552 | } |
1553 | #endif /* __cplusplus */ |
1554 | #endif /* _M_AMD64 */ |
1555 | #pragma intrinsic (_InterlockedCompareExchange) |
1556 | #pragma intrinsic (_InterlockedExchange) |
1557 | #define interlockedcompareexchange _InterlockedCompareExchange |
1558 | #define interlockedexchange _InterlockedExchange |
1559 | #elif defined(WIN32) && defined(__GNUC__) |
1560 | #define interlockedcompareexchange(a, b, c) __sync_val_compare_and_swap(a, c, b) |
1561 | #define interlockedexchange __sync_lock_test_and_set |
1562 | #endif /* Win32 */ |
1563 | #else /* USE_LOCKS */ |
1564 | #endif /* USE_LOCKS */ |
1565 | |
1566 | #ifndef LOCK_AT_FORK |
1567 | #define LOCK_AT_FORK 0 |
1568 | #endif |
1569 | |
1570 | /* Declarations for bit scanning on win32 */ |
1571 | #if defined(_MSC_VER) && _MSC_VER>=1300 |
1572 | #ifndef BitScanForward /* Try to avoid pulling in WinNT.h */ |
1573 | #ifdef __cplusplus |
1574 | extern "C" { |
1575 | #endif /* __cplusplus */ |
1576 | unsigned char _BitScanForward(unsigned long *index, unsigned long mask); |
1577 | unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); |
1578 | #ifdef __cplusplus |
1579 | } |
1580 | #endif /* __cplusplus */ |
1581 | |
1582 | #define BitScanForward _BitScanForward |
1583 | #define BitScanReverse _BitScanReverse |
1584 | #pragma intrinsic(_BitScanForward) |
1585 | #pragma intrinsic(_BitScanReverse) |
1586 | #endif /* BitScanForward */ |
1587 | #endif /* defined(_MSC_VER) && _MSC_VER>=1300 */ |
1588 | |
1589 | #ifndef WIN32 |
1590 | #ifndef malloc_getpagesize |
1591 | # ifdef _SC_PAGESIZE /* some SVR4 systems omit an underscore */ |
1592 | # ifndef _SC_PAGE_SIZE |
1593 | # define _SC_PAGE_SIZE _SC_PAGESIZE |
1594 | # endif |
1595 | # endif |
1596 | # ifdef _SC_PAGE_SIZE |
1597 | # define malloc_getpagesize sysconf(_SC_PAGE_SIZE) |
1598 | # else |
1599 | # if defined(BSD) || defined(DGUX) || defined(HAVE_GETPAGESIZE) |
1600 | extern int getpagesize(); |
1601 | # define malloc_getpagesize getpagesize() |
1602 | # else |
1603 | # ifdef WIN32 /* use supplied emulation of getpagesize */ |
1604 | # define malloc_getpagesize getpagesize() |
1605 | # else |
1606 | # ifndef LACKS_SYS_PARAM_H |
1607 | # include <sys/param.h> |
1608 | # endif |
1609 | # ifdef EXEC_PAGESIZE |
1610 | # define malloc_getpagesize EXEC_PAGESIZE |
1611 | # else |
1612 | # ifdef NBPG |
1613 | # ifndef CLSIZE |
1614 | # define malloc_getpagesize NBPG |
1615 | # else |
1616 | # define malloc_getpagesize (NBPG * CLSIZE) |
1617 | # endif |
1618 | # else |
1619 | # ifdef NBPC |
1620 | # define malloc_getpagesize NBPC |
1621 | # else |
1622 | # ifdef PAGESIZE |
1623 | # define malloc_getpagesize PAGESIZE |
1624 | # else /* just guess */ |
1625 | # define malloc_getpagesize ((size_t)4096U) |
1626 | # endif |
1627 | # endif |
1628 | # endif |
1629 | # endif |
1630 | # endif |
1631 | # endif |
1632 | # endif |
1633 | #endif |
1634 | #endif |
1635 | |
1636 | /* ------------------- size_t and alignment properties -------------------- */ |
1637 | |
1638 | /* The byte and bit size of a size_t */ |
1639 | #define SIZE_T_SIZE (sizeof(size_t)) |
1640 | #define SIZE_T_BITSIZE (sizeof(size_t) << 3) |
1641 | |
1642 | /* Some constants coerced to size_t */ |
1643 | /* Annoying but necessary to avoid errors on some platforms */ |
1644 | #define SIZE_T_ZERO ((size_t)0) |
1645 | #define SIZE_T_ONE ((size_t)1) |
1646 | #define SIZE_T_TWO ((size_t)2) |
1647 | #define SIZE_T_FOUR ((size_t)4) |
1648 | #define TWO_SIZE_T_SIZES (SIZE_T_SIZE<<1) |
1649 | #define FOUR_SIZE_T_SIZES (SIZE_T_SIZE<<2) |
1650 | #define SIX_SIZE_T_SIZES (FOUR_SIZE_T_SIZES+TWO_SIZE_T_SIZES) |
1651 | #define HALF_MAX_SIZE_T (MAX_SIZE_T / 2U) |
1652 | |
1653 | /* The bit mask value corresponding to MALLOC_ALIGNMENT */ |
1654 | #define CHUNK_ALIGN_MASK (MALLOC_ALIGNMENT - SIZE_T_ONE) |
1655 | |
1656 | /* True if address a has acceptable alignment */ |
1657 | #define is_aligned(A) (((size_t)((A)) & (CHUNK_ALIGN_MASK)) == 0) |
1658 | |
1659 | /* the number of bytes to offset an address to align it */ |
1660 | #define align_offset(A)\ |
1661 | ((((size_t)(A) & CHUNK_ALIGN_MASK) == 0)? 0 :\ |
1662 | ((MALLOC_ALIGNMENT - ((size_t)(A) & CHUNK_ALIGN_MASK)) & CHUNK_ALIGN_MASK)) |
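
/*
  For example, with MALLOC_ALIGNMENT == 16 (so CHUNK_ALIGN_MASK == 15),
  an address whose low four bits are 0x4 yields align_offset == 12,
  while an already-aligned address yields 0.
*/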
1663 | |
1664 | /* -------------------------- MMAP preliminaries ------------------------- */ |
1665 | |
1666 | /* |
1667 | If HAVE_MORECORE or HAVE_MMAP are false, we just define calls and |
1668 | checks to fail so compiler optimizer can delete code rather than |
1669 | using so many "#if"s. |
1670 | */ |
1671 | |
1672 | |
1673 | /* MORECORE and MMAP must return MFAIL on failure */ |
1674 | #define MFAIL ((void*)(MAX_SIZE_T)) |
1675 | #define CMFAIL ((char*)(MFAIL)) /* defined for convenience */ |
1676 | |
1677 | #if HAVE_MMAP |
1678 | |
1679 | #ifndef WIN32 |
1680 | #define MUNMAP_DEFAULT(a, s) munmap((a), (s)) |
1681 | #define MMAP_PROT (PROT_READ|PROT_WRITE) |
1682 | #if !defined(MAP_ANONYMOUS) && defined(MAP_ANON) |
1683 | #define MAP_ANONYMOUS MAP_ANON |
1684 | #endif /* MAP_ANON */ |
1685 | #ifdef MAP_ANONYMOUS |
1686 | #define MMAP_FLAGS (MAP_PRIVATE|MAP_ANONYMOUS) |
1687 | #define MMAP_DEFAULT(s) mmap(0, (s), MMAP_PROT, MMAP_FLAGS, -1, 0) |
1688 | #else /* MAP_ANONYMOUS */ |
1689 | /* |
1690 | Nearly all versions of mmap support MAP_ANONYMOUS, so the following |
1691 | is unlikely to be needed, but is supplied just in case. |
1692 | */ |
1693 | #define MMAP_FLAGS (MAP_PRIVATE) |
1694 | static int dev_zero_fd = -1; /* Cached file descriptor for /dev/zero. */ |
1695 | #define MMAP_DEFAULT(s) ((dev_zero_fd < 0) ? \ |
1696 | (dev_zero_fd = open("/dev/zero", O_RDWR), \ |
1697 | mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) : \ |
1698 | mmap(0, (s), MMAP_PROT, MMAP_FLAGS, dev_zero_fd, 0)) |
1699 | #endif /* MAP_ANONYMOUS */ |
1700 | |
1701 | #define DIRECT_MMAP_DEFAULT(s) MMAP_DEFAULT(s) |
1702 | |
1703 | #else /* WIN32 */ |
1704 | |
1705 | /* Win32 MMAP via VirtualAlloc */ |
1706 | SDL_FORCE_INLINE void* win32mmap(size_t size) { |
1707 | void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT, PAGE_READWRITE); |
1708 | return (ptr != 0)? ptr: MFAIL; |
1709 | } |
1710 | |
1711 | /* For direct MMAP, use MEM_TOP_DOWN to minimize interference */ |
1712 | SDL_FORCE_INLINE void* win32direct_mmap(size_t size) { |
1713 | void* ptr = VirtualAlloc(0, size, MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, |
1714 | PAGE_READWRITE); |
1715 | return (ptr != 0)? ptr: MFAIL; |
1716 | } |
1717 | |
/* This function supports releasing coalesced segments */
1719 | SDL_FORCE_INLINE int win32munmap(void* ptr, size_t size) { |
1720 | MEMORY_BASIC_INFORMATION minfo; |
1721 | char* cptr = (char*)ptr; |
1722 | while (size) { |
1723 | if (VirtualQuery(cptr, &minfo, sizeof(minfo)) == 0) |
1724 | return -1; |
1725 | if (minfo.BaseAddress != cptr || minfo.AllocationBase != cptr || |
1726 | minfo.State != MEM_COMMIT || minfo.RegionSize > size) |
1727 | return -1; |
1728 | if (VirtualFree(cptr, 0, MEM_RELEASE) == 0) |
1729 | return -1; |
1730 | cptr += minfo.RegionSize; |
1731 | size -= minfo.RegionSize; |
1732 | } |
1733 | return 0; |
1734 | } |
1735 | |
1736 | #define MMAP_DEFAULT(s) win32mmap(s) |
1737 | #define MUNMAP_DEFAULT(a, s) win32munmap((a), (s)) |
1738 | #define DIRECT_MMAP_DEFAULT(s) win32direct_mmap(s) |
1739 | #endif /* WIN32 */ |
1740 | #endif /* HAVE_MMAP */ |
1741 | |
1742 | #if HAVE_MREMAP |
1743 | #ifndef WIN32 |
1744 | #define MREMAP_DEFAULT(addr, osz, nsz, mv) mremap((addr), (osz), (nsz), (mv)) |
1745 | #endif /* WIN32 */ |
1746 | #endif /* HAVE_MREMAP */ |
1747 | |
1748 | /** |
1749 | * Define CALL_MORECORE |
1750 | */ |
1751 | #if HAVE_MORECORE |
1752 | #ifdef MORECORE |
1753 | #define CALL_MORECORE(S) MORECORE(S) |
1754 | #else /* MORECORE */ |
1755 | #define CALL_MORECORE(S) MORECORE_DEFAULT(S) |
1756 | #endif /* MORECORE */ |
1757 | #else /* HAVE_MORECORE */ |
1758 | #define CALL_MORECORE(S) MFAIL |
1759 | #endif /* HAVE_MORECORE */ |
1760 | |
1761 | /** |
1762 | * Define CALL_MMAP/CALL_MUNMAP/CALL_DIRECT_MMAP |
1763 | */ |
1764 | #if HAVE_MMAP |
1765 | #define USE_MMAP_BIT (SIZE_T_ONE) |
1766 | |
1767 | #ifdef MMAP |
1768 | #define CALL_MMAP(s) MMAP(s) |
1769 | #else /* MMAP */ |
1770 | #define CALL_MMAP(s) MMAP_DEFAULT(s) |
1771 | #endif /* MMAP */ |
1772 | #ifdef MUNMAP |
1773 | #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) |
1774 | #else /* MUNMAP */ |
1775 | #define CALL_MUNMAP(a, s) MUNMAP_DEFAULT((a), (s)) |
1776 | #endif /* MUNMAP */ |
1777 | #ifdef DIRECT_MMAP |
1778 | #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) |
1779 | #else /* DIRECT_MMAP */ |
1780 | #define CALL_DIRECT_MMAP(s) DIRECT_MMAP_DEFAULT(s) |
1781 | #endif /* DIRECT_MMAP */ |
1782 | #else /* HAVE_MMAP */ |
1783 | #define USE_MMAP_BIT (SIZE_T_ZERO) |
1784 | |
1785 | #define MMAP(s) MFAIL |
1786 | #define MUNMAP(a, s) (-1) |
1787 | #define DIRECT_MMAP(s) MFAIL |
1788 | #define CALL_DIRECT_MMAP(s) DIRECT_MMAP(s) |
1789 | #define CALL_MMAP(s) MMAP(s) |
1790 | #define CALL_MUNMAP(a, s) MUNMAP((a), (s)) |
1791 | #endif /* HAVE_MMAP */ |
1792 | |
1793 | /** |
1794 | * Define CALL_MREMAP |
1795 | */ |
1796 | #if HAVE_MMAP && HAVE_MREMAP |
1797 | #ifdef MREMAP |
1798 | #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP((addr), (osz), (nsz), (mv)) |
1799 | #else /* MREMAP */ |
1800 | #define CALL_MREMAP(addr, osz, nsz, mv) MREMAP_DEFAULT((addr), (osz), (nsz), (mv)) |
1801 | #endif /* MREMAP */ |
1802 | #else /* HAVE_MMAP && HAVE_MREMAP */ |
1803 | #define CALL_MREMAP(addr, osz, nsz, mv) MFAIL |
1804 | #endif /* HAVE_MMAP && HAVE_MREMAP */ |
1805 | |
/* mstate bit set if contiguous morecore disabled or failed */
1807 | #define USE_NONCONTIGUOUS_BIT (4U) |
1808 | |
1809 | /* segment bit set in create_mspace_with_base */ |
1810 | #define EXTERN_BIT (8U) |
1811 | |
1812 | |
1813 | /* --------------------------- Lock preliminaries ------------------------ */ |
1814 | |
1815 | /* |
1816 | When locks are defined, there is one global lock, plus |
1817 | one per-mspace lock. |
1818 | |
  The global lock ensures that mparams.magic and other unique
  mparams values are initialized only once. It also protects
  sequences of calls to MORECORE.  In many cases sys_alloc requires
  two calls that should not be interleaved with calls by other
  threads.  This does not protect against direct calls to MORECORE
  by other threads not using this lock, so there is still code to
  cope as best we can with interference.
1826 | |
1827 | Per-mspace locks surround calls to malloc, free, etc. |
1828 | By default, locks are simple non-reentrant mutexes. |
1829 | |
1830 | Because lock-protected regions generally have bounded times, it is |
1831 | OK to use the supplied simple spinlocks. Spinlocks are likely to |
1832 | improve performance for lightly contended applications, but worsen |
1833 | performance under heavy contention. |
1834 | |
1835 | If USE_LOCKS is > 1, the definitions of lock routines here are |
1836 | bypassed, in which case you will need to define the type MLOCK_T, |
1837 | and at least INITIAL_LOCK, DESTROY_LOCK, ACQUIRE_LOCK, RELEASE_LOCK |
1838 | and TRY_LOCK. You must also declare a |
1839 | static MLOCK_T malloc_global_mutex = { initialization values };. |
1840 | |
1841 | */ |
1842 | |
1843 | #if !USE_LOCKS |
1844 | #define USE_LOCK_BIT (0U) |
1845 | #define INITIAL_LOCK(l) (0) |
1846 | #define DESTROY_LOCK(l) (0) |
1847 | #define ACQUIRE_MALLOC_GLOBAL_LOCK() |
1848 | #define RELEASE_MALLOC_GLOBAL_LOCK() |
1849 | |
1850 | #else |
1851 | #if USE_LOCKS > 1 |
1852 | /* ----------------------- User-defined locks ------------------------ */ |
1853 | /* Define your own lock implementation here */ |
1854 | /* #define INITIAL_LOCK(lk) ... */ |
1855 | /* #define DESTROY_LOCK(lk) ... */ |
1856 | /* #define ACQUIRE_LOCK(lk) ... */ |
1857 | /* #define RELEASE_LOCK(lk) ... */ |
1858 | /* #define TRY_LOCK(lk) ... */ |
1859 | /* static MLOCK_T malloc_global_mutex = ... */ |
1860 | |
1861 | #elif USE_SPIN_LOCKS |
1862 | |
1863 | /* First, define CAS_LOCK and CLEAR_LOCK on ints */ |
1864 | /* Note CAS_LOCK defined to return 0 on success */ |
1865 | |
1866 | #if defined(__GNUC__)&& (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1)) |
1867 | #define CAS_LOCK(sl) __sync_lock_test_and_set(sl, 1) |
1868 | #define CLEAR_LOCK(sl) __sync_lock_release(sl) |
1869 | |
1870 | #elif (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) |
1871 | /* Custom spin locks for older gcc on x86 */ |
1872 | SDL_FORCE_INLINE int x86_cas_lock(int *sl) { |
1873 | int ret; |
1874 | int val = 1; |
1875 | int cmp = 0; |
1876 | __asm__ __volatile__ ("lock; cmpxchgl %1, %2" |
1877 | : "=a" (ret) |
1878 | : "r" (val), "m" (*(sl)), "0" (cmp) |
1879 | : "memory" , "cc" ); |
1880 | return ret; |
1881 | } |
1882 | |
1883 | SDL_FORCE_INLINE void x86_clear_lock(int* sl) { |
1884 | assert(*sl != 0); |
1885 | int prev = 0; |
1886 | int ret; |
1887 | __asm__ __volatile__ ("lock; xchgl %0, %1" |
1888 | : "=r" (ret) |
1889 | : "m" (*(sl)), "0" (prev) |
1890 | : "memory" ); |
1891 | } |
1892 | |
1893 | #define CAS_LOCK(sl) x86_cas_lock(sl) |
1894 | #define CLEAR_LOCK(sl) x86_clear_lock(sl) |
1895 | |
1896 | #else /* Win32 MSC */ |
1897 | #define CAS_LOCK(sl) interlockedexchange(sl, (LONG)1) |
1898 | #define CLEAR_LOCK(sl) interlockedexchange (sl, (LONG)0) |
1899 | |
#endif /* ... gcc spin locks ... */
1901 | |
1902 | /* How to yield for a spin lock */ |
1903 | #define SPINS_PER_YIELD 63 |
1904 | #if defined(_MSC_VER) |
1905 | #define SLEEP_EX_DURATION 50 /* delay for yield/sleep */ |
1906 | #define SPIN_LOCK_YIELD SleepEx(SLEEP_EX_DURATION, FALSE) |
1907 | #elif defined (__SVR4) && defined (__sun) /* solaris */ |
1908 | #define SPIN_LOCK_YIELD thr_yield(); |
1909 | #elif !defined(LACKS_SCHED_H) |
1910 | #define SPIN_LOCK_YIELD sched_yield(); |
1911 | #else |
1912 | #define SPIN_LOCK_YIELD |
1913 | #endif /* ... yield ... */ |
1914 | |
1915 | #if !defined(USE_RECURSIVE_LOCKS) || USE_RECURSIVE_LOCKS == 0 |
1916 | /* Plain spin locks use single word (embedded in malloc_states) */ |
1917 | static int spin_acquire_lock(volatile long *sl) { |
1918 | int spins = 0; |
1919 | while (*sl != 0 || CAS_LOCK(sl)) { |
1920 | if ((++spins & SPINS_PER_YIELD) == 0) { |
1921 | SPIN_LOCK_YIELD; |
1922 | } |
1923 | } |
1924 | return 0; |
1925 | } |
1926 | |
1927 | #define MLOCK_T volatile long |
1928 | #define TRY_LOCK(sl) !CAS_LOCK(sl) |
1929 | #define RELEASE_LOCK(sl) CLEAR_LOCK(sl) |
1930 | #define ACQUIRE_LOCK(sl) (CAS_LOCK(sl)? spin_acquire_lock(sl) : 0) |
1931 | #define INITIAL_LOCK(sl) (*sl = 0) |
1932 | #define DESTROY_LOCK(sl) (0) |
1933 | static MLOCK_T malloc_global_mutex = 0; |
1934 | |
1935 | #else /* USE_RECURSIVE_LOCKS */ |
1936 | /* types for lock owners */ |
1937 | #ifdef WIN32 |
1938 | #define THREAD_ID_T DWORD |
1939 | #define CURRENT_THREAD GetCurrentThreadId() |
1940 | #define EQ_OWNER(X,Y) ((X) == (Y)) |
1941 | #else |
1942 | /* |
1943 | Note: the following assume that pthread_t is a type that can be |
1944 | initialized to (casted) zero. If this is not the case, you will need to |
1945 | somehow redefine these or not use spin locks. |
1946 | */ |
1947 | #define THREAD_ID_T pthread_t |
1948 | #define CURRENT_THREAD pthread_self() |
1949 | #define EQ_OWNER(X,Y) pthread_equal(X, Y) |
1950 | #endif |
1951 | |
1952 | struct malloc_recursive_lock { |
1953 | int sl; |
1954 | unsigned int c; |
1955 | THREAD_ID_T threadid; |
1956 | }; |
1957 | |
1958 | #define MLOCK_T struct malloc_recursive_lock |
1959 | static MLOCK_T malloc_global_mutex = { 0, 0, (THREAD_ID_T)0}; |
1960 | |
1961 | SDL_FORCE_INLINE void recursive_release_lock(MLOCK_T *lk) { |
1962 | assert(lk->sl != 0); |
1963 | if (--lk->c == 0) { |
1964 | CLEAR_LOCK(&lk->sl); |
1965 | } |
1966 | } |
1967 | |
1968 | SDL_FORCE_INLINE int recursive_acquire_lock(MLOCK_T *lk) { |
1969 | THREAD_ID_T mythreadid = CURRENT_THREAD; |
1970 | int spins = 0; |
1971 | for (;;) { |
1972 | if (*((volatile int *)(&lk->sl)) == 0) { |
1973 | if (!CAS_LOCK(&lk->sl)) { |
1974 | lk->threadid = mythreadid; |
1975 | lk->c = 1; |
1976 | return 0; |
1977 | } |
1978 | } |
1979 | else if (EQ_OWNER(lk->threadid, mythreadid)) { |
1980 | ++lk->c; |
1981 | return 0; |
1982 | } |
1983 | if ((++spins & SPINS_PER_YIELD) == 0) { |
1984 | SPIN_LOCK_YIELD; |
1985 | } |
1986 | } |
1987 | } |
1988 | |
1989 | SDL_FORCE_INLINE int recursive_try_lock(MLOCK_T *lk) { |
1990 | THREAD_ID_T mythreadid = CURRENT_THREAD; |
1991 | if (*((volatile int *)(&lk->sl)) == 0) { |
1992 | if (!CAS_LOCK(&lk->sl)) { |
1993 | lk->threadid = mythreadid; |
1994 | lk->c = 1; |
1995 | return 1; |
1996 | } |
1997 | } |
1998 | else if (EQ_OWNER(lk->threadid, mythreadid)) { |
1999 | ++lk->c; |
2000 | return 1; |
2001 | } |
2002 | return 0; |
2003 | } |
2004 | |
2005 | #define RELEASE_LOCK(lk) recursive_release_lock(lk) |
2006 | #define TRY_LOCK(lk) recursive_try_lock(lk) |
2007 | #define ACQUIRE_LOCK(lk) recursive_acquire_lock(lk) |
2008 | #define INITIAL_LOCK(lk) ((lk)->threadid = (THREAD_ID_T)0, (lk)->sl = 0, (lk)->c = 0) |
2009 | #define DESTROY_LOCK(lk) (0) |
2010 | #endif /* USE_RECURSIVE_LOCKS */ |
2011 | |
2012 | #elif defined(WIN32) /* Win32 critical sections */ |
2013 | #define MLOCK_T CRITICAL_SECTION |
2014 | #define ACQUIRE_LOCK(lk) (EnterCriticalSection(lk), 0) |
2015 | #define RELEASE_LOCK(lk) LeaveCriticalSection(lk) |
2016 | #define TRY_LOCK(lk) TryEnterCriticalSection(lk) |
2017 | #define INITIAL_LOCK(lk) (!InitializeCriticalSectionAndSpinCount((lk), 0x80000000|4000)) |
2018 | #define DESTROY_LOCK(lk) (DeleteCriticalSection(lk), 0) |
2019 | #define NEED_GLOBAL_LOCK_INIT |
2020 | |
2021 | static MLOCK_T malloc_global_mutex; |
2022 | static volatile LONG malloc_global_mutex_status; |
2023 | |
2024 | /* Use spin loop to initialize global lock */ |
2025 | static void init_malloc_global_mutex() { |
2026 | for (;;) { |
2027 | long stat = malloc_global_mutex_status; |
2028 | if (stat > 0) |
2029 | return; |
    /* transition to < 0 while initializing, then to > 0 */
2031 | if (stat == 0 && |
2032 | interlockedcompareexchange(&malloc_global_mutex_status, (LONG)-1, (LONG)0) == 0) { |
2033 | InitializeCriticalSection(&malloc_global_mutex); |
2034 | interlockedexchange(&malloc_global_mutex_status, (LONG)1); |
2035 | return; |
2036 | } |
2037 | SleepEx(0, FALSE); |
2038 | } |
2039 | } |
2040 | |
2041 | #else /* pthreads-based locks */ |
2042 | #define MLOCK_T pthread_mutex_t |
2043 | #define ACQUIRE_LOCK(lk) pthread_mutex_lock(lk) |
2044 | #define RELEASE_LOCK(lk) pthread_mutex_unlock(lk) |
2045 | #define TRY_LOCK(lk) (!pthread_mutex_trylock(lk)) |
2046 | #define INITIAL_LOCK(lk) pthread_init_lock(lk) |
2047 | #define DESTROY_LOCK(lk) pthread_mutex_destroy(lk) |
2048 | |
2049 | #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 && defined(linux) && !defined(PTHREAD_MUTEX_RECURSIVE) |
2050 | /* Cope with old-style linux recursive lock initialization by adding */ |
2051 | /* skipped internal declaration from pthread.h */ |
2052 | extern int pthread_mutexattr_setkind_np __P ((pthread_mutexattr_t *__attr, |
2053 | int __kind)); |
2054 | #define PTHREAD_MUTEX_RECURSIVE PTHREAD_MUTEX_RECURSIVE_NP |
2055 | #define pthread_mutexattr_settype(x,y) pthread_mutexattr_setkind_np(x,y) |
2056 | #endif /* USE_RECURSIVE_LOCKS ... */ |
2057 | |
2058 | static MLOCK_T malloc_global_mutex = PTHREAD_MUTEX_INITIALIZER; |
2059 | |
2060 | static int pthread_init_lock (MLOCK_T *lk) { |
2061 | pthread_mutexattr_t attr; |
2062 | if (pthread_mutexattr_init(&attr)) return 1; |
2063 | #if defined(USE_RECURSIVE_LOCKS) && USE_RECURSIVE_LOCKS != 0 |
2064 | if (pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE)) return 1; |
2065 | #endif |
2066 | if (pthread_mutex_init(lk, &attr)) return 1; |
2067 | if (pthread_mutexattr_destroy(&attr)) return 1; |
2068 | return 0; |
2069 | } |
2070 | |
2071 | #endif /* ... lock types ... */ |
2072 | |
2073 | /* Common code for all lock types */ |
2074 | #define USE_LOCK_BIT (2U) |
2075 | |
2076 | #ifndef ACQUIRE_MALLOC_GLOBAL_LOCK |
2077 | #define ACQUIRE_MALLOC_GLOBAL_LOCK() ACQUIRE_LOCK(&malloc_global_mutex); |
2078 | #endif |
2079 | |
2080 | #ifndef RELEASE_MALLOC_GLOBAL_LOCK |
2081 | #define RELEASE_MALLOC_GLOBAL_LOCK() RELEASE_LOCK(&malloc_global_mutex); |
2082 | #endif |
2083 | |
2084 | #endif /* USE_LOCKS */ |
2085 | |
2086 | /* ----------------------- Chunk representations ------------------------ */ |
2087 | |
2088 | /* |
2089 | (The following includes lightly edited explanations by Colin Plumb.) |
2090 | |
2091 | The malloc_chunk declaration below is misleading (but accurate and |
2092 | necessary). It declares a "view" into memory allowing access to |
2093 | necessary fields at known offsets from a given base. |
2094 | |
2095 | Chunks of memory are maintained using a `boundary tag' method as |
2096 | originally described by Knuth. (See the paper by Paul Wilson |
2097 | ftp://ftp.cs.utexas.edu/pub/garbage/allocsrv.ps for a survey of such |
2098 | techniques.) Sizes of free chunks are stored both in the front of |
2099 | each chunk and at the end. This makes consolidating fragmented |
2100 | chunks into bigger chunks fast. The head fields also hold bits |
2101 | representing whether chunks are free or in use. |
2102 | |
2103 | Here are some pictures to make it clearer. They are "exploded" to |
2104 | show that the state of a chunk can be thought of as extending from |
2105 | the high 31 bits of the head field of its header through the |
2106 | prev_foot and PINUSE_BIT bit of the following chunk header. |
2107 | |
2108 | A chunk that's in use looks like: |
2109 | |
2110 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2111 | | Size of previous chunk (if P = 0) | |
2112 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2113 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
2114 | | Size of this chunk 1| +-+ |
2115 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2116 | | | |
2117 | +- -+ |
2118 | | | |
2119 | +- -+ |
2120 | | : |
2121 | +- size - sizeof(size_t) available payload bytes -+ |
2122 | : | |
2123 | chunk-> +- -+ |
2124 | | | |
2125 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2126 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |1| |
2127 | | Size of next chunk (may or may not be in use) | +-+ |
2128 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2129 | |
2130 | And if it's free, it looks like this: |
2131 | |
2132 | chunk-> +- -+ |
2133 | | User payload (must be in use, or we would have merged!) | |
2134 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2135 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |P| |
2136 | | Size of this chunk 0| +-+ |
2137 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2138 | | Next pointer | |
2139 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2140 | | Prev pointer | |
2141 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2142 | | : |
2143 | +- size - sizeof(struct chunk) unused bytes -+ |
2144 | : | |
2145 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2146 | | Size of this chunk | |
2147 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2148 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |0| |
2149 | | Size of next chunk (must be in use, or we would have merged)| +-+ |
2150 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2151 | | : |
2152 | +- User payload -+ |
2153 | : | |
2154 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2155 | |0| |
2156 | +-+ |
2157 | Note that since we always merge adjacent free chunks, the chunks |
2158 | adjacent to a free chunk must be in use. |
2159 | |
2160 | Given a pointer to a chunk (which can be derived trivially from the |
2161 | payload pointer) we can, in O(1) time, find out whether the adjacent |
2162 | chunks are free, and if so, unlink them from the lists that they |
2163 | are on and merge them with the current chunk. |
2164 | |
2165 | Chunks always begin on even word boundaries, so the mem portion |
2166 | (which is returned to the user) is also on an even word boundary, and |
2167 | thus at least double-word aligned. |
2168 | |
2169 | The P (PINUSE_BIT) bit, stored in the unused low-order bit of the |
2170 | chunk size (which is always a multiple of two words), is an in-use |
2171 | bit for the *previous* chunk. If that bit is *clear*, then the |
2172 | word before the current chunk size contains the previous chunk |
2173 | size, and can be used to find the front of the previous chunk. |
2174 | The very first chunk allocated always has this bit set, preventing |
2175 | access to non-existent (or non-owned) memory. If pinuse is set for |
2176 | any given chunk, then you CANNOT determine the size of the |
2177 | previous chunk, and might even get a memory addressing fault when |
2178 | trying to do so. |
2179 | |
2180 | The C (CINUSE_BIT) bit, stored in the unused second-lowest bit of |
2181 | the chunk size redundantly records whether the current chunk is |
2182 | inuse (unless the chunk is mmapped). This redundancy enables usage |
2183 | checks within free and realloc, and reduces indirection when freeing |
2184 | and consolidating chunks. |
2185 | |
2186 | Each freshly allocated chunk must have both cinuse and pinuse set. |
2187 | That is, each allocated chunk borders either a previously allocated |
2188 | and still in-use chunk, or the base of its memory arena. This is |
2189 | ensured by making all allocations from the `lowest' part of any |
2190 | found chunk. Further, no free chunk physically borders another one, |
2191 | so each free chunk is known to be preceded and followed by either |
2192 | inuse chunks or the ends of memory. |
2193 | |
2194 | Note that the `foot' of the current chunk is actually represented |
2195 | as the prev_foot of the NEXT chunk. This makes it easier to |
2196 | deal with alignments etc but can be very confusing when trying |
2197 | to extend or adapt this code. |
2198 | |
2199 | The exceptions to all this are |
2200 | |
2201 | 1. The special chunk `top' is the top-most available chunk (i.e., |
2202 | the one bordering the end of available memory). It is treated |
2203 | specially. Top is never included in any bin, is used only if |
2204 | no other chunk is available, and is released back to the |
2205 | system if it is very large (see M_TRIM_THRESHOLD). In effect, |
2206 | the top chunk is treated as larger (and thus less well |
2207 | fitting) than any other available chunk. The top chunk |
2208 | doesn't update its trailing size field since there is no next |
2209 | contiguous chunk that would have to index off it. However, |
2210 | space is still allocated for it (TOP_FOOT_SIZE) to enable |
2211 | separation or merging when space is extended. |
2212 | |
     2. Chunks allocated via mmap have both cinuse and pinuse bits
2214 | cleared in their head fields. Because they are allocated |
2215 | one-by-one, each must carry its own prev_foot field, which is |
2216 | also used to hold the offset this chunk has within its mmapped |
2217 | region, which is needed to preserve alignment. Each mmapped |
2218 | chunk is trailed by the first two fields of a fake next-chunk |
2219 | for sake of usage checks. |
2220 | |
2221 | */ |
2222 | |
2223 | struct malloc_chunk { |
2224 | size_t prev_foot; /* Size of previous chunk (if free). */ |
2225 | size_t head; /* Size and inuse bits. */ |
2226 | struct malloc_chunk* fd; /* double links -- used only if free. */ |
2227 | struct malloc_chunk* bk; |
2228 | }; |
2229 | |
2230 | typedef struct malloc_chunk mchunk; |
2231 | typedef struct malloc_chunk* mchunkptr; |
2232 | typedef struct malloc_chunk* sbinptr; /* The type of bins of chunks */ |
2233 | typedef unsigned int bindex_t; /* Described below */ |
2234 | typedef unsigned int binmap_t; /* Described below */ |
2235 | typedef unsigned int flag_t; /* The type of various bit flag sets */ |
2236 | |
2237 | /* ------------------- Chunks sizes and alignments ----------------------- */ |
2238 | |
2239 | #define MCHUNK_SIZE (sizeof(mchunk)) |
2240 | |
2241 | #if FOOTERS |
2242 | #define CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
2243 | #else /* FOOTERS */ |
2244 | #define CHUNK_OVERHEAD (SIZE_T_SIZE) |
2245 | #endif /* FOOTERS */ |
2246 | |
2247 | /* MMapped chunks need a second word of overhead ... */ |
2248 | #define MMAP_CHUNK_OVERHEAD (TWO_SIZE_T_SIZES) |
2249 | /* ... and additional padding for fake next-chunk at foot */ |
2250 | #define MMAP_FOOT_PAD (FOUR_SIZE_T_SIZES) |
2251 | |
2252 | /* The smallest size we can malloc is an aligned minimal chunk */ |
2253 | #define MIN_CHUNK_SIZE\ |
2254 | ((MCHUNK_SIZE + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
2255 | |
2256 | /* conversion from malloc headers to user pointers, and back */ |
2257 | #define chunk2mem(p) ((void*)((char*)(p) + TWO_SIZE_T_SIZES)) |
2258 | #define mem2chunk(mem) ((mchunkptr)((char*)(mem) - TWO_SIZE_T_SIZES)) |
2259 | /* chunk associated with aligned address A */ |
2260 | #define align_as_chunk(A) (mchunkptr)((A) + align_offset(chunk2mem(A))) |
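
/*
  chunk2mem and mem2chunk are exact inverses: the user pointer sits two
  size_t words (prev_foot and head) past the chunk base, so for any
  chunk pointer p, mem2chunk(chunk2mem(p)) == p.
*/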
2261 | |
2262 | /* Bounds on request (not chunk) sizes. */ |
2263 | #define MAX_REQUEST ((-MIN_CHUNK_SIZE) << 2) |
2264 | #define MIN_REQUEST (MIN_CHUNK_SIZE - CHUNK_OVERHEAD - SIZE_T_ONE) |
2265 | |
2266 | /* pad request bytes into a usable size */ |
2267 | #define pad_request(req) \ |
2268 | (((req) + CHUNK_OVERHEAD + CHUNK_ALIGN_MASK) & ~CHUNK_ALIGN_MASK) |
2269 | |
2270 | /* pad request, checking for minimum (but not maximum) */ |
2271 | #define request2size(req) \ |
2272 | (((req) < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(req)) |
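
/*
  A worked example, assuming 64-bit size_t with default settings
  (SIZE_T_SIZE == 8, CHUNK_OVERHEAD == 8, MALLOC_ALIGNMENT == 16,
  MIN_CHUNK_SIZE == 32): request2size(1) == 32 (minimum enforced),
  request2size(24) == (24 + 8 + 15) & ~15 == 32, and
  request2size(25) == (25 + 8 + 15) & ~15 == 48.
*/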
2273 | |
2274 | |
2275 | /* ------------------ Operations on head and foot fields ----------------- */ |
2276 | |
2277 | /* |
2278 | The head field of a chunk is or'ed with PINUSE_BIT when previous |
2279 | adjacent chunk in use, and or'ed with CINUSE_BIT if this chunk is in |
2280 | use, unless mmapped, in which case both bits are cleared. |
2281 | |
2282 | FLAG4_BIT is not used by this malloc, but might be useful in extensions. |
2283 | */ |
2284 | |
2285 | #define PINUSE_BIT (SIZE_T_ONE) |
2286 | #define CINUSE_BIT (SIZE_T_TWO) |
2287 | #define FLAG4_BIT (SIZE_T_FOUR) |
2288 | #define INUSE_BITS (PINUSE_BIT|CINUSE_BIT) |
2289 | #define FLAG_BITS (PINUSE_BIT|CINUSE_BIT|FLAG4_BIT) |
2290 | |
2291 | /* Head value for fenceposts */ |
2292 | #define FENCEPOST_HEAD (INUSE_BITS|SIZE_T_SIZE) |
2293 | |
2294 | /* extraction of fields from head words */ |
2295 | #define cinuse(p) ((p)->head & CINUSE_BIT) |
2296 | #define pinuse(p) ((p)->head & PINUSE_BIT) |
2297 | #define flag4inuse(p) ((p)->head & FLAG4_BIT) |
2298 | #define is_inuse(p) (((p)->head & INUSE_BITS) != PINUSE_BIT) |
2299 | #define is_mmapped(p) (((p)->head & INUSE_BITS) == 0) |
2300 | |
2301 | #define chunksize(p) ((p)->head & ~(FLAG_BITS)) |
2302 | |
2303 | #define clear_pinuse(p) ((p)->head &= ~PINUSE_BIT) |
2304 | #define set_flag4(p) ((p)->head |= FLAG4_BIT) |
2305 | #define clear_flag4(p) ((p)->head &= ~FLAG4_BIT) |
2306 | |
2307 | /* Treat space at ptr +/- offset as a chunk */ |
2308 | #define chunk_plus_offset(p, s) ((mchunkptr)(((char*)(p)) + (s))) |
2309 | #define chunk_minus_offset(p, s) ((mchunkptr)(((char*)(p)) - (s))) |
2310 | |
2311 | /* Ptr to next or previous physical malloc_chunk. */ |
2312 | #define next_chunk(p) ((mchunkptr)( ((char*)(p)) + ((p)->head & ~FLAG_BITS))) |
2313 | #define prev_chunk(p) ((mchunkptr)( ((char*)(p)) - ((p)->prev_foot) )) |
2314 | |
2315 | /* extract next chunk's pinuse bit */ |
2316 | #define next_pinuse(p) ((next_chunk(p)->head) & PINUSE_BIT) |
2317 | |
2318 | /* Get/set size at footer */ |
2319 | #define get_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot) |
2320 | #define set_foot(p, s) (((mchunkptr)((char*)(p) + (s)))->prev_foot = (s)) |
2321 | |
2322 | /* Set size, pinuse bit, and foot */ |
2323 | #define set_size_and_pinuse_of_free_chunk(p, s)\ |
2324 | ((p)->head = (s|PINUSE_BIT), set_foot(p, s)) |
2325 | |
2326 | /* Set size, pinuse bit, foot, and clear next pinuse */ |
2327 | #define set_free_with_pinuse(p, s, n)\ |
2328 | (clear_pinuse(n), set_size_and_pinuse_of_free_chunk(p, s)) |
2329 | |
2330 | /* Get the internal overhead associated with chunk p */ |
2331 | #define overhead_for(p)\ |
2332 | (is_mmapped(p)? MMAP_CHUNK_OVERHEAD : CHUNK_OVERHEAD) |
2333 | |
2334 | /* Return true if malloced space is not necessarily cleared */ |
2335 | #if MMAP_CLEARS |
2336 | #define calloc_must_clear(p) (!is_mmapped(p)) |
2337 | #else /* MMAP_CLEARS */ |
2338 | #define calloc_must_clear(p) (1) |
2339 | #endif /* MMAP_CLEARS */ |
2340 | |
2341 | /* ---------------------- Overlaid data structures ----------------------- */ |
2342 | |
2343 | /* |
2344 | When chunks are not in use, they are treated as nodes of either |
2345 | lists or trees. |
2346 | |
2347 | "Small" chunks are stored in circular doubly-linked lists, and look |
2348 | like this: |
2349 | |
2350 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2351 | | Size of previous chunk | |
2352 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2353 | `head:' | Size of chunk, in bytes |P| |
2354 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2355 | | Forward pointer to next chunk in list | |
2356 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2357 | | Back pointer to previous chunk in list | |
2358 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2359 | | Unused space (may be 0 bytes long) . |
2360 | . . |
2361 | . | |
2362 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2363 | `foot:' | Size of chunk, in bytes | |
2364 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2365 | |
2366 | Larger chunks are kept in a form of bitwise digital trees (aka |
2367 | tries) keyed on chunksizes. Because malloc_tree_chunks are only for |
2368 | free chunks greater than 256 bytes, their size doesn't impose any |
2369 | constraints on user chunk sizes. Each node looks like: |
2370 | |
2371 | chunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2372 | | Size of previous chunk | |
2373 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2374 | `head:' | Size of chunk, in bytes |P| |
2375 | mem-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2376 | | Forward pointer to next chunk of same size | |
2377 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2378 | | Back pointer to previous chunk of same size | |
2379 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2380 | | Pointer to left child (child[0]) | |
2381 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2382 | | Pointer to right child (child[1]) | |
2383 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2384 | | Pointer to parent | |
2385 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2386 | | bin index of this chunk | |
2387 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2388 | | Unused space . |
2389 | . | |
2390 | nextchunk-> +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2391 | `foot:' | Size of chunk, in bytes | |
2392 | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
2393 | |
2394 | Each tree holding treenodes is a tree of unique chunk sizes. Chunks |
2395 | of the same size are arranged in a circularly-linked list, with only |
2396 | the oldest chunk (the next to be used, in our FIFO ordering) |
2397 | actually in the tree. (Tree members are distinguished by a non-null |
  parent pointer.)  If a chunk with the same size as an existing node
2399 | is inserted, it is linked off the existing node using pointers that |
2400 | work in the same way as fd/bk pointers of small chunks. |
2401 | |
2402 | Each tree contains a power of 2 sized range of chunk sizes (the |
  smallest is 0x100 <= x < 0x180), which is divided in half at each
  tree level, with the chunks in the smaller half of the range (0x100
  <= x < 0x140 for the top node) in the left subtree and the larger
2406 | half (0x140 <= x < 0x180) in the right subtree. This is, of course, |
2407 | done by inspecting individual bits. |
2408 | |
2409 | Using these rules, each node's left subtree contains all smaller |
2410 | sizes than its right subtree. However, the node at the root of each |
2411 | subtree has no particular ordering relationship to either. (The |
2412 | dividing line between the subtree sizes is based on trie relation.) |
2413 | If we remove the last chunk of a given size from the interior of the |
2414 | tree, we need to replace it with a leaf node. The tree ordering |
2415 | rules permit a node to be replaced by any leaf below it. |
2416 | |
2417 | The smallest chunk in a tree (a common operation in a best-fit |
2418 | allocator) can be found by walking a path to the leftmost leaf in |
2419 | the tree. Unlike a usual binary tree, where we follow left child |
2420 | pointers until we reach a null, here we follow the right child |
2421 | pointer any time the left one is null, until we reach a leaf with |
2422 | both child pointers null. The smallest chunk in the tree will be |
2423 | somewhere along that path. |
2424 | |
2425 | The worst case number of steps to add, find, or remove a node is |
2426 | bounded by the number of bits differentiating chunks within |
2427 | bins. Under current bin calculations, this ranges from 6 up to 21 |
2428 | (for 32 bit sizes) or up to 53 (for 64 bit sizes). The typical case |
2429 | is of course much better. |
2430 | */ |
2431 | |
2432 | struct malloc_tree_chunk { |
2433 | /* The first four fields must be compatible with malloc_chunk */ |
2434 | size_t prev_foot; |
2435 | size_t head; |
2436 | struct malloc_tree_chunk* fd; |
2437 | struct malloc_tree_chunk* bk; |
2438 | |
2439 | struct malloc_tree_chunk* child[2]; |
2440 | struct malloc_tree_chunk* parent; |
2441 | bindex_t index; |
2442 | }; |
2443 | |
2444 | typedef struct malloc_tree_chunk tchunk; |
2445 | typedef struct malloc_tree_chunk* tchunkptr; |
2446 | typedef struct malloc_tree_chunk* tbinptr; /* The type of bins of trees */ |
2447 | |
2448 | /* A little helper macro for trees */ |
2449 | #define leftmost_child(t) ((t)->child[0] != 0? (t)->child[0] : (t)->child[1]) |
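
/*
  A sketch of the "leftmost leaf" walk described above (the
  leftmost_leaf helper is hypothetical): follow the left child, or the
  right one whenever the left is null, until both are null. The
  smallest chunk in a tree bin lies along this path.

    static tchunkptr leftmost_leaf(tchunkptr t) {
      while (leftmost_child(t) != 0)
        t = leftmost_child(t);
      return t;
    }
*/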
2450 | |
2451 | /* ----------------------------- Segments -------------------------------- */ |
2452 | |
2453 | /* |
2454 | Each malloc space may include non-contiguous segments, held in a |
2455 | list headed by an embedded malloc_segment record representing the |
2456 | top-most space. Segments also include flags holding properties of |
2457 | the space. Large chunks that are directly allocated by mmap are not |
2458 | included in this list. They are instead independently created and |
2459 | destroyed without otherwise keeping track of them. |
2460 | |
2461 | Segment management mainly comes into play for spaces allocated by |
2462 | MMAP. Any call to MMAP might or might not return memory that is |
2463 | adjacent to an existing segment. MORECORE normally contiguously |
2464 | extends the current space, so this space is almost always adjacent, |
2465 | which is simpler and faster to deal with. (This is why MORECORE is |
2466 | used preferentially to MMAP when both are available -- see |
2467 | sys_alloc.) When allocating using MMAP, we don't use any of the |
2468 | hinting mechanisms (inconsistently) supported in various |
2469 | implementations of unix mmap, or distinguish reserving from |
2470 | committing memory. Instead, we just ask for space, and exploit |
2471 | contiguity when we get it. It is probably possible to do |
2472 | better than this on some systems, but no general scheme seems |
2473 | to be significantly better. |
2474 | |
2475 | Management entails a simpler variant of the consolidation scheme |
2476 | used for chunks to reduce fragmentation -- new adjacent memory is |
2477 | normally prepended or appended to an existing segment. However, |
2478 | there are limitations compared to chunk consolidation that mostly |
2479 | reflect the fact that segment processing is relatively infrequent |
2480 | (occurring only when getting memory from system) and that we |
2481 | don't expect to have huge numbers of segments: |
2482 | |
2483 | * Segments are not indexed, so traversal requires linear scans. (It |
2484 | would be possible to index these, but is not worth the extra |
2485 | overhead and complexity for most programs on most platforms.) |
2486 | * New segments are only appended to old ones when holding top-most |
2487 | memory; if they cannot be prepended to others, they are held in |
2488 | different segments. |
2489 | |
2490 | Except for the top-most segment of an mstate, each segment record |
2491 | is kept at the tail of its segment. Segments are added by pushing |
2492 | segment records onto the list headed by &mstate.seg for the |
2493 | containing mstate. |
2494 | |
2495 | Segment flags control allocation/merge/deallocation policies: |
2496 | * If EXTERN_BIT set, then we did not allocate this segment, |
2497 | and so should not try to deallocate or merge with others. |
2498 | (This currently holds only for the initial segment passed |
2499 | into create_mspace_with_base.) |
2500 | * If USE_MMAP_BIT set, the segment may be merged with |
2501 | other surrounding mmapped segments and trimmed/de-allocated |
2502 | using munmap. |
2503 | * If neither bit is set, then the segment was obtained using |
2504 | MORECORE so can be merged with surrounding MORECORE'd segments |
2505 | and deallocated/trimmed using MORECORE with negative arguments. |
2506 | */ |
2507 | |
2508 | struct malloc_segment { |
2509 | char* base; /* base address */ |
2510 | size_t size; /* allocated size */ |
2511 | struct malloc_segment* next; /* ptr to next segment */ |
2512 | flag_t sflags; /* mmap and extern flag */ |
2513 | }; |
2514 | |
2515 | #define is_mmapped_segment(S) ((S)->sflags & USE_MMAP_BIT) |
2516 | #define is_extern_segment(S) ((S)->sflags & EXTERN_BIT) |
2517 | |
2518 | typedef struct malloc_segment msegment; |
2519 | typedef struct malloc_segment* msegmentptr; |
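/*
  Illustrative sketch (not part of dlmalloc): the segment list is
  always walked from the embedded head record, exactly as
  segment_holding below does. For example, a hypothetical helper
  counting segments:

    static size_t count_segments(mstate m) {
      size_t n = 0;
      msegmentptr s = &m->seg;
      while (s != 0) { ++n; s = s->next; }
      return n;
    }

  Because segments are not indexed, all such operations are linear
  scans, which is acceptable given the small expected segment count.
*/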
2520 | |
2521 | /* ---------------------------- malloc_state ----------------------------- */ |
2522 | |
2523 | /* |
2524 | A malloc_state holds all of the bookkeeping for a space. |
2525 | The main fields are: |
2526 | |
2527 | Top |
2528 | The topmost chunk of the currently active segment. Its size is |
2529 | cached in topsize. The actual size of topmost space is |
2530 | topsize+TOP_FOOT_SIZE, which includes space reserved for adding |
2531 | fenceposts and segment records if necessary when getting more |
2532 | space from the system. The size at which to autotrim top is |
2533 | cached from mparams in trim_check, except that it is disabled if |
2534 | an autotrim fails. |
2535 | |
2536 | Designated victim (dv) |
2537 | This is the preferred chunk for servicing small requests that |
2538 | don't have exact fits. It is normally the chunk split off most |
2539 | recently to service another small request. Its size is cached in |
2540 | dvsize. The link fields of this chunk are not maintained since it |
2541 | is not kept in a bin. |
2542 | |
2543 | SmallBins |
2544 | An array of bin headers for free chunks. These bins hold chunks |
    with sizes less than MIN_LARGE_SIZE bytes. Each bin holds chunks
    of a single size; bin sizes are spaced 8 bytes apart. To simplify
2547 | use in double-linked lists, each bin header acts as a malloc_chunk |
2548 | pointing to the real first node, if it exists (else pointing to |
2549 | itself). This avoids special-casing for headers. But to avoid |
2550 | waste, we allocate only the fd/bk pointers of bins, and then use |
2551 | repositioning tricks to treat these as the fields of a chunk. |
2552 | |
2553 | TreeBins |
2554 | Treebins are pointers to the roots of trees holding a range of |
2555 | sizes. There are 2 equally spaced treebins for each power of two |
    from TREEBIN_SHIFT to TREEBIN_SHIFT+16. The last bin holds anything
2557 | larger. |
2558 | |
2559 | Bin maps |
2560 | There is one bit map for small bins ("smallmap") and one for |
2561 | treebins ("treemap). Each bin sets its bit when non-empty, and |
2562 | clears the bit when empty. Bit operations are then used to avoid |
2563 | bin-by-bin searching -- nearly all "search" is done without ever |
2564 | looking at bins that won't be selected. The bit maps |
    conservatively use 32 bits per map word, even on 64-bit systems.
2566 | For a good description of some of the bit-based techniques used |
2567 | here, see Henry S. Warren Jr's book "Hacker's Delight" (and |
2568 | supplement at http://hackersdelight.org/). Many of these are |
2569 | intended to reduce the branchiness of paths through malloc etc, as |
2570 | well as to reduce the number of memory locations read or written. |
2571 | |
2572 | Segments |
2573 | A list of segments headed by an embedded malloc_segment record |
2574 | representing the initial space. |
2575 | |
2576 | Address check support |
2577 | The least_addr field is the least address ever obtained from |
2578 | MORECORE or MMAP. Attempted frees and reallocs of any address less |
2579 | than this are trapped (unless INSECURE is defined). |
2580 | |
2581 | Magic tag |
    A cross-check field that should always hold the same value as
    mparams.magic.
2583 | |
2584 | Max allowed footprint |
2585 | The maximum allowed bytes to allocate from system (zero means no limit) |
2586 | |
2587 | Flags |
2588 | Bits recording whether to use MMAP, locks, or contiguous MORECORE |
2589 | |
2590 | Statistics |
2591 | Each space keeps track of current and maximum system memory |
2592 | obtained via MORECORE or MMAP. |
2593 | |
2594 | Trim support |
2595 | Fields holding the amount of unused topmost memory that should trigger |
2596 | trimming, and a counter to force periodic scanning to release unused |
2597 | non-topmost segments. |
2598 | |
2599 | Locking |
2600 | If USE_LOCKS is defined, the "mutex" lock is acquired and released |
2601 | around every public call using this mspace. |
2602 | |
2603 | Extension support |
2604 | A void* pointer and a size_t field that can be used to help implement |
2605 | extensions to this malloc. |
2606 | */ |
2607 | |
2608 | /* Bin types, widths and sizes */ |
2609 | #define NSMALLBINS (32U) |
2610 | #define NTREEBINS (32U) |
2611 | #define SMALLBIN_SHIFT (3U) |
2612 | #define SMALLBIN_WIDTH (SIZE_T_ONE << SMALLBIN_SHIFT) |
2613 | #define TREEBIN_SHIFT (8U) |
2614 | #define MIN_LARGE_SIZE (SIZE_T_ONE << TREEBIN_SHIFT) |
2615 | #define MAX_SMALL_SIZE (MIN_LARGE_SIZE - SIZE_T_ONE) |
2616 | #define MAX_SMALL_REQUEST (MAX_SMALL_SIZE - CHUNK_ALIGN_MASK - CHUNK_OVERHEAD) |
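
/*
  Worked example of these constants, assuming 32-bit size_t, 8-byte
  alignment, and no FOOTERS (so CHUNK_OVERHEAD == 4):
    SMALLBIN_WIDTH    == 1 << 3 == 8
    MIN_LARGE_SIZE    == 1 << 8 == 256
    MAX_SMALL_SIZE    == 255
    MAX_SMALL_REQUEST == 255 - 7 - 4 == 244
  so user requests up to 244 bytes are serviced from smallbins, whose
  chunk sizes step by 8 bytes from MIN_CHUNK_SIZE up to 248.
*/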
2617 | |
2618 | struct malloc_state { |
2619 | binmap_t smallmap; |
2620 | binmap_t treemap; |
2621 | size_t dvsize; |
2622 | size_t topsize; |
2623 | char* least_addr; |
2624 | mchunkptr dv; |
2625 | mchunkptr top; |
2626 | size_t trim_check; |
2627 | size_t release_checks; |
2628 | size_t magic; |
2629 | mchunkptr smallbins[(NSMALLBINS+1)*2]; |
2630 | tbinptr treebins[NTREEBINS]; |
2631 | size_t footprint; |
2632 | size_t max_footprint; |
2633 | size_t footprint_limit; /* zero means no limit */ |
2634 | flag_t mflags; |
2635 | #if USE_LOCKS |
2636 | MLOCK_T mutex; /* locate lock among fields that rarely change */ |
2637 | #endif /* USE_LOCKS */ |
2638 | msegment seg; |
2639 | void* extp; /* Unused but available for extensions */ |
2640 | size_t exts; |
2641 | }; |
2642 | |
2643 | typedef struct malloc_state* mstate; |
2644 | |
2645 | /* ------------- Global malloc_state and malloc_params ------------------- */ |
2646 | |
2647 | /* |
2648 | malloc_params holds global properties, including those that can be |
2649 | dynamically set using mallopt. There is a single instance, mparams, |
2650 | initialized in init_mparams. Note that the non-zeroness of "magic" |
2651 | also serves as an initialization flag. |
2652 | */ |
2653 | |
2654 | struct malloc_params { |
2655 | size_t magic; |
2656 | size_t page_size; |
2657 | size_t granularity; |
2658 | size_t mmap_threshold; |
2659 | size_t trim_threshold; |
2660 | flag_t default_mflags; |
2661 | }; |
2662 | |
2663 | static struct malloc_params mparams; |
2664 | |
2665 | /* Ensure mparams initialized */ |
2666 | #define ensure_initialization() (void)(mparams.magic != 0 || init_mparams()) |
2667 | |
2668 | #if !ONLY_MSPACES |
2669 | |
2670 | /* The global malloc_state used for all non-"mspace" calls */ |
2671 | static struct malloc_state _gm_; |
2672 | #define gm (&_gm_) |
2673 | #define is_global(M) ((M) == &_gm_) |
2674 | |
2675 | #endif /* !ONLY_MSPACES */ |
2676 | |
2677 | #define is_initialized(M) ((M)->top != 0) |
2678 | |
2679 | /* -------------------------- system alloc setup ------------------------- */ |
2680 | |
2681 | /* Operations on mflags */ |
2682 | |
2683 | #define use_lock(M) ((M)->mflags & USE_LOCK_BIT) |
2684 | #define enable_lock(M) ((M)->mflags |= USE_LOCK_BIT) |
2685 | #if USE_LOCKS |
2686 | #define disable_lock(M) ((M)->mflags &= ~USE_LOCK_BIT) |
2687 | #else |
2688 | #define disable_lock(M) |
2689 | #endif |
2690 | |
2691 | #define use_mmap(M) ((M)->mflags & USE_MMAP_BIT) |
2692 | #define enable_mmap(M) ((M)->mflags |= USE_MMAP_BIT) |
2693 | #if HAVE_MMAP |
2694 | #define disable_mmap(M) ((M)->mflags &= ~USE_MMAP_BIT) |
2695 | #else |
2696 | #define disable_mmap(M) |
2697 | #endif |
2698 | |
2699 | #define use_noncontiguous(M) ((M)->mflags & USE_NONCONTIGUOUS_BIT) |
2700 | #define disable_contiguous(M) ((M)->mflags |= USE_NONCONTIGUOUS_BIT) |
2701 | |
2702 | #define set_lock(M,L)\ |
2703 | ((M)->mflags = (L)?\ |
2704 | ((M)->mflags | USE_LOCK_BIT) :\ |
2705 | ((M)->mflags & ~USE_LOCK_BIT)) |
2706 | |
2707 | /* page-align a size */ |
2708 | #define page_align(S)\ |
2709 | (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE)) |
2710 | |
2711 | /* granularity-align a size */ |
2712 | #define granularity_align(S)\ |
2713 | (((S) + (mparams.granularity - SIZE_T_ONE))\ |
2714 | & ~(mparams.granularity - SIZE_T_ONE)) |
2715 | |
2716 | |
2717 | /* For mmap, use granularity alignment on windows, else page-align */ |
2718 | #ifdef WIN32 |
2719 | #define mmap_align(S) granularity_align(S) |
2720 | #else |
2721 | #define mmap_align(S) page_align(S) |
2722 | #endif |
2723 | |
/* For sys_alloc, enough padding to ensure the request can be malloc'd on success */
2725 | #define SYS_ALLOC_PADDING (TOP_FOOT_SIZE + MALLOC_ALIGNMENT) |
2726 | |
2727 | #define is_page_aligned(S)\ |
2728 | (((size_t)(S) & (mparams.page_size - SIZE_T_ONE)) == 0) |
2729 | #define is_granularity_aligned(S)\ |
2730 | (((size_t)(S) & (mparams.granularity - SIZE_T_ONE)) == 0) |
2731 | |
2732 | /* True if segment S holds address A */ |
2733 | #define segment_holds(S, A)\ |
2734 | ((char*)(A) >= S->base && (char*)(A) < S->base + S->size) |
2735 | |
2736 | /* Return segment holding given address */ |
2737 | static msegmentptr segment_holding(mstate m, char* addr) { |
2738 | msegmentptr sp = &m->seg; |
2739 | for (;;) { |
2740 | if (addr >= sp->base && addr < sp->base + sp->size) |
2741 | return sp; |
2742 | if ((sp = sp->next) == 0) |
2743 | return 0; |
2744 | } |
2745 | } |
2746 | |
2747 | /* Return true if segment contains a segment link */ |
2748 | static int has_segment_link(mstate m, msegmentptr ss) { |
2749 | msegmentptr sp = &m->seg; |
2750 | for (;;) { |
2751 | if ((char*)sp >= ss->base && (char*)sp < ss->base + ss->size) |
2752 | return 1; |
2753 | if ((sp = sp->next) == 0) |
2754 | return 0; |
2755 | } |
2756 | } |
2757 | |
2758 | #ifndef MORECORE_CANNOT_TRIM |
2759 | #define should_trim(M,s) ((s) > (M)->trim_check) |
2760 | #else /* MORECORE_CANNOT_TRIM */ |
2761 | #define should_trim(M,s) (0) |
2762 | #endif /* MORECORE_CANNOT_TRIM */ |
2763 | |
2764 | /* |
2765 | TOP_FOOT_SIZE is padding at the end of a segment, including space |
2766 | that may be needed to place segment records and fenceposts when new |
2767 | noncontiguous segments are added. |
2768 | */ |
2769 | #define TOP_FOOT_SIZE\ |
2770 | (align_offset(chunk2mem(0))+pad_request(sizeof(struct malloc_segment))+MIN_CHUNK_SIZE) |
2771 | |
2772 | |
2773 | /* ------------------------------- Hooks -------------------------------- */ |
2774 | |
2775 | /* |
2776 | PREACTION should be defined to return 0 on success, and nonzero on |
2777 | failure. If you are not using locking, you can redefine these to do |
2778 | anything you like. |
2779 | */ |
2780 | |
2781 | #if USE_LOCKS |
2782 | #define PREACTION(M) ((use_lock(M))? ACQUIRE_LOCK(&(M)->mutex) : 0) |
2783 | #define POSTACTION(M) { if (use_lock(M)) RELEASE_LOCK(&(M)->mutex); } |
2784 | #else /* USE_LOCKS */ |
2785 | |
2786 | #ifndef PREACTION |
2787 | #define PREACTION(M) (0) |
2788 | #endif /* PREACTION */ |
2789 | |
2790 | #ifndef POSTACTION |
2791 | #define POSTACTION(M) |
2792 | #endif /* POSTACTION */ |
2793 | |
2794 | #endif /* USE_LOCKS */ |
2795 | |
2796 | /* |
2797 | CORRUPTION_ERROR_ACTION is triggered upon detected bad addresses. |
2798 | USAGE_ERROR_ACTION is triggered on detected bad frees and |
2799 | reallocs. The argument p is an address that might have triggered the |
2800 | fault. It is ignored by the two predefined actions, but might be |
2801 | useful in custom actions that try to help diagnose errors. |
2802 | */ |
2803 | |
2804 | #if PROCEED_ON_ERROR |
2805 | |
2806 | /* A count of the number of corruption errors causing resets */ |
2807 | int malloc_corruption_error_count; |
2808 | |
2809 | /* default corruption action */ |
2810 | static void reset_on_error(mstate m); |
2811 | |
2812 | #define CORRUPTION_ERROR_ACTION(m) reset_on_error(m) |
2813 | #define USAGE_ERROR_ACTION(m, p) |
2814 | |
2815 | #else /* PROCEED_ON_ERROR */ |
2816 | |
2817 | #ifndef CORRUPTION_ERROR_ACTION |
2818 | #define CORRUPTION_ERROR_ACTION(m) ABORT |
2819 | #endif /* CORRUPTION_ERROR_ACTION */ |
2820 | |
2821 | #ifndef USAGE_ERROR_ACTION |
2822 | #define USAGE_ERROR_ACTION(m,p) ABORT |
2823 | #endif /* USAGE_ERROR_ACTION */ |
2824 | |
2825 | #endif /* PROCEED_ON_ERROR */ |
2826 | |
2827 | |
2828 | /* -------------------------- Debugging setup ---------------------------- */ |
2829 | |
2830 | #if ! DEBUG |
2831 | |
2832 | #define check_free_chunk(M,P) |
2833 | #define check_inuse_chunk(M,P) |
2834 | #define check_malloced_chunk(M,P,N) |
2835 | #define check_mmapped_chunk(M,P) |
2836 | #define check_malloc_state(M) |
2837 | #define check_top_chunk(M,P) |
2838 | |
2839 | #else /* DEBUG */ |
2840 | #define check_free_chunk(M,P) do_check_free_chunk(M,P) |
2841 | #define check_inuse_chunk(M,P) do_check_inuse_chunk(M,P) |
2842 | #define check_top_chunk(M,P) do_check_top_chunk(M,P) |
2843 | #define check_malloced_chunk(M,P,N) do_check_malloced_chunk(M,P,N) |
2844 | #define check_mmapped_chunk(M,P) do_check_mmapped_chunk(M,P) |
2845 | #define check_malloc_state(M) do_check_malloc_state(M) |
2846 | |
2847 | static void do_check_any_chunk(mstate m, mchunkptr p); |
2848 | static void do_check_top_chunk(mstate m, mchunkptr p); |
2849 | static void do_check_mmapped_chunk(mstate m, mchunkptr p); |
2850 | static void do_check_inuse_chunk(mstate m, mchunkptr p); |
2851 | static void do_check_free_chunk(mstate m, mchunkptr p); |
2852 | static void do_check_malloced_chunk(mstate m, void* mem, size_t s); |
2853 | static void do_check_tree(mstate m, tchunkptr t); |
2854 | static void do_check_treebin(mstate m, bindex_t i); |
2855 | static void do_check_smallbin(mstate m, bindex_t i); |
2856 | static void do_check_malloc_state(mstate m); |
2857 | static int bin_find(mstate m, mchunkptr x); |
2858 | static size_t traverse_and_check(mstate m); |
2859 | #endif /* DEBUG */ |
2860 | |
2861 | /* ---------------------------- Indexing Bins ---------------------------- */ |
2862 | |
2863 | #define is_small(s) (((s) >> SMALLBIN_SHIFT) < NSMALLBINS) |
2864 | #define small_index(s) (bindex_t)((s) >> SMALLBIN_SHIFT) |
2865 | #define small_index2size(i) ((i) << SMALLBIN_SHIFT) |
2866 | #define MIN_SMALL_INDEX (small_index(MIN_CHUNK_SIZE)) |
2867 | |
2868 | /* addressing by index. See above about smallbin repositioning */ |
2869 | #define smallbin_at(M, i) ((sbinptr)((char*)&((M)->smallbins[(i)<<1]))) |
2870 | #define treebin_at(M,i) (&((M)->treebins[i])) |
2871 | |
2872 | /* assign tree index for size S to variable I. Use x86 asm if possible */ |
2873 | #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) |
2874 | #define compute_tree_index(S, I)\ |
2875 | {\ |
2876 | unsigned int X = S >> TREEBIN_SHIFT;\ |
2877 | if (X == 0)\ |
2878 | I = 0;\ |
2879 | else if (X > 0xFFFF)\ |
2880 | I = NTREEBINS-1;\ |
2881 | else {\ |
2882 | unsigned int K = (unsigned) sizeof(X)*__CHAR_BIT__ - 1 - (unsigned) __builtin_clz(X); \ |
2883 | I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
2884 | }\ |
2885 | } |
2886 | |
2887 | #elif defined (__INTEL_COMPILER) |
2888 | #define compute_tree_index(S, I)\ |
2889 | {\ |
2890 | size_t X = S >> TREEBIN_SHIFT;\ |
2891 | if (X == 0)\ |
2892 | I = 0;\ |
2893 | else if (X > 0xFFFF)\ |
2894 | I = NTREEBINS-1;\ |
2895 | else {\ |
2896 | unsigned int K = _bit_scan_reverse (X); \ |
2897 | I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
2898 | }\ |
2899 | } |
2900 | |
2901 | #elif defined(_MSC_VER) && _MSC_VER>=1300 |
2902 | #define compute_tree_index(S, I)\ |
2903 | {\ |
2904 | size_t X = S >> TREEBIN_SHIFT;\ |
2905 | if (X == 0)\ |
2906 | I = 0;\ |
2907 | else if (X > 0xFFFF)\ |
2908 | I = NTREEBINS-1;\ |
2909 | else {\ |
2910 | unsigned int K;\ |
2911 | _BitScanReverse((DWORD *) &K, (DWORD) X);\ |
2912 | I = (bindex_t)((K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1)));\ |
2913 | }\ |
2914 | } |
2915 | |
2916 | #else /* GNUC */ |
2917 | #define compute_tree_index(S, I)\ |
2918 | {\ |
2919 | size_t X = S >> TREEBIN_SHIFT;\ |
2920 | if (X == 0)\ |
2921 | I = 0;\ |
2922 | else if (X > 0xFFFF)\ |
2923 | I = NTREEBINS-1;\ |
2924 | else {\ |
2925 | unsigned int Y = (unsigned int)X;\ |
2926 | unsigned int N = ((Y - 0x100) >> 16) & 8;\ |
2927 | unsigned int K = (((Y <<= N) - 0x1000) >> 16) & 4;\ |
2928 | N += K;\ |
2929 | N += K = (((Y <<= K) - 0x4000) >> 16) & 2;\ |
2930 | K = 14 - N + ((Y <<= K) >> 15);\ |
2931 | I = (K << 1) + ((S >> (K + (TREEBIN_SHIFT-1)) & 1));\ |
2932 | }\ |
2933 | } |
2934 | #endif /* GNUC */ |
2935 | |
2936 | /* Bit representing maximum resolved size in a treebin at i */ |
#define bit_for_tree_index(i) \
   (((i) == NTREEBINS-1)? (SIZE_T_BITSIZE-1) : (((i) >> 1) + TREEBIN_SHIFT - 2))
2939 | |
2940 | /* Shift placing maximum resolved bit in a treebin at i as sign bit */ |
2941 | #define leftshift_for_tree_index(i) \ |
2942 | ((i == NTREEBINS-1)? 0 : \ |
2943 | ((SIZE_T_BITSIZE-SIZE_T_ONE) - (((i) >> 1) + TREEBIN_SHIFT - 2))) |
2944 | |
2945 | /* The size of the smallest chunk held in bin with index i */ |
2946 | #define minsize_for_tree_index(i) \ |
2947 | ((SIZE_T_ONE << (((i) >> 1) + TREEBIN_SHIFT)) | \ |
2948 | (((size_t)((i) & SIZE_T_ONE)) << (((i) >> 1) + TREEBIN_SHIFT - 1))) |
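
/*
  Worked example, assuming TREEBIN_SHIFT == 8: for a chunk of size
  0x160 (352), X = 352 >> 8 = 1, so K = 0 and the selector bit is
  (352 >> 7) & 1 == 0, giving index I = 0. Consistently,
    minsize_for_tree_index(0) == 256   (bin 0 holds [256, 384))
    minsize_for_tree_index(1) == 384   (bin 1 holds [384, 512))
  so 352 lands in bin 0, and each power-of-two range is split into
  two equally spaced bins as described above.
*/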
2949 | |
2950 | |
2951 | /* ------------------------ Operations on bin maps ----------------------- */ |
2952 | |
2953 | /* bit corresponding to given index */ |
2954 | #define idx2bit(i) ((binmap_t)(1) << (i)) |
2955 | |
2956 | /* Mark/Clear bits with given index */ |
2957 | #define mark_smallmap(M,i) ((M)->smallmap |= idx2bit(i)) |
2958 | #define clear_smallmap(M,i) ((M)->smallmap &= ~idx2bit(i)) |
2959 | #define smallmap_is_marked(M,i) ((M)->smallmap & idx2bit(i)) |
2960 | |
2961 | #define mark_treemap(M,i) ((M)->treemap |= idx2bit(i)) |
2962 | #define clear_treemap(M,i) ((M)->treemap &= ~idx2bit(i)) |
2963 | #define treemap_is_marked(M,i) ((M)->treemap & idx2bit(i)) |
2964 | |
2965 | /* isolate the least set bit of a bitmap */ |
2966 | #define least_bit(x) ((x) & -(x)) |
2967 | |
2968 | /* mask with all bits to left of least bit of x on */ |
#define left_bits(x)         (((x)<<1) | -((x)<<1))
2970 | |
2971 | /* mask with all bits to left of or equal to least bit of x on */ |
2972 | #define same_or_left_bits(x) ((x) | -(x)) |
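
/*
  Worked example on the low 8 bits of a bitmap, with x == 0x28
  (binary 00101000, bits 3 and 5 set):
    least_bit(x)         == 0x08  (bit 3 only)
    left_bits(x)         == 0xF0  (all bits strictly left of bit 3)
    same_or_left_bits(x) == 0xF8  (bit 3 and everything left of it)
  These masks find the first non-empty bin at or above a computed
  index without scanning bin-by-bin.
*/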
2973 | |
2974 | /* index corresponding to given bit. Use x86 asm if possible */ |
2975 | |
2976 | #if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) |
2977 | #define compute_bit2idx(X, I)\ |
2978 | {\ |
2979 | unsigned int J;\ |
2980 | J = __builtin_ctz(X); \ |
2981 | I = (bindex_t)J;\ |
2982 | } |
2983 | |
2984 | #elif defined (__INTEL_COMPILER) |
2985 | #define compute_bit2idx(X, I)\ |
2986 | {\ |
2987 | unsigned int J;\ |
2988 | J = _bit_scan_forward (X); \ |
2989 | I = (bindex_t)J;\ |
2990 | } |
2991 | |
2992 | #elif defined(_MSC_VER) && _MSC_VER>=1300 |
2993 | #define compute_bit2idx(X, I)\ |
2994 | {\ |
2995 | unsigned int J;\ |
2996 | _BitScanForward((DWORD *) &J, X);\ |
2997 | I = (bindex_t)J;\ |
2998 | } |
2999 | |
3000 | #elif USE_BUILTIN_FFS |
3001 | #define compute_bit2idx(X, I) I = ffs(X)-1 |
3002 | |
3003 | #else |
3004 | #define compute_bit2idx(X, I)\ |
3005 | {\ |
3006 | unsigned int Y = X - 1;\ |
3007 | unsigned int K = Y >> (16-4) & 16;\ |
3008 | unsigned int N = K; Y >>= K;\ |
3009 | N += K = Y >> (8-3) & 8; Y >>= K;\ |
3010 | N += K = Y >> (4-2) & 4; Y >>= K;\ |
3011 | N += K = Y >> (2-1) & 2; Y >>= K;\ |
3012 | N += K = Y >> (1-0) & 1; Y >>= K;\ |
3013 | I = (bindex_t)(N + Y);\ |
3014 | } |
3015 | #endif /* GNUC */ |
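
/*
  Worked example of the fallback version, for X == 0x08 (bit 3 set):
  Y starts at 7, and the successive shift/mask steps accumulate N = 2
  with a final Y of 1, giving I = N + Y = 3 -- the index of the set
  bit, as the hardware bitscan variants above compute directly.
*/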
3016 | |
3017 | |
3018 | /* ----------------------- Runtime Check Support ------------------------- */ |
3019 | |
3020 | /* |
3021 | For security, the main invariant is that malloc/free/etc never |
3022 | writes to a static address other than malloc_state, unless static |
3023 | malloc_state itself has been corrupted, which cannot occur via |
3024 | malloc (because of these checks). In essence this means that we |
3025 | believe all pointers, sizes, maps etc held in malloc_state, but |
3026 | check all of those linked or offsetted from other embedded data |
3027 | structures. These checks are interspersed with main code in a way |
3028 | that tends to minimize their run-time cost. |
3029 | |
3030 | When FOOTERS is defined, in addition to range checking, we also |
  verify footer fields of inuse chunks, which can be used to guarantee
3032 | that the mstate controlling malloc/free is intact. This is a |
3033 | streamlined version of the approach described by William Robertson |
3034 | et al in "Run-time Detection of Heap-based Overflows" LISA'03 |
3035 | http://www.usenix.org/events/lisa03/tech/robertson.html The footer |
3036 | of an inuse chunk holds the xor of its mstate and a random seed, |
3037 | that is checked upon calls to free() and realloc(). This is |
  (probabilistically) unguessable from outside the program, but can be
3039 | computed by any code successfully malloc'ing any chunk, so does not |
3040 | itself provide protection against code that has already broken |
3041 | security through some other means. Unlike Robertson et al, we |
3042 | always dynamically check addresses of all offset chunks (previous, |
3043 | next, etc). This turns out to be cheaper than relying on hashes. |
3044 | */ |
3045 | |
3046 | #if !INSECURE |
/* Check if address a is no lower than the least address obtained from MORECORE or MMAP */
3048 | #define ok_address(M, a) ((char*)(a) >= (M)->least_addr) |
3049 | /* Check if address of next chunk n is higher than base chunk p */ |
3050 | #define ok_next(p, n) ((char*)(p) < (char*)(n)) |
3051 | /* Check if p has inuse status */ |
3052 | #define ok_inuse(p) is_inuse(p) |
3053 | /* Check if p has its pinuse bit on */ |
3054 | #define ok_pinuse(p) pinuse(p) |
3055 | |
3056 | #else /* !INSECURE */ |
3057 | #define ok_address(M, a) (1) |
3058 | #define ok_next(b, n) (1) |
3059 | #define ok_inuse(p) (1) |
3060 | #define ok_pinuse(p) (1) |
3061 | #endif /* !INSECURE */ |
3062 | |
3063 | #if (FOOTERS && !INSECURE) |
3064 | /* Check if (alleged) mstate m has expected magic field */ |
3065 | #define ok_magic(M) ((M)->magic == mparams.magic) |
3066 | #else /* (FOOTERS && !INSECURE) */ |
3067 | #define ok_magic(M) (1) |
3068 | #endif /* (FOOTERS && !INSECURE) */ |
3069 | |
3070 | /* In gcc, use __builtin_expect to minimize impact of checks */ |
3071 | #if !INSECURE |
3072 | #if defined(__GNUC__) && __GNUC__ >= 3 |
3073 | #define RTCHECK(e) __builtin_expect(e, 1) |
3074 | #else /* GNUC */ |
3075 | #define RTCHECK(e) (e) |
3076 | #endif /* GNUC */ |
3077 | #else /* !INSECURE */ |
3078 | #define RTCHECK(e) (1) |
3079 | #endif /* !INSECURE */ |
3080 | |
3081 | /* macros to set up inuse chunks with or without footers */ |
3082 | |
3083 | #if !FOOTERS |
3084 | |
3085 | #define mark_inuse_foot(M,p,s) |
3086 | |
3087 | /* Macros for setting head/foot of non-mmapped chunks */ |
3088 | |
3089 | /* Set cinuse bit and pinuse bit of next chunk */ |
3090 | #define set_inuse(M,p,s)\ |
3091 | ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
3092 | ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
3093 | |
3094 | /* Set cinuse and pinuse of this chunk and pinuse of next chunk */ |
3095 | #define set_inuse_and_pinuse(M,p,s)\ |
3096 | ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
3097 | ((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT) |
3098 | |
3099 | /* Set size, cinuse and pinuse bit of this chunk */ |
3100 | #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
3101 | ((p)->head = (s|PINUSE_BIT|CINUSE_BIT)) |
3102 | |
3103 | #else /* FOOTERS */ |
3104 | |
3105 | /* Set foot of inuse chunk to be xor of mstate and seed */ |
3106 | #define mark_inuse_foot(M,p,s)\ |
3107 | (((mchunkptr)((char*)(p) + (s)))->prev_foot = ((size_t)(M) ^ mparams.magic)) |
3108 | |
3109 | #define get_mstate_for(p)\ |
3110 | ((mstate)(((mchunkptr)((char*)(p) +\ |
3111 | (chunksize(p))))->prev_foot ^ mparams.magic)) |
3112 | |
3113 | #define set_inuse(M,p,s)\ |
3114 | ((p)->head = (((p)->head & PINUSE_BIT)|s|CINUSE_BIT),\ |
3115 | (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT), \ |
3116 | mark_inuse_foot(M,p,s)) |
3117 | |
3118 | #define set_inuse_and_pinuse(M,p,s)\ |
3119 | ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
3120 | (((mchunkptr)(((char*)(p)) + (s)))->head |= PINUSE_BIT),\ |
3121 | mark_inuse_foot(M,p,s)) |
3122 | |
3123 | #define set_size_and_pinuse_of_inuse_chunk(M, p, s)\ |
3124 | ((p)->head = (s|PINUSE_BIT|CINUSE_BIT),\ |
3125 | mark_inuse_foot(M, p, s)) |
3126 | |
3127 | #endif /* !FOOTERS */ |
3128 | |
3129 | /* ---------------------------- setting mparams -------------------------- */ |
3130 | |
3131 | #if LOCK_AT_FORK |
3132 | static void pre_fork(void) { ACQUIRE_LOCK(&(gm)->mutex); } |
3133 | static void post_fork_parent(void) { RELEASE_LOCK(&(gm)->mutex); } |
3134 | static void post_fork_child(void) { INITIAL_LOCK(&(gm)->mutex); } |
3135 | #endif /* LOCK_AT_FORK */ |
3136 | |
3137 | /* Initialize mparams */ |
3138 | static int init_mparams(void) { |
3139 | #ifdef NEED_GLOBAL_LOCK_INIT |
3140 | if (malloc_global_mutex_status <= 0) |
3141 | init_malloc_global_mutex(); |
3142 | #endif |
3143 | |
3144 | ACQUIRE_MALLOC_GLOBAL_LOCK(); |
3145 | if (mparams.magic == 0) { |
3146 | size_t magic; |
3147 | size_t psize; |
3148 | size_t gsize; |
3149 | |
3150 | #ifndef WIN32 |
3151 | psize = malloc_getpagesize; |
3152 | gsize = ((DEFAULT_GRANULARITY != 0)? DEFAULT_GRANULARITY : psize); |
3153 | #else /* WIN32 */ |
3154 | { |
3155 | SYSTEM_INFO system_info; |
3156 | GetSystemInfo(&system_info); |
3157 | psize = system_info.dwPageSize; |
3158 | gsize = ((DEFAULT_GRANULARITY != 0)? |
3159 | DEFAULT_GRANULARITY : system_info.dwAllocationGranularity); |
3160 | } |
3161 | #endif /* WIN32 */ |
3162 | |
3163 | /* Sanity-check configuration: |
3164 | size_t must be unsigned and as wide as pointer type. |
3165 | ints must be at least 4 bytes. |
3166 | alignment must be at least 8. |
3167 | Alignment, min chunk size, and page size must all be powers of 2. |
3168 | */ |
3169 | if ((sizeof(size_t) != sizeof(char*)) || |
3170 | (MAX_SIZE_T < MIN_CHUNK_SIZE) || |
3171 | (sizeof(int) < 4) || |
3172 | (MALLOC_ALIGNMENT < (size_t)8U) || |
3173 | ((MALLOC_ALIGNMENT & (MALLOC_ALIGNMENT-SIZE_T_ONE)) != 0) || |
3174 | ((MCHUNK_SIZE & (MCHUNK_SIZE-SIZE_T_ONE)) != 0) || |
3175 | ((gsize & (gsize-SIZE_T_ONE)) != 0) || |
3176 | ((psize & (psize-SIZE_T_ONE)) != 0)) |
3177 | ABORT; |
3178 | mparams.granularity = gsize; |
3179 | mparams.page_size = psize; |
3180 | mparams.mmap_threshold = DEFAULT_MMAP_THRESHOLD; |
3181 | mparams.trim_threshold = DEFAULT_TRIM_THRESHOLD; |
3182 | #if MORECORE_CONTIGUOUS |
3183 | mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT; |
3184 | #else /* MORECORE_CONTIGUOUS */ |
3185 | mparams.default_mflags = USE_LOCK_BIT|USE_MMAP_BIT|USE_NONCONTIGUOUS_BIT; |
3186 | #endif /* MORECORE_CONTIGUOUS */ |
3187 | |
3188 | #if !ONLY_MSPACES |
3189 | /* Set up lock for main malloc area */ |
3190 | gm->mflags = mparams.default_mflags; |
3191 | (void)INITIAL_LOCK(&gm->mutex); |
3192 | #endif |
3193 | #if LOCK_AT_FORK |
3194 | pthread_atfork(&pre_fork, &post_fork_parent, &post_fork_child); |
3195 | #endif |
3196 | |
3197 | { |
3198 | #if USE_DEV_RANDOM |
3199 | int fd; |
3200 | unsigned char buf[sizeof(size_t)]; |
3201 | /* Try to use /dev/urandom, else fall back on using time */ |
3202 | if ((fd = open("/dev/urandom" , O_RDONLY)) >= 0 && |
3203 | read(fd, buf, sizeof(buf)) == sizeof(buf)) { |
3204 | magic = *((size_t *) buf); |
3205 | close(fd); |
3206 | } |
3207 | else |
3208 | #endif /* USE_DEV_RANDOM */ |
3209 | #ifdef WIN32 |
3210 | magic = (size_t)(GetTickCount() ^ (size_t)0x55555555U); |
3211 | #elif defined(LACKS_TIME_H) |
3212 | magic = (size_t)&magic ^ (size_t)0x55555555U; |
3213 | #else |
3214 | magic = (size_t)(time(0) ^ (size_t)0x55555555U); |
3215 | #endif |
3216 | magic |= (size_t)8U; /* ensure nonzero */ |
3217 | magic &= ~(size_t)7U; /* improve chances of fault for bad values */ |
      /* Until atomic memory operations are commonly available, use a volatile write */
3219 | (*(volatile size_t *)(&(mparams.magic))) = magic; |
3220 | } |
3221 | } |
3222 | |
3223 | RELEASE_MALLOC_GLOBAL_LOCK(); |
3224 | return 1; |
3225 | } |
3226 | |
3227 | /* support for mallopt */ |
3228 | static int change_mparam(int param_number, int value) { |
3229 | size_t val; |
3230 | ensure_initialization(); |
3231 | val = (value == -1)? MAX_SIZE_T : (size_t)value; |
3232 | switch(param_number) { |
3233 | case M_TRIM_THRESHOLD: |
3234 | mparams.trim_threshold = val; |
3235 | return 1; |
3236 | case M_GRANULARITY: |
3237 | if (val >= mparams.page_size && ((val & (val-1)) == 0)) { |
3238 | mparams.granularity = val; |
3239 | return 1; |
3240 | } |
3241 | else |
3242 | return 0; |
3243 | case M_MMAP_THRESHOLD: |
3244 | mparams.mmap_threshold = val; |
3245 | return 1; |
3246 | default: |
3247 | return 0; |
3248 | } |
3249 | } |
3250 | |
3251 | #if DEBUG |
3252 | /* ------------------------- Debugging Support --------------------------- */ |
3253 | |
3254 | /* Check properties of any chunk, whether free, inuse, mmapped etc */ |
3255 | static void do_check_any_chunk(mstate m, mchunkptr p) { |
3256 | assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
3257 | assert(ok_address(m, p)); |
3258 | } |
3259 | |
3260 | /* Check properties of top chunk */ |
3261 | static void do_check_top_chunk(mstate m, mchunkptr p) { |
3262 | msegmentptr sp = segment_holding(m, (char*)p); |
3263 | size_t sz = p->head & ~INUSE_BITS; /* third-lowest bit can be set! */ |
3264 | assert(sp != 0); |
3265 | assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
3266 | assert(ok_address(m, p)); |
3267 | assert(sz == m->topsize); |
3268 | assert(sz > 0); |
3269 | assert(sz == ((sp->base + sp->size) - (char*)p) - TOP_FOOT_SIZE); |
3270 | assert(pinuse(p)); |
3271 | assert(!pinuse(chunk_plus_offset(p, sz))); |
3272 | } |
3273 | |
3274 | /* Check properties of (inuse) mmapped chunks */ |
3275 | static void do_check_mmapped_chunk(mstate m, mchunkptr p) { |
3276 | size_t sz = chunksize(p); |
3277 | size_t len = (sz + (p->prev_foot) + MMAP_FOOT_PAD); |
3278 | assert(is_mmapped(p)); |
3279 | assert(use_mmap(m)); |
3280 | assert((is_aligned(chunk2mem(p))) || (p->head == FENCEPOST_HEAD)); |
3281 | assert(ok_address(m, p)); |
3282 | assert(!is_small(sz)); |
3283 | assert((len & (mparams.page_size-SIZE_T_ONE)) == 0); |
3284 | assert(chunk_plus_offset(p, sz)->head == FENCEPOST_HEAD); |
3285 | assert(chunk_plus_offset(p, sz+SIZE_T_SIZE)->head == 0); |
3286 | } |
3287 | |
3288 | /* Check properties of inuse chunks */ |
3289 | static void do_check_inuse_chunk(mstate m, mchunkptr p) { |
3290 | do_check_any_chunk(m, p); |
3291 | assert(is_inuse(p)); |
3292 | assert(next_pinuse(p)); |
3293 | /* If not pinuse and not mmapped, previous chunk has OK offset */ |
3294 | assert(is_mmapped(p) || pinuse(p) || next_chunk(prev_chunk(p)) == p); |
3295 | if (is_mmapped(p)) |
3296 | do_check_mmapped_chunk(m, p); |
3297 | } |
3298 | |
3299 | /* Check properties of free chunks */ |
3300 | static void do_check_free_chunk(mstate m, mchunkptr p) { |
3301 | size_t sz = chunksize(p); |
3302 | mchunkptr next = chunk_plus_offset(p, sz); |
3303 | do_check_any_chunk(m, p); |
3304 | assert(!is_inuse(p)); |
3305 | assert(!next_pinuse(p)); |
3306 | assert (!is_mmapped(p)); |
3307 | if (p != m->dv && p != m->top) { |
3308 | if (sz >= MIN_CHUNK_SIZE) { |
3309 | assert((sz & CHUNK_ALIGN_MASK) == 0); |
3310 | assert(is_aligned(chunk2mem(p))); |
3311 | assert(next->prev_foot == sz); |
3312 | assert(pinuse(p)); |
3313 | assert (next == m->top || is_inuse(next)); |
3314 | assert(p->fd->bk == p); |
3315 | assert(p->bk->fd == p); |
3316 | } |
3317 | else /* markers are always of size SIZE_T_SIZE */ |
3318 | assert(sz == SIZE_T_SIZE); |
3319 | } |
3320 | } |
3321 | |
3322 | /* Check properties of malloced chunks at the point they are malloced */ |
3323 | static void do_check_malloced_chunk(mstate m, void* mem, size_t s) { |
3324 | if (mem != 0) { |
3325 | mchunkptr p = mem2chunk(mem); |
3326 | size_t sz = p->head & ~INUSE_BITS; |
3327 | do_check_inuse_chunk(m, p); |
3328 | assert((sz & CHUNK_ALIGN_MASK) == 0); |
3329 | assert(sz >= MIN_CHUNK_SIZE); |
3330 | assert(sz >= s); |
3331 | /* unless mmapped, size is less than MIN_CHUNK_SIZE more than request */ |
3332 | assert(is_mmapped(p) || sz < (s + MIN_CHUNK_SIZE)); |
3333 | } |
3334 | } |
3335 | |
3336 | /* Check a tree and its subtrees. */ |
3337 | static void do_check_tree(mstate m, tchunkptr t) { |
3338 | tchunkptr head = 0; |
3339 | tchunkptr u = t; |
3340 | bindex_t tindex = t->index; |
3341 | size_t tsize = chunksize(t); |
3342 | bindex_t idx; |
3343 | compute_tree_index(tsize, idx); |
3344 | assert(tindex == idx); |
3345 | assert(tsize >= MIN_LARGE_SIZE); |
3346 | assert(tsize >= minsize_for_tree_index(idx)); |
3347 | assert((idx == NTREEBINS-1) || (tsize < minsize_for_tree_index((idx+1)))); |
3348 | |
3349 | do { /* traverse through chain of same-sized nodes */ |
3350 | do_check_any_chunk(m, ((mchunkptr)u)); |
3351 | assert(u->index == tindex); |
3352 | assert(chunksize(u) == tsize); |
3353 | assert(!is_inuse(u)); |
3354 | assert(!next_pinuse(u)); |
3355 | assert(u->fd->bk == u); |
3356 | assert(u->bk->fd == u); |
3357 | if (u->parent == 0) { |
3358 | assert(u->child[0] == 0); |
3359 | assert(u->child[1] == 0); |
3360 | } |
3361 | else { |
3362 | assert(head == 0); /* only one node on chain has parent */ |
3363 | head = u; |
3364 | assert(u->parent != u); |
3365 | assert (u->parent->child[0] == u || |
3366 | u->parent->child[1] == u || |
3367 | *((tbinptr*)(u->parent)) == u); |
3368 | if (u->child[0] != 0) { |
3369 | assert(u->child[0]->parent == u); |
3370 | assert(u->child[0] != u); |
3371 | do_check_tree(m, u->child[0]); |
3372 | } |
3373 | if (u->child[1] != 0) { |
3374 | assert(u->child[1]->parent == u); |
3375 | assert(u->child[1] != u); |
3376 | do_check_tree(m, u->child[1]); |
3377 | } |
3378 | if (u->child[0] != 0 && u->child[1] != 0) { |
3379 | assert(chunksize(u->child[0]) < chunksize(u->child[1])); |
3380 | } |
3381 | } |
3382 | u = u->fd; |
3383 | } while (u != t); |
3384 | assert(head != 0); |
3385 | } |
3386 | |
3387 | /* Check all the chunks in a treebin. */ |
3388 | static void do_check_treebin(mstate m, bindex_t i) { |
3389 | tbinptr* tb = treebin_at(m, i); |
3390 | tchunkptr t = *tb; |
3391 | int empty = (m->treemap & (1U << i)) == 0; |
3392 | if (t == 0) |
3393 | assert(empty); |
3394 | if (!empty) |
3395 | do_check_tree(m, t); |
3396 | } |
3397 | |
3398 | /* Check all the chunks in a smallbin. */ |
3399 | static void do_check_smallbin(mstate m, bindex_t i) { |
3400 | sbinptr b = smallbin_at(m, i); |
3401 | mchunkptr p = b->bk; |
3402 | unsigned int empty = (m->smallmap & (1U << i)) == 0; |
3403 | if (p == b) |
3404 | assert(empty); |
3405 | if (!empty) { |
3406 | for (; p != b; p = p->bk) { |
3407 | size_t size = chunksize(p); |
3408 | mchunkptr q; |
3409 | /* each chunk claims to be free */ |
3410 | do_check_free_chunk(m, p); |
3411 | /* chunk belongs in bin */ |
3412 | assert(small_index(size) == i); |
3413 | assert(p->bk == b || chunksize(p->bk) == chunksize(p)); |
3414 | /* chunk is followed by an inuse chunk */ |
3415 | q = next_chunk(p); |
3416 | if (q->head != FENCEPOST_HEAD) |
3417 | do_check_inuse_chunk(m, q); |
3418 | } |
3419 | } |
3420 | } |
3421 | |
3422 | /* Find x in a bin. Used in other check functions. */ |
3423 | static int bin_find(mstate m, mchunkptr x) { |
3424 | size_t size = chunksize(x); |
3425 | if (is_small(size)) { |
3426 | bindex_t sidx = small_index(size); |
3427 | sbinptr b = smallbin_at(m, sidx); |
3428 | if (smallmap_is_marked(m, sidx)) { |
3429 | mchunkptr p = b; |
3430 | do { |
3431 | if (p == x) |
3432 | return 1; |
3433 | } while ((p = p->fd) != b); |
3434 | } |
3435 | } |
3436 | else { |
3437 | bindex_t tidx; |
3438 | compute_tree_index(size, tidx); |
3439 | if (treemap_is_marked(m, tidx)) { |
3440 | tchunkptr t = *treebin_at(m, tidx); |
3441 | size_t sizebits = size << leftshift_for_tree_index(tidx); |
3442 | while (t != 0 && chunksize(t) != size) { |
3443 | t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
3444 | sizebits <<= 1; |
3445 | } |
3446 | if (t != 0) { |
3447 | tchunkptr u = t; |
3448 | do { |
3449 | if (u == (tchunkptr)x) |
3450 | return 1; |
3451 | } while ((u = u->fd) != t); |
3452 | } |
3453 | } |
3454 | } |
3455 | return 0; |
3456 | } |
3457 | |
3458 | /* Traverse each chunk and check it; return total */ |
3459 | static size_t traverse_and_check(mstate m) { |
3460 | size_t sum = 0; |
3461 | if (is_initialized(m)) { |
3462 | msegmentptr s = &m->seg; |
3463 | sum += m->topsize + TOP_FOOT_SIZE; |
3464 | while (s != 0) { |
3465 | mchunkptr q = align_as_chunk(s->base); |
3466 | mchunkptr lastq = 0; |
3467 | assert(pinuse(q)); |
3468 | while (segment_holds(s, q) && |
3469 | q != m->top && q->head != FENCEPOST_HEAD) { |
3470 | sum += chunksize(q); |
3471 | if (is_inuse(q)) { |
3472 | assert(!bin_find(m, q)); |
3473 | do_check_inuse_chunk(m, q); |
3474 | } |
3475 | else { |
3476 | assert(q == m->dv || bin_find(m, q)); |
3477 | assert(lastq == 0 || is_inuse(lastq)); /* Not 2 consecutive free */ |
3478 | do_check_free_chunk(m, q); |
3479 | } |
3480 | lastq = q; |
3481 | q = next_chunk(q); |
3482 | } |
3483 | s = s->next; |
3484 | } |
3485 | } |
3486 | return sum; |
3487 | } |
3488 | |
3489 | |
3490 | /* Check all properties of malloc_state. */ |
3491 | static void do_check_malloc_state(mstate m) { |
3492 | bindex_t i; |
3493 | size_t total; |
3494 | /* check bins */ |
3495 | for (i = 0; i < NSMALLBINS; ++i) |
3496 | do_check_smallbin(m, i); |
3497 | for (i = 0; i < NTREEBINS; ++i) |
3498 | do_check_treebin(m, i); |
3499 | |
3500 | if (m->dvsize != 0) { /* check dv chunk */ |
3501 | do_check_any_chunk(m, m->dv); |
3502 | assert(m->dvsize == chunksize(m->dv)); |
3503 | assert(m->dvsize >= MIN_CHUNK_SIZE); |
3504 | assert(bin_find(m, m->dv) == 0); |
3505 | } |
3506 | |
3507 | if (m->top != 0) { /* check top chunk */ |
3508 | do_check_top_chunk(m, m->top); |
3509 | /*assert(m->topsize == chunksize(m->top)); redundant */ |
3510 | assert(m->topsize > 0); |
3511 | assert(bin_find(m, m->top) == 0); |
3512 | } |
3513 | |
3514 | total = traverse_and_check(m); |
3515 | assert(total <= m->footprint); |
3516 | assert(m->footprint <= m->max_footprint); |
3517 | } |
3518 | #endif /* DEBUG */ |
3519 | |
3520 | /* ----------------------------- statistics ------------------------------ */ |
3521 | |
3522 | #if !NO_MALLINFO |
3523 | static struct mallinfo internal_mallinfo(mstate m) { |
3524 | struct mallinfo nm = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; |
3525 | ensure_initialization(); |
3526 | if (!PREACTION(m)) { |
3527 | check_malloc_state(m); |
3528 | if (is_initialized(m)) { |
3529 | size_t nfree = SIZE_T_ONE; /* top always free */ |
3530 | size_t mfree = m->topsize + TOP_FOOT_SIZE; |
3531 | size_t sum = mfree; |
3532 | msegmentptr s = &m->seg; |
3533 | while (s != 0) { |
3534 | mchunkptr q = align_as_chunk(s->base); |
3535 | while (segment_holds(s, q) && |
3536 | q != m->top && q->head != FENCEPOST_HEAD) { |
3537 | size_t sz = chunksize(q); |
3538 | sum += sz; |
3539 | if (!is_inuse(q)) { |
3540 | mfree += sz; |
3541 | ++nfree; |
3542 | } |
3543 | q = next_chunk(q); |
3544 | } |
3545 | s = s->next; |
3546 | } |
3547 | |
3548 | nm.arena = sum; |
3549 | nm.ordblks = nfree; |
3550 | nm.hblkhd = m->footprint - sum; |
3551 | nm.usmblks = m->max_footprint; |
3552 | nm.uordblks = m->footprint - mfree; |
3553 | nm.fordblks = mfree; |
3554 | nm.keepcost = m->topsize; |
3555 | } |
3556 | |
3557 | POSTACTION(m); |
3558 | } |
3559 | return nm; |
3560 | } |
3561 | #endif /* !NO_MALLINFO */ |
3562 | |
3563 | #if !NO_MALLOC_STATS |
3564 | static void internal_malloc_stats(mstate m) { |
3565 | ensure_initialization(); |
3566 | if (!PREACTION(m)) { |
3567 | size_t maxfp = 0; |
3568 | size_t fp = 0; |
3569 | size_t used = 0; |
3570 | check_malloc_state(m); |
3571 | if (is_initialized(m)) { |
3572 | msegmentptr s = &m->seg; |
3573 | maxfp = m->max_footprint; |
3574 | fp = m->footprint; |
3575 | used = fp - (m->topsize + TOP_FOOT_SIZE); |
3576 | |
3577 | while (s != 0) { |
3578 | mchunkptr q = align_as_chunk(s->base); |
3579 | while (segment_holds(s, q) && |
3580 | q != m->top && q->head != FENCEPOST_HEAD) { |
3581 | if (!is_inuse(q)) |
3582 | used -= chunksize(q); |
3583 | q = next_chunk(q); |
3584 | } |
3585 | s = s->next; |
3586 | } |
3587 | } |
3588 | POSTACTION(m); /* drop lock */ |
3589 | fprintf(stderr, "max system bytes = %10lu\n" , (unsigned long)(maxfp)); |
3590 | fprintf(stderr, "system bytes = %10lu\n" , (unsigned long)(fp)); |
3591 | fprintf(stderr, "in use bytes = %10lu\n" , (unsigned long)(used)); |
3592 | } |
3593 | } |
#endif /* !NO_MALLOC_STATS */
3595 | |
3596 | /* ----------------------- Operations on smallbins ----------------------- */ |
3597 | |
3598 | /* |
  Various forms of linking and unlinking are defined as macros, even
  the ones for trees, which are very long but have very short typical
  paths. This is ugly, but it reduces reliance on compilers' inlining
  support.
3603 | */ |
3604 | |
3605 | /* Link a free chunk into a smallbin */ |
3606 | #define insert_small_chunk(M, P, S) {\ |
3607 | bindex_t I = small_index(S);\ |
3608 | mchunkptr B = smallbin_at(M, I);\ |
3609 | mchunkptr F = B;\ |
3610 | assert(S >= MIN_CHUNK_SIZE);\ |
3611 | if (!smallmap_is_marked(M, I))\ |
3612 | mark_smallmap(M, I);\ |
3613 | else if (RTCHECK(ok_address(M, B->fd)))\ |
3614 | F = B->fd;\ |
3615 | else {\ |
3616 | CORRUPTION_ERROR_ACTION(M);\ |
3617 | }\ |
3618 | B->fd = P;\ |
3619 | F->bk = P;\ |
3620 | P->fd = F;\ |
3621 | P->bk = B;\ |
3622 | } |
3623 | |
3624 | /* Unlink a chunk from a smallbin */ |
3625 | #define unlink_small_chunk(M, P, S) {\ |
3626 | mchunkptr F = P->fd;\ |
3627 | mchunkptr B = P->bk;\ |
3628 | bindex_t I = small_index(S);\ |
3629 | assert(P != B);\ |
3630 | assert(P != F);\ |
3631 | assert(chunksize(P) == small_index2size(I));\ |
3632 | if (RTCHECK(F == smallbin_at(M,I) || (ok_address(M, F) && F->bk == P))) { \ |
3633 | if (B == F) {\ |
3634 | clear_smallmap(M, I);\ |
3635 | }\ |
3636 | else if (RTCHECK(B == smallbin_at(M,I) ||\ |
3637 | (ok_address(M, B) && B->fd == P))) {\ |
3638 | F->bk = B;\ |
3639 | B->fd = F;\ |
3640 | }\ |
3641 | else {\ |
3642 | CORRUPTION_ERROR_ACTION(M);\ |
3643 | }\ |
3644 | }\ |
3645 | else {\ |
3646 | CORRUPTION_ERROR_ACTION(M);\ |
3647 | }\ |
3648 | } |
3649 | |
3650 | /* Unlink the first chunk from a smallbin */ |
3651 | #define unlink_first_small_chunk(M, B, P, I) {\ |
3652 | mchunkptr F = P->fd;\ |
3653 | assert(P != B);\ |
3654 | assert(P != F);\ |
3655 | assert(chunksize(P) == small_index2size(I));\ |
3656 | if (B == F) {\ |
3657 | clear_smallmap(M, I);\ |
3658 | }\ |
3659 | else if (RTCHECK(ok_address(M, F) && F->bk == P)) {\ |
3660 | F->bk = B;\ |
3661 | B->fd = F;\ |
3662 | }\ |
3663 | else {\ |
3664 | CORRUPTION_ERROR_ACTION(M);\ |
3665 | }\ |
3666 | } |
3667 | |
3668 | /* Replace dv node, binning the old one */ |
3669 | /* Used only when dvsize known to be small */ |
3670 | #define replace_dv(M, P, S) {\ |
3671 | size_t DVS = M->dvsize;\ |
3672 | assert(is_small(DVS));\ |
3673 | if (DVS != 0) {\ |
3674 | mchunkptr DV = M->dv;\ |
3675 | insert_small_chunk(M, DV, DVS);\ |
3676 | }\ |
3677 | M->dvsize = S;\ |
3678 | M->dv = P;\ |
3679 | } |
3680 | |
3681 | /* ------------------------- Operations on trees ------------------------- */ |
3682 | |
3683 | /* Insert chunk into tree */ |
3684 | #define insert_large_chunk(M, X, S) {\ |
3685 | tbinptr* H;\ |
3686 | bindex_t I;\ |
3687 | compute_tree_index(S, I);\ |
3688 | H = treebin_at(M, I);\ |
3689 | X->index = I;\ |
3690 | X->child[0] = X->child[1] = 0;\ |
3691 | if (!treemap_is_marked(M, I)) {\ |
3692 | mark_treemap(M, I);\ |
3693 | *H = X;\ |
3694 | X->parent = (tchunkptr)H;\ |
3695 | X->fd = X->bk = X;\ |
3696 | }\ |
3697 | else {\ |
3698 | tchunkptr T = *H;\ |
3699 | size_t K = S << leftshift_for_tree_index(I);\ |
3700 | for (;;) {\ |
3701 | if (chunksize(T) != S) {\ |
3702 | tchunkptr* C = &(T->child[(K >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]);\ |
3703 | K <<= 1;\ |
3704 | if (*C != 0)\ |
3705 | T = *C;\ |
3706 | else if (RTCHECK(ok_address(M, C))) {\ |
3707 | *C = X;\ |
3708 | X->parent = T;\ |
3709 | X->fd = X->bk = X;\ |
3710 | break;\ |
3711 | }\ |
3712 | else {\ |
3713 | CORRUPTION_ERROR_ACTION(M);\ |
3714 | break;\ |
3715 | }\ |
3716 | }\ |
3717 | else {\ |
3718 | tchunkptr F = T->fd;\ |
3719 | if (RTCHECK(ok_address(M, T) && ok_address(M, F))) {\ |
3720 | T->fd = F->bk = X;\ |
3721 | X->fd = F;\ |
3722 | X->bk = T;\ |
3723 | X->parent = 0;\ |
3724 | break;\ |
3725 | }\ |
3726 | else {\ |
3727 | CORRUPTION_ERROR_ACTION(M);\ |
3728 | break;\ |
3729 | }\ |
3730 | }\ |
3731 | }\ |
3732 | }\ |
3733 | } |
3734 | |
3735 | /* |
3736 | Unlink steps: |
3737 | |
3738 | 1. If x is a chained node, unlink it from its same-sized fd/bk links |
3739 | and choose its bk node as its replacement. |
3740 | 2. If x was the last node of its size, but not a leaf node, it must |
3741 | be replaced with a leaf node (not merely one with an open left or |
3742 | right), to make sure that lefts and rights of descendents |
3743 | correspond properly to bit masks. We use the rightmost descendent |
3744 | of x. We could use any other leaf, but this is easy to locate and |
3745 | tends to counteract removal of leftmosts elsewhere, and so keeps |
3746 | paths shorter than minimally guaranteed. This doesn't loop much |
3747 | because on average a node in a tree is near the bottom. |
3748 | 3. If x is the base of a chain (i.e., has parent links) relink |
3749 | x's parent and children to x's replacement (or null if none). |
3750 | */ |
3751 | |
3752 | #define unlink_large_chunk(M, X) {\ |
3753 | tchunkptr XP = X->parent;\ |
3754 | tchunkptr R;\ |
3755 | if (X->bk != X) {\ |
3756 | tchunkptr F = X->fd;\ |
3757 | R = X->bk;\ |
3758 | if (RTCHECK(ok_address(M, F) && F->bk == X && R->fd == X)) {\ |
3759 | F->bk = R;\ |
3760 | R->fd = F;\ |
3761 | }\ |
3762 | else {\ |
3763 | CORRUPTION_ERROR_ACTION(M);\ |
3764 | }\ |
3765 | }\ |
3766 | else {\ |
3767 | tchunkptr* RP;\ |
3768 | if (((R = *(RP = &(X->child[1]))) != 0) ||\ |
3769 | ((R = *(RP = &(X->child[0]))) != 0)) {\ |
3770 | tchunkptr* CP;\ |
3771 | while ((*(CP = &(R->child[1])) != 0) ||\ |
3772 | (*(CP = &(R->child[0])) != 0)) {\ |
3773 | R = *(RP = CP);\ |
3774 | }\ |
3775 | if (RTCHECK(ok_address(M, RP)))\ |
3776 | *RP = 0;\ |
3777 | else {\ |
3778 | CORRUPTION_ERROR_ACTION(M);\ |
3779 | }\ |
3780 | }\ |
3781 | }\ |
3782 | if (XP != 0) {\ |
3783 | tbinptr* H = treebin_at(M, X->index);\ |
3784 | if (X == *H) {\ |
3785 | if ((*H = R) == 0) \ |
3786 | clear_treemap(M, X->index);\ |
3787 | }\ |
3788 | else if (RTCHECK(ok_address(M, XP))) {\ |
3789 | if (XP->child[0] == X) \ |
3790 | XP->child[0] = R;\ |
3791 | else \ |
3792 | XP->child[1] = R;\ |
3793 | }\ |
3794 | else\ |
3795 | CORRUPTION_ERROR_ACTION(M);\ |
3796 | if (R != 0) {\ |
3797 | if (RTCHECK(ok_address(M, R))) {\ |
3798 | tchunkptr C0, C1;\ |
3799 | R->parent = XP;\ |
3800 | if ((C0 = X->child[0]) != 0) {\ |
3801 | if (RTCHECK(ok_address(M, C0))) {\ |
3802 | R->child[0] = C0;\ |
3803 | C0->parent = R;\ |
3804 | }\ |
3805 | else\ |
3806 | CORRUPTION_ERROR_ACTION(M);\ |
3807 | }\ |
3808 | if ((C1 = X->child[1]) != 0) {\ |
3809 | if (RTCHECK(ok_address(M, C1))) {\ |
3810 | R->child[1] = C1;\ |
3811 | C1->parent = R;\ |
3812 | }\ |
3813 | else\ |
3814 | CORRUPTION_ERROR_ACTION(M);\ |
3815 | }\ |
3816 | }\ |
3817 | else\ |
3818 | CORRUPTION_ERROR_ACTION(M);\ |
3819 | }\ |
3820 | }\ |
3821 | } |
3822 | |
3823 | /* Relays to large vs small bin operations */ |
3824 | |
3825 | #define insert_chunk(M, P, S)\ |
3826 | if (is_small(S)) insert_small_chunk(M, P, S)\ |
3827 | else { tchunkptr TP = (tchunkptr)(P); insert_large_chunk(M, TP, S); } |
3828 | |
3829 | #define unlink_chunk(M, P, S)\ |
3830 | if (is_small(S)) unlink_small_chunk(M, P, S)\ |
3831 | else { tchunkptr TP = (tchunkptr)(P); unlink_large_chunk(M, TP); } |
3832 | |
3833 | |
3834 | /* Relays to internal calls to malloc/free from realloc, memalign etc */ |
3835 | |
3836 | #if ONLY_MSPACES |
3837 | #define internal_malloc(m, b) mspace_malloc(m, b) |
3838 | #define internal_free(m, mem) mspace_free(m,mem); |
3839 | #else /* ONLY_MSPACES */ |
3840 | #if MSPACES |
3841 | #define internal_malloc(m, b)\ |
3842 | ((m == gm)? dlmalloc(b) : mspace_malloc(m, b)) |
3843 | #define internal_free(m, mem)\ |
3844 | if (m == gm) dlfree(mem); else mspace_free(m,mem); |
3845 | #else /* MSPACES */ |
3846 | #define internal_malloc(m, b) dlmalloc(b) |
3847 | #define internal_free(m, mem) dlfree(mem) |
3848 | #endif /* MSPACES */ |
3849 | #endif /* ONLY_MSPACES */ |
3850 | |
3851 | /* ----------------------- Direct-mmapping chunks ----------------------- */ |
3852 | |
3853 | /* |
3854 | Directly mmapped chunks are set up with an offset to the start of |
3855 | the mmapped region stored in the prev_foot field of the chunk. This |
3856 | allows reconstruction of the required argument to MUNMAP when freed, |
3857 | and also allows adjustment of the returned chunk to meet alignment |
3858 | requirements (especially in memalign). |
3859 | */ |
3860 | |
3861 | /* Malloc using mmap */ |
3862 | static void* mmap_alloc(mstate m, size_t nb) { |
3863 | size_t mmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
3864 | if (m->footprint_limit != 0) { |
3865 | size_t fp = m->footprint + mmsize; |
3866 | if (fp <= m->footprint || fp > m->footprint_limit) |
3867 | return 0; |
3868 | } |
3869 | if (mmsize > nb) { /* Check for wrap around 0 */ |
3870 | char* mm = (char*)(CALL_DIRECT_MMAP(mmsize)); |
3871 | if (mm != CMFAIL) { |
3872 | size_t offset = align_offset(chunk2mem(mm)); |
3873 | size_t psize = mmsize - offset - MMAP_FOOT_PAD; |
3874 | mchunkptr p = (mchunkptr)(mm + offset); |
3875 | p->prev_foot = offset; |
3876 | p->head = psize; |
3877 | mark_inuse_foot(m, p, psize); |
3878 | chunk_plus_offset(p, psize)->head = FENCEPOST_HEAD; |
3879 | chunk_plus_offset(p, psize+SIZE_T_SIZE)->head = 0; |
3880 | |
3881 | if (m->least_addr == 0 || mm < m->least_addr) |
3882 | m->least_addr = mm; |
3883 | if ((m->footprint += mmsize) > m->max_footprint) |
3884 | m->max_footprint = m->footprint; |
3885 | assert(is_aligned(chunk2mem(p))); |
3886 | check_mmapped_chunk(m, p); |
3887 | return chunk2mem(p); |
3888 | } |
3889 | } |
3890 | return 0; |
3891 | } |
3892 | |
3893 | /* Realloc using mmap */ |
3894 | static mchunkptr mmap_resize(mstate m, mchunkptr oldp, size_t nb, int flags) { |
3895 | size_t oldsize = chunksize(oldp); |
3896 | (void)flags; /* placate people compiling -Wunused */ |
3897 | if (is_small(nb)) /* Can't shrink mmap regions below small size */ |
3898 | return 0; |
3899 | /* Keep old chunk if big enough but not too big */ |
3900 | if (oldsize >= nb + SIZE_T_SIZE && |
3901 | (oldsize - nb) <= (mparams.granularity << 1)) |
3902 | return oldp; |
3903 | else { |
3904 | size_t offset = oldp->prev_foot; |
3905 | size_t oldmmsize = oldsize + offset + MMAP_FOOT_PAD; |
3906 | size_t newmmsize = mmap_align(nb + SIX_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
3907 | char* cp = (char*)CALL_MREMAP((char*)oldp - offset, |
3908 | oldmmsize, newmmsize, flags); |
3909 | if (cp != CMFAIL) { |
3910 | mchunkptr newp = (mchunkptr)(cp + offset); |
3911 | size_t psize = newmmsize - offset - MMAP_FOOT_PAD; |
3912 | newp->head = psize; |
3913 | mark_inuse_foot(m, newp, psize); |
3914 | chunk_plus_offset(newp, psize)->head = FENCEPOST_HEAD; |
3915 | chunk_plus_offset(newp, psize+SIZE_T_SIZE)->head = 0; |
3916 | |
3917 | if (cp < m->least_addr) |
3918 | m->least_addr = cp; |
3919 | if ((m->footprint += newmmsize - oldmmsize) > m->max_footprint) |
3920 | m->max_footprint = m->footprint; |
3921 | check_mmapped_chunk(m, newp); |
3922 | return newp; |
3923 | } |
3924 | } |
3925 | return 0; |
3926 | } |
3927 | |
3928 | |
3929 | /* -------------------------- mspace management -------------------------- */ |
3930 | |
3931 | /* Initialize top chunk and its size */ |
3932 | static void init_top(mstate m, mchunkptr p, size_t psize) { |
3933 | /* Ensure alignment */ |
3934 | size_t offset = align_offset(chunk2mem(p)); |
3935 | p = (mchunkptr)((char*)p + offset); |
3936 | psize -= offset; |
3937 | |
3938 | m->top = p; |
3939 | m->topsize = psize; |
3940 | p->head = psize | PINUSE_BIT; |
3941 | /* set size of fake trailing chunk holding overhead space only once */ |
3942 | chunk_plus_offset(p, psize)->head = TOP_FOOT_SIZE; |
3943 | m->trim_check = mparams.trim_threshold; /* reset on each update */ |
3944 | } |
3945 | |
3946 | /* Initialize bins for a new mstate that is otherwise zeroed out */ |
3947 | static void init_bins(mstate m) { |
3948 | /* Establish circular links for smallbins */ |
3949 | bindex_t i; |
3950 | for (i = 0; i < NSMALLBINS; ++i) { |
3951 | sbinptr bin = smallbin_at(m,i); |
3952 | bin->fd = bin->bk = bin; |
3953 | } |
3954 | } |
3955 | |
3956 | #if PROCEED_ON_ERROR |
3957 | |
3958 | /* default corruption action */ |
3959 | static void reset_on_error(mstate m) { |
3960 | int i; |
3961 | ++malloc_corruption_error_count; |
3962 | /* Reinitialize fields to forget about all memory */ |
3963 | m->smallmap = m->treemap = 0; |
3964 | m->dvsize = m->topsize = 0; |
3965 | m->seg.base = 0; |
3966 | m->seg.size = 0; |
3967 | m->seg.next = 0; |
3968 | m->top = m->dv = 0; |
3969 | for (i = 0; i < NTREEBINS; ++i) |
3970 | *treebin_at(m, i) = 0; |
3971 | init_bins(m); |
3972 | } |
3973 | #endif /* PROCEED_ON_ERROR */ |
3974 | |
/* Allocate a chunk from newbase, consolidating the remainder with the
   first chunk of the successor base (oldbase). */
3976 | static void* prepend_alloc(mstate m, char* newbase, char* oldbase, |
3977 | size_t nb) { |
3978 | mchunkptr p = align_as_chunk(newbase); |
3979 | mchunkptr oldfirst = align_as_chunk(oldbase); |
3980 | size_t psize = (char*)oldfirst - (char*)p; |
3981 | mchunkptr q = chunk_plus_offset(p, nb); |
3982 | size_t qsize = psize - nb; |
3983 | set_size_and_pinuse_of_inuse_chunk(m, p, nb); |
3984 | |
3985 | assert((char*)oldfirst > (char*)q); |
3986 | assert(pinuse(oldfirst)); |
3987 | assert(qsize >= MIN_CHUNK_SIZE); |
3988 | |
3989 | /* consolidate remainder with first chunk of old base */ |
3990 | if (oldfirst == m->top) { |
3991 | size_t tsize = m->topsize += qsize; |
3992 | m->top = q; |
3993 | q->head = tsize | PINUSE_BIT; |
3994 | check_top_chunk(m, q); |
3995 | } |
3996 | else if (oldfirst == m->dv) { |
3997 | size_t dsize = m->dvsize += qsize; |
3998 | m->dv = q; |
3999 | set_size_and_pinuse_of_free_chunk(q, dsize); |
4000 | } |
4001 | else { |
4002 | if (!is_inuse(oldfirst)) { |
4003 | size_t nsize = chunksize(oldfirst); |
4004 | unlink_chunk(m, oldfirst, nsize); |
4005 | oldfirst = chunk_plus_offset(oldfirst, nsize); |
4006 | qsize += nsize; |
4007 | } |
4008 | set_free_with_pinuse(q, qsize, oldfirst); |
4009 | insert_chunk(m, q, qsize); |
4010 | check_free_chunk(m, q); |
4011 | } |
4012 | |
4013 | check_malloced_chunk(m, chunk2mem(p), nb); |
4014 | return chunk2mem(p); |
4015 | } |
4016 | |
4017 | /* Add a segment to hold a new noncontiguous region */ |
4018 | static void add_segment(mstate m, char* tbase, size_t tsize, flag_t mmapped) { |
4019 | /* Determine locations and sizes of segment, fenceposts, old top */ |
4020 | char* old_top = (char*)m->top; |
4021 | msegmentptr oldsp = segment_holding(m, old_top); |
4022 | char* old_end = oldsp->base + oldsp->size; |
4023 | size_t ssize = pad_request(sizeof(struct malloc_segment)); |
4024 | char* rawsp = old_end - (ssize + FOUR_SIZE_T_SIZES + CHUNK_ALIGN_MASK); |
4025 | size_t offset = align_offset(chunk2mem(rawsp)); |
4026 | char* asp = rawsp + offset; |
4027 | char* csp = (asp < (old_top + MIN_CHUNK_SIZE))? old_top : asp; |
4028 | mchunkptr sp = (mchunkptr)csp; |
4029 | msegmentptr ss = (msegmentptr)(chunk2mem(sp)); |
4030 | mchunkptr tnext = chunk_plus_offset(sp, ssize); |
4031 | mchunkptr p = tnext; |
4032 | int nfences = 0; |
4033 | |
4034 | /* reset top to new space */ |
4035 | init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); |
4036 | |
4037 | /* Set up segment record */ |
4038 | assert(is_aligned(ss)); |
4039 | set_size_and_pinuse_of_inuse_chunk(m, sp, ssize); |
4040 | *ss = m->seg; /* Push current record */ |
4041 | m->seg.base = tbase; |
4042 | m->seg.size = tsize; |
4043 | m->seg.sflags = mmapped; |
4044 | m->seg.next = ss; |
4045 | |
4046 | /* Insert trailing fenceposts */ |
4047 | for (;;) { |
4048 | mchunkptr nextp = chunk_plus_offset(p, SIZE_T_SIZE); |
4049 | p->head = FENCEPOST_HEAD; |
4050 | ++nfences; |
4051 | if ((char*)(&(nextp->head)) < old_end) |
4052 | p = nextp; |
4053 | else |
4054 | break; |
4055 | } |
4056 | (void)nfences; |
4057 | assert(nfences >= 2); |
4058 | |
4059 | /* Insert the rest of old top into a bin as an ordinary free chunk */ |
4060 | if (csp != old_top) { |
4061 | mchunkptr q = (mchunkptr)old_top; |
4062 | size_t psize = csp - old_top; |
4063 | mchunkptr tn = chunk_plus_offset(q, psize); |
4064 | set_free_with_pinuse(q, psize, tn); |
4065 | insert_chunk(m, q, psize); |
4066 | } |
4067 | |
4068 | check_top_chunk(m, m->top); |
4069 | } |
4070 | |
4071 | /* -------------------------- System allocation -------------------------- */ |
4072 | |
4073 | /* Get memory from system using MORECORE or MMAP */ |
4074 | static void* sys_alloc(mstate m, size_t nb) { |
4075 | char* tbase = CMFAIL; |
4076 | size_t tsize = 0; |
4077 | flag_t mmap_flag = 0; |
4078 | size_t asize; /* allocation size */ |
4079 | |
4080 | ensure_initialization(); |
4081 | |
4082 | /* Directly map large chunks, but only if already initialized */ |
4083 | if (use_mmap(m) && nb >= mparams.mmap_threshold && m->topsize != 0) { |
4084 | void* mem = mmap_alloc(m, nb); |
4085 | if (mem != 0) |
4086 | return mem; |
4087 | } |
4088 | |
4089 | asize = granularity_align(nb + SYS_ALLOC_PADDING); |
4090 | if (asize <= nb) |
4091 | return 0; /* wraparound */ |
4092 | if (m->footprint_limit != 0) { |
4093 | size_t fp = m->footprint + asize; |
4094 | if (fp <= m->footprint || fp > m->footprint_limit) |
4095 | return 0; |
4096 | } |
4097 | |
4098 | /* |
4099 | Try getting memory in any of three ways (in most-preferred to |
4100 | least-preferred order): |
4101 | 1. A call to MORECORE that can normally contiguously extend memory. |
       (disabled if not MORECORE_CONTIGUOUS or not HAVE_MORECORE
       or main space is mmapped or a previous contiguous call failed)
4104 | 2. A call to MMAP new space (disabled if not HAVE_MMAP). |
4105 | Note that under the default settings, if MORECORE is unable to |
4106 | fulfill a request, and HAVE_MMAP is true, then mmap is |
4107 | used as a noncontiguous system allocator. This is a useful backup |
4108 | strategy for systems with holes in address spaces -- in this case |
4109 | sbrk cannot contiguously expand the heap, but mmap may be able to |
4110 | find space. |
4111 | 3. A call to MORECORE that cannot usually contiguously extend memory. |
4112 | (disabled if not HAVE_MORECORE) |
4113 | |
4114 | In all cases, we need to request enough bytes from system to ensure |
4115 | we can malloc nb bytes upon success, so pad with enough space for |
4116 | top_foot, plus alignment-pad to make sure we don't lose bytes if |
4117 | not on boundary, and round this up to a granularity unit. |
4118 | */ |
4119 | |
4120 | if (MORECORE_CONTIGUOUS && !use_noncontiguous(m)) { |
4121 | char* br = CMFAIL; |
4122 | size_t ssize = asize; /* sbrk call size */ |
4123 | msegmentptr ss = (m->top == 0)? 0 : segment_holding(m, (char*)m->top); |
4124 | ACQUIRE_MALLOC_GLOBAL_LOCK(); |
4125 | |
4126 | if (ss == 0) { /* First time through or recovery */ |
4127 | char* base = (char*)CALL_MORECORE(0); |
4128 | if (base != CMFAIL) { |
4129 | size_t fp; |
4130 | /* Adjust to end on a page boundary */ |
4131 | if (!is_page_aligned(base)) |
4132 | ssize += (page_align((size_t)base) - (size_t)base); |
4133 | fp = m->footprint + ssize; /* recheck limits */ |
4134 | if (ssize > nb && ssize < HALF_MAX_SIZE_T && |
4135 | (m->footprint_limit == 0 || |
4136 | (fp > m->footprint && fp <= m->footprint_limit)) && |
4137 | (br = (char*)(CALL_MORECORE(ssize))) == base) { |
4138 | tbase = base; |
4139 | tsize = ssize; |
4140 | } |
4141 | } |
4142 | } |
4143 | else { |
4144 | /* Subtract out existing available top space from MORECORE request. */ |
4145 | ssize = granularity_align(nb - m->topsize + SYS_ALLOC_PADDING); |
      /* Use mem here only if it did contiguously extend old space */
4147 | if (ssize < HALF_MAX_SIZE_T && |
4148 | (br = (char*)(CALL_MORECORE(ssize))) == ss->base+ss->size) { |
4149 | tbase = br; |
4150 | tsize = ssize; |
4151 | } |
4152 | } |
4153 | |
4154 | if (tbase == CMFAIL) { /* Cope with partial failure */ |
4155 | if (br != CMFAIL) { /* Try to use/extend the space we did get */ |
4156 | if (ssize < HALF_MAX_SIZE_T && |
4157 | ssize < nb + SYS_ALLOC_PADDING) { |
4158 | size_t esize = granularity_align(nb + SYS_ALLOC_PADDING - ssize); |
4159 | if (esize < HALF_MAX_SIZE_T) { |
4160 | char* end = (char*)CALL_MORECORE(esize); |
4161 | if (end != CMFAIL) |
4162 | ssize += esize; |
4163 | else { /* Can't use; try to release */ |
4164 | (void) CALL_MORECORE(-ssize); |
4165 | br = CMFAIL; |
4166 | } |
4167 | } |
4168 | } |
4169 | } |
4170 | if (br != CMFAIL) { /* Use the space we did get */ |
4171 | tbase = br; |
4172 | tsize = ssize; |
4173 | } |
4174 | else |
4175 | disable_contiguous(m); /* Don't try contiguous path in the future */ |
4176 | } |
4177 | |
4178 | RELEASE_MALLOC_GLOBAL_LOCK(); |
4179 | } |
4180 | |
4181 | if (HAVE_MMAP && tbase == CMFAIL) { /* Try MMAP */ |
4182 | char* mp = (char*)(CALL_MMAP(asize)); |
4183 | if (mp != CMFAIL) { |
4184 | tbase = mp; |
4185 | tsize = asize; |
4186 | mmap_flag = USE_MMAP_BIT; |
4187 | } |
4188 | } |
4189 | |
4190 | if (HAVE_MORECORE && tbase == CMFAIL) { /* Try noncontiguous MORECORE */ |
4191 | if (asize < HALF_MAX_SIZE_T) { |
4192 | char* br = CMFAIL; |
4193 | char* end = CMFAIL; |
4194 | ACQUIRE_MALLOC_GLOBAL_LOCK(); |
4195 | br = (char*)(CALL_MORECORE(asize)); |
4196 | end = (char*)(CALL_MORECORE(0)); |
4197 | RELEASE_MALLOC_GLOBAL_LOCK(); |
4198 | if (br != CMFAIL && end != CMFAIL && br < end) { |
4199 | size_t ssize = end - br; |
4200 | if (ssize > nb + TOP_FOOT_SIZE) { |
4201 | tbase = br; |
4202 | tsize = ssize; |
4203 | } |
4204 | } |
4205 | } |
4206 | } |
4207 | |
4208 | if (tbase != CMFAIL) { |
4209 | |
4210 | if ((m->footprint += tsize) > m->max_footprint) |
4211 | m->max_footprint = m->footprint; |
4212 | |
4213 | if (!is_initialized(m)) { /* first-time initialization */ |
4214 | if (m->least_addr == 0 || tbase < m->least_addr) |
4215 | m->least_addr = tbase; |
4216 | m->seg.base = tbase; |
4217 | m->seg.size = tsize; |
4218 | m->seg.sflags = mmap_flag; |
4219 | m->magic = mparams.magic; |
4220 | m->release_checks = MAX_RELEASE_CHECK_RATE; |
4221 | init_bins(m); |
4222 | #if !ONLY_MSPACES |
4223 | if (is_global(m)) |
4224 | init_top(m, (mchunkptr)tbase, tsize - TOP_FOOT_SIZE); |
4225 | else |
4226 | #endif |
4227 | { |
4228 | /* Offset top by embedded malloc_state */ |
4229 | mchunkptr mn = next_chunk(mem2chunk(m)); |
        init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE);
4231 | } |
4232 | } |
4233 | |
4234 | else { |
4235 | /* Try to merge with an existing segment */ |
4236 | msegmentptr sp = &m->seg; |
4237 | /* Only consider most recent segment if traversal suppressed */ |
4238 | while (sp != 0 && tbase != sp->base + sp->size) |
4239 | sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
4240 | if (sp != 0 && |
4241 | !is_extern_segment(sp) && |
4242 | (sp->sflags & USE_MMAP_BIT) == mmap_flag && |
4243 | segment_holds(sp, m->top)) { /* append */ |
4244 | sp->size += tsize; |
4245 | init_top(m, m->top, m->topsize + tsize); |
4246 | } |
4247 | else { |
4248 | if (tbase < m->least_addr) |
4249 | m->least_addr = tbase; |
4250 | sp = &m->seg; |
4251 | while (sp != 0 && sp->base != tbase + tsize) |
4252 | sp = (NO_SEGMENT_TRAVERSAL) ? 0 : sp->next; |
4253 | if (sp != 0 && |
4254 | !is_extern_segment(sp) && |
4255 | (sp->sflags & USE_MMAP_BIT) == mmap_flag) { |
4256 | char* oldbase = sp->base; |
4257 | sp->base = tbase; |
4258 | sp->size += tsize; |
4259 | return prepend_alloc(m, tbase, oldbase, nb); |
4260 | } |
4261 | else |
4262 | add_segment(m, tbase, tsize, mmap_flag); |
4263 | } |
4264 | } |
4265 | |
4266 | if (nb < m->topsize) { /* Allocate from new or extended top space */ |
4267 | size_t rsize = m->topsize -= nb; |
4268 | mchunkptr p = m->top; |
4269 | mchunkptr r = m->top = chunk_plus_offset(p, nb); |
4270 | r->head = rsize | PINUSE_BIT; |
4271 | set_size_and_pinuse_of_inuse_chunk(m, p, nb); |
4272 | check_top_chunk(m, m->top); |
4273 | check_malloced_chunk(m, chunk2mem(p), nb); |
4274 | return chunk2mem(p); |
4275 | } |
4276 | } |
4277 | |
4278 | MALLOC_FAILURE_ACTION; |
4279 | return 0; |
4280 | } |
4281 | |
4282 | /* ----------------------- system deallocation -------------------------- */ |
4283 | |
4284 | /* Unmap and unlink any mmapped segments that don't contain used chunks */ |
4285 | static size_t release_unused_segments(mstate m) { |
4286 | size_t released = 0; |
4287 | int nsegs = 0; |
4288 | msegmentptr pred = &m->seg; |
4289 | msegmentptr sp = pred->next; |
4290 | while (sp != 0) { |
4291 | char* base = sp->base; |
4292 | size_t size = sp->size; |
4293 | msegmentptr next = sp->next; |
4294 | ++nsegs; |
4295 | if (is_mmapped_segment(sp) && !is_extern_segment(sp)) { |
4296 | mchunkptr p = align_as_chunk(base); |
4297 | size_t psize = chunksize(p); |
4298 | /* Can unmap if first chunk holds entire segment and not pinned */ |
4299 | if (!is_inuse(p) && (char*)p + psize >= base + size - TOP_FOOT_SIZE) { |
4300 | tchunkptr tp = (tchunkptr)p; |
4301 | assert(segment_holds(sp, (char*)sp)); |
4302 | if (p == m->dv) { |
4303 | m->dv = 0; |
4304 | m->dvsize = 0; |
4305 | } |
4306 | else { |
4307 | unlink_large_chunk(m, tp); |
4308 | } |
4309 | if (CALL_MUNMAP(base, size) == 0) { |
4310 | released += size; |
4311 | m->footprint -= size; |
4312 | /* unlink obsoleted record */ |
4313 | sp = pred; |
4314 | sp->next = next; |
4315 | } |
4316 | else { /* back out if cannot unmap */ |
4317 | insert_large_chunk(m, tp, psize); |
4318 | } |
4319 | } |
4320 | } |
4321 | if (NO_SEGMENT_TRAVERSAL) /* scan only first segment */ |
4322 | break; |
4323 | pred = sp; |
4324 | sp = next; |
4325 | } |
4326 | /* Reset check counter */ |
4327 | m->release_checks = (((size_t) nsegs > (size_t) MAX_RELEASE_CHECK_RATE)? |
4328 | (size_t) nsegs : (size_t) MAX_RELEASE_CHECK_RATE); |
4329 | return released; |
4330 | } |
4331 | |
4332 | static int sys_trim(mstate m, size_t pad) { |
4333 | size_t released = 0; |
4334 | ensure_initialization(); |
4335 | if (pad < MAX_REQUEST && is_initialized(m)) { |
4336 | pad += TOP_FOOT_SIZE; /* ensure enough room for segment overhead */ |
4337 | |
4338 | if (m->topsize > pad) { |
4339 | /* Shrink top space in granularity-size units, keeping at least one */ |
4340 | size_t unit = mparams.granularity; |
4341 | size_t extra = ((m->topsize - pad + (unit - SIZE_T_ONE)) / unit - |
4342 | SIZE_T_ONE) * unit; |
4343 | msegmentptr sp = segment_holding(m, (char*)m->top); |
4344 | |
4345 | if (!is_extern_segment(sp)) { |
4346 | if (is_mmapped_segment(sp)) { |
4347 | if (HAVE_MMAP && |
4348 | sp->size >= extra && |
4349 | !has_segment_link(m, sp)) { /* can't shrink if pinned */ |
4350 | size_t newsize = sp->size - extra; |
4351 | (void)newsize; /* placate people compiling -Wunused-variable */ |
4352 | /* Prefer mremap, fall back to munmap */ |
4353 | if ((CALL_MREMAP(sp->base, sp->size, newsize, 0) != MFAIL) || |
4354 | (CALL_MUNMAP(sp->base + newsize, extra) == 0)) { |
4355 | released = extra; |
4356 | } |
4357 | } |
4358 | } |
4359 | else if (HAVE_MORECORE) { |
4360 | if (extra >= HALF_MAX_SIZE_T) /* Avoid wrapping negative */ |
4361 | extra = (HALF_MAX_SIZE_T) + SIZE_T_ONE - unit; |
4362 | ACQUIRE_MALLOC_GLOBAL_LOCK(); |
4363 | { |
4364 | /* Make sure end of memory is where we last set it. */ |
4365 | char* old_br = (char*)(CALL_MORECORE(0)); |
4366 | if (old_br == sp->base + sp->size) { |
4367 | char* rel_br = (char*)(CALL_MORECORE(-extra)); |
4368 | char* new_br = (char*)(CALL_MORECORE(0)); |
4369 | if (rel_br != CMFAIL && new_br < old_br) |
4370 | released = old_br - new_br; |
4371 | } |
4372 | } |
4373 | RELEASE_MALLOC_GLOBAL_LOCK(); |
4374 | } |
4375 | } |
4376 | |
4377 | if (released != 0) { |
4378 | sp->size -= released; |
4379 | m->footprint -= released; |
4380 | init_top(m, m->top, m->topsize - released); |
4381 | check_top_chunk(m, m->top); |
4382 | } |
4383 | } |
4384 | |
4385 | /* Unmap any unused mmapped segments */ |
4386 | if (HAVE_MMAP) |
4387 | released += release_unused_segments(m); |
4388 | |
4389 | /* On failure, disable autotrim to avoid repeated failed future calls */ |
4390 | if (released == 0 && m->topsize > m->trim_check) |
4391 | m->trim_check = MAX_SIZE_T; |
4392 | } |
4393 | |
4394 | return (released != 0)? 1 : 0; |
4395 | } |
4396 | |
4397 | /* Consolidate and bin a chunk. Differs from exported versions |
4398 | of free mainly in that the chunk need not be marked as inuse. |
4399 | */ |
4400 | static void dispose_chunk(mstate m, mchunkptr p, size_t psize) { |
4401 | mchunkptr next = chunk_plus_offset(p, psize); |
4402 | if (!pinuse(p)) { |
4403 | mchunkptr prev; |
4404 | size_t prevsize = p->prev_foot; |
4405 | if (is_mmapped(p)) { |
4406 | psize += prevsize + MMAP_FOOT_PAD; |
4407 | if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
4408 | m->footprint -= psize; |
4409 | return; |
4410 | } |
4411 | prev = chunk_minus_offset(p, prevsize); |
4412 | psize += prevsize; |
4413 | p = prev; |
4414 | if (RTCHECK(ok_address(m, prev))) { /* consolidate backward */ |
4415 | if (p != m->dv) { |
4416 | unlink_chunk(m, p, prevsize); |
4417 | } |
4418 | else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
4419 | m->dvsize = psize; |
4420 | set_free_with_pinuse(p, psize, next); |
4421 | return; |
4422 | } |
4423 | } |
4424 | else { |
4425 | CORRUPTION_ERROR_ACTION(m); |
4426 | return; |
4427 | } |
4428 | } |
4429 | if (RTCHECK(ok_address(m, next))) { |
4430 | if (!cinuse(next)) { /* consolidate forward */ |
4431 | if (next == m->top) { |
4432 | size_t tsize = m->topsize += psize; |
4433 | m->top = p; |
4434 | p->head = tsize | PINUSE_BIT; |
4435 | if (p == m->dv) { |
4436 | m->dv = 0; |
4437 | m->dvsize = 0; |
4438 | } |
4439 | return; |
4440 | } |
4441 | else if (next == m->dv) { |
4442 | size_t dsize = m->dvsize += psize; |
4443 | m->dv = p; |
4444 | set_size_and_pinuse_of_free_chunk(p, dsize); |
4445 | return; |
4446 | } |
4447 | else { |
4448 | size_t nsize = chunksize(next); |
4449 | psize += nsize; |
4450 | unlink_chunk(m, next, nsize); |
4451 | set_size_and_pinuse_of_free_chunk(p, psize); |
4452 | if (p == m->dv) { |
4453 | m->dvsize = psize; |
4454 | return; |
4455 | } |
4456 | } |
4457 | } |
4458 | else { |
4459 | set_free_with_pinuse(p, psize, next); |
4460 | } |
4461 | insert_chunk(m, p, psize); |
4462 | } |
4463 | else { |
4464 | CORRUPTION_ERROR_ACTION(m); |
4465 | } |
4466 | } |
4467 | |
4468 | /* ---------------------------- malloc --------------------------- */ |
4469 | |
4470 | /* allocate a large request from the best fitting chunk in a treebin */ |
4471 | static void* tmalloc_large(mstate m, size_t nb) { |
4472 | tchunkptr v = 0; |
4473 | size_t rsize = -nb; /* Unsigned negation */ |
4474 | tchunkptr t; |
4475 | bindex_t idx; |
4476 | compute_tree_index(nb, idx); |
4477 | if ((t = *treebin_at(m, idx)) != 0) { |
4478 | /* Traverse tree for this bin looking for node with size == nb */ |
4479 | size_t sizebits = nb << leftshift_for_tree_index(idx); |
4480 | tchunkptr rst = 0; /* The deepest untaken right subtree */ |
4481 | for (;;) { |
4482 | tchunkptr rt; |
4483 | size_t trem = chunksize(t) - nb; |
4484 | if (trem < rsize) { |
4485 | v = t; |
4486 | if ((rsize = trem) == 0) |
4487 | break; |
4488 | } |
4489 | rt = t->child[1]; |
4490 | t = t->child[(sizebits >> (SIZE_T_BITSIZE-SIZE_T_ONE)) & 1]; |
4491 | if (rt != 0 && rt != t) |
4492 | rst = rt; |
4493 | if (t == 0) { |
4494 | t = rst; /* set t to least subtree holding sizes > nb */ |
4495 | break; |
4496 | } |
4497 | sizebits <<= 1; |
4498 | } |
4499 | } |
4500 | if (t == 0 && v == 0) { /* set t to root of next non-empty treebin */ |
4501 | binmap_t leftbits = left_bits(idx2bit(idx)) & m->treemap; |
4502 | if (leftbits != 0) { |
4503 | bindex_t i; |
4504 | binmap_t leastbit = least_bit(leftbits); |
4505 | compute_bit2idx(leastbit, i); |
4506 | t = *treebin_at(m, i); |
4507 | } |
4508 | } |
4509 | |
4510 | while (t != 0) { /* find smallest of tree or subtree */ |
4511 | size_t trem = chunksize(t) - nb; |
4512 | if (trem < rsize) { |
4513 | rsize = trem; |
4514 | v = t; |
4515 | } |
4516 | t = leftmost_child(t); |
4517 | } |
4518 | |
4519 | /* If dv is a better fit, return 0 so malloc will use it */ |
4520 | if (v != 0 && rsize < (size_t)(m->dvsize - nb)) { |
4521 | if (RTCHECK(ok_address(m, v))) { /* split */ |
4522 | mchunkptr r = chunk_plus_offset(v, nb); |
4523 | assert(chunksize(v) == rsize + nb); |
4524 | if (RTCHECK(ok_next(v, r))) { |
4525 | unlink_large_chunk(m, v); |
4526 | if (rsize < MIN_CHUNK_SIZE) |
4527 | set_inuse_and_pinuse(m, v, (rsize + nb)); |
4528 | else { |
4529 | set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
4530 | set_size_and_pinuse_of_free_chunk(r, rsize); |
4531 | insert_chunk(m, r, rsize); |
4532 | } |
4533 | return chunk2mem(v); |
4534 | } |
4535 | } |
4536 | CORRUPTION_ERROR_ACTION(m); |
4537 | } |
4538 | return 0; |
4539 | } |
4540 | |
4541 | /* allocate a small request from the best fitting chunk in a treebin */ |
4542 | static void* tmalloc_small(mstate m, size_t nb) { |
4543 | tchunkptr t, v; |
4544 | size_t rsize; |
4545 | bindex_t i; |
4546 | binmap_t leastbit = least_bit(m->treemap); |
4547 | compute_bit2idx(leastbit, i); |
4548 | v = t = *treebin_at(m, i); |
4549 | rsize = chunksize(t) - nb; |
4550 | |
4551 | while ((t = leftmost_child(t)) != 0) { |
4552 | size_t trem = chunksize(t) - nb; |
4553 | if (trem < rsize) { |
4554 | rsize = trem; |
4555 | v = t; |
4556 | } |
4557 | } |
4558 | |
4559 | if (RTCHECK(ok_address(m, v))) { |
4560 | mchunkptr r = chunk_plus_offset(v, nb); |
4561 | assert(chunksize(v) == rsize + nb); |
4562 | if (RTCHECK(ok_next(v, r))) { |
4563 | unlink_large_chunk(m, v); |
4564 | if (rsize < MIN_CHUNK_SIZE) |
4565 | set_inuse_and_pinuse(m, v, (rsize + nb)); |
4566 | else { |
4567 | set_size_and_pinuse_of_inuse_chunk(m, v, nb); |
4568 | set_size_and_pinuse_of_free_chunk(r, rsize); |
4569 | replace_dv(m, r, rsize); |
4570 | } |
4571 | return chunk2mem(v); |
4572 | } |
4573 | } |
4574 | |
4575 | CORRUPTION_ERROR_ACTION(m); |
4576 | return 0; |
4577 | } |
4578 | |
4579 | #if !ONLY_MSPACES |
4580 | |
4581 | void* dlmalloc(size_t bytes) { |
4582 | /* |
4583 | Basic algorithm: |
4584 | If a small request (< 256 bytes minus per-chunk overhead): |
4585 | 1. If one exists, use a remainderless chunk in associated smallbin. |
4586 | (Remainderless means that there are too few excess bytes to |
4587 | represent as a chunk.) |
4588 | 2. If it is big enough, use the dv chunk, which is normally the |
4589 | chunk adjacent to the one used for the most recent small request. |
4590 | 3. If one exists, split the smallest available chunk in a bin, |
4591 | saving remainder in dv. |
4592 | 4. If it is big enough, use the top chunk. |
       5. If available, get memory from system and use it.
4594 | Otherwise, for a large request: |
4595 | 1. Find the smallest available binned chunk that fits, and use it |
4596 | if it is better fitting than dv chunk, splitting if necessary. |
4597 | 2. If better fitting than any binned chunk, use the dv chunk. |
4598 | 3. If it is big enough, use the top chunk. |
4599 | 4. If request size >= mmap threshold, try to directly mmap this chunk. |
       5. If available, get memory from system and use it.

     The ugly gotos here ensure that postaction occurs along all paths.
4603 | */ |
4604 | |
4605 | #if USE_LOCKS |
4606 | ensure_initialization(); /* initialize in sys_alloc if not using locks */ |
4607 | #endif |
4608 | |
4609 | if (!PREACTION(gm)) { |
4610 | void* mem; |
4611 | size_t nb; |
4612 | if (bytes <= MAX_SMALL_REQUEST) { |
4613 | bindex_t idx; |
4614 | binmap_t smallbits; |
4615 | nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
4616 | idx = small_index(nb); |
4617 | smallbits = gm->smallmap >> idx; |
4618 | |
4619 | if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ |
4620 | mchunkptr b, p; |
4621 | idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
4622 | b = smallbin_at(gm, idx); |
4623 | p = b->fd; |
4624 | assert(chunksize(p) == small_index2size(idx)); |
4625 | unlink_first_small_chunk(gm, b, p, idx); |
4626 | set_inuse_and_pinuse(gm, p, small_index2size(idx)); |
4627 | mem = chunk2mem(p); |
4628 | check_malloced_chunk(gm, mem, nb); |
4629 | goto postaction; |
4630 | } |
4631 | |
4632 | else if (nb > gm->dvsize) { |
4633 | if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ |
4634 | mchunkptr b, p, r; |
4635 | size_t rsize; |
4636 | bindex_t i; |
4637 | binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); |
4638 | binmap_t leastbit = least_bit(leftbits); |
4639 | compute_bit2idx(leastbit, i); |
4640 | b = smallbin_at(gm, i); |
4641 | p = b->fd; |
4642 | assert(chunksize(p) == small_index2size(i)); |
4643 | unlink_first_small_chunk(gm, b, p, i); |
4644 | rsize = small_index2size(i) - nb; |
          /* Fit here cannot be remainderless if 4-byte sizes */
4646 | if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
4647 | set_inuse_and_pinuse(gm, p, small_index2size(i)); |
4648 | else { |
4649 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
4650 | r = chunk_plus_offset(p, nb); |
4651 | set_size_and_pinuse_of_free_chunk(r, rsize); |
4652 | replace_dv(gm, r, rsize); |
4653 | } |
4654 | mem = chunk2mem(p); |
4655 | check_malloced_chunk(gm, mem, nb); |
4656 | goto postaction; |
4657 | } |
4658 | |
4659 | else if (gm->treemap != 0 && (mem = tmalloc_small(gm, nb)) != 0) { |
4660 | check_malloced_chunk(gm, mem, nb); |
4661 | goto postaction; |
4662 | } |
4663 | } |
4664 | } |
4665 | else if (bytes >= MAX_REQUEST) |
4666 | nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
4667 | else { |
4668 | nb = pad_request(bytes); |
4669 | if (gm->treemap != 0 && (mem = tmalloc_large(gm, nb)) != 0) { |
4670 | check_malloced_chunk(gm, mem, nb); |
4671 | goto postaction; |
4672 | } |
4673 | } |
4674 | |
4675 | if (nb <= gm->dvsize) { |
4676 | size_t rsize = gm->dvsize - nb; |
4677 | mchunkptr p = gm->dv; |
4678 | if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ |
4679 | mchunkptr r = gm->dv = chunk_plus_offset(p, nb); |
4680 | gm->dvsize = rsize; |
4681 | set_size_and_pinuse_of_free_chunk(r, rsize); |
4682 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
4683 | } |
4684 | else { /* exhaust dv */ |
4685 | size_t dvs = gm->dvsize; |
4686 | gm->dvsize = 0; |
4687 | gm->dv = 0; |
4688 | set_inuse_and_pinuse(gm, p, dvs); |
4689 | } |
4690 | mem = chunk2mem(p); |
4691 | check_malloced_chunk(gm, mem, nb); |
4692 | goto postaction; |
4693 | } |
4694 | |
4695 | else if (nb < gm->topsize) { /* Split top */ |
4696 | size_t rsize = gm->topsize -= nb; |
4697 | mchunkptr p = gm->top; |
4698 | mchunkptr r = gm->top = chunk_plus_offset(p, nb); |
4699 | r->head = rsize | PINUSE_BIT; |
4700 | set_size_and_pinuse_of_inuse_chunk(gm, p, nb); |
4701 | mem = chunk2mem(p); |
4702 | check_top_chunk(gm, gm->top); |
4703 | check_malloced_chunk(gm, mem, nb); |
4704 | goto postaction; |
4705 | } |
4706 | |
4707 | mem = sys_alloc(gm, nb); |
4708 | |
4709 | postaction: |
4710 | POSTACTION(gm); |
4711 | return mem; |
4712 | } |
4713 | |
4714 | return 0; |
4715 | } |
4716 | |
4717 | /* ---------------------------- free --------------------------- */ |
4718 | |
4719 | void dlfree(void* mem) { |
4720 | /* |
     Consolidate freed chunks with preceding or succeeding bordering
4722 | free chunks, if they exist, and then place in a bin. Intermixed |
4723 | with special cases for top, dv, mmapped chunks, and usage errors. |
4724 | */ |
4725 | |
4726 | if (mem != 0) { |
4727 | mchunkptr p = mem2chunk(mem); |
4728 | #if FOOTERS |
4729 | mstate fm = get_mstate_for(p); |
4730 | if (!ok_magic(fm)) { |
4731 | USAGE_ERROR_ACTION(fm, p); |
4732 | return; |
4733 | } |
4734 | #else /* FOOTERS */ |
4735 | #define fm gm |
4736 | #endif /* FOOTERS */ |
4737 | if (!PREACTION(fm)) { |
4738 | check_inuse_chunk(fm, p); |
4739 | if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { |
4740 | size_t psize = chunksize(p); |
4741 | mchunkptr next = chunk_plus_offset(p, psize); |
4742 | if (!pinuse(p)) { |
4743 | size_t prevsize = p->prev_foot; |
4744 | if (is_mmapped(p)) { |
4745 | psize += prevsize + MMAP_FOOT_PAD; |
4746 | if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
4747 | fm->footprint -= psize; |
4748 | goto postaction; |
4749 | } |
4750 | else { |
4751 | mchunkptr prev = chunk_minus_offset(p, prevsize); |
4752 | psize += prevsize; |
4753 | p = prev; |
4754 | if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ |
4755 | if (p != fm->dv) { |
4756 | unlink_chunk(fm, p, prevsize); |
4757 | } |
4758 | else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
4759 | fm->dvsize = psize; |
4760 | set_free_with_pinuse(p, psize, next); |
4761 | goto postaction; |
4762 | } |
4763 | } |
4764 | else |
4765 | goto erroraction; |
4766 | } |
4767 | } |
4768 | |
4769 | if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { |
4770 | if (!cinuse(next)) { /* consolidate forward */ |
4771 | if (next == fm->top) { |
4772 | size_t tsize = fm->topsize += psize; |
4773 | fm->top = p; |
4774 | p->head = tsize | PINUSE_BIT; |
4775 | if (p == fm->dv) { |
4776 | fm->dv = 0; |
4777 | fm->dvsize = 0; |
4778 | } |
4779 | if (should_trim(fm, tsize)) |
4780 | sys_trim(fm, 0); |
4781 | goto postaction; |
4782 | } |
4783 | else if (next == fm->dv) { |
4784 | size_t dsize = fm->dvsize += psize; |
4785 | fm->dv = p; |
4786 | set_size_and_pinuse_of_free_chunk(p, dsize); |
4787 | goto postaction; |
4788 | } |
4789 | else { |
4790 | size_t nsize = chunksize(next); |
4791 | psize += nsize; |
4792 | unlink_chunk(fm, next, nsize); |
4793 | set_size_and_pinuse_of_free_chunk(p, psize); |
4794 | if (p == fm->dv) { |
4795 | fm->dvsize = psize; |
4796 | goto postaction; |
4797 | } |
4798 | } |
4799 | } |
4800 | else |
4801 | set_free_with_pinuse(p, psize, next); |
4802 | |
4803 | if (is_small(psize)) { |
4804 | insert_small_chunk(fm, p, psize); |
4805 | check_free_chunk(fm, p); |
4806 | } |
4807 | else { |
4808 | tchunkptr tp = (tchunkptr)p; |
4809 | insert_large_chunk(fm, tp, psize); |
4810 | check_free_chunk(fm, p); |
4811 | if (--fm->release_checks == 0) |
4812 | release_unused_segments(fm); |
4813 | } |
4814 | goto postaction; |
4815 | } |
4816 | } |
4817 | erroraction: |
4818 | USAGE_ERROR_ACTION(fm, p); |
4819 | postaction: |
4820 | POSTACTION(fm); |
4821 | } |
4822 | } |
4823 | #if !FOOTERS |
4824 | #undef fm |
4825 | #endif /* FOOTERS */ |
4826 | } |
4827 | |
4828 | void* dlcalloc(size_t n_elements, size_t elem_size) { |
4829 | void* mem; |
4830 | size_t req = 0; |
4831 | if (n_elements != 0) { |
4832 | req = n_elements * elem_size; |
4833 | if (((n_elements | elem_size) & ~(size_t)0xffff) && |
4834 | (req / n_elements != elem_size)) |
4835 | req = MAX_SIZE_T; /* force downstream failure on overflow */ |
4836 | } |
4837 | mem = dlmalloc(req); |
4838 | if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
4839 | memset(mem, 0, req); |
4840 | return mem; |
4841 | } |
4842 | |
4843 | #endif /* !ONLY_MSPACES */ |
4844 | |
4845 | /* ------------ Internal support for realloc, memalign, etc -------------- */ |
4846 | |
4847 | /* Try to realloc; only in-place unless can_move true */ |
4848 | static mchunkptr try_realloc_chunk(mstate m, mchunkptr p, size_t nb, |
4849 | int can_move) { |
4850 | mchunkptr newp = 0; |
4851 | size_t oldsize = chunksize(p); |
4852 | mchunkptr next = chunk_plus_offset(p, oldsize); |
4853 | if (RTCHECK(ok_address(m, p) && ok_inuse(p) && |
4854 | ok_next(p, next) && ok_pinuse(next))) { |
4855 | if (is_mmapped(p)) { |
4856 | newp = mmap_resize(m, p, nb, can_move); |
4857 | } |
4858 | else if (oldsize >= nb) { /* already big enough */ |
4859 | size_t rsize = oldsize - nb; |
4860 | if (rsize >= MIN_CHUNK_SIZE) { /* split off remainder */ |
4861 | mchunkptr r = chunk_plus_offset(p, nb); |
4862 | set_inuse(m, p, nb); |
4863 | set_inuse(m, r, rsize); |
4864 | dispose_chunk(m, r, rsize); |
4865 | } |
4866 | newp = p; |
4867 | } |
4868 | else if (next == m->top) { /* extend into top */ |
4869 | if (oldsize + m->topsize > nb) { |
4870 | size_t newsize = oldsize + m->topsize; |
4871 | size_t newtopsize = newsize - nb; |
4872 | mchunkptr newtop = chunk_plus_offset(p, nb); |
4873 | set_inuse(m, p, nb); |
        newtop->head = newtopsize | PINUSE_BIT;
4875 | m->top = newtop; |
4876 | m->topsize = newtopsize; |
4877 | newp = p; |
4878 | } |
4879 | } |
4880 | else if (next == m->dv) { /* extend into dv */ |
4881 | size_t dvs = m->dvsize; |
4882 | if (oldsize + dvs >= nb) { |
4883 | size_t dsize = oldsize + dvs - nb; |
4884 | if (dsize >= MIN_CHUNK_SIZE) { |
4885 | mchunkptr r = chunk_plus_offset(p, nb); |
4886 | mchunkptr n = chunk_plus_offset(r, dsize); |
4887 | set_inuse(m, p, nb); |
4888 | set_size_and_pinuse_of_free_chunk(r, dsize); |
4889 | clear_pinuse(n); |
4890 | m->dvsize = dsize; |
4891 | m->dv = r; |
4892 | } |
4893 | else { /* exhaust dv */ |
4894 | size_t newsize = oldsize + dvs; |
4895 | set_inuse(m, p, newsize); |
4896 | m->dvsize = 0; |
4897 | m->dv = 0; |
4898 | } |
4899 | newp = p; |
4900 | } |
4901 | } |
4902 | else if (!cinuse(next)) { /* extend into next free chunk */ |
4903 | size_t nextsize = chunksize(next); |
4904 | if (oldsize + nextsize >= nb) { |
4905 | size_t rsize = oldsize + nextsize - nb; |
4906 | unlink_chunk(m, next, nextsize); |
4907 | if (rsize < MIN_CHUNK_SIZE) { |
4908 | size_t newsize = oldsize + nextsize; |
4909 | set_inuse(m, p, newsize); |
4910 | } |
4911 | else { |
4912 | mchunkptr r = chunk_plus_offset(p, nb); |
4913 | set_inuse(m, p, nb); |
4914 | set_inuse(m, r, rsize); |
4915 | dispose_chunk(m, r, rsize); |
4916 | } |
4917 | newp = p; |
4918 | } |
4919 | } |
4920 | } |
4921 | else { |
4922 | USAGE_ERROR_ACTION(m, chunk2mem(p)); |
4923 | } |
4924 | return newp; |
4925 | } |
4926 | |
4927 | static void* internal_memalign(mstate m, size_t alignment, size_t bytes) { |
4928 | void* mem = 0; |
4929 | if (alignment < MIN_CHUNK_SIZE) /* must be at least a minimum chunk size */ |
4930 | alignment = MIN_CHUNK_SIZE; |
4931 | if ((alignment & (alignment-SIZE_T_ONE)) != 0) {/* Ensure a power of 2 */ |
4932 | size_t a = MALLOC_ALIGNMENT << 1; |
4933 | while (a < alignment) a <<= 1; |
4934 | alignment = a; |
4935 | } |
4936 | if (bytes >= MAX_REQUEST - alignment) { |
4937 | if (m != 0) { /* Test isn't needed but avoids compiler warning */ |
4938 | MALLOC_FAILURE_ACTION; |
4939 | } |
4940 | } |
4941 | else { |
4942 | size_t nb = request2size(bytes); |
4943 | size_t req = nb + alignment + MIN_CHUNK_SIZE - CHUNK_OVERHEAD; |
4944 | mem = internal_malloc(m, req); |
4945 | if (mem != 0) { |
4946 | mchunkptr p = mem2chunk(mem); |
4947 | if (PREACTION(m)) |
4948 | return 0; |
4949 | if ((((size_t)(mem)) & (alignment - 1)) != 0) { /* misaligned */ |
4950 | /* |
4951 | Find an aligned spot inside chunk. Since we need to give |
4952 | back leading space in a chunk of at least MIN_CHUNK_SIZE, if |
4953 | the first calculation places us at a spot with less than |
4954 | MIN_CHUNK_SIZE leader, we can move to the next aligned spot. |
4955 | We've allocated enough total room so that this is always |
4956 | possible. |
4957 | */ |
4958 | char* br = (char*)mem2chunk((size_t)(((size_t)((char*)mem + alignment - |
4959 | SIZE_T_ONE)) & |
4960 | -alignment)); |
4961 | char* pos = ((size_t)(br - (char*)(p)) >= MIN_CHUNK_SIZE)? |
4962 | br : br+alignment; |
4963 | mchunkptr newp = (mchunkptr)pos; |
4964 | size_t leadsize = pos - (char*)(p); |
4965 | size_t newsize = chunksize(p) - leadsize; |
4966 | |
4967 | if (is_mmapped(p)) { /* For mmapped chunks, just adjust offset */ |
4968 | newp->prev_foot = p->prev_foot + leadsize; |
4969 | newp->head = newsize; |
4970 | } |
4971 | else { /* Otherwise, give back leader, use the rest */ |
4972 | set_inuse(m, newp, newsize); |
4973 | set_inuse(m, p, leadsize); |
4974 | dispose_chunk(m, p, leadsize); |
4975 | } |
4976 | p = newp; |
4977 | } |
4978 | |
4979 | /* Give back spare room at the end */ |
4980 | if (!is_mmapped(p)) { |
4981 | size_t size = chunksize(p); |
4982 | if (size > nb + MIN_CHUNK_SIZE) { |
4983 | size_t remainder_size = size - nb; |
4984 | mchunkptr remainder = chunk_plus_offset(p, nb); |
4985 | set_inuse(m, p, nb); |
4986 | set_inuse(m, remainder, remainder_size); |
4987 | dispose_chunk(m, remainder, remainder_size); |
4988 | } |
4989 | } |
4990 | |
4991 | mem = chunk2mem(p); |
      assert(chunksize(p) >= nb);
4993 | assert(((size_t)mem & (alignment - 1)) == 0); |
4994 | check_inuse_chunk(m, p); |
4995 | POSTACTION(m); |
4996 | } |
4997 | } |
4998 | return mem; |
4999 | } |
5000 | |
5001 | /* |
5002 | Common support for independent_X routines, handling |
5003 | all of the combinations that can result. |
5004 | The opts arg has: |
5005 | bit 0 set if all elements are same size (using sizes[0]) |
5006 | bit 1 set if elements should be zeroed |
5007 | */ |
5008 | static void** ialloc(mstate m, |
5009 | size_t n_elements, |
5010 | size_t* sizes, |
5011 | int opts, |
5012 | void* chunks[]) { |
5013 | |
5014 | size_t element_size; /* chunksize of each element, if all same */ |
5015 | size_t contents_size; /* total size of elements */ |
5016 | size_t array_size; /* request size of pointer array */ |
5017 | void* mem; /* malloced aggregate space */ |
5018 | mchunkptr p; /* corresponding chunk */ |
5019 | size_t remainder_size; /* remaining bytes while splitting */ |
5020 | void** marray; /* either "chunks" or malloced ptr array */ |
5021 | mchunkptr array_chunk; /* chunk for malloced ptr array */ |
5022 | flag_t was_enabled; /* to disable mmap */ |
5023 | size_t size; |
5024 | size_t i; |
5025 | |
5026 | ensure_initialization(); |
5027 | /* compute array length, if needed */ |
5028 | if (chunks != 0) { |
5029 | if (n_elements == 0) |
5030 | return chunks; /* nothing to do */ |
5031 | marray = chunks; |
5032 | array_size = 0; |
5033 | } |
5034 | else { |
5035 | /* if empty req, must still return chunk representing empty array */ |
5036 | if (n_elements == 0) |
5037 | return (void**)internal_malloc(m, 0); |
5038 | marray = 0; |
5039 | array_size = request2size(n_elements * (sizeof(void*))); |
5040 | } |
5041 | |
5042 | /* compute total element size */ |
5043 | if (opts & 0x1) { /* all-same-size */ |
5044 | element_size = request2size(*sizes); |
5045 | contents_size = n_elements * element_size; |
5046 | } |
5047 | else { /* add up all the sizes */ |
5048 | element_size = 0; |
5049 | contents_size = 0; |
5050 | for (i = 0; i != n_elements; ++i) |
5051 | contents_size += request2size(sizes[i]); |
5052 | } |
5053 | |
5054 | size = contents_size + array_size; |
5055 | |
5056 | /* |
5057 | Allocate the aggregate chunk. First disable direct-mmapping so |
5058 | malloc won't use it, since we would not be able to later |
5059 | free/realloc space internal to a segregated mmap region. |
5060 | */ |
5061 | was_enabled = use_mmap(m); |
5062 | disable_mmap(m); |
5063 | mem = internal_malloc(m, size - CHUNK_OVERHEAD); |
5064 | if (was_enabled) |
5065 | enable_mmap(m); |
5066 | if (mem == 0) |
5067 | return 0; |
5068 | |
5069 | if (PREACTION(m)) return 0; |
5070 | p = mem2chunk(mem); |
5071 | remainder_size = chunksize(p); |
5072 | |
5073 | assert(!is_mmapped(p)); |
5074 | |
5075 | if (opts & 0x2) { /* optionally clear the elements */ |
5076 | memset((size_t*)mem, 0, remainder_size - SIZE_T_SIZE - array_size); |
5077 | } |
5078 | |
5079 | /* If not provided, allocate the pointer array as final part of chunk */ |
5080 | if (marray == 0) { |
5081 | size_t array_chunk_size; |
5082 | array_chunk = chunk_plus_offset(p, contents_size); |
5083 | array_chunk_size = remainder_size - contents_size; |
5084 | marray = (void**) (chunk2mem(array_chunk)); |
5085 | set_size_and_pinuse_of_inuse_chunk(m, array_chunk, array_chunk_size); |
5086 | remainder_size = contents_size; |
5087 | } |
5088 | |
5089 | /* split out elements */ |
5090 | for (i = 0; ; ++i) { |
5091 | marray[i] = chunk2mem(p); |
5092 | if (i != n_elements-1) { |
5093 | if (element_size != 0) |
5094 | size = element_size; |
5095 | else |
5096 | size = request2size(sizes[i]); |
5097 | remainder_size -= size; |
5098 | set_size_and_pinuse_of_inuse_chunk(m, p, size); |
5099 | p = chunk_plus_offset(p, size); |
5100 | } |
5101 | else { /* the final element absorbs any overallocation slop */ |
5102 | set_size_and_pinuse_of_inuse_chunk(m, p, remainder_size); |
5103 | break; |
5104 | } |
5105 | } |
5106 | |
5107 | #if DEBUG |
5108 | if (marray != chunks) { |
5109 | /* final element must have exactly exhausted chunk */ |
5110 | if (element_size != 0) { |
5111 | assert(remainder_size == element_size); |
5112 | } |
5113 | else { |
5114 | assert(remainder_size == request2size(sizes[i])); |
5115 | } |
5116 | check_inuse_chunk(m, mem2chunk(marray)); |
5117 | } |
5118 | for (i = 0; i != n_elements; ++i) |
5119 | check_inuse_chunk(m, mem2chunk(marray[i])); |
5120 | |
5121 | #endif /* DEBUG */ |
5122 | |
5123 | POSTACTION(m); |
5124 | return marray; |
5125 | } |
5126 | |
5127 | /* Try to free all pointers in the given array. |
   Note: this could be made faster by delaying consolidation,
   at the price of disabling some user integrity checks. We
   still optimize some consolidations by combining adjacent
   chunks before freeing, which will occur often if the chunks
   were allocated with ialloc or the array is sorted.
5133 | */ |
5134 | static size_t internal_bulk_free(mstate m, void* array[], size_t nelem) { |
5135 | size_t unfreed = 0; |
5136 | if (!PREACTION(m)) { |
5137 | void** a; |
5138 | void** fence = &(array[nelem]); |
5139 | for (a = array; a != fence; ++a) { |
5140 | void* mem = *a; |
5141 | if (mem != 0) { |
5142 | mchunkptr p = mem2chunk(mem); |
5143 | size_t psize = chunksize(p); |
5144 | #if FOOTERS |
5145 | if (get_mstate_for(p) != m) { |
5146 | ++unfreed; |
5147 | continue; |
5148 | } |
5149 | #endif |
5150 | check_inuse_chunk(m, p); |
5151 | *a = 0; |
5152 | if (RTCHECK(ok_address(m, p) && ok_inuse(p))) { |
5153 | void ** b = a + 1; /* try to merge with next chunk */ |
5154 | mchunkptr next = next_chunk(p); |
5155 | if (b != fence && *b == chunk2mem(next)) { |
5156 | size_t newsize = chunksize(next) + psize; |
5157 | set_inuse(m, p, newsize); |
5158 | *b = chunk2mem(p); |
5159 | } |
5160 | else |
5161 | dispose_chunk(m, p, psize); |
5162 | } |
5163 | else { |
5164 | CORRUPTION_ERROR_ACTION(m); |
5165 | break; |
5166 | } |
5167 | } |
5168 | } |
5169 | if (should_trim(m, m->topsize)) |
5170 | sys_trim(m, 0); |
5171 | POSTACTION(m); |
5172 | } |
5173 | return unfreed; |
5174 | } |
5175 | |
5176 | /* Traversal */ |
5177 | #if MALLOC_INSPECT_ALL |
5178 | static void internal_inspect_all(mstate m, |
5179 | void(*handler)(void *start, |
5180 | void *end, |
5181 | size_t used_bytes, |
5182 | void* callback_arg), |
5183 | void* arg) { |
5184 | if (is_initialized(m)) { |
5185 | mchunkptr top = m->top; |
5186 | msegmentptr s; |
5187 | for (s = &m->seg; s != 0; s = s->next) { |
5188 | mchunkptr q = align_as_chunk(s->base); |
5189 | while (segment_holds(s, q) && q->head != FENCEPOST_HEAD) { |
5190 | mchunkptr next = next_chunk(q); |
5191 | size_t sz = chunksize(q); |
5192 | size_t used; |
5193 | void* start; |
5194 | if (is_inuse(q)) { |
5195 | used = sz - CHUNK_OVERHEAD; /* must not be mmapped */ |
5196 | start = chunk2mem(q); |
5197 | } |
5198 | else { |
5199 | used = 0; |
5200 | if (is_small(sz)) { /* offset by possible bookkeeping */ |
5201 | start = (void*)((char*)q + sizeof(struct malloc_chunk)); |
5202 | } |
5203 | else { |
5204 | start = (void*)((char*)q + sizeof(struct malloc_tree_chunk)); |
5205 | } |
5206 | } |
5207 | if (start < (void*)next) /* skip if all space is bookkeeping */ |
5208 | handler(start, next, used, arg); |
5209 | if (q == top) |
5210 | break; |
5211 | q = next; |
5212 | } |
5213 | } |
5214 | } |
5215 | } |
5216 | #endif /* MALLOC_INSPECT_ALL */ |
5217 | |
5218 | /* ------------------ Exported realloc, memalign, etc -------------------- */ |
5219 | |
5220 | #if !ONLY_MSPACES |
5221 | |
5222 | void* dlrealloc(void* oldmem, size_t bytes) { |
5223 | void* mem = 0; |
5224 | if (oldmem == 0) { |
5225 | mem = dlmalloc(bytes); |
5226 | } |
5227 | else if (bytes >= MAX_REQUEST) { |
5228 | MALLOC_FAILURE_ACTION; |
5229 | } |
5230 | #ifdef REALLOC_ZERO_BYTES_FREES |
5231 | else if (bytes == 0) { |
5232 | dlfree(oldmem); |
5233 | } |
5234 | #endif /* REALLOC_ZERO_BYTES_FREES */ |
5235 | else { |
5236 | size_t nb = request2size(bytes); |
5237 | mchunkptr oldp = mem2chunk(oldmem); |
5238 | #if ! FOOTERS |
5239 | mstate m = gm; |
5240 | #else /* FOOTERS */ |
5241 | mstate m = get_mstate_for(oldp); |
5242 | if (!ok_magic(m)) { |
5243 | USAGE_ERROR_ACTION(m, oldmem); |
5244 | return 0; |
5245 | } |
5246 | #endif /* FOOTERS */ |
5247 | if (!PREACTION(m)) { |
5248 | mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); |
5249 | POSTACTION(m); |
5250 | if (newp != 0) { |
5251 | check_inuse_chunk(m, newp); |
5252 | mem = chunk2mem(newp); |
5253 | } |
5254 | else { |
5255 | mem = internal_malloc(m, bytes); |
5256 | if (mem != 0) { |
5257 | size_t oc = chunksize(oldp) - overhead_for(oldp); |
5258 | memcpy(mem, oldmem, (oc < bytes)? oc : bytes); |
5259 | internal_free(m, oldmem); |
5260 | } |
5261 | } |
5262 | } |
5263 | } |
5264 | return mem; |
5265 | } |
5266 | |
5267 | void* dlrealloc_in_place(void* oldmem, size_t bytes) { |
5268 | void* mem = 0; |
5269 | if (oldmem != 0) { |
5270 | if (bytes >= MAX_REQUEST) { |
5271 | MALLOC_FAILURE_ACTION; |
5272 | } |
5273 | else { |
5274 | size_t nb = request2size(bytes); |
5275 | mchunkptr oldp = mem2chunk(oldmem); |
5276 | #if ! FOOTERS |
5277 | mstate m = gm; |
5278 | #else /* FOOTERS */ |
5279 | mstate m = get_mstate_for(oldp); |
5280 | if (!ok_magic(m)) { |
5281 | USAGE_ERROR_ACTION(m, oldmem); |
5282 | return 0; |
5283 | } |
5284 | #endif /* FOOTERS */ |
5285 | if (!PREACTION(m)) { |
5286 | mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); |
5287 | POSTACTION(m); |
5288 | if (newp == oldp) { |
5289 | check_inuse_chunk(m, newp); |
5290 | mem = oldmem; |
5291 | } |
5292 | } |
5293 | } |
5294 | } |
5295 | return mem; |
5296 | } |
5297 | |
5298 | void* dlmemalign(size_t alignment, size_t bytes) { |
5299 | if (alignment <= MALLOC_ALIGNMENT) { |
5300 | return dlmalloc(bytes); |
5301 | } |
5302 | return internal_memalign(gm, alignment, bytes); |
5303 | } |
5304 | |
5305 | int dlposix_memalign(void** pp, size_t alignment, size_t bytes) { |
5306 | void* mem = 0; |
5307 | if (alignment == MALLOC_ALIGNMENT) |
5308 | mem = dlmalloc(bytes); |
5309 | else { |
5310 | size_t d = alignment / sizeof(void*); |
5311 | size_t r = alignment % sizeof(void*); |
5312 | if (r != 0 || d == 0 || (d & (d-SIZE_T_ONE)) != 0) |
5313 | return EINVAL; |
5314 | else if (bytes <= MAX_REQUEST - alignment) { |
5315 | if (alignment < MIN_CHUNK_SIZE) |
5316 | alignment = MIN_CHUNK_SIZE; |
5317 | mem = internal_memalign(gm, alignment, bytes); |
5318 | } |
5319 | } |
5320 | if (mem == 0) |
5321 | return ENOMEM; |
5322 | else { |
5323 | *pp = mem; |
5324 | return 0; |
5325 | } |
5326 | } |
5327 | |
5328 | void* dlvalloc(size_t bytes) { |
5329 | size_t pagesz; |
5330 | ensure_initialization(); |
5331 | pagesz = mparams.page_size; |
5332 | return dlmemalign(pagesz, bytes); |
5333 | } |
5334 | |
5335 | void* dlpvalloc(size_t bytes) { |
5336 | size_t pagesz; |
5337 | ensure_initialization(); |
5338 | pagesz = mparams.page_size; |
5339 | return dlmemalign(pagesz, (bytes + pagesz - SIZE_T_ONE) & ~(pagesz - SIZE_T_ONE)); |
5340 | } |
5341 | |
5342 | void** dlindependent_calloc(size_t n_elements, size_t elem_size, |
5343 | void* chunks[]) { |
5344 | size_t sz = elem_size; /* serves as 1-element array */ |
5345 | return ialloc(gm, n_elements, &sz, 3, chunks); |
5346 | } |
5347 | |
5348 | void** dlindependent_comalloc(size_t n_elements, size_t sizes[], |
5349 | void* chunks[]) { |
5350 | return ialloc(gm, n_elements, sizes, 0, chunks); |
5351 | } |
5352 | |
5353 | size_t dlbulk_free(void* array[], size_t nelem) { |
5354 | return internal_bulk_free(gm, array, nelem); |
5355 | } |
5356 | |
5357 | #if MALLOC_INSPECT_ALL |
5358 | void dlmalloc_inspect_all(void(*handler)(void *start, |
5359 | void *end, |
5360 | size_t used_bytes, |
5361 | void* callback_arg), |
5362 | void* arg) { |
5363 | ensure_initialization(); |
5364 | if (!PREACTION(gm)) { |
5365 | internal_inspect_all(gm, handler, arg); |
5366 | POSTACTION(gm); |
5367 | } |
5368 | } |
5369 | #endif /* MALLOC_INSPECT_ALL */ |
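
/*
  Sketch of an inspection callback (the handler name is illustrative):
  tallying bytes currently handed out to the application.

    static void count_used(void* start, void* end,
                           size_t used_bytes, void* arg) {
      (void)start; (void)end;
      *(size_t*)arg += used_bytes;
    }

    size_t total = 0;
    dlmalloc_inspect_all(count_used, &total);

  used_bytes is zero for free chunks; start..end brackets each chunk's
  usable space.
*/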
5370 | |
5371 | int dlmalloc_trim(size_t pad) { |
5372 | int result = 0; |
5373 | ensure_initialization(); |
5374 | if (!PREACTION(gm)) { |
5375 | result = sys_trim(gm, pad); |
5376 | POSTACTION(gm); |
5377 | } |
5378 | return result; |
5379 | } |
5380 | |
5381 | size_t dlmalloc_footprint(void) { |
5382 | return gm->footprint; |
5383 | } |
5384 | |
5385 | size_t dlmalloc_max_footprint(void) { |
5386 | return gm->max_footprint; |
5387 | } |
5388 | |
5389 | size_t dlmalloc_footprint_limit(void) { |
5390 | size_t maf = gm->footprint_limit; |
5391 | return maf == 0 ? MAX_SIZE_T : maf; |
5392 | } |
5393 | |
5394 | size_t dlmalloc_set_footprint_limit(size_t bytes) { |
5395 | size_t result; /* invert sense of 0 */ |
  if (bytes == 0)
    result = granularity_align(1); /* Use minimal size */
  else if (bytes == MAX_SIZE_T)
    result = 0;                    /* disable */
5400 | else |
5401 | result = granularity_align(bytes); |
5402 | return gm->footprint_limit = result; |
5403 | } |
5404 | |
5405 | #if !NO_MALLINFO |
5406 | struct mallinfo dlmallinfo(void) { |
5407 | return internal_mallinfo(gm); |
5408 | } |
5409 | #endif /* NO_MALLINFO */ |
5410 | |
5411 | #if !NO_MALLOC_STATS |
void dlmalloc_stats(void) {
5413 | internal_malloc_stats(gm); |
5414 | } |
5415 | #endif /* NO_MALLOC_STATS */ |
5416 | |
5417 | int dlmallopt(int param_number, int value) { |
5418 | return change_mparam(param_number, value); |
5419 | } |
5420 | |
5421 | size_t dlmalloc_usable_size(void* mem) { |
5422 | if (mem != 0) { |
5423 | mchunkptr p = mem2chunk(mem); |
5424 | if (is_inuse(p)) |
5425 | return chunksize(p) - overhead_for(p); |
5426 | } |
5427 | return 0; |
5428 | } |
5429 | |
5430 | #endif /* !ONLY_MSPACES */ |
5431 | |
5432 | /* ----------------------------- user mspaces ---------------------------- */ |
5433 | |
5434 | #if MSPACES |
5435 | |
5436 | static mstate init_user_mstate(char* tbase, size_t tsize) { |
5437 | size_t msize = pad_request(sizeof(struct malloc_state)); |
5438 | mchunkptr mn; |
5439 | mchunkptr msp = align_as_chunk(tbase); |
5440 | mstate m = (mstate)(chunk2mem(msp)); |
5441 | memset(m, 0, msize); |
5442 | (void)INITIAL_LOCK(&m->mutex); |
5443 | msp->head = (msize|INUSE_BITS); |
5444 | m->seg.base = m->least_addr = tbase; |
5445 | m->seg.size = m->footprint = m->max_footprint = tsize; |
5446 | m->magic = mparams.magic; |
5447 | m->release_checks = MAX_RELEASE_CHECK_RATE; |
5448 | m->mflags = mparams.default_mflags; |
5449 | m->extp = 0; |
5450 | m->exts = 0; |
5451 | disable_contiguous(m); |
5452 | init_bins(m); |
5453 | mn = next_chunk(mem2chunk(m)); |
5454 | init_top(m, mn, (size_t)((tbase + tsize) - (char*)mn) - TOP_FOOT_SIZE); |
5455 | check_top_chunk(m, m->top); |
5456 | return m; |
5457 | } |
5458 | |
5459 | mspace create_mspace(size_t capacity, int locked) { |
5460 | mstate m = 0; |
5461 | size_t msize; |
5462 | ensure_initialization(); |
5463 | msize = pad_request(sizeof(struct malloc_state)); |
5464 | if (capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { |
5465 | size_t rs = ((capacity == 0)? mparams.granularity : |
5466 | (capacity + TOP_FOOT_SIZE + msize)); |
5467 | size_t tsize = granularity_align(rs); |
5468 | char* tbase = (char*)(CALL_MMAP(tsize)); |
5469 | if (tbase != CMFAIL) { |
5470 | m = init_user_mstate(tbase, tsize); |
5471 | m->seg.sflags = USE_MMAP_BIT; |
5472 | set_lock(m, locked); |
5473 | } |
5474 | } |
5475 | return (mspace)m; |
5476 | } |
5477 | |
5478 | mspace create_mspace_with_base(void* base, size_t capacity, int locked) { |
5479 | mstate m = 0; |
5480 | size_t msize; |
5481 | ensure_initialization(); |
5482 | msize = pad_request(sizeof(struct malloc_state)); |
5483 | if (capacity > msize + TOP_FOOT_SIZE && |
5484 | capacity < (size_t) -(msize + TOP_FOOT_SIZE + mparams.page_size)) { |
5485 | m = init_user_mstate((char*)base, capacity); |
5486 | m->seg.sflags = EXTERN_BIT; |
5487 | set_lock(m, locked); |
5488 | } |
5489 | return (mspace)m; |
5490 | } |
5491 | |
5492 | int mspace_track_large_chunks(mspace msp, int enable) { |
5493 | int ret = 0; |
5494 | mstate ms = (mstate)msp; |
5495 | if (!PREACTION(ms)) { |
5496 | if (!use_mmap(ms)) { |
5497 | ret = 1; |
5498 | } |
5499 | if (!enable) { |
5500 | enable_mmap(ms); |
5501 | } else { |
5502 | disable_mmap(ms); |
5503 | } |
5504 | POSTACTION(ms); |
5505 | } |
5506 | return ret; |
5507 | } |
5508 | |
5509 | size_t destroy_mspace(mspace msp) { |
5510 | size_t freed = 0; |
5511 | mstate ms = (mstate)msp; |
5512 | if (ok_magic(ms)) { |
5513 | msegmentptr sp = &ms->seg; |
5514 | (void)DESTROY_LOCK(&ms->mutex); /* destroy before unmapped */ |
5515 | while (sp != 0) { |
5516 | char* base = sp->base; |
5517 | size_t size = sp->size; |
5518 | flag_t flag = sp->sflags; |
5519 | (void)base; /* placate people compiling -Wunused-variable */ |
5520 | sp = sp->next; |
5521 | if ((flag & USE_MMAP_BIT) && !(flag & EXTERN_BIT) && |
5522 | CALL_MUNMAP(base, size) == 0) |
5523 | freed += size; |
5524 | } |
5525 | } |
5526 | else { |
5527 | USAGE_ERROR_ACTION(ms,ms); |
5528 | } |
5529 | return freed; |
5530 | } |
5531 | |
5532 | /* |
5533 | mspace versions of routines are near-clones of the global |
5534 | versions. This is not so nice but better than the alternatives. |
5535 | */ |
5536 | |
5537 | void* mspace_malloc(mspace msp, size_t bytes) { |
5538 | mstate ms = (mstate)msp; |
5539 | if (!ok_magic(ms)) { |
5540 | USAGE_ERROR_ACTION(ms,ms); |
5541 | return 0; |
5542 | } |
5543 | if (!PREACTION(ms)) { |
5544 | void* mem; |
5545 | size_t nb; |
5546 | if (bytes <= MAX_SMALL_REQUEST) { |
5547 | bindex_t idx; |
5548 | binmap_t smallbits; |
5549 | nb = (bytes < MIN_REQUEST)? MIN_CHUNK_SIZE : pad_request(bytes); |
5550 | idx = small_index(nb); |
5551 | smallbits = ms->smallmap >> idx; |
5552 | |
5553 | if ((smallbits & 0x3U) != 0) { /* Remainderless fit to a smallbin. */ |
5554 | mchunkptr b, p; |
5555 | idx += ~smallbits & 1; /* Uses next bin if idx empty */ |
5556 | b = smallbin_at(ms, idx); |
5557 | p = b->fd; |
5558 | assert(chunksize(p) == small_index2size(idx)); |
5559 | unlink_first_small_chunk(ms, b, p, idx); |
5560 | set_inuse_and_pinuse(ms, p, small_index2size(idx)); |
5561 | mem = chunk2mem(p); |
5562 | check_malloced_chunk(ms, mem, nb); |
5563 | goto postaction; |
5564 | } |
5565 | |
5566 | else if (nb > ms->dvsize) { |
5567 | if (smallbits != 0) { /* Use chunk in next nonempty smallbin */ |
5568 | mchunkptr b, p, r; |
5569 | size_t rsize; |
5570 | bindex_t i; |
5571 | binmap_t leftbits = (smallbits << idx) & left_bits(idx2bit(idx)); |
5572 | binmap_t leastbit = least_bit(leftbits); |
5573 | compute_bit2idx(leastbit, i); |
5574 | b = smallbin_at(ms, i); |
5575 | p = b->fd; |
5576 | assert(chunksize(p) == small_index2size(i)); |
5577 | unlink_first_small_chunk(ms, b, p, i); |
5578 | rsize = small_index2size(i) - nb; |
          /* Fit here cannot be remainderless if 4-byte sizes */
5580 | if (SIZE_T_SIZE != 4 && rsize < MIN_CHUNK_SIZE) |
5581 | set_inuse_and_pinuse(ms, p, small_index2size(i)); |
5582 | else { |
5583 | set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
5584 | r = chunk_plus_offset(p, nb); |
5585 | set_size_and_pinuse_of_free_chunk(r, rsize); |
5586 | replace_dv(ms, r, rsize); |
5587 | } |
5588 | mem = chunk2mem(p); |
5589 | check_malloced_chunk(ms, mem, nb); |
5590 | goto postaction; |
5591 | } |
5592 | |
5593 | else if (ms->treemap != 0 && (mem = tmalloc_small(ms, nb)) != 0) { |
5594 | check_malloced_chunk(ms, mem, nb); |
5595 | goto postaction; |
5596 | } |
5597 | } |
5598 | } |
5599 | else if (bytes >= MAX_REQUEST) |
5600 | nb = MAX_SIZE_T; /* Too big to allocate. Force failure (in sys alloc) */ |
5601 | else { |
5602 | nb = pad_request(bytes); |
5603 | if (ms->treemap != 0 && (mem = tmalloc_large(ms, nb)) != 0) { |
5604 | check_malloced_chunk(ms, mem, nb); |
5605 | goto postaction; |
5606 | } |
5607 | } |
5608 | |
5609 | if (nb <= ms->dvsize) { |
5610 | size_t rsize = ms->dvsize - nb; |
5611 | mchunkptr p = ms->dv; |
5612 | if (rsize >= MIN_CHUNK_SIZE) { /* split dv */ |
5613 | mchunkptr r = ms->dv = chunk_plus_offset(p, nb); |
5614 | ms->dvsize = rsize; |
5615 | set_size_and_pinuse_of_free_chunk(r, rsize); |
5616 | set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
5617 | } |
5618 | else { /* exhaust dv */ |
5619 | size_t dvs = ms->dvsize; |
5620 | ms->dvsize = 0; |
5621 | ms->dv = 0; |
5622 | set_inuse_and_pinuse(ms, p, dvs); |
5623 | } |
5624 | mem = chunk2mem(p); |
5625 | check_malloced_chunk(ms, mem, nb); |
5626 | goto postaction; |
5627 | } |
5628 | |
5629 | else if (nb < ms->topsize) { /* Split top */ |
5630 | size_t rsize = ms->topsize -= nb; |
5631 | mchunkptr p = ms->top; |
5632 | mchunkptr r = ms->top = chunk_plus_offset(p, nb); |
5633 | r->head = rsize | PINUSE_BIT; |
5634 | set_size_and_pinuse_of_inuse_chunk(ms, p, nb); |
5635 | mem = chunk2mem(p); |
5636 | check_top_chunk(ms, ms->top); |
5637 | check_malloced_chunk(ms, mem, nb); |
5638 | goto postaction; |
5639 | } |
5640 | |
5641 | mem = sys_alloc(ms, nb); |
5642 | |
5643 | postaction: |
5644 | POSTACTION(ms); |
5645 | return mem; |
5646 | } |
5647 | |
5648 | return 0; |
5649 | } |
5650 | |
5651 | void mspace_free(mspace msp, void* mem) { |
5652 | if (mem != 0) { |
5653 | mchunkptr p = mem2chunk(mem); |
5654 | #if FOOTERS |
5655 | mstate fm = get_mstate_for(p); |
5656 | (void)msp; /* placate people compiling -Wunused */ |
5657 | #else /* FOOTERS */ |
5658 | mstate fm = (mstate)msp; |
5659 | #endif /* FOOTERS */ |
5660 | if (!ok_magic(fm)) { |
5661 | USAGE_ERROR_ACTION(fm, p); |
5662 | return; |
5663 | } |
5664 | if (!PREACTION(fm)) { |
5665 | check_inuse_chunk(fm, p); |
5666 | if (RTCHECK(ok_address(fm, p) && ok_inuse(p))) { |
5667 | size_t psize = chunksize(p); |
5668 | mchunkptr next = chunk_plus_offset(p, psize); |
5669 | if (!pinuse(p)) { |
5670 | size_t prevsize = p->prev_foot; |
5671 | if (is_mmapped(p)) { |
5672 | psize += prevsize + MMAP_FOOT_PAD; |
5673 | if (CALL_MUNMAP((char*)p - prevsize, psize) == 0) |
5674 | fm->footprint -= psize; |
5675 | goto postaction; |
5676 | } |
5677 | else { |
5678 | mchunkptr prev = chunk_minus_offset(p, prevsize); |
5679 | psize += prevsize; |
5680 | p = prev; |
5681 | if (RTCHECK(ok_address(fm, prev))) { /* consolidate backward */ |
5682 | if (p != fm->dv) { |
5683 | unlink_chunk(fm, p, prevsize); |
5684 | } |
5685 | else if ((next->head & INUSE_BITS) == INUSE_BITS) { |
5686 | fm->dvsize = psize; |
5687 | set_free_with_pinuse(p, psize, next); |
5688 | goto postaction; |
5689 | } |
5690 | } |
5691 | else |
5692 | goto erroraction; |
5693 | } |
5694 | } |
5695 | |
5696 | if (RTCHECK(ok_next(p, next) && ok_pinuse(next))) { |
5697 | if (!cinuse(next)) { /* consolidate forward */ |
5698 | if (next == fm->top) { |
5699 | size_t tsize = fm->topsize += psize; |
5700 | fm->top = p; |
5701 | p->head = tsize | PINUSE_BIT; |
5702 | if (p == fm->dv) { |
5703 | fm->dv = 0; |
5704 | fm->dvsize = 0; |
5705 | } |
5706 | if (should_trim(fm, tsize)) |
5707 | sys_trim(fm, 0); |
5708 | goto postaction; |
5709 | } |
5710 | else if (next == fm->dv) { |
5711 | size_t dsize = fm->dvsize += psize; |
5712 | fm->dv = p; |
5713 | set_size_and_pinuse_of_free_chunk(p, dsize); |
5714 | goto postaction; |
5715 | } |
5716 | else { |
5717 | size_t nsize = chunksize(next); |
5718 | psize += nsize; |
5719 | unlink_chunk(fm, next, nsize); |
5720 | set_size_and_pinuse_of_free_chunk(p, psize); |
5721 | if (p == fm->dv) { |
5722 | fm->dvsize = psize; |
5723 | goto postaction; |
5724 | } |
5725 | } |
5726 | } |
5727 | else |
5728 | set_free_with_pinuse(p, psize, next); |
5729 | |
5730 | if (is_small(psize)) { |
5731 | insert_small_chunk(fm, p, psize); |
5732 | check_free_chunk(fm, p); |
5733 | } |
5734 | else { |
5735 | tchunkptr tp = (tchunkptr)p; |
5736 | insert_large_chunk(fm, tp, psize); |
5737 | check_free_chunk(fm, p); |
5738 | if (--fm->release_checks == 0) |
5739 | release_unused_segments(fm); |
5740 | } |
5741 | goto postaction; |
5742 | } |
5743 | } |
5744 | erroraction: |
5745 | USAGE_ERROR_ACTION(fm, p); |
5746 | postaction: |
5747 | POSTACTION(fm); |
5748 | } |
5749 | } |
5750 | } |
5751 | |
5752 | void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size) { |
5753 | void* mem; |
5754 | size_t req = 0; |
5755 | mstate ms = (mstate)msp; |
5756 | if (!ok_magic(ms)) { |
5757 | USAGE_ERROR_ACTION(ms,ms); |
5758 | return 0; |
5759 | } |
5760 | if (n_elements != 0) { |
5761 | req = n_elements * elem_size; |
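    /* Overflow check: if both factors fit in 16 bits, their product fits
       in 32 bits and cannot overflow a size_t of at least that width, so
       the division test runs only when some high bits are set. */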
5762 | if (((n_elements | elem_size) & ~(size_t)0xffff) && |
5763 | (req / n_elements != elem_size)) |
5764 | req = MAX_SIZE_T; /* force downstream failure on overflow */ |
5765 | } |
5766 | mem = internal_malloc(ms, req); |
5767 | if (mem != 0 && calloc_must_clear(mem2chunk(mem))) |
5768 | memset(mem, 0, req); |
5769 | return mem; |
5770 | } |
5771 | |
5772 | void* mspace_realloc(mspace msp, void* oldmem, size_t bytes) { |
5773 | void* mem = 0; |
5774 | if (oldmem == 0) { |
5775 | mem = mspace_malloc(msp, bytes); |
5776 | } |
5777 | else if (bytes >= MAX_REQUEST) { |
5778 | MALLOC_FAILURE_ACTION; |
5779 | } |
5780 | #ifdef REALLOC_ZERO_BYTES_FREES |
5781 | else if (bytes == 0) { |
5782 | mspace_free(msp, oldmem); |
5783 | } |
5784 | #endif /* REALLOC_ZERO_BYTES_FREES */ |
5785 | else { |
5786 | size_t nb = request2size(bytes); |
5787 | mchunkptr oldp = mem2chunk(oldmem); |
5788 | #if ! FOOTERS |
5789 | mstate m = (mstate)msp; |
5790 | #else /* FOOTERS */ |
5791 | mstate m = get_mstate_for(oldp); |
5792 | if (!ok_magic(m)) { |
5793 | USAGE_ERROR_ACTION(m, oldmem); |
5794 | return 0; |
5795 | } |
5796 | #endif /* FOOTERS */ |
5797 | if (!PREACTION(m)) { |
5798 | mchunkptr newp = try_realloc_chunk(m, oldp, nb, 1); |
5799 | POSTACTION(m); |
5800 | if (newp != 0) { |
5801 | check_inuse_chunk(m, newp); |
5802 | mem = chunk2mem(newp); |
5803 | } |
5804 | else { |
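        /* Could not resize in place: allocate a new block, copy the
           smaller of the old and new sizes, and free the old block. */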
5805 | mem = mspace_malloc(m, bytes); |
5806 | if (mem != 0) { |
5807 | size_t oc = chunksize(oldp) - overhead_for(oldp); |
5808 | memcpy(mem, oldmem, (oc < bytes)? oc : bytes); |
5809 | mspace_free(m, oldmem); |
5810 | } |
5811 | } |
5812 | } |
5813 | } |
5814 | return mem; |
5815 | } |
5816 | |
5817 | void* mspace_realloc_in_place(mspace msp, void* oldmem, size_t bytes) { |
5818 | void* mem = 0; |
5819 | if (oldmem != 0) { |
5820 | if (bytes >= MAX_REQUEST) { |
5821 | MALLOC_FAILURE_ACTION; |
5822 | } |
5823 | else { |
5824 | size_t nb = request2size(bytes); |
5825 | mchunkptr oldp = mem2chunk(oldmem); |
5826 | #if ! FOOTERS |
5827 | mstate m = (mstate)msp; |
5828 | #else /* FOOTERS */ |
5829 | mstate m = get_mstate_for(oldp); |
5830 | (void)msp; /* placate people compiling -Wunused */ |
5831 | if (!ok_magic(m)) { |
5832 | USAGE_ERROR_ACTION(m, oldmem); |
5833 | return 0; |
5834 | } |
5835 | #endif /* FOOTERS */ |
5836 | if (!PREACTION(m)) { |
5837 | mchunkptr newp = try_realloc_chunk(m, oldp, nb, 0); |
5838 | POSTACTION(m); |
5839 | if (newp == oldp) { |
5840 | check_inuse_chunk(m, newp); |
5841 | mem = oldmem; |
5842 | } |
5843 | } |
5844 | } |
5845 | } |
5846 | return mem; |
5847 | } |
5848 | |
5849 | void* mspace_memalign(mspace msp, size_t alignment, size_t bytes) { |
5850 | mstate ms = (mstate)msp; |
5851 | if (!ok_magic(ms)) { |
5852 | USAGE_ERROR_ACTION(ms,ms); |
5853 | return 0; |
5854 | } |
5855 | if (alignment <= MALLOC_ALIGNMENT) |
5856 | return mspace_malloc(msp, bytes); |
5857 | return internal_memalign(ms, alignment, bytes); |
5858 | } |
5859 | |
5860 | void** mspace_independent_calloc(mspace msp, size_t n_elements, |
5861 | size_t elem_size, void* chunks[]) { |
5862 | size_t sz = elem_size; /* serves as 1-element array */ |
5863 | mstate ms = (mstate)msp; |
5864 | if (!ok_magic(ms)) { |
5865 | USAGE_ERROR_ACTION(ms,ms); |
5866 | return 0; |
5867 | } |
5868 | return ialloc(ms, n_elements, &sz, 3, chunks); |
5869 | } |
5870 | |
5871 | void** mspace_independent_comalloc(mspace msp, size_t n_elements, |
5872 | size_t sizes[], void* chunks[]) { |
5873 | mstate ms = (mstate)msp; |
5874 | if (!ok_magic(ms)) { |
5875 | USAGE_ERROR_ACTION(ms,ms); |
5876 | return 0; |
5877 | } |
5878 | return ialloc(ms, n_elements, sizes, 0, chunks); |
5879 | } |
5880 | |
5881 | size_t mspace_bulk_free(mspace msp, void* array[], size_t nelem) { |
5882 | return internal_bulk_free((mstate)msp, array, nelem); |
5883 | } |
5884 | |
5885 | #if MALLOC_INSPECT_ALL |
5886 | void mspace_inspect_all(mspace msp, |
5887 | void(*handler)(void *start, |
5888 | void *end, |
5889 | size_t used_bytes, |
5890 | void* callback_arg), |
5891 | void* arg) { |
5892 | mstate ms = (mstate)msp; |
5893 | if (ok_magic(ms)) { |
5894 | if (!PREACTION(ms)) { |
5895 | internal_inspect_all(ms, handler, arg); |
5896 | POSTACTION(ms); |
5897 | } |
5898 | } |
5899 | else { |
5900 | USAGE_ERROR_ACTION(ms,ms); |
5901 | } |
5902 | } |
5903 | #endif /* MALLOC_INSPECT_ALL */ |
5904 | |
5905 | int mspace_trim(mspace msp, size_t pad) { |
5906 | int result = 0; |
5907 | mstate ms = (mstate)msp; |
5908 | if (ok_magic(ms)) { |
5909 | if (!PREACTION(ms)) { |
5910 | result = sys_trim(ms, pad); |
5911 | POSTACTION(ms); |
5912 | } |
5913 | } |
5914 | else { |
5915 | USAGE_ERROR_ACTION(ms,ms); |
5916 | } |
5917 | return result; |
5918 | } |
5919 | |
5920 | #if !NO_MALLOC_STATS |
5921 | void mspace_malloc_stats(mspace msp) { |
5922 | mstate ms = (mstate)msp; |
5923 | if (ok_magic(ms)) { |
5924 | internal_malloc_stats(ms); |
5925 | } |
5926 | else { |
5927 | USAGE_ERROR_ACTION(ms,ms); |
5928 | } |
5929 | } |
#endif /* !NO_MALLOC_STATS */
5931 | |
5932 | size_t mspace_footprint(mspace msp) { |
5933 | size_t result = 0; |
5934 | mstate ms = (mstate)msp; |
5935 | if (ok_magic(ms)) { |
5936 | result = ms->footprint; |
5937 | } |
5938 | else { |
5939 | USAGE_ERROR_ACTION(ms,ms); |
5940 | } |
5941 | return result; |
5942 | } |
5943 | |
5944 | size_t mspace_max_footprint(mspace msp) { |
5945 | size_t result = 0; |
5946 | mstate ms = (mstate)msp; |
5947 | if (ok_magic(ms)) { |
5948 | result = ms->max_footprint; |
5949 | } |
5950 | else { |
5951 | USAGE_ERROR_ACTION(ms,ms); |
5952 | } |
5953 | return result; |
5954 | } |
5955 | |
5956 | size_t mspace_footprint_limit(mspace msp) { |
5957 | size_t result = 0; |
5958 | mstate ms = (mstate)msp; |
5959 | if (ok_magic(ms)) { |
5960 | size_t maf = ms->footprint_limit; |
5961 | result = (maf == 0) ? MAX_SIZE_T : maf; |
5962 | } |
5963 | else { |
5964 | USAGE_ERROR_ACTION(ms,ms); |
5965 | } |
5966 | return result; |
5967 | } |
5968 | |
5969 | size_t mspace_set_footprint_limit(mspace msp, size_t bytes) { |
5970 | size_t result = 0; |
5971 | mstate ms = (mstate)msp; |
5972 | if (ok_magic(ms)) { |
5973 | if (bytes == 0) |
5974 | result = granularity_align(1); /* Use minimal size */ |
    else if (bytes == MAX_SIZE_T)
5976 | result = 0; /* disable */ |
5977 | else |
5978 | result = granularity_align(bytes); |
5979 | ms->footprint_limit = result; |
5980 | } |
5981 | else { |
5982 | USAGE_ERROR_ACTION(ms,ms); |
5983 | } |
5984 | return result; |
5985 | } |
5986 | |
5987 | #if !NO_MALLINFO |
5988 | struct mallinfo mspace_mallinfo(mspace msp) { |
5989 | mstate ms = (mstate)msp; |
5990 | if (!ok_magic(ms)) { |
5991 | USAGE_ERROR_ACTION(ms,ms); |
5992 | } |
5993 | return internal_mallinfo(ms); |
5994 | } |
#endif /* !NO_MALLINFO */
5996 | |
5997 | size_t mspace_usable_size(const void* mem) { |
5998 | if (mem != 0) { |
5999 | mchunkptr p = mem2chunk(mem); |
6000 | if (is_inuse(p)) |
6001 | return chunksize(p) - overhead_for(p); |
6002 | } |
6003 | return 0; |
6004 | } |
6005 | |
6006 | int mspace_mallopt(int param_number, int value) { |
6007 | return change_mparam(param_number, value); |
6008 | } |
6009 | |
6010 | #endif /* MSPACES */ |
6011 | |
6012 | |
6013 | /* -------------------- Alternative MORECORE functions ------------------- */ |
6014 | |
6015 | /* |
6016 | Guidelines for creating a custom version of MORECORE: |
6017 | |
6018 | * For best performance, MORECORE should allocate in multiples of pagesize. |
6019 | * MORECORE may allocate more memory than requested. (Or even less, |
6020 | but this will usually result in a malloc failure.) |
6021 | * MORECORE must not allocate memory when given argument zero, but |
6022 | instead return one past the end address of memory from previous |
6023 | nonzero call. |
6024 | * For best performance, consecutive calls to MORECORE with positive |
6025 | arguments should return increasing addresses, indicating that |
6026 | space has been contiguously extended. |
6027 | * Even though consecutive calls to MORECORE need not return contiguous |
6028 | addresses, it must be OK for malloc'ed chunks to span multiple |
6029 | regions in those cases where they do happen to be contiguous. |
6030 | * MORECORE need not handle negative arguments -- it may instead |
6031 | just return MFAIL when given negative arguments. |
6032 | Negative arguments are always multiples of pagesize. MORECORE |
6033 | must not misinterpret negative args as large positive unsigned |
6034 | args. You can suppress all such calls from even occurring by defining |
    MORECORE_CANNOT_TRIM.
6036 | |
6037 | As an example alternative MORECORE, here is a custom allocator |
6038 | kindly contributed for pre-OSX macOS. It uses virtually but not |
6039 | necessarily physically contiguous non-paged memory (locked in, |
6040 | present and won't get swapped out). You can use it by uncommenting |
6041 | this section, adding some #includes, and setting up the appropriate |
6042 | defines above: |
6043 | |
6044 | #define MORECORE osMoreCore |
6045 | |
6046 | There is also a shutdown routine that should somehow be called for |
6047 | cleanup upon program exit. |
6048 | |
6049 | #define MAX_POOL_ENTRIES 100 |
6050 | #define MINIMUM_MORECORE_SIZE (64 * 1024U) |
6051 | static int next_os_pool; |
6052 | void *our_os_pools[MAX_POOL_ENTRIES]; |
6053 | |
6054 | void *osMoreCore(int size) |
6055 | { |
6056 | void *ptr = 0; |
6057 | static void *sbrk_top = 0; |
6058 | |
6059 | if (size > 0) |
6060 | { |
6061 | if (size < MINIMUM_MORECORE_SIZE) |
6062 | size = MINIMUM_MORECORE_SIZE; |
6063 | if (CurrentExecutionLevel() == kTaskLevel) |
6064 | ptr = PoolAllocateResident(size + RM_PAGE_SIZE, 0); |
6065 | if (ptr == 0) |
6066 | { |
6067 | return (void *) MFAIL; |
6068 | } |
6069 | // save ptrs so they can be freed during cleanup |
6070 | our_os_pools[next_os_pool] = ptr; |
6071 | next_os_pool++; |
6072 | ptr = (void *) ((((size_t) ptr) + RM_PAGE_MASK) & ~RM_PAGE_MASK); |
6073 | sbrk_top = (char *) ptr + size; |
6074 | return ptr; |
6075 | } |
6076 | else if (size < 0) |
6077 | { |
6078 | // we don't currently support shrink behavior |
6079 | return (void *) MFAIL; |
6080 | } |
6081 | else |
6082 | { |
6083 | return sbrk_top; |
6084 | } |
6085 | } |
6086 | |
6087 | // cleanup any allocated memory pools |
6088 | // called as last thing before shutting down driver |
6089 | |
6090 | void osCleanupMem(void) |
6091 | { |
6092 | void **ptr; |
6093 | |
6094 | for (ptr = our_os_pools; ptr < &our_os_pools[MAX_POOL_ENTRIES]; ptr++) |
6095 | if (*ptr) |
6096 | { |
6097 | PoolDeallocate(*ptr); |
6098 | *ptr = 0; |
6099 | } |
6100 | } |
6101 | |
6102 | */ |
6103 | |
6104 | |
6105 | /* ----------------------------------------------------------------------- |
6106 | History: |
6107 | v2.8.6 Wed Aug 29 06:57:58 2012 Doug Lea |
6108 | * fix bad comparison in dlposix_memalign |
6109 | * don't reuse adjusted asize in sys_alloc |
6110 | * add LOCK_AT_FORK -- thanks to Kirill Artamonov for the suggestion |
6111 | * reduce compiler warnings -- thanks to all who reported/suggested these |
6112 | |
6113 | v2.8.5 Sun May 22 10:26:02 2011 Doug Lea (dl at gee) |
6114 | * Always perform unlink checks unless INSECURE |
6115 | * Add posix_memalign. |
6116 | * Improve realloc to expand in more cases; expose realloc_in_place. |
6117 | Thanks to Peter Buhr for the suggestion. |
6118 | * Add footprint_limit, inspect_all, bulk_free. Thanks |
6119 | to Barry Hayes and others for the suggestions. |
6120 | * Internal refactorings to avoid calls while holding locks |
6121 | * Use non-reentrant locks by default. Thanks to Roland McGrath |
6122 | for the suggestion. |
6123 | * Small fixes to mspace_destroy, reset_on_error. |
6124 | * Various configuration extensions/changes. Thanks |
6125 | to all who contributed these. |
6126 | |
6127 | V2.8.4a Thu Apr 28 14:39:43 2011 (dl at gee.cs.oswego.edu) |
6128 | * Update Creative Commons URL |
6129 | |
6130 | V2.8.4 Wed May 27 09:56:23 2009 Doug Lea (dl at gee) |
6131 | * Use zeros instead of prev foot for is_mmapped |
6132 | * Add mspace_track_large_chunks; thanks to Jean Brouwers |
6133 | * Fix set_inuse in internal_realloc; thanks to Jean Brouwers |
6134 | * Fix insufficient sys_alloc padding when using 16byte alignment |
6135 | * Fix bad error check in mspace_footprint |
6136 | * Adaptations for ptmalloc; thanks to Wolfram Gloger. |
6137 | * Reentrant spin locks; thanks to Earl Chew and others |
6138 | * Win32 improvements; thanks to Niall Douglas and Earl Chew |
6139 | * Add NO_SEGMENT_TRAVERSAL and MAX_RELEASE_CHECK_RATE options |
6140 | * Extension hook in malloc_state |
6141 | * Various small adjustments to reduce warnings on some compilers |
6142 | * Various configuration extensions/changes for more platforms. Thanks |
6143 | to all who contributed these. |
6144 | |
6145 | V2.8.3 Thu Sep 22 11:16:32 2005 Doug Lea (dl at gee) |
6146 | * Add max_footprint functions |
6147 | * Ensure all appropriate literals are size_t |
6148 | * Fix conditional compilation problem for some #define settings |
6149 | * Avoid concatenating segments with the one provided |
6150 | in create_mspace_with_base |
6151 | * Rename some variables to avoid compiler shadowing warnings |
6152 | * Use explicit lock initialization. |
6153 | * Better handling of sbrk interference. |
6154 | * Simplify and fix segment insertion, trimming and mspace_destroy |
6155 | * Reinstate REALLOC_ZERO_BYTES_FREES option from 2.7.x |
6156 | * Thanks especially to Dennis Flanagan for help on these. |
6157 | |
6158 | V2.8.2 Sun Jun 12 16:01:10 2005 Doug Lea (dl at gee) |
6159 | * Fix memalign brace error. |
6160 | |
6161 | V2.8.1 Wed Jun 8 16:11:46 2005 Doug Lea (dl at gee) |
6162 | * Fix improper #endif nesting in C++ |
6163 | * Add explicit casts needed for C++ |
6164 | |
6165 | V2.8.0 Mon May 30 14:09:02 2005 Doug Lea (dl at gee) |
6166 | * Use trees for large bins |
6167 | * Support mspaces |
6168 | * Use segments to unify sbrk-based and mmap-based system allocation, |
6169 | removing need for emulation on most platforms without sbrk. |
6170 | * Default safety checks |
6171 | * Optional footer checks. Thanks to William Robertson for the idea. |
6172 | * Internal code refactoring |
6173 | * Incorporate suggestions and platform-specific changes. |
6174 | Thanks to Dennis Flanagan, Colin Plumb, Niall Douglas, |
6175 | Aaron Bachmann, Emery Berger, and others. |
6176 | * Speed up non-fastbin processing enough to remove fastbins. |
6177 | * Remove useless cfree() to avoid conflicts with other apps. |
6178 | * Remove internal memcpy, memset. Compilers handle builtins better. |
6179 | * Remove some options that no one ever used and rename others. |
6180 | |
6181 | V2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee) |
6182 | * Fix malloc_state bitmap array misdeclaration |
6183 | |
6184 | V2.7.1 Thu Jul 25 10:58:03 2002 Doug Lea (dl at gee) |
6185 | * Allow tuning of FIRST_SORTED_BIN_SIZE |
6186 | * Use PTR_UINT as type for all ptr->int casts. Thanks to John Belmonte. |
6187 | * Better detection and support for non-contiguousness of MORECORE. |
6188 | Thanks to Andreas Mueller, Conal Walsh, and Wolfram Gloger |
    * Bypass most of malloc if no frees. Thanks to Emery Berger.
    * Fix freeing of old top non-contiguous chunk in sysmalloc.
6191 | * Raised default trim and map thresholds to 256K. |
6192 | * Fix mmap-related #defines. Thanks to Lubos Lunak. |
6193 | * Fix copy macros; added LACKS_FCNTL_H. Thanks to Neal Walfield. |
6194 | * Branch-free bin calculation |
6195 | * Default trim and mmap thresholds now 256K. |
6196 | |
6197 | V2.7.0 Sun Mar 11 14:14:06 2001 Doug Lea (dl at gee) |
6198 | * Introduce independent_comalloc and independent_calloc. |
6199 | Thanks to Michael Pachos for motivation and help. |
6200 | * Make optional .h file available |
6201 | * Allow > 2GB requests on 32bit systems. |
6202 | * new WIN32 sbrk, mmap, munmap, lock code from <Walter@GeNeSys-e.de>. |
6203 | Thanks also to Andreas Mueller <a.mueller at paradatec.de>, |
6204 | and Anonymous. |
6205 | * Allow override of MALLOC_ALIGNMENT (Thanks to Ruud Waij for |
6206 | helping test this.) |
6207 | * memalign: check alignment arg |
6208 | * realloc: don't try to shift chunks backwards, since this |
6209 | leads to more fragmentation in some programs and doesn't |
6210 | seem to help in any others. |
6211 | * Collect all cases in malloc requiring system memory into sysmalloc |
6212 | * Use mmap as backup to sbrk |
6213 | * Place all internal state in malloc_state |
6214 | * Introduce fastbins (although similar to 2.5.1) |
6215 | * Many minor tunings and cosmetic improvements |
6216 | * Introduce USE_PUBLIC_MALLOC_WRAPPERS, USE_MALLOC_LOCK |
6217 | * Introduce MALLOC_FAILURE_ACTION, MORECORE_CONTIGUOUS |
6218 | Thanks to Tony E. Bennett <tbennett@nvidia.com> and others. |
6219 | * Include errno.h to support default failure action. |
6220 | |
6221 | V2.6.6 Sun Dec 5 07:42:19 1999 Doug Lea (dl at gee) |
6222 | * return null for negative arguments |
6223 | * Added Several WIN32 cleanups from Martin C. Fong <mcfong at yahoo.com> |
6224 | * Add 'LACKS_SYS_PARAM_H' for those systems without 'sys/param.h' |
6225 | (e.g. WIN32 platforms) |
6226 | * Cleanup header file inclusion for WIN32 platforms |
6227 | * Cleanup code to avoid Microsoft Visual C++ compiler complaints |
6228 | * Add 'USE_DL_PREFIX' to quickly allow co-existence with existing |
6229 | memory allocation routines |
6230 | * Set 'malloc_getpagesize' for WIN32 platforms (needs more work) |
6231 | * Use 'assert' rather than 'ASSERT' in WIN32 code to conform to |
6232 | usage of 'assert' in non-WIN32 code |
6233 | * Improve WIN32 'sbrk()' emulation's 'findRegion()' routine to |
6234 | avoid infinite loop |
6235 | * Always call 'fREe()' rather than 'free()' |
6236 | |
6237 | V2.6.5 Wed Jun 17 15:57:31 1998 Doug Lea (dl at gee) |
6238 | * Fixed ordering problem with boundary-stamping |
6239 | |
6240 | V2.6.3 Sun May 19 08:17:58 1996 Doug Lea (dl at gee) |
6241 | * Added pvalloc, as recommended by H.J. Liu |
6242 | * Added 64bit pointer support mainly from Wolfram Gloger |
6243 | * Added anonymously donated WIN32 sbrk emulation |
6244 | * Malloc, calloc, getpagesize: add optimizations from Raymond Nijssen |
6245 | * malloc_extend_top: fix mask error that caused wastage after |
6246 | foreign sbrks |
6247 | * Add linux mremap support code from HJ Liu |
6248 | |
6249 | V2.6.2 Tue Dec 5 06:52:55 1995 Doug Lea (dl at gee) |
6250 | * Integrated most documentation with the code. |
6251 | * Add support for mmap, with help from |
6252 | Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
6253 | * Use last_remainder in more cases. |
6254 | * Pack bins using idea from colin@nyx10.cs.du.edu |
    * Use ordered bins instead of best-fit threshold
6256 | * Eliminate block-local decls to simplify tracing and debugging. |
6257 | * Support another case of realloc via move into top |
    * Fix error occurring when initial sbrk_base not word-aligned.
6259 | * Rely on page size for units instead of SBRK_UNIT to |
6260 | avoid surprises about sbrk alignment conventions. |
6261 | * Add mallinfo, mallopt. Thanks to Raymond Nijssen |
6262 | (raymond@es.ele.tue.nl) for the suggestion. |
6263 | * Add `pad' argument to malloc_trim and top_pad mallopt parameter. |
6264 | * More precautions for cases where other routines call sbrk, |
6265 | courtesy of Wolfram Gloger (Gloger@lrz.uni-muenchen.de). |
6266 | * Added macros etc., allowing use in linux libc from |
6267 | H.J. Lu (hjl@gnu.ai.mit.edu) |
6268 | * Inverted this history list |
6269 | |
6270 | V2.6.1 Sat Dec 2 14:10:57 1995 Doug Lea (dl at gee) |
6271 | * Re-tuned and fixed to behave more nicely with V2.6.0 changes. |
6272 | * Removed all preallocation code since under current scheme |
6273 | the work required to undo bad preallocations exceeds |
6274 | the work saved in good cases for most test programs. |
6275 | * No longer use return list or unconsolidated bins since |
6276 | no scheme using them consistently outperforms those that don't |
6277 | given above changes. |
6278 | * Use best fit for very large chunks to prevent some worst-cases. |
6279 | * Added some support for debugging |
6280 | |
6281 | V2.6.0 Sat Nov 4 07:05:23 1995 Doug Lea (dl at gee) |
6282 | * Removed footers when chunks are in use. Thanks to |
6283 | Paul Wilson (wilson@cs.texas.edu) for the suggestion. |
6284 | |
6285 | V2.5.4 Wed Nov 1 07:54:51 1995 Doug Lea (dl at gee) |
6286 | * Added malloc_trim, with help from Wolfram Gloger |
6287 | (wmglo@Dent.MED.Uni-Muenchen.DE). |
6288 | |
6289 | V2.5.3 Tue Apr 26 10:16:01 1994 Doug Lea (dl at g) |
6290 | |
6291 | V2.5.2 Tue Apr 5 16:20:40 1994 Doug Lea (dl at g) |
6292 | * realloc: try to expand in both directions |
6293 | * malloc: swap order of clean-bin strategy; |
6294 | * realloc: only conditionally expand backwards |
6295 | * Try not to scavenge used bins |
6296 | * Use bin counts as a guide to preallocation |
6297 | * Occasionally bin return list chunks in first scan |
6298 | * Add a few optimizations from colin@nyx10.cs.du.edu |
6299 | |
6300 | V2.5.1 Sat Aug 14 15:40:43 1993 Doug Lea (dl at g) |
6301 | * faster bin computation & slightly different binning |
6302 | * merged all consolidations to one part of malloc proper |
6303 | (eliminating old malloc_find_space & malloc_clean_bin) |
6304 | * Scan 2 returns chunks (not just 1) |
6305 | * Propagate failure in realloc if malloc returns 0 |
6306 | * Add stuff to allow compilation on non-ANSI compilers |
6307 | from kpv@research.att.com |
6308 | |
6309 | V2.5 Sat Aug 7 07:41:59 1993 Doug Lea (dl at g.oswego.edu) |
6310 | * removed potential for odd address access in prev_chunk |
6311 | * removed dependency on getpagesize.h |
6312 | * misc cosmetics and a bit more internal documentation |
6313 | * anticosmetics: mangled names in macros to evade debugger strangeness |
6314 | * tested on sparc, hp-700, dec-mips, rs6000 |
6315 | with gcc & native cc (hp, dec only) allowing |
6316 | Detlefs & Zorn comparison study (in SIGPLAN Notices.) |
6317 | |
6318 | Trial version Fri Aug 28 13:14:29 1992 Doug Lea (dl at g.oswego.edu) |
6319 | * Based loosely on libg++-1.2X malloc. (It retains some of the overall |
6320 | structure of old version, but most details differ.) |
6321 | |
6322 | */ |
6323 | |
6324 | #endif /* !HAVE_MALLOC */ |
6325 | |
6326 | #ifdef HAVE_MALLOC |
6327 | static void * SDLCALL real_malloc(size_t s) { return malloc(s); } |
6328 | static void * SDLCALL real_calloc(size_t n, size_t s) { return calloc(n, s); } |
6329 | static void * SDLCALL real_realloc(void *p, size_t s) { return realloc(p,s); } |
6330 | static void SDLCALL real_free(void *p) { free(p); } |
6331 | #else |
6332 | #define real_malloc dlmalloc |
6333 | #define real_calloc dlcalloc |
6334 | #define real_realloc dlrealloc |
6335 | #define real_free dlfree |
6336 | #endif |
6337 | |
// mark the allocator entry points as KEEPALIVE so we can call these from JavaScript.
// otherwise they could get so aggressively inlined that their symbols
// don't exist at all in the final binary!
6341 | #ifdef SDL_PLATFORM_EMSCRIPTEN |
6342 | #include <emscripten/emscripten.h> |
6343 | extern SDL_DECLSPEC SDL_MALLOC EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_malloc(size_t size); |
6344 | extern SDL_DECLSPEC SDL_MALLOC SDL_ALLOC_SIZE2(1, 2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_calloc(size_t nmemb, size_t size); |
6345 | extern SDL_DECLSPEC SDL_ALLOC_SIZE(2) EMSCRIPTEN_KEEPALIVE void * SDLCALL SDL_realloc(void *mem, size_t size); |
6346 | extern SDL_DECLSPEC EMSCRIPTEN_KEEPALIVE void SDLCALL SDL_free(void *mem); |
6347 | #endif |
6348 | |
6349 | /* Memory functions used by SDL that can be replaced by the application */ |
6350 | static struct |
6351 | { |
6352 | SDL_malloc_func malloc_func; |
6353 | SDL_calloc_func calloc_func; |
6354 | SDL_realloc_func realloc_func; |
6355 | SDL_free_func free_func; |
6356 | SDL_AtomicInt num_allocations; |
6357 | } s_mem = { |
6358 | real_malloc, real_calloc, real_realloc, real_free, { 0 } |
6359 | }; |
6360 | |
6361 | // Define this if you want to track the number of allocations active |
6362 | // #define SDL_TRACK_ALLOCATION_COUNT |
6363 | #ifdef SDL_TRACK_ALLOCATION_COUNT |
6364 | #define INCREMENT_ALLOCATION_COUNT() (void)SDL_AtomicIncRef(&s_mem.num_allocations) |
6365 | #define DECREMENT_ALLOCATION_COUNT() (void)SDL_AtomicDecRef(&s_mem.num_allocations) |
6366 | #else |
6367 | #define INCREMENT_ALLOCATION_COUNT() |
6368 | #define DECREMENT_ALLOCATION_COUNT() |
6369 | #endif |
6370 | |
6371 | |
6372 | void SDL_GetOriginalMemoryFunctions(SDL_malloc_func *malloc_func, |
6373 | SDL_calloc_func *calloc_func, |
6374 | SDL_realloc_func *realloc_func, |
6375 | SDL_free_func *free_func) |
6376 | { |
6377 | if (malloc_func) { |
6378 | *malloc_func = real_malloc; |
6379 | } |
6380 | if (calloc_func) { |
6381 | *calloc_func = real_calloc; |
6382 | } |
6383 | if (realloc_func) { |
6384 | *realloc_func = real_realloc; |
6385 | } |
6386 | if (free_func) { |
6387 | *free_func = real_free; |
6388 | } |
6389 | } |
6390 | |
6391 | void SDL_GetMemoryFunctions(SDL_malloc_func *malloc_func, |
6392 | SDL_calloc_func *calloc_func, |
6393 | SDL_realloc_func *realloc_func, |
6394 | SDL_free_func *free_func) |
6395 | { |
6396 | if (malloc_func) { |
6397 | *malloc_func = s_mem.malloc_func; |
6398 | } |
6399 | if (calloc_func) { |
6400 | *calloc_func = s_mem.calloc_func; |
6401 | } |
6402 | if (realloc_func) { |
6403 | *realloc_func = s_mem.realloc_func; |
6404 | } |
6405 | if (free_func) { |
6406 | *free_func = s_mem.free_func; |
6407 | } |
6408 | } |
6409 | |
6410 | bool SDL_SetMemoryFunctions(SDL_malloc_func malloc_func, |
6411 | SDL_calloc_func calloc_func, |
6412 | SDL_realloc_func realloc_func, |
6413 | SDL_free_func free_func) |
6414 | { |
6415 | if (!malloc_func) { |
6416 | return SDL_InvalidParamError("malloc_func" ); |
6417 | } |
6418 | if (!calloc_func) { |
6419 | return SDL_InvalidParamError("calloc_func" ); |
6420 | } |
6421 | if (!realloc_func) { |
6422 | return SDL_InvalidParamError("realloc_func" ); |
6423 | } |
6424 | if (!free_func) { |
6425 | return SDL_InvalidParamError("free_func" ); |
6426 | } |
6427 | |
6428 | s_mem.malloc_func = malloc_func; |
6429 | s_mem.calloc_func = calloc_func; |
6430 | s_mem.realloc_func = realloc_func; |
6431 | s_mem.free_func = free_func; |
6432 | return true; |
6433 | } |
6434 | |
6435 | int SDL_GetNumAllocations(void) |
6436 | { |
6437 | #ifdef SDL_TRACK_ALLOCATION_COUNT |
6438 | return SDL_GetAtomicInt(&s_mem.num_allocations); |
6439 | #else |
6440 | return -1; |
6441 | #endif |
6442 | } |
6443 | |
6444 | void *SDL_malloc(size_t size) |
6445 | { |
6446 | void *mem; |
6447 | |
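    /* Force a minimum of 1 byte so a successful allocation always returns
       a unique, non-NULL pointer. */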
6448 | if (!size) { |
6449 | size = 1; |
6450 | } |
6451 | |
6452 | mem = s_mem.malloc_func(size); |
6453 | if (mem) { |
6454 | INCREMENT_ALLOCATION_COUNT(); |
6455 | } else { |
6456 | SDL_OutOfMemory(); |
6457 | } |
6458 | |
6459 | return mem; |
6460 | } |
6461 | |
6462 | void *SDL_calloc(size_t nmemb, size_t size) |
6463 | { |
6464 | void *mem; |
6465 | |
6466 | if (!nmemb || !size) { |
6467 | nmemb = 1; |
6468 | size = 1; |
6469 | } |
6470 | |
6471 | mem = s_mem.calloc_func(nmemb, size); |
6472 | if (mem) { |
6473 | INCREMENT_ALLOCATION_COUNT(); |
6474 | } else { |
6475 | SDL_OutOfMemory(); |
6476 | } |
6477 | |
6478 | return mem; |
6479 | } |
6480 | |
6481 | void *SDL_realloc(void *ptr, size_t size) |
6482 | { |
6483 | void *mem; |
6484 | |
6485 | if (!size) { |
6486 | size = 1; |
6487 | } |
6488 | |
6489 | mem = s_mem.realloc_func(ptr, size); |
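    /* Only count a realloc that created a new allocation (ptr == NULL);
       resizing an existing allocation leaves the count unchanged. */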
6490 | if (mem && !ptr) { |
6491 | INCREMENT_ALLOCATION_COUNT(); |
6492 | } else if (!mem) { |
6493 | SDL_OutOfMemory(); |
6494 | } |
6495 | |
6496 | return mem; |
6497 | } |
6498 | |
6499 | void SDL_free(void *ptr) |
6500 | { |
6501 | if (!ptr) { |
6502 | return; |
6503 | } |
6504 | |
6505 | s_mem.free_func(ptr); |
6506 | DECREMENT_ALLOCATION_COUNT(); |
6507 | } |
6508 | |