1 | /* |
2 | * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers |
3 | * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. |
4 | * Copyright 1996-1999 by Silicon Graphics. All rights reserved. |
5 | * Copyright 1999 by Hewlett-Packard Company. All rights reserved. |
6 | * Copyright (C) 2007 Free Software Foundation, Inc |
7 | * Copyright (c) 2000-2011 by Hewlett-Packard Development Company. |
8 | * |
9 | * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED |
10 | * OR IMPLIED. ANY USE IS AT YOUR OWN RISK. |
11 | * |
12 | * Permission is hereby granted to use or copy this program |
13 | * for any purpose, provided the above notices are retained on all copies. |
14 | * Permission to modify the code and to distribute modified code is granted, |
15 | * provided the above notices are retained, and a notice that the code was |
16 | * modified is included with the above copyright notice. |
17 | */ |
18 | |
19 | /* |
20 | * Note that this defines a large number of tuning hooks, which can |
21 | * safely be ignored in nearly all cases. For normal use it suffices |
22 | * to call only GC_MALLOC and perhaps GC_REALLOC. |
23 | * For better performance, also look at GC_MALLOC_ATOMIC, and |
24 | * GC_enable_incremental. If you need an action to be performed |
25 | * immediately before an object is collected, look at GC_register_finalizer. |
26 | * If you are using Solaris threads, look at the end of this file. |
27 | * Everything else is best ignored unless you encounter performance |
28 | * problems. |
29 | */ |
30 | |
31 | #ifndef GC_H |
32 | #define GC_H |
33 | |
34 | #include "gc_version.h" |
35 | /* Define version numbers here to allow test on build machine */ |
36 | /* for cross-builds. Note that this defines the header */ |
37 | /* version number, which may or may not match that of the */ |
38 | /* dynamic library. GC_get_version() can be used to obtain */ |
39 | /* the latter. */ |
40 | |
41 | #include "gc_config_macros.h" |
42 | |
43 | #ifdef __cplusplus |
44 | extern "C" { |
45 | #endif |
46 | |
47 | typedef void * GC_PTR; /* preserved only for backward compatibility */ |
48 | |
/* Define word and signed_word to be unsigned and signed types of the  */
/* same size as char * or void *.  There seems to be no way to do this */
/* even semi-portably.  The following is probably no better/worse      */
52 | /* than almost anything else. */ |
53 | /* The ANSI standard suggests that size_t and ptrdiff_t might be */ |
54 | /* better choices. But those had incorrect definitions on some older */ |
55 | /* systems. Notably "typedef int size_t" is WRONG. */ |
#ifdef _WIN64
  /* NOTE(review): MSVC treats __int64 as a keyword, not a macro, so    */
  /* this #ifdef is normally false there and the #else branch is taken; */
  /* both branches yield 64-bit types on Win64 -- confirm on the target */
  /* compiler.                                                          */
# ifdef __int64
    typedef unsigned __int64 GC_word;
    typedef __int64 GC_signed_word;
# else
    typedef unsigned long long GC_word;
    typedef long long GC_signed_word;
# endif
#else
  /* On LP64 Unix-like targets, long is pointer-sized; on ILP32 targets */
  /* both long and pointers are 32 bits, so long works in either case.  */
  typedef unsigned long GC_word;
  typedef long GC_signed_word;
#endif
68 | |
69 | /* Get the GC library version. The returned value is a constant in the */ |
70 | /* form: ((version_major<<16) | (version_minor<<8) | version_micro). */ |
71 | GC_API unsigned GC_CALL GC_get_version(void); |
72 | |
73 | /* Public read-only variables */ |
74 | /* The supplied getter functions are preferred for new code. */ |
75 | |
76 | GC_API GC_ATTR_DEPRECATED GC_word GC_gc_no; |
77 | /* Counter incremented per collection. */ |
78 | /* Includes empty GCs at startup. */ |
79 | GC_API GC_word GC_CALL GC_get_gc_no(void); |
80 | /* GC_get_gc_no() is unsynchronized, so */ |
81 | /* it requires GC_call_with_alloc_lock() to */ |
82 | /* avoid data races on multiprocessors. */ |
83 | |
84 | #ifdef GC_THREADS |
85 | GC_API GC_ATTR_DEPRECATED int GC_parallel; |
86 | /* GC is parallelized for performance on */ |
87 | /* multiprocessors. Currently set only */ |
88 | /* implicitly if collector is built with */ |
89 | /* PARALLEL_MARK defined and if either: */ |
90 | /* Env variable GC_NPROC is set to > 1, or */ |
91 | /* GC_NPROC is not set and this is an MP. */ |
92 | /* If GC_parallel is on (non-zero), incremental */ |
93 | /* collection is only partially functional, */ |
94 | /* and may not be desirable. The getter does */ |
95 | /* not use or need synchronization (i.e. */ |
96 | /* acquiring the GC lock). Starting from */ |
97 | /* GC v7.3, GC_parallel value is equal to the */ |
98 | /* number of marker threads minus one (i.e. */ |
99 | /* number of existing parallel marker threads */ |
100 | /* excluding the initiating one). */ |
101 | GC_API int GC_CALL GC_get_parallel(void); |
102 | #endif |
103 | |
104 | |
105 | /* Public R/W variables */ |
106 | /* The supplied setter and getter functions are preferred for new code. */ |
107 | |
108 | typedef void * (GC_CALLBACK * GC_oom_func)(size_t /* bytes_requested */); |
109 | GC_API GC_ATTR_DEPRECATED GC_oom_func GC_oom_fn; |
110 | /* When there is insufficient memory to satisfy */ |
111 | /* an allocation request, we return */ |
112 | /* (*GC_oom_fn)(size). By default this just */ |
113 | /* returns NULL. */ |
114 | /* If it returns, it must return 0 or a valid */ |
115 | /* pointer to a previously allocated heap */ |
116 | /* object. GC_oom_fn must not be 0. */ |
117 | /* Both the supplied setter and the getter */ |
118 | /* acquire the GC lock (to avoid data races). */ |
119 | GC_API void GC_CALL GC_set_oom_fn(GC_oom_func) GC_ATTR_NONNULL(1); |
120 | GC_API GC_oom_func GC_CALL GC_get_oom_fn(void); |
121 | |
122 | typedef void (GC_CALLBACK * GC_on_heap_resize_proc)(GC_word /* new_size */); |
123 | GC_API GC_ATTR_DEPRECATED GC_on_heap_resize_proc GC_on_heap_resize; |
124 | /* Invoked when the heap grows or shrinks. */ |
125 | /* Called with the world stopped (and the */ |
126 | /* allocation lock held). May be 0. */ |
127 | GC_API void GC_CALL GC_set_on_heap_resize(GC_on_heap_resize_proc); |
128 | GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void); |
129 | /* Both the supplied setter and the getter */ |
130 | /* acquire the GC lock (to avoid data races). */ |
131 | |
132 | GC_API GC_ATTR_DEPRECATED int GC_find_leak; |
133 | /* Do not actually garbage collect, but simply */ |
134 | /* report inaccessible memory that was not */ |
135 | /* deallocated with GC_free. Initial value */ |
136 | /* is determined by FIND_LEAK macro. */ |
137 | /* The value should not typically be modified */ |
138 | /* after GC initialization (and, thus, it does */ |
139 | /* not use or need synchronization). */ |
140 | GC_API void GC_CALL GC_set_find_leak(int); |
141 | GC_API int GC_CALL GC_get_find_leak(void); |
142 | |
143 | GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers; |
144 | /* Arrange for pointers to object interiors to */ |
145 | /* be recognized as valid. Typically should */ |
146 | /* not be changed after GC initialization (in */ |
147 | /* case of calling it after the GC is */ |
148 | /* initialized, the setter acquires the GC lock */ |
149 | /* (to avoid data races). The initial value */ |
150 | /* depends on whether the GC is built with */ |
151 | /* ALL_INTERIOR_POINTERS macro defined or not. */ |
152 | /* Unless DONT_ADD_BYTE_AT_END is defined, this */ |
153 | /* also affects whether sizes are increased by */ |
154 | /* at least a byte to allow "off the end" */ |
155 | /* pointer recognition. Must be only 0 or 1. */ |
156 | GC_API void GC_CALL GC_set_all_interior_pointers(int); |
157 | GC_API int GC_CALL GC_get_all_interior_pointers(void); |
158 | |
159 | GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand; |
160 | /* If nonzero, finalizers will only be run in */ |
161 | /* response to an explicit GC_invoke_finalizers */ |
162 | /* call. The default is determined by whether */ |
163 | /* the FINALIZE_ON_DEMAND macro is defined */ |
164 | /* when the collector is built. */ |
165 | /* The setter and getter are unsynchronized. */ |
166 | GC_API void GC_CALL GC_set_finalize_on_demand(int); |
167 | GC_API int GC_CALL GC_get_finalize_on_demand(void); |
168 | |
169 | GC_API GC_ATTR_DEPRECATED int GC_java_finalization; |
170 | /* Mark objects reachable from finalizable */ |
171 | /* objects in a separate post-pass. This makes */ |
172 | /* it a bit safer to use non-topologically- */ |
173 | /* ordered finalization. Default value is */ |
174 | /* determined by JAVA_FINALIZATION macro. */ |
175 | /* Enables register_finalizer_unreachable to */ |
176 | /* work correctly. */ |
177 | /* The setter and getter are unsynchronized. */ |
178 | GC_API void GC_CALL GC_set_java_finalization(int); |
179 | GC_API int GC_CALL GC_get_java_finalization(void); |
180 | |
181 | typedef void (GC_CALLBACK * GC_finalizer_notifier_proc)(void); |
182 | GC_API GC_ATTR_DEPRECATED GC_finalizer_notifier_proc GC_finalizer_notifier; |
183 | /* Invoked by the collector when there are */ |
184 | /* objects to be finalized. Invoked at most */ |
185 | /* once per GC cycle. Never invoked unless */ |
186 | /* GC_finalize_on_demand is set. */ |
187 | /* Typically this will notify a finalization */ |
188 | /* thread, which will call GC_invoke_finalizers */ |
189 | /* in response. May be 0 (means no notifier). */ |
190 | /* Both the supplied setter and the getter */ |
191 | /* acquire the GC lock (to avoid data races). */ |
192 | GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc); |
193 | GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void); |
194 | |
195 | GC_API |
196 | # ifndef GC_DONT_GC |
197 | GC_ATTR_DEPRECATED |
198 | # endif |
199 | int GC_dont_gc; /* != 0 ==> Don't collect. In versions 6.2a1+, */ |
200 | /* this overrides explicit GC_gcollect() calls. */ |
201 | /* Used as a counter, so that nested enabling */ |
202 | /* and disabling work correctly. Should */ |
203 | /* normally be updated with GC_enable() and */ |
204 | /* GC_disable() calls. Direct assignment to */ |
205 | /* GC_dont_gc is deprecated. To check whether */ |
206 | /* GC is disabled, GC_is_disabled() is */ |
207 | /* preferred for new code. */ |
208 | |
209 | GC_API GC_ATTR_DEPRECATED int GC_dont_expand; |
210 | /* Do not expand the heap unless explicitly */ |
211 | /* requested or forced to. The setter and */ |
212 | /* getter are unsynchronized. */ |
213 | GC_API void GC_CALL GC_set_dont_expand(int); |
214 | GC_API int GC_CALL GC_get_dont_expand(void); |
215 | |
216 | GC_API GC_ATTR_DEPRECATED int GC_use_entire_heap; |
217 | /* Causes the non-incremental collector to use the */ |
218 | /* entire heap before collecting. This was the only */ |
219 | /* option for GC versions < 5.0. This sometimes */ |
220 | /* results in more large block fragmentation, since */ |
221 | /* very large blocks will tend to get broken up */ |
222 | /* during each GC cycle. It is likely to result in a */ |
223 | /* larger working set, but lower collection */ |
224 | /* frequencies, and hence fewer instructions executed */ |
225 | /* in the collector. */ |
226 | |
227 | GC_API GC_ATTR_DEPRECATED int GC_full_freq; |
228 | /* Number of partial collections between */ |
229 | /* full collections. Matters only if */ |
230 | /* GC_incremental is set. */ |
231 | /* Full collections are also triggered if */ |
232 | /* the collector detects a substantial */ |
233 | /* increase in the number of in-use heap */ |
234 | /* blocks. Values in the tens are now */ |
235 | /* perfectly reasonable, unlike for */ |
236 | /* earlier GC versions. */ |
237 | /* The setter and getter are unsynchronized, so */ |
238 | /* GC_call_with_alloc_lock() is required to */ |
239 | /* avoid data races (if the value is modified */ |
240 | /* after the GC is put to multi-threaded mode). */ |
241 | GC_API void GC_CALL GC_set_full_freq(int); |
242 | GC_API int GC_CALL GC_get_full_freq(void); |
243 | |
244 | GC_API GC_ATTR_DEPRECATED GC_word GC_non_gc_bytes; |
245 | /* Bytes not considered candidates for */ |
246 | /* collection. Used only to control scheduling */ |
247 | /* of collections. Updated by */ |
248 | /* GC_malloc_uncollectable and GC_free. */ |
249 | /* Wizards only. */ |
250 | /* The setter and getter are unsynchronized, so */ |
251 | /* GC_call_with_alloc_lock() is required to */ |
252 | /* avoid data races (if the value is modified */ |
253 | /* after the GC is put to multi-threaded mode). */ |
254 | GC_API void GC_CALL GC_set_non_gc_bytes(GC_word); |
255 | GC_API GC_word GC_CALL GC_get_non_gc_bytes(void); |
256 | |
257 | GC_API GC_ATTR_DEPRECATED int GC_no_dls; |
258 | /* Don't register dynamic library data segments. */ |
259 | /* Wizards only. Should be used only if the */ |
260 | /* application explicitly registers all roots. */ |
261 | /* (In some environments like Microsoft Windows */ |
262 | /* and Apple's Darwin, this may also prevent */ |
263 | /* registration of the main data segment as part */ |
264 | /* of the root set.) */ |
265 | /* The setter and getter are unsynchronized. */ |
266 | GC_API void GC_CALL GC_set_no_dls(int); |
267 | GC_API int GC_CALL GC_get_no_dls(void); |
268 | |
269 | GC_API GC_ATTR_DEPRECATED GC_word GC_free_space_divisor; |
270 | /* We try to make sure that we allocate at */ |
271 | /* least N/GC_free_space_divisor bytes between */ |
272 | /* collections, where N is twice the number */ |
273 | /* of traced bytes, plus the number of untraced */ |
274 | /* bytes (bytes in "atomic" objects), plus */ |
275 | /* a rough estimate of the root set size. */ |
276 | /* N approximates GC tracing work per GC. */ |
277 | /* Initially, GC_free_space_divisor = 3. */ |
278 | /* Increasing its value will use less space */ |
279 | /* but more collection time. Decreasing it */ |
280 | /* will appreciably decrease collection time */ |
281 | /* at the expense of space. */ |
282 | /* The setter and getter are unsynchronized, so */ |
283 | /* GC_call_with_alloc_lock() is required to */ |
284 | /* avoid data races (if the value is modified */ |
285 | /* after the GC is put to multi-threaded mode). */ |
286 | GC_API void GC_CALL GC_set_free_space_divisor(GC_word); |
287 | GC_API GC_word GC_CALL GC_get_free_space_divisor(void); |
288 | |
289 | GC_API GC_ATTR_DEPRECATED GC_word GC_max_retries; |
290 | /* The maximum number of GCs attempted before */ |
291 | /* reporting out of memory after heap */ |
292 | /* expansion fails. Initially 0. */ |
293 | /* The setter and getter are unsynchronized, so */ |
294 | /* GC_call_with_alloc_lock() is required to */ |
295 | /* avoid data races (if the value is modified */ |
296 | /* after the GC is put to multi-threaded mode). */ |
297 | GC_API void GC_CALL GC_set_max_retries(GC_word); |
298 | GC_API GC_word GC_CALL GC_get_max_retries(void); |
299 | |
300 | |
301 | GC_API GC_ATTR_DEPRECATED char *GC_stackbottom; |
302 | /* Cool end of user stack. */ |
303 | /* May be set in the client prior to */ |
304 | /* calling any GC_ routines. This */ |
305 | /* avoids some overhead, and */ |
306 | /* potentially some signals that can */ |
307 | /* confuse debuggers. Otherwise the */ |
308 | /* collector attempts to set it */ |
309 | /* automatically. */ |
310 | /* For multi-threaded code, this is the */ |
311 | /* cold end of the stack for the */ |
312 | /* primordial thread. Portable clients */ |
313 | /* should use GC_get_stack_base(), */ |
314 | /* GC_call_with_gc_active() and */ |
315 | /* GC_register_my_thread() instead. */ |
316 | |
317 | GC_API GC_ATTR_DEPRECATED int GC_dont_precollect; |
318 | /* Do not collect as part of GC */ |
319 | /* initialization. Should be set only */ |
320 | /* if the client wants a chance to */ |
321 | /* manually initialize the root set */ |
322 | /* before the first collection. */ |
323 | /* Interferes with blacklisting. */ |
324 | /* Wizards only. The setter and getter */ |
325 | /* are unsynchronized (and no external */ |
326 | /* locking is needed since the value is */ |
327 | /* accessed at GC initialization only). */ |
328 | GC_API void GC_CALL GC_set_dont_precollect(int); |
329 | GC_API int GC_CALL GC_get_dont_precollect(void); |
330 | |
331 | GC_API GC_ATTR_DEPRECATED unsigned long GC_time_limit; |
                        /* If incremental collection is enabled, */
                        /* we try to terminate collections */
                        /* after this many milliseconds. Not a */
335 | /* hard time bound. Setting this to */ |
336 | /* GC_TIME_UNLIMITED will essentially */ |
337 | /* disable incremental collection while */ |
338 | /* leaving generational collection */ |
339 | /* enabled. */ |
340 | #define GC_TIME_UNLIMITED 999999 |
                        /* Setting GC_time_limit to this value */
                        /* will disable the "pause time exceeded" */
                        /* tests. */
344 | /* The setter and getter are unsynchronized, so */ |
345 | /* GC_call_with_alloc_lock() is required to */ |
346 | /* avoid data races (if the value is modified */ |
347 | /* after the GC is put to multi-threaded mode). */ |
348 | GC_API void GC_CALL GC_set_time_limit(unsigned long); |
349 | GC_API unsigned long GC_CALL GC_get_time_limit(void); |
350 | |
351 | /* Public procedures */ |
352 | |
353 | /* Set whether the GC will allocate executable memory pages or not. */ |
354 | /* A non-zero argument instructs the collector to allocate memory with */ |
355 | /* the executable flag on. Must be called before the collector is */ |
356 | /* initialized. May have no effect on some platforms. The default */ |
357 | /* value is controlled by NO_EXECUTE_PERMISSION macro (if present then */ |
358 | /* the flag is off). Portable clients should have */ |
359 | /* GC_set_pages_executable(1) call (before GC_INIT) provided they are */ |
360 | /* going to execute code on any of the GC-allocated memory objects. */ |
361 | GC_API void GC_CALL GC_set_pages_executable(int); |
362 | |
363 | /* Returns non-zero value if the GC is set to the allocate-executable */ |
364 | /* mode. The mode could be changed by GC_set_pages_executable (before */ |
365 | /* GC_INIT) unless the former has no effect on the platform. Does not */ |
366 | /* use or need synchronization (i.e. acquiring the allocator lock). */ |
367 | GC_API int GC_CALL GC_get_pages_executable(void); |
368 | |
369 | /* Overrides the default handle-fork mode. Non-zero value means GC */ |
370 | /* should install proper pthread_atfork handlers. Has effect only if */ |
371 | /* called before GC_INIT. Clients should invoke GC_set_handle_fork */ |
372 | /* with non-zero argument if going to use fork with GC functions called */ |
373 | /* in the forked child. (Note that such client and atfork handlers */ |
374 | /* activities are not fully POSIX-compliant.) GC_set_handle_fork */ |
375 | /* instructs GC_init to setup GC fork handlers using pthread_atfork, */ |
376 | /* the latter might fail (or, even, absent on some targets) causing */ |
377 | /* abort at GC initialization. Starting from 7.3alpha3, problems with */ |
378 | /* missing (or failed) pthread_atfork() could be avoided by invocation */ |
379 | /* of GC_set_handle_fork(-1) at application start-up and surrounding */ |
380 | /* each fork() with the relevant GC_atfork_prepare/parent/child calls. */ |
381 | GC_API void GC_CALL GC_set_handle_fork(int); |
382 | |
383 | /* Routines to handle POSIX fork() manually (no-op if handled */ |
384 | /* automatically). GC_atfork_prepare should be called immediately */ |
385 | /* before fork(); GC_atfork_parent should be invoked just after fork in */ |
386 | /* the branch that corresponds to parent process (i.e., fork result is */ |
387 | /* non-zero); GC_atfork_child is to be called immediately in the child */ |
388 | /* branch (i.e., fork result is 0). Note that GC_atfork_child() call */ |
389 | /* should, of course, precede GC_start_mark_threads call (if any). */ |
390 | GC_API void GC_CALL GC_atfork_prepare(void); |
391 | GC_API void GC_CALL GC_atfork_parent(void); |
392 | GC_API void GC_CALL GC_atfork_child(void); |
393 | |
394 | /* Initialize the collector. Portable clients should call GC_INIT() */ |
395 | /* from the main program instead. */ |
396 | GC_API void GC_CALL GC_init(void); |
397 | |
398 | /* General purpose allocation routines, with roughly malloc calling */ |
399 | /* conv. The atomic versions promise that no relevant pointers are */ |
400 | /* contained in the object. The non-atomic versions guarantee that the */ |
401 | /* new object is cleared. GC_malloc_stubborn promises that no changes */ |
402 | /* to the object will occur after GC_end_stubborn_change has been */ |
403 | /* called on the result of GC_malloc_stubborn. GC_malloc_uncollectable */ |
404 | /* allocates an object that is scanned for pointers to collectible */ |
405 | /* objects, but is not itself collectible. The object is scanned even */ |
406 | /* if it does not appear to be reachable. GC_malloc_uncollectable and */ |
407 | /* GC_free called on the resulting object implicitly update */ |
408 | /* GC_non_gc_bytes appropriately. */ |
409 | /* Note that the GC_malloc_stubborn support doesn't really exist */ |
410 | /* anymore. MANUAL_VDB provides comparable functionality. */ |
411 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
412 | GC_malloc(size_t /* size_in_bytes */); |
413 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
414 | GC_malloc_atomic(size_t /* size_in_bytes */); |
415 | GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *); |
416 | GC_API GC_ATTR_MALLOC char * GC_CALL |
417 | GC_strndup(const char *, size_t) GC_ATTR_NONNULL(1); |
418 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
419 | GC_malloc_uncollectable(size_t /* size_in_bytes */); |
420 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
421 | GC_malloc_stubborn(size_t /* size_in_bytes */); |
422 | |
423 | /* GC_memalign() is not well tested. */ |
424 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2) void * GC_CALL |
425 | GC_memalign(size_t /* align */, size_t /* lb */); |
426 | GC_API int GC_CALL GC_posix_memalign(void ** /* memptr */, size_t /* align */, |
427 | size_t /* lb */) GC_ATTR_NONNULL(1); |
428 | |
429 | /* Explicitly deallocate an object. Dangerous if used incorrectly. */ |
430 | /* Requires a pointer to the base of an object. */ |
431 | /* If the argument is stubborn, it should not be changeable when freed. */ |
432 | /* An object should not be enabled for finalization when it is */ |
433 | /* explicitly deallocated. */ |
434 | /* GC_free(0) is a no-op, as required by ANSI C for free. */ |
435 | GC_API void GC_CALL GC_free(void *); |
436 | |
437 | /* Stubborn objects may be changed only if the collector is explicitly */ |
438 | /* informed. The collector is implicitly informed of coming change */ |
439 | /* when such an object is first allocated. The following routines */ |
440 | /* inform the collector that an object will no longer be changed, or */ |
441 | /* that it will once again be changed. Only non-NULL pointer stores */ |
442 | /* into the object are considered to be changes. The argument to */ |
443 | /* GC_end_stubborn_change must be exactly the value returned by */ |
444 | /* GC_malloc_stubborn or passed to GC_change_stubborn. (In the second */ |
445 | /* case, it may be an interior pointer within 512 bytes of the */ |
446 | /* beginning of the objects.) There is a performance penalty for */ |
447 | /* allowing more than one stubborn object to be changed at once, but it */ |
448 | /* is acceptable to do so. The same applies to dropping stubborn */ |
449 | /* objects that are still changeable. */ |
450 | GC_API void GC_CALL GC_change_stubborn(const void *) GC_ATTR_NONNULL(1); |
451 | GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1); |
452 | |
453 | /* Return a pointer to the base (lowest address) of an object given */ |
454 | /* a pointer to a location within the object. */ |
455 | /* I.e., map an interior pointer to the corresponding base pointer. */ |
456 | /* Note that with debugging allocation, this returns a pointer to the */ |
457 | /* actual base of the object, i.e. the debug information, not to */ |
458 | /* the base of the user object. */ |
459 | /* Return 0 if displaced_pointer doesn't point to within a valid */ |
460 | /* object. */ |
461 | /* Note that a deallocated object in the garbage collected heap */ |
462 | /* may be considered valid, even if it has been deallocated with */ |
463 | /* GC_free. */ |
464 | GC_API void * GC_CALL GC_base(void * /* displaced_pointer */); |
465 | |
466 | /* Return non-zero (TRUE) if and only if the argument points to */ |
467 | /* somewhere in GC heap. Primary use is as a fast alternative to */ |
468 | /* GC_base to check whether the pointed object is allocated by GC */ |
469 | /* or not. It is assumed that the collector is already initialized. */ |
470 | GC_API int GC_CALL GC_is_heap_ptr(const void *); |
471 | |
472 | /* Given a pointer to the base of an object, return its size in bytes. */ |
473 | /* The returned size may be slightly larger than what was originally */ |
474 | /* requested. */ |
475 | GC_API size_t GC_CALL GC_size(const void * /* obj_addr */) GC_ATTR_NONNULL(1); |
476 | |
477 | /* For compatibility with C library. This is occasionally faster than */ |
478 | /* a malloc followed by a bcopy. But if you rely on that, either here */ |
479 | /* or with the standard C library, your code is broken. In my */ |
480 | /* opinion, it shouldn't have been invented, but now we're stuck. -HB */ |
481 | /* The resulting object has the same kind as the original. */ |
482 | /* If the argument is stubborn, the result will have changes enabled. */ |
483 | /* It is an error to have changes enabled for the original object. */ |
484 | /* Follows ANSI conventions for NULL old_object. */ |
485 | GC_API void * GC_CALL GC_realloc(void * /* old_object */, |
486 | size_t /* new_size_in_bytes */) |
487 | /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2); |
488 | |
489 | /* Explicitly increase the heap size. */ |
490 | /* Returns 0 on failure, 1 on success. */ |
491 | GC_API int GC_CALL GC_expand_hp(size_t /* number_of_bytes */); |
492 | |
493 | /* Limit the heap size to n bytes. Useful when you're debugging, */ |
494 | /* especially on systems that don't handle running out of memory well. */ |
495 | /* n == 0 ==> unbounded. This is the default. This setter function is */ |
496 | /* unsynchronized (so it might require GC_call_with_alloc_lock to avoid */ |
497 | /* data races). */ |
498 | GC_API void GC_CALL GC_set_max_heap_size(GC_word /* n */); |
499 | |
500 | /* Inform the collector that a certain section of statically allocated */ |
501 | /* memory contains no pointers to garbage collected memory. Thus it */ |
502 | /* need not be scanned. This is sometimes important if the application */ |
503 | /* maps large read/write files into the address space, which could be */ |
504 | /* mistaken for dynamic library data segments on some systems. */ |
505 | /* Both section start and end are not needed to be pointer-aligned. */ |
506 | GC_API void GC_CALL GC_exclude_static_roots(void * /* low_address */, |
507 | void * /* high_address_plus_1 */); |
508 | |
509 | /* Clear the set of root segments. Wizards only. */ |
510 | GC_API void GC_CALL GC_clear_roots(void); |
511 | |
512 | /* Add a root segment. Wizards only. */ |
513 | /* Both segment start and end are not needed to be pointer-aligned. */ |
514 | /* low_address must not be greater than high_address_plus_1. */ |
515 | GC_API void GC_CALL GC_add_roots(void * /* low_address */, |
516 | void * /* high_address_plus_1 */); |
517 | |
518 | /* Remove a root segment. Wizards only. */ |
519 | /* May be unimplemented on some platforms. */ |
520 | GC_API void GC_CALL GC_remove_roots(void * /* low_address */, |
521 | void * /* high_address_plus_1 */); |
522 | |
523 | /* Add a displacement to the set of those considered valid by the */ |
524 | /* collector. GC_register_displacement(n) means that if p was returned */ |
525 | /* by GC_malloc, then (char *)p + n will be considered to be a valid */ |
526 | /* pointer to p. N must be small and less than the size of p. */ |
527 | /* (All pointers to the interior of objects from the stack are */ |
528 | /* considered valid in any case. This applies to heap objects and */ |
529 | /* static data.) */ |
530 | /* Preferably, this should be called before any other GC procedures. */ |
531 | /* Calling it later adds to the probability of excess memory */ |
532 | /* retention. */ |
533 | /* This is a no-op if the collector has recognition of */ |
534 | /* arbitrary interior pointers enabled, which is now the default. */ |
535 | GC_API void GC_CALL GC_register_displacement(size_t /* n */); |
536 | |
537 | /* The following version should be used if any debugging allocation is */ |
538 | /* being done. */ |
539 | GC_API void GC_CALL GC_debug_register_displacement(size_t /* n */); |
540 | |
541 | /* Explicitly trigger a full, world-stop collection. */ |
542 | GC_API void GC_CALL GC_gcollect(void); |
543 | |
544 | /* Same as above but ignores the default stop_func setting and tries to */ |
545 | /* unmap as much memory as possible (regardless of the corresponding */ |
546 | /* switch setting). The recommended usage: on receiving a system */ |
547 | /* low-memory event; before retrying a system call failed because of */ |
548 | /* the system is running out of resources. */ |
549 | GC_API void GC_CALL GC_gcollect_and_unmap(void); |
550 | |
551 | /* Trigger a full world-stopped collection. Abort the collection if */ |
552 | /* and when stop_func returns a nonzero value. Stop_func will be */ |
553 | /* called frequently, and should be reasonably fast. (stop_func is */ |
554 | /* called with the allocation lock held and the world might be stopped; */ |
555 | /* it's not allowed for stop_func to manipulate pointers to the garbage */ |
556 | /* collected heap or call most of GC functions.) This works even */ |
557 | /* if virtual dirty bits, and hence incremental collection is not */ |
558 | /* available for this architecture. Collections can be aborted faster */ |
559 | /* than normal pause times for incremental collection. However, */ |
560 | /* aborted collections do no useful work; the next collection needs */ |
561 | /* to start from the beginning. stop_func must not be 0. */ |
562 | /* GC_try_to_collect() returns 0 if the collection was aborted (or the */ |
563 | /* collections are disabled), 1 if it succeeded. */ |
564 | typedef int (GC_CALLBACK * GC_stop_func)(void); |
565 | GC_API int GC_CALL GC_try_to_collect(GC_stop_func /* stop_func */) |
566 | GC_ATTR_NONNULL(1); |
567 | |
/* Set and get the default stop_func. The default stop_func is used by */
/* GC_gcollect() and by implicitly triggered collections (except for */
/* the case when handling out of memory). Must not be 0. */
571 | /* Both the setter and getter acquire the GC lock to avoid data races. */ |
572 | GC_API void GC_CALL GC_set_stop_func(GC_stop_func /* stop_func */) |
573 | GC_ATTR_NONNULL(1); |
574 | GC_API GC_stop_func GC_CALL GC_get_stop_func(void); |
575 | |
576 | /* Return the number of bytes in the heap. Excludes collector private */ |
577 | /* data structures. Excludes the unmapped memory (returned to the OS). */ |
578 | /* Includes empty blocks and fragmentation loss. Includes some pages */ |
579 | /* that were allocated but never written. */ |
580 | /* This is an unsynchronized getter, so it should be called typically */ |
581 | /* with the GC lock held to avoid data races on multiprocessors (the */ |
582 | /* alternative is to use GC_get_heap_usage_safe or GC_get_prof_stats */ |
583 | /* API calls instead). */ |
/* This getter remains lock-free (unsynchronized) for compatibility */
/* reason since some existing clients call it from a GC callback */
/* holding the allocator lock. (This API function and the following */
/* four ones below were made thread-safe in GC v7.2alpha1 and */
/* reverted back in v7.2alpha7 for the reason described.) */
589 | GC_API size_t GC_CALL GC_get_heap_size(void); |
590 | |
591 | /* Return a lower bound on the number of free bytes in the heap */ |
592 | /* (excluding the unmapped memory space). This is an unsynchronized */ |
593 | /* getter (see GC_get_heap_size comment regarding thread-safety). */ |
594 | GC_API size_t GC_CALL GC_get_free_bytes(void); |
595 | |
596 | /* Return the size (in bytes) of the unmapped memory (which is returned */ |
597 | /* to the OS but could be remapped back by the collector later unless */ |
598 | /* the OS runs out of system/virtual memory). This is an unsynchronized */ |
599 | /* getter (see GC_get_heap_size comment regarding thread-safety). */ |
600 | GC_API size_t GC_CALL GC_get_unmapped_bytes(void); |
601 | |
602 | /* Return the number of bytes allocated since the last collection. */ |
603 | /* This is an unsynchronized getter (see GC_get_heap_size comment */ |
604 | /* regarding thread-safety). */ |
605 | GC_API size_t GC_CALL GC_get_bytes_since_gc(void); |
606 | |
607 | /* Return the total number of bytes allocated in this process. */ |
608 | /* Never decreases, except due to wrapping. This is an unsynchronized */ |
609 | /* getter (see GC_get_heap_size comment regarding thread-safety). */ |
610 | GC_API size_t GC_CALL GC_get_total_bytes(void); |
611 | |
612 | /* Return the heap usage information. This is a thread-safe (atomic) */ |
613 | /* alternative for the five above getters. (This function acquires */ |
/* the allocator lock, thus preventing data races and returning a */
/* consistent result.) Passing a NULL pointer is allowed for any */
616 | /* argument. Returned (filled in) values are of word type. */ |
617 | /* (This API function was introduced in GC v7.2alpha7 at the same time */ |
618 | /* when GC_get_heap_size and the friends were made lock-free again.) */ |
619 | GC_API void GC_CALL GC_get_heap_usage_safe(GC_word * /* pheap_size */, |
620 | GC_word * /* pfree_bytes */, |
621 | GC_word * /* punmapped_bytes */, |
622 | GC_word * /* pbytes_since_gc */, |
623 | GC_word * /* ptotal_bytes */); |
624 | |
/* Structure used to query GC statistics (profiling information).      */
/* More fields could be added in the future.  To preserve compatibility */
/* new fields should be added only to the end, and no deprecated fields */
/* should be removed from the structure.                                */
struct GC_prof_stats_s {
  GC_word heapsize_full;
            /* Heap size in bytes (including the area unmapped to OS). */
            /* Same as GC_get_heap_size() + GC_get_unmapped_bytes().   */
  GC_word free_bytes_full;
            /* Total bytes contained in free and unmapped blocks.      */
            /* Same as GC_get_free_bytes() + GC_get_unmapped_bytes().  */
  GC_word unmapped_bytes;
            /* Amount of memory unmapped to OS.  Same as the value     */
            /* returned by GC_get_unmapped_bytes().                    */
  GC_word bytes_allocd_since_gc;
            /* Number of bytes allocated since the recent collection.  */
            /* Same as returned by GC_get_bytes_since_gc().            */
  GC_word allocd_bytes_before_gc;
            /* Number of bytes allocated before the recent garbage     */
            /* collection.  The value may wrap.  Same as the result of */
            /* GC_get_total_bytes() - GC_get_bytes_since_gc().         */
  GC_word non_gc_bytes;
            /* Number of bytes not considered candidates for garbage   */
            /* collection.  Same as returned by GC_get_non_gc_bytes(). */
  GC_word gc_no;
            /* Garbage collection cycle number.  The value may wrap    */
            /* (and could be -1).  Same as returned by GC_get_gc_no(). */
  GC_word markers_m1;
            /* Number of marker threads (excluding the initiating one). */
            /* Same as returned by GC_get_parallel (or 0 if the        */
            /* collector is single-threaded).                          */
  GC_word bytes_reclaimed_since_gc;
            /* Approximate number of reclaimed bytes after recent GC.  */
  GC_word reclaimed_bytes_before_gc;
            /* Approximate number of bytes reclaimed before the recent */
            /* garbage collection.  The value may wrap.                */
};
662 | |
663 | /* Atomically get GC statistics (various global counters). Clients */ |
664 | /* should pass the size of the buffer (of GC_prof_stats_s type) to fill */ |
665 | /* in the values - this is for interoperability between different GC */ |
666 | /* versions, an old client could have fewer fields, and vice versa, */ |
667 | /* client could use newer gc.h (with more entries declared in the */ |
668 | /* structure) than that of the linked libgc binary; in the latter case, */ |
669 | /* unsupported (unknown) fields are filled in with -1. Return the size */ |
670 | /* (in bytes) of the filled in part of the structure (excluding all */ |
671 | /* unknown fields, if any). */ |
672 | GC_API size_t GC_CALL GC_get_prof_stats(struct GC_prof_stats_s *, |
673 | size_t /* stats_sz */); |
674 | #ifdef GC_THREADS |
675 | /* Same as above but unsynchronized (i.e., not holding the allocation */ |
676 | /* lock). Clients should call it using GC_call_with_alloc_lock to */ |
677 | /* avoid data races on multiprocessors. */ |
678 | GC_API size_t GC_CALL GC_get_prof_stats_unsafe(struct GC_prof_stats_s *, |
679 | size_t /* stats_sz */); |
680 | #endif |
681 | |
682 | /* Disable garbage collection. Even GC_gcollect calls will be */ |
683 | /* ineffective. */ |
684 | GC_API void GC_CALL GC_disable(void); |
685 | |
686 | /* Return non-zero (TRUE) if and only if garbage collection is disabled */ |
687 | /* (i.e., GC_dont_gc value is non-zero). Does not acquire the lock. */ |
688 | GC_API int GC_CALL GC_is_disabled(void); |
689 | |
690 | /* Try to re-enable garbage collection. GC_disable() and GC_enable() */ |
691 | /* calls nest. Garbage collection is enabled if the number of calls to */ |
692 | /* both functions is equal. */ |
693 | GC_API void GC_CALL GC_enable(void); |
694 | |
695 | /* Enable incremental/generational collection. Not advisable unless */ |
696 | /* dirty bits are available or most heap objects are pointer-free */ |
697 | /* (atomic) or immutable. Don't use in leak finding mode. Ignored if */ |
698 | /* GC_dont_gc is non-zero. Only the generational piece of this is */ |
699 | /* functional if GC_parallel is non-zero or if GC_time_limit is */ |
700 | /* GC_TIME_UNLIMITED. Causes thread-local variant of GC_gcj_malloc() */ |
701 | /* to revert to locked allocation. Must be called before any such */ |
702 | /* GC_gcj_malloc() calls. For best performance, should be called as */ |
703 | /* early as possible. On some platforms, calling it later may have */ |
704 | /* adverse effects. */ |
705 | /* Safe to call before GC_INIT(). Includes a GC_init() call. */ |
706 | GC_API void GC_CALL GC_enable_incremental(void); |
707 | |
/* Does incremental mode write-protect pages? Returns zero or */
/* more of the following, or'ed together: */
#define GC_PROTECTS_POINTER_HEAP 1 /* May protect non-atomic objs. */
#define GC_PROTECTS_PTRFREE_HEAP 2 /* May protect pointer-free (atomic) objs. */
#define GC_PROTECTS_STATIC_DATA 4 /* Currently never. */
#define GC_PROTECTS_STACK 8 /* Probably impractical. */

#define GC_PROTECTS_NONE 0 /* No pages are write-protected. */
/* The collector is assumed to be initialized before this call. */
GC_API int GC_CALL GC_incremental_protection_needs(void);
718 | |
719 | /* Perform some garbage collection work, if appropriate. */ |
720 | /* Return 0 if there is no more work to be done. */ |
721 | /* Typically performs an amount of work corresponding roughly */ |
722 | /* to marking from one page. May do more work if further */ |
723 | /* progress requires it, e.g. if incremental collection is */ |
724 | /* disabled. It is reasonable to call this in a wait loop */ |
725 | /* until it returns 0. */ |
726 | GC_API int GC_CALL GC_collect_a_little(void); |
727 | |
728 | /* Allocate an object of size lb bytes. The client guarantees that */ |
729 | /* as long as the object is live, it will be referenced by a pointer */ |
730 | /* that points to somewhere within the first 256 bytes of the object. */ |
731 | /* (This should normally be declared volatile to prevent the compiler */ |
732 | /* from invalidating this assertion.) This routine is only useful */ |
733 | /* if a large array is being allocated. It reduces the chance of */ |
734 | /* accidentally retaining such an array as a result of scanning an */ |
735 | /* integer that happens to be an address inside the array. (Actually, */ |
736 | /* it reduces the chance of the allocator not finding space for such */ |
737 | /* an array, since it will try hard to avoid introducing such a false */ |
738 | /* reference.) On a SunOS 4.X or MS Windows system this is recommended */ |
739 | /* for arrays likely to be larger than 100K or so. For other systems, */ |
740 | /* or if the collector is not configured to recognize all interior */ |
741 | /* pointers, the threshold is normally much higher. */ |
742 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
743 | GC_malloc_ignore_off_page(size_t /* lb */); |
744 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
745 | GC_malloc_atomic_ignore_off_page(size_t /* lb */); |
746 | |
/* GC_EXTRAS supplies the call-site information passed to the          */
/* GC_debug_* allocation routines (return address, file name and line  */
/* number), and GC_EXTRA_PARAMS declares the matching formal           */
/* parameters in their prototypes.  The two must stay in sync.         */
#ifdef GC_ADD_CALLER
# define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__
# define GC_EXTRA_PARAMS GC_word ra, const char * s, int i
#else
# define GC_EXTRAS __FILE__, __LINE__
# define GC_EXTRA_PARAMS const char * s, int i
#endif
754 | |
755 | /* The following is only defined if the library has been suitably */ |
756 | /* compiled: */ |
757 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
758 | GC_malloc_atomic_uncollectable(size_t /* size_in_bytes */); |
759 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
760 | GC_debug_malloc_atomic_uncollectable(size_t, GC_EXTRA_PARAMS); |
761 | |
762 | /* Debugging (annotated) allocation. GC_gcollect will check */ |
763 | /* objects allocated in this way for overwrites, etc. */ |
764 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
765 | GC_debug_malloc(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); |
766 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
767 | GC_debug_malloc_atomic(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); |
768 | GC_API GC_ATTR_MALLOC char * GC_CALL |
769 | GC_debug_strdup(const char *, GC_EXTRA_PARAMS); |
770 | GC_API GC_ATTR_MALLOC char * GC_CALL |
771 | GC_debug_strndup(const char *, size_t, GC_EXTRA_PARAMS) |
772 | GC_ATTR_NONNULL(1); |
773 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
774 | GC_debug_malloc_uncollectable(size_t /* size_in_bytes */, |
775 | GC_EXTRA_PARAMS); |
776 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
777 | GC_debug_malloc_stubborn(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); |
778 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
779 | GC_debug_malloc_ignore_off_page(size_t /* size_in_bytes */, |
780 | GC_EXTRA_PARAMS); |
781 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
782 | GC_debug_malloc_atomic_ignore_off_page(size_t /* size_in_bytes */, |
783 | GC_EXTRA_PARAMS); |
784 | GC_API void GC_CALL GC_debug_free(void *); |
785 | GC_API void * GC_CALL GC_debug_realloc(void * /* old_object */, |
786 | size_t /* new_size_in_bytes */, GC_EXTRA_PARAMS) |
787 | /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2); |
788 | GC_API void GC_CALL GC_debug_change_stubborn(const void *) GC_ATTR_NONNULL(1); |
789 | GC_API void GC_CALL GC_debug_end_stubborn_change(const void *) |
790 | GC_ATTR_NONNULL(1); |
791 | |
792 | /* Routines that allocate objects with debug information (like the */ |
793 | /* above), but just fill in dummy file and line number information. */ |
794 | /* Thus they can serve as drop-in malloc/realloc replacements. This */ |
795 | /* can be useful for two reasons: */ |
796 | /* 1) It allows the collector to be built with DBG_HDRS_ALL defined */ |
797 | /* even if some allocation calls come from 3rd party libraries */ |
798 | /* that can't be recompiled. */ |
799 | /* 2) On some platforms, the file and line information is redundant, */ |
800 | /* since it can be reconstructed from a stack trace. On such */ |
801 | /* platforms it may be more convenient not to recompile, e.g. for */ |
802 | /* leak detection. This can be accomplished by instructing the */ |
803 | /* linker to replace malloc/realloc with these. */ |
804 | GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL |
805 | GC_debug_malloc_replacement(size_t /* size_in_bytes */); |
806 | GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL |
807 | GC_debug_realloc_replacement(void * /* object_addr */, |
808 | size_t /* size_in_bytes */); |
809 | |
/* Select the implementation behind GC_MALLOC/GC_REALLOC:              */
/* - GC_DEBUG_REPLACEMENT: debug variants with dummy file/line info    */
/*   (suitable as drop-in malloc/realloc replacements, see above);     */
/* - GC_DEBUG: debug variants recording the call site via GC_EXTRAS;   */
/* - otherwise: the plain (non-debugging) allocation routines.         */
#ifdef GC_DEBUG_REPLACEMENT
# define GC_MALLOC(sz) GC_debug_malloc_replacement(sz)
# define GC_REALLOC(old, sz) GC_debug_realloc_replacement(old, sz)
#elif defined(GC_DEBUG)
# define GC_MALLOC(sz) GC_debug_malloc(sz, GC_EXTRAS)
# define GC_REALLOC(old, sz) GC_debug_realloc(old, sz, GC_EXTRAS)
#else
# define GC_MALLOC(sz) GC_malloc(sz)
# define GC_REALLOC(old, sz) GC_realloc(old, sz)
#endif /* !GC_DEBUG_REPLACEMENT && !GC_DEBUG */
820 | |
#ifdef GC_DEBUG
  /* Debugging mode: route all convenience macros to the GC_debug_*    */
  /* variants, which record the allocation call site (GC_EXTRAS) and   */
  /* let GC_gcollect check the objects for overwrites, etc.            */
# define GC_MALLOC_ATOMIC(sz) GC_debug_malloc_atomic(sz, GC_EXTRAS)
# define GC_STRDUP(s) GC_debug_strdup(s, GC_EXTRAS)
# define GC_STRNDUP(s, sz) GC_debug_strndup(s, sz, GC_EXTRAS)
# define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz) \
                        GC_debug_malloc_atomic_uncollectable(sz, GC_EXTRAS)
# define GC_MALLOC_UNCOLLECTABLE(sz) \
                        GC_debug_malloc_uncollectable(sz, GC_EXTRAS)
# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
                        GC_debug_malloc_ignore_off_page(sz, GC_EXTRAS)
# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
                        GC_debug_malloc_atomic_ignore_off_page(sz, GC_EXTRAS)
# define GC_FREE(p) GC_debug_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
                        GC_debug_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
                        GC_debug_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
                        GC_debug_register_finalizer_no_order(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
                        GC_debug_register_finalizer_unreachable(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_debug_malloc_stubborn(sz, GC_EXTRAS)
# define GC_CHANGE_STUBBORN(p) GC_debug_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_debug_end_stubborn_change(p)
  /* NOTE: GC_base() is applied to obj here since, in debug mode, the  */
  /* user-visible pointer typically does not point to the object's     */
  /* base address, while the link registration routines require it.    */
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
      GC_general_register_disappearing_link(link, \
                                        GC_base((/* no const */ void *)(obj)))
# define GC_REGISTER_LONG_LINK(link, obj) \
      GC_register_long_link(link, GC_base((/* no const */ void *)(obj)))
# define GC_REGISTER_DISPLACEMENT(n) GC_debug_register_displacement(n)
#else
  /* Non-debugging mode: the macros expand to the plain routines.      */
# define GC_MALLOC_ATOMIC(sz) GC_malloc_atomic(sz)
# define GC_STRDUP(s) GC_strdup(s)
# define GC_STRNDUP(s, sz) GC_strndup(s, sz)
# define GC_MALLOC_ATOMIC_UNCOLLECTABLE(sz) GC_malloc_atomic_uncollectable(sz)
# define GC_MALLOC_UNCOLLECTABLE(sz) GC_malloc_uncollectable(sz)
# define GC_MALLOC_IGNORE_OFF_PAGE(sz) \
                        GC_malloc_ignore_off_page(sz)
# define GC_MALLOC_ATOMIC_IGNORE_OFF_PAGE(sz) \
                        GC_malloc_atomic_ignore_off_page(sz)
# define GC_FREE(p) GC_free(p)
# define GC_REGISTER_FINALIZER(p, f, d, of, od) \
                        GC_register_finalizer(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_IGNORE_SELF(p, f, d, of, od) \
                        GC_register_finalizer_ignore_self(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_NO_ORDER(p, f, d, of, od) \
                        GC_register_finalizer_no_order(p, f, d, of, od)
# define GC_REGISTER_FINALIZER_UNREACHABLE(p, f, d, of, od) \
                        GC_register_finalizer_unreachable(p, f, d, of, od)
# define GC_MALLOC_STUBBORN(sz) GC_malloc_stubborn(sz)
# define GC_CHANGE_STUBBORN(p) GC_change_stubborn(p)
# define GC_END_STUBBORN_CHANGE(p) GC_end_stubborn_change(p)
# define GC_GENERAL_REGISTER_DISAPPEARING_LINK(link, obj) \
                        GC_general_register_disappearing_link(link, obj)
# define GC_REGISTER_LONG_LINK(link, obj) \
                        GC_register_long_link(link, obj)
# define GC_REGISTER_DISPLACEMENT(n) GC_register_displacement(n)
#endif /* !GC_DEBUG */
879 | |
/* The following are included because they are often convenient, and */
/* reduce the chance for a misspecified size argument. But calls may */
/* expand to something syntactically incorrect if t is a complicated */
/* type expression. Note that, unlike C++ new operator, these ones */
/* may return NULL (if out of memory). */
#define GC_NEW(t) ((t*)GC_MALLOC(sizeof(t)))
#define GC_NEW_ATOMIC(t) ((t*)GC_MALLOC_ATOMIC(sizeof(t))) /* pointer-free */
#define GC_NEW_STUBBORN(t) ((t*)GC_MALLOC_STUBBORN(sizeof(t)))
#define GC_NEW_UNCOLLECTABLE(t) ((t*)GC_MALLOC_UNCOLLECTABLE(sizeof(t)))
889 | |
890 | #ifdef GC_REQUIRE_WCSDUP |
891 | /* This might be unavailable on some targets (or not needed). */ |
892 | /* wchar_t should be defined in stddef.h */ |
893 | GC_API GC_ATTR_MALLOC wchar_t * GC_CALL |
894 | GC_wcsdup(const wchar_t *) GC_ATTR_NONNULL(1); |
895 | GC_API GC_ATTR_MALLOC wchar_t * GC_CALL |
896 | GC_debug_wcsdup(const wchar_t *, GC_EXTRA_PARAMS) GC_ATTR_NONNULL(1); |
897 | # ifdef GC_DEBUG |
898 | # define GC_WCSDUP(s) GC_debug_wcsdup(s, GC_EXTRAS) |
899 | # else |
900 | # define GC_WCSDUP(s) GC_wcsdup(s) |
901 | # endif |
902 | #endif /* GC_REQUIRE_WCSDUP */ |
903 | |
904 | /* Finalization. Some of these primitives are grossly unsafe. */ |
905 | /* The idea is to make them both cheap, and sufficient to build */ |
906 | /* a safer layer, closer to Modula-3, Java, or PCedar finalization. */ |
907 | /* The interface represents my conclusions from a long discussion */ |
908 | /* with Alan Demers, Dan Greene, Carl Hauser, Barry Hayes, */ |
909 | /* Christian Jacobi, and Russ Atkinson. It's not perfect, and */ |
910 | /* probably nobody else agrees with it. Hans-J. Boehm 3/13/92 */ |
911 | typedef void (GC_CALLBACK * GC_finalization_proc)(void * /* obj */, |
912 | void * /* client_data */); |
913 | |
914 | GC_API void GC_CALL GC_register_finalizer(void * /* obj */, |
915 | GC_finalization_proc /* fn */, void * /* cd */, |
916 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
917 | GC_ATTR_NONNULL(1); |
918 | GC_API void GC_CALL GC_debug_register_finalizer(void * /* obj */, |
919 | GC_finalization_proc /* fn */, void * /* cd */, |
920 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
921 | GC_ATTR_NONNULL(1); |
922 | /* When obj is no longer accessible, invoke */ |
923 | /* (*fn)(obj, cd). If a and b are inaccessible, and */ |
924 | /* a points to b (after disappearing links have been */ |
925 | /* made to disappear), then only a will be */ |
926 | /* finalized. (If this does not create any new */ |
927 | /* pointers to b, then b will be finalized after the */ |
928 | /* next collection.) Any finalizable object that */ |
929 | /* is reachable from itself by following one or more */ |
930 | /* pointers will not be finalized (or collected). */ |
931 | /* Thus cycles involving finalizable objects should */ |
932 | /* be avoided, or broken by disappearing links. */ |
933 | /* All but the last finalizer registered for an object */ |
934 | /* is ignored. */ |
935 | /* Finalization may be removed by passing 0 as fn. */ |
936 | /* Finalizers are implicitly unregistered when they are */ |
937 | /* enqueued for finalization (i.e. become ready to be */ |
938 | /* finalized). */ |
939 | /* The old finalizer and client data are stored in */ |
940 | /* *ofn and *ocd. (ofn and/or ocd may be NULL. */ |
941 | /* The allocation lock is held while *ofn and *ocd are */ |
942 | /* updated. In case of error (no memory to register */ |
943 | /* new finalizer), *ofn and *ocd remain unchanged.) */ |
944 | /* Fn is never invoked on an accessible object, */ |
945 | /* provided hidden pointers are converted to real */ |
946 | /* pointers only if the allocation lock is held, and */ |
947 | /* such conversions are not performed by finalization */ |
948 | /* routines. */ |
949 | /* If GC_register_finalizer is aborted as a result of */ |
950 | /* a signal, the object may be left with no */ |
951 | /* finalization, even if neither the old nor new */ |
952 | /* finalizer were NULL. */ |
953 | /* Obj should be the starting address of an object */ |
954 | /* allocated by GC_malloc or friends. Obj may also be */ |
955 | /* NULL or point to something outside GC heap (in this */ |
956 | /* case, fn is ignored, *ofn and *ocd are set to NULL). */ |
957 | /* Note that any garbage collectible object referenced */ |
958 | /* by cd will be considered accessible until the */ |
959 | /* finalizer is invoked. */ |
960 | |
/* Another version of the above follows. It ignores */
962 | /* self-cycles, i.e. pointers from a finalizable object to */ |
963 | /* itself. There is a stylistic argument that this is wrong, */ |
964 | /* but it's unavoidable for C++, since the compiler may */ |
965 | /* silently introduce these. It's also benign in that specific */ |
966 | /* case. And it helps if finalizable objects are split to */ |
967 | /* avoid cycles. */ |
968 | /* Note that cd will still be viewed as accessible, even if it */ |
969 | /* refers to the object itself. */ |
970 | GC_API void GC_CALL GC_register_finalizer_ignore_self(void * /* obj */, |
971 | GC_finalization_proc /* fn */, void * /* cd */, |
972 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
973 | GC_ATTR_NONNULL(1); |
974 | GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(void * /* obj */, |
975 | GC_finalization_proc /* fn */, void * /* cd */, |
976 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
977 | GC_ATTR_NONNULL(1); |
978 | |
979 | /* Another version of the above. It ignores all cycles. */ |
980 | /* It should probably only be used by Java implementations. */ |
981 | /* Note that cd will still be viewed as accessible, even if it */ |
982 | /* refers to the object itself. */ |
983 | GC_API void GC_CALL GC_register_finalizer_no_order(void * /* obj */, |
984 | GC_finalization_proc /* fn */, void * /* cd */, |
985 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
986 | GC_ATTR_NONNULL(1); |
987 | GC_API void GC_CALL GC_debug_register_finalizer_no_order(void * /* obj */, |
988 | GC_finalization_proc /* fn */, void * /* cd */, |
989 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
990 | GC_ATTR_NONNULL(1); |
991 | |
992 | /* This is a special finalizer that is useful when an object's */ |
993 | /* finalizer must be run when the object is known to be no */ |
994 | /* longer reachable, not even from other finalizable objects. */ |
995 | /* It behaves like "normal" finalization, except that the */ |
996 | /* finalizer is not run while the object is reachable from */ |
997 | /* other objects specifying unordered finalization. */ |
998 | /* Effectively it allows an object referenced, possibly */ |
999 | /* indirectly, from an unordered finalizable object to override */ |
1000 | /* the unordered finalization request. */ |
1001 | /* This can be used in combination with finalizer_no_order so */ |
1002 | /* as to release resources that must not be released while an */ |
1003 | /* object can still be brought back to life by other */ |
1004 | /* finalizers. */ |
1005 | /* Only works if GC_java_finalization is set. Probably only */ |
1006 | /* of interest when implementing a language that requires */ |
1007 | /* unordered finalization (e.g. Java, C#). */ |
1008 | GC_API void GC_CALL GC_register_finalizer_unreachable(void * /* obj */, |
1009 | GC_finalization_proc /* fn */, void * /* cd */, |
1010 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
1011 | GC_ATTR_NONNULL(1); |
1012 | GC_API void GC_CALL GC_debug_register_finalizer_unreachable(void * /* obj */, |
1013 | GC_finalization_proc /* fn */, void * /* cd */, |
1014 | GC_finalization_proc * /* ofn */, void ** /* ocd */) |
1015 | GC_ATTR_NONNULL(1); |
1016 | |
1017 | #define GC_NO_MEMORY 2 /* Failure due to lack of memory. */ |
1018 | |
1019 | /* The following routine may be used to break cycles between */ |
1020 | /* finalizable objects, thus causing cyclic finalizable */ |
1021 | /* objects to be finalized in the correct order. Standard */ |
1022 | /* use involves calling GC_register_disappearing_link(&p), */ |
1023 | /* where p is a pointer that is not followed by finalization */ |
1024 | /* code, and should not be considered in determining */ |
1025 | /* finalization order. */ |
1026 | GC_API int GC_CALL GC_register_disappearing_link(void ** /* link */) |
1027 | GC_ATTR_NONNULL(1); |
1028 | /* Link should point to a field of a heap allocated */ |
1029 | /* object obj. *link will be cleared when obj is */ |
1030 | /* found to be inaccessible. This happens BEFORE any */ |
1031 | /* finalization code is invoked, and BEFORE any */ |
1032 | /* decisions about finalization order are made. */ |
1033 | /* This is useful in telling the finalizer that */ |
1034 | /* some pointers are not essential for proper */ |
1035 | /* finalization. This may avoid finalization cycles. */ |
1036 | /* Note that obj may be resurrected by another */ |
1037 | /* finalizer, and thus the clearing of *link may */ |
1038 | /* be visible to non-finalization code. */ |
1039 | /* There's an argument that an arbitrary action should */ |
1040 | /* be allowed here, instead of just clearing a pointer. */ |
1041 | /* But this causes problems if that action alters, or */ |
1042 | /* examines connectivity. Returns GC_DUPLICATE if link */ |
1043 | /* was already registered, GC_SUCCESS if registration */ |
1044 | /* succeeded, GC_NO_MEMORY if it failed for lack of */ |
1045 | /* memory, and GC_oom_fn did not handle the problem. */ |
1046 | /* Only exists for backward compatibility. See below: */ |
1047 | |
1048 | GC_API int GC_CALL GC_general_register_disappearing_link(void ** /* link */, |
1049 | const void * /* obj */) |
1050 | GC_ATTR_NONNULL(1) GC_ATTR_NONNULL(2); |
1051 | /* A slight generalization of the above. *link is */ |
1052 | /* cleared when obj first becomes inaccessible. This */ |
1053 | /* can be used to implement weak pointers easily and */ |
1054 | /* safely. Typically link will point to a location */ |
1055 | /* holding a disguised pointer to obj. (A pointer */ |
1056 | /* inside an "atomic" object is effectively disguised.) */ |
1057 | /* In this way, weak pointers are broken before any */ |
1058 | /* object reachable from them gets finalized. */ |
1059 | /* Each link may be registered only with one obj value, */ |
1060 | /* i.e. all objects but the last one (link registered */ |
1061 | /* with) are ignored. This was added after a long */ |
1062 | /* email discussion with John Ellis. */ |
1063 | /* link must be non-NULL (and be properly aligned). */ |
1064 | /* obj must be a pointer to the first word of an object */ |
1065 | /* allocated by GC_malloc or friends. It is unsafe to */ |
1066 | /* explicitly deallocate the object containing link. */ |
1067 | /* Explicit deallocation of obj may or may not cause */ |
1068 | /* link to eventually be cleared. */ |
1069 | /* This function can be used to implement certain types */ |
1070 | /* of weak pointers. Note, however, this generally */ |
1071 | /* requires that the allocation lock is held (see */ |
1072 | /* GC_call_with_alloc_lock() below) when the disguised */ |
1073 | /* pointer is accessed. Otherwise a strong pointer */ |
1074 | /* could be recreated between the time the collector */ |
1075 | /* decides to reclaim the object and the link is */ |
1076 | /* cleared. Returns GC_SUCCESS if registration */ |
1077 | /* succeeded (a new link is registered), GC_DUPLICATE */ |
1078 | /* if link was already registered (with some object), */ |
1079 | /* GC_NO_MEMORY if registration failed for lack of */ |
1080 | /* memory (and GC_oom_fn did not handle the problem). */ |
1081 | |
1082 | GC_API int GC_CALL GC_move_disappearing_link(void ** /* link */, |
1083 | void ** /* new_link */) |
1084 | GC_ATTR_NONNULL(2); |
1085 | /* Moves a link previously registered via */ |
1086 | /* GC_general_register_disappearing_link (or */ |
1087 | /* GC_register_disappearing_link). Does not change the */ |
1088 | /* target object of the weak reference. Does not */ |
1089 | /* change (*new_link) content. May be called with */ |
1090 | /* new_link equal to link (to check whether link has */ |
1091 | /* been registered). Returns GC_SUCCESS on success, */ |
1092 | /* GC_DUPLICATE if there is already another */ |
1093 | /* disappearing link at the new location (never */ |
1094 | /* returned if new_link is equal to link), GC_NOT_FOUND */ |
1095 | /* if no link is registered at the original location. */ |
1096 | |
1097 | GC_API int GC_CALL GC_unregister_disappearing_link(void ** /* link */); |
1098 | /* Undoes a registration by either of the above two */ |
1099 | /* routines. Returns 0 if link was not actually */ |
1100 | /* registered (otherwise returns 1). */ |
1101 | |
1102 | GC_API int GC_CALL GC_register_long_link(void ** /* link */, |
1103 | const void * /* obj */) |
1104 | GC_ATTR_NONNULL(1) GC_ATTR_NONNULL(2); |
1105 | /* Similar to GC_general_register_disappearing_link but */ |
1106 | /* *link only gets cleared when obj becomes truly */ |
1107 | /* inaccessible. An object becomes truly inaccessible */ |
1108 | /* when it can no longer be resurrected from its */ |
1109 | /* finalizer (e.g. by assigning itself to a pointer */ |
1110 | /* traceable from root). This can be used to implement */ |
1111 | /* long weak pointers easily and safely. */ |
1112 | |
1113 | GC_API int GC_CALL GC_move_long_link(void ** /* link */, |
1114 | void ** /* new_link */) |
1115 | GC_ATTR_NONNULL(2); |
1116 | /* Similar to GC_move_disappearing_link but for a link */ |
1117 | /* previously registered via GC_register_long_link. */ |
1118 | |
1119 | GC_API int GC_CALL GC_unregister_long_link(void ** /* link */); |
1120 | /* Similar to GC_unregister_disappearing_link but for a */ |
1121 | /* registration by either of the above two routines. */ |
1122 | |
1123 | /* Returns !=0 if GC_invoke_finalizers has something to do. */ |
1124 | GC_API int GC_CALL GC_should_invoke_finalizers(void); |
1125 | |
1126 | GC_API int GC_CALL GC_invoke_finalizers(void); |
1127 | /* Run finalizers for all objects that are ready to */ |
1128 | /* be finalized. Return the number of finalizers */ |
1129 | /* that were run. Normally this is also called */ |
1130 | /* implicitly during some allocations. If */ |
1131 | /* GC_finalize_on_demand is nonzero, it must be called */ |
1132 | /* explicitly. */ |
1133 | |
/* Explicitly tell the collector that an object is reachable */
/* at a particular program point. This prevents the argument */
/* pointer from being optimized away, even if it is otherwise no */
/* longer needed. It should have no visible effect in the */
/* absence of finalizers or disappearing links. But it may be */
/* needed to prevent finalizers from running while the */
/* associated external resource is still in use. */
/* The function is sometimes called keep_alive in other */
/* settings. */
#if defined(__GNUC__) && !defined(__INTEL_COMPILER)
  /* An empty volatile asm statement that nominally consumes ptr and   */
  /* may touch memory: it emits no code but forces the compiler to     */
  /* keep ptr (and thus the object) live up to this point.             */
# define GC_reachable_here(ptr) \
                __asm__ __volatile__(" " : : "X"(ptr) : "memory")
#else
  /* Fallback: an out-of-line no-op call the optimizer cannot elide.   */
  GC_API void GC_CALL GC_noop1(GC_word);
# define GC_reachable_here(ptr) GC_noop1((GC_word)(ptr))
#endif
1150 | |
1151 | /* GC_set_warn_proc can be used to redirect or filter warning messages. */ |
1152 | /* p may not be a NULL pointer. msg is printf format string (arg must */ |
1153 | /* match the format). Both the setter and the getter acquire the GC */ |
1154 | /* lock (to avoid data races). */ |
1155 | typedef void (GC_CALLBACK * GC_warn_proc)(char * /* msg */, |
1156 | GC_word /* arg */); |
1157 | GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc /* p */) GC_ATTR_NONNULL(1); |
1158 | /* GC_get_warn_proc returns the current warn_proc. */ |
1159 | GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void); |
1160 | |
1161 | /* GC_ignore_warn_proc may be used as an argument for GC_set_warn_proc */ |
1162 | /* to suppress all warnings (unless statistics printing is turned on). */ |
1163 | GC_API void GC_CALLBACK GC_ignore_warn_proc(char *, GC_word); |
1164 | |
1165 | /* abort_func is invoked on GC fatal aborts (just before OS-dependent */ |
1166 | /* abort or exit(1) is called). Must be non-NULL. The default one */ |
1167 | /* outputs msg to stderr provided msg is non-NULL. msg is NULL if */ |
1168 | /* invoked before exit(1) otherwise msg is non-NULL (i.e., if invoked */ |
1169 | /* before abort). Both the setter and getter acquire the GC lock. */ |
1170 | /* Both the setter and getter are defined only if the library has been */ |
1171 | /* compiled without SMALL_CONFIG. */ |
1172 | typedef void (GC_CALLBACK * GC_abort_func)(const char * /* msg */); |
1173 | GC_API void GC_CALL GC_set_abort_func(GC_abort_func) GC_ATTR_NONNULL(1); |
1174 | GC_API GC_abort_func GC_CALL GC_get_abort_func(void); |
1175 | |
1176 | /* The following is intended to be used by a higher level */ |
1177 | /* (e.g. Java-like) finalization facility. It is expected */ |
1178 | /* that finalization code will arrange for hidden pointers to */ |
1179 | /* disappear. Otherwise objects can be accessed after they */ |
1180 | /* have been collected. */ |
1181 | /* Note that putting pointers in atomic objects or in */ |
1182 | /* non-pointer slots of "typed" objects is equivalent to */ |
1183 | /* disguising them in this way, and may have other advantages. */ |
typedef GC_word GC_hidden_pointer;
/* Hiding is done by bitwise complement: the stored value no longer */
/* looks like a pointer, so it does not, by itself, keep the */
/* referenced object alive across a collection. */
#define GC_HIDE_POINTER(p) (~(GC_hidden_pointer)(p))
/* Converting a hidden pointer to a real pointer requires verifying */
/* that the object still exists.  This involves acquiring the */
/* allocator lock to avoid a race with the collector. */
/* (Complement is its own inverse, hence the reuse of the macro.) */
#define GC_REVEAL_POINTER(p) ((void *)GC_HIDE_POINTER(p))

#if defined(I_HIDE_POINTERS) || defined(GC_I_HIDE_POINTERS)
  /* This exists only for compatibility (the GC-prefixed symbols are */
  /* preferred for new code). */
# define HIDE_POINTER(p) GC_HIDE_POINTER(p)
# define REVEAL_POINTER(p) GC_REVEAL_POINTER(p)
#endif
1197 | |
1198 | typedef void * (GC_CALLBACK * GC_fn_type)(void * /* client_data */); |
1199 | GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type /* fn */, |
1200 | void * /* client_data */) GC_ATTR_NONNULL(1); |
1201 | |
1202 | /* These routines are intended to explicitly notify the collector */ |
1203 | /* of new threads. Often this is unnecessary because thread creation */ |
1204 | /* is implicitly intercepted by the collector, using header-file */ |
1205 | /* defines, or linker-based interception. In the long run the intent */ |
1206 | /* is to always make redundant registration safe. In the short run, */ |
1207 | /* this is being implemented a platform at a time. */ |
1208 | /* The interface is complicated by the fact that we probably will not */ |
1209 | /* ever be able to automatically determine the stack base for thread */ |
1210 | /* stacks on all platforms. */ |
1211 | |
/* Structure representing the base of a thread stack.  On most */
/* platforms this contains just a single address. */
struct GC_stack_base {
  void * mem_base; /* Base of memory stack. */
# if defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
    /* IA-64 additionally has a register backing store that grows */
    /* separately from the memory stack, with its own base address. */
    void * reg_base; /* Base of separate register stack. */
# endif
};
1220 | |
1221 | typedef void * (GC_CALLBACK * GC_stack_base_func)( |
1222 | struct GC_stack_base * /* sb */, void * /* arg */); |
1223 | |
1224 | /* Call a function with a stack base structure corresponding to */ |
1225 | /* somewhere in the GC_call_with_stack_base frame. This often can */ |
1226 | /* be used to provide a sufficiently accurate stack base. And we */ |
1227 | /* implement it everywhere. */ |
1228 | GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func /* fn */, |
1229 | void * /* arg */) GC_ATTR_NONNULL(1); |
1230 | |
/* Result codes returned by the thread-registration and */
/* disappearing-link routines in this header. */
#define GC_SUCCESS 0
#define GC_DUPLICATE 1          /* Was already registered. */
#define GC_NO_THREADS 2         /* No thread support in GC. */
        /* GC_NO_THREADS is not returned by any GC function anymore. */
#define GC_UNIMPLEMENTED 3  /* Not yet implemented on this platform. */
#define GC_NOT_FOUND 4          /* Requested link not found (returned */
                                /* by GC_move_disappearing_link). */
1238 | |
1239 | #if defined(GC_DARWIN_THREADS) || defined(GC_WIN32_THREADS) |
1240 | /* Use implicit thread registration and processing (via Win32 DllMain */ |
1241 | /* or Darwin task_threads). Deprecated. Must be called before */ |
1242 | /* GC_INIT() and other GC routines. Should be avoided if */ |
1243 | /* GC_pthread_create, GC_beginthreadex (or GC_CreateThread) could be */ |
1244 | /* called instead. Disables parallelized GC on Win32. */ |
1245 | GC_API void GC_CALL GC_use_threads_discovery(void); |
1246 | #endif |
1247 | |
1248 | #ifdef GC_THREADS |
1249 | /* Suggest the GC to use the specific signal to suspend threads. */ |
1250 | /* Has no effect after GC_init and on non-POSIX systems. */ |
1251 | GC_API void GC_CALL GC_set_suspend_signal(int); |
1252 | |
1253 | /* Suggest the GC to use the specific signal to resume threads. */ |
1254 | /* Has no effect after GC_init and on non-POSIX systems. */ |
1255 | GC_API void GC_CALL GC_set_thr_restart_signal(int); |
1256 | |
1257 | /* Return the signal number (constant after initialization) used by */ |
1258 | /* the GC to suspend threads on POSIX systems. Return -1 otherwise. */ |
1259 | GC_API int GC_CALL GC_get_suspend_signal(void); |
1260 | |
1261 | /* Return the signal number (constant after initialization) used by */ |
1262 | /* the garbage collector to restart (resume) threads on POSIX */ |
1263 | /* systems. Return -1 otherwise. */ |
1264 | GC_API int GC_CALL GC_get_thr_restart_signal(void); |
1265 | |
1266 | /* Restart marker threads after POSIX fork in child. Meaningless in */ |
1267 | /* other situations. Should not be called if fork followed by exec. */ |
1268 | GC_API void GC_CALL GC_start_mark_threads(void); |
1269 | |
1270 | /* Explicitly enable GC_register_my_thread() invocation. */ |
1271 | /* Done implicitly if a GC thread-creation function is called (or */ |
1272 | /* implicit thread registration is activated). Otherwise, it must */ |
1273 | /* be called from the main (or any previously registered) thread */ |
1274 | /* between the collector initialization and the first explicit */ |
1275 | /* registering of a thread (it should be called as late as possible). */ |
1276 | GC_API void GC_CALL GC_allow_register_threads(void); |
1277 | |
1278 | /* Register the current thread, with the indicated stack base, as */ |
1279 | /* a new thread whose stack(s) should be traced by the GC. If it */ |
1280 | /* is not implicitly called by the GC, this must be called before a */ |
1281 | /* thread can allocate garbage collected memory, or assign pointers */ |
1282 | /* to the garbage collected heap. Once registered, a thread will be */ |
1283 | /* stopped during garbage collections. */ |
1284 | /* This call must be previously enabled (see above). */ |
1285 | /* This should never be called from the main thread, where it is */ |
1286 | /* always done implicitly. This is normally done implicitly if GC_ */ |
1287 | /* functions are called to create the thread, e.g. by including gc.h */ |
1288 | /* (which redefines some system functions) before calling the system */ |
1289 | /* thread creation function. Nonetheless, thread cleanup routines */ |
1290 | /* (e.g., pthread key destructor) typically require manual thread */ |
1291 | /* registering (and unregistering) if pointers to GC-allocated */ |
1292 | /* objects are manipulated inside. */ |
1293 | /* It is also always done implicitly on some platforms if */ |
1294 | /* GC_use_threads_discovery() is called at start-up. Except for the */ |
1295 | /* latter case, the explicit call is normally required for threads */ |
1296 | /* created by third-party libraries. */ |
1297 | /* A manually registered thread requires manual unregistering. */ |
1298 | GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *) |
1299 | GC_ATTR_NONNULL(1); |
1300 | |
1301 | /* Return non-zero (TRUE) if and only if the calling thread is */ |
1302 | /* registered with the garbage collector. */ |
1303 | GC_API int GC_CALL GC_thread_is_registered(void); |
1304 | |
1305 | /* Unregister the current thread. Only an explicitly registered */ |
1306 | /* thread (i.e. for which GC_register_my_thread() returns GC_SUCCESS) */ |
1307 | /* is allowed (and required) to call this function. (As a special */ |
1308 | /* exception, it is also allowed to once unregister the main thread.) */ |
1309 | /* The thread may no longer allocate garbage collected memory or */ |
1310 | /* manipulate pointers to the garbage collected heap after making */ |
1311 | /* this call. Specifically, if it wants to return or otherwise */ |
1312 | /* communicate a pointer to the garbage-collected heap to another */ |
1313 | /* thread, it must do this before calling GC_unregister_my_thread, */ |
1314 | /* most probably by saving it in a global data structure. Must not */ |
1315 | /* be called inside a GC callback function (except for */ |
1316 | /* GC_call_with_stack_base() one). */ |
1317 | GC_API int GC_CALL GC_unregister_my_thread(void); |
1318 | #endif /* GC_THREADS */ |
1319 | |
1320 | /* Wrapper for functions that are likely to block (or, at least, do not */ |
1321 | /* allocate garbage collected memory and/or manipulate pointers to the */ |
1322 | /* garbage collected heap) for an appreciable length of time. While fn */ |
1323 | /* is running, the collector is said to be in the "inactive" state for */ |
1324 | /* the current thread (this means that the thread is not suspended and */ |
1325 | /* the thread's stack frames "belonging" to the functions in the */ |
1326 | /* "inactive" state are not scanned during garbage collections). It is */ |
1327 | /* allowed for fn to call GC_call_with_gc_active() (even recursively), */ |
1328 | /* thus temporarily toggling the collector's state back to "active". */ |
1329 | GC_API void * GC_CALL GC_do_blocking(GC_fn_type /* fn */, |
1330 | void * /* client_data */) GC_ATTR_NONNULL(1); |
1331 | |
1332 | /* Call a function switching to the "active" state of the collector for */ |
1333 | /* the current thread (i.e. the user function is allowed to call any */ |
1334 | /* GC function and/or manipulate pointers to the garbage collected */ |
1335 | /* heap). GC_call_with_gc_active() has the functionality opposite to */ |
1336 | /* GC_do_blocking() one. It is assumed that the collector is already */ |
1337 | /* initialized and the current thread is registered. fn may toggle */ |
1338 | /* the collector thread's state temporarily to "inactive" one by using */ |
1339 | /* GC_do_blocking. GC_call_with_gc_active() often can be used to */ |
1340 | /* provide a sufficiently accurate stack base. */ |
1341 | GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type /* fn */, |
1342 | void * /* client_data */) GC_ATTR_NONNULL(1); |
1343 | |
1344 | /* Attempt to fill in the GC_stack_base structure with the stack base */ |
1345 | /* for this thread. This appears to be required to implement anything */ |
1346 | /* like the JNI AttachCurrentThread in an environment in which new */ |
1347 | /* threads are not automatically registered with the collector. */ |
1348 | /* It is also unfortunately hard to implement well on many platforms. */ |
1349 | /* Returns GC_SUCCESS or GC_UNIMPLEMENTED. This function acquires the */ |
1350 | /* GC lock on some platforms. */ |
1351 | GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *) |
1352 | GC_ATTR_NONNULL(1); |
1353 | |
1354 | /* The following routines are primarily intended for use with a */ |
1355 | /* preprocessor which inserts calls to check C pointer arithmetic. */ |
1356 | /* They indicate failure by invoking the corresponding _print_proc. */ |
1357 | |
1358 | /* Check that p and q point to the same object. */ |
1359 | /* Fail conspicuously if they don't. */ |
1360 | /* Returns the first argument. */ |
1361 | /* Succeeds if neither p nor q points to the heap. */ |
/* May succeed if both p and q point to addresses between heap        */
/* objects.                                                           */
1363 | GC_API void * GC_CALL GC_same_obj(void * /* p */, void * /* q */); |
1364 | |
1365 | /* Checked pointer pre- and post- increment operations. Note that */ |
1366 | /* the second argument is in units of bytes, not multiples of the */ |
1367 | /* object size. This should either be invoked from a macro, or the */ |
1368 | /* call should be automatically generated. */ |
1369 | GC_API void * GC_CALL GC_pre_incr(void **, ptrdiff_t /* how_much */) |
1370 | GC_ATTR_NONNULL(1); |
1371 | GC_API void * GC_CALL GC_post_incr(void **, ptrdiff_t /* how_much */) |
1372 | GC_ATTR_NONNULL(1); |
1373 | |
1374 | /* Check that p is visible */ |
1375 | /* to the collector as a possibly pointer containing location. */ |
1376 | /* If it isn't fail conspicuously. */ |
1377 | /* Returns the argument in all cases. May erroneously succeed */ |
1378 | /* in hard cases. (This is intended for debugging use with */ |
1379 | /* untyped allocations. The idea is that it should be possible, though */ |
1380 | /* slow, to add such a call to all indirect pointer stores.) */ |
1381 | /* Currently useless for multi-threaded worlds. */ |
1382 | GC_API void * GC_CALL GC_is_visible(void * /* p */); |
1383 | |
1384 | /* Check that if p is a pointer to a heap page, then it points to */ |
1385 | /* a valid displacement within a heap object. */ |
1386 | /* Fail conspicuously if this property does not hold. */ |
1387 | /* Uninteresting with GC_all_interior_pointers. */ |
1388 | /* Always returns its argument. */ |
1389 | GC_API void * GC_CALL GC_is_valid_displacement(void * /* p */); |
1390 | |
1391 | /* Explicitly dump the GC state. This is most often called from the */ |
1392 | /* debugger, or by setting the GC_DUMP_REGULARLY environment variable, */ |
1393 | /* but it may be useful to call it from client code during debugging. */ |
1394 | /* Defined only if the library has been compiled without NO_DEBUGGING. */ |
1395 | GC_API void GC_CALL GC_dump(void); |
1396 | |
1397 | /* Safer, but slow, pointer addition. Probably useful mainly with */ |
1398 | /* a preprocessor. Useful only for heap pointers. */ |
1399 | /* Only the macros without trailing digits are meant to be used */ |
1400 | /* by clients. These are designed to model the available C pointer */ |
1401 | /* arithmetic expressions. */ |
1402 | /* Even then, these are probably more useful as */ |
1403 | /* documentation than as part of the API. */ |
1404 | /* Note that GC_PTR_ADD evaluates the first argument more than once. */ |
#if defined(GC_DEBUG) && defined(__GNUC__)
  /* Debug versions: route the arithmetic through the checked */
  /* GC_same_obj/GC_pre_incr/GC_post_incr entry points declared */
  /* above; typeof (a GCC extension) recovers the result type so the */
  /* macros can be used like ordinary pointer expressions. */
# define GC_PTR_ADD3(x, n, type_of_result) \
        ((type_of_result)GC_same_obj((x)+(n), (x)))
# define GC_PRE_INCR3(x, n, type_of_result) \
        ((type_of_result)GC_pre_incr((void **)(&(x)), (n)*sizeof(*x)))
# define GC_POST_INCR3(x, n, type_of_result) \
        ((type_of_result)GC_post_incr((void **)(&(x)), (n)*sizeof(*x)))
# define GC_PTR_ADD(x, n) GC_PTR_ADD3(x, n, typeof(x))
# define GC_PRE_INCR(x, n) GC_PRE_INCR3(x, n, typeof(x))
# define GC_POST_INCR(x) GC_POST_INCR3(x, 1, typeof(x))
# define GC_POST_DECR(x) GC_POST_INCR3(x, -1, typeof(x))
#else /* !GC_DEBUG || !__GNUC__ */
  /* We can't do this right without typeof, which ANSI decided was not */
  /* sufficiently useful.  Without it we resort to the non-debug */
  /* version: plain, unchecked pointer arithmetic. */
  /* FIXME: This should eventually support C++0x decltype. */
# define GC_PTR_ADD(x, n) ((x)+(n))
# define GC_PRE_INCR(x, n) ((x) += (n))
# define GC_POST_INCR(x) ((x)++)
# define GC_POST_DECR(x) ((x)--)
#endif /* !GC_DEBUG || !__GNUC__ */
1425 | |
/* Safer assignment of a pointer to a non-stack location. */
#ifdef GC_DEBUG
  /* Checks that the destination p is a location visible to the */
  /* collector (GC_is_visible) and that the stored value q is a valid */
  /* displacement within a heap object (GC_is_valid_displacement) */
  /* before performing the store. */
# define GC_PTR_STORE(p, q) \
        (*(void **)GC_is_visible(p) = GC_is_valid_displacement(q))
#else
# define GC_PTR_STORE(p, q) (*(p) = (q))
#endif
1433 | |
1434 | /* Functions called to report pointer checking errors */ |
1435 | GC_API void (GC_CALLBACK * GC_same_obj_print_proc)(void * /* p */, |
1436 | void * /* q */); |
1437 | GC_API void (GC_CALLBACK * GC_is_valid_displacement_print_proc)(void *); |
1438 | GC_API void (GC_CALLBACK * GC_is_visible_print_proc)(void *); |
1439 | |
1440 | #ifdef GC_PTHREADS |
1441 | /* For pthread support, we generally need to intercept a number of */ |
1442 | /* thread library calls. We do that here by macro defining them. */ |
1443 | # include "gc_pthread_redirects.h" |
1444 | #endif |
1445 | |
/* This returns a list of objects, linked through their first word. */
/* Its use can greatly reduce lock contention problems, since the */
/* allocation lock can be acquired and released many fewer times. */
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t /* lb */);
/* GC_NEXT() dereferences the first word of p to walk the list */
/* returned by GC_malloc_many(). */
#define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */
                                    /* in returned list. */
1452 | |
1453 | /* A filter function to control the scanning of dynamic libraries. */ |
1454 | /* If implemented, called by GC before registering a dynamic library */ |
1455 | /* (discovered by GC) section as a static data root (called only as */ |
1456 | /* a last reason not to register). The filename of the library, the */ |
1457 | /* address and the length of the memory region (section) are passed. */ |
1458 | /* This routine should return nonzero if that region should be scanned. */ |
1459 | /* Always called with the allocation lock held. Depending on the */ |
1460 | /* platform, might be called with the "world" stopped. */ |
1461 | typedef int (GC_CALLBACK * GC_has_static_roots_func)( |
1462 | const char * /* dlpi_name */, |
1463 | void * /* section_start */, |
1464 | size_t /* section_size */); |
1465 | |
1466 | /* Register a new callback (a user-supplied filter) to control the */ |
1467 | /* scanning of dynamic libraries. Replaces any previously registered */ |
1468 | /* callback. May be 0 (means no filtering). May be unused on some */ |
1469 | /* platforms (if the filtering is unimplemented or inappropriate). */ |
1470 | GC_API void GC_CALL GC_register_has_static_roots_callback( |
1471 | GC_has_static_roots_func); |
1472 | |
1473 | #if defined(GC_WIN32_THREADS) \ |
1474 | && (!defined(GC_PTHREADS) || defined(GC_BUILD) || defined(WINAPI)) |
1475 | /* Note: for Cygwin and win32-pthread, this is skipped */ |
1476 | /* unless windows.h is included before gc.h. */ |
1477 | |
1478 | # if !defined(GC_NO_THREAD_DECLS) || defined(GC_BUILD) |
1479 | |
1480 | # ifdef __cplusplus |
1481 | } /* Including windows.h in an extern "C" context no longer works. */ |
1482 | # endif |
1483 | |
1484 | # if !defined(_WIN32_WCE) && !defined(__CEGCC__) |
1485 | # include <process.h> /* For _beginthreadex, _endthreadex */ |
1486 | # endif |
1487 | |
1488 | # include <windows.h> |
1489 | |
1490 | # ifdef __cplusplus |
1491 | extern "C" { |
1492 | # endif |
1493 | |
1494 | # ifdef GC_UNDERSCORE_STDCALL |
1495 | /* Explicitly prefix exported/imported WINAPI (__stdcall) symbols */ |
1496 | /* with '_' (underscore). Might be useful if MinGW/x86 is used. */ |
1497 | # define GC_CreateThread _GC_CreateThread |
1498 | # define GC_ExitThread _GC_ExitThread |
1499 | # endif |
1500 | |
1501 | # ifdef GC_INSIDE_DLL |
1502 | /* Export GC DllMain to be invoked from client DllMain. */ |
1503 | # ifdef GC_UNDERSCORE_STDCALL |
1504 | # define GC_DllMain _GC_DllMain |
1505 | # endif |
1506 | GC_API BOOL WINAPI GC_DllMain(HINSTANCE /* inst */, ULONG /* reason */, |
1507 | LPVOID /* reserved */); |
1508 | # endif /* GC_INSIDE_DLL */ |
1509 | |
1510 | # if !defined(_UINTPTR_T) && !defined(_UINTPTR_T_DEFINED) \ |
1511 | && !defined(UINTPTR_MAX) |
1512 | typedef GC_word GC_uintptr_t; |
1513 | # else |
1514 | typedef uintptr_t GC_uintptr_t; |
1515 | # endif |
1516 | # define GC_WIN32_SIZE_T GC_uintptr_t |
1517 | |
1518 | /* All threads must be created using GC_CreateThread or */ |
1519 | /* GC_beginthreadex, or must explicitly call GC_register_my_thread */ |
1520 | /* (and call GC_unregister_my_thread before thread termination), so */ |
1521 | /* that they will be recorded in the thread table. For backward */ |
1522 | /* compatibility, it is possible to build the GC with GC_DLL */ |
1523 | /* defined, and to call GC_use_threads_discovery. This implicitly */ |
1524 | /* registers all created threads, but appears to be less robust. */ |
1525 | /* Currently the collector expects all threads to fall through and */ |
1526 | /* terminate normally, or call GC_endthreadex() or GC_ExitThread, */ |
1527 | /* so that the thread is properly unregistered. */ |
1528 | GC_API HANDLE WINAPI GC_CreateThread( |
1529 | LPSECURITY_ATTRIBUTES /* lpThreadAttributes */, |
1530 | GC_WIN32_SIZE_T /* dwStackSize */, |
1531 | LPTHREAD_START_ROUTINE /* lpStartAddress */, |
1532 | LPVOID /* lpParameter */, DWORD /* dwCreationFlags */, |
1533 | LPDWORD /* lpThreadId */); |
1534 | |
1535 | # ifndef DECLSPEC_NORETURN |
1536 | /* Typically defined in winnt.h. */ |
1537 | # define DECLSPEC_NORETURN /* empty */ |
1538 | # endif |
1539 | |
1540 | GC_API DECLSPEC_NORETURN void WINAPI GC_ExitThread( |
1541 | DWORD /* dwExitCode */); |
1542 | |
1543 | # if !defined(_WIN32_WCE) && !defined(__CEGCC__) |
1544 | GC_API GC_uintptr_t GC_CALL GC_beginthreadex( |
1545 | void * /* security */, unsigned /* stack_size */, |
1546 | unsigned (__stdcall *)(void *), |
1547 | void * /* arglist */, unsigned /* initflag */, |
1548 | unsigned * /* thrdaddr */); |
1549 | |
    /* Note: _endthreadex() is not currently marked as no-return in   */
    /* VC++ and MinGW headers, so we do not mark it as such either.   */
1552 | GC_API void GC_CALL GC_endthreadex(unsigned /* retval */); |
1553 | # endif /* !_WIN32_WCE */ |
1554 | |
1555 | # endif /* !GC_NO_THREAD_DECLS */ |
1556 | |
1557 | # ifdef GC_WINMAIN_REDIRECT |
1558 | /* win32_threads.c implements the real WinMain(), which will start */ |
1559 | /* a new thread to call GC_WinMain() after initializing the garbage */ |
1560 | /* collector. */ |
1561 | # define WinMain GC_WinMain |
1562 | # endif |
1563 | |
1564 | /* For compatibility only. */ |
1565 | # define GC_use_DllMain GC_use_threads_discovery |
1566 | |
1567 | # ifndef GC_NO_THREAD_REDIRECTS |
1568 | # define CreateThread GC_CreateThread |
1569 | # define ExitThread GC_ExitThread |
1570 | # undef _beginthreadex |
1571 | # define _beginthreadex GC_beginthreadex |
1572 | # undef _endthreadex |
1573 | # define _endthreadex GC_endthreadex |
1574 | /* #define _beginthread { > "Please use _beginthreadex instead of _beginthread" < } */ |
1575 | # endif /* !GC_NO_THREAD_REDIRECTS */ |
1576 | |
1577 | #endif /* GC_WIN32_THREADS */ |
1578 | |
1579 | /* Public setter and getter for switching "unmap as much as possible" */ |
1580 | /* mode on(1) and off(0). Has no effect unless unmapping is turned on. */ |
1581 | /* Has no effect on implicitly-initiated garbage collections. Initial */ |
1582 | /* value is controlled by GC_FORCE_UNMAP_ON_GCOLLECT. The setter and */ |
1583 | /* getter are unsynchronized. */ |
1584 | GC_API void GC_CALL GC_set_force_unmap_on_gcollect(int); |
1585 | GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void); |
1586 | |
1587 | /* Fully portable code should call GC_INIT() from the main program */ |
1588 | /* before making any other GC_ calls. On most platforms this is a */ |
1589 | /* no-op and the collector self-initializes. But a number of */ |
1590 | /* platforms make that too hard. */ |
1591 | /* A GC_INIT call is required if the collector is built with */ |
1592 | /* THREAD_LOCAL_ALLOC defined and the initial allocation call is not */ |
1593 | /* to GC_malloc() or GC_malloc_atomic(). */ |
1594 | |
1595 | #if defined(__CYGWIN32__) || defined(__CYGWIN__) |
1596 | /* Similarly gnu-win32 DLLs need explicit initialization from the */ |
1597 | /* main program, as does AIX. */ |
1598 | extern int _data_start__[], _data_end__[], _bss_start__[], _bss_end__[]; |
1599 | # define GC_DATASTART ((GC_word)_data_start__ < (GC_word)_bss_start__ ? \ |
1600 | (void *)_data_start__ : (void *)_bss_start__) |
1601 | # define GC_DATAEND ((GC_word)_data_end__ > (GC_word)_bss_end__ ? \ |
1602 | (void *)_data_end__ : (void *)_bss_end__) |
1603 | # define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND); \ |
1604 | GC_gcollect() /* For blacklisting. */ |
1605 | /* Required at least if GC is in a DLL. And doesn't hurt. */ |
1606 | #elif defined(_AIX) |
1607 | extern int _data[], _end[]; |
1608 | # define GC_DATASTART ((void *)((ulong)_data)) |
1609 | # define GC_DATAEND ((void *)((ulong)_end)) |
1610 | # define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND) |
1611 | #elif (defined(PLATFORM_ANDROID) || defined(__ANDROID__)) \ |
1612 | && !defined(GC_NOT_DLL) |
1613 | # pragma weak __data_start |
1614 | extern int __data_start[], _end[]; |
1615 | # pragma weak _etext |
1616 | # pragma weak __dso_handle |
1617 | extern int _etext[], __dso_handle[]; |
1618 | /* Explicitly register caller static data roots (__data_start points */ |
1619 | /* to the beginning typically but NDK "gold" linker could provide it */ |
1620 | /* incorrectly, so the workaround is to check the value and use */ |
1621 | /* __dso_handle as an alternative data start reference if provided). */ |
1622 | /* It also works for Android/x86 target where __data_start is not */ |
1623 | /* defined currently (regardless of linker used). */ |
1624 | # define GC_INIT_CONF_ROOTS \ |
1625 | (void)((GC_word)__data_start < (GC_word)_etext \ |
1626 | && (GC_word)_etext < (GC_word)__dso_handle ? \ |
1627 | (GC_add_roots(__dso_handle, _end), 0) : \ |
1628 | (GC_word)__data_start != 0 ? \ |
1629 | (GC_add_roots(__data_start, _end), 0) : 0) |
1630 | #else |
1631 | # define GC_INIT_CONF_ROOTS /* empty */ |
1632 | #endif |
1633 | |
1634 | #ifdef GC_DONT_EXPAND |
1635 | /* Set GC_dont_expand to TRUE at start-up */ |
1636 | # define GC_INIT_CONF_DONT_EXPAND GC_set_dont_expand(1) |
1637 | #else |
1638 | # define GC_INIT_CONF_DONT_EXPAND /* empty */ |
1639 | #endif |
1640 | |
1641 | #ifdef GC_FORCE_UNMAP_ON_GCOLLECT |
1642 | /* Turn on "unmap as much as possible on explicit GC" mode at start-up */ |
1643 | # define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT \ |
1644 | GC_set_force_unmap_on_gcollect(1) |
1645 | #else |
1646 | # define GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT /* empty */ |
1647 | #endif |
1648 | |
1649 | #ifdef GC_DONT_GC |
1650 | /* This is for debugging only (useful if environment variables are */ |
1651 | /* unsupported); cannot call GC_disable as goes before GC_init. */ |
1652 | # define GC_INIT_CONF_MAX_RETRIES (void)(GC_dont_gc = 1) |
1653 | #elif defined(GC_MAX_RETRIES) |
1654 | /* Set GC_max_retries to the desired value at start-up */ |
1655 | # define GC_INIT_CONF_MAX_RETRIES GC_set_max_retries(GC_MAX_RETRIES) |
1656 | #else |
1657 | # define GC_INIT_CONF_MAX_RETRIES /* empty */ |
1658 | #endif |
1659 | |
1660 | #ifdef GC_FREE_SPACE_DIVISOR |
1661 | /* Set GC_free_space_divisor to the desired value at start-up */ |
1662 | # define GC_INIT_CONF_FREE_SPACE_DIVISOR \ |
1663 | GC_set_free_space_divisor(GC_FREE_SPACE_DIVISOR) |
1664 | #else |
1665 | # define GC_INIT_CONF_FREE_SPACE_DIVISOR /* empty */ |
1666 | #endif |
1667 | |
1668 | #ifdef GC_FULL_FREQ |
1669 | /* Set GC_full_freq to the desired value at start-up */ |
1670 | # define GC_INIT_CONF_FULL_FREQ GC_set_full_freq(GC_FULL_FREQ) |
1671 | #else |
1672 | # define GC_INIT_CONF_FULL_FREQ /* empty */ |
1673 | #endif |
1674 | |
1675 | #ifdef GC_TIME_LIMIT |
1676 | /* Set GC_time_limit to the desired value at start-up */ |
1677 | # define GC_INIT_CONF_TIME_LIMIT GC_set_time_limit(GC_TIME_LIMIT) |
1678 | #else |
1679 | # define GC_INIT_CONF_TIME_LIMIT /* empty */ |
1680 | #endif |
1681 | |
1682 | #if defined(GC_SIG_SUSPEND) && defined(GC_THREADS) |
1683 | # define GC_INIT_CONF_SUSPEND_SIGNAL GC_set_suspend_signal(GC_SIG_SUSPEND) |
1684 | #else |
1685 | # define GC_INIT_CONF_SUSPEND_SIGNAL /* empty */ |
1686 | #endif |
1687 | |
1688 | #if defined(GC_SIG_THR_RESTART) && defined(GC_THREADS) |
1689 | # define GC_INIT_CONF_THR_RESTART_SIGNAL \ |
1690 | GC_set_thr_restart_signal(GC_SIG_THR_RESTART) |
1691 | #else |
1692 | # define GC_INIT_CONF_THR_RESTART_SIGNAL /* empty */ |
1693 | #endif |
1694 | |
1695 | #ifdef GC_MAXIMUM_HEAP_SIZE |
1696 | /* Limit the heap size to the desired value (useful for debugging). */ |
1697 | /* The limit could be overridden either at the program start-up by */ |
1698 | /* the similar environment variable or anytime later by the */ |
1699 | /* corresponding API function call. */ |
1700 | # define GC_INIT_CONF_MAXIMUM_HEAP_SIZE \ |
1701 | GC_set_max_heap_size(GC_MAXIMUM_HEAP_SIZE) |
1702 | #else |
1703 | # define GC_INIT_CONF_MAXIMUM_HEAP_SIZE /* empty */ |
1704 | #endif |
1705 | |
1706 | #ifdef GC_IGNORE_WARN |
1707 | /* Turn off all warnings at start-up (after GC initialization) */ |
1708 | # define GC_INIT_CONF_IGNORE_WARN GC_set_warn_proc(GC_ignore_warn_proc) |
1709 | #else |
1710 | # define GC_INIT_CONF_IGNORE_WARN /* empty */ |
1711 | #endif |
1712 | |
#ifdef GC_INITIAL_HEAP_SIZE
  /* Set heap size to the desired value at start-up.  Expands the */
  /* heap only if it is currently smaller than the requested size. */
  /* Note: this expands to a brace-enclosed statement (it declares a */
  /* local), not an expression, matching its use inside GC_INIT(). */
# define GC_INIT_CONF_INITIAL_HEAP_SIZE \
                { size_t heap_size = GC_get_heap_size(); \
                  if (heap_size < (GC_INITIAL_HEAP_SIZE)) \
                    (void)GC_expand_hp((GC_INITIAL_HEAP_SIZE) - heap_size); }
#else
# define GC_INIT_CONF_INITIAL_HEAP_SIZE /* empty */
#endif
1722 | |
/* Portable clients should call this at the program start-up. */
/* Moreover, some platforms require this call to be done strictly */
/* from the primordial thread.  The GC_INIT_CONF_* macros marked */
/* "pre-init" expand before the GC_init() call, the remainder after */
/* it; each expands to nothing unless its controlling macro (e.g. */
/* GC_DONT_EXPAND, GC_MAX_RETRIES) was defined at compile time. */
#define GC_INIT() { GC_INIT_CONF_DONT_EXPAND; /* pre-init */ \
                    GC_INIT_CONF_FORCE_UNMAP_ON_GCOLLECT; \
                    GC_INIT_CONF_MAX_RETRIES; \
                    GC_INIT_CONF_FREE_SPACE_DIVISOR; \
                    GC_INIT_CONF_FULL_FREQ; \
                    GC_INIT_CONF_TIME_LIMIT; \
                    GC_INIT_CONF_SUSPEND_SIGNAL; \
                    GC_INIT_CONF_THR_RESTART_SIGNAL; \
                    GC_INIT_CONF_MAXIMUM_HEAP_SIZE; \
                    GC_init(); /* real GC initialization */ \
                    GC_INIT_CONF_ROOTS; /* post-init */ \
                    GC_INIT_CONF_IGNORE_WARN; \
                    GC_INIT_CONF_INITIAL_HEAP_SIZE; }
1739 | |
1740 | /* win32S may not free all resources on process exit. */ |
1741 | /* This explicitly deallocates the heap. */ |
1742 | GC_API void GC_CALL GC_win32_free_heap(void); |
1743 | |
1744 | #if defined(__SYMBIAN32__) |
1745 | void GC_init_global_static_roots(void); |
1746 | #endif |
1747 | |
1748 | #if defined(_AMIGA) && !defined(GC_AMIGA_MAKINGLIB) |
1749 | /* Allocation really goes through GC_amiga_allocwrapper_do. */ |
1750 | void *GC_amiga_realloc(void *, size_t); |
1751 | # define GC_realloc(a,b) GC_amiga_realloc(a,b) |
1752 | void GC_amiga_set_toany(void (*)(void)); |
1753 | extern int GC_amiga_free_space_divisor_inc; |
1754 | extern void *(*GC_amiga_allocwrapper_do)(size_t, void *(GC_CALL *)(size_t)); |
1755 | # define GC_malloc(a) \ |
1756 | (*GC_amiga_allocwrapper_do)(a,GC_malloc) |
1757 | # define GC_malloc_atomic(a) \ |
1758 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic) |
1759 | # define GC_malloc_uncollectable(a) \ |
1760 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_uncollectable) |
1761 | # define GC_malloc_stubborn(a) \ |
1762 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_stubborn) |
1763 | # define GC_malloc_atomic_uncollectable(a) \ |
1764 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_uncollectable) |
1765 | # define GC_malloc_ignore_off_page(a) \ |
1766 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_ignore_off_page) |
1767 | # define GC_malloc_atomic_ignore_off_page(a) \ |
1768 | (*GC_amiga_allocwrapper_do)(a,GC_malloc_atomic_ignore_off_page) |
1769 | #endif /* _AMIGA && !GC_AMIGA_MAKINGLIB */ |
1770 | |
1771 | #ifdef __cplusplus |
1772 | } /* end of extern "C" */ |
1773 | #endif |
1774 | |
1775 | #endif /* GC_H */ |
1776 | |