#define JEMALLOC_TSD_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static unsigned ncleanups;
static malloc_tsd_cleanup_t cleanups[MALLOC_TSD_CLEANUPS_MAX];

malloc_tsd_data(, , tsd_t, TSD_INITIALIZER)
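
/*
 * The malloc_tsd_data() invocation above expands to the global data backing
 * the tsd wrapper for the tsd_t slot (the TLS variable and/or pthread/Windows
 * key, plus its booted flag), with the exact expansion depending on which
 * backend was selected at configure time (__thread TLS, native TSD, Windows
 * TLS, or the pthreads fallback).
 */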

/******************************************************************************/

void *
malloc_tsd_malloc(size_t size)
{

	return (a0malloc(CACHELINE_CEILING(size)));
}

void
malloc_tsd_dalloc(void *wrapper)
{

	a0dalloc(wrapper);
}
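
/*
 * malloc_tsd_malloc() and malloc_tsd_dalloc() route tsd wrapper allocation
 * through the bootstrap arena (a0*), so they are usable before the allocator
 * is fully initialized and do not themselves depend on tsd.  The
 * CACHELINE_CEILING() padding rounds requests up to a cacheline multiple,
 * presumably to avoid false sharing between threads' wrappers.
 */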

void
malloc_tsd_no_cleanup(void *arg)
{

	not_reached();
}
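
/*
 * malloc_tsd_no_cleanup() is a stand-in destructor for tsd slots that need no
 * cleanup; the wrappers are expected never to actually invoke it, hence
 * not_reached().
 */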

#if defined(JEMALLOC_MALLOC_THREAD_CLEANUP) || defined(_WIN32)
#ifndef _WIN32
JEMALLOC_EXPORT
#endif
void
_malloc_thread_cleanup(void)
{
	bool pending[MALLOC_TSD_CLEANUPS_MAX], again;
	unsigned i;

	for (i = 0; i < ncleanups; i++)
		pending[i] = true;

	do {
		again = false;
		for (i = 0; i < ncleanups; i++) {
			if (pending[i]) {
				pending[i] = cleanups[i]();
				if (pending[i])
					again = true;
			}
		}
	} while (again);
}
#endif
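
/*
 * _malloc_thread_cleanup() makes repeated passes over the registered hooks
 * because cleanups may interact: a hook returns true to request another pass
 * (e.g. when running it caused data owned by an earlier hook to be recreated),
 * and the loop only terminates once a full pass completes with no hook still
 * pending.
 */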

void
malloc_tsd_cleanup_register(bool (*f)(void))
{

	assert(ncleanups < MALLOC_TSD_CLEANUPS_MAX);
	cleanups[ncleanups] = f;
	ncleanups++;
}
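
/*
 * Illustrative registration sketch; the names example_tsd_cleanup_hook and
 * release_example_state() are hypothetical, not real jemalloc symbols.  A
 * hook returns false once its per-thread state is fully released, or true to
 * request another cleanup pass:
 *
 *	static bool
 *	example_tsd_cleanup_hook(void)
 *	{
 *		release_example_state();
 *		return (false);
 *	}
 *
 *	malloc_tsd_cleanup_register(example_tsd_cleanup_hook);
 */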

void
tsd_cleanup(void *arg)
{
	tsd_t *tsd = (tsd_t *)arg;

	switch (tsd->state) {
	case tsd_state_uninitialized:
		/* Do nothing. */
		break;
	case tsd_state_nominal:
#define O(n, t)								\
		n##_cleanup(tsd);
MALLOC_TSD
#undef O
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	case tsd_state_purgatory:
		/*
		 * The previous time this destructor was called, we set the
		 * state to tsd_state_purgatory so that other destructors
		 * wouldn't cause re-creation of the tsd.  This time, do
		 * nothing, and do not request another callback.
		 */
		break;
	case tsd_state_reincarnated:
		/*
		 * Another destructor deallocated memory after this destructor
		 * was called.  Reset state to tsd_state_purgatory and request
		 * another callback.
		 */
		tsd->state = tsd_state_purgatory;
		tsd_set(tsd);
		break;
	default:
		not_reached();
	}
}
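
/*
 * tsd_cleanup() is the destructor registered for the tsd_t slot.  In the
 * nominal case the O() expansion over MALLOC_TSD invokes each member's
 * <n>_cleanup(tsd) hook in declaration order, after which the state is parked
 * in tsd_state_purgatory (and re-set via tsd_set()) so that destructors of
 * other TSD users running later see an already-cleaned tsd instead of
 * recreating it; tsd_state_reincarnated covers the case where such a late
 * destructor did allocate again and one more cleanup pass is required.
 */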

tsd_t *
malloc_tsd_boot0(void)
{
	tsd_t *tsd;

	ncleanups = 0;
	if (tsd_boot0())
		return (NULL);
	tsd = tsd_fetch();
	*tsd_arenas_tdata_bypassp_get(tsd) = true;
	return (tsd);
}

void
malloc_tsd_boot1(void)
{

	tsd_boot1();
	*tsd_arenas_tdata_bypassp_get(tsd_fetch()) = false;
}
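
/*
 * TSD bootstrapping is split in two phases: malloc_tsd_boot0() runs early,
 * before the arenas_tdata machinery is usable, so it sets the arenas_tdata
 * bypass flag to keep early tsd users away from it; malloc_tsd_boot1() runs
 * once initialization has progressed far enough and clears the bypass.
 */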

#ifdef _WIN32
static BOOL WINAPI
_tls_callback(HINSTANCE hinstDLL, DWORD fdwReason, LPVOID lpvReserved)
{

	switch (fdwReason) {
#ifdef JEMALLOC_LAZY_LOCK
	case DLL_THREAD_ATTACH:
		isthreaded = true;
		break;
#endif
	case DLL_THREAD_DETACH:
		_malloc_thread_cleanup();
		break;
	default:
		break;
	}
	return (true);
}
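
/*
 * The MSVC-specific block below arranges for _tls_callback() to be invoked by
 * the CRT on thread attach/detach without relying on DllMain: the function
 * pointer is placed in the ".CRT$XLY" TLS callback section, and the /INCLUDE
 * pragmas force the linker to keep the TLS directory and the callback symbol
 * (the names differ between x86 and x64 because of the leading-underscore
 * decoration on x86).
 */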

#ifdef _MSC_VER
#  ifdef _M_IX86
#    pragma comment(linker, "/INCLUDE:__tls_used")
#    pragma comment(linker, "/INCLUDE:_tls_callback")
#  else
#    pragma comment(linker, "/INCLUDE:_tls_used")
#    pragma comment(linker, "/INCLUDE:tls_callback")
#  endif
#  pragma section(".CRT$XLY",long,read)
#endif
JEMALLOC_SECTION(".CRT$XLY") JEMALLOC_ATTR(used)
BOOL (WINAPI *const tls_callback)(HINSTANCE hinstDLL,
    DWORD fdwReason, LPVOID lpvReserved) = _tls_callback;
#endif

#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
    !defined(_WIN32))
void *
tsd_init_check_recursion(tsd_init_head_t *head, tsd_init_block_t *block)
{
	pthread_t self = pthread_self();
	tsd_init_block_t *iter;

	/* Check whether this thread has already inserted into the list. */
	malloc_mutex_lock(NULL, &head->lock);
	ql_foreach(iter, &head->blocks, link) {
		if (iter->thread == self) {
			malloc_mutex_unlock(NULL, &head->lock);
			return (iter->data);
		}
	}
	/* Insert block into list. */
	ql_elm_new(block, link);
	block->thread = self;
	ql_tail_insert(&head->blocks, block, link);
	malloc_mutex_unlock(NULL, &head->lock);
	return (NULL);
}
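
/*
 * tsd_init_check_recursion() exists only for the pthreads fallback backend,
 * where creating the tsd wrapper can itself call into the allocator and
 * re-enter tsd initialization on the same thread.  The per-thread entry in
 * head->blocks lets the nested call detect this and return the in-progress
 * block's data instead of recursing indefinitely.
 */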

void
tsd_init_finish(tsd_init_head_t *head, tsd_init_block_t *block)
{

	malloc_mutex_lock(NULL, &head->lock);
	ql_remove(&head->blocks, block, link);
	malloc_mutex_unlock(NULL, &head->lock);
}
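
/*
 * Expected calling pattern (sketch, simplified from the tsd wrapper code;
 * details elided):
 *
 *	tsd_init_block_t block;
 *	void *data = tsd_init_check_recursion(&head, &block);
 *	if (data != NULL)
 *		return (data);		// re-entered on this thread
 *	block.data = ...;		// allocate/initialize the wrapper
 *	tsd_init_finish(&head, &block);
 */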
#endif