#define JEMALLOC_LARGE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/

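/*
 * Allocate a large region whose usize is already a valid size class, using the
 * default CACHELINE alignment.
 */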
void *
large_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero) {
	assert(usize == sz_s2u(usize));

	return large_palloc(tsdn, arena, usize, CACHELINE, zero);
}

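/*
 * Allocate a large region with the requested alignment.  Returns NULL if the
 * padded size overflows SC_LARGE_MAXCLASS or if extent allocation fails.
 */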
void *
large_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero) {
	size_t ausize;
	extent_t *extent;
	bool is_zeroed;
	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);

	assert(!tsdn_null(tsdn) || arena != NULL);

	ausize = sz_sa2u(usize, alignment);
	if (unlikely(ausize == 0 || ausize > SC_LARGE_MAXCLASS)) {
		return NULL;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed ends up true when zero is false.
	 */
	is_zeroed = zero;
	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, usize);
	}
	if (unlikely(arena == NULL) || (extent = arena_extent_alloc_large(tsdn,
	    arena, usize, alignment, &is_zeroed)) == NULL) {
		return NULL;
	}

	/* See comments in arena_bin_slabs_full_insert(). */
	if (!arena_is_auto(arena)) {
		/* Insert extent into large. */
		malloc_mutex_lock(tsdn, &arena->large_mtx);
		extent_list_append(&arena->large, extent);
		malloc_mutex_unlock(tsdn, &arena->large_mtx);
	}
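	/*
	 * Count this allocation toward the profile dump interval, and dump if
	 * the interval has been reached.
	 */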
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (zero) {
		assert(is_zeroed);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset(extent_addr_get(extent), JEMALLOC_ALLOC_JUNK,
		    extent_usize_get(extent));
	}

	arena_decay_tick(tsdn, arena);
	return extent_addr_get(extent);
}

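/*
 * Default junk-fill routine for freed large regions; tests can override it
 * through the JET_MUTABLE function pointer below.
 */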
static void
large_dalloc_junk_impl(void *ptr, size_t size) {
	memset(ptr, JEMALLOC_FREE_JUNK, size);
}
large_dalloc_junk_t *JET_MUTABLE large_dalloc_junk = large_dalloc_junk_impl;

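/*
 * Junk-fill a freed large region only when the fill can matter, i.e. when the
 * backing memory is not about to be unmapped.
 */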
static void
large_dalloc_maybe_junk_impl(void *ptr, size_t size) {
	if (config_fill && have_dss && unlikely(opt_junk_free)) {
		/*
		 * Only bother junk filling if the extent isn't about to be
		 * unmapped.
		 */
		if (opt_retain || (have_dss && extent_in_dss(ptr))) {
			large_dalloc_junk(ptr, size);
		}
	}
}
large_dalloc_maybe_junk_t *JET_MUTABLE large_dalloc_maybe_junk =
    large_dalloc_maybe_junk_impl;

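/*
 * Shrink a large allocation in place by splitting off the excess and returning
 * it to the arena's dirty extents.  Returns false on success, true if the
 * extent could not be split.
 */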
static bool
large_ralloc_no_move_shrink(tsdn_t *tsdn, extent_t *extent, size_t usize) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t diff = extent_size_get(extent) - (usize + sz_large_pad);

	assert(oldusize > usize);

	if (extent_hooks->split == NULL) {
		return true;
	}

	/* Split excess pages. */
	if (diff != 0) {
		extent_t *trail = extent_split_wrapper(tsdn, arena,
		    &extent_hooks, extent, usize + sz_large_pad,
		    sz_size2index(usize), false, diff, SC_NSIZES, false);
		if (trail == NULL) {
			return true;
		}

		if (config_fill && unlikely(opt_junk_free)) {
			large_dalloc_maybe_junk(extent_addr_get(trail),
			    extent_size_get(trail));
		}

		arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, trail);
	}

	arena_extent_ralloc_large_shrink(tsdn, arena, extent, oldusize);

	return false;
}

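/*
 * Grow a large allocation in place by allocating the address space immediately
 * past the extent and merging it in.  Returns false on success, true if no
 * suitable trailing extent could be obtained or merged.
 */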
static bool
large_ralloc_no_move_expand(tsdn_t *tsdn, extent_t *extent, size_t usize,
    bool zero) {
	arena_t *arena = extent_arena_get(extent);
	size_t oldusize = extent_usize_get(extent);
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	size_t trailsize = usize - oldusize;

	if (extent_hooks->merge == NULL) {
		return true;
	}

	if (config_fill && unlikely(opt_zero)) {
		zero = true;
	}
	/*
	 * Copy zero into is_zeroed_trail and pass the copy when allocating the
	 * extent, so that it is possible to make correct junk/zero fill
	 * decisions below, even if is_zeroed_trail ends up true when zero is
	 * false.
	 */
	bool is_zeroed_trail = zero;
	bool commit = true;
	extent_t *trail;
	bool new_mapping;
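	/*
	 * Look for a trailing extent that starts exactly at the end of this
	 * one: first among the cached dirty extents, then among the muzzy
	 * extents, and only then by allocating a brand new extent (counted as
	 * a new mapping for stats purposes).
	 */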
	if ((trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL
	    || (trail = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_muzzy, extent_past_get(extent), trailsize, 0,
	    CACHELINE, false, SC_NSIZES, &is_zeroed_trail, &commit)) != NULL) {
		if (config_stats) {
			new_mapping = false;
		}
	} else {
		if ((trail = extent_alloc_wrapper(tsdn, arena, &extent_hooks,
		    extent_past_get(extent), trailsize, 0, CACHELINE, false,
		    SC_NSIZES, &is_zeroed_trail, &commit)) == NULL) {
			return true;
		}
		if (config_stats) {
			new_mapping = true;
		}
	}

	if (extent_merge_wrapper(tsdn, arena, &extent_hooks, extent, trail)) {
		extent_dalloc_wrapper(tsdn, arena, &extent_hooks, trail);
		return true;
	}
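	/*
	 * The merge succeeded, so the extent now backs a larger size class.
	 * Update the extent's size class index and the rtree mapping so that
	 * subsequent lookups observe the new size.
	 */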
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_addr_get(extent), szind, false);

	if (config_stats && new_mapping) {
		arena_stats_mapped_add(tsdn, &arena->stats, trailsize);
	}

	if (zero) {
		if (config_cache_oblivious) {
			/*
			 * Zero the trailing bytes of the original allocation's
			 * last page, since they are in an indeterminate state.
			 * There will always be trailing bytes, because ptr's
			 * offset from the beginning of the extent is a multiple
			 * of CACHELINE in [0 .. PAGE).
			 */
			void *zbase = (void *)
			    ((uintptr_t)extent_addr_get(extent) + oldusize);
			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
			    PAGE));
			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
			assert(nzero > 0);
			memset(zbase, 0, nzero);
		}
		assert(is_zeroed_trail);
	} else if (config_fill && unlikely(opt_junk_alloc)) {
		memset((void *)((uintptr_t)extent_addr_get(extent) + oldusize),
		    JEMALLOC_ALLOC_JUNK, usize - oldusize);
	}

	arena_extent_ralloc_large_expand(tsdn, arena, extent, oldusize);

	return false;
}

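/*
 * Attempt to satisfy a reallocation request without moving the allocation:
 * expand in place if the request grows, keep the extent unchanged if the
 * current size already fits, or shrink in place otherwise.  Returns false if
 * no move is needed, true if the caller must allocate, copy, and free.
 */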
bool
large_ralloc_no_move(tsdn_t *tsdn, extent_t *extent, size_t usize_min,
    size_t usize_max, bool zero) {
	size_t oldusize = extent_usize_get(extent);

	/* The following should have been caught by callers. */
	assert(usize_min > 0 && usize_max <= SC_LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= SC_LARGE_MINCLASS
	    && usize_max >= SC_LARGE_MINCLASS);

	if (usize_max > oldusize) {
		/* Attempt to expand the allocation in-place. */
		if (!large_ralloc_no_move_expand(tsdn, extent, usize_max,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
		/* Try again, this time with usize_min. */
		if (usize_min < usize_max && usize_min > oldusize &&
		    !large_ralloc_no_move_expand(tsdn, extent, usize_min,
		    zero)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}

	/*
	 * Avoid moving the allocation if the existing extent size accommodates
	 * the new size.
	 */
	if (oldusize >= usize_min && oldusize <= usize_max) {
		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	}

	/* Attempt to shrink the allocation in-place. */
	if (oldusize > usize_max) {
		if (!large_ralloc_no_move_shrink(tsdn, extent, usize_max)) {
			arena_decay_tick(tsdn, extent_arena_get(extent));
			return false;
		}
	}
	return true;
}

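/*
 * Choose the allocation path for a moving reallocation: large_malloc() when
 * the default CACHELINE alignment suffices, large_palloc() otherwise.
 */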
static void *
large_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero) {
	if (alignment <= CACHELINE) {
		return large_malloc(tsdn, arena, usize, zero);
	}
	return large_palloc(tsdn, arena, usize, alignment, zero);
}

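/*
 * Reallocate a large allocation, preferring in-place resizing and falling back
 * to allocate-copy-free.  The hooks invoked match the operation that was
 * actually performed (expand vs. alloc followed by dalloc).
 */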
void *
large_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args) {
	extent_t *extent = iealloc(tsdn, ptr);

	size_t oldusize = extent_usize_get(extent);
	/* The following should have been caught by callers. */
	assert(usize > 0 && usize <= SC_LARGE_MAXCLASS);
	/* Both allocation sizes must be large to avoid a move. */
	assert(oldusize >= SC_LARGE_MINCLASS
	    && usize >= SC_LARGE_MINCLASS);

	/* Try to avoid moving the allocation. */
	if (!large_ralloc_no_move(tsdn, extent, usize, usize, zero)) {
		hook_invoke_expand(hook_args->is_realloc
		    ? hook_expand_realloc : hook_expand_rallocx, ptr, oldusize,
		    usize, (uintptr_t)ptr, hook_args->args);
		return extent_addr_get(extent);
	}

	/*
	 * usize and old size are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	void *ret = large_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero);
	if (ret == NULL) {
		return NULL;
	}

	hook_invoke_alloc(hook_args->is_realloc
	    ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret,
	    hook_args->args);
	hook_invoke_dalloc(hook_args->is_realloc
	    ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);

	size_t copysize = (usize < oldusize) ? usize : oldusize;
	memcpy(ret, extent_addr_get(extent), copysize);
	isdalloct(tsdn, extent_addr_get(extent), oldusize, tcache, NULL, true);
	return ret;
}

/*
 * junked_locked indicates whether the extent's data have been junk-filled, and
 * whether the arena's large_mtx is currently held.
 */
static void
large_dalloc_prep_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    bool junked_locked) {
	if (!junked_locked) {
		/* See comments in arena_bin_slabs_full_insert(). */
		if (!arena_is_auto(arena)) {
			malloc_mutex_lock(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
			malloc_mutex_unlock(tsdn, &arena->large_mtx);
		}
		large_dalloc_maybe_junk(extent_addr_get(extent),
		    extent_usize_get(extent));
	} else {
		/* Only hold the large_mtx if necessary. */
		if (!arena_is_auto(arena)) {
			malloc_mutex_assert_owner(tsdn, &arena->large_mtx);
			extent_list_remove(&arena->large, extent);
		}
	}
	arena_extent_dalloc_large_prep(tsdn, arena, extent);
}

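/*
 * Return the extent backing a large allocation to the arena's dirty extents,
 * from which its pages can later be purged.
 */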
static void
large_dalloc_finish_impl(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, extent);
}

void
large_dalloc_prep_junked_locked(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_prep_impl(tsdn, extent_arena_get(extent), extent, true);
}

void
large_dalloc_finish(tsdn_t *tsdn, extent_t *extent) {
	large_dalloc_finish_impl(tsdn, extent_arena_get(extent), extent);
}

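/*
 * Complete deallocation path for a large allocation: remove it from the
 * arena's bookkeeping, junk-fill it if configured to do so, and hand the
 * extent back to the arena.
 */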
void
large_dalloc(tsdn_t *tsdn, extent_t *extent) {
	arena_t *arena = extent_arena_get(extent);
	large_dalloc_prep_impl(tsdn, arena, extent, false);
	large_dalloc_finish_impl(tsdn, arena, extent);
	arena_decay_tick(tsdn, arena);
}

size_t
large_salloc(tsdn_t *tsdn, const extent_t *extent) {
	return extent_usize_get(extent);
}

prof_tctx_t *
large_prof_tctx_get(tsdn_t *tsdn, const extent_t *extent) {
	return extent_prof_tctx_get(extent);
}

void
large_prof_tctx_set(tsdn_t *tsdn, extent_t *extent, prof_tctx_t *tctx) {
	extent_prof_tctx_set(extent, tctx);
}

void
large_prof_tctx_reset(tsdn_t *tsdn, extent_t *extent) {
	large_prof_tctx_set(tsdn, extent, (prof_tctx_t *)(uintptr_t)1U);
}

nstime_t
large_prof_alloc_time_get(const extent_t *extent) {
	return extent_prof_alloc_time_get(extent);
}

void
large_prof_alloc_time_set(extent_t *extent, nstime_t t) {
	extent_prof_alloc_time_set(extent, t);
}