1 | /* |
2 | * alloc.c |
3 | * |
4 | * Copyright (C) 2008-2019 Aerospike, Inc. |
5 | * |
6 | * Portions may be licensed to Aerospike, Inc. under one or more contributor |
7 | * license agreements. |
8 | * |
9 | * This program is free software: you can redistribute it and/or modify it under |
10 | * the terms of the GNU Affero General Public License as published by the Free |
11 | * Software Foundation, either version 3 of the License, or (at your option) any |
12 | * later version. |
13 | * |
14 | * This program is distributed in the hope that it will be useful, but WITHOUT |
15 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
16 | * FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more |
17 | * details. |
18 | * |
19 | * You should have received a copy of the GNU Affero General Public License |
20 | * along with this program. If not, see http://www.gnu.org/licenses/ |
21 | */ |
22 | |
23 | // Make sure that stdlib.h gives us aligned_alloc(). |
24 | #define _ISOC11_SOURCE |
25 | |
26 | #include "enhanced_alloc.h" |
27 | |
28 | #include <errno.h> |
29 | #include <inttypes.h> |
30 | #include <malloc.h> |
31 | #include <stdarg.h> |
32 | #include <stdbool.h> |
33 | #include <stddef.h> |
34 | #include <stdint.h> |
35 | #include <stdio.h> |
36 | #include <stdlib.h> |
37 | #include <string.h> |
38 | #include <time.h> |
39 | #include <unistd.h> |
40 | |
41 | #include <jemalloc/jemalloc.h> |
42 | |
43 | #include <sys/syscall.h> |
44 | #include <sys/types.h> |
45 | |
46 | #include "cf_thread.h" |
47 | #include "fault.h" |
48 | |
49 | #include "aerospike/as_random.h" |
50 | #include "aerospike/ck/ck_pr.h" |
51 | #include "citrusleaf/cf_atomic.h" |
52 | #include "citrusleaf/cf_clock.h" |
53 | |
54 | #include "warnings.h" |
55 | |
56 | #undef strdup |
57 | #undef strndup |
58 | |
59 | #define N_ARENAS 149 // used to be 150; now arena 149 is startup arena |
60 | #define PAGE_SZ 4096 |
61 | |
62 | #define MAX_SITES 4096 |
63 | #define MAX_THREADS 1024 |
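
// MULT/MULT_INV and MULT_64/MULT_INV_64 are multiplicative inverses modulo
// 2^32 and 2^64, respectively. They scramble the accounting trailers and
// indent pointers stored on the heap, so that stale or corrupted values are
// unlikely to pass validation; multiplying by *_INV undoes the scrambling.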
64 | |
65 | #define MULT 3486784401u |
66 | #define MULT_INV 3396732273u |
67 | |
68 | #define MULT_64 12157665459056928801ul |
69 | #define MULT_INV_64 12381265223964269537ul |
70 | |
71 | #define STR_(x) #x |
72 | #define STR(x) STR_(x) |
73 | |
74 | #define MAX_INDENT (32 * 8) |
75 | |
76 | typedef struct site_info_s { |
77 | uint32_t site_id; |
78 | pid_t thread_id; |
79 | size_t size_lo; |
80 | size_t size_hi; |
81 | } site_info; |
82 | |
83 | // Old glibc versions don't provide this; work around compiler warning. |
84 | void *aligned_alloc(size_t align, size_t sz); |
85 | |
86 | // When fortification is disabled, glibc's headers don't provide this. |
87 | int32_t __asprintf_chk(char **res, int32_t flags, const char *form, ...); |
88 | |
89 | const char *jem_malloc_conf = "narenas:" STR(N_ARENAS); |
90 | |
91 | extern size_t je_chunksize_mask; |
92 | extern void *je_huge_aalloc(const void *p); |
93 | |
94 | __thread int32_t g_ns_arena = -1; |
95 | static __thread int32_t g_ns_tcache = -1; |
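
// Allocation call sites are registered in g_site_ras; a site's slot index in
// that table is its site ID. g_site_infos holds one record per (site, thread)
// with a 128-bit byte counter (size_hi:size_lo), and g_thread_site_infos maps
// a site ID to the current thread's record.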
96 | |
97 | static const void *g_site_ras[MAX_SITES]; |
98 | static uint32_t g_n_site_ras; |
99 | |
100 | static site_info g_site_infos[MAX_SITES * MAX_THREADS]; |
// Start at 1, so we can use info ID 0 to mean "no info ID".
102 | static uint64_t g_n_site_infos = 1; |
103 | |
104 | static __thread uint32_t g_thread_site_infos[MAX_SITES]; |
105 | |
106 | bool g_alloc_started = false; |
107 | static int32_t g_startup_arena = -1; |
108 | |
109 | static cf_alloc_debug g_debug; |
110 | static bool g_indent; |
111 | |
112 | static __thread as_random g_rand = { .initialized = false }; |
113 | |
// All the hook_*() functions are invoked from the wrappers that hook into
115 | // malloc() and friends for memory accounting purposes. |
116 | // |
117 | // This means that we have no idea who called us and, for example, which locks |
118 | // they hold. Let's be careful when calling back into asd code. |
119 | |
120 | static int32_t |
121 | hook_get_arena(const void *p_indent) |
122 | { |
123 | // Disregard indent by rounding down to page boundary. Works universally: |
124 | // |
125 | // - Small / large: chunk's base aligned to 2 MiB && p >= base + 0x1000. |
126 | // - Huge: p aligned to 2 MiB && MAX_INDENT < 0x1000. |
127 | // |
	// A huge allocation is thus rounded to its actual p (aligned to 2 MiB),
129 | // but a small or large allocation is never rounded to the chunk's base. |
130 | |
131 | const void *p = (const void *)((uint64_t)p_indent & ~0xffful); |
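
	// This relies on jemalloc's internal layout: for small and large
	// allocations, the chunk header starts with a pointer to the owning arena;
	// for huge allocations, je_huge_aalloc() looks the arena up. The arena's
	// first 32-bit field is its index. cf_alloc_init() double-checks this.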
132 | |
133 | int32_t **base = (int32_t **)((uint64_t)p & ~je_chunksize_mask); |
134 | int32_t *arena; |
135 | |
136 | if (base != p) { |
137 | // Small or large allocation. |
138 | arena = base[0]; |
139 | } |
140 | else { |
141 | // Huge allocation. |
142 | arena = je_huge_aalloc(p); |
143 | } |
144 | |
145 | return arena[0]; |
146 | } |
147 | |
148 | // Map a 64-bit address to a 12-bit site ID. |
149 | |
150 | static uint32_t |
151 | hook_get_site_id(const void *ra) |
152 | { |
153 | uint32_t site_id = (uint32_t)(uint64_t)ra & (MAX_SITES - 1); |
154 | |
155 | for (uint32_t i = 0; i < MAX_SITES; ++i) { |
156 | const void *site_ra = ck_pr_load_ptr(g_site_ras + site_id); |
157 | |
158 | // The allocation site is already registered and we found its |
159 | // slot. Return the slot index. |
160 | |
161 | if (site_ra == ra) { |
162 | return site_id; |
163 | } |
164 | |
165 | // We reached an empty slot, i.e., the allocation site isn't yet |
166 | // registered. Try to register it. If somebody else managed to grab |
167 | // this slot in the meantime, keep looping. Otherwise return the |
168 | // slot index. |
169 | |
170 | if (site_ra == NULL && ck_pr_cas_ptr(g_site_ras + site_id, NULL, (void *)ra)) { |
171 | ck_pr_inc_32(&g_n_site_ras); |
172 | return site_id; |
173 | } |
174 | |
175 | site_id = (site_id + 1) & (MAX_SITES - 1); |
176 | } |
177 | |
178 | // More than MAX_SITES call sites. |
179 | cf_crash(CF_ALLOC, "too many call sites" ); |
180 | // Not reached. |
181 | return 0; |
182 | } |
183 | |
184 | static uint32_t |
185 | hook_new_site_info_id(void) |
186 | { |
187 | uint64_t info_id = ck_pr_faa_64(&g_n_site_infos, 1); |
188 | |
189 | if (info_id < MAX_SITES * MAX_THREADS) { |
190 | return (uint32_t)info_id; |
191 | } |
192 | |
193 | if (info_id == MAX_SITES * MAX_THREADS) { |
194 | cf_warning(CF_ALLOC, "site info pool exhausted" ); |
195 | } |
196 | |
197 | return 0; |
198 | } |
199 | |
200 | // Get the info ID of the site_info record for the given site ID and the current |
// thread. If the current thread doesn't yet have a site_info record for the
// given site ID, a new one is allocated.
203 | |
204 | static uint32_t |
205 | hook_get_site_info_id(uint32_t site_id) |
206 | { |
207 | uint32_t info_id = g_thread_site_infos[site_id]; |
208 | |
209 | // This thread encountered this allocation site before. We already |
210 | // have a site info record. |
211 | |
212 | if (info_id != 0) { |
213 | return info_id; |
214 | } |
215 | |
216 | // This is the first time that this thread encounters this allocation |
217 | // site. We need to allocate a site_info record. |
218 | |
219 | if ((info_id = hook_new_site_info_id()) == 0) { |
220 | return 0; |
221 | } |
222 | |
223 | site_info *info = g_site_infos + info_id; |
224 | |
225 | info->site_id = site_id; |
226 | info->thread_id = cf_thread_sys_tid(); |
227 | info->size_lo = 0; |
228 | info->size_hi = 0; |
229 | |
230 | g_thread_site_infos[site_id] = info_id; |
231 | return info_id; |
232 | } |
233 | |
234 | // Account for an allocation by the current thread for the allocation site |
235 | // with the given address. |
236 | |
237 | static void |
238 | hook_handle_alloc(const void *ra, void *p, void *p_indent, size_t sz) |
239 | { |
240 | if (p == NULL) { |
241 | return; |
242 | } |
243 | |
244 | size_t jem_sz = jem_sallocx(p, 0); |
245 | |
246 | uint32_t site_id = hook_get_site_id(ra); |
247 | uint32_t info_id = hook_get_site_info_id(site_id); |
248 | |
249 | if (info_id != 0) { |
250 | site_info *info = g_site_infos + info_id; |
251 | |
252 | size_t size_lo = info->size_lo; |
253 | info->size_lo += jem_sz; |
254 | |
255 | // Carry? |
256 | |
257 | if (info->size_lo < size_lo) { |
258 | ++info->size_hi; |
259 | } |
260 | } |
261 | |
262 | uint8_t *data = (uint8_t *)p + jem_sz - sizeof(uint32_t); |
263 | uint32_t *data32 = (uint32_t *)data; |
264 | |
265 | uint8_t *mark = (uint8_t *)p_indent + sz; |
266 | size_t delta = (size_t)(data - mark); |
267 | |
268 | // Keep 0xffff as a marker for double free detection. |
269 | |
270 | if (delta > 0xfffe) { |
271 | delta = 0; |
272 | } |
273 | |
274 | *data32 = ((site_id << 16) | (uint32_t)delta) * MULT + 1; |
275 | |
276 | for (uint32_t i = 0; i < 4 && i < delta; ++i) { |
277 | mark[i] = data[i]; |
278 | } |
279 | } |
280 | |
281 | // Account for a deallocation by the current thread for the allocation |
282 | // site with the given address. |
283 | |
284 | static void |
285 | hook_handle_free(const void *ra, void *p, size_t jem_sz) |
286 | { |
287 | uint8_t *data = (uint8_t *)p + jem_sz - sizeof(uint32_t); |
288 | uint32_t *data32 = (uint32_t *)data; |
289 | |
290 | uint32_t val = (*data32 - 1) * MULT_INV; |
291 | uint32_t site_id = val >> 16; |
292 | uint32_t delta = val & 0xffff; |
293 | |
294 | if (site_id >= MAX_SITES) { |
295 | cf_crash(CF_ALLOC, "corruption %zu@%p RA %p, invalid site ID" , jem_sz, p, ra); |
296 | } |
297 | |
298 | const void *data_ra = ck_pr_load_ptr(g_site_ras + site_id); |
299 | |
300 | if (delta == 0xffff) { |
301 | cf_crash(CF_ALLOC, "corruption %zu@%p RA %p, potential double free, possibly freed before with RA %p" , |
302 | jem_sz, p, ra, data_ra); |
303 | } |
304 | |
305 | if (delta > jem_sz - sizeof(uint32_t)) { |
306 | cf_crash(CF_ALLOC, "corruption %zu@%p RA %p, invalid delta length, possibly allocated with RA %p" , |
307 | jem_sz, p, ra, data_ra); |
308 | } |
309 | |
310 | uint8_t *mark = data - delta; |
311 | |
312 | for (uint32_t i = 0; i < 4 && i < delta; ++i) { |
313 | if (mark[i] != data[i]) { |
314 | cf_crash(CF_ALLOC, "corruption %zu@%p RA %p, invalid mark, possibly allocated with RA %p" , |
315 | jem_sz, p, ra, data_ra); |
316 | } |
317 | } |
318 | |
319 | uint32_t info_id = hook_get_site_info_id(site_id); |
320 | |
321 | if (info_id != 0) { |
322 | site_info *info = g_site_infos + info_id; |
323 | |
324 | size_t size_lo = info->size_lo; |
325 | info->size_lo -= jem_sz; |
326 | |
327 | // Borrow? |
328 | |
329 | if (info->size_lo > size_lo) { |
330 | --info->size_hi; |
331 | } |
332 | } |
333 | |
334 | // Replace the allocation site with the deallocation site to facilitate |
335 | // double-free debugging. |
336 | |
337 | site_id = hook_get_site_id(ra); |
338 | |
339 | // Also invalidate the delta length, so that we are more likely to detect |
340 | // double frees. |
341 | |
342 | *data32 = ((site_id << 16) | 0xffff) * MULT + 1; |
343 | |
344 | for (uint32_t i = 0; i < 4 && i < delta; ++i) { |
345 | mark[i] = data[i]; |
346 | } |
347 | } |
348 | |
349 | static uint32_t |
350 | indent_hops(void *p) |
351 | { |
352 | if (!g_rand.initialized) { |
353 | g_rand.seed0 = (uint64_t)cf_thread_sys_tid(); |
354 | g_rand.seed1 = cf_getns(); |
355 | g_rand.initialized = true; |
356 | } |
357 | |
358 | uint32_t n_hops; |
359 | void **p_indent; |
360 | |
361 | // Indented pointer must not look like aligned allocation. See outdent(). |
362 | |
363 | do { |
364 | n_hops = 2 + (as_random_next_uint32(&g_rand) % ((MAX_INDENT / 8) - 1)); |
365 | p_indent = (void **)p + n_hops; |
366 | } |
367 | while (((uint64_t)p_indent & 0xfff) == 0); |
368 | |
369 | return n_hops; |
370 | } |
371 | |
372 | static void * |
373 | indent(void *p) |
374 | { |
375 | if (p == NULL) { |
376 | return NULL; |
377 | } |
378 | |
379 | uint32_t n_hops = indent_hops(p); |
380 | uint64_t *p_indent = (uint64_t *)p + n_hops; |
381 | |
382 | p_indent[-1] = (uint64_t)p * MULT_64; |
383 | *(uint64_t *)p = (uint64_t)p_indent * MULT_64; |
384 | |
385 | return (void *)p_indent; |
386 | } |
387 | |
388 | static void * |
389 | reindent(void *p2, size_t sz, void *p, void *p_indent) |
390 | { |
391 | if (p2 == NULL) { |
392 | return NULL; |
393 | } |
394 | |
395 | uint32_t n_hops = (uint32_t)(((uint8_t *)p_indent - (uint8_t *)p)) / 8; |
396 | void **from = (void **)p2 + n_hops; |
397 | |
398 | uint32_t n_hops2 = indent_hops(p2); |
399 | uint64_t *p2_indent = (uint64_t *)p2 + n_hops2; |
400 | |
401 | memmove(p2_indent, from, sz); |
402 | |
403 | p2_indent[-1] = (uint64_t)p2 * MULT_64; |
404 | *(uint64_t *)p2 = (uint64_t)p2_indent * MULT_64; |
405 | |
406 | return (void *)p2_indent; |
407 | } |
408 | |
409 | static void * |
410 | outdent(void *p_indent) |
411 | { |
412 | // Aligned allocations aren't indented. |
413 | |
414 | if (((uint64_t)p_indent & 0xfff) == 0) { |
415 | return p_indent; |
416 | } |
417 | |
418 | uint64_t p = ((uint64_t *)p_indent)[-1] * MULT_INV_64; |
419 | int64_t diff = (int64_t)p_indent - (int64_t)p; |
420 | |
421 | if (diff < 16 || diff > MAX_INDENT || diff % 8 != 0) { |
422 | cf_crash(CF_ALLOC, "bad free of %p via %p" , (void *)p, p_indent); |
423 | } |
424 | |
425 | uint64_t p_expect = *(uint64_t *)p * MULT_INV_64; |
426 | |
427 | if ((uint64_t)p_indent != p_expect) { |
428 | cf_crash(CF_ALLOC, "bad free of %p via %p (vs. %p)" , (void *)p, |
429 | p_indent, (void *)p_expect); |
430 | } |
431 | |
432 | return (void *)p; |
433 | } |
434 | |
435 | static void |
436 | valgrind_check(void) |
437 | { |
438 | // Make sure that we actually call into JEMalloc when invoking malloc(). |
439 | // |
440 | // By default, Valgrind redirects the standard allocation API functions, |
441 | // i.e., malloc(), calloc(), etc., to glibc. |
442 | // |
443 | // The problem with this is that Valgrind only redirects the standard API |
444 | // functions. It does not know about, and thus doesn't redirect, our |
445 | // non-standard functions, e.g., cf_alloc_malloc_arena(). |
446 | // |
	// As we use both standard and non-standard functions to allocate memory,
448 | // we would end up with an inconsistent mix of allocations, some allocated |
449 | // by JEMalloc and some by glibc's allocator. |
450 | // |
451 | // Sooner or later, we will thus end up passing a memory block allocated by |
452 | // JEMalloc to free(), which Valgrind has redirected to glibc's allocator. |
453 | |
454 | uint32_t tries; |
455 | |
456 | void *p1[2]; |
457 | void *p2[2]; |
458 | |
459 | for (tries = 0; tries < 2; ++tries) { |
460 | p1[tries] = malloc(1); // known API function, possibly redirected |
461 | p2[tries] = cf_alloc_try_malloc(1); // our own, never redirected |
462 | |
463 | // If both of the above allocations are handled by JEMalloc, then their |
464 | // base addresses will be identical (cache enabled), contiguous (cache |
465 | // disabled), or unrelated (cache disabled, different runs). Trying |
466 | // twice prevents the latter. |
467 | // |
468 | // If the first allocation is handled by glibc, then the base addresses |
469 | // will always be unrelated. |
470 | |
471 | ptrdiff_t diff = (uint8_t *)p2[tries] - (uint8_t *)p1[tries]; |
472 | |
473 | if (diff > -1024 && diff < 1024) { |
474 | break; |
475 | } |
476 | } |
477 | |
478 | if (tries == 2) { |
		cf_crash_nostack(CF_ALLOC, "Valgrind redirected malloc() to glibc; please run Valgrind with --soname-synonyms=somalloc=nouserintercepts");
480 | } |
481 | |
	for (uint32_t i = 0; i <= tries; ++i) {
		free(p1[i]);
		cf_free(p2[i]);
	}
486 | } |
487 | |
488 | void |
489 | cf_alloc_init(void) |
490 | { |
491 | valgrind_check(); |
492 | |
493 | // Turn off libstdc++'s memory caching, as it just duplicates JEMalloc's. |
494 | |
495 | if (setenv("GLIBCXX_FORCE_NEW" , "1" , 1) < 0) { |
496 | cf_crash(CF_ALLOC, "setenv() failed: %d (%s)" , errno, cf_strerror(errno)); |
497 | } |
498 | |
499 | // Double-check that hook_get_arena() works, as it depends on JEMalloc's |
500 | // internal data structures. |
501 | |
502 | int32_t err = jem_mallctl("thread.tcache.flush" , NULL, NULL, NULL, 0); |
503 | |
504 | if (err != 0) { |
505 | cf_crash(CF_ALLOC, "error while flushing thread cache: %d (%s)" , err, cf_strerror(err)); |
506 | } |
507 | |
508 | for (size_t sz = 1; sz <= 16 * 1024 * 1024; sz *= 2) { |
509 | void *p = malloc(sz); |
510 | int32_t arena = hook_get_arena(p); |
511 | |
512 | if (arena != N_ARENAS) { |
513 | cf_crash(CF_ALLOC, "arena mismatch: %d vs. %d" , arena, N_ARENAS / 2); |
514 | } |
515 | |
516 | free(p); |
517 | } |
518 | } |
519 | |
520 | void |
521 | cf_alloc_set_debug(cf_alloc_debug debug_allocations, bool indent_allocations) |
522 | { |
523 | g_debug = debug_allocations; |
524 | g_indent = indent_allocations; |
525 | |
526 | g_alloc_started = true; |
527 | } |
528 | |
529 | int32_t |
530 | cf_alloc_create_arena(void) |
531 | { |
532 | int32_t arena; |
533 | size_t arena_len = sizeof(arena); |
534 | |
535 | int32_t err = jem_mallctl("arenas.extend" , &arena, &arena_len, NULL, 0); |
536 | |
537 | if (err != 0) { |
538 | cf_crash(CF_ALLOC, "failed to create new arena: %d (%s)" , err, cf_strerror(err)); |
539 | } |
540 | |
541 | cf_debug(CF_ALLOC, "created new arena %d" , arena); |
542 | return arena; |
543 | } |
544 | |
545 | void |
546 | cf_alloc_heap_stats(size_t *allocated_kbytes, size_t *active_kbytes, size_t *mapped_kbytes, |
547 | double *efficiency_pct, uint32_t *site_count) |
548 | { |
549 | uint64_t epoch = 1; |
550 | size_t len = sizeof(epoch); |
551 | |
552 | int32_t err = jem_mallctl("epoch" , &epoch, &len, &epoch, len); |
553 | |
554 | if (err != 0) { |
555 | cf_crash(CF_ALLOC, "failed to retrieve epoch: %d (%s)" , err, cf_strerror(err)); |
556 | } |
557 | |
558 | size_t allocated; |
559 | len = sizeof(allocated); |
560 | |
561 | err = jem_mallctl("stats.allocated" , &allocated, &len, NULL, 0); |
562 | |
563 | if (err != 0) { |
564 | cf_crash(CF_ALLOC, "failed to retrieve stats.allocated: %d (%s)" , err, cf_strerror(err)); |
565 | } |
566 | |
567 | size_t active; |
568 | len = sizeof(active); |
569 | |
570 | err = jem_mallctl("stats.active" , &active, &len, NULL, 0); |
571 | |
572 | if (err != 0) { |
573 | cf_crash(CF_ALLOC, "failed to retrieve stats.active: %d (%s)" , err, cf_strerror(err)); |
574 | } |
575 | |
576 | size_t mapped; |
577 | len = sizeof(mapped); |
578 | |
579 | err = jem_mallctl("stats.mapped" , &mapped, &len, NULL, 0); |
580 | |
581 | if (err != 0) { |
582 | cf_crash(CF_ALLOC, "failed to retrieve stats.mapped: %d (%s)" , err, cf_strerror(err)); |
583 | } |
584 | |
585 | if (allocated_kbytes) { |
586 | *allocated_kbytes = allocated / 1024; |
587 | } |
588 | |
589 | if (active_kbytes) { |
590 | *active_kbytes = active / 1024; |
591 | } |
592 | |
593 | if (mapped_kbytes) { |
594 | *mapped_kbytes = mapped / 1024; |
595 | } |
596 | |
597 | if (efficiency_pct) { |
598 | *efficiency_pct = mapped != 0 ? |
599 | (double)allocated * 100.0 / (double)mapped : 0.0; |
600 | } |
601 | |
602 | if (site_count) { |
603 | *site_count = ck_pr_load_32(&g_n_site_ras); |
604 | } |
605 | } |
606 | |
607 | static void |
608 | line_to_log(void *data, const char *line) |
609 | { |
610 | (void)data; |
611 | |
612 | char buff[1000]; |
613 | size_t i; |
614 | |
615 | for (i = 0; i < sizeof(buff) - 1 && line[i] != 0 && line[i] != '\n'; ++i) { |
616 | buff[i] = line[i]; |
617 | } |
618 | |
619 | buff[i] = 0; |
	cf_info(CF_ALLOC, "%s", buff);
621 | } |
622 | |
623 | static void |
624 | line_to_file(void *data, const char *line) |
625 | { |
	fprintf((FILE *)data, "%s", line);
627 | } |
628 | |
629 | static void |
630 | time_to_file(FILE *fh) |
631 | { |
632 | time_t now = time(NULL); |
633 | |
634 | if (now == (time_t)-1) { |
635 | cf_crash(CF_ALLOC, "time() failed: %d (%s)" , errno, cf_strerror(errno)); |
636 | } |
637 | |
638 | struct tm gmt; |
639 | |
640 | if (gmtime_r(&now, &gmt) == NULL) { |
641 | cf_crash(CF_ALLOC, "gmtime_r() failed" ); |
642 | } |
643 | |
644 | char text[250]; |
645 | |
	if (strftime(text, sizeof(text), "%b %d %Y %T %Z", &gmt) == 0) {
		cf_crash(CF_ALLOC, "strftime() failed");
648 | } |
649 | |
650 | fprintf(fh, "---------- %s ----------\n" , text); |
651 | } |
652 | |
653 | void |
654 | cf_alloc_log_stats(const char *file, const char *opts) |
655 | { |
656 | if (file == NULL) { |
657 | jem_malloc_stats_print(line_to_log, NULL, opts); |
658 | return; |
659 | } |
660 | |
	FILE *fh = fopen(file, "a");
662 | |
663 | if (fh == NULL) { |
664 | cf_warning(CF_ALLOC, "failed to open allocation stats file %s: %d (%s)" , |
665 | file, errno, cf_strerror(errno)); |
666 | return; |
667 | } |
668 | |
669 | time_to_file(fh); |
670 | jem_malloc_stats_print(line_to_file, fh, opts); |
671 | fclose(fh); |
672 | } |
673 | |
674 | void |
675 | cf_alloc_log_site_infos(const char *file) |
676 | { |
	FILE *fh = fopen(file, "a");
678 | |
679 | if (fh == NULL) { |
680 | cf_warning(CF_ALLOC, "failed to open site info file %s: %d (%s)" , |
681 | file, errno, cf_strerror(errno)); |
682 | return; |
683 | } |
684 | |
685 | time_to_file(fh); |
686 | uint64_t n_site_infos = ck_pr_load_64(&g_n_site_infos); |
687 | |
688 | if (n_site_infos > MAX_SITES * MAX_THREADS) { |
689 | n_site_infos = MAX_SITES * MAX_THREADS; |
690 | } |
691 | |
692 | for (uint64_t i = 1; i < n_site_infos; ++i) { |
693 | site_info *info = g_site_infos + i; |
694 | const void *ra = ck_pr_load_ptr(g_site_ras + info->site_id); |
695 | fprintf(fh, "0x%016" PRIx64 " %9d 0x%016zx 0x%016zx\n" , (uint64_t)ra, info->thread_id, |
696 | info->size_hi, info->size_lo); |
697 | } |
698 | |
699 | fclose(fh); |
700 | } |
701 | |
702 | static bool |
703 | is_transient(int32_t arena) |
704 | { |
705 | // Note that this also considers -1 (i.e., the default thread arena) |
706 | // to be transient, in addition to arenas 0 .. (N_ARENAS - 1). |
707 | |
708 | return arena < N_ARENAS; |
709 | } |
710 | |
711 | static bool |
712 | want_debug(int32_t arena) |
713 | { |
	// No debugging during startup or for the startup arena.
715 | |
716 | if (!g_alloc_started || arena == N_ARENAS) { |
717 | return false; |
718 | } |
719 | |
720 | switch (g_debug) { |
721 | case CF_ALLOC_DEBUG_NONE: |
722 | return false; |
723 | |
724 | case CF_ALLOC_DEBUG_TRANSIENT: |
725 | return is_transient(arena); |
726 | |
727 | case CF_ALLOC_DEBUG_PERSISTENT: |
728 | return !is_transient(arena); |
729 | |
730 | case CF_ALLOC_DEBUG_ALL: |
731 | return true; |
732 | } |
733 | |
734 | // Not reached. |
735 | return false; |
736 | } |
737 | |
738 | static int32_t |
739 | calc_free_flags(int32_t arena) |
740 | { |
741 | cf_assert(g_alloc_started || arena == N_ARENAS, CF_ALLOC, |
742 | "bad arena %d during startup" , arena); |
743 | |
744 | // Bypass the thread-local cache for allocations in the startup arena. |
745 | |
746 | if (arena == g_startup_arena) { |
747 | return MALLOCX_TCACHE_NONE; |
748 | } |
749 | |
750 | // If it's a transient allocation, then simply use the default |
751 | // thread-local cache. No flags needed. Same, if we don't debug |
752 | // at all; then we can save ourselves the second cache. |
753 | |
754 | if (is_transient(arena) || g_debug == CF_ALLOC_DEBUG_NONE) { |
755 | return 0; |
756 | } |
757 | |
758 | // If it's a persistent allocation, then use the second per-thread |
759 | // cache. Add it to the flags. See calc_alloc_flags() for more on |
760 | // this second cache. |
761 | |
762 | return MALLOCX_TCACHE(g_ns_tcache); |
763 | } |
764 | |
765 | static void |
766 | do_free(void *p_indent, const void *ra) |
767 | { |
768 | if (p_indent == NULL) { |
769 | return; |
770 | } |
771 | |
772 | int32_t arena = hook_get_arena(p_indent); |
773 | int32_t flags = calc_free_flags(arena); |
774 | |
775 | if (!want_debug(arena)) { |
776 | jem_dallocx(p_indent, flags); // not indented |
777 | return; |
778 | } |
779 | |
780 | void *p = g_indent ? outdent(p_indent) : p_indent; |
781 | size_t jem_sz = jem_sallocx(p, 0); |
782 | |
783 | hook_handle_free(ra, p, jem_sz); |
784 | jem_sdallocx(p, jem_sz, flags); |
785 | } |
786 | |
787 | void |
788 | __attribute__ ((noinline)) |
789 | free(void *p_indent) |
790 | { |
791 | do_free(p_indent, __builtin_return_address(0)); |
792 | } |
793 | |
794 | static int32_t |
795 | calc_alloc_flags(int32_t flags, int32_t arena) |
796 | { |
797 | cf_assert(g_alloc_started || arena < 0, CF_ALLOC, |
798 | "bad arena %d during startup" , arena); |
799 | |
800 | // During startup, allocate from the startup arena and bypass the |
801 | // thread-local cache. |
802 | |
803 | if (!g_alloc_started) { |
804 | // Create startup arena, if necessary. |
805 | if (g_startup_arena < 0) { |
806 | size_t len = sizeof(g_startup_arena); |
807 | int32_t err = jem_mallctl("arenas.extend" , &g_startup_arena, &len, |
808 | NULL, 0); |
809 | |
810 | if (err != 0) { |
811 | cf_crash(CF_ALLOC, "failed to create startup arena: %d (%s)" , |
812 | err, cf_strerror(err)); |
813 | } |
814 | |
815 | // Expect arena 149. |
816 | cf_assert(g_startup_arena == N_ARENAS, CF_ALLOC, |
817 | "bad startup arena %d" , g_startup_arena); |
818 | } |
819 | |
820 | // Set startup arena, bypass thread-local cache. |
821 | flags |= MALLOCX_ARENA(g_startup_arena) | MALLOCX_TCACHE_NONE; |
822 | |
823 | return flags; |
824 | } |
825 | |
826 | // Default arena and default thread-local cache. No additional flags |
827 | // needed. |
828 | |
829 | if (arena < 0) { |
830 | return flags; |
831 | } |
832 | |
833 | // We're allocating from a specific arena. Add it to the flags. |
834 | |
835 | flags |= MALLOCX_ARENA(arena); |
836 | |
837 | // If it's an arena for transient allocations, then we use the default |
838 | // thread-local cache. No additional flags needed. Same, if we don't |
839 | // debug at all; then we can save ourselves the second cache. |
840 | |
841 | if (is_transient(arena) || g_debug == CF_ALLOC_DEBUG_NONE) { |
842 | return flags; |
843 | } |
844 | |
845 | // We have a second per-thread cache for persistent allocations. In this |
846 | // way we never mix persistent allocations and transient allocations in |
847 | // the same cache. We need to keep them apart, because debugging may be |
848 | // enabled for one, but not the other. |
849 | |
850 | // Create the second per-thread cache, if we haven't already done so. |
851 | |
852 | if (g_ns_tcache < 0) { |
853 | size_t len = sizeof(g_ns_tcache); |
854 | int32_t err = jem_mallctl("tcache.create" , &g_ns_tcache, &len, NULL, 0); |
855 | |
856 | if (err != 0) { |
857 | cf_crash(CF_ALLOC, "failed to create new cache: %d (%s)" , err, cf_strerror(err)); |
858 | } |
859 | } |
860 | |
861 | // Add the second (non-default) per-thread cache to the flags. |
862 | |
863 | flags |= MALLOCX_TCACHE(g_ns_tcache); |
864 | return flags; |
865 | } |
866 | |
867 | static void * |
868 | do_mallocx(size_t sz, int32_t arena, const void *ra) |
869 | { |
870 | int32_t flags = calc_alloc_flags(0, arena); |
871 | |
872 | if (!want_debug(arena)) { |
873 | return jem_mallocx(sz == 0 ? 1 : sz, flags); |
874 | } |
875 | |
876 | size_t ext_sz = sz + sizeof(uint32_t); |
877 | |
878 | if (g_indent) { |
879 | ext_sz += MAX_INDENT; |
880 | } |
881 | |
882 | void *p = jem_mallocx(ext_sz, flags); |
883 | void *p_indent = g_indent ? indent(p) : p; |
884 | |
885 | hook_handle_alloc(ra, p, p_indent, sz); |
886 | |
887 | return p_indent; |
888 | } |
889 | |
890 | void * |
891 | cf_alloc_try_malloc(size_t sz) |
892 | { |
893 | // Allowed to return NULL. |
894 | return do_mallocx(sz, -1, __builtin_return_address(0)); |
895 | } |
896 | |
897 | void * |
898 | cf_alloc_malloc_arena(size_t sz, int32_t arena) |
899 | { |
900 | cf_assert(g_alloc_started, CF_ALLOC, "arena allocation during startup" ); |
901 | |
902 | void *p_indent = do_mallocx(sz, arena, __builtin_return_address(0)); |
903 | |
904 | cf_assert(p_indent != NULL, CF_ALLOC, "malloc_ns failed sz %zu arena %d" , |
905 | sz, arena); |
906 | |
907 | return p_indent; |
908 | } |
909 | |
910 | void * |
911 | __attribute__ ((noinline)) |
912 | malloc(size_t sz) |
913 | { |
914 | void *p_indent = do_mallocx(sz, -1, __builtin_return_address(0)); |
915 | |
916 | cf_assert(p_indent != NULL, CF_ALLOC, "malloc failed sz %zu" , sz); |
917 | |
918 | return p_indent; |
919 | } |
920 | |
921 | static void * |
922 | do_callocx(size_t n, size_t sz, int32_t arena, const void *ra) |
923 | { |
924 | int32_t flags = calc_alloc_flags(MALLOCX_ZERO, arena); |
925 | size_t tot_sz = n * sz; |
926 | |
927 | if (!want_debug(arena)) { |
928 | return jem_mallocx(tot_sz == 0 ? 1 : tot_sz, flags); |
929 | } |
930 | |
931 | size_t ext_sz = tot_sz + sizeof(uint32_t); |
932 | |
933 | if (g_indent) { |
934 | ext_sz += MAX_INDENT; |
935 | } |
936 | |
937 | void *p = jem_mallocx(ext_sz, flags); |
938 | void *p_indent = g_indent ? indent(p) : p; |
939 | |
940 | hook_handle_alloc(ra, p, p_indent, tot_sz); |
941 | |
942 | return p_indent; |
943 | } |
944 | |
945 | void * |
946 | cf_alloc_calloc_arena(size_t n, size_t sz, int32_t arena) |
947 | { |
948 | cf_assert(g_alloc_started, CF_ALLOC, "arena allocation during startup" ); |
949 | |
950 | void *p_indent = do_callocx(n, sz, arena, __builtin_return_address(0)); |
951 | |
952 | cf_assert(p_indent != NULL, CF_ALLOC, |
953 | "calloc_ns failed n %zu sz %zu arena %d" , n, sz, arena); |
954 | |
955 | return p_indent; |
956 | } |
957 | |
958 | void * |
959 | calloc(size_t n, size_t sz) |
960 | { |
961 | void *p_indent = do_callocx(n, sz, -1, __builtin_return_address(0)); |
962 | |
963 | cf_assert(p_indent != NULL, CF_ALLOC, "calloc failed n %zu sz %zu" , n, sz); |
964 | |
965 | return p_indent; |
966 | } |
967 | |
968 | static void * |
969 | do_rallocx(void *p_indent, size_t sz, int32_t arena, const void *ra) |
970 | { |
971 | if (p_indent == NULL) { |
972 | return do_mallocx(sz, arena, ra); |
973 | } |
974 | |
975 | int32_t arena_p = hook_get_arena(p_indent); |
976 | |
977 | bool debug_p = want_debug(arena_p); |
978 | bool debug = want_debug(arena); |
979 | |
980 | // Allow debug change for startup arena - handled below. |
981 | |
982 | if (debug != debug_p && arena_p != N_ARENAS) { |
983 | cf_crash(CF_ALLOC, "debug change - p_indent %p arena_p %d arena %d" , |
984 | p_indent, arena_p, arena); |
985 | } |
986 | |
987 | if (sz == 0) { |
988 | do_free(p_indent, ra); |
989 | return NULL; |
990 | } |
991 | |
992 | int32_t flags = calc_alloc_flags(0, arena); |
993 | |
994 | // Going from startup or non-debug arena to non-debug arena. |
995 | |
996 | if (!debug) { |
997 | return jem_rallocx(p_indent, sz, flags); // not indented |
998 | } |
999 | |
1000 | // Going from startup arena to debug arena. |
1001 | |
1002 | if (arena_p == N_ARENAS) { |
1003 | void *p = p_indent; // not indented |
1004 | void *p_move = do_mallocx(sz, arena, ra); |
1005 | |
1006 | size_t sz_move = jem_sallocx(p, 0); |
1007 | |
1008 | if (sz < sz_move) { |
1009 | sz_move = sz; |
1010 | } |
1011 | |
1012 | memcpy(p_move, p, sz_move); |
1013 | cf_free(p); |
1014 | |
1015 | return p_move; |
1016 | } |
1017 | |
1018 | // Going from debug arena to debug arena. |
1019 | |
1020 | void *p = g_indent ? outdent(p_indent) : p_indent; |
1021 | size_t jem_sz = jem_sallocx(p, 0); |
1022 | |
1023 | hook_handle_free(ra, p, jem_sz); |
1024 | |
1025 | size_t ext_sz = sz + sizeof(uint32_t); |
1026 | |
1027 | if (g_indent) { |
1028 | ext_sz += MAX_INDENT; |
1029 | } |
1030 | |
1031 | void *p2 = jem_rallocx(p, ext_sz, flags); |
1032 | void *p2_indent = g_indent ? reindent(p2, sz, p, p_indent) : p2; |
1033 | |
1034 | hook_handle_alloc(ra, p2, p2_indent, sz); |
1035 | |
1036 | return p2_indent; |
1037 | } |
1038 | |
1039 | void * |
1040 | cf_alloc_realloc_arena(void *p_indent, size_t sz, int32_t arena) |
1041 | { |
1042 | cf_assert(g_alloc_started, CF_ALLOC, "arena allocation during startup" ); |
1043 | |
1044 | void *p2_indent = do_rallocx(p_indent, sz, arena, |
1045 | __builtin_return_address(0)); |
1046 | |
1047 | cf_assert(p2_indent != NULL || sz == 0, CF_ALLOC, |
1048 | "realloc_ns failed sz %zu arena %d" , sz, arena); |
1049 | |
1050 | return p2_indent; |
1051 | } |
1052 | |
1053 | void * |
1054 | realloc(void *p_indent, size_t sz) |
1055 | { |
1056 | void *p2_indent = do_rallocx(p_indent, sz, -1, __builtin_return_address(0)); |
1057 | |
	cf_assert(p2_indent != NULL || sz == 0, CF_ALLOC, "realloc failed sz %zu",
1059 | sz); |
1060 | |
1061 | return p2_indent; |
1062 | } |
1063 | |
1064 | static char * |
1065 | do_strdup(const char *s, size_t n, const void *ra) |
1066 | { |
1067 | int32_t flags = calc_alloc_flags(0, -1); |
1068 | |
1069 | size_t sz = n + 1; |
1070 | size_t ext_sz = sz; |
1071 | |
1072 | if (want_debug(-1)) { |
1073 | ext_sz += sizeof(uint32_t); |
1074 | |
1075 | if (g_indent) { |
1076 | ext_sz += MAX_INDENT; |
1077 | } |
1078 | } |
1079 | |
1080 | char *s2 = jem_mallocx(ext_sz, flags); |
1081 | char *s2_indent = s2; |
1082 | |
1083 | if (want_debug(-1)) { |
1084 | if (g_indent) { |
1085 | s2_indent = indent(s2); |
1086 | } |
1087 | |
1088 | hook_handle_alloc(ra, s2, s2_indent, sz); |
1089 | } |
1090 | |
1091 | memcpy(s2_indent, s, n); |
1092 | s2_indent[n] = 0; |
1093 | |
1094 | return s2_indent; |
1095 | } |
1096 | |
1097 | char * |
1098 | strdup(const char *s) |
1099 | { |
1100 | return do_strdup(s, strlen(s), __builtin_return_address(0)); |
1101 | } |
1102 | |
1103 | char * |
1104 | strndup(const char *s, size_t n) |
1105 | { |
1106 | size_t n2 = 0; |
1107 | |
1108 | while (n2 < n && s[n2] != 0) { |
1109 | ++n2; |
1110 | } |
1111 | |
1112 | return do_strdup(s, n2, __builtin_return_address(0)); |
1113 | } |
1114 | |
1115 | static int32_t |
1116 | do_asprintf(char **res, const char *form, va_list va, const void *ra) |
1117 | { |
1118 | char buff[25000]; |
1119 | int32_t n = vsnprintf(buff, sizeof(buff), form, va); |
1120 | |
1121 | if ((size_t)n >= sizeof(buff)) { |
1122 | cf_crash(CF_ALLOC, "asprintf overflow len %d" , n); |
1123 | } |
1124 | |
1125 | *res = do_strdup(buff, (size_t)n, ra); |
1126 | return n; |
1127 | } |
1128 | |
1129 | int32_t |
1130 | asprintf(char **res, const char *form, ...) |
1131 | { |
1132 | va_list va; |
1133 | va_start(va, form); |
1134 | |
1135 | int32_t n = do_asprintf(res, form, va, __builtin_return_address(0)); |
1136 | |
1137 | va_end(va); |
1138 | return n; |
1139 | } |
1140 | |
1141 | int32_t |
1142 | __asprintf_chk(char **res, int32_t flags, const char *form, ...) |
1143 | { |
1144 | (void)flags; |
1145 | |
1146 | va_list va; |
1147 | va_start(va, form); |
1148 | |
1149 | int32_t n = do_asprintf(res, form, va, __builtin_return_address(0)); |
1150 | |
1151 | va_end(va); |
1152 | return n; |
1153 | } |
1154 | |
1155 | int32_t |
1156 | posix_memalign(void **p, size_t align, size_t sz) |
1157 | { |
1158 | cf_assert(g_alloc_started, CF_ALLOC, "aligned allocation during startup" ); |
1159 | cf_assert((align & (align - 1)) == 0, CF_ALLOC, "bad alignment" ); |
1160 | |
1161 | if (!want_debug(-1)) { |
1162 | return jem_posix_memalign(p, align, sz == 0 ? 1 : sz); |
1163 | } |
1164 | |
1165 | if (g_indent) { |
1166 | align = (align + 0xffful) & ~0xffful; |
1167 | } |
1168 | |
1169 | size_t ext_sz = sz + sizeof(uint32_t); |
1170 | int32_t err = jem_posix_memalign(p, align, ext_sz); |
1171 | |
1172 | if (err != 0) { |
1173 | return err; |
1174 | } |
1175 | |
1176 | hook_handle_alloc(__builtin_return_address(0), *p, *p, sz); |
1177 | return 0; |
1178 | } |
1179 | |
1180 | void * |
1181 | aligned_alloc(size_t align, size_t sz) |
1182 | { |
1183 | cf_assert(g_alloc_started, CF_ALLOC, "aligned allocation during startup" ); |
1184 | cf_assert((align & (align - 1)) == 0, CF_ALLOC, "bad alignment" ); |
1185 | |
1186 | if (!want_debug(-1)) { |
1187 | return jem_aligned_alloc(align, sz == 0 ? 1 : sz); |
1188 | } |
1189 | |
1190 | if (g_indent) { |
1191 | align = (align + 0xffful) & ~0xffful; |
1192 | } |
1193 | |
1194 | size_t ext_sz = sz + sizeof(uint32_t); |
1195 | |
1196 | void *p = jem_aligned_alloc(align, ext_sz); |
1197 | hook_handle_alloc(__builtin_return_address(0), p, p, sz); |
1198 | |
1199 | return p; |
1200 | } |
1201 | |
1202 | static void * |
1203 | do_valloc(size_t sz) |
1204 | { |
1205 | if (!want_debug(-1)) { |
1206 | return jem_aligned_alloc(PAGE_SZ, sz == 0 ? 1 : sz); |
1207 | } |
1208 | |
1209 | size_t ext_sz = sz + sizeof(uint32_t); |
1210 | |
1211 | void *p = jem_aligned_alloc(PAGE_SZ, ext_sz); |
1212 | hook_handle_alloc(__builtin_return_address(0), p, p, sz); |
1213 | |
1214 | return p; |
1215 | } |
1216 | |
1217 | void * |
1218 | valloc(size_t sz) |
1219 | { |
1220 | cf_assert(g_alloc_started, CF_ALLOC, "aligned allocation during startup" ); |
1221 | |
1222 | void *p = do_valloc(sz); |
	cf_assert(p, CF_ALLOC, "valloc failed sz %zu", sz);
1224 | return p; |
1225 | } |
1226 | |
1227 | void * |
1228 | memalign(size_t align, size_t sz) |
1229 | { |
1230 | cf_assert(g_alloc_started, CF_ALLOC, "aligned allocation during startup" ); |
1231 | cf_assert((align & (align - 1)) == 0, CF_ALLOC, "bad alignment" ); |
1232 | |
1233 | if (!want_debug(-1)) { |
1234 | return jem_aligned_alloc(align, sz == 0 ? 1 : sz); |
1235 | } |
1236 | |
1237 | if (g_indent) { |
1238 | align = (align + 0xffful) & ~0xffful; |
1239 | } |
1240 | |
1241 | size_t ext_sz = sz + sizeof(uint32_t); |
1242 | |
1243 | void *p = jem_aligned_alloc(align, ext_sz); |
1244 | hook_handle_alloc(__builtin_return_address(0), p, p, sz); |
1245 | |
1246 | return p; |
1247 | } |
1248 | |
1249 | void * |
1250 | pvalloc(size_t sz) |
1251 | { |
1252 | (void)sz; |
1253 | cf_crash(CF_ALLOC, "obsolete pvalloc() called" ); |
1254 | // Not reached. |
1255 | return NULL; |
1256 | } |
1257 | |
1258 | void * |
1259 | cf_rc_alloc(size_t sz) |
1260 | { |
1261 | int32_t flags = calc_alloc_flags(0, -1); |
1262 | |
1263 | size_t tot_sz = sizeof(cf_rc_header) + sz; |
1264 | size_t ext_sz = tot_sz; |
1265 | |
1266 | if (want_debug(-1)) { |
1267 | ext_sz += sizeof(uint32_t); |
1268 | |
1269 | if (g_indent) { |
1270 | ext_sz += MAX_INDENT; |
1271 | } |
1272 | } |
1273 | |
1274 | void *p = jem_mallocx(ext_sz, flags); |
1275 | void *p_indent = p; |
1276 | |
1277 | if (want_debug(-1)) { |
1278 | if (g_indent) { |
1279 | p_indent = indent(p); |
1280 | } |
1281 | |
1282 | hook_handle_alloc(__builtin_return_address(0), p, p_indent, tot_sz); |
1283 | } |
1284 | |
1285 | cf_rc_header *head = p_indent; |
1286 | |
1287 | head->rc = 1; |
1288 | head->sz = (uint32_t)sz; |
1289 | |
1290 | return head + 1; // body |
1291 | } |
1292 | |
1293 | static void |
1294 | do_rc_free(void *body, void *ra) |
1295 | { |
1296 | if (body == NULL) { |
1297 | cf_crash(CF_ALLOC, "trying to cf_rc_free() null pointer" ); |
1298 | } |
1299 | |
1300 | cf_rc_header *head = (cf_rc_header *)body - 1; |
1301 | |
1302 | int32_t arena = hook_get_arena(head); |
1303 | int32_t flags = calc_free_flags(arena); |
1304 | |
1305 | if (!want_debug(arena)) { |
1306 | jem_dallocx(head, flags); // not indented |
1307 | return; |
1308 | } |
1309 | |
1310 | void *p = g_indent ? outdent(head) : head; |
1311 | size_t jem_sz = jem_sallocx(p, 0); |
1312 | |
1313 | hook_handle_free(ra, p, jem_sz); |
1314 | jem_sdallocx(p, jem_sz, flags); |
1315 | } |
1316 | |
1317 | void |
1318 | cf_rc_free(void *body) |
1319 | { |
1320 | do_rc_free(body, __builtin_return_address(0)); |
1321 | } |
1322 | |
1323 | int32_t |
1324 | cf_rc_reserve(void *body) |
1325 | { |
1326 | cf_rc_header *head = (cf_rc_header *)body - 1; |
1327 | return cf_atomic32_incr(&head->rc); |
1328 | } |
1329 | |
1330 | int32_t |
1331 | cf_rc_release(void *body) |
1332 | { |
1333 | cf_rc_header *head = (cf_rc_header *)body - 1; |
1334 | int32_t rc = cf_atomic32_decr(&head->rc); |
1335 | |
	cf_assert(rc >= 0, CF_ALLOC, "reference count underflow: %d (0x%x)", rc,
1337 | rc); |
1338 | |
1339 | return rc; |
1340 | } |
1341 | |
1342 | int32_t |
1343 | cf_rc_releaseandfree(void *body) |
1344 | { |
1345 | cf_rc_header *head = (cf_rc_header *)body - 1; |
1346 | int32_t rc = cf_atomic32_decr(&head->rc); |
1347 | |
	cf_assert(rc >= 0, CF_ALLOC, "reference count underflow: %d (0x%x)", rc,
1349 | rc); |
1350 | |
1351 | if (rc > 0) { |
1352 | return rc; |
1353 | } |
1354 | |
1355 | do_rc_free(body, __builtin_return_address(0)); |
1356 | return 0; |
1357 | } |
1358 | |
1359 | int32_t |
1360 | cf_rc_count(const void *body) |
1361 | { |
1362 | const cf_rc_header *head = (const cf_rc_header *)body - 1; |
1363 | return (int32_t)head->rc; |
1364 | } |
1365 | |