1 | /* |
2 | * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. |
3 | * |
4 | * Licensed under the Apache License, Version 2.0 (the "License"). |
5 | * You may not use this file except in compliance with the License. |
6 | * A copy of the License is located at |
7 | * |
8 | * http://aws.amazon.com/apache2.0 |
9 | * |
10 | * or in the "license" file accompanying this file. This file is distributed |
11 | * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either |
12 | * express or implied. See the License for the specific language governing |
13 | * permissions and limitations under the License. |
14 | */ |
15 | |
16 | #include <aws/common/atomics.h> |
17 | #include <aws/common/byte_buf.h> |
18 | #include <aws/common/hash_table.h> |
19 | #include <aws/common/logging.h> |
20 | #include <aws/common/mutex.h> |
21 | #include <aws/common/priority_queue.h> |
22 | #include <aws/common/string.h> |
23 | #include <aws/common/system_info.h> |
24 | #include <aws/common/time.h> |
25 | |
26 | /* describes a single live allocation */ |
27 | struct alloc_info { |
28 | size_t size; |
29 | time_t time; |
30 | uint64_t stack; /* hash of stack frame pointers */ |
31 | }; |
32 | |
/* A flexible array member is the C99-compliant way to have the frames immediately follow the header.
 *
 * MSVC flags flexible array members as a nonstandard extension, so suppress that warning with a pragma.
 */
38 | #ifdef _MSC_VER |
39 | # pragma warning(push) |
40 | # pragma warning(disable : 4200) /* nonstandard extension used: zero-sized array in struct/union */ |
41 | #endif |
42 | |
43 | /* one of these is stored per unique stack */ |
44 | struct stack_trace { |
45 | size_t depth; /* length of frames[] */ |
    void *const frames[]; /* frames are stored in memory allocated immediately after this struct */
47 | }; |
48 | |
49 | #ifdef _MSC_VER |
50 | # pragma warning(pop) |
51 | #endif |
52 | |
53 | /* Tracking structure, used as the allocator impl */ |
54 | struct alloc_tracer { |
55 | struct aws_allocator *allocator; /* underlying allocator */ |
56 | struct aws_allocator *system_allocator; /* bookkeeping allocator */ |
57 | enum aws_mem_trace_level level; /* level to trace at */ |
58 | size_t frames_per_stack; /* how many frames to keep per stack */ |
59 | struct aws_atomic_var allocated; /* bytes currently allocated */ |
60 | struct aws_mutex mutex; /* protects everything below */ |
61 | struct aws_hash_table allocs; /* live allocations, maps address -> alloc_info */ |
62 | struct aws_hash_table stacks; /* unique stack traces, maps hash -> stack_trace */ |
63 | struct aws_hash_table stack_info; /* only used during dumps, maps stack hash/id -> stack_metadata */ |
64 | }; |
65 | |
/* number of frames to skip in call stacks (s_alloc_tracer_track and the allocator vtable function) */
67 | #define FRAMES_TO_SKIP 2 |
68 | |
69 | static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size); |
70 | static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr); |
71 | static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size); |
72 | static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size); |
73 | |
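/* vtable template: aws_mem_tracer_new copies it into each tracer's allocator, with impl pointing at the tracer */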
74 | static struct aws_allocator s_trace_allocator = { |
75 | .mem_acquire = s_trace_mem_acquire, |
76 | .mem_release = s_trace_mem_release, |
77 | .mem_realloc = s_trace_mem_realloc, |
78 | .mem_calloc = s_trace_mem_calloc, |
79 | }; |
80 | |
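/* NOTE: the hash-table destroy callbacks below receive only the element pointer; the elements were
 * allocated with the tracer's bookkeeping (system) allocator, so releasing them with the default
 * allocator assumes the bookkeeping allocator is the default allocator. */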
81 | /* for the hash table, to destroy elements */ |
82 | static void s_destroy_alloc(void *data) { |
83 | struct aws_allocator *allocator = aws_default_allocator(); |
84 | struct alloc_info *alloc = data; |
85 | aws_mem_release(allocator, alloc); |
86 | } |
87 | |
88 | static void s_destroy_stacktrace(void *data) { |
89 | struct aws_allocator *allocator = aws_default_allocator(); |
90 | struct stack_trace *stack = data; |
91 | aws_mem_release(allocator, stack); |
92 | } |
93 | |
94 | static void s_alloc_tracer_init( |
95 | struct alloc_tracer *tracer, |
96 | struct aws_allocator *allocator, |
97 | struct aws_allocator *system_allocator, |
98 | enum aws_mem_trace_level level, |
99 | size_t frames_per_stack) { |
100 | |
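    /* probe whether stack capture actually works on this platform/build before promising stack traces */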
101 | void *stack[1]; |
102 | if (!aws_backtrace(stack, 1)) { |
103 | /* clamp level if tracing isn't available */ |
104 | level = level > AWS_MEMTRACE_BYTES ? AWS_MEMTRACE_BYTES : level; |
105 | } |
106 | |
107 | tracer->allocator = allocator; |
108 | tracer->system_allocator = system_allocator; |
109 | tracer->level = level; |
110 | |
111 | if (tracer->level >= AWS_MEMTRACE_BYTES) { |
112 | aws_atomic_init_int(&tracer->allocated, 0); |
113 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mutex_init(&tracer->mutex)); |
114 | AWS_FATAL_ASSERT( |
115 | AWS_OP_SUCCESS == |
116 | aws_hash_table_init( |
117 | &tracer->allocs, tracer->system_allocator, 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_alloc)); |
118 | } |
119 | |
120 | if (tracer->level == AWS_MEMTRACE_STACKS) { |
121 | if (frames_per_stack > 128) { |
122 | frames_per_stack = 128; |
123 | } |
124 | tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8; |
125 | AWS_FATAL_ASSERT( |
126 | AWS_OP_SUCCESS == |
127 | aws_hash_table_init( |
128 | &tracer->stacks, tracer->system_allocator, 1024, aws_hash_ptr, aws_ptr_eq, NULL, s_destroy_stacktrace)); |
129 | } |
130 | } |
131 | |
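/* Records an allocation: bumps the byte counter, captures and interns the call stack (at
 * AWS_MEMTRACE_STACKS), and stores an alloc_info entry keyed by the allocation's address. */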
132 | static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t size) { |
133 | if (tracer->level == AWS_MEMTRACE_NONE) { |
134 | return; |
135 | } |
136 | |
137 | aws_atomic_fetch_add(&tracer->allocated, size); |
138 | |
139 | struct alloc_info *alloc = aws_mem_calloc(tracer->system_allocator, 1, sizeof(struct alloc_info)); |
140 | alloc->size = size; |
141 | alloc->time = time(NULL); |
142 | |
143 | if (tracer->level == AWS_MEMTRACE_STACKS) { |
144 | /* capture stack frames, skip 2 for this function and the allocation vtable function */ |
145 | AWS_VARIABLE_LENGTH_ARRAY(void *, stack_frames, (FRAMES_TO_SKIP + tracer->frames_per_stack)); |
146 | size_t stack_depth = aws_backtrace(stack_frames, FRAMES_TO_SKIP + tracer->frames_per_stack); |
147 | if (stack_depth) { |
148 | /* hash the stack pointers */ |
149 | struct aws_byte_cursor stack_cursor = |
150 | aws_byte_cursor_from_array(stack_frames, stack_depth * sizeof(void *)); |
151 | uint64_t stack_id = aws_hash_byte_cursor_ptr(&stack_cursor); |
152 | alloc->stack = stack_id; /* associate the stack with the alloc */ |
153 | |
154 | aws_mutex_lock(&tracer->mutex); |
155 | struct aws_hash_element *item = NULL; |
156 | int was_created = 0; |
157 | AWS_FATAL_ASSERT( |
158 | AWS_OP_SUCCESS == |
159 | aws_hash_table_create(&tracer->stacks, (void *)(uintptr_t)stack_id, &item, &was_created)); |
160 | /* If this is a new stack, save it to the hash */ |
161 | if (was_created) { |
162 | struct stack_trace *stack = aws_mem_calloc( |
163 | tracer->system_allocator, |
164 | 1, |
165 | sizeof(struct stack_trace) + (sizeof(void *) * tracer->frames_per_stack)); |
166 | memcpy( |
167 | (void **)&stack->frames[0], |
168 | &stack_frames[FRAMES_TO_SKIP], |
169 | (stack_depth - FRAMES_TO_SKIP) * sizeof(void *)); |
170 | stack->depth = stack_depth - FRAMES_TO_SKIP; |
171 | item->value = stack; |
172 | } |
173 | aws_mutex_unlock(&tracer->mutex); |
174 | } |
175 | } |
176 | |
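    /* record the live allocation, keyed by its address */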
177 | aws_mutex_lock(&tracer->mutex); |
178 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_put(&tracer->allocs, ptr, alloc, NULL)); |
179 | aws_mutex_unlock(&tracer->mutex); |
180 | } |
181 | |
182 | static void s_alloc_tracer_untrack(struct alloc_tracer *tracer, void *ptr) { |
183 | if (tracer->level == AWS_MEMTRACE_NONE) { |
184 | return; |
185 | } |
186 | |
187 | aws_mutex_lock(&tracer->mutex); |
188 | struct aws_hash_element *item; |
189 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(&tracer->allocs, ptr, &item)); |
190 | /* because the tracer can be installed at any time, it is possible for an allocation to not |
191 | * be tracked. Therefore, we make sure the find succeeds, but then check the returned |
192 | * value */ |
193 | if (item) { |
194 | AWS_FATAL_ASSERT(item->key == ptr && item->value); |
195 | struct alloc_info *alloc = item->value; |
196 | aws_atomic_fetch_sub(&tracer->allocated, alloc->size); |
197 | s_destroy_alloc(item->value); |
198 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_remove_element(&tracer->allocs, item)); |
199 | } |
200 | aws_mutex_unlock(&tracer->mutex); |
201 | } |
202 | |
203 | /* used only to resolve stacks -> trace, count, size at dump time */ |
204 | struct stack_metadata { |
205 | struct aws_string *trace; |
206 | size_t count; |
207 | size_t size; |
208 | }; |
209 | |
210 | static int s_collect_stack_trace(void *context, struct aws_hash_element *item) { |
211 | struct alloc_tracer *tracer = context; |
212 | struct aws_hash_table *all_stacks = &tracer->stacks; |
213 | struct aws_allocator *allocator = tracer->system_allocator; |
214 | struct stack_metadata *stack_info = item->value; |
215 | struct aws_hash_element *stack_item = NULL; |
216 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_hash_table_find(all_stacks, item->key, &stack_item)); |
217 | AWS_FATAL_ASSERT(stack_item); |
218 | struct stack_trace *stack = stack_item->value; |
219 | void *const *stack_frames = &stack->frames[0]; |
220 | |
221 | /* convert the frame pointers to symbols, and concat into a buffer */ |
222 | char buf[4096] = {0}; |
223 | struct aws_byte_buf stacktrace = aws_byte_buf_from_empty_array(buf, AWS_ARRAY_SIZE(buf)); |
    struct aws_byte_cursor newline = aws_byte_cursor_from_c_str("\n");
225 | char **symbols = aws_backtrace_addr2line(stack_frames, stack->depth); |
226 | for (size_t idx = 0; idx < stack->depth; ++idx) { |
227 | if (idx > 0) { |
228 | aws_byte_buf_append(&stacktrace, &newline); |
229 | } |
230 | const char *caller = symbols[idx]; |
231 | if (!caller || !caller[0]) { |
232 | break; |
233 | } |
234 | struct aws_byte_cursor cursor = aws_byte_cursor_from_c_str(caller); |
235 | aws_byte_buf_append(&stacktrace, &cursor); |
236 | } |
237 | free(symbols); |
238 | /* record the resultant buffer as a string */ |
239 | stack_info->trace = aws_string_new_from_array(allocator, stacktrace.buffer, stacktrace.len); |
240 | aws_byte_buf_clean_up(&stacktrace); |
241 | return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; |
242 | } |
243 | |
244 | static int s_stack_info_compare_size(const void *a, const void *b) { |
245 | const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; |
246 | const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; |
247 | return stack_b->size > stack_a->size; |
248 | } |
249 | |
250 | static int s_stack_info_compare_count(const void *a, const void *b) { |
251 | const struct stack_metadata *stack_a = *(const struct stack_metadata **)a; |
252 | const struct stack_metadata *stack_b = *(const struct stack_metadata **)b; |
253 | return stack_b->count > stack_a->count; |
254 | } |
255 | |
256 | static void s_stack_info_destroy(void *data) { |
257 | struct stack_metadata *stack = data; |
258 | struct aws_allocator *allocator = stack->trace->allocator; |
259 | aws_string_destroy(stack->trace); |
260 | aws_mem_release(allocator, stack); |
261 | } |
262 | |
263 | /* tally up count/size per stack from all allocs */ |
264 | static int s_collect_stack_stats(void *context, struct aws_hash_element *item) { |
265 | struct alloc_tracer *tracer = context; |
266 | struct alloc_info *alloc = item->value; |
267 | struct aws_hash_element *stack_item = NULL; |
268 | int was_created = 0; |
269 | AWS_FATAL_ASSERT( |
270 | AWS_OP_SUCCESS == |
271 | aws_hash_table_create(&tracer->stack_info, (void *)(uintptr_t)alloc->stack, &stack_item, &was_created)); |
272 | if (was_created) { |
273 | stack_item->value = aws_mem_calloc(tracer->system_allocator, 1, sizeof(struct stack_metadata)); |
274 | } |
275 | struct stack_metadata *stack = stack_item->value; |
276 | stack->count++; |
277 | stack->size += alloc->size; |
278 | return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; |
279 | } |
280 | |
281 | static int s_insert_stacks(void *context, struct aws_hash_element *item) { |
282 | struct aws_priority_queue *pq = context; |
283 | struct stack_metadata *stack = item->value; |
284 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(pq, &stack)); |
285 | return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; |
286 | } |
287 | |
288 | static int s_insert_allocs(void *context, struct aws_hash_element *item) { |
289 | struct aws_priority_queue *allocs = context; |
290 | struct alloc_info *alloc = item->value; |
291 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_priority_queue_push(allocs, &alloc)); |
292 | return AWS_COMMON_HASH_TABLE_ITER_CONTINUE; |
293 | } |
294 | |
295 | static int s_alloc_compare(const void *a, const void *b) { |
296 | const struct alloc_info *alloc_a = *(const struct alloc_info **)a; |
297 | const struct alloc_info *alloc_b = *(const struct alloc_info **)b; |
298 | return alloc_a->time > alloc_b->time; |
299 | } |
300 | |
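/* Logs a summary of outstanding allocations: total bytes and count, each leak in allocation order, and
 * (at AWS_MEMTRACE_STACKS) per-stack totals ordered by bytes leaked and by number of leaks. */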
301 | static void s_alloc_tracer_dump(struct alloc_tracer *tracer) { |
302 | if (tracer->level == AWS_MEMTRACE_NONE || aws_atomic_load_int(&tracer->allocated) == 0) { |
303 | return; |
304 | } |
305 | |
306 | aws_mutex_lock(&tracer->mutex); |
307 | |
308 | size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs); |
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE,
        "tracer: %zu bytes still allocated in %zu allocations\n",
        aws_atomic_load_int(&tracer->allocated),
        num_allocs);
320 | |
321 | /* convert stacks from pointers -> symbols */ |
322 | if (tracer->level == AWS_MEMTRACE_STACKS) { |
323 | AWS_FATAL_ASSERT( |
324 | AWS_OP_SUCCESS == |
325 | aws_hash_table_init( |
326 | &tracer->stack_info, tracer->allocator, 64, aws_hash_ptr, aws_ptr_eq, NULL, s_stack_info_destroy)); |
327 | /* collect active stacks, tally up sizes and counts */ |
328 | aws_hash_table_foreach(&tracer->allocs, s_collect_stack_stats, tracer); |
329 | /* collect stack traces for active stacks */ |
330 | aws_hash_table_foreach(&tracer->stack_info, s_collect_stack_trace, tracer); |
331 | } |
332 | |
333 | /* sort allocs by time */ |
334 | struct aws_priority_queue allocs; |
335 | aws_priority_queue_init_dynamic( |
336 | &allocs, tracer->allocator, num_allocs, sizeof(struct alloc_info *), s_alloc_compare); |
337 | aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs); |
338 | /* dump allocs by time */ |
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
344 | while (aws_priority_queue_size(&allocs)) { |
345 | struct alloc_info *alloc = NULL; |
346 | aws_priority_queue_pop(&allocs, &alloc); |
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
348 | if (alloc->stack) { |
349 | struct aws_hash_element *item = NULL; |
350 | AWS_FATAL_ASSERT( |
351 | AWS_OP_SUCCESS == aws_hash_table_find(&tracer->stack_info, (void *)(uintptr_t)alloc->stack, &item)); |
352 | struct stack_metadata *stack = item->value; |
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
354 | } |
355 | } |
356 | |
357 | aws_priority_queue_clean_up(&allocs); |
358 | |
359 | if (tracer->level == AWS_MEMTRACE_STACKS) { |
360 | size_t num_stacks = aws_hash_table_get_entry_count(&tracer->stack_info); |
361 | /* sort stacks by total size leaked */ |
362 | struct aws_priority_queue stacks_by_size; |
363 | AWS_FATAL_ASSERT( |
364 | AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( |
365 | &stacks_by_size, |
366 | tracer->allocator, |
367 | num_stacks, |
368 | sizeof(struct stack_metadata *), |
369 | s_stack_info_compare_size)); |
370 | aws_hash_table_foreach(&tracer->stack_info, s_insert_stacks, &stacks_by_size); |
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
378 | while (aws_priority_queue_size(&stacks_by_size) > 0) { |
379 | struct stack_metadata *stack = NULL; |
380 | aws_priority_queue_pop(&stacks_by_size, &stack); |
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
383 | } |
384 | aws_priority_queue_clean_up(&stacks_by_size); |
385 | |
386 | /* sort stacks by number of leaks */ |
387 | struct aws_priority_queue stacks_by_count; |
388 | AWS_FATAL_ASSERT( |
389 | AWS_OP_SUCCESS == aws_priority_queue_init_dynamic( |
390 | &stacks_by_count, |
391 | tracer->allocator, |
392 | num_stacks, |
393 | sizeof(struct stack_metadata *), |
394 | s_stack_info_compare_count)); |
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
        AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
        AWS_LOGF_TRACE(
            AWS_LS_COMMON_MEMTRACE,
            "################################################################################\n");
402 | aws_hash_table_foreach(&tracer->stack_info, s_insert_stacks, &stacks_by_count); |
403 | while (aws_priority_queue_size(&stacks_by_count) > 0) { |
404 | struct stack_metadata *stack = NULL; |
405 | aws_priority_queue_pop(&stacks_by_count, &stack); |
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
            AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
408 | } |
409 | aws_priority_queue_clean_up(&stacks_by_count); |
410 | aws_hash_table_clean_up(&tracer->stack_info); |
411 | } |
412 | |
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n");
    AWS_LOGF_TRACE(
        AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
419 | |
420 | aws_mutex_unlock(&tracer->mutex); |
421 | } |
422 | |
423 | static void *s_trace_mem_acquire(struct aws_allocator *allocator, size_t size) { |
424 | struct alloc_tracer *tracer = allocator->impl; |
425 | void *ptr = aws_mem_acquire(tracer->allocator, size); |
426 | s_alloc_tracer_track(tracer, ptr, size); |
427 | return ptr; |
428 | } |
429 | |
430 | static void s_trace_mem_release(struct aws_allocator *allocator, void *ptr) { |
431 | struct alloc_tracer *tracer = allocator->impl; |
432 | s_alloc_tracer_untrack(tracer, ptr); |
433 | aws_mem_release(tracer->allocator, ptr); |
434 | } |
435 | |
436 | static void *s_trace_mem_realloc(struct aws_allocator *allocator, void *ptr, size_t old_size, size_t new_size) { |
437 | struct alloc_tracer *tracer = allocator->impl; |
438 | void *new_ptr = ptr; |
439 | |
440 | AWS_FATAL_ASSERT(AWS_OP_SUCCESS == aws_mem_realloc(tracer->allocator, &new_ptr, old_size, new_size)); |
441 | |
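    /* re-key the tracking entry: drop the old address, record the allocation under the new one */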
442 | s_alloc_tracer_untrack(tracer, ptr); |
443 | s_alloc_tracer_track(tracer, new_ptr, new_size); |
444 | |
445 | return new_ptr; |
446 | } |
447 | |
448 | static void *s_trace_mem_calloc(struct aws_allocator *allocator, size_t num, size_t size) { |
449 | struct alloc_tracer *tracer = allocator->impl; |
450 | void *ptr = aws_mem_calloc(tracer->allocator, num, size); |
451 | s_alloc_tracer_track(tracer, ptr, num * size); |
452 | return ptr; |
453 | } |
454 | |
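/*
 * Typical usage (a minimal sketch; error handling is omitted and the variable names are illustrative):
 *
 *   struct aws_allocator *traced =
 *       aws_mem_tracer_new(aws_default_allocator(), NULL, AWS_MEMTRACE_STACKS, 8);
 *   // ... route the allocations under test through `traced` ...
 *   aws_mem_tracer_dump(traced);    // log anything still outstanding
 *   struct aws_allocator *original = aws_mem_tracer_destroy(traced);
 */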
455 | struct aws_allocator *aws_mem_tracer_new( |
456 | struct aws_allocator *allocator, |
457 | struct aws_allocator *system_allocator, |
458 | enum aws_mem_trace_level level, |
459 | size_t frames_per_stack) { |
460 | |
461 | if (!system_allocator) { |
462 | system_allocator = aws_default_allocator(); |
463 | } |
464 | |
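    /* allocate the tracer and its wrapping allocator in a single block; aws_mem_tracer_destroy later
     * releases the whole block through the tracer pointer */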
465 | struct alloc_tracer *tracer = NULL; |
466 | struct aws_allocator *trace_allocator = NULL; |
467 | aws_mem_acquire_many( |
468 | system_allocator, 2, &tracer, sizeof(struct alloc_tracer), &trace_allocator, sizeof(struct aws_allocator)); |
469 | |
470 | AWS_FATAL_ASSERT(trace_allocator); |
471 | AWS_FATAL_ASSERT(tracer); |
472 | |
473 | AWS_ZERO_STRUCT(*trace_allocator); |
474 | AWS_ZERO_STRUCT(*tracer); |
475 | |
    /* copy the template vtable */
477 | *trace_allocator = s_trace_allocator; |
478 | trace_allocator->impl = tracer; |
479 | |
480 | s_alloc_tracer_init(tracer, allocator, system_allocator, level, frames_per_stack); |
481 | return trace_allocator; |
482 | } |
483 | |
484 | struct aws_allocator *aws_mem_tracer_destroy(struct aws_allocator *trace_allocator) { |
485 | struct alloc_tracer *tracer = trace_allocator->impl; |
486 | struct aws_allocator *allocator = tracer->allocator; |
487 | |
    /* Cleaning up the tracking tables is not strictly necessary when the whole tracer is being
     * destroyed, but it is cheap and releases the bookkeeping memory, so do it anyway.
     */
491 | aws_mutex_lock(&tracer->mutex); |
492 | aws_hash_table_clean_up(&tracer->allocs); |
493 | aws_hash_table_clean_up(&tracer->stacks); |
494 | aws_mutex_unlock(&tracer->mutex); |
495 | aws_mutex_clean_up(&tracer->mutex); |
496 | |
497 | struct aws_allocator *system_allocator = tracer->system_allocator; |
498 | aws_mem_release(system_allocator, tracer); |
    /* trace_allocator is freed as part of the block the tracer was allocated in */
500 | return allocator; |
501 | } |
502 | |
503 | void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) { |
504 | struct alloc_tracer *tracer = trace_allocator->impl; |
505 | s_alloc_tracer_dump(tracer); |
506 | } |
507 | |
508 | size_t aws_mem_tracer_bytes(struct aws_allocator *trace_allocator) { |
509 | struct alloc_tracer *tracer = trace_allocator->impl; |
510 | return aws_atomic_load_int(&tracer->allocated); |
511 | } |
512 | |
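/* count of live allocations; takes the tracer's mutex because the hash table is not thread-safe */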
513 | size_t aws_mem_tracer_count(struct aws_allocator *trace_allocator) { |
514 | struct alloc_tracer *tracer = trace_allocator->impl; |
515 | aws_mutex_lock(&tracer->mutex); |
516 | size_t count = aws_hash_table_get_entry_count(&tracer->allocs); |
517 | aws_mutex_unlock(&tracer->mutex); |
518 | return count; |
519 | } |
520 | |