1/* -*- mode: C++; c-basic-offset: 4; indent-tabs-mode: nil -*- */
2// vim: ft=cpp:expandtab:ts=8:sw=4:softtabstop=4:
3#ident "$Id$"
4/*======
5This file is part of PerconaFT.
6
7
8Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved.
9
10 PerconaFT is free software: you can redistribute it and/or modify
11 it under the terms of the GNU General Public License, version 2,
12 as published by the Free Software Foundation.
13
14 PerconaFT is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
21
22----------------------------------------
23
24 PerconaFT is free software: you can redistribute it and/or modify
25 it under the terms of the GNU Affero General Public License, version 3,
26 as published by the Free Software Foundation.
27
28 PerconaFT is distributed in the hope that it will be useful,
29 but WITHOUT ANY WARRANTY; without even the implied warranty of
30 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
31 GNU Affero General Public License for more details.
32
33 You should have received a copy of the GNU Affero General Public License
34 along with PerconaFT. If not, see <http://www.gnu.org/licenses/>.
35======= */
36
37#ident "Copyright (c) 2006, 2015, Percona and/or its affiliates. All rights reserved."
38
39#include <portability/toku_config.h>
40
41#include <toku_portability.h>
42#include <string.h>
43#include <stdio.h>
44#include <stdlib.h>
45#if defined(HAVE_MALLOC_H)
46# include <malloc.h>
47#elif defined(HAVE_SYS_MALLOC_H)
48# include <sys/malloc.h>
49#endif
50#include <dlfcn.h>
51#include <toku_race_tools.h>
52#include "memory.h"
53#include "toku_assert.h"
54#include <portability/toku_atomic.h>
55
// Function-pointer hooks that let an embedding application override the
// allocator used by the toku_* entry points (installed via the
// toku_set_func_* setters below).  A hook of 0/NULL means "use the os_*
// default from toku_portability".
static malloc_fun_t t_malloc = 0;
static malloc_aligned_fun_t t_malloc_aligned = 0;
static malloc_fun_t t_xmalloc = 0;
static malloc_aligned_fun_t t_xmalloc_aligned = 0;
static free_fun_t t_free = 0;
static realloc_fun_t t_realloc = 0;
static realloc_aligned_fun_t t_realloc_aligned = 0;
static realloc_fun_t t_xrealloc = 0;

// Accumulated allocator statistics for this process (counts, byte totals,
// high-water marks).  Individual counters are updated with atomic adds, but
// the struct as a whole is never locked.
static LOCAL_MEMORY_STATUS_S status;
// When nonzero, the wrappers below record per-call statistics into `status`.
int toku_memory_do_stats = 0;

// Guard so toku_memory_startup() runs its initialization only once.
static bool memory_startup_complete;
69
70int
71toku_memory_startup(void) {
72 if (memory_startup_complete) {
73 return 0;
74 }
75 memory_startup_complete = true;
76
77 int result = 0;
78
79#if defined(HAVE_M_MMAP_THRESHOLD)
80 // initialize libc malloc
81 size_t mmap_threshold = 64 * 1024; // 64K and larger should be malloced with mmap().
82 int success = mallopt(M_MMAP_THRESHOLD, mmap_threshold);
83 if (success) {
84 status.mallocator_version = "libc";
85 status.mmap_threshold = mmap_threshold;
86 } else
87 result = EINVAL;
88#else
89 // just a guess
90 status.mallocator_version = "darwin";
91 status.mmap_threshold = 16 * 1024;
92#endif
93
94 // jemalloc has a mallctl function, while libc malloc does not. we can check if jemalloc
95 // is loaded by checking if the mallctl function can be found. if it can, we call it
96 // to get version and mmap threshold configuration.
97 typedef int (*mallctl_fun_t)(const char *, void *, size_t *, void *, size_t);
98 mallctl_fun_t mallctl_f;
99 mallctl_f = (mallctl_fun_t) dlsym(RTLD_DEFAULT, "mallctl");
100 if (mallctl_f) { // jemalloc is loaded
101 size_t version_length = sizeof status.mallocator_version;
102 result = mallctl_f("version", &status.mallocator_version, &version_length, NULL, 0);
103 if (result == 0) {
104 size_t lg_chunk; // log2 of the mmap threshold
105 size_t lg_chunk_length = sizeof lg_chunk;
106 result = mallctl_f("opt.lg_chunk", &lg_chunk, &lg_chunk_length, NULL, 0);
107 if (result)
108 {
109 status.mmap_threshold = 1 << 21; // Default value.
110 // Incompatible jemalloc change.
111 result = 0;
112 }
113 else
114 status.mmap_threshold = 1 << lg_chunk;
115 }
116 }
117
118 return result;
119}
120
// Guard so toku_memory_shutdown() performs teardown only once.
static bool memory_shutdown_complete;

// Idempotent teardown hook: flips the guard flag on first call and does
// nothing thereafter (there is currently no per-process state to release).
void
toku_memory_shutdown(void) {
    if (!memory_shutdown_complete) {
        memory_shutdown_complete = true;
    }
}
130
// Copy the module's accumulated allocator statistics into *s.
// The copy is taken without any locking, so while other threads are
// allocating the snapshot may be slightly inconsistent across fields.
void
toku_memory_get_status(LOCAL_MEMORY_STATUS s) {
    *s = status;
}
135
136// jemalloc's malloc_usable_size does not work with a NULL pointer, so we implement a version that works
137static size_t
138my_malloc_usable_size(void *p) {
139 return p == NULL ? 0 : os_malloc_usable_size(p);
140}
141
// Note that max_in_use may be slightly off because use of max_in_use is not thread-safe.
// It is not worth the overhead to make it completely accurate, but
// this logic is intended to guarantee that it increases monotonically.
// Note that status.sum_used and status.sum_freed increase monotonically
// and that status.max_in_use is declared volatile.
static inline void
set_max(uint64_t sum_used, uint64_t sum_freed) {
    // Skip the update when the counters are momentarily inconsistent
    // (freed observed ahead of used); in_use would underflow otherwise.
    if (sum_used >= sum_freed) {
        uint64_t in_use = sum_used - sum_freed;
        uint64_t old_max;
        // CAS loop: raise max_in_use to in_use, unless another thread has
        // already recorded a value at least as large (then stop).
        do {
            old_max = status.max_in_use;
        } while (old_max < in_use &&
                 !toku_sync_bool_compare_and_swap(&status.max_in_use, old_max, in_use));
    }
}
158
159// Effect: Like toku_memory_footprint, except instead of passing p,
160// we pass toku_malloc_usable_size(p).
161size_t
162toku_memory_footprint_given_usable_size(size_t touched, size_t usable)
163{
164 size_t pagesize = toku_os_get_pagesize();
165 if (usable >= status.mmap_threshold) {
166 int num_pages = (touched + pagesize) / pagesize;
167 return num_pages * pagesize;
168 }
169 return usable;
170}
171
172// Effect: Return an estimate how how much space an object is using, possibly by
173// using toku_malloc_usable_size(p).
174// If p is NULL then returns 0.
175size_t
176toku_memory_footprint(void * p, size_t touched)
177{
178 if (!p) return 0;
179 return toku_memory_footprint_given_usable_size(touched,
180 my_malloc_usable_size(p));
181}
182
183void *
184toku_malloc(size_t size) {
185#if defined(__APPLE__)
186 if (size == 0) {
187 return nullptr;
188 }
189#endif
190
191 if (size > status.max_requested_size) {
192 status.max_requested_size = size;
193 }
194 void *p = t_malloc ? t_malloc(size) : os_malloc(size);
195 if (p) {
196 TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
197 if (toku_memory_do_stats) {
198 size_t used = my_malloc_usable_size(p);
199 toku_sync_add_and_fetch(&status.malloc_count, 1);
200 toku_sync_add_and_fetch(&status.requested,size);
201 toku_sync_add_and_fetch(&status.used, used);
202 set_max(status.used, status.freed);
203 }
204 } else {
205 toku_sync_add_and_fetch(&status.malloc_fail, 1);
206 status.last_failed_size = size;
207 }
208 return p;
209}
210
211void *toku_malloc_aligned(size_t alignment, size_t size) {
212#if defined(__APPLE__)
213 if (size == 0) {
214 return nullptr;
215 }
216#endif
217
218 if (size > status.max_requested_size) {
219 status.max_requested_size = size;
220 }
221 void *p = t_malloc_aligned ? t_malloc_aligned(alignment, size) : os_malloc_aligned(alignment, size);
222 if (p) {
223 TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
224 if (toku_memory_do_stats) {
225 size_t used = my_malloc_usable_size(p);
226 toku_sync_add_and_fetch(&status.malloc_count, 1);
227 toku_sync_add_and_fetch(&status.requested,size);
228 toku_sync_add_and_fetch(&status.used, used);
229 set_max(status.used, status.freed);
230 }
231 } else {
232 toku_sync_add_and_fetch(&status.malloc_fail, 1);
233 status.last_failed_size = size;
234 }
235 return p;
236}
237
238void *
239toku_calloc(size_t nmemb, size_t size) {
240 size_t newsize = nmemb * size;
241 void *p = toku_malloc(newsize);
242 if (p) memset(p, 0, newsize);
243 return p;
244}
245
246void *
247toku_realloc(void *p, size_t size) {
248#if defined(__APPLE__)
249 if (size == 0) {
250 if (p != nullptr) {
251 toku_free(p);
252 }
253 return nullptr;
254 }
255#endif
256
257 if (size > status.max_requested_size) {
258 status.max_requested_size = size;
259 }
260 size_t used_orig = p ? my_malloc_usable_size(p) : 0;
261 void *q = t_realloc ? t_realloc(p, size) : os_realloc(p, size);
262 if (q) {
263 if (toku_memory_do_stats) {
264 size_t used = my_malloc_usable_size(q);
265 toku_sync_add_and_fetch(&status.realloc_count, 1);
266 toku_sync_add_and_fetch(&status.requested, size);
267 toku_sync_add_and_fetch(&status.used, used);
268 toku_sync_add_and_fetch(&status.freed, used_orig);
269 set_max(status.used, status.freed);
270 }
271 } else {
272 toku_sync_add_and_fetch(&status.realloc_fail, 1);
273 status.last_failed_size = size;
274 }
275 return q;
276}
277
278void *toku_realloc_aligned(size_t alignment, void *p, size_t size) {
279#if defined(__APPLE__)
280 if (size == 0) {
281 if (p != nullptr) {
282 toku_free(p);
283 }
284 return nullptr;
285 }
286#endif
287
288 if (size > status.max_requested_size) {
289 status.max_requested_size = size;
290 }
291 size_t used_orig = p ? my_malloc_usable_size(p) : 0;
292 void *q = t_realloc_aligned ? t_realloc_aligned(alignment, p, size) : os_realloc_aligned(alignment, p, size);
293 if (q) {
294 if (toku_memory_do_stats) {
295 size_t used = my_malloc_usable_size(q);
296 toku_sync_add_and_fetch(&status.realloc_count, 1);
297 toku_sync_add_and_fetch(&status.requested, size);
298 toku_sync_add_and_fetch(&status.used, used);
299 toku_sync_add_and_fetch(&status.freed, used_orig);
300 set_max(status.used, status.freed);
301 }
302 } else {
303 toku_sync_add_and_fetch(&status.realloc_fail, 1);
304 status.last_failed_size = size;
305 }
306 return q;
307}
308
309
310void *
311toku_memdup(const void *v, size_t len) {
312 void *p = toku_malloc(len);
313 if (p) memcpy(p, v,len);
314 return p;
315}
316
317char *
318toku_strdup(const char *s) {
319 return (char *) toku_memdup(s, strlen(s)+1);
320}
321
322char *toku_strndup(const char *s, size_t n) {
323 size_t s_size = strlen(s);
324 size_t bytes_to_copy = n > s_size ? s_size : n;
325 ++bytes_to_copy;
326 char *result = (char *)toku_memdup(s, bytes_to_copy);
327 result[bytes_to_copy - 1] = 0;
328 return result;
329}
330
331void
332toku_free(void *p) {
333 if (p) {
334 if (toku_memory_do_stats) {
335 size_t used = my_malloc_usable_size(p);
336 toku_sync_add_and_fetch(&status.free_count, 1);
337 toku_sync_add_and_fetch(&status.freed, used);
338 }
339 if (t_free)
340 t_free(p);
341 else
342 os_free(p);
343 }
344}
345
346void *
347toku_xmalloc(size_t size) {
348#if defined(__APPLE__)
349 if (size == 0) {
350 return nullptr;
351 }
352#endif
353
354 if (size > status.max_requested_size) {
355 status.max_requested_size = size;
356 }
357 void *p = t_xmalloc ? t_xmalloc(size) : os_malloc(size);
358 if (p == NULL) { // avoid function call in common case
359 status.last_failed_size = size;
360 resource_assert(p);
361 }
362 TOKU_ANNOTATE_NEW_MEMORY(p, size); // see #4671 and https://bugs.kde.org/show_bug.cgi?id=297147
363 if (toku_memory_do_stats) {
364 size_t used = my_malloc_usable_size(p);
365 toku_sync_add_and_fetch(&status.malloc_count, 1);
366 toku_sync_add_and_fetch(&status.requested, size);
367 toku_sync_add_and_fetch(&status.used, used);
368 set_max(status.used, status.freed);
369 }
370 return p;
371}
372
373void* toku_xmalloc_aligned(size_t alignment, size_t size)
374// Effect: Perform a malloc(size) with the additional property that the returned pointer is a multiple of ALIGNMENT.
375// Fail with a resource_assert if the allocation fails (don't return an error code).
376// Requires: alignment is a power of two.
377{
378#if defined(__APPLE__)
379 if (size == 0) {
380 return nullptr;
381 }
382#endif
383
384 if (size > status.max_requested_size) {
385 status.max_requested_size = size;
386 }
387 void *p = t_xmalloc_aligned ? t_xmalloc_aligned(alignment, size) : os_malloc_aligned(alignment,size);
388 if (p == NULL && size != 0) {
389 status.last_failed_size = size;
390 resource_assert(p);
391 }
392 if (toku_memory_do_stats) {
393 size_t used = my_malloc_usable_size(p);
394 toku_sync_add_and_fetch(&status.malloc_count, 1);
395 toku_sync_add_and_fetch(&status.requested, size);
396 toku_sync_add_and_fetch(&status.used, used);
397 set_max(status.used, status.freed);
398 }
399 return p;
400}
401
402void *
403toku_xcalloc(size_t nmemb, size_t size) {
404 size_t newsize = nmemb * size;
405 void *vp = toku_xmalloc(newsize);
406 if (vp) memset(vp, 0, newsize);
407 return vp;
408}
409
410void *
411toku_xrealloc(void *v, size_t size) {
412#if defined(__APPLE__)
413 if (size == 0) {
414 if (v != nullptr) {
415 toku_free(v);
416 }
417 return nullptr;
418 }
419#endif
420
421 if (size > status.max_requested_size) {
422 status.max_requested_size = size;
423 }
424 size_t used_orig = v ? my_malloc_usable_size(v) : 0;
425 void *p = t_xrealloc ? t_xrealloc(v, size) : os_realloc(v, size);
426 if (p == 0) { // avoid function call in common case
427 status.last_failed_size = size;
428 resource_assert(p);
429 }
430 if (toku_memory_do_stats) {
431 size_t used = my_malloc_usable_size(p);
432 toku_sync_add_and_fetch(&status.realloc_count, 1);
433 toku_sync_add_and_fetch(&status.requested, size);
434 toku_sync_add_and_fetch(&status.used, used);
435 toku_sync_add_and_fetch(&status.freed, used_orig);
436 set_max(status.used, status.freed);
437 }
438 return p;
439}
440
// Public wrapper for my_malloc_usable_size: number of usable bytes in the
// allocation p, or 0 when p is NULL.
size_t
toku_malloc_usable_size(void *p) {
    return my_malloc_usable_size(p);
}
445
446void *
447toku_xmemdup (const void *v, size_t len) {
448 void *p = toku_xmalloc(len);
449 memcpy(p, v, len);
450 return p;
451}
452
453char *
454toku_xstrdup (const char *s) {
455 return (char *) toku_xmemdup(s, strlen(s)+1);
456}
457
458void
459toku_set_func_malloc(malloc_fun_t f) {
460 t_malloc = f;
461 t_xmalloc = f;
462}
463
464void
465toku_set_func_xmalloc_only(malloc_fun_t f) {
466 t_xmalloc = f;
467}
468
469void
470toku_set_func_malloc_only(malloc_fun_t f) {
471 t_malloc = f;
472}
473
474void
475toku_set_func_realloc(realloc_fun_t f) {
476 t_realloc = f;
477 t_xrealloc = f;
478}
479
480void
481toku_set_func_xrealloc_only(realloc_fun_t f) {
482 t_xrealloc = f;
483}
484
485void
486toku_set_func_realloc_only(realloc_fun_t f) {
487 t_realloc = f;
488
489}
490
491void
492toku_set_func_free(free_fun_t f) {
493 t_free = f;
494}
495
496#include <toku_race_tools.h>
// Runs before main() (GCC/Clang constructor attribute): tell Helgrind/DRD
// to stop checking the `status` struct, whose counters are deliberately
// updated with benign data races (see set_max above).
void __attribute__((constructor)) toku_memory_helgrind_ignore(void);
void
toku_memory_helgrind_ignore(void) {
    TOKU_VALGRIND_HG_DISABLE_CHECKING(&status, sizeof status);
}
502