/*
 * Copyright 2018-present Facebook, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// http://www.canonware.com/download/jemalloc/jemalloc-latest/doc/jemalloc.html

#pragma once

#include <folly/CPortability.h>
#include <folly/memory/Malloc.h>
#include <folly/portability/Config.h>
#include <folly/portability/Memory.h>
#include <folly/portability/SysMman.h>

#include <cstddef>
#include <cstdint>

namespace folly {

/**
 * An allocator which uses jemalloc to create a dedicated huge page arena,
 * backed by 2MB huge pages (on Linux x86-64).
 *
 * This allocator is specifically intended for Linux with transparent
 * huge page support set to 'madvise' and the defrag policy set to 'madvise'
 * or 'defer+madvise'.
 * These can be controlled via /sys/kernel/mm/transparent_hugepage/enabled
 * and /sys/kernel/mm/transparent_hugepage/defrag.
 *
 * The allocator reserves a fixed-size area using mmap and sets the
 * MADV_HUGEPAGE page attribute using the madvise system call.
 * A custom jemalloc hook is installed which is called when creating a new
 * extent of memory. This will allocate from the reserved area if possible,
 * and otherwise fall back to the default method.
 * Jemalloc does not use allocated extents across different arenas without
 * first unmapping them, and the advice flags are cleared on munmap.
 * A regular malloc will never end up allocating memory from this arena.
 *
 * If the binary isn't linked with jemalloc, the logic falls back to
 * malloc / free.
 *
 * Note that the madvise call does not guarantee huge pages; it is best
 * effort.
 *
 * 1GB huge pages are not supported at this point.
 */
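// The reservation described above amounts to an anonymous mmap followed by
// madvise(MADV_HUGEPAGE). The sketch below is illustrative only, not the
// actual implementation, which additionally installs the jemalloc extent
// hooks; kHugePageSize is a hypothetical constant standing in for the 2MB
// huge page size.
//
//   size_t size = nr_pages * kHugePageSize;
//   void* addr = mmap(
//       nullptr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS,
//       -1, 0);
//   if (addr != MAP_FAILED) {
//     // Best effort: the kernel may or may not back this range with 2MB pages
//     madvise(addr, size, MADV_HUGEPAGE);
//   }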
class JemallocHugePageAllocator {
 public:
  static bool init(int nr_pages);

  static void* allocate(size_t size) {
    // If uninitialized, flags_ will be 0 and the mallocx behavior
    // will match that of a regular malloc
    return hugePagesSupported ? mallocx(size, flags_) : malloc(size);
  }

  static void* reallocate(void* p, size_t size) {
    return hugePagesSupported ? rallocx(p, size, flags_) : realloc(p, size);
  }

  static void deallocate(void* p, size_t = 0) {
    hugePagesSupported ? dallocx(p, flags_) : free(p);
  }

  static bool initialized() {
    return flags_ != 0;
  }

  static size_t freeSpace();
  static bool addressInArena(void* address);

 private:
  static int flags_;
  static bool hugePagesSupported;
};
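
// A minimal usage sketch (illustrative, not part of the original header):
// reserve the arena once at startup, then allocate through the static
// interface. The page count is arbitrary (nr_pages presumably counts 2MB
// huge pages); even if init() returns false, allocate() / deallocate()
// remain safe and simply behave like malloc / free.
//
//   folly::JemallocHugePageAllocator::init(512);
//   void* p = folly::JemallocHugePageAllocator::allocate(4096);
//   // ... use p ...
//   folly::JemallocHugePageAllocator::deallocate(p);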

// STL compatible huge page allocator, for use with STL-style containers
template <typename T>
class CxxHugePageAllocator {
 private:
  using Self = CxxHugePageAllocator<T>;

 public:
  using value_type = T;

  CxxHugePageAllocator() {}

  template <typename U>
  explicit CxxHugePageAllocator(CxxHugePageAllocator<U> const&) {}

  T* allocate(std::size_t n) {
    return static_cast<T*>(JemallocHugePageAllocator::allocate(sizeof(T) * n));
  }
  void deallocate(T* p, std::size_t n) {
    JemallocHugePageAllocator::deallocate(p, sizeof(T) * n);
  }

  friend bool operator==(Self const&, Self const&) noexcept {
    return true;
  }
  friend bool operator!=(Self const&, Self const&) noexcept {
    return false;
  }
};
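
// A usage sketch for the STL-style allocator (illustrative only): hand it to
// a standard container so element storage comes out of the huge page arena,
// assuming JemallocHugePageAllocator::init() was called beforehand.
//
//   std::vector<int, folly::CxxHugePageAllocator<int>> v;
//   v.reserve(1 << 20);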

} // namespace folly