1 | //===------------------------ fallback_malloc.cpp -------------------------===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | |
9 | // Define _LIBCPP_BUILDING_LIBRARY to ensure _LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION |
10 | // is only defined when libc aligned allocation is not available. |
11 | #define _LIBCPP_BUILDING_LIBRARY |
12 | #include "fallback_malloc.h" |
13 | |
14 | #include <__threading_support> |
15 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
16 | #if defined(__ELF__) && defined(_LIBCXXABI_LINK_PTHREAD_LIB) |
17 | #pragma comment(lib, "pthread") |
18 | #endif |
19 | #endif |
20 | |
#include <stdlib.h> // for malloc, calloc, free
#include <string.h> // for memset
#if defined(DEBUG_FALLBACK_MALLOC) || defined(INSTRUMENT_FALLBACK_MALLOC)
#include <iostream> // for std::cout used by the debug/instrumentation code below
#endif
23 | |
// A small, simple heap manager based (loosely) on
// the startup heap manager from FreeBSD, optimized for space.
//
// Manages a fixed-size memory pool; supports only malloc and free,
// with no support for realloc.
//
// Allocates chunks in multiples of sizeof(heap_node) (four bytes, given
// two-byte shorts), with a one-node header for each chunk. Per-chunk
// overhead is kept low by storing links as two-byte offsets within the
// heap rather than full (4- or 8-byte) pointers.
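//
// For example, assuming two-byte unsigned shorts, sizeof(heap_node) is four
// bytes, so a 16-bit offset in heap_node units can address up to 256 KiB,
// comfortably more than the 512-byte pool defined below (128 units).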
33 | |
34 | namespace { |
35 | |
// When POSIX threads are not available, make the mutex operations no-ops
37 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
38 | _LIBCPP_SAFE_STATIC |
39 | static std::__libcpp_mutex_t heap_mutex = _LIBCPP_MUTEX_INITIALIZER; |
40 | #else |
41 | static void* heap_mutex = 0; |
42 | #endif |
43 | |
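// RAII guard for heap_mutex: locks on construction, unlocks on destruction.
// In single-threaded builds both operations compile away to nothing.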
44 | class mutexor { |
45 | public: |
46 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
47 | mutexor(std::__libcpp_mutex_t* m) : mtx_(m) { |
48 | std::__libcpp_mutex_lock(mtx_); |
49 | } |
50 | ~mutexor() { std::__libcpp_mutex_unlock(mtx_); } |
51 | #else |
52 | mutexor(void*) {} |
53 | ~mutexor() {} |
54 | #endif |
55 | private: |
56 | mutexor(const mutexor& rhs); |
57 | mutexor& operator=(const mutexor& rhs); |
58 | #ifndef _LIBCXXABI_HAS_NO_THREADS |
59 | std::__libcpp_mutex_t* mtx_; |
60 | #endif |
61 | }; |
62 | |
63 | static const size_t HEAP_SIZE = 512; |
64 | char heap[HEAP_SIZE] __attribute__((aligned)); |
65 | |
66 | typedef unsigned short heap_offset; |
67 | typedef unsigned short heap_size; |
68 | |
69 | struct heap_node { |
70 | heap_offset next_node; // offset into heap |
71 | heap_size len; // size in units of "sizeof(heap_node)" |
72 | }; |
73 | |
74 | static const heap_node* list_end = |
75 | (heap_node*)(&heap[HEAP_SIZE]); // one past the end of the heap |
76 | static heap_node* freelist = NULL; |
77 | |
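// Convert between a heap_node pointer and its offset from the start of the
// heap, measured in heap_node-sized units.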
78 | heap_node* node_from_offset(const heap_offset offset) { |
79 | return (heap_node*)(heap + (offset * sizeof(heap_node))); |
80 | } |
81 | |
82 | heap_offset offset_from_node(const heap_node* ptr) { |
83 | return static_cast<heap_offset>( |
84 | static_cast<size_t>(reinterpret_cast<const char*>(ptr) - heap) / |
85 | sizeof(heap_node)); |
86 | } |
87 | |
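// Called lazily on first allocation: the entire pool becomes one free chunk
// whose next_node is the offset of list_end, one past the end of the heap.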
88 | void init_heap() { |
89 | freelist = (heap_node*)heap; |
90 | freelist->next_node = offset_from_node(list_end); |
91 | freelist->len = HEAP_SIZE / sizeof(heap_node); |
92 | } |
93 | |
// Number of heap_node units needed to satisfy a request of len bytes:
// the payload rounded up to whole nodes, plus one node for the header.
size_t alloc_size(size_t len) {
  return (len + sizeof(heap_node) - 1) / sizeof(heap_node) + 1;
}
98 | |
99 | bool is_fallback_ptr(void* ptr) { |
100 | return ptr >= heap && ptr < (heap + HEAP_SIZE); |
101 | } |
102 | |
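// First-fit allocation out of the free list; len is in bytes. Returns NULL
// when no free chunk is large enough. Note that results are aligned only to
// sizeof(heap_node) boundaries.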
103 | void* fallback_malloc(size_t len) { |
104 | heap_node *p, *prev; |
105 | const size_t nelems = alloc_size(len); |
106 | mutexor mtx(&heap_mutex); |
107 | |
108 | if (NULL == freelist) |
109 | init_heap(); |
110 | |
111 | // Walk the free list, looking for a "big enough" chunk |
112 | for (p = freelist, prev = 0; p && p != list_end; |
113 | prev = p, p = node_from_offset(p->next_node)) { |
114 | |
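    // Over-sized chunk: carve the allocation off the tail, so p keeps its
    // place in the free list and no links have to be rewritten.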
115 | if (p->len > nelems) { // chunk is larger, shorten, and return the tail |
116 | heap_node* q; |
117 | |
118 | p->len = static_cast<heap_size>(p->len - nelems); |
119 | q = p + p->len; |
120 | q->next_node = 0; |
121 | q->len = static_cast<heap_size>(nelems); |
122 | return (void*)(q + 1); |
123 | } |
124 | |
125 | if (p->len == nelems) { // exact size match |
126 | if (prev == 0) |
127 | freelist = node_from_offset(p->next_node); |
128 | else |
129 | prev->next_node = p->next_node; |
130 | p->next_node = 0; |
131 | return (void*)(p + 1); |
132 | } |
133 | } |
134 | return NULL; // couldn't find a spot big enough |
135 | } |
136 | |
// Return the start of the next block, i.e. p plus its own length in
// heap_node units
heap_node* after(struct heap_node* p) { return p + p->len; }
139 | |
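// Return a chunk to the free list, merging it into an adjacent free chunk
// where the walk finds one. Only a single neighbor is merged per call, so
// two adjacent free chunks can remain unmerged.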
140 | void fallback_free(void* ptr) { |
141 | struct heap_node* cp = ((struct heap_node*)ptr) - 1; // retrieve the chunk |
142 | struct heap_node *p, *prev; |
143 | |
144 | mutexor mtx(&heap_mutex); |
145 | |
146 | #ifdef DEBUG_FALLBACK_MALLOC |
147 | std::cout << "Freeing item at " << offset_from_node(cp) << " of size " |
148 | << cp->len << std::endl; |
149 | #endif |
150 | |
151 | for (p = freelist, prev = 0; p && p != list_end; |
152 | prev = p, p = node_from_offset(p->next_node)) { |
153 | #ifdef DEBUG_FALLBACK_MALLOC |
154 | std::cout << " p, cp, after (p), after(cp) " << offset_from_node(p) << ' ' |
155 | << offset_from_node(cp) << ' ' << offset_from_node(after(p)) |
156 | << ' ' << offset_from_node(after(cp)) << std::endl; |
157 | #endif |
158 | if (after(p) == cp) { |
159 | #ifdef DEBUG_FALLBACK_MALLOC |
160 | std::cout << " Appending onto chunk at " << offset_from_node(p) |
161 | << std::endl; |
162 | #endif |
163 | p->len = static_cast<heap_size>( |
164 | p->len + cp->len); // make the free heap_node larger |
165 | return; |
    } else if (after(cp) == p) { // there's a free heap_node right after
#ifdef DEBUG_FALLBACK_MALLOC
      std::cout << "  Appending free chunk at " << offset_from_node(p)
                << std::endl;
#endif
      cp->len = static_cast<heap_size>(cp->len + p->len);
      // cp replaces p in the free list, so it must inherit p's link
      // regardless of whether the merge happens at the head or mid-list
      cp->next_node = p->next_node;
      if (prev == 0)
        freelist = cp;
      else
        prev->next_node = offset_from_node(cp);
      return;
178 | } |
179 | } |
  // Nothing to merge with; add it to the start of the free list
181 | #ifdef DEBUG_FALLBACK_MALLOC |
182 | std::cout << " Making new free list entry " << offset_from_node(cp) |
183 | << std::endl; |
184 | #endif |
185 | cp->next_node = offset_from_node(freelist); |
186 | freelist = cp; |
187 | } |
188 | |
189 | #ifdef INSTRUMENT_FALLBACK_MALLOC |
190 | size_t print_free_list() { |
191 | struct heap_node *p, *prev; |
192 | heap_size total_free = 0; |
193 | if (NULL == freelist) |
194 | init_heap(); |
195 | |
196 | for (p = freelist, prev = 0; p && p != list_end; |
197 | prev = p, p = node_from_offset(p->next_node)) { |
198 | std::cout << (prev == 0 ? "" : " " ) << "Offset: " << offset_from_node(p) |
199 | << "\tsize: " << p->len << " Next: " << p->next_node << std::endl; |
200 | total_free += p->len; |
201 | } |
202 | std::cout << "Total Free space: " << total_free << std::endl; |
203 | return total_free; |
204 | } |
205 | #endif |
206 | } // end unnamed namespace |
207 | |
208 | namespace __cxxabiv1 { |
209 | |
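// With no argument, __attribute__((aligned)) requests the largest alignment
// used for any data type on the target; alignof(__aligned_type) then serves
// as the alignment for the aligned allocation paths below.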
210 | struct __attribute__((aligned)) __aligned_type {}; |
211 | |
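// Try the platform's aligned allocator first; fall back to the emergency
// pool only when the system heap is exhausted. The pool itself guarantees
// only sizeof(heap_node) alignment for the pointers it returns.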
212 | void* __aligned_malloc_with_fallback(size_t size) { |
213 | #if defined(_WIN32) |
214 | if (void* dest = _aligned_malloc(size, alignof(__aligned_type))) |
215 | return dest; |
216 | #elif defined(_LIBCPP_HAS_NO_LIBRARY_ALIGNED_ALLOCATION) |
217 | if (void* dest = ::malloc(size)) |
218 | return dest; |
219 | #else |
220 | if (size == 0) |
221 | size = 1; |
222 | void* dest; |
223 | if (::posix_memalign(&dest, __alignof(__aligned_type), size) == 0) |
224 | return dest; |
225 | #endif |
226 | return fallback_malloc(size); |
227 | } |
228 | |
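// calloc() with the same emergency-pool fallback. Memory from the pool is
// zeroed here explicitly, since fallback_malloc does not clear it.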
void* __calloc_with_fallback(size_t count, size_t size) {
  void* ptr = ::calloc(count, size);
  if (NULL != ptr)
    return ptr;
  // if calloc fails, fall back to the emergency stash; reject products that
  // wrap around, since a wrapped size * count could look satisfiable
  if (size != 0 && count > (size_t)-1 / size)
    return NULL;
  ptr = fallback_malloc(size * count);
  if (NULL != ptr)
    ::memset(ptr, 0, size * count);
  return ptr;
}
239 | |
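// Free the result of __aligned_malloc_with_fallback, routing pool pointers
// back to the emergency heap and everything else to the matching system
// deallocator.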
240 | void __aligned_free_with_fallback(void* ptr) { |
241 | if (is_fallback_ptr(ptr)) |
242 | fallback_free(ptr); |
243 | else { |
244 | #if defined(_WIN32) |
245 | ::_aligned_free(ptr); |
246 | #else |
247 | ::free(ptr); |
248 | #endif |
249 | } |
250 | } |
251 | |
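// Free the result of __calloc_with_fallback (or any plain malloc path).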
252 | void __free_with_fallback(void* ptr) { |
253 | if (is_fallback_ptr(ptr)) |
254 | fallback_free(ptr); |
255 | else |
256 | ::free(ptr); |
257 | } |
258 | |
259 | } // namespace __cxxabiv1 |
260 | |