/* ----------------------------------------------------------------------------
Copyright (c) 2018, Microsoft Research, Daan Leijen
This is free software; you can redistribute it and/or modify it under the
terms of the MIT license. A copy of the license can be found in the file
"LICENSE" at the root of this distribution.
-----------------------------------------------------------------------------*/
#ifndef _DEFAULT_SOURCE
#define _DEFAULT_SOURCE   // ensure mmap flags are defined
#endif

#include "mimalloc.h"
#include "mimalloc-internal.h"
#include "mimalloc-atomic.h"

#include <string.h>  // strerror
#include <errno.h>

#if defined(_WIN32)
#include <windows.h>
#elif defined(__wasi__)
// stdlib.h is all we need, and has already been included in mimalloc.h
#else
#include <sys/mman.h>  // mmap
#include <unistd.h>    // sysconf
#if defined(__linux__)
#include <linux/mman.h> // linux mmap flags
#endif
#if defined(__APPLE__)
#include <mach/vm_statistics.h>
#endif
#endif

/* -----------------------------------------------------------
  Initialization.
  On Windows, initializes support for aligned allocation and
  large OS pages (if MIMALLOC_LARGE_OS_PAGES is true).
----------------------------------------------------------- */
bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats);
bool _mi_os_is_huge_reserved(void* p);
void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment);

static void* mi_align_up_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_up((uintptr_t)p, alignment);
}

static uintptr_t _mi_align_down(uintptr_t sz, size_t alignment) {
  return (sz / alignment) * alignment;
}

static void* mi_align_down_ptr(void* p, size_t alignment) {
  return (void*)_mi_align_down((uintptr_t)p, alignment);
}
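// For example, with a 4KiB alignment: mi_align_up_ptr((void*)0x12345, 0x1000)
// yields (void*)0x13000, and mi_align_down_ptr yields (void*)0x12000.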

// page size (initialized properly in `os_init`)
static size_t os_page_size = 4096;

// minimal allocation granularity
static size_t os_alloc_granularity = 4096;

// if non-zero, use large page allocation
static size_t large_os_page_size = 0;

// OS (small) page size
size_t _mi_os_page_size() {
  return os_page_size;
}

// if large OS pages are supported (2 or 4MiB), then return the size, otherwise return the small page size (4KiB)
size_t _mi_os_large_page_size() {
  return (large_os_page_size != 0 ? large_os_page_size : _mi_os_page_size());
}

static bool use_large_os_page(size_t size, size_t alignment) {
  // if we have access, check the size and alignment requirements
  if (large_os_page_size == 0 || !mi_option_is_enabled(mi_option_large_os_pages)) return false;
  return ((size % large_os_page_size) == 0 && (alignment % large_os_page_size) == 0);
}

// round to a good OS allocation size (bounded by max 12.5% waste)
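// For example, a 513KiB request falls in the 64KiB-granularity bucket and is
// rounded up to 576KiB; the 63KiB of waste (~12.3%) stays under the 12.5% bound.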
size_t _mi_os_good_alloc_size(size_t size) {
  size_t align_size;
  if (size < 512*KiB) align_size = _mi_os_page_size();
  else if (size < 2*MiB) align_size = 64*KiB;
  else if (size < 8*MiB) align_size = 256*KiB;
  else if (size < 32*MiB) align_size = 1*MiB;
  else align_size = 4*MiB;
  if (size >= (SIZE_MAX - align_size)) return size; // possible overflow?
  return _mi_align_up(size, align_size);
}
#if defined(_WIN32)
// We use VirtualAlloc2 for aligned allocation, but it is only supported on Windows 10 and Windows Server 2016.
// So, we need to look it up dynamically to run on older systems. (use __stdcall for 32-bit compatibility)
// NtAllocateVirtualMemoryEx is used for huge OS page allocation (1GiB).
// We hide MEM_EXTENDED_PARAMETER to compile with older SDK's.
#include <winternl.h>
typedef PVOID    (__stdcall *PVirtualAlloc2)(HANDLE, PVOID, SIZE_T, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ void*, ULONG);
typedef NTSTATUS (__stdcall *PNtAllocateVirtualMemoryEx)(HANDLE, PVOID*, SIZE_T*, ULONG, ULONG, /* MEM_EXTENDED_PARAMETER* */ PVOID, ULONG);
static PVirtualAlloc2 pVirtualAlloc2 = NULL;
static PNtAllocateVirtualMemoryEx pNtAllocateVirtualMemoryEx = NULL;

static bool mi_win_enable_large_os_pages()
{
  if (large_os_page_size > 0) return true;

  // Try to see if large OS pages are supported.
  // To use large pages on Windows, we first need access permission:
  // set the "Lock pages in memory" privilege in the group policy editor.
  // <https://devblogs.microsoft.com/oldnewthing/20110128-00/?p=11643>
  unsigned long err = 0;
  HANDLE token = NULL;
  BOOL ok = OpenProcessToken(GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &token);
  if (ok) {
    TOKEN_PRIVILEGES tp;
    ok = LookupPrivilegeValue(NULL, TEXT("SeLockMemoryPrivilege"), &tp.Privileges[0].Luid);
    if (ok) {
      tp.PrivilegeCount = 1;
      tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;
      ok = AdjustTokenPrivileges(token, FALSE, &tp, 0, (PTOKEN_PRIVILEGES)NULL, 0);
      if (ok) {
        err = GetLastError();
        ok = (err == ERROR_SUCCESS);
        if (ok) {
          large_os_page_size = GetLargePageMinimum();
        }
      }
    }
    CloseHandle(token);
  }
  if (!ok) {
    if (err == 0) err = GetLastError();
    _mi_warning_message("cannot enable large OS page support, error %lu\n", err);
  }
  return (ok!=0);
}

void _mi_os_init(void) {
  // get the page size
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  if (si.dwPageSize > 0) os_page_size = si.dwPageSize;
  if (si.dwAllocationGranularity > 0) os_alloc_granularity = si.dwAllocationGranularity;
  // get the VirtualAlloc2 function
  HINSTANCE hDll;
  hDll = LoadLibrary(TEXT("kernelbase.dll"));
  if (hDll != NULL) {
    // use VirtualAlloc2FromApp if possible as it is available to Windows store apps
    pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2FromApp");
    if (pVirtualAlloc2==NULL) pVirtualAlloc2 = (PVirtualAlloc2)(void (*)(void))GetProcAddress(hDll, "VirtualAlloc2");
    FreeLibrary(hDll);
  }
  hDll = LoadLibrary(TEXT("ntdll.dll"));
  if (hDll != NULL) {
    pNtAllocateVirtualMemoryEx = (PNtAllocateVirtualMemoryEx)(void (*)(void))GetProcAddress(hDll, "NtAllocateVirtualMemoryEx");
    FreeLibrary(hDll);
  }
  if (mi_option_is_enabled(mi_option_large_os_pages) || mi_option_is_enabled(mi_option_reserve_huge_os_pages)) {
    mi_win_enable_large_os_pages();
  }
}
#elif defined(__wasi__)
void _mi_os_init() {
  os_page_size = 0x10000; // WebAssembly has a fixed page size: 64KiB
  os_alloc_granularity = 16;
}
#else
void _mi_os_init() {
  // get the page size
  long result = sysconf(_SC_PAGESIZE);
  if (result > 0) {
    os_page_size = (size_t)result;
    os_alloc_granularity = os_page_size;
  }
  if (mi_option_is_enabled(mi_option_large_os_pages)) {
    large_os_page_size = (1UL << 21); // 2MiB
  }
}
#endif


/* -----------------------------------------------------------
  Raw allocation on Windows (VirtualAlloc) and Unix's (mmap).
----------------------------------------------------------- */

static bool mi_os_mem_free(void* addr, size_t size, bool was_committed, mi_stats_t* stats)
{
  if (addr == NULL || size == 0 || _mi_os_is_huge_reserved(addr)) return true;
  bool err = false;
#if defined(_WIN32)
  err = (VirtualFree(addr, 0, MEM_RELEASE) == 0);
#elif defined(__wasi__)
  err = false; // WebAssembly's heap cannot be shrunk
#else
  err = (munmap(addr, size) == -1);
#endif
  if (was_committed) _mi_stat_decrease(&stats->committed, size);
  _mi_stat_decrease(&stats->reserved, size);
  if (err) {
#pragma warning(suppress:4996)
    _mi_warning_message("munmap failed: %s, addr %p, size %zu\n", strerror(errno), addr, size);
    return false;
  }
  else {
    return true;
  }
}

static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size);

#ifdef _WIN32
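// Allocation strategy on Windows, tried in order:
//  1. NtAllocateVirtualMemoryEx for 1GiB huge pages (when requested and available),
//  2. plain VirtualAlloc at a 4MiB-aligned hint address in the 4TiB+ area (64-bit only),
//  3. VirtualAlloc2 with an explicit alignment requirement (Windows 10+),
//  4. plain VirtualAlloc as the final fallback.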
static void* mi_win_virtual_allocx(void* addr, size_t size, size_t try_alignment, DWORD flags) {
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
  // on modern Windows, try to use NtAllocateVirtualMemoryEx for 1GiB huge pages
  if ((size % ((uintptr_t)1 << 30)) == 0 /* 1GiB multiple */
    && (flags & MEM_LARGE_PAGES) != 0 && (flags & MEM_COMMIT) != 0 && (flags & MEM_RESERVE) != 0
    && (addr != NULL || try_alignment == 0 || try_alignment % _mi_os_page_size() == 0)
    && pNtAllocateVirtualMemoryEx != NULL)
  {
    #ifndef MEM_EXTENDED_PARAMETER_NONPAGED_HUGE
    #define MEM_EXTENDED_PARAMETER_NONPAGED_HUGE  (0x10)
    #endif
    MEM_EXTENDED_PARAMETER param = { 0, 0 };
    param.Type = 5; // == MemExtendedParameterAttributeFlags;
    param.ULong64 = MEM_EXTENDED_PARAMETER_NONPAGED_HUGE;
    SIZE_T psize = size;
    void* base = addr;
    NTSTATUS err = (*pNtAllocateVirtualMemoryEx)(GetCurrentProcess(), &base, &psize, flags, PAGE_READWRITE, &param, 1);
    if (err == 0) {
      return base;
    }
    else {
      // fall back to regular large OS pages
      _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (error 0x%lx)\n", err);
    }
  }
#endif
#if (MI_INTPTR_SIZE >= 8)
  // on 64-bit systems, try to use the virtual address area after 4TiB for 4MiB aligned allocations
  void* hint;
  if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment,size)) != NULL) {
    return VirtualAlloc(hint, size, flags, PAGE_READWRITE);
  }
#endif
#if defined(MEM_EXTENDED_PARAMETER_TYPE_BITS)
  // on modern Windows, try to use VirtualAlloc2 for aligned allocation
  if (try_alignment > 0 && (try_alignment % _mi_os_page_size()) == 0 && pVirtualAlloc2 != NULL) {
    MEM_ADDRESS_REQUIREMENTS reqs = { 0 };
    reqs.Alignment = try_alignment;
    MEM_EXTENDED_PARAMETER param = { 0 };
    param.Type = MemExtendedParameterAddressRequirements;
    param.Pointer = &reqs;
    return (*pVirtualAlloc2)(GetCurrentProcess(), addr, size, flags, PAGE_READWRITE, &param, 1);
  }
#endif
  return VirtualAlloc(addr, size, flags, PAGE_READWRITE);
}

static void* mi_win_virtual_alloc(void* addr, size_t size, size_t try_alignment, DWORD flags, bool large_only, bool allow_large, bool* is_large) {
  mi_assert_internal(!(large_only && !allow_large));
  static volatile _Atomic(uintptr_t) large_page_try_ok; // = 0;
  void* p = NULL;
  if ((large_only || use_large_os_page(size, try_alignment))
      && allow_large && (flags&MEM_COMMIT)!=0 && (flags&MEM_RESERVE)!=0) {
    uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
    if (!large_only && try_ok > 0) {
      // If a large page allocation fails, it seems the calls to VirtualAlloc get very expensive.
      // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times.
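      // (A weak CAS suffices here: if it fails, another thread decremented the
      // counter concurrently and we simply skip the decrement.)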
      mi_atomic_cas_weak(&large_page_try_ok, try_ok - 1, try_ok);
    }
    else {
      // large OS pages must always reserve and commit.
      *is_large = true;
      p = mi_win_virtual_allocx(addr, size, try_alignment, flags | MEM_LARGE_PAGES);
      if (large_only) return p;
      // fall back to non-large page allocation on error (`p == NULL`).
      if (p == NULL) {
        mi_atomic_write(&large_page_try_ok,10); // on error, don't try again for the next N allocations
      }
    }
  }
  if (p == NULL) {
    *is_large = ((flags&MEM_LARGE_PAGES) != 0);
    p = mi_win_virtual_allocx(addr, size, try_alignment, flags);
  }
  if (p == NULL) {
    _mi_warning_message("unable to allocate memory: error code: %lu, addr: %p, size: 0x%zx, large only: %d, allow_large: %d\n", GetLastError(), addr, size, large_only, allow_large);
  }
  return p;
}

#elif defined(__wasi__)
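// Grow the WebAssembly heap so that an aligned block of `size` bytes is available.
// For example, if the memory end is at 0x30000 and a 128KiB-aligned block is
// requested, aligned_base becomes 0x40000 and we grow by enough whole 64KiB
// pages to cover [0x30000, 0x40000 + size).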
static void* mi_wasm_heap_grow(size_t size, size_t try_alignment) {
  uintptr_t base = __builtin_wasm_memory_size(0) * _mi_os_page_size();
  uintptr_t aligned_base = _mi_align_up(base, (uintptr_t)try_alignment);
  size_t alloc_size = _mi_align_up(aligned_base - base + size, _mi_os_page_size());
  mi_assert(alloc_size >= size && (alloc_size % _mi_os_page_size()) == 0);
  if (alloc_size < size) return NULL;
  if (__builtin_wasm_memory_grow(0, alloc_size / _mi_os_page_size()) == SIZE_MAX) {
    errno = ENOMEM;
    return NULL;
  }
  return (void*)aligned_base;
}
#else
#define MI_OS_USE_MMAP
static void* mi_unix_mmapx(void* addr, size_t size, size_t try_alignment, int protect_flags, int flags, int fd) {
  void* p = NULL;
#if (MI_INTPTR_SIZE >= 8) && !defined(MAP_ALIGNED)
  // on 64-bit systems, use the virtual address area after 4TiB for 4MiB aligned allocations
  void* hint;
  if (addr == NULL && (hint = mi_os_get_aligned_hint(try_alignment, size)) != NULL) {
    p = mmap(hint,size,protect_flags,flags,fd,0);
    if (p==MAP_FAILED) p = NULL; // fall back to regular mmap
  }
#else
  UNUSED(try_alignment);
#endif
  if (p==NULL) {
    p = mmap(addr,size,protect_flags,flags,fd,0);
    if (p==MAP_FAILED) p = NULL;
  }
  return p;
}

static void* mi_unix_mmap(void* addr, size_t size, size_t try_alignment, int protect_flags, bool large_only, bool allow_large, bool* is_large) {
  void* p = NULL;
  #if !defined(MAP_ANONYMOUS)
  #define MAP_ANONYMOUS  MAP_ANON
  #endif
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  int fd = -1;
  #if defined(MAP_ALIGNED)  // BSD
  if (try_alignment > 0) {
    size_t n = _mi_bsr(try_alignment);
    if (((size_t)1 << n) == try_alignment && n >= 12 && n <= 30) { // alignment is a power of 2 and 4096 <= alignment <= 1GiB
      flags |= MAP_ALIGNED(n);
    }
  }
  #endif
  #if defined(PROT_MAX)
  protect_flags |= PROT_MAX(PROT_READ | PROT_WRITE); // BSD
  #endif
  #if defined(VM_MAKE_TAG)
  // macOS: track anonymous pages with a specific ID (all IDs up to 98 are taken officially, but the LLVM sanitizers use 99)
  int os_tag = (int)mi_option_get(mi_option_os_tag);
  if (os_tag < 100 || os_tag > 255) os_tag = 100;
  fd = VM_MAKE_TAG(os_tag);
  #endif
  if ((large_only || use_large_os_page(size, try_alignment)) && allow_large) {
    static volatile _Atomic(uintptr_t) large_page_try_ok; // = 0;
    uintptr_t try_ok = mi_atomic_read(&large_page_try_ok);
    if (!large_only && try_ok > 0) {
      // If the OS is not configured for large OS pages, or the user does not have
      // enough permission, the `mmap` will always fail (but it might also fail for other reasons).
      // Therefore, once a large page allocation failed, we don't try again for `large_page_try_ok` times
      // to avoid too many failing calls to mmap.
      mi_atomic_cas_weak(&large_page_try_ok, try_ok - 1, try_ok);
    }
    else {
      int lflags = flags;
      int lfd = fd;
      #ifdef MAP_ALIGNED_SUPER
      lflags |= MAP_ALIGNED_SUPER;
      #endif
      #ifdef MAP_HUGETLB
      lflags |= MAP_HUGETLB;
      #endif
      #ifdef MAP_HUGE_1GB
      if ((size % ((uintptr_t)1 << 30)) == 0) {
        lflags |= MAP_HUGE_1GB;
      }
      else
      #endif
      {
        #ifdef MAP_HUGE_2MB
        lflags |= MAP_HUGE_2MB;
        #endif
      }
      #ifdef VM_FLAGS_SUPERPAGE_SIZE_2MB
      lfd |= VM_FLAGS_SUPERPAGE_SIZE_2MB;
      #endif
      if (large_only || lflags != flags) {
        // try large OS page allocation
        *is_large = true;
        p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
        #ifdef MAP_HUGE_1GB
        if (p == NULL && (lflags & MAP_HUGE_1GB) != 0) {
          _mi_warning_message("unable to allocate huge (1GiB) page, trying large (2MiB) pages instead (error %i)\n", errno);
          lflags = ((lflags & ~MAP_HUGE_1GB) | MAP_HUGE_2MB);
          p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, lflags, lfd);
        }
        #endif
        if (large_only) return p;
        if (p == NULL) {
          mi_atomic_write(&large_page_try_ok, 10); // on error, don't try again for the next N allocations
        }
      }
    }
  }
  if (p == NULL) {
    *is_large = false;
    p = mi_unix_mmapx(addr, size, try_alignment, protect_flags, flags, fd);
    #if defined(MADV_HUGEPAGE)
    // Many Linux systems don't allow MAP_HUGETLB but they support transparent
    // huge pages (THP) instead. It is not required to call `madvise` with
    // MADV_HUGEPAGE though, since properly aligned allocations will already use
    // large pages if available in that case -- in particular for our large
    // regions (in `memory.c`). However, some systems only allow THP after an
    // explicit `madvise`, so when large OS pages are enabled for mimalloc, we
    // call `madvise` anyway.
    if (p != NULL && allow_large && use_large_os_page(size, try_alignment)) {
      if (madvise(p, size, MADV_HUGEPAGE) == 0) {
        *is_large = true; // possibly
      }
    }
    #endif
  }
  return p;
}
#endif

// On 64-bit systems, we can do efficient aligned allocation by using
// the 4TiB to 30TiB virtual address area for allocation hints.
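// For example, with 4MiB segments the hint cursor starts at (a randomized
// offset past) 4TiB and advances by the requested size on every allocation,
// so concurrent callers receive disjoint, aligned address ranges.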
#if (MI_INTPTR_SIZE >= 8) && (defined(_WIN32) || (defined(MI_OS_USE_MMAP) && !defined(MAP_ALIGNED)))
static volatile _Atomic(intptr_t) aligned_base;

// Return a 4MiB aligned address that is probably available
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
  if (try_alignment == 0 || try_alignment > MI_SEGMENT_SIZE) return NULL;
  if ((size%MI_SEGMENT_SIZE) != 0) return NULL;
  intptr_t hint = mi_atomic_add(&aligned_base, size);
  if (hint == 0 || hint > ((intptr_t)30<<40)) { // try to wrap around after 30TiB (area after 32TiB is used for huge OS pages)
    intptr_t init = ((intptr_t)4 << 40); // start at 4TiB area
    #if (MI_SECURE>0 || MI_DEBUG==0)     // security: randomize start of aligned allocations unless in debug mode
    uintptr_t r = _mi_random_init((uintptr_t)&mi_os_get_aligned_hint ^ hint);
    init = init + (MI_SEGMENT_SIZE * ((r>>17) & 0xFFFF)); // (randomly 0-64k)*4MiB == 0 to 256GiB
    #endif
    mi_atomic_cas_strong(mi_atomic_cast(uintptr_t, &aligned_base), init, hint + size);
    hint = mi_atomic_add(&aligned_base, size); // this may still give 0 or > 30TiB but that is ok, it is a hint after all
  }
  if (hint%try_alignment != 0) return NULL;
  return (void*)hint;
}
#else
static void* mi_os_get_aligned_hint(size_t try_alignment, size_t size) {
  UNUSED(try_alignment); UNUSED(size);
  return NULL;
}
#endif


// Primitive allocation from the OS.
// Note: the `try_alignment` is just a hint and the returned pointer is not guaranteed to be aligned.
static void* mi_os_mem_alloc(size_t size, size_t try_alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  if (size == 0) return NULL;
  if (!commit) allow_large = false;

  void* p = NULL;
  /*
  if (commit && allow_large) {
    p = _mi_os_try_alloc_from_huge_reserved(size, try_alignment);
    if (p != NULL) {
      *is_large = true;
      return p;
    }
  }
  */

  #if defined(_WIN32)
  int flags = MEM_RESERVE;
  if (commit) flags |= MEM_COMMIT;
  p = mi_win_virtual_alloc(NULL, size, try_alignment, flags, false, allow_large, is_large);
  #elif defined(__wasi__)
  *is_large = false;
  p = mi_wasm_heap_grow(size, try_alignment);
  #else
  int protect_flags = (commit ? (PROT_WRITE | PROT_READ) : PROT_NONE);
  p = mi_unix_mmap(NULL, size, try_alignment, protect_flags, false, allow_large, is_large);
  #endif
  mi_stat_counter_increase(stats->mmap_calls, 1);
  if (p != NULL) {
    _mi_stat_increase(&stats->reserved, size);
    if (commit) { _mi_stat_increase(&stats->committed, size); }
  }
  return p;
}


// Primitive aligned allocation from the OS.
// This function guarantees the allocated memory is aligned.
static void* mi_os_mem_alloc_aligned(size_t size, size_t alignment, bool commit, bool allow_large, bool* is_large, mi_stats_t* stats) {
  mi_assert_internal(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0));
  mi_assert_internal(size > 0 && (size % _mi_os_page_size()) == 0);
  if (!commit) allow_large = false;
  if (!(alignment >= _mi_os_page_size() && ((alignment & (alignment - 1)) == 0))) return NULL;
  size = _mi_align_up(size, _mi_os_page_size());

  // try first with a hint (this will be aligned directly on Win 10+ or BSD)
  void* p = mi_os_mem_alloc(size, alignment, commit, allow_large, is_large, stats);
  if (p == NULL) return NULL;

  // if not aligned, free it, overallocate, and unmap around it
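  // For example, requesting 8MiB aligned to 4MiB may return p = 0x7f1234200000;
  // we then map over_size = 12MiB, compute aligned_p = 0x7f1234400000, and on
  // mmap systems unmap the 2MiB before and after the aligned 8MiB block.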
  if (((uintptr_t)p % alignment) != 0) {
    mi_os_mem_free(p, size, commit, stats);
    if (size >= (SIZE_MAX - alignment)) return NULL; // overflow
    size_t over_size = size + alignment;

#if _WIN32
    // over-allocate and then re-allocate exactly at an aligned address in there.
    // this may fail due to threads allocating at the same time so we
    // retry this at most 3 times before giving up.
    // (we cannot decommit around the overallocation on Windows, because we can only
    //  free the original pointer, not one pointing inside the area)
    int flags = MEM_RESERVE;
    if (commit) flags |= MEM_COMMIT;
    for (int tries = 0; tries < 3; tries++) {
      // over-allocate to determine a virtual memory range
      p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
      if (p == NULL) return NULL; // error
      if (((uintptr_t)p % alignment) == 0) {
        // if p happens to be aligned, just decommit the left-over area
        _mi_os_decommit((uint8_t*)p + size, over_size - size, stats);
        break;
      }
      else {
        // otherwise free and allocate at an aligned address in there
        mi_os_mem_free(p, over_size, commit, stats);
        void* aligned_p = mi_align_up_ptr(p, alignment);
        p = mi_win_virtual_alloc(aligned_p, size, alignment, flags, false, allow_large, is_large);
        if (p == aligned_p) break; // success!
        if (p != NULL) { // should not happen?
          mi_os_mem_free(p, size, commit, stats);
          p = NULL;
        }
      }
    }
#else
    // overallocate...
    p = mi_os_mem_alloc(over_size, alignment, commit, false, is_large, stats);
    if (p == NULL) return NULL;
    // ... and selectively unmap parts around the over-allocated area.
    void* aligned_p = mi_align_up_ptr(p, alignment);
    size_t pre_size = (uint8_t*)aligned_p - (uint8_t*)p;
    size_t mid_size = _mi_align_up(size, _mi_os_page_size());
    size_t post_size = over_size - pre_size - mid_size;
    mi_assert_internal(pre_size < over_size && post_size < over_size && mid_size >= size);
    if (pre_size > 0)  mi_os_mem_free(p, pre_size, commit, stats);
    if (post_size > 0) mi_os_mem_free((uint8_t*)aligned_p + mid_size, post_size, commit, stats);
    // we can return the aligned pointer on `mmap` systems
    p = aligned_p;
#endif
  }

  mi_assert_internal(p == NULL || ((uintptr_t)p % alignment) == 0);
  return p;
}

/* -----------------------------------------------------------
  OS API: alloc, free, alloc_aligned
----------------------------------------------------------- */
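// Example (hypothetical internal caller): allocate an 8MiB block aligned to
// 4MiB, allowing large OS pages if enabled:
//   bool large = true;
//   void* p = _mi_os_alloc_aligned(8*MiB, 4*MiB, true /* commit */, &large, tld);
//   // on return, `large` tells whether large OS pages were actually used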

void* _mi_os_alloc(size_t size, mi_stats_t* stats) {
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  bool is_large = false;
  return mi_os_mem_alloc(size, 0, true, false, &is_large, stats);
}

void _mi_os_free_ex(void* p, size_t size, bool was_committed, mi_stats_t* stats) {
  if (size == 0 || p == NULL) return;
  size = _mi_os_good_alloc_size(size);
  mi_os_mem_free(p, size, was_committed, stats);
}

void _mi_os_free(void* p, size_t size, mi_stats_t* stats) {
  _mi_os_free_ex(p, size, true, stats);
}

void* _mi_os_alloc_aligned(size_t size, size_t alignment, bool commit, bool* large, mi_os_tld_t* tld)
{
  if (size == 0) return NULL;
  size = _mi_os_good_alloc_size(size);
  alignment = _mi_align_up(alignment, _mi_os_page_size());
  bool allow_large = false;
  if (large != NULL) {
    allow_large = *large;
    *large = false;
  }
  return mi_os_mem_alloc_aligned(size, alignment, commit, allow_large, (large!=NULL?large:&allow_large), tld->stats);
}



/* -----------------------------------------------------------
  OS memory API: reset, commit, decommit, protect, unprotect.
----------------------------------------------------------- */


// OS page align within a given area, either conservative (pages inside the area only),
// or not (straddling pages outside the area is possible)
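// For example, with 4KiB pages, addr = 0x1004 and size = 0x3000 give the
// conservative range [0x2000,0x4000) but the liberal range [0x1000,0x5000).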
static void* mi_os_page_align_areax(bool conservative, void* addr, size_t size, size_t* newsize) {
  mi_assert(addr != NULL && size > 0);
  if (newsize != NULL) *newsize = 0;
  if (size == 0 || addr == NULL) return NULL;

  // page align conservatively within the range
  void* start = (conservative ? mi_align_up_ptr(addr, _mi_os_page_size())
                              : mi_align_down_ptr(addr, _mi_os_page_size()));
  void* end   = (conservative ? mi_align_down_ptr((uint8_t*)addr + size, _mi_os_page_size())
                              : mi_align_up_ptr((uint8_t*)addr + size, _mi_os_page_size()));
  ptrdiff_t diff = (uint8_t*)end - (uint8_t*)start;
  if (diff <= 0) return NULL;

  mi_assert_internal((conservative && (size_t)diff <= size) || (!conservative && (size_t)diff >= size));
  if (newsize != NULL) *newsize = (size_t)diff;
  return start;
}

static void* mi_os_page_align_area_conservative(void* addr, size_t size, size_t* newsize) {
  return mi_os_page_align_areax(true, addr, size, newsize);
}

// Commit/Decommit memory.
// Usually commit is aligned liberally, while decommit is aligned conservatively
// (but not for the reset version where we want commit to be conservative as well).
static bool mi_os_commitx(void* addr, size_t size, bool commit, bool conservative, bool* is_zero, mi_stats_t* stats) {
  // page align in the range, commit liberally, decommit conservatively
  *is_zero = false;
  size_t csize;
  void* start = mi_os_page_align_areax(conservative, addr, size, &csize);
  if (csize == 0 || _mi_os_is_huge_reserved(addr)) return true;
  int err = 0;
  if (commit) {
    _mi_stat_increase(&stats->committed, csize);
    _mi_stat_counter_increase(&stats->commit_calls, 1);
  }
  else {
    _mi_stat_decrease(&stats->committed, csize);
  }

  #if defined(_WIN32)
  if (commit) {
    // if the memory was already committed, the call succeeds but it is not zero'd
    // *is_zero = true;
    void* p = VirtualAlloc(start, csize, MEM_COMMIT, PAGE_READWRITE);
    err = (p == start ? 0 : GetLastError());
  }
  else {
    BOOL ok = VirtualFree(start, csize, MEM_DECOMMIT);
    err = (ok ? 0 : GetLastError());
  }
  #elif defined(__wasi__)
  // WebAssembly guests can't control memory protection
  #else
  err = mprotect(start, csize, (commit ? (PROT_READ | PROT_WRITE) : PROT_NONE));
  if (err != 0) { err = errno; }
  #endif
  if (err != 0) {
    _mi_warning_message("commit/decommit error: start: %p, csize: 0x%zx, err: %i\n", start, csize, err);
  }
  mi_assert_internal(err == 0);
  return (err == 0);
}

bool _mi_os_commit(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
  return mi_os_commitx(addr, size, true, false /* conservative? */, is_zero, stats);
}

bool _mi_os_decommit(void* addr, size_t size, mi_stats_t* stats) {
  bool is_zero;
  return mi_os_commitx(addr, size, false, true /* conservative? */, &is_zero, stats);
}

bool _mi_os_commit_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
  return mi_os_commitx(addr, size, true, true /* conservative? */, is_zero, stats);
}


// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
static bool mi_os_resetx(void* addr, size_t size, bool reset, mi_stats_t* stats) {
  // page align conservatively within the range
  size_t csize;
  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
  if (csize == 0 || _mi_os_is_huge_reserved(addr)) return true;
  if (reset) _mi_stat_increase(&stats->reset, csize);
  else       _mi_stat_decrease(&stats->reset, csize);
  if (!reset) return true; // nothing to do on unreset!

  #if (MI_DEBUG>1)
  if (MI_SECURE==0) {
    memset(start, 0, csize); // pretend it is eagerly reset
  }
  #endif

#if defined(_WIN32)
  // Testing shows that for us (on `malloc-large`) MEM_RESET is 2x faster than DiscardVirtualMemory
  void* p = VirtualAlloc(start, csize, MEM_RESET, PAGE_READWRITE);
  mi_assert_internal(p == start);
  #if 1
  if (p == start && start != NULL) {
    VirtualUnlock(start,csize); // VirtualUnlock after MEM_RESET removes the memory from the working set
  }
  #endif
  if (p != start) return false;
#else
#if defined(MADV_FREE)
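  // MADV_FREE (Linux 4.5+, BSD) is preferred over MADV_DONTNEED since the pages
  // are only reclaimed lazily under memory pressure, making later reuse cheaper.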
  static int advice = MADV_FREE;
  int err = madvise(start, csize, advice);
  if (err != 0 && errno == EINVAL && advice == MADV_FREE) {
    // if MADV_FREE is not supported, fall back to MADV_DONTNEED from now on
    advice = MADV_DONTNEED;
    err = madvise(start, csize, advice);
  }
#elif defined(__wasi__)
  int err = 0;
#else
  int err = madvise(start, csize, MADV_DONTNEED);
#endif
  if (err != 0) {
    _mi_warning_message("madvise reset error: start: %p, csize: 0x%zx, errno: %i\n", start, csize, errno);
  }
  //mi_assert(err == 0);
  if (err != 0) return false;
#endif
  return true;
}

// Signal to the OS that the address range is no longer in use
// but may be used later again. This will release physical memory
// pages and reduce swapping while keeping the memory committed.
// We page align to a conservative area inside the range to reset.
bool _mi_os_reset(void* addr, size_t size, mi_stats_t* stats) {
  if (mi_option_is_enabled(mi_option_reset_decommits)) {
    return _mi_os_decommit(addr,size,stats);
  }
  else {
    return mi_os_resetx(addr, size, true, stats);
  }
}

bool _mi_os_unreset(void* addr, size_t size, bool* is_zero, mi_stats_t* stats) {
  if (mi_option_is_enabled(mi_option_reset_decommits)) {
    return _mi_os_commit_unreset(addr, size, is_zero, stats); // re-commit it (conservatively!)
  }
  else {
    *is_zero = false;
    return mi_os_resetx(addr, size, false, stats);
  }
}


// Protect a region of memory so it is no longer accessible.
static bool mi_os_protectx(void* addr, size_t size, bool protect) {
  // page align conservatively within the range
  size_t csize = 0;
  void* start = mi_os_page_align_area_conservative(addr, size, &csize);
  if (csize == 0) return false;
  if (_mi_os_is_huge_reserved(addr)) {
    _mi_warning_message("cannot mprotect memory allocated in huge OS pages\n");
  }
  int err = 0;
#ifdef _WIN32
  DWORD oldprotect = 0;
  BOOL ok = VirtualProtect(start, csize, protect ? PAGE_NOACCESS : PAGE_READWRITE, &oldprotect);
  err = (ok ? 0 : GetLastError());
#elif defined(__wasi__)
  err = 0;
#else
  err = mprotect(start, csize, protect ? PROT_NONE : (PROT_READ | PROT_WRITE));
  if (err != 0) { err = errno; }
#endif
  if (err != 0) {
    _mi_warning_message("mprotect error: start: %p, csize: 0x%zx, err: %i\n", start, csize, err);
  }
  return (err == 0);
}

bool _mi_os_protect(void* addr, size_t size) {
  return mi_os_protectx(addr, size, true);
}

bool _mi_os_unprotect(void* addr, size_t size) {
  return mi_os_protectx(addr, size, false);
}


bool _mi_os_shrink(void* p, size_t oldsize, size_t newsize, mi_stats_t* stats) {
  // page align conservatively within the range
  mi_assert_internal(oldsize > newsize && p != NULL);
  if (oldsize < newsize || p == NULL) return false;
  if (oldsize == newsize) return true;

  // oldsize and newsize should be page aligned or we cannot shrink precisely
  void* addr = (uint8_t*)p + newsize;
  size_t size = 0;
  void* start = mi_os_page_align_area_conservative(addr, oldsize - newsize, &size);
  if (size == 0 || start != addr) return false;

#ifdef _WIN32
  // we cannot shrink on Windows, but we can decommit
  return _mi_os_decommit(start, size, stats);
#else
  return mi_os_mem_free(start, size, true, stats);
#endif
}


/* ----------------------------------------------------------------------------
Support for huge OS pages (1GiB) that are reserved up-front and never
released. Only regions are allocated in here (see `memory.c`) so the memory
will be reused.
-----------------------------------------------------------------------------*/
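// Example (hypothetical caller): reserve 4 huge pages (4GiB), waiting at most
// 5 seconds, then check how many were actually obtained:
//   size_t reserved = 0;
//   int err = mi_reserve_huge_os_pages(4, 5.0, &reserved);
//   if (err != 0) { /* errno-style result, e.g. ENOMEM or ETIMEDOUT */ }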
#define MI_HUGE_OS_PAGE_SIZE ((size_t)1 << 30) // 1GiB

typedef struct mi_huge_info_s {
  volatile _Atomic(void*)  start;    // start of huge page area (32TiB)
  volatile _Atomic(size_t) reserved; // total reserved size
  volatile _Atomic(size_t) used;     // currently allocated
} mi_huge_info_t;

static mi_huge_info_t os_huge_reserved = { NULL, 0, ATOMIC_VAR_INIT(0) };

bool _mi_os_is_huge_reserved(void* p) {
  return (mi_atomic_read_ptr(&os_huge_reserved.start) != NULL &&
          p >= mi_atomic_read_ptr(&os_huge_reserved.start) &&
          (uint8_t*)p < (uint8_t*)mi_atomic_read_ptr(&os_huge_reserved.start) + mi_atomic_read(&os_huge_reserved.reserved));
}

void* _mi_os_try_alloc_from_huge_reserved(size_t size, size_t try_alignment)
{
  // only allow large aligned allocations (e.g. regions)
  if (size < MI_SEGMENT_SIZE || (size % MI_SEGMENT_SIZE) != 0) return NULL;
  if (try_alignment > MI_SEGMENT_SIZE) return NULL;
  if (mi_atomic_read_ptr(&os_huge_reserved.start)==NULL) return NULL;
  if (mi_atomic_read(&os_huge_reserved.used) >= mi_atomic_read(&os_huge_reserved.reserved)) return NULL; // already full

  // always aligned
  mi_assert_internal(mi_atomic_read(&os_huge_reserved.used) % MI_SEGMENT_SIZE == 0);
  mi_assert_internal((uintptr_t)mi_atomic_read_ptr(&os_huge_reserved.start) % MI_SEGMENT_SIZE == 0);

  // try to reserve space
  size_t base = mi_atomic_addu(&os_huge_reserved.used, size);
  if ((base + size) > os_huge_reserved.reserved) {
    // "free" our over-allocation
    mi_atomic_subu(&os_huge_reserved.used, size);
    return NULL;
  }

  // success!
  uint8_t* p = (uint8_t*)mi_atomic_read_ptr(&os_huge_reserved.start) + base;
  mi_assert_internal((uintptr_t)p % MI_SEGMENT_SIZE == 0);
  return p;
}

/*
static void mi_os_free_huge_reserved() {
  uint8_t* addr = os_huge_reserved.start;
  size_t total  = os_huge_reserved.reserved;
  os_huge_reserved.reserved = 0;
  os_huge_reserved.start = NULL;
  for( size_t current = 0; current < total; current += MI_HUGE_OS_PAGE_SIZE) {
    _mi_os_free(addr + current, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
  }
}
*/

#if !(MI_INTPTR_SIZE >= 8 && (defined(_WIN32) || defined(MI_OS_USE_MMAP)))
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept {
  UNUSED(pages); UNUSED(max_secs);
  if (pages_reserved != NULL) *pages_reserved = 0;
  return ENOMEM;
}
#else
int mi_reserve_huge_os_pages(size_t pages, double max_secs, size_t* pages_reserved) mi_attr_noexcept
{
  if (pages_reserved != NULL) *pages_reserved = 0;
  if (max_secs==0) return ETIMEDOUT; // timeout
  if (pages==0) return 0;            // ok
  if (!mi_atomic_cas_ptr_strong(&os_huge_reserved.start,(void*)1,NULL)) return ETIMEDOUT; // already reserved

  // Set the start address after the 32TiB area
  uint8_t* start = (uint8_t*)((uintptr_t)32 << 40); // 32TiB virtual start address
  #if (MI_SECURE>0 || MI_DEBUG==0) // security: randomize start of huge pages unless in debug mode
  uintptr_t r = _mi_random_init((uintptr_t)&mi_reserve_huge_os_pages);
  start = start + ((uintptr_t)MI_HUGE_OS_PAGE_SIZE * ((r>>17) & 0x3FF)); // (randomly 0-1024)*1GiB == 0 to 1TiB
  #endif

  // Allocate one page at a time, but try to place them contiguously.
  // We allocate one page at a time to be able to abort if it takes too long.
  double start_t = _mi_clock_start();
  uint8_t* addr = start; // current top of the allocations
  for (size_t page = 0; page < pages; page++, addr += MI_HUGE_OS_PAGE_SIZE) {
    // allocate a page
    void* p = NULL;
    bool is_large = true;
    #ifdef _WIN32
    if (page==0) { mi_win_enable_large_os_pages(); }
    p = mi_win_virtual_alloc(addr, MI_HUGE_OS_PAGE_SIZE, 0, MEM_LARGE_PAGES | MEM_COMMIT | MEM_RESERVE, true, true, &is_large);
    #elif defined(MI_OS_USE_MMAP)
    p = mi_unix_mmap(addr, MI_HUGE_OS_PAGE_SIZE, 0, PROT_READ | PROT_WRITE, true, true, &is_large);
    #else
    // always fail
    #endif

    // Did we succeed at a contiguous address?
    if (p != addr) {
      // no success, issue a warning and return with an error
      if (p != NULL) {
        _mi_warning_message("could not allocate contiguous huge page %zu at %p\n", page, addr);
        _mi_os_free(p, MI_HUGE_OS_PAGE_SIZE, &_mi_stats_main);
      }
      else {
        #ifdef _WIN32
        int err = GetLastError();
        #else
        int err = errno;
        #endif
        _mi_warning_message("could not allocate huge page %zu at %p, error: %i\n", page, addr, err);
      }
      return ENOMEM;
    }
    // success, record it
    if (page==0) {
      mi_atomic_write_ptr(&os_huge_reserved.start, addr); // don't switch the order of these writes
      mi_atomic_write(&os_huge_reserved.reserved, MI_HUGE_OS_PAGE_SIZE);
    }
    else {
      mi_atomic_addu(&os_huge_reserved.reserved, MI_HUGE_OS_PAGE_SIZE);
    }
    _mi_stat_increase(&_mi_stats_main.committed, MI_HUGE_OS_PAGE_SIZE);
    _mi_stat_increase(&_mi_stats_main.reserved, MI_HUGE_OS_PAGE_SIZE);
    if (pages_reserved != NULL) { *pages_reserved = page + 1; }

    // check for timeout
    double elapsed = _mi_clock_end(start_t);
    if (elapsed > max_secs) return ETIMEDOUT;
    if (page >= 1) {
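      // extrapolate the per-page cost; e.g. if the first 2 pages took 1.0s,
      // reserving 100 pages is estimated at 50s, and we bail out early if
      // that clearly exceeds the allowed time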
      double estimate = ((elapsed / (double)(page+1)) * (double)pages);
      if (estimate > 1.5*max_secs) return ETIMEDOUT; // seems like we are going to timeout
    }
  }
  _mi_verbose_message("reserved %zu huge pages\n", pages);
  return 0;
}
#endif