#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/

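/*
 * Reliable slow path: over-allocate, then trim to an aligned subregion.
 * Mapping (size + alignment - PAGE) bytes is always sufficient: the mapping
 * starts page-aligned, so the leading excess before the next alignment
 * boundary is at most (alignment - PAGE).  For example, with 4 KiB pages, a
 * 2 MiB chunk at 2 MiB alignment maps 4 MiB - 4 KiB and trims at most
 * 2 MiB - 4 KiB from the front.
 */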
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
{
    void *ret;
    size_t alloc_size;

    alloc_size = size + alignment - PAGE;
    /* Beware size_t wrap-around. */
    if (alloc_size < size)
        return (NULL);
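    /*
     * Retry until pages_trim() succeeds.  On systems that cannot unmap part
     * of a mapping in place, pages_trim() may have to unmap the whole region
     * and re-map the aligned subset, which can fail if another thread claims
     * the address range in the meantime.
     */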
    do {
        void *pages;
        size_t leadsize;
        pages = pages_map(NULL, alloc_size, commit);
        if (pages == NULL)
            return (NULL);
        leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
            (uintptr_t)pages;
        ret = pages_trim(pages, alloc_size, leadsize, size, commit);
    } while (ret == NULL);

    assert(ret != NULL);
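    /* Anonymous mappings are zero-filled by the kernel. */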
    *zero = true;
    return (ret);
}

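/*
 * Allocate a size-byte, alignment-aligned mapping.  new_addr, if non-NULL,
 * is a placement hint; the early return below assumes the caller only passes
 * hints that already satisfy the alignment.  On success, *zero is set, since
 * fresh mappings are zero-filled.
 */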
void *
chunk_alloc_mmap(void *new_addr, size_t size, size_t alignment, bool *zero,
    bool *commit)
{
    void *ret;
    size_t offset;

    /*
     * Ideally, there would be a way to specify alignment to mmap() (like
     * NetBSD has), but in the absence of such a feature, we have to work
     * hard to efficiently create aligned mappings.  The reliable but slow
     * method is to create an over-sized mapping, then trim the excess.
     * However, that always results in one or two calls to pages_unmap().
     *
     * Optimistically try mapping precisely the right amount before falling
     * back to the slow method, with the expectation that the optimistic
     * approach works most of the time.
     */
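    /*
     * Concretely: if pages_map() happens to return an aligned address, the
     * chunk costs a single mmap() and no pages_unmap() calls; otherwise the
     * mapping is discarded and the slow path runs.
     */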

    assert(alignment != 0);
    assert((alignment & chunksize_mask) == 0);

    ret = pages_map(new_addr, size, commit);
    if (ret == NULL || ret == new_addr)
        return (ret);
    assert(new_addr == NULL);
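    /*
     * The optimistic mapping landed at an arbitrary address; keep it only if
     * that address already satisfies the alignment.
     */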
    offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
    if (offset != 0) {
        pages_unmap(ret, size);
        return (chunk_alloc_mmap_slow(size, alignment, zero, commit));
    }

    assert(ret != NULL);
    *zero = true;
    return (ret);
}

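/*
 * Returns false if the chunk was unmapped, true if the caller must retain it
 * (jemalloc can be configured without munmap() support, in which case chunks
 * are never returned to the operating system).
 */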
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

    if (config_munmap)
        pages_unmap(chunk, size);

    return (!config_munmap);
}