| 1 | /* |
| 2 | Copyright (c) 2005-2019 Intel Corporation |
| 3 | |
| 4 | Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | you may not use this file except in compliance with the License. |
| 6 | You may obtain a copy of the License at |
| 7 | |
| 8 | http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | |
| 10 | Unless required by applicable law or agreed to in writing, software |
| 11 | distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | See the License for the specific language governing permissions and |
| 14 | limitations under the License. |
| 15 | */ |
| 16 | |
| 17 | #ifndef _itt_shared_malloc_MapMemory_H |
| 18 | #define _itt_shared_malloc_MapMemory_H |
| 19 | |
#include <stdlib.h>
#include <errno.h>  /* errno is read and restored below */
#include <stdint.h> /* uintptr_t is used by the mmap-based paths */
| 21 | |
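/* POSIX malloc() sets errno to ENOMEM on failure; restore the caller's value
   in that case so that a failed internal allocation does not disturb the
   errno observed by the application. */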
| 22 | void *ErrnoPreservingMalloc(size_t bytes) |
| 23 | { |
| 24 | int prevErrno = errno; |
| 25 | void *ret = malloc( bytes ); |
| 26 | if (!ret) |
| 27 | errno = prevErrno; |
| 28 | return ret; |
| 29 | } |
| 30 | |
| 31 | #if __linux__ || __APPLE__ || __sun || __FreeBSD__ |
| 32 | |
| 33 | #if __sun && !defined(_XPG4_2) |
// Defined so that mmap's 1st argument has type void*
| 35 | #define _XPG4_2 1 |
| 36 | #define XPG4_WAS_DEFINED 1 |
| 37 | #endif |
| 38 | |
| 39 | #include <sys/mman.h> |
| 40 | #if __linux__ |
/* __TBB_MAP_HUGETLB is MAP_HUGETLB from the system header linux/mman.h.
   That header is not included here because on some Linux flavors its
   inclusion leads to a compilation error, while a change of the
   MAP_HUGETLB value is highly unlikely.
*/
| 46 | #define __TBB_MAP_HUGETLB 0x40000 |
| 47 | #else |
| 48 | #define __TBB_MAP_HUGETLB 0 |
| 49 | #endif |
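
/* A minimal consistency-check sketch; __TBB_CHECK_HUGETLB_VALUE is a
   hypothetical opt-in macro for Linux flavors where linux/mman.h does
   compile cleanly, not part of the regular build: */
#if __linux__ && defined(__TBB_CHECK_HUGETLB_VALUE)
#include <linux/mman.h>
#if __TBB_MAP_HUGETLB != MAP_HUGETLB
#error __TBB_MAP_HUGETLB is out of sync with MAP_HUGETLB from linux/mman.h
#endif
#endif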
| 50 | |
| 51 | #if XPG4_WAS_DEFINED |
| 52 | #undef _XPG4_2 |
| 53 | #undef XPG4_WAS_DEFINED |
| 54 | #endif |
| 55 | |
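// Anonymous, private, read-write mapping; map_hint and map_flags let the
// huge page paths below bias placement and request explicit huge pages.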
| 56 | inline void* mmap_impl(size_t map_size, void* map_hint = NULL, int map_flags = 0) { |
| 57 | #ifndef MAP_ANONYMOUS |
// macOS* provides only MAP_ANON, the historical BSD name, which Linux* deprecates in favor of MAP_ANONYMOUS.
| 59 | #define MAP_ANONYMOUS MAP_ANON |
| 60 | #endif /* MAP_ANONYMOUS */ |
| 61 | return mmap(map_hint, map_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | map_flags, -1, 0); |
| 62 | } |
| 63 | |
| 64 | inline void* mmapTHP(size_t bytes) { |
    // Resides in the zero-initialized data section, so the first call sees NULL
| 66 | static void* hint; |
| 67 | |
    // Optimistically use the end of the last huge-page-aligned region
    // as a hint for mmap, requesting the new mapping just below it.
| 70 | hint = hint ? (void*)((uintptr_t)hint - bytes) : hint; |
| 71 | void* result = mmap_impl(bytes, hint); |
| 72 | |
| 73 | // Something went wrong |
| 74 | if (result == MAP_FAILED) { |
| 75 | hint = NULL; |
| 76 | return MAP_FAILED; |
| 77 | } |
| 78 | |
    // If the mapping is not aligned on a huge page boundary, fall back to
    // the slow path: map an oversized region and trim the excess parts.
| 81 | if (!isAligned(result, HUGE_PAGE_SIZE)) { |
| 82 | // Undo previous try |
| 83 | munmap(result, bytes); |
| 84 | |
        // Map a region oversized by one huge page
| 86 | result = mmap_impl(bytes + HUGE_PAGE_SIZE); |
| 87 | |
| 88 | // Something went wrong |
| 89 | if (result == MAP_FAILED) { |
| 90 | hint = NULL; |
| 91 | return MAP_FAILED; |
| 92 | } |
| 93 | |
| 94 | // Misalignment offset |
| 95 | uintptr_t offset = 0; |
| 96 | |
| 97 | if (!isAligned(result, HUGE_PAGE_SIZE)) { |
            // Trim the excess head of the region if it is not aligned
| 99 | offset = HUGE_PAGE_SIZE - ((uintptr_t)result & (HUGE_PAGE_SIZE - 1)); |
| 100 | munmap(result, offset); |
| 101 | |
| 102 | // New region beginning |
| 103 | result = (void*)((uintptr_t)result + offset); |
| 104 | } |
| 105 | |
| 106 | // Trim excess tail of a region |
| 107 | munmap((void*)((uintptr_t)result + bytes), HUGE_PAGE_SIZE - offset); |
| 108 | } |
| 109 | |
    // Assume that mmap'ed virtual addresses grow downwards by default,
    // so store the last successful allocation as the hint and later use
    // it minus the requested size as the next mapping point.
    // TODO: an atomic store (no fence needed) is meant here, but
    // currently we don't have such a function.
| 115 | hint = result; |
| 116 | |
| 117 | MALLOC_ASSERT(isAligned(result, HUGE_PAGE_SIZE), "Mapped address is not aligned on huge page size." ); |
| 118 | |
| 119 | return result; |
| 120 | } |
| 121 | |
| 122 | #define MEMORY_MAPPING_USES_MALLOC 0 |
| 123 | void* MapMemory (size_t bytes, PageType pageType) |
| 124 | { |
| 125 | void* result = 0; |
| 126 | int prevErrno = errno; |
| 127 | |
| 128 | switch (pageType) { |
| 129 | case REGULAR: |
| 130 | { |
| 131 | result = mmap_impl(bytes); |
| 132 | break; |
| 133 | } |
| 134 | case PREALLOCATED_HUGE_PAGE: |
| 135 | { |
| 136 | MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size" ); |
| 137 | result = mmap_impl(bytes, NULL, __TBB_MAP_HUGETLB); |
| 138 | break; |
| 139 | } |
| 140 | case TRANSPARENT_HUGE_PAGE: |
| 141 | { |
| 142 | MALLOC_ASSERT((bytes % HUGE_PAGE_SIZE) == 0, "Mapping size should be divisible by huge page size" ); |
| 143 | result = mmapTHP(bytes); |
| 144 | break; |
| 145 | } |
| 146 | default: |
| 147 | { |
| 148 | MALLOC_ASSERT(false, "Unknown page type" ); |
| 149 | } |
| 150 | } |
| 151 | |
| 152 | if (result == MAP_FAILED) { |
| 153 | errno = prevErrno; |
| 154 | return 0; |
| 155 | } |
| 156 | |
| 157 | return result; |
| 158 | } |
| 159 | |
| 160 | int UnmapMemory(void *area, size_t bytes) |
| 161 | { |
| 162 | int prevErrno = errno; |
| 163 | int ret = munmap(area, bytes); |
| 164 | if (-1 == ret) |
| 165 | errno = prevErrno; |
| 166 | return ret; |
| 167 | } |
| 168 | |
| 169 | #elif (_WIN32 || _WIN64) && !__TBB_WIN8UI_SUPPORT |
| 170 | #include <windows.h> |
| 171 | |
| 172 | #define MEMORY_MAPPING_USES_MALLOC 0 |
| 173 | void* MapMemory (size_t bytes, PageType) |
| 174 | { |
    /* VirtualAlloc is safe to call concurrently from multiple threads. */
| 176 | return VirtualAlloc(NULL, bytes, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE); |
| 177 | } |
| 178 | |
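/* VirtualFree with MEM_RELEASE requires dwSize == 0 and the exact base
   address returned by VirtualAlloc, so the bytes argument is unused; the
   result is negated to match munmap's 0-on-success convention. */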
| 179 | int UnmapMemory(void *area, size_t /*bytes*/) |
| 180 | { |
| 181 | BOOL result = VirtualFree(area, 0, MEM_RELEASE); |
| 182 | return !result; |
| 183 | } |
| 184 | |
| 185 | #else |
| 186 | |
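/* No virtual memory API is available on this platform, so fall back to plain
   malloc; MEMORY_MAPPING_USES_MALLOC == 1 feeds the recursion guard below. */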
| 187 | #define MEMORY_MAPPING_USES_MALLOC 1 |
| 188 | void* MapMemory (size_t bytes, PageType) |
| 189 | { |
| 190 | return ErrnoPreservingMalloc( bytes ); |
| 191 | } |
| 192 | |
| 193 | int UnmapMemory(void *area, size_t /*bytes*/) |
| 194 | { |
| 195 | free( area ); |
| 196 | return 0; |
| 197 | } |
| 198 | |
| 199 | #endif /* OS dependent */ |
| 200 | |
| 201 | #if MALLOC_CHECK_RECURSION && MEMORY_MAPPING_USES_MALLOC |
| 202 | #error Impossible to protect against malloc recursion when memory mapping uses malloc. |
| 203 | #endif |
| 204 | |
| 205 | #endif /* _itt_shared_malloc_MapMemory_H */ |
| 206 | |