// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#include "jitpch.h"

#if defined(_MSC_VER)
#pragma hdrstop
#endif // defined(_MSC_VER)

//------------------------------------------------------------------------
// ArenaAllocator::bypassHostAllocator:
//    Indicates whether or not the ArenaAllocator should bypass the JIT
//    host when allocating memory for arena pages.
//
// Return Value:
//    True if the JIT should bypass the JIT host; false otherwise.
bool ArenaAllocator::bypassHostAllocator()
{
#if defined(DEBUG)
    // When JitDirectAlloc is set, all JIT allocation requests are forwarded
    // directly to the OS. This allows taking advantage of pageheap and other gflags
    // knobs for ensuring that we do not have buffer overruns in the JIT.

    return JitConfig.JitDirectAlloc() != 0;
#else // defined(DEBUG)
    return false;
#endif // !defined(DEBUG)
}
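
// Note: an illustrative way to exercise the DEBUG-only path above is to set the JIT config
// knob before running the JIT; the exact environment variable prefix is an assumption here
// (it has differed across runtime versions), but it looks something like:
//
//   set DOTNET_JitDirectAlloc=1
//
// after which every arena page below is requested straight from the OS heap instead of the
// JIT host, so page-heap style verification tools can police the JIT's allocations.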

//------------------------------------------------------------------------
// ArenaAllocator::getDefaultPageSize:
//    Returns the default size of an arena page.
//
// Return Value:
//    The default size of an arena page.
size_t ArenaAllocator::getDefaultPageSize()
{
    return DEFAULT_PAGE_SIZE;
}

//------------------------------------------------------------------------
// ArenaAllocator::ArenaAllocator:
//    Default-constructs an arena allocator.
ArenaAllocator::ArenaAllocator()
    : m_firstPage(nullptr), m_lastPage(nullptr), m_nextFreeByte(nullptr), m_lastFreeByte(nullptr)
{
#if MEASURE_MEM_ALLOC
    memset(&m_stats, 0, sizeof(m_stats));
    memset(&m_statsAllocators, 0, sizeof(m_statsAllocators));
#endif // MEASURE_MEM_ALLOC
}

//------------------------------------------------------------------------
// ArenaAllocator::allocateNewPage:
//    Allocates a new arena page.
//
// Arguments:
//    size - The number of bytes that were requested by the allocation
//           that triggered this request to allocate a new arena page.
//
// Return Value:
//    A pointer to the first usable byte of the newly allocated page.
void* ArenaAllocator::allocateNewPage(size_t size)
{
    size_t pageSize = sizeof(PageDescriptor) + size;

    // Check for integer overflow
    if (pageSize < size)
    {
        NOMEM();
    }

    // If there is a current page, it could not satisfy this allocation;
    // record its final used size before retiring it.
    if (m_lastPage != nullptr)
    {
        // Undo the "+=" done in allocateMemory()
        m_nextFreeByte -= size;

        // Save the actual used size of the page
        m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
    }

    if (!bypassHostAllocator())
    {
        // Round up to the next multiple of the default page size
        pageSize = roundUp(pageSize, DEFAULT_PAGE_SIZE);
    }

    // Allocate the new page
    PageDescriptor* newPage = static_cast<PageDescriptor*>(allocateHostMemory(pageSize, &pageSize));

    if (newPage == nullptr)
    {
        NOMEM();
    }

    // Append the new page to the end of the list
    newPage->m_next = nullptr;
    newPage->m_pageBytes = pageSize;
    newPage->m_usedBytes = 0; // m_usedBytes is not valid for the current page until the next
                              // page is allocated; zero it rather than leaving garbage that
                              // could be mistaken for a real value.

    if (m_lastPage != nullptr)
    {
        m_lastPage->m_next = newPage;
    }
    else
    {
        m_firstPage = newPage;
    }

    m_lastPage = newPage;

    // Adjust the next/last free byte pointers
    m_nextFreeByte = newPage->m_contents + size;
    m_lastFreeByte = (BYTE*)newPage + pageSize;
    assert((m_lastFreeByte - m_nextFreeByte) >= 0);

    return newPage->m_contents;
}
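
// Illustrative arithmetic for the routine above (the header and page sizes used here are
// assumptions for this example rather than values taken from the headers): if
// sizeof(PageDescriptor) were 32, a caller asked for 40 bytes that did not fit on the
// current page, and DEFAULT_PAGE_SIZE were 0x10000, then pageSize starts at 32 + 40 = 72,
// is rounded up to 0x10000 when the host allocator is in use, and on return
// m_nextFreeByte == newPage->m_contents + 40 while m_lastFreeByte points just past the
// last byte the host actually handed back.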

//------------------------------------------------------------------------
// ArenaAllocator::destroy:
//    Performs any necessary teardown for an `ArenaAllocator`.
void ArenaAllocator::destroy()
{
    PageDescriptor* page = m_firstPage;

    // Free all of the allocated pages
    for (PageDescriptor* next; page != nullptr; page = next)
    {
        next = page->m_next;
        freeHostMemory(page, page->m_pageBytes);
    }

    // Clear out the allocator's fields
    m_firstPage = nullptr;
    m_lastPage = nullptr;
    m_nextFreeByte = nullptr;
    m_lastFreeByte = nullptr;
}
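
// Illustrative lifecycle sketch (allocateMemory() is declared in the header; the sizes and
// call pattern below are an example rather than a real caller):
//
//   ArenaAllocator alloc;
//   void* p = alloc.allocateMemory(128); // the first request pulls in a fresh page
//   // ... many more allocations, with no individual frees ...
//   alloc.destroy();                     // every page goes back to the host/OS in one sweep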

// The debug version of the allocator may allocate directly from the
// OS rather than going through the hosting APIs. In order to do so,
// it must undef the macros that are usually in place to prevent
// accidental uses of the OS allocator.
#if defined(DEBUG)
#undef GetProcessHeap
#undef HeapAlloc
#undef HeapFree
#endif

//------------------------------------------------------------------------
// ArenaAllocator::allocateHostMemory:
//    Allocates memory from the host (or the OS if `bypassHostAllocator()`
//    returns `true`).
//
// Arguments:
//    size        - The number of bytes to allocate.
//    pActualSize - [out] The number of bytes actually allocated, which may
//                  be larger than the number requested.
//
// Return Value:
//    A pointer to the allocated memory.
void* ArenaAllocator::allocateHostMemory(size_t size, size_t* pActualSize)
{
#if defined(DEBUG)
    if (bypassHostAllocator())
    {
        *pActualSize = size;
        return ::HeapAlloc(GetProcessHeap(), 0, size);
    }
#endif // defined(DEBUG)

    return g_jitHost->allocateSlab(size, pActualSize);
}
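
// Note on the contract above (the concrete numbers are only an example): the host is free
// to hand back a slab larger than the size requested and reports the real size through
// pActualSize. allocateNewPage() passes its pageSize variable for both arguments, so if a
// request for 0x10000 bytes were satisfied with, say, 0x10800 bytes, the extra tail would
// still be usable arena space rather than being wasted.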

//------------------------------------------------------------------------
// ArenaAllocator::freeHostMemory:
//    Frees memory allocated by a previous call to `allocateHostMemory`.
//
// Arguments:
//    block - A pointer to the memory to free.
//    size  - The size, in bytes, that was actually allocated for the block
//            (i.e. the value reported through `pActualSize` by `allocateHostMemory`).
void ArenaAllocator::freeHostMemory(void* block, size_t size)
{
#if defined(DEBUG)
    if (bypassHostAllocator())
    {
        ::HeapFree(GetProcessHeap(), 0, block);
        return;
    }
#endif // defined(DEBUG)

    g_jitHost->freeSlab(block, size);
}

//------------------------------------------------------------------------
// ArenaAllocator::getTotalBytesAllocated:
//    Gets the total number of bytes allocated for all of the arena pages
//    for an `ArenaAllocator`.
//
// Return Value:
//    See above.
size_t ArenaAllocator::getTotalBytesAllocated()
{
    size_t bytes = 0;
    for (PageDescriptor* page = m_firstPage; page != nullptr; page = page->m_next)
    {
        bytes += page->m_pageBytes;
    }

    return bytes;
}

//------------------------------------------------------------------------
// ArenaAllocator::getTotalBytesUsed:
//    Gets the total number of bytes used in all of the arena pages for
//    an `ArenaAllocator`.
//
// Return Value:
//    See above.
//
// Notes:
//    An arena page may have unused space at the very end. This happens
//    when an allocation request comes in (via a call to `allocateMemory`)
//    that will not fit in the remaining bytes for the current page.
//    Another way to understand this method is as returning the total
//    number of bytes allocated for arena pages minus the number of bytes
//    that are unused across all arena pages.
size_t ArenaAllocator::getTotalBytesUsed()
{
    if (m_lastPage != nullptr)
    {
        m_lastPage->m_usedBytes = m_nextFreeByte - m_lastPage->m_contents;
    }

    size_t bytes = 0;
    for (PageDescriptor* page = m_firstPage; page != nullptr; page = page->m_next)
    {
        bytes += page->m_usedBytes;
    }

    return bytes;
}
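
// Worked example for the two accounting methods above (page and usage sizes are made up
// for illustration): with two pages of 0x10000 bytes each, where the retired first page
// handed out 0xFFC0 bytes of contents and the current page has served 0x200 bytes so far,
// getTotalBytesAllocated() reports 0x20000 while getTotalBytesUsed() reports
// 0xFFC0 + 0x200 = 0x101C0; the difference is the space never handed to callers, namely
// the unused tails of both pages plus the PageDescriptor headers themselves.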

#if MEASURE_MEM_ALLOC
CritSecObject ArenaAllocator::s_statsLock;
ArenaAllocator::AggregateMemStats ArenaAllocator::s_aggStats;
ArenaAllocator::MemStats ArenaAllocator::s_maxStats;

const char* ArenaAllocator::MemStats::s_CompMemKindNames[] = {
#define CompMemKindMacro(kind) #kind,
#include "compmemkind.h"
};

void ArenaAllocator::MemStats::Print(FILE* f)
{
    fprintf(f, "count: %10u, size: %10llu, max = %10llu\n", allocCnt, allocSz, allocSzMax);
    fprintf(f, "allocateMemory: %10llu, nraUsed: %10llu\n", nraTotalSizeAlloc, nraTotalSizeUsed);
    PrintByKind(f);
}

void ArenaAllocator::MemStats::PrintByKind(FILE* f)
{
    fprintf(f, "\nAlloc'd bytes by kind:\n %20s | %10s | %7s\n", "kind", "size", "pct");
    fprintf(f, " %20s-+-%10s-+-%7s\n", "--------------------", "----------", "-------");
    float allocSzF = static_cast<float>(allocSz);
    for (int cmk = 0; cmk < CMK_Count; cmk++)
    {
        float pct = 100.0f * static_cast<float>(allocSzByKind[cmk]) / allocSzF;
        fprintf(f, " %20s | %10llu | %6.2f%%\n", s_CompMemKindNames[cmk], allocSzByKind[cmk], pct);
    }
    fprintf(f, "\n");
}

void ArenaAllocator::AggregateMemStats::Print(FILE* f)
{
    fprintf(f, "For %9u methods:\n", nMethods);
    if (nMethods == 0)
    {
        return;
    }
    fprintf(f, " count: %12u (avg %7u per method)\n", allocCnt, allocCnt / nMethods);
    fprintf(f, " alloc size : %12llu (avg %7llu per method)\n", allocSz, allocSz / nMethods);
    fprintf(f, " max alloc : %12llu\n", allocSzMax);
    fprintf(f, "\n");
    fprintf(f, " allocateMemory : %12llu (avg %7llu per method)\n", nraTotalSizeAlloc, nraTotalSizeAlloc / nMethods);
    fprintf(f, " nraUsed : %12llu (avg %7llu per method)\n", nraTotalSizeUsed, nraTotalSizeUsed / nMethods);
    PrintByKind(f);
}

ArenaAllocator::MemStatsAllocator* ArenaAllocator::getMemStatsAllocator(CompMemKind kind)
{
    assert(kind < CMK_Count);

    if (m_statsAllocators[kind].m_arena == nullptr)
    {
        m_statsAllocators[kind].m_arena = this;
        m_statsAllocators[kind].m_kind = kind;
    }

    return &m_statsAllocators[kind];
}

void ArenaAllocator::finishMemStats()
{
    m_stats.nraTotalSizeAlloc = getTotalBytesAllocated();
    m_stats.nraTotalSizeUsed = getTotalBytesUsed();

    CritSecHolder statsLock(s_statsLock);
    s_aggStats.Add(m_stats);
    if (m_stats.allocSz > s_maxStats.allocSz)
    {
        s_maxStats = m_stats;
    }
}

void ArenaAllocator::dumpMemStats(FILE* file)
{
    m_stats.Print(file);
}

void ArenaAllocator::dumpAggregateMemStats(FILE* file)
{
    s_aggStats.Print(file);
}

void ArenaAllocator::dumpMaxMemStats(FILE* file)
{
    s_maxStats.Print(file);
}
#endif // MEASURE_MEM_ALLOC
