// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#ifndef _ALLOC_H_
#define _ALLOC_H_

#if !defined(_HOST_H_)
#include "host.h"
#endif // !defined(_HOST_H_)

// CompMemKind values are used to tag memory allocations performed via
// the compiler's allocator so that the memory usage of various compiler
// components can be tracked separately (when MEASURE_MEM_ALLOC is defined).

enum CompMemKind
{
#define CompMemKindMacro(kind) CMK_##kind,
#include "compmemkind.h"
    CMK_Count
};
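
// For illustration only (the real kind names live in compmemkind.h): an entry
// such as `CompMemKindMacro(ArrayStack)` expands to the enumerator
// `CMK_ArrayStack` above, and `CMK_Count` ends up equal to the number of kinds.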

class ArenaAllocator
{
private:
    ArenaAllocator(const ArenaAllocator& other) = delete;
    ArenaAllocator& operator=(const ArenaAllocator& other) = delete;
    ArenaAllocator& operator=(ArenaAllocator&& other) = delete;

    struct PageDescriptor
    {
        PageDescriptor* m_next;

        size_t m_pageBytes; // # of bytes allocated
        size_t m_usedBytes; // # of bytes actually used. (This is only valid when we've allocated a new page.)
                            // See ArenaAllocator::allocateNewPage.

        BYTE m_contents[];
    };
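
    // Layout sketch (a comment-only illustration): each page is a single host
    // allocation whose first bytes hold this descriptor and whose remaining
    // `m_pageBytes - sizeof(PageDescriptor)` bytes are the payload exposed
    // through the flexible array member `m_contents`:
    //
    //   [ m_next | m_pageBytes | m_usedBytes | m_contents ... ]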

    enum
    {
        DEFAULT_PAGE_SIZE = 0x10000, // 64 KiB
    };

    PageDescriptor* m_firstPage;
    PageDescriptor* m_lastPage;

    // These two pointers (when non-null) will always point into 'm_lastPage'.
    BYTE* m_nextFreeByte;
    BYTE* m_lastFreeByte;

    // Allocates a fresh page large enough to satisfy a `size`-byte request
    // and returns a pointer to the usable memory for that request.
    void* allocateNewPage(size_t size);

    // Thin wrappers over the host memory interface; `pActualSize` receives
    // the number of bytes actually obtained.
    static void* allocateHostMemory(size_t size, size_t* pActualSize);
    static void freeHostMemory(void* block, size_t size);

#if MEASURE_MEM_ALLOC
    struct MemStats
    {
        unsigned allocCnt;                 // # of allocs
        UINT64   allocSz;                  // total size of those allocs
        UINT64   allocSzMax;               // Maximum single allocation.
        UINT64   allocSzByKind[CMK_Count]; // Classified by "kind".
        UINT64   nraTotalSizeAlloc;
        UINT64   nraTotalSizeUsed;

        static const char* s_CompMemKindNames[]; // Names of the kinds.

        void AddAlloc(size_t sz, CompMemKind cmk)
        {
            allocCnt += 1;
            allocSz += sz;
            if (sz > allocSzMax)
            {
                allocSzMax = sz;
            }
            allocSzByKind[cmk] += sz;
        }

        void Print(FILE* f);       // Print these stats to file.
        void PrintByKind(FILE* f); // Do just the by-kind histogram part.
    };

    struct AggregateMemStats : public MemStats
    {
        unsigned nMethods;

        void Add(const MemStats& ms)
        {
            nMethods++;
            allocCnt += ms.allocCnt;
            allocSz += ms.allocSz;
            allocSzMax = max(allocSzMax, ms.allocSzMax);
            for (int i = 0; i < CMK_Count; i++)
            {
                allocSzByKind[i] += ms.allocSzByKind[i];
            }
            nraTotalSizeAlloc += ms.nraTotalSizeAlloc;
            nraTotalSizeUsed += ms.nraTotalSizeUsed;
        }

        void Print(FILE* f); // Print these stats to file.
    };

public:
    struct MemStatsAllocator
    {
        ArenaAllocator* m_arena;
        CompMemKind     m_kind;

        void* allocateMemory(size_t sz)
        {
            m_arena->m_stats.AddAlloc(sz, m_kind);
            return m_arena->allocateMemory(sz);
        }
    };

private:
    static CritSecObject     s_statsLock; // This lock protects the data structures below.
    static MemStats          s_maxStats;  // Stats for the allocator with the largest amount allocated.
    static AggregateMemStats s_aggStats;  // Aggregates statistics for all allocators.

    MemStats          m_stats;
    MemStatsAllocator m_statsAllocators[CMK_Count];

public:
    MemStatsAllocator* getMemStatsAllocator(CompMemKind kind);
    void finishMemStats();
    void dumpMemStats(FILE* file);

    static void dumpMaxMemStats(FILE* file);
    static void dumpAggregateMemStats(FILE* file);
#endif // MEASURE_MEM_ALLOC

public:
    ArenaAllocator();

    // NOTE: it would be nice to have a destructor on this type to ensure that any value that
    // goes out of scope is either uninitialized or has been torn down via a call to
    // destroy(), but this interacts badly with methods that use SEH. #3058 tracks
    // revisiting EH in the JIT; such a destructor could be added if SEH is removed
    // as part of that work.

    void destroy();

    inline void* allocateMemory(size_t size);

    size_t getTotalBytesAllocated();
    size_t getTotalBytesUsed();

    static bool bypassHostAllocator();
    static size_t getDefaultPageSize();
};
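
// Typical lifecycle (a minimal sketch; in the JIT the compiler object drives
// these calls rather than standalone code like this):
//
//   ArenaAllocator alloc;
//   void* scratch = alloc.allocateMemory(64); // bump-pointer allocation
//   // ... use the memory for the duration of the compilation ...
//   alloc.destroy(); // returns every page to the host in one shot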

//------------------------------------------------------------------------
// ArenaAllocator::allocateMemory:
//    Allocates memory using an `ArenaAllocator`.
//
// Arguments:
//    size - The number of bytes to allocate.
//
// Return Value:
//    A pointer to the allocated memory.
//
// Note:
//    The DEBUG version of the method has some abilities that the release
//    version does not: it can inject faults into the allocator and it fills
//    all allocations with a known pattern to help catch use-before-init
//    problems.
//
inline void* ArenaAllocator::allocateMemory(size_t size)
{
    assert(size != 0);

    // Ensure that we always allocate in pointer-sized increments.
    size = roundUp(size, sizeof(size_t));

#if defined(DEBUG)
    if (JitConfig.ShouldInjectFault() != 0)
    {
        // Force the underlying memory allocator (either the OS or the CLR host)
        // to allocate the memory. Any fault injection will kick in.
        void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
        if (p != nullptr)
        {
            ClrFreeInProcessHeap(0, p);
        }
        else
        {
            NOMEM(); // Throw!
        }
    }
#endif

    // Bump-pointer fast path: claim the next `size` bytes of the current page.
    void* block = m_nextFreeByte;
    m_nextFreeByte += size;

    // If the bump ran off the end of the page, fall back to the slow path,
    // which allocates a new page and satisfies this request from it.
    if (m_nextFreeByte > m_lastFreeByte)
    {
        block = allocateNewPage(size);
    }

#if defined(DEBUG)
    memset(block, UninitializedWord<char>(nullptr), size);
#endif

    return block;
}
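
// Note: `allocateMemory` rounds every request up to a multiple of
// sizeof(size_t), so consecutive allocations stay pointer-size aligned;
// CompAllocator::allocate below asserts that property on the returned pointer.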

// Allows general-purpose code (e.g. collection classes) to allocate
// memory of a predetermined kind via an arena allocator.

class CompAllocator
{
#if MEASURE_MEM_ALLOC
    ArenaAllocator::MemStatsAllocator* m_arena;
#else
    ArenaAllocator* m_arena;
#endif

public:
    CompAllocator(ArenaAllocator* arena, CompMemKind cmk)
#if MEASURE_MEM_ALLOC
        : m_arena(arena->getMemStatsAllocator(cmk))
#else
        : m_arena(arena)
#endif
    {
    }

    // Allocate a block of memory suitable to store `count` objects of type `T`.
    // Zero-length allocations are not allowed.
    template <typename T>
    T* allocate(size_t count)
    {
        // Ensure that count * sizeof(T) does not overflow.
        if (count > (SIZE_MAX / sizeof(T)))
        {
            NOMEM();
        }

        void* p = m_arena->allocateMemory(count * sizeof(T));

        // Ensure that the allocator returned sizeof(size_t)-aligned memory.
        assert((size_t(p) & (sizeof(size_t) - 1)) == 0);

        return static_cast<T*>(p);
    }

    // Deallocate a block of memory previously allocated by `allocate`.
    // The arena allocator does not release memory, so this does nothing.
    void deallocate(void* p)
    {
    }
};
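
// Example (a sketch; `arena` is assumed to be a live ArenaAllocator*, and the
// kind name is illustrative; see compmemkind.h for the real list):
//
//   CompAllocator allocator(arena, CMK_ArrayStack);
//   unsigned* widths = allocator.allocate<unsigned>(32);
//   // No per-object free: the arena releases everything in bulk via destroy().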

// Global operator new overloads that work with CompAllocator

inline void* __cdecl operator new(size_t n, CompAllocator alloc)
{
    return alloc.allocate<char>(n);
}

inline void* __cdecl operator new[](size_t n, CompAllocator alloc)
{
    return alloc.allocate<char>(n);
}
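
// Example (a sketch; `allocator` is a CompAllocator as above and `Node` is a
// hypothetical type): placement-style new routes through the overloads above,
// so the object's storage comes from the arena:
//
//   Node* n = new (allocator) Node(arg);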

// A CompAllocator wrapper that implements IAllocator and allows zero-length
// memory allocations (the arena allocator does not support zero-length
// allocation).

class CompIAllocator : public IAllocator
{
    CompAllocator m_alloc;
    char          m_zeroLenAllocTarg;

public:
    CompIAllocator(CompAllocator alloc) : m_alloc(alloc)
    {
    }

    // Allocates a block of memory at least `sz` bytes in size.
    virtual void* Alloc(size_t sz) override
    {
        if (sz == 0)
        {
            return &m_zeroLenAllocTarg;
        }
        else
        {
            return m_alloc.allocate<char>(sz);
        }
    }

    // Allocates a block of memory at least `elems * elemSize` bytes in size.
    virtual void* ArrayAlloc(size_t elems, size_t elemSize) override
    {
        if ((elems == 0) || (elemSize == 0))
        {
            return &m_zeroLenAllocTarg;
        }
        else
        {
            // Ensure that elems * elemSize does not overflow.
            if (elems > (SIZE_MAX / elemSize))
            {
                NOMEM();
            }

            return m_alloc.allocate<char>(elems * elemSize);
        }
    }

    // Frees the block of memory pointed to by p.
    virtual void Free(void* p) override
    {
        m_alloc.deallocate(p);
    }
};
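
// Example (a sketch; `allocator` is a CompAllocator as above):
//
//   CompIAllocator iAlloc(allocator);
//   void* p = iAlloc.Alloc(0); // zero-length requests yield a valid pointer
//   iAlloc.Free(p);            // no-op; the arena frees everything in bulk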

#endif // _ALLOC_H_