1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | /*++ |
6 | |
7 | |
8 | |
9 | Module Name: |
10 | |
11 | virtual.cpp |
12 | |
13 | Abstract: |
14 | |
15 | Implementation of virtual memory management functions. |
16 | |
17 | |
18 | |
19 | --*/ |
20 | |
21 | #include "pal/dbgmsg.h" |
22 | |
23 | SET_DEFAULT_DEBUG_CHANNEL(VIRTUAL); // some headers have code with asserts, so do this first |
24 | |
25 | #include "pal/thread.hpp" |
26 | #include "pal/cs.hpp" |
27 | #include "pal/malloc.hpp" |
28 | #include "pal/file.hpp" |
29 | #include "pal/seh.hpp" |
30 | #include "pal/virtual.h" |
31 | #include "pal/map.h" |
32 | #include "pal/init.h" |
33 | #include "pal/utils.h" |
34 | #include "common.h" |
35 | |
36 | #include <sys/types.h> |
37 | #include <sys/mman.h> |
38 | #include <errno.h> |
39 | #include <string.h> |
40 | #include <unistd.h> |
41 | #include <limits.h> |
42 | |
43 | #if HAVE_VM_ALLOCATE |
44 | #include <mach/vm_map.h> |
45 | #include <mach/mach_init.h> |
46 | #endif // HAVE_VM_ALLOCATE |
47 | |
48 | using namespace CorUnix; |
49 | |
50 | CRITICAL_SECTION virtual_critsec; |
51 | |
52 | // The first node in our list of allocated blocks. |
53 | static PCMI pVirtualMemory; |
54 | |
55 | static size_t s_virtualPageSize = 0; |
56 | |
57 | /* We need MAP_ANON. However, on some platforms such as HP-UX, it is defined as MAP_ANONYMOUS. */ |
58 | #if !defined(MAP_ANON) && defined(MAP_ANONYMOUS) |
59 | #define MAP_ANON MAP_ANONYMOUS |
60 | #endif |
61 | |
62 | /*++ |
63 | Function: |
64 | ReserveVirtualMemory() |
65 | |
66 | Helper function that is used by Virtual* APIs and ExecutableMemoryAllocator |
67 | to reserve virtual memory from the OS. |
68 | |
69 | --*/ |
70 | static LPVOID ReserveVirtualMemory( |
71 | IN CPalThread *pthrCurrent, /* Currently executing thread */ |
72 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
73 | IN SIZE_T dwSize); /* Size of Region */ |
74 | |
75 | |
76 | // A memory allocator that allocates memory from a pre-reserved region |
77 | // of virtual memory that is located near the CoreCLR library. |
78 | static ExecutableMemoryAllocator g_executableMemoryAllocator; |
79 | |
80 | // |
81 | // |
82 | // Virtual Memory Logging |
83 | // |
84 | // We maintain a lightweight in-memory circular buffer recording virtual |
85 | // memory operations so that we can better diagnose failures and crashes |
86 | // caused by one of these operations mishandling memory in some way. |
87 | // |
88 | // |
89 | namespace VirtualMemoryLogging |
90 | { |
91 | // Specifies the operation being logged |
92 | enum class VirtualOperation |
93 | { |
94 | Allocate = 0x10, |
95 | Reserve = 0x20, |
96 | Commit = 0x30, |
97 | Decommit = 0x40, |
98 | Release = 0x50, |
99 | Reset = 0x60, |
100 | ReserveFromExecutableMemoryAllocatorWithinRange = 0x70 |
101 | }; |
102 | |
103 | // Indicates that the attempted operation has failed |
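 | // (for example, a failed Reserve is recorded as 0x20 | 0x80000000 = 0x80000020) |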
104 | const DWORD FailedOperationMarker = 0x80000000; |
105 | |
106 | // An entry in the in-memory log |
107 | struct LogRecord |
108 | { |
109 | LONG RecordId; |
110 | DWORD Operation; |
111 | LPVOID CurrentThread; |
112 | LPVOID RequestedAddress; |
113 | LPVOID ReturnedAddress; |
114 | SIZE_T Size; |
115 | DWORD AllocationType; |
116 | DWORD Protect; |
117 | }; |
118 | |
119 | // Maximum number of records in the in-memory log |
120 | const LONG MaxRecords = 128; |
121 | |
122 | // Buffer used to store the logged data |
123 | volatile LogRecord logRecords[MaxRecords]; |
124 | |
125 | // Current record number. Use (recordNumber % MaxRecords) to determine |
126 | // the current position in the circular buffer. |
127 | volatile LONG recordNumber = 0; |
128 | |
129 | // Record an entry in the in-memory log |
130 | void LogVaOperation( |
131 | IN VirtualOperation operation, |
132 | IN LPVOID requestedAddress, |
133 | IN SIZE_T size, |
134 | IN DWORD flAllocationType, |
135 | IN DWORD flProtect, |
136 | IN LPVOID returnedAddress, |
137 | IN BOOL result) |
138 | { |
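 | // InterlockedIncrement hands out a unique, monotonically increasing record id; |
 | // taking it modulo MaxRecords picks the slot in the circular buffer. Once the |
 | // buffer wraps, two threads can race while filling the same slot; this |
 | // lightweight, best-effort log does not guard against that. |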
139 | LONG i = InterlockedIncrement(&recordNumber) - 1; |
140 | LogRecord* curRec = (LogRecord*)&logRecords[i % MaxRecords]; |
141 | |
142 | curRec->RecordId = i; |
143 | curRec->CurrentThread = (LPVOID)pthread_self(); |
144 | curRec->RequestedAddress = requestedAddress; |
145 | curRec->ReturnedAddress = returnedAddress; |
146 | curRec->Size = size; |
147 | curRec->AllocationType = flAllocationType; |
148 | curRec->Protect = flProtect; |
149 | curRec->Operation = static_cast<DWORD>(operation) | (result ? 0 : FailedOperationMarker); |
150 | } |
151 | } |
152 | |
153 | /*++ |
154 | Function: |
155 | VIRTUALInitialize() |
156 | |
157 | Initializes the virtual memory critical section, caches the system page size, and optionally initializes the executable memory allocator. |
158 | |
159 | Return value: |
160 | TRUE if initialization succeeded |
161 | FALSE otherwise. |
162 | |
163 | --*/ |
164 | extern "C" |
165 | BOOL |
166 | VIRTUALInitialize(bool initializeExecutableMemoryAllocator) |
167 | { |
168 | s_virtualPageSize = getpagesize(); |
169 | |
170 | TRACE("Initializing the Virtual Critical Sections. \n" ); |
171 | |
172 | InternalInitializeCriticalSection(&virtual_critsec); |
173 | |
174 | pVirtualMemory = NULL; |
175 | |
176 | if (initializeExecutableMemoryAllocator) |
177 | { |
178 | g_executableMemoryAllocator.Initialize(); |
179 | } |
180 | |
181 | return TRUE; |
182 | } |
183 | |
184 | /*** |
185 | * |
186 | * VIRTUALCleanup() |
187 | * Frees any remaining tracked allocations and deletes this section's critical section. |
188 | * |
189 | */ |
190 | extern "C" |
191 | void VIRTUALCleanup() |
192 | { |
193 | PCMI pEntry; |
194 | PCMI pTempEntry; |
195 | CPalThread * pthrCurrent = InternalGetCurrentThread(); |
196 | |
197 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
198 | |
199 | // Clean up the allocated memory. |
200 | pEntry = pVirtualMemory; |
201 | while ( pEntry ) |
202 | { |
203 | WARN( "The memory at %d was not freed through a call to VirtualFree.\n" , |
204 | pEntry->startBoundary ); |
205 | free(pEntry->pAllocState); |
206 | free(pEntry->pProtectionState ); |
207 | pTempEntry = pEntry; |
208 | pEntry = pEntry->pNext; |
209 | free(pTempEntry ); |
210 | } |
211 | pVirtualMemory = NULL; |
212 | |
213 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
214 | |
215 | TRACE( "Deleting the Virtual Critical Sections. \n" ); |
216 | DeleteCriticalSection( &virtual_critsec ); |
217 | } |
218 | |
219 | /*** |
220 | * |
221 | * VIRTUALContainsInvalidProtectionFlags() |
222 | * Returns TRUE if an invalid flag is specified. FALSE otherwise. |
223 | */ |
224 | static BOOL VIRTUALContainsInvalidProtectionFlags( IN DWORD flProtect ) |
225 | { |
226 | if ( ( flProtect & ~( PAGE_NOACCESS | PAGE_READONLY | |
227 | PAGE_READWRITE | PAGE_EXECUTE | PAGE_EXECUTE_READ | |
228 | PAGE_EXECUTE_READWRITE ) ) != 0 ) |
229 | { |
230 | return TRUE; |
231 | } |
232 | else |
233 | { |
234 | return FALSE; |
235 | } |
236 | } |
237 | |
238 | |
239 | /**** |
240 | * |
241 | * VIRTUALIsPageCommitted |
242 | * |
243 | * SIZE_T nBitToRetrieve - Which page to check. |
244 | * |
245 | * Returns TRUE if committed, FALSE otherwise. |
246 | * |
247 | */ |
248 | static BOOL VIRTUALIsPageCommitted( SIZE_T nBitToRetrieve, CONST PCMI pInformation ) |
249 | { |
250 | SIZE_T nByteOffset = 0; |
251 | UINT nBitOffset = 0; |
252 | UINT byteMask = 0; |
253 | |
254 | if ( !pInformation ) |
255 | { |
256 | ERROR( "pInformation was NULL!\n" ); |
257 | return FALSE; |
258 | } |
259 | |
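 | /* Each page of the region maps to a single bit in pAllocState: a set bit |
 |    means the page is committed, a clear bit means it is only reserved. */ |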
260 | nByteOffset = nBitToRetrieve / CHAR_BIT; |
261 | nBitOffset = nBitToRetrieve % CHAR_BIT; |
262 | |
263 | byteMask = 1 << nBitOffset; |
264 | |
265 | if ( pInformation->pAllocState[ nByteOffset ] & byteMask ) |
266 | { |
267 | return TRUE; |
268 | } |
269 | else |
270 | { |
271 | return FALSE; |
272 | } |
273 | } |
274 | |
275 | /********* |
276 | * |
277 | * VIRTUALGetAllocationType |
278 | * |
279 | * IN SIZE_T Index - The page within the range to retrieve |
280 | * the state for. |
281 | * |
282 | * IN pInformation - The virtual memory object. |
283 | * |
284 | */ |
285 | static INT VIRTUALGetAllocationType( SIZE_T Index, CONST PCMI pInformation ) |
286 | { |
287 | if ( VIRTUALIsPageCommitted( Index, pInformation ) ) |
288 | { |
289 | return MEM_COMMIT; |
290 | } |
291 | else |
292 | { |
293 | return MEM_RESERVE; |
294 | } |
295 | } |
296 | |
297 | /**** |
298 | * |
299 | * VIRTUALSetPageBits |
300 | * |
301 | * IN UINT nStatus - Bit set / reset [0: reset, any other value: set]. |
302 | * IN SIZE_T nStartingBit - The first bit to set. |
303 | * |
304 | * IN SIZE_T nNumberOfBits - The number of bits to set. |
305 | * IN BYTE* pBitArray - A pointer to the array to be manipulated. |
306 | * |
307 | * Returns TRUE on success, FALSE otherwise. |
308 | * Turn on/off memory status bits. |
309 | * |
310 | */ |
311 | static BOOL VIRTUALSetPageBits ( UINT nStatus, SIZE_T nStartingBit, |
312 | SIZE_T nNumberOfBits, BYTE * pBitArray ) |
313 | { |
314 | /* byte masks for optimized modification of partial bytes (changing less |
315 | than 8 bits in a single byte). note that bits are treated in little |
316 | endian order : value 1 is bit 0; value 128 is bit 7. in the binary |
317 | representations below, bit 0 is on the right */ |
318 | |
319 | /* start masks : for modifying bits >= n while preserving bits < n. |
320 | example : if nStartingBit%8 is 3, then bits 0, 1, 2 remain unchanged |
321 | while bits 3..7 are changed; startmasks[3] can be used for this. */ |
322 | static const BYTE startmasks[8] = { |
323 | 0xff, /* start at 0 : 1111 1111 */ |
324 | 0xfe, /* start at 1 : 1111 1110 */ |
325 | 0xfc, /* start at 2 : 1111 1100 */ |
326 | 0xf8, /* start at 3 : 1111 1000 */ |
327 | 0xf0, /* start at 4 : 1111 0000 */ |
328 | 0xe0, /* start at 5 : 1110 0000 */ |
329 | 0xc0, /* start at 6 : 1100 0000 */ |
330 | 0x80 /* start at 7 : 1000 0000 */ |
331 | }; |
332 | |
333 | /* end masks : for modifying bits <= n while preserving bits > n. |
334 | example : if the last bit to change is 5, then bits 6 & 7 stay unchanged |
335 | while bits 0..5 are changed; endmasks[5] can be used for this. */ |
336 | static const BYTE endmasks[8] = { |
337 | 0x01, /* end at 0 : 0000 0001 */ |
338 | 0x03, /* end at 1 : 0000 0011 */ |
339 | 0x07, /* end at 2 : 0000 0111 */ |
340 | 0x0f, /* end at 3 : 0000 1111 */ |
341 | 0x1f, /* end at 4 : 0001 1111 */ |
342 | 0x3f, /* end at 5 : 0011 1111 */ |
343 | 0x7f, /* end at 6 : 0111 1111 */ |
344 | 0xff /* end at 7 : 1111 1111 */ |
345 | }; |
346 | /* last example : if only the middle of a byte must be changed, both start |
347 | and end masks can be combined (bitwise AND) to obtain the correct mask. |
348 | if we want to change bits 2 to 4 : |
349 | startmasks[2] : 0xfc 1111 1100 (change 2,3,4,5,6,7) |
350 | endmasks[4]: 0x1f 0001 1111 (change 0,1,2,3,4) |
351 | bitwise AND : 0x1c 0001 1100 (change 2,3,4) |
352 | */ |
353 | |
354 | BYTE byte_mask; |
355 | SIZE_T nLastBit; |
356 | SIZE_T nFirstByte; |
357 | SIZE_T nLastByte; |
358 | SIZE_T nFullBytes; |
359 | |
360 | TRACE( "VIRTUALSetPageBits( nStatus = %d, nStartingBit = %d, " |
361 | "nNumberOfBits = %d, pBitArray = 0x%p )\n" , |
362 | nStatus, nStartingBit, nNumberOfBits, pBitArray ); |
363 | |
364 | if ( 0 == nNumberOfBits ) |
365 | { |
366 | ERROR( "nNumberOfBits was 0!\n" ); |
367 | return FALSE; |
368 | } |
369 | |
370 | nLastBit = nStartingBit+nNumberOfBits-1; |
371 | nFirstByte = nStartingBit / 8; |
372 | nLastByte = nLastBit / 8; |
373 | |
374 | /* handle partial first byte (if any) */ |
375 | if(0 != (nStartingBit % 8)) |
376 | { |
377 | byte_mask = startmasks[nStartingBit % 8]; |
378 | |
379 | /* if 1st byte is the only changing byte, combine endmask to preserve |
380 | trailing bits (see 3rd example above) */ |
381 | if( nLastByte == nFirstByte) |
382 | { |
383 | byte_mask &= endmasks[nLastBit % 8]; |
384 | } |
385 | |
386 | /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */ |
387 | if(0 == nStatus) |
388 | { |
389 | /* bits to change must be set to 0 : invert byte_mask (giving 0 for |
390 | bits to change), use bitwise AND */ |
391 | pBitArray[nFirstByte] &= ~byte_mask; |
392 | } |
393 | else |
394 | { |
395 | /* bits to change must be set to 1 : use bitwise OR */ |
396 | pBitArray[nFirstByte] |= byte_mask; |
397 | } |
398 | |
399 | /* stop right away if only 1 byte is being modified */ |
400 | if(nLastByte == nFirstByte) |
401 | { |
402 | return TRUE; |
403 | } |
404 | |
405 | /* we're done with the 1st byte; skip over it */ |
406 | nFirstByte++; |
407 | } |
408 | |
409 | /* number of bytes to change, excluding the last byte (handled separately)*/ |
410 | nFullBytes = nLastByte - nFirstByte; |
411 | |
412 | if(0 != nFullBytes) |
413 | { |
414 | // Turn off/on dirty bits |
415 | memset( &(pBitArray[nFirstByte]), (0 == nStatus) ? 0 : 0xFF, nFullBytes ); |
416 | } |
417 | |
418 | /* handle last (possibly partial) byte */ |
419 | byte_mask = endmasks[nLastBit % 8]; |
420 | |
421 | /* byte_mask contains 1 for bits to change, 0 for bits to leave alone */ |
422 | if(0 == nStatus) |
423 | { |
424 | /* bits to change must be set to 0 : invert byte_mask (giving 0 for |
425 | bits to change), use bitwise AND */ |
426 | pBitArray[nLastByte] &= ~byte_mask; |
427 | } |
428 | else |
429 | { |
430 | /* bits to change must be set to 1 : use bitwise OR */ |
431 | pBitArray[nLastByte] |= byte_mask; |
432 | } |
433 | |
434 | return TRUE; |
435 | } |
436 | |
437 | /**** |
438 | * |
439 | * VIRTUALSetAllocState |
440 | * |
441 | * IN UINT nAction - Which action to perform (MEM_COMMIT or MEM_RESERVE). |
442 | * IN SIZE_T nStartingBit - The first bit to set. |
443 | * |
444 | * IN SIZE_T nNumberOfBits - The number of bits to set. |
445 | * IN CONST PCMI pInformation - The virtual memory object whose allocation state is updated. |
446 | * |
447 | * Returns TRUE on success, FALSE otherwise. |
448 | * Turn bit on to indicate committed, turn bit off to indicate reserved. |
449 | * |
450 | */ |
451 | static BOOL VIRTUALSetAllocState( UINT nAction, SIZE_T nStartingBit, |
452 | SIZE_T nNumberOfBits, CONST PCMI pInformation ) |
453 | { |
454 | TRACE( "VIRTUALSetAllocState( nAction = %d, nStartingBit = %d, " |
455 | "nNumberOfBits = %d, pStateArray = 0x%p )\n" , |
456 | nAction, nStartingBit, nNumberOfBits, pInformation ); |
457 | |
458 | if ( !pInformation ) |
459 | { |
460 | ERROR( "pInformation was invalid!\n" ); |
461 | return FALSE; |
462 | } |
463 | |
464 | return VIRTUALSetPageBits((MEM_COMMIT == nAction) ? 1 : 0, nStartingBit, |
465 | nNumberOfBits, pInformation->pAllocState); |
466 | } |
467 | |
468 | /**** |
469 | * |
470 | * VIRTUALFindRegionInformation( ) |
471 | * |
472 | * IN UINT_PTR address - The address to look for. |
473 | * |
474 | * Returns the PCMI if found, NULL otherwise. |
475 | */ |
476 | static PCMI VIRTUALFindRegionInformation( IN UINT_PTR address ) |
477 | { |
478 | PCMI pEntry = NULL; |
479 | |
480 | TRACE( "VIRTUALFindRegionInformation( %#x )\n" , address ); |
481 | |
482 | pEntry = pVirtualMemory; |
483 | |
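 | /* The allocation list is kept sorted by startBoundary (see |
 |    VIRTUALStoreAllocationInfo), so the walk can stop as soon as it moves |
 |    past the address being looked up. */ |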
484 | while( pEntry ) |
485 | { |
486 | if ( pEntry->startBoundary > address ) |
487 | { |
488 | /* Gone past the possible location in the list. */ |
489 | pEntry = NULL; |
490 | break; |
491 | } |
492 | if ( pEntry->startBoundary + pEntry->memSize > address ) |
493 | { |
494 | break; |
495 | } |
496 | |
497 | pEntry = pEntry->pNext; |
498 | } |
499 | return pEntry; |
500 | } |
501 | |
502 | /*++ |
503 | Function : |
504 | |
505 | VIRTUALReleaseMemory |
506 | |
507 | Removes a PCMI entry from the list. |
508 | |
509 | Returns TRUE on success, FALSE otherwise. |
510 | --*/ |
511 | static BOOL VIRTUALReleaseMemory( PCMI pMemoryToBeReleased ) |
512 | { |
513 | BOOL bRetVal = TRUE; |
514 | |
515 | if ( !pMemoryToBeReleased ) |
516 | { |
517 | ASSERT( "Invalid pointer.\n" ); |
518 | return FALSE; |
519 | } |
520 | |
521 | if ( pMemoryToBeReleased == pVirtualMemory ) |
522 | { |
523 | /* This is either the first entry, or the only entry. */ |
524 | pVirtualMemory = pMemoryToBeReleased->pNext; |
525 | if ( pMemoryToBeReleased->pNext ) |
526 | { |
527 | pMemoryToBeReleased->pNext->pPrevious = NULL; |
528 | } |
529 | } |
530 | else /* Could be anywhere in the list. */ |
531 | { |
532 | /* Delete the entry from the linked list. */ |
533 | if ( pMemoryToBeReleased->pPrevious ) |
534 | { |
535 | pMemoryToBeReleased->pPrevious->pNext = pMemoryToBeReleased->pNext; |
536 | } |
537 | |
538 | if ( pMemoryToBeReleased->pNext ) |
539 | { |
540 | pMemoryToBeReleased->pNext->pPrevious = pMemoryToBeReleased->pPrevious; |
541 | } |
542 | } |
543 | |
544 | free( pMemoryToBeReleased->pAllocState ); |
545 | pMemoryToBeReleased->pAllocState = NULL; |
546 | |
547 | free( pMemoryToBeReleased->pProtectionState ); |
548 | pMemoryToBeReleased->pProtectionState = NULL; |
549 | |
550 | free( pMemoryToBeReleased ); |
551 | pMemoryToBeReleased = NULL; |
552 | |
553 | return bRetVal; |
554 | } |
555 | |
556 | /**** |
557 | * VIRTUALConvertWinFlags() - |
558 | * Converts win32 protection flags to |
559 | * internal VIRTUAL flags. |
560 | * |
561 | */ |
562 | static BYTE VIRTUALConvertWinFlags( IN DWORD flProtect ) |
563 | { |
564 | BYTE MemAccessControl = 0; |
565 | |
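 | /* Only the low byte of flProtect carries the base page protection; modifier |
 |    bits such as PAGE_GUARD live above it and are rejected earlier by |
 |    VIRTUALContainsInvalidProtectionFlags in the public entry points. */ |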
566 | switch ( flProtect & 0xff ) |
567 | { |
568 | case PAGE_NOACCESS : |
569 | MemAccessControl = VIRTUAL_NOACCESS; |
570 | break; |
571 | case PAGE_READONLY : |
572 | MemAccessControl = VIRTUAL_READONLY; |
573 | break; |
574 | case PAGE_READWRITE : |
575 | MemAccessControl = VIRTUAL_READWRITE; |
576 | break; |
577 | case PAGE_EXECUTE : |
578 | MemAccessControl = VIRTUAL_EXECUTE; |
579 | break; |
580 | case PAGE_EXECUTE_READ : |
581 | MemAccessControl = VIRTUAL_EXECUTE_READ; |
582 | break; |
583 | case PAGE_EXECUTE_READWRITE: |
584 | MemAccessControl = VIRTUAL_EXECUTE_READWRITE; |
585 | break; |
586 | |
587 | default : |
588 | MemAccessControl = 0; |
589 | ERROR( "Incorrect or no protection flags specified.\n" ); |
590 | break; |
591 | } |
592 | return MemAccessControl; |
593 | } |
594 | |
595 | /**** |
596 | * VIRTUALConvertVirtualFlags() - |
597 | * Converts internal virtual protection |
598 | * flags to their win32 counterparts. |
599 | */ |
600 | static DWORD VIRTUALConvertVirtualFlags( IN BYTE VirtualProtect ) |
601 | { |
602 | DWORD MemAccessControl = 0; |
603 | |
604 | if ( VirtualProtect == VIRTUAL_READONLY ) |
605 | { |
606 | MemAccessControl = PAGE_READONLY; |
607 | } |
608 | else if ( VirtualProtect == VIRTUAL_READWRITE ) |
609 | { |
610 | MemAccessControl = PAGE_READWRITE; |
611 | } |
612 | else if ( VirtualProtect == VIRTUAL_EXECUTE_READWRITE ) |
613 | { |
614 | MemAccessControl = PAGE_EXECUTE_READWRITE; |
615 | } |
616 | else if ( VirtualProtect == VIRTUAL_EXECUTE_READ ) |
617 | { |
618 | MemAccessControl = PAGE_EXECUTE_READ; |
619 | } |
620 | else if ( VirtualProtect == VIRTUAL_EXECUTE ) |
621 | { |
622 | MemAccessControl = PAGE_EXECUTE; |
623 | } |
624 | else if ( VirtualProtect == VIRTUAL_NOACCESS ) |
625 | { |
626 | MemAccessControl = PAGE_NOACCESS; |
627 | } |
628 | |
629 | else |
630 | { |
631 | MemAccessControl = 0; |
632 | ERROR( "Incorrect or no protection flags specified.\n" ); |
633 | } |
634 | return MemAccessControl; |
635 | } |
636 | |
637 | /*** |
638 | * Displays the linked list. |
639 | * |
640 | */ |
641 | #if defined _DEBUG |
642 | static void VIRTUALDisplayList( void ) |
643 | { |
644 | if (!DBG_ENABLED(DLI_TRACE, defdbgchan)) |
645 | return; |
646 | |
647 | PCMI p; |
648 | SIZE_T count; |
649 | SIZE_T index; |
650 | CPalThread * pthrCurrent = InternalGetCurrentThread(); |
651 | |
652 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
653 | |
654 | p = pVirtualMemory; |
655 | count = 0; |
656 | while ( p ) { |
657 | |
658 | DBGOUT( "Entry %d : \n" , count ); |
659 | DBGOUT( "\t startBoundary %#x \n" , p->startBoundary ); |
660 | DBGOUT( "\t memSize %d \n" , p->memSize ); |
661 | |
662 | DBGOUT( "\t pAllocState " ); |
663 | for ( index = 0; index < p->memSize / GetVirtualPageSize(); index++) |
664 | { |
665 | DBGOUT( "[%d] " , VIRTUALGetAllocationType( index, p ) ); |
666 | } |
667 | DBGOUT( "\t pProtectionState " ); |
668 | for ( index = 0; index < p->memSize / GetVirtualPageSize(); index++ ) |
669 | { |
670 | DBGOUT( "[%d] " , (UINT)p->pProtectionState[ index ] ); |
671 | } |
672 | DBGOUT( "\n" ); |
673 | DBGOUT( "\t accessProtection %d \n" , p->accessProtection ); |
674 | DBGOUT( "\t allocationType %d \n" , p->allocationType ); |
675 | DBGOUT( "\t pNext %p \n" , p->pNext ); |
676 | DBGOUT( "\t pLast %p \n" , p->pPrevious ); |
677 | |
678 | count++; |
679 | p = p->pNext; |
680 | } |
681 | |
682 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
683 | } |
684 | #endif |
685 | |
686 | #ifdef DEBUG |
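 | // Debug-only helpers: assert that an entry does not overlap its right and |
 | // left neighbours in the sorted allocation list. |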
687 | void VerifyRightEntry(PCMI pEntry) |
688 | { |
689 | volatile PCMI pRight = pEntry->pNext; |
690 | SIZE_T endAddress; |
691 | if (pRight != nullptr) |
692 | { |
693 | endAddress = ((SIZE_T)pEntry->startBoundary) + pEntry->memSize; |
694 | _ASSERTE(endAddress <= (SIZE_T)pRight->startBoundary); |
695 | } |
696 | } |
697 | |
698 | void VerifyLeftEntry(PCMI pEntry) |
699 | { |
700 | volatile PCMI pLeft = pEntry->pPrevious; |
701 | SIZE_T endAddress; |
702 | if (pLeft != NULL) |
703 | { |
704 | endAddress = ((SIZE_T)pLeft->startBoundary) + pLeft->memSize; |
705 | _ASSERTE(endAddress <= (SIZE_T)pEntry->startBoundary); |
706 | } |
707 | } |
708 | #endif // DEBUG |
709 | |
710 | /**** |
711 | * VIRTUALStoreAllocationInfo() |
712 | * |
713 | * Stores the allocation information in the linked list. |
714 | * NOTE: The caller must own the critical section. |
715 | */ |
716 | static BOOL VIRTUALStoreAllocationInfo( |
717 | IN UINT_PTR startBoundary, /* Start of the region. */ |
718 | IN SIZE_T memSize, /* Size of the region. */ |
719 | IN DWORD flAllocationType, /* Allocation Types. */ |
720 | IN DWORD flProtection ) /* Protection flags on the memory. */ |
721 | { |
722 | PCMI pNewEntry = nullptr; |
723 | PCMI pMemInfo = nullptr; |
724 | SIZE_T nBufferSize = 0; |
725 | |
726 | if (!IS_ALIGNED(memSize, GetVirtualPageSize())) |
727 | { |
728 | ERROR("The memory size was not a multiple of the page size. \n" ); |
729 | return FALSE; |
730 | } |
731 | |
732 | if (!(pNewEntry = (PCMI)InternalMalloc(sizeof(*pNewEntry)))) |
733 | { |
734 | ERROR( "Unable to allocate memory for the structure.\n" ); |
735 | return FALSE; |
736 | } |
737 | |
738 | pNewEntry->startBoundary = startBoundary; |
739 | pNewEntry->memSize = memSize; |
740 | pNewEntry->allocationType = flAllocationType; |
741 | pNewEntry->accessProtection = flProtection; |
742 | |
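 | /* The allocation-state bitmap needs one bit per page, rounded up to a whole |
 |    number of bytes. */ |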
743 | nBufferSize = memSize / GetVirtualPageSize() / CHAR_BIT; |
744 | if ((memSize / GetVirtualPageSize()) % CHAR_BIT != 0) |
745 | { |
746 | nBufferSize++; |
747 | } |
748 | |
749 | pNewEntry->pAllocState = (BYTE*)InternalMalloc(nBufferSize); |
750 | pNewEntry->pProtectionState = (BYTE*)InternalMalloc((memSize / GetVirtualPageSize())); |
751 | |
752 | if (pNewEntry->pAllocState && pNewEntry->pProtectionState) |
753 | { |
754 | /* Set the initial allocation state and the initial allocation protection. */ |
755 | VIRTUALSetAllocState(MEM_RESERVE, 0, nBufferSize * CHAR_BIT, pNewEntry); |
756 | memset(pNewEntry->pProtectionState, |
757 | VIRTUALConvertWinFlags(flProtection), |
758 | memSize / GetVirtualPageSize()); |
759 | } |
760 | else |
761 | { |
762 | ERROR( "Unable to allocate memory for the structure.\n" ); |
763 | |
764 | if (pNewEntry->pProtectionState) free(pNewEntry->pProtectionState); |
765 | pNewEntry->pProtectionState = nullptr; |
766 | |
767 | if (pNewEntry->pAllocState) free(pNewEntry->pAllocState); |
768 | pNewEntry->pAllocState = nullptr; |
769 | |
770 | free(pNewEntry); |
771 | pNewEntry = nullptr; |
772 | |
773 | return FALSE; |
774 | } |
775 | |
776 | pMemInfo = pVirtualMemory; |
777 | |
778 | if (pMemInfo && pMemInfo->startBoundary < startBoundary) |
779 | { |
780 | /* Look for the correct insert point */ |
781 | TRACE("Looking for the correct insert location.\n" ); |
782 | while (pMemInfo->pNext && (pMemInfo->pNext->startBoundary < startBoundary)) |
783 | { |
784 | pMemInfo = pMemInfo->pNext; |
785 | } |
786 | |
787 | pNewEntry->pNext = pMemInfo->pNext; |
788 | pNewEntry->pPrevious = pMemInfo; |
789 | |
790 | if (pNewEntry->pNext) |
791 | { |
792 | pNewEntry->pNext->pPrevious = pNewEntry; |
793 | } |
794 | |
795 | pMemInfo->pNext = pNewEntry; |
796 | } |
797 | else |
798 | { |
799 | /* This is the first entry in the list. */ |
800 | pNewEntry->pNext = pMemInfo; |
801 | pNewEntry->pPrevious = nullptr; |
802 | |
803 | if (pNewEntry->pNext) |
804 | { |
805 | pNewEntry->pNext->pPrevious = pNewEntry; |
806 | } |
807 | |
808 | pVirtualMemory = pNewEntry ; |
809 | } |
810 | |
811 | #ifdef DEBUG |
812 | VerifyRightEntry(pNewEntry); |
813 | VerifyLeftEntry(pNewEntry); |
814 | #endif // DEBUG |
815 | |
816 | return TRUE; |
817 | } |
818 | |
819 | /****** |
820 | * |
821 | * VIRTUALResetMemory() - Helper function that resets the memory |
822 | * |
823 | * |
824 | */ |
825 | static LPVOID VIRTUALResetMemory( |
826 | IN CPalThread *pthrCurrent, /* Currently executing thread */ |
827 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
828 | IN SIZE_T dwSize) /* Size of Region */ |
829 | { |
830 | LPVOID pRetVal = NULL; |
831 | UINT_PTR StartBoundary; |
832 | SIZE_T MemSize; |
833 | |
834 | TRACE( "Resetting the memory now..\n" ); |
835 | |
836 | StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize()); |
837 | MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary; |
838 | |
839 | int st; |
840 | #if HAVE_MADV_FREE |
841 | // Try to use MADV_FREE if supported. It tells the kernel that the application doesn't |
842 | // need the pages in the range, and that freeing them can be delayed until memory |
843 | // pressure occurs. |
844 | st = madvise((LPVOID)StartBoundary, MemSize, MADV_FREE); |
845 | if (st != 0) |
846 | #endif |
847 | { |
848 | // If MADV_FREE is not supported (or the call failed), fall back to MADV_DONTNEED |
849 | st = madvise((LPVOID)StartBoundary, MemSize, MADV_DONTNEED); |
850 | } |
851 | |
852 | if (st == 0) |
853 | { |
854 | pRetVal = lpAddress; |
855 | } |
856 | |
857 | LogVaOperation( |
858 | VirtualMemoryLogging::VirtualOperation::Reset, |
859 | lpAddress, |
860 | dwSize, |
861 | 0, |
862 | 0, |
863 | pRetVal, |
864 | pRetVal != NULL); |
865 | |
866 | return pRetVal; |
867 | } |
868 | |
869 | /****** |
870 | * |
871 | * VIRTUALReserveMemory() - Helper function that actually reserves the memory. |
872 | * |
873 | * NOTE: SetLastError is called in here because many different error states |
874 | * exist, and propagating them to the caller would be complicated. |
875 | * |
876 | */ |
877 | static LPVOID VIRTUALReserveMemory( |
878 | IN CPalThread *pthrCurrent, /* Currently executing thread */ |
879 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
880 | IN SIZE_T dwSize, /* Size of Region */ |
881 | IN DWORD flAllocationType, /* Type of allocation */ |
882 | IN DWORD flProtect) /* Type of access protection */ |
883 | { |
884 | LPVOID pRetVal = NULL; |
885 | UINT_PTR StartBoundary; |
886 | SIZE_T MemSize; |
887 | |
888 | TRACE( "Reserving the memory now..\n" ); |
889 | |
890 | // First, figure out where we're trying to reserve the memory and |
891 | // how much we need. On most systems, mmap requires the start address to be |
892 | // page-aligned and the size to be a multiple of the page size. Unlike on Windows, |
893 | // on Unix the allocation granularity is the page size, so the memory size to |
894 | // reserve is not aligned to 64 KB. Nor should the start boundary need to |
895 | // be aligned down to 64 KB, but it is expected that there are other |
896 | // components that rely on this alignment when providing a specific address |
897 | // (note that mmap itself does not make any such guarantees). |
898 | StartBoundary = (UINT_PTR)ALIGN_DOWN(lpAddress, VIRTUAL_64KB); |
899 | MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary; |
900 | |
901 | // If this is a request for special executable (JIT'ed) memory then, first of all, |
902 | // try to get memory from the executable memory allocator to satisfy the request. |
903 | if (((flAllocationType & MEM_RESERVE_EXECUTABLE) != 0) && (lpAddress == NULL)) |
904 | { |
905 | // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see |
906 | // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done |
907 | SIZE_T reservationSize = ALIGN_UP(MemSize, VIRTUAL_64KB); |
908 | pRetVal = g_executableMemoryAllocator.AllocateMemory(reservationSize); |
909 | if (pRetVal != nullptr) |
910 | { |
911 | MemSize = reservationSize; |
912 | } |
913 | } |
914 | |
915 | if (pRetVal == NULL) |
916 | { |
917 | // Try to reserve memory from the OS |
918 | pRetVal = ReserveVirtualMemory(pthrCurrent, (LPVOID)StartBoundary, MemSize); |
919 | } |
920 | |
921 | if (pRetVal != NULL) |
922 | { |
923 | if ( !lpAddress ) |
924 | { |
925 | /* Compute the real values instead of the null values. */ |
926 | StartBoundary = (UINT_PTR) ALIGN_DOWN(pRetVal, GetVirtualPageSize()); |
927 | MemSize = ALIGN_UP((UINT_PTR)pRetVal + dwSize, GetVirtualPageSize()) - StartBoundary; |
928 | } |
929 | |
930 | if ( !VIRTUALStoreAllocationInfo( StartBoundary, MemSize, |
931 | flAllocationType, flProtect ) ) |
932 | { |
933 | ASSERT( "Unable to store the structure in the list.\n" ); |
934 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
935 | munmap( pRetVal, MemSize ); |
936 | pRetVal = NULL; |
937 | } |
938 | } |
939 | |
940 | LogVaOperation( |
941 | VirtualMemoryLogging::VirtualOperation::Reserve, |
942 | lpAddress, |
943 | dwSize, |
944 | flAllocationType, |
945 | flProtect, |
946 | pRetVal, |
947 | pRetVal != NULL); |
948 | |
949 | return pRetVal; |
950 | } |
951 | |
952 | /****** |
953 | * |
954 | * ReserveVirtualMemory() - Helper function that is used by Virtual* APIs |
955 | * and ExecutableMemoryAllocator to reserve virtual memory from the OS. |
956 | * |
957 | */ |
958 | static LPVOID ReserveVirtualMemory( |
959 | IN CPalThread *pthrCurrent, /* Currently executing thread */ |
960 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
961 | IN SIZE_T dwSize) /* Size of Region */ |
962 | { |
963 | UINT_PTR StartBoundary = (UINT_PTR)lpAddress; |
964 | SIZE_T MemSize = dwSize; |
965 | |
966 | TRACE( "Reserving the memory now.\n" ); |
967 | |
968 | // Most platforms will only commit memory if it is dirtied, |
969 | // so this should not consume too much swap space. |
970 | int mmapFlags = 0; |
971 | |
972 | #if HAVE_VM_ALLOCATE |
973 | // Allocate with vm_allocate first, then map at the fixed address. |
974 | int result = vm_allocate(mach_task_self(), |
975 | &StartBoundary, |
976 | MemSize, |
977 | ((LPVOID) StartBoundary != nullptr) ? FALSE : TRUE); |
978 | |
979 | if (result != KERN_SUCCESS) |
980 | { |
981 | ERROR("vm_allocate failed to allocated the requested region!\n" ); |
982 | pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS); |
983 | return nullptr; |
984 | } |
985 | |
986 | mmapFlags |= MAP_FIXED; |
987 | #endif // HAVE_VM_ALLOCATE |
988 | |
989 | mmapFlags |= MAP_ANON | MAP_PRIVATE; |
990 | |
991 | LPVOID pRetVal = mmap((LPVOID) StartBoundary, |
992 | MemSize, |
993 | PROT_NONE, |
994 | mmapFlags, |
995 | -1 /* fd */, |
996 | 0 /* offset */); |
997 | |
998 | if (pRetVal == MAP_FAILED) |
999 | { |
1000 | ERROR( "Failed due to insufficient memory.\n" ); |
1001 | |
1002 | #if HAVE_VM_ALLOCATE |
1003 | vm_deallocate(mach_task_self(), StartBoundary, MemSize); |
1004 | #endif // HAVE_VM_ALLOCATE |
1005 | |
1006 | pthrCurrent->SetLastError(ERROR_NOT_ENOUGH_MEMORY); |
1007 | return nullptr; |
1008 | } |
1009 | |
1010 | /* Check to see if the region is what we asked for. */ |
1011 | if (lpAddress != nullptr && StartBoundary != (UINT_PTR)pRetVal) |
1012 | { |
1013 | ERROR("We did not get the region we asked for from mmap!\n" ); |
1014 | pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS); |
1015 | munmap(pRetVal, MemSize); |
1016 | return nullptr; |
1017 | } |
1018 | |
1019 | #if MMAP_ANON_IGNORES_PROTECTION |
1020 | if (mprotect(pRetVal, MemSize, PROT_NONE) != 0) |
1021 | { |
1022 | ERROR("mprotect failed to protect the region!\n" ); |
1023 | pthrCurrent->SetLastError(ERROR_INVALID_ADDRESS); |
1024 | munmap(pRetVal, MemSize); |
1025 | return nullptr; |
1026 | } |
1027 | #endif // MMAP_ANON_IGNORES_PROTECTION |
1028 | |
1029 | return pRetVal; |
1030 | } |
1031 | |
1032 | /****** |
1033 | * |
1034 | * VIRTUALCommitMemory() - Helper function that actually commits the memory. |
1035 | * |
1036 | * NOTE: SetLastError is called in here because many different error states |
1037 | * exist, and propagating them to the caller would be complicated. |
1038 | * |
1039 | */ |
1040 | static LPVOID |
1041 | VIRTUALCommitMemory( |
1042 | IN CPalThread *pthrCurrent, /* Currently executing thread */ |
1043 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
1044 | IN SIZE_T dwSize, /* Size of Region */ |
1045 | IN DWORD flAllocationType, /* Type of allocation */ |
1046 | IN DWORD flProtect) /* Type of access protection */ |
1047 | { |
1048 | UINT_PTR StartBoundary = 0; |
1049 | SIZE_T MemSize = 0; |
1050 | PCMI pInformation = 0; |
1051 | LPVOID pRetVal = NULL; |
1052 | BOOL IsLocallyReserved = FALSE; |
1053 | SIZE_T totalPages; |
1054 | INT allocationType, curAllocationType; |
1055 | INT protectionState, curProtectionState; |
1056 | SIZE_T initialRunStart; |
1057 | SIZE_T runStart; |
1058 | SIZE_T runLength; |
1059 | SIZE_T index; |
1060 | INT nProtect; |
1061 | INT vProtect; |
1062 | |
1063 | if ( lpAddress ) |
1064 | { |
1065 | StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize()); |
1066 | MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary; |
1067 | } |
1068 | else |
1069 | { |
1070 | MemSize = ALIGN_UP(dwSize, GetVirtualPageSize()); |
1071 | } |
1072 | |
1073 | /* See if we have already reserved this memory. */ |
1074 | pInformation = VIRTUALFindRegionInformation( StartBoundary ); |
1075 | |
1076 | if ( !pInformation ) |
1077 | { |
1078 | /* According to the new MSDN docs, if MEM_COMMIT is specified, |
1079 | and the memory is not reserved, you reserve and then commit. |
1080 | */ |
1081 | LPVOID pReservedMemory = |
1082 | VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize, |
1083 | flAllocationType, flProtect ); |
1084 | |
1085 | TRACE( "Reserve and commit the memory!\n " ); |
1086 | |
1087 | if ( pReservedMemory ) |
1088 | { |
1089 | /* Re-align the addresses and try again to find the memory. */ |
1090 | StartBoundary = (UINT_PTR) ALIGN_DOWN(pReservedMemory, GetVirtualPageSize()); |
1091 | MemSize = ALIGN_UP((UINT_PTR)pReservedMemory + dwSize, GetVirtualPageSize()) - StartBoundary; |
1092 | |
1093 | pInformation = VIRTUALFindRegionInformation( StartBoundary ); |
1094 | |
1095 | if ( !pInformation ) |
1096 | { |
1097 | ASSERT( "Unable to locate the region information.\n" ); |
1098 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1099 | pRetVal = NULL; |
1100 | goto done; |
1101 | } |
1102 | IsLocallyReserved = TRUE; |
1103 | } |
1104 | else |
1105 | { |
1106 | ERROR( "Unable to reserve the memory.\n" ); |
1107 | /* Don't set last error here, it will already be set. */ |
1108 | pRetVal = NULL; |
1109 | goto done; |
1110 | } |
1111 | } |
1112 | |
1113 | TRACE( "Committing the memory now..\n" ); |
1114 | |
1115 | // Pages that aren't already committed need to be committed. Pages that |
1116 | // are committed don't need to be committed, but they might need to have |
1117 | // their permissions changed. |
1118 | // To get this right, we find runs of pages with similar states and |
1119 | // permissions. If a run is not committed, we commit it and then set |
1120 | // its permissions. If a run is committed but has different permissions |
1121 | // from what we're trying to set, we set its permissions. Finally, |
1122 | // if a run is already committed and has the right permissions, |
1123 | // we don't need to do anything to it. |
1124 | |
1125 | totalPages = MemSize / GetVirtualPageSize(); |
1126 | runStart = (StartBoundary - pInformation->startBoundary) / |
1127 | GetVirtualPageSize(); // Page index |
1128 | initialRunStart = runStart; |
1129 | allocationType = VIRTUALGetAllocationType(runStart, pInformation); |
1130 | protectionState = pInformation->pProtectionState[runStart]; |
1131 | curAllocationType = allocationType; |
1132 | curProtectionState = protectionState; |
1133 | runLength = 1; |
1134 | nProtect = W32toUnixAccessControl(flProtect); |
1135 | vProtect = VIRTUALConvertWinFlags(flProtect); |
1136 | |
1137 | if (totalPages > pInformation->memSize / GetVirtualPageSize() - runStart) |
1138 | { |
1139 | ERROR("Trying to commit beyond the end of the region!\n" ); |
1140 | goto error; |
1141 | } |
1142 | |
1143 | while(runStart < initialRunStart + totalPages) |
1144 | { |
1145 | // Find the next run of pages |
1146 | for(index = runStart + 1; index < initialRunStart + totalPages; |
1147 | index++) |
1148 | { |
1149 | curAllocationType = VIRTUALGetAllocationType(index, pInformation); |
1150 | curProtectionState = pInformation->pProtectionState[index]; |
1151 | if (curAllocationType != allocationType || |
1152 | curProtectionState != protectionState) |
1153 | { |
1154 | break; |
1155 | } |
1156 | runLength++; |
1157 | } |
1158 | |
1159 | StartBoundary = pInformation->startBoundary + runStart * GetVirtualPageSize(); |
1160 | pRetVal = (void *)StartBoundary; |
1161 | MemSize = runLength * GetVirtualPageSize(); |
1162 | |
1163 | if (allocationType != MEM_COMMIT) |
1164 | { |
1165 | // Commit the pages |
1166 | if (mprotect((void *) StartBoundary, MemSize, PROT_WRITE | PROT_READ) != 0) |
1167 | { |
1168 | ERROR("mprotect() failed! Error(%d)=%s\n" , errno, strerror(errno)); |
1169 | goto error; |
1170 | } |
1171 | |
1172 | VIRTUALSetAllocState(MEM_COMMIT, runStart, runLength, pInformation); |
1173 | |
1174 | if (nProtect == (PROT_WRITE | PROT_READ)) |
1175 | { |
1176 | // Handle this case specially so we don't bother |
1177 | // mprotect'ing the region. |
1178 | memset(pInformation->pProtectionState + runStart, |
1179 | vProtect, runLength); |
1180 | } |
1181 | |
1182 | protectionState = VIRTUAL_READWRITE; |
1183 | } |
1184 | |
1185 | if (protectionState != vProtect) |
1186 | { |
1187 | // Change permissions. |
1188 | if (mprotect((void *) StartBoundary, MemSize, nProtect) != -1) |
1189 | { |
1190 | memset(pInformation->pProtectionState + runStart, |
1191 | vProtect, runLength); |
1192 | } |
1193 | else |
1194 | { |
1195 | ERROR("mprotect() failed! Error(%d)=%s\n" , |
1196 | errno, strerror(errno)); |
1197 | goto error; |
1198 | } |
1199 | } |
1200 | |
1201 | runStart = index; |
1202 | runLength = 1; |
1203 | allocationType = curAllocationType; |
1204 | protectionState = curProtectionState; |
1205 | } |
1206 | |
1207 | pRetVal = (void *) (pInformation->startBoundary + initialRunStart * GetVirtualPageSize()); |
1208 | goto done; |
1209 | |
1210 | error: |
1211 | if ( flAllocationType & MEM_RESERVE || IsLocallyReserved ) |
1212 | { |
1213 | munmap( pRetVal, MemSize ); |
1214 | if ( VIRTUALReleaseMemory( pInformation ) == FALSE ) |
1215 | { |
1216 | ASSERT( "Unable to remove the PCMI entry from the list.\n" ); |
1217 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1218 | pRetVal = NULL; |
1219 | goto done; |
1220 | } |
1221 | } |
1222 | |
1223 | pInformation = NULL; |
1224 | pRetVal = NULL; |
1225 | done: |
1226 | |
1227 | LogVaOperation( |
1228 | VirtualMemoryLogging::VirtualOperation::Commit, |
1229 | lpAddress, |
1230 | dwSize, |
1231 | flAllocationType, |
1232 | flProtect, |
1233 | pRetVal, |
1234 | pRetVal != NULL); |
1235 | |
1236 | return pRetVal; |
1237 | } |
1238 | |
1239 | /*++ |
1240 | Function: |
1241 | PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange |
1242 | |
1243 | This function attempts to allocate the requested amount of memory in the specified address range, from the executable memory |
1244 | allocator. If unable to do so, the function returns nullptr and does not set the last error. |
1245 | |
1246 | lpBeginAddress - Inclusive beginning of range |
1247 | lpEndAddress - Exclusive end of range |
1248 | dwSize - Number of bytes to allocate |
1249 | --*/ |
1250 | LPVOID |
1251 | PALAPI |
1252 | PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange( |
1253 | IN LPCVOID lpBeginAddress, |
1254 | IN LPCVOID lpEndAddress, |
1255 | IN SIZE_T dwSize) |
1256 | { |
1257 | #ifdef BIT64 |
1258 | PERF_ENTRY(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange); |
1259 | ENTRY( |
1260 | "PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange(lpBeginAddress = %p, lpEndAddress = %p, dwSize = %Iu)\n" , |
1261 | lpBeginAddress, |
1262 | lpEndAddress, |
1263 | dwSize); |
1264 | |
1265 | _ASSERTE(lpBeginAddress <= lpEndAddress); |
1266 | |
1267 | // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see |
1268 | // ExecutableMemoryAllocator::AllocateMemory() for the reason why it is done |
1269 | SIZE_T reservationSize = ALIGN_UP(dwSize, VIRTUAL_64KB); |
1270 | |
1271 | CPalThread *currentThread = InternalGetCurrentThread(); |
1272 | InternalEnterCriticalSection(currentThread, &virtual_critsec); |
1273 | |
1274 | void *address = g_executableMemoryAllocator.AllocateMemoryWithinRange(lpBeginAddress, lpEndAddress, reservationSize); |
1275 | if (address != nullptr) |
1276 | { |
1277 | _ASSERTE(IS_ALIGNED(address, GetVirtualPageSize())); |
1278 | if (!VIRTUALStoreAllocationInfo((UINT_PTR)address, reservationSize, MEM_RESERVE | MEM_RESERVE_EXECUTABLE, PAGE_NOACCESS)) |
1279 | { |
1280 | ASSERT("Unable to store the structure in the list.\n" ); |
1281 | munmap(address, reservationSize); |
1282 | address = nullptr; |
1283 | } |
1284 | } |
1285 | |
1286 | LogVaOperation( |
1287 | VirtualMemoryLogging::VirtualOperation::ReserveFromExecutableMemoryAllocatorWithinRange, |
1288 | nullptr, |
1289 | dwSize, |
1290 | MEM_RESERVE | MEM_RESERVE_EXECUTABLE, |
1291 | PAGE_NOACCESS, |
1292 | address, |
1293 | TRUE); |
1294 | |
1295 | InternalLeaveCriticalSection(currentThread, &virtual_critsec); |
1296 | |
1297 | LOGEXIT("PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange returning %p\n" , address); |
1298 | PERF_EXIT(PAL_VirtualReserveFromExecutableMemoryAllocatorWithinRange); |
1299 | return address; |
1300 | #else // !BIT64 |
1301 | return nullptr; |
1302 | #endif // BIT64 |
1303 | } |
1304 | |
1305 | /*++ |
1306 | Function: |
1307 | VirtualAlloc |
1308 | |
1309 | Note: |
1310 | MEM_TOP_DOWN is accepted but ignored. MEM_PHYSICAL and MEM_WRITE_WATCH are not |
1311 | supported; passing them causes the call to fail with ERROR_INVALID_PARAMETER. |
1312 | |
1313 | Page size on i386 is set to 4k. |
1314 | |
1315 | See MSDN doc. |
1316 | --*/ |
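 | // Illustrative caller-side sketch (not part of this file): a typical sequence is |
 | // to reserve an address range first and commit pages on demand, e.g. |
 | //   LPVOID base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS); |
 | //   VirtualAlloc(base, pageCount * pageSize, MEM_COMMIT, PAGE_READWRITE); |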
1317 | LPVOID |
1318 | PALAPI |
1319 | VirtualAlloc( |
1320 | IN LPVOID lpAddress, /* Region to reserve or commit */ |
1321 | IN SIZE_T dwSize, /* Size of Region */ |
1322 | IN DWORD flAllocationType, /* Type of allocation */ |
1323 | IN DWORD flProtect) /* Type of access protection */ |
1324 | { |
1325 | LPVOID pRetVal = NULL; |
1326 | CPalThread *pthrCurrent; |
1327 | |
1328 | PERF_ENTRY(VirtualAlloc); |
1329 | ENTRY("VirtualAlloc(lpAddress=%p, dwSize=%u, flAllocationType=%#x, \ |
1330 | flProtect=%#x)\n" , lpAddress, dwSize, flAllocationType, flProtect); |
1331 | |
1332 | pthrCurrent = InternalGetCurrentThread(); |
1333 | |
1334 | if ( ( flAllocationType & MEM_WRITE_WATCH ) != 0 ) |
1335 | { |
1336 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1337 | goto done; |
1338 | } |
1339 | |
1340 | /* Test for un-supported flags. */ |
1341 | if ( ( flAllocationType & ~( MEM_COMMIT | MEM_RESERVE | MEM_RESET | MEM_TOP_DOWN | MEM_RESERVE_EXECUTABLE ) ) != 0 ) |
1342 | { |
1343 | ASSERT( "flAllocationType can be one, or any combination of MEM_COMMIT, \ |
1344 | MEM_RESERVE, MEM_TOP_DOWN, or MEM_RESERVE_EXECUTABLE.\n" ); |
1345 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1346 | goto done; |
1347 | } |
1348 | if ( VIRTUALContainsInvalidProtectionFlags( flProtect ) ) |
1349 | { |
1350 | ASSERT( "flProtect can be one of PAGE_READONLY, PAGE_READWRITE, or \ |
1351 | PAGE_EXECUTE_READWRITE || PAGE_NOACCESS. \n" ); |
1352 | |
1353 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1354 | goto done; |
1355 | } |
1356 | if ( flAllocationType & MEM_TOP_DOWN ) |
1357 | { |
1358 | WARN( "Ignoring the allocation flag MEM_TOP_DOWN.\n" ); |
1359 | } |
1360 | |
1361 | LogVaOperation( |
1362 | VirtualMemoryLogging::VirtualOperation::Allocate, |
1363 | lpAddress, |
1364 | dwSize, |
1365 | flAllocationType, |
1366 | flProtect, |
1367 | NULL, |
1368 | TRUE); |
1369 | |
1370 | if ( flAllocationType & MEM_RESET ) |
1371 | { |
1372 | if ( flAllocationType != MEM_RESET ) |
1373 | { |
1374 | ASSERT( "MEM_RESET cannot be used with any other allocation flags in flAllocationType.\n" ); |
1375 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1376 | goto done; |
1377 | } |
1378 | |
1379 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1380 | pRetVal = VIRTUALResetMemory( pthrCurrent, lpAddress, dwSize ); |
1381 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
1382 | |
1383 | if ( !pRetVal ) |
1384 | { |
1385 | /* Error messages are already displayed, just leave. */ |
1386 | goto done; |
1387 | } |
1388 | } |
1389 | |
1390 | if ( flAllocationType & MEM_RESERVE ) |
1391 | { |
1392 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1393 | pRetVal = VIRTUALReserveMemory( pthrCurrent, lpAddress, dwSize, flAllocationType, flProtect ); |
1394 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
1395 | |
1396 | if ( !pRetVal ) |
1397 | { |
1398 | /* Error messages are already displayed, just leave. */ |
1399 | goto done; |
1400 | } |
1401 | } |
1402 | |
1403 | if ( flAllocationType & MEM_COMMIT ) |
1404 | { |
1405 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1406 | if ( pRetVal != NULL ) |
1407 | { |
1408 | /* We are reserving and committing. */ |
1409 | pRetVal = VIRTUALCommitMemory( pthrCurrent, pRetVal, dwSize, |
1410 | flAllocationType, flProtect ); |
1411 | } |
1412 | else |
1413 | { |
1414 | /* Just a commit. */ |
1415 | pRetVal = VIRTUALCommitMemory( pthrCurrent, lpAddress, dwSize, |
1416 | flAllocationType, flProtect ); |
1417 | } |
1418 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
1419 | } |
1420 | |
1421 | done: |
1422 | #if defined _DEBUG |
1423 | VIRTUALDisplayList(); |
1424 | #endif |
1425 | LOGEXIT("VirtualAlloc returning %p\n " , pRetVal ); |
1426 | PERF_EXIT(VirtualAlloc); |
1427 | return pRetVal; |
1428 | } |
1429 | |
1430 | /*++ |
1431 | Function: |
1432 | VirtualFree |
1433 | |
1434 | See MSDN doc. |
1435 | --*/ |
1436 | BOOL |
1437 | PALAPI |
1438 | VirtualFree( |
1439 | IN LPVOID lpAddress, /* Address of region. */ |
1440 | IN SIZE_T dwSize, /* Size of region. */ |
1441 | IN DWORD dwFreeType ) /* Operation type. */ |
1442 | { |
1443 | BOOL bRetVal = TRUE; |
1444 | CPalThread *pthrCurrent; |
1445 | |
1446 | PERF_ENTRY(VirtualFree); |
1447 | ENTRY("VirtualFree(lpAddress=%p, dwSize=%u, dwFreeType=%#x)\n" , |
1448 | lpAddress, dwSize, dwFreeType); |
1449 | |
1450 | pthrCurrent = InternalGetCurrentThread(); |
1451 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1452 | |
1453 | /* Sanity Checks. */ |
1454 | if ( !lpAddress ) |
1455 | { |
1456 | ERROR( "lpAddress cannot be NULL. You must specify the base address of\ |
1457 | regions to be de-committed. \n" ); |
1458 | pthrCurrent->SetLastError( ERROR_INVALID_ADDRESS ); |
1459 | bRetVal = FALSE; |
1460 | goto VirtualFreeExit; |
1461 | } |
1462 | |
1463 | if ( !( dwFreeType & MEM_RELEASE ) && !(dwFreeType & MEM_DECOMMIT ) ) |
1464 | { |
1465 | ERROR( "dwFreeType must contain one of the following: \ |
1466 | MEM_RELEASE or MEM_DECOMMIT\n" ); |
1467 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1468 | bRetVal = FALSE; |
1469 | goto VirtualFreeExit; |
1470 | } |
1471 | /* You cannot release and decommit in one call.*/ |
1472 | if ( dwFreeType & MEM_RELEASE && dwFreeType & MEM_DECOMMIT ) |
1473 | { |
1474 | ERROR( "MEM_RELEASE cannot be combined with MEM_DECOMMIT.\n" ); |
1475 | bRetVal = FALSE; |
1476 | goto VirtualFreeExit; |
1477 | } |
1478 | |
1479 | if ( dwFreeType & MEM_DECOMMIT ) |
1480 | { |
1481 | UINT_PTR StartBoundary = 0; |
1482 | SIZE_T MemSize = 0; |
1483 | |
1484 | if ( dwSize == 0 ) |
1485 | { |
1486 | ERROR( "dwSize cannot be 0. \n" ); |
1487 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1488 | bRetVal = FALSE; |
1489 | goto VirtualFreeExit; |
1490 | } |
1491 | /* |
1492 | * A two-byte range straddling 2 pages causes both pages to be either |
1493 | * released or decommitted. So round the dwSize up to the next page |
1494 | * boundary and round the lpAddress down to the next page boundary. |
1495 | */ |
1496 | StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize()); |
1497 | MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary; |
1498 | |
1499 | PCMI pUnCommittedMem; |
1500 | pUnCommittedMem = VIRTUALFindRegionInformation( StartBoundary ); |
1501 | if (!pUnCommittedMem) |
1502 | { |
1503 | ASSERT( "Unable to locate the region information.\n" ); |
1504 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1505 | bRetVal = FALSE; |
1506 | goto VirtualFreeExit; |
1507 | } |
1508 | |
1509 | TRACE( "Un-committing the following page(s) %d to %d.\n" , |
1510 | StartBoundary, MemSize ); |
1511 | |
1512 | // Explicitly calling mmap instead of mprotect here makes it |
1513 | // that much more clear to the operating system that we no |
1514 | // longer need these pages. |
1515 | if ( mmap( (LPVOID)StartBoundary, MemSize, PROT_NONE, |
1516 | MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0 ) != MAP_FAILED ) |
1517 | { |
1518 | #if (MMAP_ANON_IGNORES_PROTECTION) |
1519 | if (mprotect((LPVOID) StartBoundary, MemSize, PROT_NONE) != 0) |
1520 | { |
1521 | ASSERT("mprotect failed to protect the region!\n" ); |
1522 | pthrCurrent->SetLastError(ERROR_INTERNAL_ERROR); |
1523 | munmap((LPVOID) StartBoundary, MemSize); |
1524 | bRetVal = FALSE; |
1525 | goto VirtualFreeExit; |
1526 | } |
1527 | #endif // MMAP_ANON_IGNORES_PROTECTION |
1528 | |
1529 | SIZE_T index = 0; |
1530 | SIZE_T nNumOfPagesToChange = 0; |
1531 | |
1532 | /* Mark the pages as reserved again; they can be re-committed later by calling VirtualAlloc() with MEM_COMMIT. */ |
1533 | index = (StartBoundary - pUnCommittedMem->startBoundary) / GetVirtualPageSize(); |
1534 | |
1535 | nNumOfPagesToChange = MemSize / GetVirtualPageSize(); |
1536 | VIRTUALSetAllocState( MEM_RESERVE, index, |
1537 | nNumOfPagesToChange, pUnCommittedMem ); |
1538 | |
1539 | goto VirtualFreeExit; |
1540 | } |
1541 | else |
1542 | { |
1543 | ASSERT( "mmap() returned an abnormal value.\n" ); |
1544 | bRetVal = FALSE; |
1545 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1546 | goto VirtualFreeExit; |
1547 | } |
1548 | } |
1549 | |
1550 | if ( dwFreeType & MEM_RELEASE ) |
1551 | { |
1552 | PCMI pMemoryToBeReleased = |
1553 | VIRTUALFindRegionInformation( (UINT_PTR)lpAddress ); |
1554 | |
1555 | if ( !pMemoryToBeReleased ) |
1556 | { |
1557 | ERROR( "lpAddress must be the base address returned by VirtualAlloc.\n" ); |
1558 | pthrCurrent->SetLastError( ERROR_INVALID_ADDRESS ); |
1559 | bRetVal = FALSE; |
1560 | goto VirtualFreeExit; |
1561 | } |
1562 | if ( dwSize != 0 ) |
1563 | { |
1564 | ERROR( "dwSize must be 0 if you are releasing the memory.\n" ); |
1565 | pthrCurrent->SetLastError( ERROR_INVALID_PARAMETER ); |
1566 | bRetVal = FALSE; |
1567 | goto VirtualFreeExit; |
1568 | } |
1569 | |
1570 | TRACE( "Releasing the following memory %d to %d.\n" , |
1571 | pMemoryToBeReleased->startBoundary, pMemoryToBeReleased->memSize ); |
1572 | |
1573 | if ( munmap( (LPVOID)pMemoryToBeReleased->startBoundary, |
1574 | pMemoryToBeReleased->memSize ) == 0 ) |
1575 | { |
1576 | if ( VIRTUALReleaseMemory( pMemoryToBeReleased ) == FALSE ) |
1577 | { |
1578 | ASSERT( "Unable to remove the PCMI entry from the list.\n" ); |
1579 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1580 | bRetVal = FALSE; |
1581 | goto VirtualFreeExit; |
1582 | } |
1583 | pMemoryToBeReleased = NULL; |
1584 | } |
1585 | else |
1586 | { |
1587 | ASSERT( "Unable to unmap the memory, munmap() returned an abnormal value.\n" ); |
1588 | pthrCurrent->SetLastError( ERROR_INTERNAL_ERROR ); |
1589 | bRetVal = FALSE; |
1590 | goto VirtualFreeExit; |
1591 | } |
1592 | } |
1593 | |
1594 | VirtualFreeExit: |
1595 | |
1596 | LogVaOperation( |
1597 | (dwFreeType & MEM_DECOMMIT) ? VirtualMemoryLogging::VirtualOperation::Decommit |
1598 | : VirtualMemoryLogging::VirtualOperation::Release, |
1599 | lpAddress, |
1600 | dwSize, |
1601 | dwFreeType, |
1602 | 0, |
1603 | NULL, |
1604 | bRetVal); |
1605 | |
1606 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
1607 | LOGEXIT( "VirtualFree returning %s.\n" , bRetVal == TRUE ? "TRUE" : "FALSE" ); |
1608 | PERF_EXIT(VirtualFree); |
1609 | return bRetVal; |
1610 | } |
1611 | |
1612 | |
1613 | /*++ |
1614 | Function: |
1615 | VirtualProtect |
1616 | |
1617 | See MSDN doc. |
1618 | --*/ |
1619 | BOOL |
1620 | PALAPI |
1621 | VirtualProtect( |
1622 | IN LPVOID lpAddress, |
1623 | IN SIZE_T dwSize, |
1624 | IN DWORD flNewProtect, |
1625 | OUT PDWORD lpflOldProtect) |
1626 | { |
1627 | BOOL bRetVal = FALSE; |
1628 | PCMI pEntry = NULL; |
1629 | SIZE_T MemSize = 0; |
1630 | UINT_PTR StartBoundary = 0; |
1631 | SIZE_T Index = 0; |
1632 | SIZE_T NumberOfPagesToChange = 0; |
1633 | SIZE_T OffSet = 0; |
1634 | CPalThread * pthrCurrent; |
1635 | |
1636 | PERF_ENTRY(VirtualProtect); |
1637 | ENTRY("VirtualProtect(lpAddress=%p, dwSize=%u, flNewProtect=%#x, " |
1638 | "flOldProtect=%p)\n" , |
1639 | lpAddress, dwSize, flNewProtect, lpflOldProtect); |
1640 | |
1641 | pthrCurrent = InternalGetCurrentThread(); |
1642 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1643 | |
1644 | StartBoundary = (UINT_PTR) ALIGN_DOWN(lpAddress, GetVirtualPageSize()); |
1645 | MemSize = ALIGN_UP((UINT_PTR)lpAddress + dwSize, GetVirtualPageSize()) - StartBoundary; |
1646 | |
1647 | if ( VIRTUALContainsInvalidProtectionFlags( flNewProtect ) ) |
1648 | { |
1649 | ASSERT( "flProtect can be one of PAGE_NOACCESS, PAGE_READONLY, " |
1650 | "PAGE_READWRITE, PAGE_EXECUTE, PAGE_EXECUTE_READ " |
1651 | ", or PAGE_EXECUTE_READWRITE. \n" ); |
1652 | SetLastError( ERROR_INVALID_PARAMETER ); |
1653 | goto ExitVirtualProtect; |
1654 | } |
1655 | |
1656 | if ( !lpflOldProtect) |
1657 | { |
1658 | ERROR( "lpflOldProtect was invalid.\n" ); |
1659 | SetLastError( ERROR_NOACCESS ); |
1660 | goto ExitVirtualProtect; |
1661 | } |
1662 | |
1663 | pEntry = VIRTUALFindRegionInformation( StartBoundary ); |
1664 | if ( NULL != pEntry ) |
1665 | { |
1666 | /* See if the pages are committed. */ |
1667 | Index = OffSet = StartBoundary - pEntry->startBoundary == 0 ? |
1668 | 0 : ( StartBoundary - pEntry->startBoundary ) / GetVirtualPageSize(); |
1669 | NumberOfPagesToChange = MemSize / GetVirtualPageSize(); |
1670 | |
1671 | TRACE( "Number of pages to check %d, starting page %d \n" , NumberOfPagesToChange, Index ); |
1672 | |
1673 | for ( ; Index < NumberOfPagesToChange; Index++ ) |
1674 | { |
1675 | if ( !VIRTUALIsPageCommitted( Index, pEntry ) ) |
1676 | { |
1677 | ERROR( "You can only change the protection attributes" |
1678 | " on committed memory.\n" ) |
1679 | SetLastError( ERROR_INVALID_ADDRESS ); |
1680 | goto ExitVirtualProtect; |
1681 | } |
1682 | } |
1683 | } |
1684 | |
1685 | if ( 0 == mprotect( (LPVOID)StartBoundary, MemSize, |
1686 | W32toUnixAccessControl( flNewProtect ) ) ) |
1687 | { |
1688 | /* Reset the access protection. */ |
1689 | TRACE( "Number of pages to change %d, starting page %d \n" , |
1690 | NumberOfPagesToChange, OffSet ); |
1691 | /* |
1692 | * Set the old protection flags. We only use the first page's flag, so |
1693 | * if the range covers several pages, each with different flags, only the |
1694 | * first page's protection flag will be returned. |
1695 | */ |
1696 | if ( pEntry ) |
1697 | { |
1698 | *lpflOldProtect = |
1699 | VIRTUALConvertVirtualFlags( pEntry->pProtectionState[ OffSet ] ); |
1700 | |
1701 | memset( pEntry->pProtectionState + OffSet, |
1702 | VIRTUALConvertWinFlags( flNewProtect ), |
1703 | NumberOfPagesToChange ); |
1704 | } |
1705 | else |
1706 | { |
1707 | *lpflOldProtect = PAGE_EXECUTE_READWRITE; |
1708 | } |
1709 | bRetVal = TRUE; |
1710 | } |
1711 | else |
1712 | { |
1713 | ERROR( "%s\n" , strerror( errno ) ); |
1714 | if ( errno == EINVAL ) |
1715 | { |
1716 | SetLastError( ERROR_INVALID_ADDRESS ); |
1717 | } |
1718 | else if ( errno == EACCES ) |
1719 | { |
1720 | SetLastError( ERROR_INVALID_ACCESS ); |
1721 | } |
1722 | } |
1723 | ExitVirtualProtect: |
1724 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
1725 | |
1726 | #if defined _DEBUG |
1727 | VIRTUALDisplayList(); |
1728 | #endif |
1729 | LOGEXIT( "VirtualProtect returning %s.\n" , bRetVal == TRUE ? "TRUE" : "FALSE" ); |
1730 | PERF_EXIT(VirtualProtect); |
1731 | return bRetVal; |
1732 | } |
1733 | |
1734 | #if HAVE_VM_ALLOCATE |
1735 | //--------------------------------------------------------------------------------------- |
1736 | // |
1737 | // Convert a vm_prot_t flag on the Mach kernel to the corresponding memory protection on Windows. |
1738 | // |
1739 | // Arguments: |
1740 | // protection - Mach protection to be converted |
1741 | // |
1742 | // Return Value: |
//    Return the corresponding memory protection on Windows (e.g. PAGE_READWRITE, etc.)
1744 | // |
1745 | |
1746 | static DWORD VirtualMapMachProtectToWinProtect(vm_prot_t protection) |
1747 | { |
1748 | if (protection & VM_PROT_READ) |
1749 | { |
1750 | if (protection & VM_PROT_WRITE) |
1751 | { |
1752 | if (protection & VM_PROT_EXECUTE) |
1753 | { |
1754 | return PAGE_EXECUTE_READWRITE; |
1755 | } |
1756 | else |
1757 | { |
1758 | return PAGE_READWRITE; |
1759 | } |
1760 | } |
1761 | else |
1762 | { |
1763 | if (protection & VM_PROT_EXECUTE) |
1764 | { |
1765 | return PAGE_EXECUTE_READ; |
1766 | } |
1767 | else |
1768 | { |
1769 | return PAGE_READONLY; |
1770 | } |
1771 | } |
1772 | } |
1773 | else |
1774 | { |
1775 | if (protection & VM_PROT_WRITE) |
1776 | { |
1777 | if (protection & VM_PROT_EXECUTE) |
1778 | { |
1779 | return PAGE_EXECUTE_WRITECOPY; |
1780 | } |
1781 | else |
1782 | { |
1783 | return PAGE_WRITECOPY; |
1784 | } |
1785 | } |
1786 | else |
1787 | { |
1788 | if (protection & VM_PROT_EXECUTE) |
1789 | { |
1790 | return PAGE_EXECUTE; |
1791 | } |
1792 | else |
1793 | { |
1794 | return PAGE_NOACCESS; |
1795 | } |
1796 | } |
1797 | } |
1798 | } |
1799 | |
1800 | static void VM_ALLOCATE_VirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer) |
1801 | { |
1802 | kern_return_t MachRet; |
1803 | vm_address_t vm_address; |
1804 | vm_size_t vm_size; |
1805 | vm_region_flavor_t vm_flavor; |
1806 | mach_msg_type_number_t infoCnt; |
1807 | mach_port_t object_name; |
1808 | #ifdef BIT64 |
1809 | vm_region_basic_info_data_64_t info; |
1810 | infoCnt = VM_REGION_BASIC_INFO_COUNT_64; |
1811 | vm_flavor = VM_REGION_BASIC_INFO_64; |
1812 | #else |
1813 | vm_region_basic_info_data_t info; |
1814 | infoCnt = VM_REGION_BASIC_INFO_COUNT; |
1815 | vm_flavor = VM_REGION_BASIC_INFO; |
1816 | #endif |
1817 | |
1818 | vm_address = (vm_address_t)lpAddress; |
1819 | #ifdef BIT64 |
1820 | MachRet = vm_region_64( |
1821 | #else |
1822 | MachRet = vm_region( |
1823 | #endif |
1824 | mach_task_self(), |
1825 | &vm_address, |
1826 | &vm_size, |
1827 | vm_flavor, |
1828 | (vm_region_info_t)&info, |
1829 | &infoCnt, |
1830 | &object_name); |
1831 | if (MachRet != KERN_SUCCESS) { |
1832 | return; |
1833 | } |
1834 | |
1835 | if (vm_address > (vm_address_t)lpAddress) { |
1836 | /* lpAddress was pointing into a free region */ |
1837 | lpBuffer->State = MEM_FREE; |
1838 | return; |
1839 | } |
1840 | |
1841 | lpBuffer->BaseAddress = (PVOID)vm_address; |
1842 | |
1843 | // We don't actually have any information on the Mach kernel which maps to AllocationProtect. |
1844 | lpBuffer->AllocationProtect = VM_PROT_NONE; |
1845 | |
1846 | lpBuffer->RegionSize = (SIZE_T)vm_size; |
1847 | |
1848 | if (info.reserved) |
1849 | { |
1850 | lpBuffer->State = MEM_RESERVE; |
1851 | } |
1852 | else |
1853 | { |
1854 | lpBuffer->State = MEM_COMMIT; |
1855 | } |
1856 | |
1857 | lpBuffer->Protect = VirtualMapMachProtectToWinProtect(info.protection); |
1858 | |
1859 | /* Note that if a mapped region and a private region are adjacent, this |
1860 | will return MEM_PRIVATE but the region size will span |
1861 | both the mapped and private regions. */ |
1862 | if (!info.shared) |
1863 | { |
1864 | lpBuffer->Type = MEM_PRIVATE; |
1865 | } |
1866 | else |
1867 | { |
1868 | // What should this be? It's either MEM_MAPPED or MEM_IMAGE, but without an image list, |
1869 | // we can't determine which one it is. |
1870 | lpBuffer->Type = MEM_MAPPED; |
1871 | } |
1872 | } |
1873 | #endif // HAVE_VM_ALLOCATE |
1874 | |
1875 | /*++ |
1876 | Function: |
1877 | VirtualQuery |
1878 | |
1879 | See MSDN doc. |
1880 | --*/ |
1881 | SIZE_T |
1882 | PALAPI |
1883 | VirtualQuery( |
1884 | IN LPCVOID lpAddress, |
1885 | OUT PMEMORY_BASIC_INFORMATION lpBuffer, |
1886 | IN SIZE_T dwLength) |
1887 | { |
1888 | PCMI pEntry = NULL; |
1889 | UINT_PTR StartBoundary = 0; |
1890 | CPalThread * pthrCurrent; |
1891 | |
1892 | PERF_ENTRY(VirtualQuery); |
1893 | ENTRY("VirtualQuery(lpAddress=%p, lpBuffer=%p, dwLength=%u)\n" , |
1894 | lpAddress, lpBuffer, dwLength); |
1895 | |
1896 | pthrCurrent = InternalGetCurrentThread(); |
1897 | InternalEnterCriticalSection(pthrCurrent, &virtual_critsec); |
1898 | |
1899 | if ( !lpBuffer) |
1900 | { |
1901 | ERROR( "lpBuffer has to be a valid pointer.\n" ); |
1902 | pthrCurrent->SetLastError( ERROR_NOACCESS ); |
1903 | goto ExitVirtualQuery; |
1904 | } |
1905 | if ( dwLength < sizeof( *lpBuffer ) ) |
1906 | { |
1907 | ERROR( "dwLength cannot be smaller then the size of *lpBuffer.\n" ); |
1908 | pthrCurrent->SetLastError( ERROR_BAD_LENGTH ); |
1909 | goto ExitVirtualQuery; |
1910 | } |
1911 | |
1912 | StartBoundary = ALIGN_DOWN((SIZE_T)lpAddress, GetVirtualPageSize()); |
1913 | |
1914 | #if MMAP_IGNORES_HINT |
1915 | // Make sure we have memory to map before we try to query it. |
1916 | VIRTUALGetBackingFile(pthrCurrent); |
1917 | |
1918 | // If we're suballocating, claim that any memory that isn't in our |
1919 | // suballocated block is already allocated. This keeps callers from |
1920 | // using these results to try to allocate those blocks and failing. |
1921 | if (StartBoundary < (UINT_PTR) gBackingBaseAddress || |
1922 | StartBoundary >= (UINT_PTR) gBackingBaseAddress + BACKING_FILE_SIZE) |
1923 | { |
1924 | if (StartBoundary < (UINT_PTR) gBackingBaseAddress) |
1925 | { |
1926 | lpBuffer->RegionSize = (UINT_PTR) gBackingBaseAddress - StartBoundary; |
1927 | } |
1928 | else |
1929 | { |
1930 | lpBuffer->RegionSize = -StartBoundary; |
1931 | } |
1932 | lpBuffer->BaseAddress = (void *) StartBoundary; |
1933 | lpBuffer->State = MEM_COMMIT; |
1934 | lpBuffer->Type = MEM_MAPPED; |
1935 | lpBuffer->AllocationProtect = 0; |
1936 | lpBuffer->Protect = 0; |
1937 | goto ExitVirtualQuery; |
1938 | } |
1939 | #endif // MMAP_IGNORES_HINT |
1940 | |
1941 | /* Find the entry. */ |
1942 | pEntry = VIRTUALFindRegionInformation( StartBoundary ); |
1943 | |
1944 | if ( !pEntry ) |
1945 | { |
1946 | /* Can't find a match, or no list present. */ |
1947 | /* Next, looking for this region in file maps */ |
1948 | if (!MAPGetRegionInfo((LPVOID)StartBoundary, lpBuffer)) |
1949 | { |
1950 | // When all else fails, call vm_region() if it's available. |
1951 | |
1952 | // Initialize the State to be MEM_FREE, in which case AllocationBase, AllocationProtect, |
1953 | // Protect, and Type are all undefined. |
1954 | lpBuffer->BaseAddress = (LPVOID)StartBoundary; |
1955 | lpBuffer->RegionSize = 0; |
1956 | lpBuffer->State = MEM_FREE; |
1957 | #if HAVE_VM_ALLOCATE |
1958 | VM_ALLOCATE_VirtualQuery(lpAddress, lpBuffer); |
1959 | #endif |
1960 | } |
1961 | } |
1962 | else |
1963 | { |
1964 | /* Starting page. */ |
1965 | SIZE_T Index = ( StartBoundary - pEntry->startBoundary ) / GetVirtualPageSize(); |
1966 | |
1967 | /* Attributes to check for. */ |
1968 | BYTE AccessProtection = pEntry->pProtectionState[ Index ]; |
1969 | INT AllocationType = VIRTUALGetAllocationType( Index, pEntry ); |
1970 | SIZE_T RegionSize = 0; |
1971 | |
1972 | TRACE( "Index = %d, Number of Pages = %d. \n" , |
1973 | Index, pEntry->memSize / GetVirtualPageSize() ); |
1974 | |
1975 | while ( Index < pEntry->memSize / GetVirtualPageSize() && |
1976 | VIRTUALGetAllocationType( Index, pEntry ) == AllocationType && |
1977 | pEntry->pProtectionState[ Index ] == AccessProtection ) |
1978 | { |
1979 | RegionSize += GetVirtualPageSize(); |
1980 | Index++; |
1981 | } |
1982 | |
1983 | TRACE( "RegionSize = %d.\n" , RegionSize ); |
1984 | |
1985 | /* Fill the structure.*/ |
1986 | lpBuffer->AllocationProtect = pEntry->accessProtection; |
1987 | lpBuffer->BaseAddress = (LPVOID)StartBoundary; |
1988 | |
1989 | lpBuffer->Protect = AllocationType == MEM_COMMIT ? |
1990 | VIRTUALConvertVirtualFlags( AccessProtection ) : 0; |
1991 | |
1992 | lpBuffer->RegionSize = RegionSize; |
1993 | lpBuffer->State = |
1994 | ( AllocationType == MEM_COMMIT ? MEM_COMMIT : MEM_RESERVE ); |
1995 | WARN( "Ignoring lpBuffer->Type. \n" ); |
1996 | } |
1997 | |
1998 | ExitVirtualQuery: |
1999 | |
2000 | InternalLeaveCriticalSection(pthrCurrent, &virtual_critsec); |
2001 | |
2002 | LOGEXIT( "VirtualQuery returning %d.\n" , sizeof( *lpBuffer ) ); |
2003 | PERF_EXIT(VirtualQuery); |
2004 | return sizeof( *lpBuffer ); |
2005 | } |
2006 | |
2007 | size_t GetVirtualPageSize() |
2008 | { |
2009 | _ASSERTE(s_virtualPageSize); |
2010 | return s_virtualPageSize; |
2011 | } |
2012 | |
2013 | /*++ |
2014 | Function: |
2015 | GetWriteWatch |
2016 | |
2017 | See MSDN doc. |
2018 | --*/ |
2019 | UINT |
2020 | PALAPI |
2021 | GetWriteWatch( |
2022 | IN DWORD dwFlags, |
2023 | IN PVOID lpBaseAddress, |
2024 | IN SIZE_T dwRegionSize, |
2025 | OUT PVOID *lpAddresses, |
2026 | IN OUT PULONG_PTR lpdwCount, |
2027 | OUT PULONG lpdwGranularity |
2028 | ) |
2029 | { |
2030 | // TODO: implement this method |
2031 | *lpAddresses = NULL; |
2032 | *lpdwCount = 0; |
2033 | // Until it is implemented, return non-zero value as an indicator of failure |
2034 | return 1; |
2035 | } |
2036 | |
2037 | /*++ |
2038 | Function: |
2039 | ResetWriteWatch |
2040 | |
2041 | See MSDN doc. |
2042 | --*/ |
2043 | UINT |
2044 | PALAPI |
2045 | ResetWriteWatch( |
2046 | IN LPVOID lpBaseAddress, |
2047 | IN SIZE_T dwRegionSize |
2048 | ) |
2049 | { |
2050 | // TODO: implement this method |
2051 | // Until it is implemented, return non-zero value as an indicator of failure |
2052 | return 1; |
2053 | } |
2054 | |
2055 | /*++ |
2056 | Function : |
2057 | ReserveMemoryFromExecutableAllocator |
2058 | |
    This function is used to reserve a region of virtual memory (not committed)
2060 | that is located close to the coreclr library. The memory comes from the virtual |
2061 | address range that is managed by ExecutableMemoryAllocator. |
2062 | --*/ |
2063 | void* ReserveMemoryFromExecutableAllocator(CPalThread* pThread, SIZE_T allocationSize) |
2064 | { |
2065 | #ifdef BIT64 |
2066 | InternalEnterCriticalSection(pThread, &virtual_critsec); |
2067 | void* mem = g_executableMemoryAllocator.AllocateMemory(allocationSize); |
2068 | InternalLeaveCriticalSection(pThread, &virtual_critsec); |
2069 | |
2070 | return mem; |
2071 | #else // !BIT64 |
2072 | return nullptr; |
2073 | #endif // BIT64 |
2074 | } |
2075 | |
2076 | /*++ |
2077 | Function: |
2078 | ExecutableMemoryAllocator::Initialize() |
2079 | |
2080 | This function initializes the allocator. It should be called early during process startup |
    (when the process address space is pretty much empty) in order to have a chance to reserve
    a sufficient amount of memory that is close to the coreclr library.
2083 | |
2084 | --*/ |
2085 | void ExecutableMemoryAllocator::Initialize() |
2086 | { |
2087 | m_startAddress = NULL; |
2088 | m_nextFreeAddress = NULL; |
2089 | m_totalSizeOfReservedMemory = 0; |
2090 | m_remainingReservedMemory = 0; |
2091 | |
2092 | // Enable the executable memory allocator on 64-bit platforms only |
    // because 32-bit platforms have a limited amount of virtual address space.
2094 | #ifdef BIT64 |
2095 | TryReserveInitialMemory(); |
2096 | #endif // BIT64 |
2097 | |
2098 | } |
2099 | |
2100 | /*++ |
2101 | Function: |
2102 | ExecutableMemoryAllocator::TryReserveInitialMemory() |
2103 | |
    This function is called during PAL initialization. It opportunistically tries to reserve
    a large chunk of virtual memory that can later be used to store JIT'ed code.
2106 | |
2107 | --*/ |
2108 | void ExecutableMemoryAllocator::TryReserveInitialMemory() |
2109 | { |
2110 | CPalThread* pthrCurrent = InternalGetCurrentThread(); |
2111 | int32_t sizeOfAllocation = MaxExecutableMemorySizeNearCoreClr; |
2112 | int32_t preferredStartAddressIncrement; |
2113 | UINT_PTR preferredStartAddress; |
2114 | UINT_PTR coreclrLoadAddress; |
2115 | const int32_t MemoryProbingIncrement = 128 * 1024 * 1024; |
2116 | |
2117 | // Try to find and reserve an available region of virtual memory that is located |
2118 | // within 2GB range (defined by the MaxExecutableMemorySizeNearCoreClr constant) from the |
2119 | // location of the coreclr library. |
2120 | // Potentially, as a possible future improvement, we can get precise information |
2121 | // about available memory ranges by parsing data from '/proc/self/maps'. |
2122 | // But since this code is called early during process startup, the user address space |
    // is pretty much empty, so the simple algorithm that is implemented below is sufficient
2124 | // for this purpose. |
2125 | |
2126 | // First of all, we need to determine the current address of libcoreclr. Please note that depending on |
2127 | // the OS implementation, the library is usually loaded either at the end or at the start of the user |
2128 | // address space. If the library is loaded at low addresses then try to reserve memory above libcoreclr |
    // (thus avoiding reserving memory below 4GB; besides, some operating systems do not allow that).
2130 | // If libcoreclr is loaded at high addresses then try to reserve memory below its location. |
2131 | coreclrLoadAddress = (UINT_PTR)PAL_GetSymbolModuleBase((void*)VirtualAlloc); |
2132 | if ((coreclrLoadAddress < 0xFFFFFFFF) || ((coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr) < 0xFFFFFFFF)) |
2133 | { |
2134 | // Try to allocate above the location of libcoreclr |
2135 | preferredStartAddress = coreclrLoadAddress + CoreClrLibrarySize; |
2136 | preferredStartAddressIncrement = MemoryProbingIncrement; |
2137 | } |
2138 | else |
2139 | { |
2140 | // Try to allocate below the location of libcoreclr |
2141 | preferredStartAddress = coreclrLoadAddress - MaxExecutableMemorySizeNearCoreClr; |
2142 | preferredStartAddressIncrement = 0; |
2143 | } |
2144 | |
2145 | // Do actual memory reservation. |
2146 | do |
2147 | { |
2148 | m_startAddress = ReserveVirtualMemory(pthrCurrent, (void*)preferredStartAddress, sizeOfAllocation); |
2149 | if (m_startAddress != nullptr) |
2150 | { |
2151 | break; |
2152 | } |
2153 | |
2154 | // Try to allocate a smaller region |
2155 | sizeOfAllocation -= MemoryProbingIncrement; |
2156 | preferredStartAddress += preferredStartAddressIncrement; |
2157 | |
2158 | } while (sizeOfAllocation >= MemoryProbingIncrement); |
2159 | |
2160 | if (m_startAddress == nullptr) |
2161 | { |
2162 | // We were not able to reserve any memory near libcoreclr. Try to reserve approximately 2 GB of address space somewhere |
2163 | // anyway: |
2164 | // - This sets aside address space that can be used for executable code, such that jumps/calls between such code may |
2165 | // continue to use short relative addresses instead of long absolute addresses that would currently require jump |
2166 | // stubs. |
2167 | // - The inability to allocate memory in a specific range for jump stubs is an unrecoverable problem. This reservation |
2168 | // would mitigate such issues that can become prevalent depending on which security features are enabled and to what |
2169 | // extent, such as in particular, PaX's RANDMMAP: |
2170 | // - https://en.wikibooks.org/wiki/Grsecurity/Appendix/Grsecurity_and_PaX_Configuration_Options |
2171 | // - Jump stubs for executable code residing in this region can request memory from this allocator |
2172 | // - Native images can be loaded into this address space, including any jump stubs that are required for its helper |
2173 | // table. This satisfies the vast majority of practical cases where the total amount of loaded native image memory |
2174 | // does not exceed approximately 2 GB. |
2175 | // - The code heap allocator for the JIT can allocate from this address space. Beyond this reservation, one can use |
2176 | // the COMPlus_CodeHeapReserveForJumpStubs environment variable to reserve space for jump stubs. |
2177 | sizeOfAllocation = MaxExecutableMemorySize; |
2178 | m_startAddress = ReserveVirtualMemory(pthrCurrent, nullptr, sizeOfAllocation); |
2179 | if (m_startAddress == nullptr) |
2180 | { |
2181 | return; |
2182 | } |
2183 | } |
2184 | |
2185 | // Memory has been successfully reserved. |
2186 | m_totalSizeOfReservedMemory = sizeOfAllocation; |
2187 | |
2188 | // Randomize the location at which we start allocating from the reserved memory range. Alignment to a 64 KB granularity |
2189 | // should not be necessary, but see AllocateMemory() for the reason why it is done. |
2190 | int32_t randomOffset = GenerateRandomStartOffset(); |
2191 | m_nextFreeAddress = ALIGN_UP((void*)(((UINT_PTR)m_startAddress) + randomOffset), VIRTUAL_64KB); |
2192 | _ASSERTE(sizeOfAllocation >= (UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress); |
2193 | m_remainingReservedMemory = |
2194 | ALIGN_DOWN(sizeOfAllocation - ((UINT_PTR)m_nextFreeAddress - (UINT_PTR)m_startAddress), VIRTUAL_64KB); |
2195 | } |
2196 | |
2197 | /*++ |
2198 | Function: |
2199 | ExecutableMemoryAllocator::AllocateMemory |
2200 | |
2201 | This function attempts to allocate the requested amount of memory from its reserved virtual |
2202 | address space. The function will return null if the allocation request cannot |
2203 | be satisfied by the memory that is currently available in the allocator. |
2204 | |
2205 | Note: This function MUST be called with the virtual_critsec lock held. |
2206 | |
2207 | --*/ |
2208 | void* ExecutableMemoryAllocator::AllocateMemory(SIZE_T allocationSize) |
2209 | { |
2210 | #ifdef BIT64 |
2211 | void* allocatedMemory = nullptr; |
2212 | |
2213 | // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but |
2214 | // VIRTUALReserveMemory() aligns down the specified address to a 64 KB granularity, and as long as that is necessary, the |
2215 | // reservation size here must be aligned to a 64 KB granularity to guarantee that all returned addresses are also aligned to |
2216 | // a 64 KB granularity. Otherwise, attempting to reserve memory starting from an unaligned address returned by this function |
2217 | // would fail in VIRTUALReserveMemory. |
2218 | _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB)); |
2219 | |
2220 | // The code below assumes that the caller owns the virtual_critsec lock. |
    // So the calculations are not done in a thread-safe manner.
2222 | if ((allocationSize > 0) && (allocationSize <= m_remainingReservedMemory)) |
2223 | { |
2224 | allocatedMemory = m_nextFreeAddress; |
2225 | m_nextFreeAddress = (void*)(((UINT_PTR)m_nextFreeAddress) + allocationSize); |
2226 | m_remainingReservedMemory -= allocationSize; |
2227 | } |
2228 | |
2229 | return allocatedMemory; |
2230 | #else // !BIT64 |
2231 | return nullptr; |
2232 | #endif // BIT64 |
2233 | } |
2234 | |
2235 | /*++ |
2236 | Function: |
    ExecutableMemoryAllocator::AllocateMemoryWithinRange
2238 | |
2239 | This function attempts to allocate the requested amount of memory from its reserved virtual |
2240 | address space, if memory is available within the specified range. The function will return |
    null if the allocation request cannot be satisfied by the memory that is currently available in
2242 | the allocator. |
2243 | |
2244 | Note: This function MUST be called with the virtual_critsec lock held. |
2245 | --*/ |
2246 | void *ExecutableMemoryAllocator::AllocateMemoryWithinRange(const void *beginAddress, const void *endAddress, SIZE_T allocationSize) |
2247 | { |
2248 | #ifdef BIT64 |
2249 | _ASSERTE(beginAddress <= endAddress); |
2250 | |
2251 | // Alignment to a 64 KB granularity should not be necessary (alignment to page size should be sufficient), but see |
2252 | // AllocateMemory() for the reason why it is necessary |
2253 | _ASSERTE(IS_ALIGNED(allocationSize, VIRTUAL_64KB)); |
2254 | |
2255 | // The code below assumes that the caller owns the virtual_critsec lock. |
    // So the calculations are not done in a thread-safe manner.
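    // Because this is a simple bump-pointer allocator over a single contiguous
    // reservation, the request can only succeed if the current free pointer already
    // lies within [beginAddress, endAddress - allocationSize].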
2257 | |
2258 | if (allocationSize == 0 || allocationSize > m_remainingReservedMemory) |
2259 | { |
2260 | return nullptr; |
2261 | } |
2262 | |
2263 | void *address = m_nextFreeAddress; |
2264 | if (address < beginAddress) |
2265 | { |
2266 | return nullptr; |
2267 | } |
2268 | |
2269 | void *nextFreeAddress = (void *)((UINT_PTR)address + allocationSize); |
2270 | if (nextFreeAddress > endAddress) |
2271 | { |
2272 | return nullptr; |
2273 | } |
2274 | |
2275 | m_nextFreeAddress = nextFreeAddress; |
2276 | m_remainingReservedMemory -= allocationSize; |
2277 | return address; |
2278 | #else // !BIT64 |
2279 | return nullptr; |
2280 | #endif // BIT64 |
2281 | } |
2282 | |
2283 | /*++ |
2284 | Function: |
2285 | ExecutableMemoryAllocator::GenerateRandomStartOffset() |
2286 | |
2287 | This function returns a random offset (in multiples of the virtual page size) |
2288 | at which the allocator should start allocating memory from its reserved memory range. |
2289 | |
2290 | --*/ |
2291 | int32_t ExecutableMemoryAllocator::GenerateRandomStartOffset() |
2292 | { |
2293 | int32_t pageCount; |
2294 | const int32_t MaxStartPageOffset = 64; |
2295 | |
2296 | // This code is similar to what coreclr runtime does on Windows. |
2297 | // It generates a random number of pages to skip between 0...MaxStartPageOffset. |
2298 | srandom(time(NULL)); |
2299 | pageCount = (int32_t)(MaxStartPageOffset * (int64_t)random() / RAND_MAX); |
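    // With a 4 KB virtual page size, for example, this yields an offset of at most
    // 64 pages, i.e. 256 KB.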
2300 | |
2301 | return pageCount * GetVirtualPageSize(); |
2302 | } |
2303 | |