1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | // |
5 | |
6 | // |
7 | |
8 | |
9 | #include "common.h" |
10 | |
11 | #include "hosting.h" |
12 | #include "mscoree.h" |
13 | #include "mscoreepriv.h" |
14 | #include "corhost.h" |
15 | #include "threads.h" |
16 | |
17 | |
18 | #define countof(x) (sizeof(x) / sizeof(x[0])) |
19 | |
// Copied from winbase.h.
21 | #ifndef STARTF_TITLEISAPPID |
22 | #define STARTF_TITLEISAPPID 0x00001000 |
23 | #endif |
24 | #ifndef STARTF_PREVENTPINNING |
25 | #define STARTF_PREVENTPINNING 0x00002000 |
26 | #endif |
27 | |
// Flags encoded in the first parameter of CorLaunchApplication.
29 | #define MASK_NOTPINNABLE 0x80000000 |
30 | #define MASK_HOSTTYPE 0x00000003 |
31 | #define MASK_DONT_SHOW_INSTALL_DIALOG 0x00000100 |
32 | |
33 | #ifdef _DEBUG |
34 | // This function adds a static annotation read by SCAN to indicate HOST_CALLS. Its |
35 | // purpose is to be called from the BEGIN_SO_TOLERANT_CODE_CALLING_HOST macro, to |
36 | // effectively mark all functions that use BEGIN_SO_TOLERANT_CODE_CALLING_HOST as being |
37 | // HOST_CALLS. If you hit a SCAN violation that references AddHostCallsStaticMarker, then |
38 | // you have a function marked as HOST_NOCALLS that eventually calls into a function that |
39 | // uses BEGIN_SO_TOLERANT_CODE_CALLING_HOST. |
40 | DEBUG_NOINLINE void AddHostCallsStaticMarker() |
41 | { |
42 | STATIC_CONTRACT_NOTHROW; |
43 | STATIC_CONTRACT_CANNOT_TAKE_LOCK; |
44 | STATIC_CONTRACT_GC_NOTRIGGER; |
45 | STATIC_CONTRACT_SO_TOLERANT; |
46 | STATIC_CONTRACT_HOST_CALLS; |
47 | |
48 | METHOD_CANNOT_BE_FOLDED_DEBUG; |
49 | } |
50 | #endif //_DEBUG |
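
// A hedged sketch (not from this file) of how the marker above is consumed: the
// BEGIN/END_SO_TOLERANT_CODE_CALLING_HOST macros bracket host callouts, and in debug
// builds the BEGIN macro calls AddHostCallsStaticMarker() so that SCAN sees the
// enclosing function as HOST_CALLS. The exact macro shape and the host callout below
// are assumptions for illustration only.
//
//   BEGIN_SO_TOLERANT_CODE_CALLING_HOST(GetThread());
//   hr = pHostMemoryManager->VirtualAlloc(...);   // hypothetical host callout
//   END_SO_TOLERANT_CODE_CALLING_HOST;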
51 | |
52 | // |
53 | // memory management functions |
54 | // |
55 | |
// Global debug-only tracking utilities.
57 | #ifdef _DEBUG |
58 | |
59 | static const LONG MaxGlobalAllocCount = 8; |
60 | |
61 | class GlobalAllocStore { |
62 | public: |
63 | static void AddAlloc (LPVOID p) |
64 | { |
65 | LIMITED_METHOD_CONTRACT; |
66 | |
67 | if (!p) { |
68 | return; |
69 | } |
70 | if (m_Disabled) { |
71 | return; |
72 | } |
73 | |
74 | //InterlockedIncrement (&numMemWriter); |
75 | //if (CheckMemFree) { |
76 | // goto Return; |
77 | //} |
78 | |
// m_Count is the number of allocations we have ever attempted; it may legitimately
// exceed the size of m_Alloc[].
81 | InterlockedIncrement (&m_Count); |
82 | |
// This is by no means an accurate record of heap allocations: the algorithm used
// here cannot guarantee that an allocation is recorded in m_Alloc[] even when there
// is free space. However, this is only used for debugging purposes and, most
// importantly, m_Count is accurate.
87 | for (size_t n = 0; n < countof(m_Alloc); n ++) { |
88 | if (m_Alloc[n] == 0) { |
89 | if (InterlockedCompareExchangeT(&m_Alloc[n],p,0) == 0) { |
90 | return; |
91 | } |
92 | } |
93 | } |
94 | |
95 | //InterlockedDecrement (&numMemWriter); |
96 | } |
97 | |
// This is called in the non-host case, where we do not care about frees that happen
// after the alloc store has been disabled.
100 | static BOOL RemoveAlloc (LPVOID p) |
101 | { |
102 | LIMITED_METHOD_CONTRACT; |
103 | |
104 | if (m_Disabled) |
105 | { |
106 | return TRUE; |
107 | } |
// Decrement the counter even though we might not find the allocation in m_Alloc[],
// because it is possible for an allocation not to have been recorded in the array.
111 | InterlockedDecrement (&m_Count); |
// Linear scan (the array is small and unsorted).
113 | for (size_t n = 0; n < countof(m_Alloc); n ++) { |
114 | if (m_Alloc[n] == p) { |
115 | m_Alloc[n] = 0; |
116 | return TRUE; |
117 | } |
118 | } |
119 | return FALSE; |
120 | } |
121 | |
// This is called in the host case: if the store has been disabled, we want to
// guarantee that we do not try to free anything the host does not know about.
124 | static void ValidateFree(LPVOID p) |
125 | { |
126 | LIMITED_METHOD_CONTRACT; |
127 | |
128 | if (p == 0) { |
129 | return; |
130 | } |
131 | if (m_Disabled) { |
132 | for (size_t n = 0; n < countof(m_Alloc); n ++) { |
// There can be misses here, because an allocation might not have been recorded
// in the array.
135 | if (m_Alloc[n] == p) { |
136 | _ASSERTE (!"Free a memory that host interface does not know" ); |
137 | return; |
138 | } |
139 | } |
140 | } |
141 | } |
142 | |
143 | static void Validate() |
144 | { |
145 | LIMITED_METHOD_CONTRACT; |
146 | |
147 | if (m_Count > MaxGlobalAllocCount) { |
148 | _ASSERTE (!"Using too many memory allocator before Host Interface is set up" ); |
149 | } |
150 | |
151 | //while (numMemWriter != 0) { |
152 | // Sleep(5); |
153 | //} |
154 | //qsort (GlobalMemAddr, (MemAllocCount>MaxAllocCount)?MaxAllocCount:MemAllocCount, sizeof(LPVOID), MemAddrCompare); |
155 | } |
156 | |
157 | static void Disable () |
158 | { |
159 | LIMITED_METHOD_CONTRACT; |
160 | if (!m_Disabled) |
161 | { |
162 | // Let all threads know |
163 | InterlockedIncrement((LONG*)&m_Disabled); |
164 | } |
165 | } |
166 | |
167 | private: |
168 | static BOOL m_Disabled; |
169 | static LPVOID m_Alloc[MaxGlobalAllocCount]; |
// m_Count is the number of allocations we have attempted; it may legitimately be
// larger than the size of m_Alloc[].
172 | static LONG m_Count; |
173 | // static LONG numMemWriter = 0; |
174 | }; |
175 | |
176 | // used from corhost.cpp |
177 | void ValidateHostInterface() |
178 | { |
179 | WRAPPER_NO_CONTRACT; |
180 | |
181 | GlobalAllocStore::Validate(); |
182 | GlobalAllocStore::Disable(); |
183 | } |
184 | |
185 | void DisableGlobalAllocStore () |
186 | { |
187 | WRAPPER_NO_CONTRACT; |
188 | GlobalAllocStore::Disable(); |
189 | } |
190 | LPVOID GlobalAllocStore::m_Alloc[MaxGlobalAllocCount]; |
191 | LONG GlobalAllocStore::m_Count = 0; |
192 | BOOL GlobalAllocStore::m_Disabled = FALSE; |
193 | |
194 | #endif |
195 | |
196 | |
197 | |
198 | HANDLE g_ExecutableHeapHandle = NULL; |
199 | |
200 | #undef VirtualAlloc |
201 | LPVOID EEVirtualAlloc(LPVOID lpAddress, SIZE_T dwSize, DWORD flAllocationType, DWORD flProtect) { |
202 | CONTRACTL |
203 | { |
204 | NOTHROW; |
205 | GC_NOTRIGGER; |
206 | SO_TOLERANT; |
207 | } |
208 | CONTRACTL_END; |
209 | |
210 | #ifdef FAILPOINTS_ENABLED |
211 | if (RFS_HashStack ()) |
212 | return NULL; |
213 | #endif |
214 | |
215 | |
216 | #ifdef _DEBUG |
217 | if (g_fEEStarted) { |
218 | _ASSERTE (!EEAllocationDisallowed()); |
219 | } |
220 | _ASSERTE (lpAddress || (dwSize % g_SystemInfo.dwAllocationGranularity) == 0); |
221 | #endif |
222 | |
223 | { |
224 | |
225 | LPVOID p = NULL; |
226 | |
227 | #ifdef _DEBUG |
228 | { |
229 | DEBUG_ONLY_REGION(); |
230 | |
231 | if (lpAddress == NULL && (flAllocationType & MEM_RESERVE) != 0 && PEDecoder::GetForceRelocs()) |
232 | { |
233 | #ifdef _WIN64 |
234 | // Try to allocate memory all over the place when we are stressing relocations on _WIN64. |
235 | // This will make sure that we generate jump stubs correctly among other things. |
236 | static BYTE* ptr = (BYTE*)0x234560000; |
237 | ptr += 0x123450000; |
238 | // Wrap around |
239 | if (ptr < (BYTE *)BOT_MEMORY || ptr > (BYTE *)TOP_MEMORY) |
240 | { |
241 | // Make sure to keep the alignment of the ptr so that we are not |
242 | // trying the same places over and over again |
243 | ptr = (BYTE*)BOT_MEMORY + (((SIZE_T)ptr) & 0xFFFFFFFF); |
244 | } |
245 | p = ::VirtualAlloc(ptr, dwSize, flAllocationType, flProtect); |
246 | #else |
247 | // Allocate memory top to bottom to stress ngen fixups with LARGEADDRESSAWARE support. |
248 | p = ::VirtualAlloc(lpAddress, dwSize, flAllocationType | MEM_TOP_DOWN, flProtect); |
249 | #endif // _WIN64 |
250 | } |
251 | } |
252 | #endif // _DEBUG |
253 | |
254 | // Fall back to the default method if the forced relocation failed |
255 | if (p == NULL) |
256 | { |
257 | p = ::VirtualAlloc (lpAddress, dwSize, flAllocationType, flProtect); |
258 | } |
259 | |
260 | #ifdef _DEBUG |
261 | GlobalAllocStore::AddAlloc (p); |
262 | #endif |
263 | |
if (p == NULL) {
    STRESS_LOG_OOM_STACK(dwSize);
}
267 | |
268 | return p; |
269 | } |
270 | |
271 | } |
272 | #define VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect) Dont_Use_VirtualAlloc(lpAddress, dwSize, flAllocationType, flProtect) |
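
// Illustrative usage sketch (not part of the original file): a typical reserve, commit,
// release cycle through the EE wrappers, assuming the usual 64KB allocation granularity
// so that the reservation-size assert above is satisfied.
//
//   LPVOID block = EEVirtualAlloc(NULL, 0x10000, MEM_RESERVE, PAGE_NOACCESS);
//   if (block != NULL)
//   {
//       if (EEVirtualAlloc(block, 0x1000, MEM_COMMIT, PAGE_READWRITE) != NULL)
//       {
//           // ... use the committed page ...
//       }
//       EEVirtualFree(block, 0, MEM_RELEASE);
//   }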
273 | |
274 | #undef VirtualFree |
275 | BOOL EEVirtualFree(LPVOID lpAddress, SIZE_T dwSize, DWORD dwFreeType) { |
276 | CONTRACTL |
277 | { |
278 | NOTHROW; |
279 | GC_NOTRIGGER; |
280 | SO_TOLERANT; |
281 | } |
282 | CONTRACTL_END; |
283 | |
284 | BOOL retVal = FALSE; |
285 | |
286 | { |
287 | #ifdef _DEBUG |
288 | GlobalAllocStore::RemoveAlloc (lpAddress); |
289 | #endif |
290 | |
291 | retVal = (BOOL)(BYTE)::VirtualFree (lpAddress, dwSize, dwFreeType); |
292 | } |
293 | |
294 | return retVal; |
295 | } |
296 | #define VirtualFree(lpAddress, dwSize, dwFreeType) Dont_Use_VirtualFree(lpAddress, dwSize, dwFreeType) |
297 | |
298 | #undef VirtualQuery |
299 | SIZE_T EEVirtualQuery(LPCVOID lpAddress, PMEMORY_BASIC_INFORMATION lpBuffer, SIZE_T dwLength) |
300 | { |
301 | CONTRACTL |
302 | { |
303 | NOTHROW; |
304 | GC_NOTRIGGER; |
305 | SO_TOLERANT; |
306 | } |
307 | CONTRACTL_END; |
308 | |
309 | { |
310 | return ::VirtualQuery(lpAddress, lpBuffer, dwLength); |
311 | } |
312 | } |
313 | #define VirtualQuery(lpAddress, lpBuffer, dwLength) Dont_Use_VirtualQuery(lpAddress, lpBuffer, dwLength) |
314 | |
315 | #undef VirtualProtect |
316 | BOOL EEVirtualProtect(LPVOID lpAddress, SIZE_T dwSize, DWORD flNewProtect, PDWORD lpflOldProtect) |
317 | { |
318 | CONTRACTL |
319 | { |
320 | NOTHROW; |
321 | GC_NOTRIGGER; |
322 | SO_TOLERANT; |
323 | } |
324 | CONTRACTL_END; |
325 | |
326 | { |
327 | return ::VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect); |
328 | } |
329 | } |
330 | #define VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) Dont_Use_VirtualProtect(lpAddress, dwSize, flNewProtect, lpflOldProtect) |
331 | |
332 | #undef GetProcessHeap |
333 | HANDLE EEGetProcessHeap() |
334 | { |
335 | // Note: this can be called a little early for real contracts, so we use static contracts instead. |
336 | STATIC_CONTRACT_NOTHROW; |
337 | STATIC_CONTRACT_GC_NOTRIGGER; |
338 | STATIC_CONTRACT_SO_TOLERANT; |
339 | |
340 | { |
341 | return GetProcessHeap(); |
342 | } |
343 | } |
344 | #define GetProcessHeap() Dont_Use_GetProcessHeap() |
345 | |
346 | #undef HeapCreate |
347 | HANDLE EEHeapCreate(DWORD flOptions, SIZE_T dwInitialSize, SIZE_T dwMaximumSize) |
348 | { |
349 | CONTRACTL |
350 | { |
351 | NOTHROW; |
352 | GC_NOTRIGGER; |
353 | SO_TOLERANT; |
354 | } |
355 | CONTRACTL_END; |
356 | |
357 | #ifndef FEATURE_PAL |
358 | |
359 | { |
360 | return ::HeapCreate(flOptions, dwInitialSize, dwMaximumSize); |
361 | } |
362 | #else // !FEATURE_PAL |
363 | return NULL; |
364 | #endif // !FEATURE_PAL |
365 | } |
366 | #define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize) |
367 | |
368 | #undef HeapDestroy |
369 | BOOL EEHeapDestroy(HANDLE hHeap) |
370 | { |
371 | CONTRACTL |
372 | { |
373 | NOTHROW; |
374 | GC_NOTRIGGER; |
375 | SO_TOLERANT; |
376 | } |
377 | CONTRACTL_END; |
378 | |
379 | #ifndef FEATURE_PAL |
380 | |
381 | { |
382 | return ::HeapDestroy(hHeap); |
383 | } |
384 | #else // !FEATURE_PAL |
385 | UNREACHABLE(); |
386 | #endif // !FEATURE_PAL |
387 | } |
388 | #define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap) |
389 | |
390 | #ifdef _DEBUG |
391 | #ifdef _TARGET_X86_ |
392 | #define OS_HEAP_ALIGN 8 |
393 | #else |
394 | #define OS_HEAP_ALIGN 16 |
395 | #endif |
396 | #endif |
397 | |
398 | |
399 | #undef HeapAlloc |
400 | LPVOID EEHeapAlloc(HANDLE hHeap, DWORD dwFlags, SIZE_T dwBytes) |
401 | { |
402 | STATIC_CONTRACT_NOTHROW; |
403 | STATIC_CONTRACT_SO_INTOLERANT; |
404 | |
405 | #ifdef FAILPOINTS_ENABLED |
406 | if (RFS_HashStack ()) |
407 | return NULL; |
408 | #endif |
409 | |
410 | |
411 | { |
412 | |
413 | LPVOID p = NULL; |
414 | #ifdef _DEBUG |
415 | // Store the heap handle to detect heap contamination |
416 | p = ::HeapAlloc (hHeap, dwFlags, dwBytes + OS_HEAP_ALIGN); |
417 | if(p) |
418 | { |
419 | *((HANDLE*)p) = hHeap; |
420 | p = (BYTE*)p + OS_HEAP_ALIGN; |
421 | } |
422 | GlobalAllocStore::AddAlloc (p); |
423 | #else |
424 | p = ::HeapAlloc (hHeap, dwFlags, dwBytes); |
425 | #endif |
426 | |
if (p == NULL
    // Under OOM we might not be able to get the execution engine, and so cannot
    // reach the stress log.
    && GetExecutionEngine ()
    // If the StressLog ring buffer has not been created yet, we should not try to
    // use it: StressLog itself allocates memory, and we could enter an endless loop.
    && ClrFlsGetValue(TlsIdx_StressLog) != NULL)
433 | { |
434 | STRESS_LOG_OOM_STACK(dwBytes); |
435 | } |
436 | |
437 | return p; |
438 | } |
439 | } |
440 | #define HeapAlloc(hHeap, dwFlags, dwBytes) Dont_Use_HeapAlloc(hHeap, dwFlags, dwBytes) |
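
// Debug-build layout note (a sketch of what the code above does, not a formal contract):
// EEHeapAlloc over-allocates by OS_HEAP_ALIGN bytes and stashes the source heap handle
// in the prefix, so the block looks like:
//
//   [ HANDLE hHeap | pad to OS_HEAP_ALIGN ][ caller's dwBytes ... ]
//   ^ raw ::HeapAlloc result               ^ pointer returned to the caller
//
// EEHeapFree undoes the offset and asserts that the stored handle matches its hHeap
// argument, which is how heap contamination is detected.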
441 | |
442 | LPVOID EEHeapAllocInProcessHeap(DWORD dwFlags, SIZE_T dwBytes) |
443 | { |
444 | WRAPPER_NO_CONTRACT; |
445 | STATIC_CONTRACT_SO_TOLERANT; |
446 | |
447 | #ifdef _DEBUG |
// Verify that the (indispensable) implicit cast in ClrAllocInProcessHeapBootstrap is
// safe: EEHeapAllocInProcessHeap must match the FastAllocInProcessHeapFunc signature.
449 | static FastAllocInProcessHeapFunc pFunc = EEHeapAllocInProcessHeap; |
450 | #endif |
451 | |
452 | static HANDLE ProcessHeap = NULL; |
453 | |
// We need to guarantee very small stack consumption while allocating, and we cannot
// allow an SO to happen while calling into the host. This forces a hard SO, which is
// OK because we should never get this close to the stack limit inside the EE in
// SO-intolerant code; it should only fail when called directly from outside the EE,
// such as from the JIT.
458 | MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread()); |
459 | |
460 | if (ProcessHeap == NULL) |
461 | ProcessHeap = EEGetProcessHeap(); |
462 | |
463 | return EEHeapAlloc(ProcessHeap,dwFlags,dwBytes); |
464 | } |
465 | |
466 | #undef HeapFree |
467 | BOOL EEHeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) |
468 | { |
469 | STATIC_CONTRACT_NOTHROW; |
470 | STATIC_CONTRACT_GC_NOTRIGGER; |
471 | STATIC_CONTRACT_SO_TOLERANT; |
472 | |
473 | // @todo - Need a backout validation here. |
474 | CONTRACT_VIOLATION(SOToleranceViolation); |
475 | |
476 | |
477 | BOOL retVal = FALSE; |
478 | |
479 | { |
480 | #ifdef _DEBUG |
481 | GlobalAllocStore::RemoveAlloc (lpMem); |
482 | |
483 | if (lpMem != NULL) |
484 | { |
485 | // Check the heap handle to detect heap contamination |
486 | lpMem = (BYTE*)lpMem - OS_HEAP_ALIGN; |
487 | HANDLE storedHeapHandle = *((HANDLE*)lpMem); |
488 | if(storedHeapHandle != hHeap) |
489 | _ASSERTE(!"Heap contamination detected! HeapFree was called on a heap other than the one that memory was allocated from.\n" |
490 | "Possible cause: you used new (executable) to allocate the memory, but didn't use DeleteExecutable() to free it." ); |
491 | } |
492 | #endif |
// DON'T REMOVE THIS SEEMINGLY USELESS CAST
//
// On AMD64, the OS HeapFree calls RtlFreeHeap, which returns a 1-byte BOOLEAN.
// HeapFree does not properly sanitize the return value, so the other 3 bytes that
// come back can be junk; without the truncation, the returned value could never
// compare equal to FALSE.
499 | retVal = (BOOL)(BYTE)::HeapFree (hHeap, dwFlags, lpMem); |
500 | } |
501 | |
502 | return retVal; |
503 | } |
504 | #define HeapFree(hHeap, dwFlags, lpMem) Dont_Use_HeapFree(hHeap, dwFlags, lpMem) |
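
// Illustrative pairing sketch (not part of the original file): memory from EEHeapAlloc
// must be released with EEHeapFree on the same heap handle, or the debug contamination
// check above fires.
//
//   HANDLE heap = EEGetProcessHeap();
//   LPVOID mem  = EEHeapAlloc(heap, 0, 128);
//   if (mem != NULL)
//   {
//       // ... use the 128 bytes ...
//       EEHeapFree(heap, 0, mem);
//   }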
505 | |
506 | BOOL EEHeapFreeInProcessHeap(DWORD dwFlags, LPVOID lpMem) |
507 | { |
508 | CONTRACTL |
509 | { |
510 | NOTHROW; |
511 | GC_NOTRIGGER; |
512 | SO_TOLERANT; |
513 | MODE_ANY; |
514 | } |
515 | CONTRACTL_END; |
516 | |
517 | #ifdef _DEBUG |
// Verify that the (indispensable) implicit cast in ClrFreeInProcessHeapBootstrap is
// safe: EEHeapFreeInProcessHeap must match the FastFreeInProcessHeapFunc signature.
519 | static FastFreeInProcessHeapFunc pFunc = EEHeapFreeInProcessHeap; |
520 | #endif |
521 | |
// See the comments in EEHeapFree and EEHeapAllocInProcessHeap; this code still needs
// more thought about its SO-tolerance story.
524 | //CONTRACT_VIOLATION(SOToleranceViolation); |
525 | |
526 | static HANDLE ProcessHeap = NULL; |
527 | |
528 | if (ProcessHeap == NULL) |
529 | ProcessHeap = EEGetProcessHeap(); |
530 | |
531 | return EEHeapFree(ProcessHeap,dwFlags,lpMem); |
532 | } |
533 | |
534 | |
535 | #undef HeapValidate |
536 | BOOL EEHeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem) { |
537 | STATIC_CONTRACT_NOTHROW; |
538 | STATIC_CONTRACT_GC_NOTRIGGER; |
539 | |
540 | #ifndef FEATURE_PAL |
541 | |
542 | { |
543 | return ::HeapValidate(hHeap, dwFlags, lpMem); |
544 | } |
545 | #else // !FEATURE_PAL |
546 | return TRUE; |
547 | #endif // !FEATURE_PAL |
548 | } |
549 | #define HeapValidate(hHeap, dwFlags, lpMem) Dont_Use_HeapValidate(hHeap, dwFlags, lpMem) |
550 | |
551 | HANDLE EEGetProcessExecutableHeap() { |
552 | // Note: this can be called a little early for real contracts, so we use static contracts instead. |
553 | STATIC_CONTRACT_NOTHROW; |
554 | STATIC_CONTRACT_GC_NOTRIGGER; |
555 | |
556 | |
557 | #ifndef FEATURE_PAL |
558 | |
559 | // |
560 | // Create the executable heap lazily |
561 | // |
562 | #undef HeapCreate |
563 | #undef HeapDestroy |
564 | if (g_ExecutableHeapHandle == NULL) |
565 | { |
566 | |
567 | HANDLE ExecutableHeapHandle = HeapCreate( |
568 | HEAP_CREATE_ENABLE_EXECUTE, // heap allocation attributes |
569 | 0, // initial heap size |
570 | 0 // maximum heap size; 0 == growable |
571 | ); |
572 | |
573 | if (ExecutableHeapHandle == NULL) |
574 | return NULL; |
575 | |
576 | HANDLE ExistingValue = InterlockedCompareExchangeT(&g_ExecutableHeapHandle, ExecutableHeapHandle, NULL); |
577 | if (ExistingValue != NULL) |
578 | { |
579 | HeapDestroy(ExecutableHeapHandle); |
580 | } |
581 | } |
582 | |
583 | #define HeapCreate(flOptions, dwInitialSize, dwMaximumSize) Dont_Use_HeapCreate(flOptions, dwInitialSize, dwMaximumSize) |
584 | #define HeapDestroy(hHeap) Dont_Use_HeapDestroy(hHeap) |
585 | |
586 | #else // !FEATURE_PAL |
587 | UNREACHABLE(); |
588 | #endif // !FEATURE_PAL |
589 | |
590 | |
591 | // TODO: implement hosted executable heap |
592 | return g_ExecutableHeapHandle; |
593 | } |
594 | |
595 | |
596 | #undef SleepEx |
597 | #undef Sleep |
598 | DWORD EESleepEx(DWORD dwMilliseconds, BOOL bAlertable) |
599 | { |
600 | CONTRACTL |
601 | { |
602 | NOTHROW; |
603 | GC_NOTRIGGER; |
604 | MODE_ANY; |
605 | SO_TOLERANT; |
606 | } |
607 | CONTRACTL_END; |
608 | |
609 | DWORD res; |
610 | |
611 | { |
612 | res = ::SleepEx(dwMilliseconds, bAlertable); |
613 | } |
614 | |
615 | return res; |
616 | } |
617 | #define SleepEx(dwMilliseconds,bAlertable) \ |
618 | Dont_Use_SleepEx(dwMilliseconds,bAlertable) |
619 | #define Sleep(a) Dont_Use_Sleep(a) |
620 | |
// Returns a non-zero value if this function causes the OS to switch to another thread.
// See file:spinlock.h#SwitchToThreadSpinning for an explanation of dwSwitchCount.
623 | BOOL __SwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount) |
624 | { |
625 | CONTRACTL |
626 | { |
627 | NOTHROW; |
628 | GC_NOTRIGGER; |
629 | MODE_ANY; |
630 | SO_TOLERANT; |
631 | } |
632 | CONTRACTL_END; |
633 | |
634 | return __DangerousSwitchToThread(dwSleepMSec, dwSwitchCount, FALSE); |
635 | } |
636 | |
637 | #undef SleepEx |
638 | BOOL __DangerousSwitchToThread (DWORD dwSleepMSec, DWORD dwSwitchCount, BOOL goThroughOS) |
639 | { |
640 | // If you sleep for a long time, the thread should be in Preemptive GC mode. |
641 | CONTRACTL |
642 | { |
643 | NOTHROW; |
644 | GC_NOTRIGGER; |
645 | MODE_ANY; |
646 | SO_TOLERANT; |
647 | PRECONDITION(dwSleepMSec < 10000 || GetThread() == NULL || !GetThread()->PreemptiveGCDisabled()); |
648 | } |
649 | CONTRACTL_END; |
650 | |
651 | if (dwSleepMSec > 0) |
652 | { |
// When called with goThroughOS, make sure not to call into the host. This function
// may be called from GetRuntimeFunctionCallback(), which the OS calls to determine
// the personality routine when it needs to unwind managed code off the stack. When
// this happens in the context of an SO, we want to avoid calling into the host.
657 | if (goThroughOS) |
658 | ::SleepEx(dwSleepMSec, FALSE); |
659 | else |
660 | ClrSleepEx(dwSleepMSec,FALSE); |
661 | return TRUE; |
662 | } |
663 | |
664 | // In deciding when to insert sleeps, we wait until we have been spinning |
665 | // for a long time and then always sleep. The former is to let short perf-critical |
666 | // __SwitchToThread loops avoid context switches. The latter is to ensure |
667 | // that if many threads are spinning waiting for a lower-priority thread |
668 | // to run that they will eventually all be asleep at the same time. |
669 | // |
// The specific values are derived from the NDP 2.0 SP1 fix: it waits for 8 million
// cycles of __SwitchToThread calls, where each call takes ~300-500 cycles, which
// means we should wait in the neighborhood of 25,000 calls.
673 | // |
674 | // As of early 2011, ARM CPUs are much slower, so we need a lower threshold. |
675 | // The following two values appear to yield roughly equivalent spin times |
676 | // on their respective platforms. |
677 | // |
678 | #ifdef _TARGET_ARM_ |
679 | #define SLEEP_START_THRESHOLD (5 * 1024) |
680 | #else |
681 | #define SLEEP_START_THRESHOLD (32 * 1024) |
682 | #endif |
683 | |
684 | _ASSERTE(CALLER_LIMITS_SPINNING < SLEEP_START_THRESHOLD); |
685 | if (dwSwitchCount >= SLEEP_START_THRESHOLD) |
686 | { |
687 | if (goThroughOS) |
688 | ::SleepEx(1, FALSE); |
689 | else |
690 | ClrSleepEx(1, FALSE); |
691 | } |
692 | |
693 | { |
694 | return SwitchToThread(); |
695 | } |
696 | } |
697 | #define SleepEx(dwMilliseconds,bAlertable) \ |
698 | Dont_Use_SleepEx(dwMilliseconds,bAlertable) |
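
// Illustrative spin-wait sketch (not part of the original file): callers pass a
// monotonically increasing switch count so that short waits stay cheap and long waits
// eventually sleep, per file:spinlock.h#SwitchToThreadSpinning. TryAcquire is a
// hypothetical predicate.
//
//   DWORD switchCount = 0;
//   while (!TryAcquire())
//   {
//       __SwitchToThread(0, ++switchCount);
//   }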
699 | |
700 | // Locking routines supplied by the EE to the other DLLs of the CLR. In a _DEBUG |
701 | // build of the EE, we poison the Crst as a poor man's attempt to do some argument |
702 | // validation. |
703 | #define POISON_BITS 3 |
704 | |
705 | static inline CRITSEC_COOKIE CrstToCookie(Crst * pCrst) { |
706 | LIMITED_METHOD_CONTRACT; |
707 | |
708 | _ASSERTE((((uintptr_t) pCrst) & POISON_BITS) == 0); |
709 | #ifdef _DEBUG |
710 | if (pCrst) |
711 | { |
712 | pCrst = (Crst *) (((uintptr_t) pCrst) | POISON_BITS); |
713 | } |
714 | #endif |
715 | return (CRITSEC_COOKIE) pCrst; |
716 | } |
717 | |
718 | static inline Crst *CookieToCrst(CRITSEC_COOKIE cookie) { |
719 | LIMITED_METHOD_CONTRACT; |
720 | |
721 | _ASSERTE((((uintptr_t) cookie) & POISON_BITS) == POISON_BITS); |
722 | #ifdef _DEBUG |
723 | cookie = (CRITSEC_COOKIE) (((uintptr_t) cookie) & ~POISON_BITS); |
724 | #endif |
725 | return (Crst *) cookie; |
726 | } |
727 | |
728 | CRITSEC_COOKIE EECreateCriticalSection(CrstType crstType, CrstFlags flags) { |
729 | CONTRACTL |
730 | { |
731 | NOTHROW; |
732 | GC_NOTRIGGER; |
733 | } |
734 | CONTRACTL_END; |
735 | |
736 | CRITSEC_COOKIE ret = NULL; |
737 | |
738 | EX_TRY |
739 | { |
740 | // This may be controversial, but seems like the correct discipline. If the |
741 | // EE has called out to any other DLL of the CLR in cooperative mode, we |
742 | // arbitrarily force lock acquisition to occur in preemptive mode. See our |
743 | // treatment of AcquireLock below. |
744 | //_ASSERTE((flags & (CRST_UNSAFE_COOPGC | CRST_UNSAFE_ANYMODE)) == 0); |
745 | ret = CrstToCookie(new Crst(crstType, flags)); |
746 | } |
747 | EX_CATCH |
748 | { |
749 | } |
750 | EX_END_CATCH(SwallowAllExceptions); |
751 | |
752 | // Note: we'll return NULL if the create fails. That's a true NULL, not a poisoned NULL. |
753 | return ret; |
754 | } |
755 | |
756 | void EEDeleteCriticalSection(CRITSEC_COOKIE cookie) |
757 | { |
758 | CONTRACTL |
759 | { |
760 | NOTHROW; |
761 | WRAPPER(GC_NOTRIGGER); |
762 | SO_TOLERANT; |
763 | } |
764 | CONTRACTL_END; |
765 | |
766 | VALIDATE_BACKOUT_STACK_CONSUMPTION; |
767 | |
768 | Crst *pCrst = CookieToCrst(cookie); |
769 | _ASSERTE(pCrst); |
770 | |
771 | delete pCrst; |
772 | } |
773 | |
774 | DEBUG_NOINLINE void EEEnterCriticalSection(CRITSEC_COOKIE cookie) { |
775 | |
776 | // Entering a critical section has many different contracts |
777 | // depending on the flags used to initialize the critical section. |
778 | // See CrstBase::Enter() for the actual contract. It's much too |
779 | // complex to repeat here. |
780 | |
781 | CONTRACTL |
782 | { |
783 | WRAPPER(THROWS); |
784 | WRAPPER(GC_TRIGGERS); |
785 | SO_INTOLERANT; |
786 | } |
787 | CONTRACTL_END; |
788 | |
789 | ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT; |
790 | |
791 | Crst *pCrst = CookieToCrst(cookie); |
792 | _ASSERTE(pCrst); |
793 | |
794 | pCrst->Enter(); |
795 | } |
796 | |
797 | DEBUG_NOINLINE void EELeaveCriticalSection(CRITSEC_COOKIE cookie) |
798 | { |
799 | CONTRACTL |
800 | { |
801 | NOTHROW; |
802 | GC_NOTRIGGER; |
803 | SO_INTOLERANT; |
804 | } |
805 | CONTRACTL_END; |
806 | |
807 | ANNOTATION_SPECIAL_HOLDER_CALLER_NEEDS_DYNAMIC_CONTRACT; |
808 | |
809 | Crst *pCrst = CookieToCrst(cookie); |
810 | _ASSERTE(pCrst); |
811 | |
812 | pCrst->Leave(); |
813 | } |
814 | |
815 | LPVOID EETlsGetValue(DWORD slot) |
816 | { |
817 | STATIC_CONTRACT_GC_NOTRIGGER; |
818 | STATIC_CONTRACT_NOTHROW; |
819 | STATIC_CONTRACT_MODE_ANY; |
820 | STATIC_CONTRACT_CANNOT_TAKE_LOCK; |
821 | STATIC_CONTRACT_SO_TOLERANT; |
822 | |
823 | // |
824 | // @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify |
825 | // CheckThreadState to not throw, or catch any exception and just return NULL. |
826 | // |
827 | //CONTRACT_VIOLATION(ThrowsViolation); |
828 | SCAN_IGNORE_THROW; |
829 | |
830 | void **pTlsData = CExecutionEngine::CheckThreadState(slot, FALSE); |
831 | |
832 | if (pTlsData) |
833 | return pTlsData[slot]; |
834 | else |
835 | return NULL; |
836 | } |
837 | |
838 | BOOL EETlsCheckValue(DWORD slot, LPVOID * pValue) |
839 | { |
840 | STATIC_CONTRACT_GC_NOTRIGGER; |
841 | STATIC_CONTRACT_NOTHROW; |
842 | STATIC_CONTRACT_MODE_ANY; |
843 | STATIC_CONTRACT_SO_TOLERANT; |
844 | |
845 | // |
846 | // @todo: we don't want TlsGetValue to throw, but CheckThreadState throws right now. Either modify |
847 | // CheckThreadState to not throw, or catch any exception and just return NULL. |
848 | // |
849 | //CONTRACT_VIOLATION(ThrowsViolation); |
850 | SCAN_IGNORE_THROW; |
851 | |
852 | void **pTlsData = CExecutionEngine::CheckThreadState(slot, FALSE); |
853 | |
854 | if (pTlsData) |
855 | { |
856 | *pValue = pTlsData[slot]; |
857 | return TRUE; |
858 | } |
859 | |
860 | return FALSE; |
861 | } |
862 | |
863 | VOID EETlsSetValue(DWORD slot, LPVOID pData) |
864 | { |
865 | STATIC_CONTRACT_GC_NOTRIGGER; |
866 | STATIC_CONTRACT_THROWS; |
867 | STATIC_CONTRACT_MODE_ANY; |
868 | STATIC_CONTRACT_SO_TOLERANT; |
869 | |
870 | void **pTlsData = CExecutionEngine::CheckThreadState(slot); |
871 | |
872 | if (pTlsData) // Yes, CheckThreadState(slot, TRUE) can return NULL now. |
873 | { |
874 | pTlsData[slot] = pData; |
875 | } |
876 | } |
877 | |
878 | BOOL EEAllocationDisallowed() |
879 | { |
880 | WRAPPER_NO_CONTRACT; |
881 | |
882 | #ifdef _DEBUG |
// In a debug build, make sure that a thread does not allocate memory after it has
// suspended another thread, since the suspended thread may be holding the OS heap
// lock.
886 | return !Thread::Debug_AllowCallout(); |
887 | #else |
888 | return FALSE; |
889 | #endif |
890 | } |
891 | |
892 | |