1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | // |
5 | |
6 | // |
7 | //----------------------------------------------------------------------------- |
8 | // Stack Probe Header |
// Used to set up stack guards
10 | //----------------------------------------------------------------------------- |
11 | |
12 | #ifndef __STACKPROBE_h__ |
13 | #define __STACKPROBE_h__ |
14 | |
15 | //----------------------------------------------------------------------------- |
16 | // Stack Guards. |
17 | // |
18 | // The idea is to force stack overflows to occur at convenient spots. |
// * Fire at RequiresNStackPages (beginning of func) if this function's locals
// cause overflow. Note that in debug mode, initializing the locals to garbage
// will cause the overflow before this macro is executed.
//
// * Fire at CheckStack (end of func) if either our nested function calls or
// our use of _alloca causes the stack overflow. Note that this macro is
// debug-only, so release builds won't catch this.
26 | // |
27 | // Some comments: |
// - Stack grows *down*.
// - Ideally, all funcs would have an EBP frame and we'd use EBP instead of ESP;
30 | // however, we use the 'this' ptr to get the stack ptr, since the guard |
31 | // is declared on the stack. |
32 | // |
// Comments about inlining assembly w/ macros:
// - Must use C-style comments /* ... */
// - No semicolons; need the __asm keyword at the start of each line
36 | //----------------------------------------------------------------------------- |
37 | |
38 | //----------------------------------------------------------------------------- |
39 | // *How* to use stack guards. |
40 | // |
41 | // See, in a CLR enlistment, src\ndp\clr\doc\OtherDevDocs\untriaged\clrdev_web\ |
42 | // |
43 | //----------------------------------------------------------------------------- |
44 | |
45 | //----------------------------------------------------------------------------- |
46 | // Stack guards have 3 compiler states: |
47 | //#define FEATURE_STACK_PROBE |
48 | // (All) All stack guard code is completely removed by the preprocessor if |
49 | // not defined. This is used for CoreCLR. |
50 | // |
51 | //#define STACK_GUARDS_DEBUG |
// (DEBUG) Full stack guard debugging including cookies, tracking IPs, and
// chaining. More heavyweight; recommended for debug builds only.
54 | // |
55 | //#define STACK_GUARDS_RELEASE |
56 | // (RELEASE) Light stack guard code. For golden builds. Forces Stack Overflow |
57 | // to happen at "convenient" times. No debugging help. |
58 | //----------------------------------------------------------------------------- |
59 | |
60 | #include "genericstackprobe.h" |
61 | #include "utilcode.h" |
62 | |
/* Defining VM_NO_SO_INFRASTRUCTURE_CODE for VM code.
 * This macro can be used to mark code that should be present
 * only inside the VM directory when the SO infrastructure code is not built.
 * E.g. it is currently used in the macro EX_END_HOOK.
 * For VM code, EX_HOOK calls CLRException::HandleState::SetupCatch().
 * When stack guards are disabled, we will tear down the process in
 * CLRException::HandleState::SetupCatch() if there is a StackOverflow.
 * So we should not reach EX_END_HOOK when there is a StackOverflow.
 * This change cannot be done for all other code because
 * CLRException::HandleState::SetupCatch() is not called; rather,
 * Exception::HandleState::SetupCatch() is called, which is a no-op.
 */
75 | |
76 | #ifndef FEATURE_STACK_PROBE |
77 | #undef VM_NO_SO_INFRASTRUCTURE_CODE |
78 | #define VM_NO_SO_INFRASTRUCTURE_CODE(x) x |
79 | #endif |
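
// A hedged illustration (the assert message is hypothetical): the wrapped
// statement is compiled in only when the SO infrastructure is not built.
//
//   VM_NO_SO_INFRASTRUCTURE_CODE(_ASSERTE(!"SO should have torn down the process"));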
80 | |
81 | |
82 | #ifdef FEATURE_STACK_PROBE |
83 | |
84 | #define DEFAULT_INTERIOR_PROBE_AMOUNT 4 |
85 | |
86 | #define MINIMUM_STACK_REQUIREMENT (0.25) |
87 | |
88 | BOOL IsBackoutCalledForEH(BYTE *origSP, BYTE *backoutSP); |
89 | |
90 | //============================================================================= |
91 | // Common code |
92 | //============================================================================= |
// Release versions of the probe functions
94 | BOOL RetailStackProbeNoThrow(unsigned int n, Thread *pThread); |
95 | BOOL RetailStackProbeNoThrowWorker(unsigned int n, Thread *pThread); |
96 | void RetailStackProbe(unsigned int n, Thread *pThread); |
97 | void RetailStackProbeWorker(unsigned int n, Thread *pThread); |
98 | void ReportStackOverflow(); |
99 | |
100 | // Retail stack probe with default amount is the most common stack probe. Create |
101 | // a dedicated method for it to reduce code size. |
102 | void DefaultRetailStackProbeWorker(Thread * pThread); |
103 | |
104 | void RetailStackProbe(unsigned int n); |
105 | |
106 | BOOL ShouldProbeOnThisThread(); |
107 | |
108 | int SOTolerantBoundaryFilter(EXCEPTION_POINTERS *pExceptionInfo, DWORD * pdwSOTolerantFlags); |
109 | void SOTolerantCode_RecoverStack(DWORD dwFlags); |
110 | void SOTolerantCode_ExceptBody(DWORD * pdwFlags, Frame * pSafeForSOFrame); |
111 | |
112 | #endif |
113 | |
114 | #if defined(FEATURE_STACK_PROBE) && !defined(DACCESS_COMPILE) |
115 | |
116 | inline bool IsStackProbingEnabled() |
117 | { |
118 | LIMITED_METHOD_CONTRACT; |
119 | return g_StackProbingEnabled; |
120 | } |
121 | |
122 | //============================================================================= |
123 | // DEBUG |
124 | //============================================================================= |
125 | #if defined(STACK_GUARDS_DEBUG) |
126 | |
127 | #include "common.h" |
128 | |
129 | class BaseStackGuard; |
130 | |
131 | //----------------------------------------------------------------------------- |
// Need to chain together stack guard addresses for nested functions.
// Use a TLS slot to store the head of the chain.
134 | //----------------------------------------------------------------------------- |
135 | extern DWORD g_CurrentStackGuardTlsIdx; |
136 | |
137 | //----------------------------------------------------------------------------- |
138 | // Class |
139 | //----------------------------------------------------------------------------- |
140 | |
141 | // Base version - has no ctor/dtor, so we can use it with SEH |
142 | // |
143 | // *** Don't declare any members here. Put them in BaseStackGuardGeneric. |
144 | // We downcast directly from the base to the derived, using the knowledge |
145 | // that the base class and the derived class are identical for members. |
146 | // |
147 | class BaseStackGuard : public BaseStackGuardGeneric |
148 | { |
149 | protected: |
150 | BaseStackGuard() |
151 | { |
152 | _ASSERTE(!"No default construction allowed" ); |
153 | } |
154 | |
155 | public: |
156 | BaseStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) : |
157 | BaseStackGuardGeneric(szFunction, szFile, lineNum) |
158 | { |
159 | STATIC_CONTRACT_LEAF; |
160 | } |
161 | |
162 | UINT_PTR *Marker() { return m_pMarker; } |
163 | |
164 | unsigned int Depth() { return m_depth; } |
165 | |
166 | const char *FunctionName() { return m_szFunction; } |
167 | |
168 | BOOL IsProbeGuard() |
169 | { |
170 | return (m_isBoundaryGuard == FALSE); |
171 | } |
172 | |
173 | BOOL IsBoundaryGuard() |
174 | { |
175 | return (m_isBoundaryGuard == TRUE); |
176 | } |
177 | |
178 | inline BOOL ShouldCheckPreviousCookieIntegrity(); |
179 | inline BOOL ShouldCheckThisCookieIntegrity(); |
180 | |
181 | BOOL RequiresNStackPages(unsigned int n, BOOL fThrowOnSO = TRUE); |
182 | BOOL RequiresNStackPagesThrowing(unsigned int n); |
183 | BOOL RequiresNStackPagesNoThrow(unsigned int n); |
184 | private: |
185 | BOOL RequiresNStackPagesInternal(unsigned int n, BOOL fThrowOnSO = TRUE); |
186 | public: |
187 | BOOL DoProbe(unsigned int n, BOOL fThrowOnSO); |
188 | void CheckStack(); |
189 | |
190 | static void RestoreCurrentGuard(BOOL fWasDisabled = FALSE); |
191 | void PopGuardForEH(); |
192 | |
    // Different error messages for the different times we determine there's a problem.
194 | void HandleOverwrittenThisStackGuard(__in_z char *stackID); |
195 | void HandleOverwrittenPreviousStackGuard(int shortFall, __in_z char *stackID); |
196 | void HandleOverwrittenCurrentStackGuard(int shortFall, __in_z char *stackID); |
197 | static void HandleOverwrittenCurrentStackGuard(void *pGuard, int shortFall, __in_z char *stackID); |
198 | |
199 | void CheckMarkerIntegrity(); |
200 | void RestorePreviousGuard(); |
201 | void ProtectMarkerPageInDebugger(); |
202 | void UndoPageProtectionInDebugger(); |
203 | static void ProtectMarkerPageInDebugger(void *pGuard); |
204 | static void UndoPageProtectionInDebugger(void *pGuard); |
205 | |
206 | inline HRESULT PrepGuard() |
207 | { |
208 | WRAPPER_NO_CONTRACT; |
209 | |
210 | // See if it has already been prepped... |
211 | if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != NULL) |
212 | return S_OK; |
213 | |
214 | // Let's see if we'll be able to put in a guard page |
215 | ClrFlsSetValue(g_CurrentStackGuardTlsIdx, |
216 | (void*)-1); |
217 | |
218 | if (ClrFlsGetValue(g_CurrentStackGuardTlsIdx) != (void*)-1) |
219 | return E_OUTOFMEMORY; |
220 | |
221 | return S_OK; |
222 | |
223 | } |
224 | |
225 | inline static BaseStackGuard* GetCurrentGuard() |
226 | { |
227 | WRAPPER_NO_CONTRACT; |
228 | if (g_CurrentStackGuardTlsIdx != -1) |
229 | return (BaseStackGuard*) ClrFlsGetValue(g_CurrentStackGuardTlsIdx); |
230 | else |
231 | return NULL; |
232 | } |
233 | |
234 | inline static BOOL IsGuard(BaseStackGuard *probe) |
235 | { |
236 | return (probe != NULL); |
237 | } |
238 | static void SetCurrentGuard(BaseStackGuard* pGuard); |
239 | static void ResetCurrentGuard(BaseStackGuard* pGuard); |
240 | |
241 | inline static BOOL IsProbeGuard(BaseStackGuard *probe) |
242 | { |
243 | LIMITED_METHOD_CONTRACT; |
        return (IsGuard(probe) && probe->IsProbeGuard());
245 | } |
246 | |
247 | inline static BOOL IsBoundaryGuard(BaseStackGuard *probe) |
248 | { |
249 | LIMITED_METHOD_CONTRACT; |
        return (IsGuard(probe) && probe->IsBoundaryGuard());
251 | } |
252 | |
253 | static void InitProbeReportingToFaultInjectionFramework(); |
254 | BOOL ReportProbeToFaultInjectionFramework(); |
255 | |
256 | static void Terminate(); |
257 | |
258 | |
259 | static HMODULE m_hProbeCallBack; |
260 | typedef BOOL (*ProbeCallbackType)(unsigned, const char *); |
261 | static ProbeCallbackType m_pfnProbeCallback; |
262 | |
263 | }; |
264 | |
265 | |
// Derived version; adds a dtor that automatically calls CheckStack. More convenient, but can't be used with SEH.
267 | class AutoCleanupStackGuard : public BaseStackGuard |
268 | { |
269 | protected: |
270 | AutoCleanupStackGuard() |
271 | { |
272 | _ASSERTE(!"No default construction allowed" ); |
273 | } |
274 | |
275 | public: |
276 | DEBUG_NOINLINE AutoCleanupStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) : |
277 | BaseStackGuard(szFunction, szFile, lineNum) |
278 | { |
279 | SCAN_SCOPE_BEGIN; |
280 | // This CANNOT be a STATIC_CONTRACT_SO_INTOLERANT b/c that isn't |
        // really just a static contract; it actually calls EnsureSOIntolerantOK
282 | // as well. Instead we just use the annotation. |
283 | ANNOTATION_FN_SO_INTOLERANT; |
284 | } |
285 | |
286 | DEBUG_NOINLINE ~AutoCleanupStackGuard() |
287 | { |
288 | SCAN_SCOPE_END; |
289 | CheckStack(); |
290 | } |
291 | }; |
292 | |
293 | class DebugSOIntolerantTransitionHandlerBeginOnly |
294 | { |
295 | BOOL m_prevSOTolerantState; |
296 | ClrDebugState* m_clrDebugState; |
297 | char *m_ctorSP; |
298 | |
299 | public: |
300 | DEBUG_NOINLINE DebugSOIntolerantTransitionHandlerBeginOnly(EEThreadHandle thread); |
301 | DEBUG_NOINLINE ~DebugSOIntolerantTransitionHandlerBeginOnly(); |
302 | }; |
303 | |
304 | |
305 | |
306 | extern DWORD g_InteriorProbeAmount; |
307 | |
308 | //============================================================================= |
309 | // Macros for transition into SO_INTOLERANT code |
310 | //============================================================================= |
311 | |
312 | FORCEINLINE DWORD DefaultEntryProbeAmount() { return g_EntryPointProbeAmount; } |
313 | |
314 | #define BEGIN_SO_INTOLERANT_CODE(pThread) \ |
315 | BEGIN_SO_INTOLERANT_CODE_FOR(pThread, g_EntryPointProbeAmount) \ |
316 | |
317 | #define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \ |
318 | { \ |
319 | /*_ASSERTE(pThread); */ \ |
320 | AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
321 | stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \ |
322 | /* work around unreachable code warning */ \ |
323 | if (true) \ |
324 | { \ |
325 | DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \ |
326 | ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \ |
327 | /* work around unreachable code warning */ \ |
328 | if (true) \ |
329 | { \ |
330 | DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT) |
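
// A hedged usage sketch of the entry-point probe (EntryPoint and DoWork are
// hypothetical; the matching END_SO_INTOLERANT_CODE macro is assumed to be
// supplied alongside these macros, e.g. by genericstackprobe.h):
//
//   void EntryPoint(Thread *pThread)
//   {
//       BEGIN_SO_INTOLERANT_CODE(pThread);
//       DoWork();               // probed, SO-intolerant region
//       END_SO_INTOLERANT_CODE;
//   }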
331 | |
332 | #define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \ |
333 | { \ |
334 | /*_ASSERTE(pThread || IsGCSpecialThread());*/ \ |
335 | AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
336 | if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\ |
337 | { \ |
338 | stack_guard_XXX.SetNoException(); \ |
339 | ActionOnSO; \ |
340 | } \ |
341 | /* work around unreachable code warning */ \ |
342 | else \ |
343 | { \ |
344 | DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \ |
345 | ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \ |
346 | /* work around unreachable code warning */ \ |
347 | if (true) \ |
348 | { \ |
349 | DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT) |
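
// A hedged sketch of the nothrow variant (the action on SO and DoWork are
// illustrative; END_SO_INTOLERANT_CODE is assumed to close this block as well):
//
//   BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, return E_OUTOFMEMORY);
//   hr = DoWork();
//   END_SO_INTOLERANT_CODE;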
350 | |
351 | |
// This is defined just for use in the InternalSetupForComCall macro, which
// doesn't have a corresponding end macro because no exception will pass through it.
354 | // It should not be used in any situation where an exception could pass through |
355 | // the transition. |
356 | #define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \ |
357 | AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
358 | if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount)))\ |
359 | { \ |
360 | ActionOnSO; \ |
361 | } \ |
362 | stack_guard_XXX.SetNoException(); \ |
363 | DebugSOIntolerantTransitionHandlerBeginOnly __soIntolerantTransitionHandler(pThread); \ |
364 | ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); |
365 | |
366 | |
367 | // For some codepaths used during the handling of an SO, we need to guarantee a |
368 | // minimal stack consumption to avoid an SO on that codepath. These are typically host |
// APIs such as allocation. The host is going to use < 1/4 page, so make sure
370 | // we have that amount before calling. Then use the BACKOUT_VALIDATION to ensure |
371 | // that we don't overrun it. We call ReportStackOverflow, which will generate a hard |
372 | // SO if we have less than a page left. |
373 | |
374 | #define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \ |
375 | if (IsStackProbingEnabled()) \ |
376 | { \ |
377 | Thread *__pThread = pThread; \ |
378 | if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \ |
379 | { \ |
380 | ReportStackOverflow(); \ |
381 | } \ |
382 | } \ |
383 | CONTRACT_VIOLATION(SOToleranceViolation); |
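
// A hedged usage sketch (HostAllocWrapper and SomeHostAlloc are hypothetical):
// verify the minimal requirement before calling out, ripping with a hard SO
// otherwise.
//
//   void *HostAllocWrapper(size_t cb)
//   {
//       MINIMAL_STACK_PROBE_CHECK_THREAD(GetThread());
//       return SomeHostAlloc(cb);
//   }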
384 | |
385 | // We don't use the DebugSOIntolerantTransitionHandler here because we don't need to transition into |
386 | // SO-intolerant code. We're already there. We also don't need to annotate as having probed, |
387 | // because this only matters for entry point functions. |
// We have a way to separate the declaration from the actual probing for cases where we need
// to do a test, such as IsGCThread(), to decide if we should probe.
390 | #define DECLARE_INTERIOR_STACK_PROBE \ |
391 | { \ |
392 | AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__);\ |
393 | DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE) |
394 | |
395 | |
// A function containing an interior probe is implicitly SO-intolerant because we
397 | // assume that it is not behind a probe. So confirm that we are in the correct state. |
398 | #define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \ |
399 | _ASSERTE(pThread != NULL); \ |
400 | stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(n)); \ |
401 | EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__); |
402 | |
403 | #define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \ |
404 | if (ShouldProbeOnThisThread()) \ |
405 | { \ |
406 | DO_INTERIOR_STACK_PROBE_FOR(GetThread(), g_InteriorProbeAmount); \ |
407 | } |
408 | |
// A function containing an interior probe is implicitly SO-intolerant because we
410 | // assume that it is not behind a probe. So confirm that we are in the correct state. |
411 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, actionOnSO) \ |
412 | _ASSERTE(pThread != NULL); \ |
413 | if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(n))) \ |
414 | { \ |
415 | stack_guard_XXX.SetNoException(); \ |
416 | actionOnSO; \ |
417 | } \ |
418 | EnsureSOIntolerantOK(__FUNCTION__, __FILE__, __LINE__); |
419 | |
420 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, actionOnSO) \ |
421 | if (ShouldProbeOnThisThread()) \ |
422 | { \ |
423 | DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(GetThread(), n, actionOnSO); \ |
424 | } |
425 | |
426 | |
427 | #define INTERIOR_STACK_PROBE_FOR(pThread, n) \ |
428 | DECLARE_INTERIOR_STACK_PROBE; \ |
429 | DO_INTERIOR_STACK_PROBE_FOR(pThread, n) |
430 | |
431 | #define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \ |
432 | DECLARE_INTERIOR_STACK_PROBE; \ |
433 | DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) |
434 | |
435 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \ |
436 | DECLARE_INTERIOR_STACK_PROBE; \ |
437 | DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) |
438 | |
439 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \ |
440 | DECLARE_INTERIOR_STACK_PROBE; \ |
441 | DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) |
442 | |
443 | |
444 | #define INTERIOR_STACK_PROBE(pThread) \ |
445 | INTERIOR_STACK_PROBE_FOR(pThread, g_InteriorProbeAmount) |
446 | |
447 | #define INTERIOR_STACK_PROBE_CHECK_THREAD \ |
448 | INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(g_InteriorProbeAmount) |
449 | |
450 | #define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \ |
451 | INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, g_InteriorProbeAmount, ActionOnSO) |
452 | |
453 | #define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \ |
454 | INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(g_InteriorProbeAmount, ActionOnSO) |
455 | |
456 | |
457 | #define END_INTERIOR_STACK_PROBE \ |
458 | DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \ |
459 | stack_guard_XXX.SetNoException(); \ |
460 | } |
461 | |
462 | #define RETURN_FROM_INTERIOR_PROBE(x) \ |
463 | DEBUG_OK_TO_RETURN_BEGIN(STACK_PROBE) \ |
464 | stack_guard_XXX.SetNoException(); \ |
465 | RETURN(x); \ |
466 | DEBUG_OK_TO_RETURN_END(STACK_PROBE) |
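
// A hedged usage sketch of an interior probe (the helper is hypothetical;
// INTERIOR_STACK_PROBE probes g_InteriorProbeAmount pages):
//
//   void StackHungryHelper(Thread *pThread)
//   {
//       INTERIOR_STACK_PROBE(pThread);
//       /* ... locals, nested calls ... */
//       END_INTERIOR_STACK_PROBE;
//   }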
467 | |
468 | |
469 | // This is used for EH code where we are about to throw. |
// To avoid taking an SO during EH processing, we want to include it in our probe limits,
// so we will just do a big probe and then throw.
472 | #define STACK_PROBE_FOR_THROW(pThread) \ |
473 | AutoCleanupStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
474 | if (pThread != NULL) \ |
475 | { \ |
476 | DO_INTERIOR_STACK_PROBE_FOR(pThread, ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT)); \ |
477 | } |
478 | |
479 | // This is used for throws where we cannot use a dtor-based probe. |
480 | #define PUSH_STACK_PROBE_FOR_THROW(pThread) \ |
481 | BaseStackGuard stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
482 | stack_guard_XXX.RequiresNStackPagesThrowing(ADJUST_PROBE(g_EntryPointProbeAmount)); |
483 | |
484 | #define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard) \ |
485 | pGuard = &stack_guard_XXX; |
486 | |
487 | #define RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pGuard) \ |
488 | pGuard->SetNoException (); |
489 | |
490 | #define POP_STACK_PROBE_FOR_THROW(pGuard) \ |
491 | pGuard->CheckStack(); |
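
// A hedged sketch of the non-dtor throw-probe flow (the straight-line sequence
// is illustrative; in real use the save/reset/pop steps typically happen along
// the exception-raising and handling paths):
//
//   BaseStackGuard *pGuard = NULL;
//   PUSH_STACK_PROBE_FOR_THROW(GetThread());
//   SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard);
//   /* ... raise and handle the exception ... */
//   RESET_EXCEPTION_FROM_STACK_PROBE_FOR_THROW(pGuard);
//   POP_STACK_PROBE_FOR_THROW(pGuard);
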
492 | |
493 | //============================================================================= |
494 | // Macros for transition into SO_TOLERANT code |
495 | //============================================================================= |
496 | // @todo : put this assert in when all probes are in place. |
497 | // _ASSERTE(! pThread->IsSOTolerant()); |
498 | |
499 | //********************************************************************************* |
500 | |
501 | // A boundary stack guard is pushed onto the probe stack when we leave the EE and |
502 | // popped when we return. It is used for 1) restoring the original probe's cookie |
// when we return, as managed code could trash it, and 2) marking a boundary so that
// we know not to check for overwritten probes before it when we install a real probe.
505 | // |
506 | class BoundaryStackGuard : public BaseStackGuard |
507 | { |
508 | protected: |
509 | BoundaryStackGuard() |
510 | { |
511 | LIMITED_METHOD_CONTRACT; |
512 | |
513 | _ASSERTE(!"No default construction allowed" ); |
514 | } |
515 | |
516 | public: |
517 | DEBUG_NOINLINE BoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) |
518 | : BaseStackGuard(szFunction, szFile, lineNum) |
519 | { |
520 | SCAN_SCOPE_BEGIN; |
521 | ANNOTATION_FN_SO_TOLERANT; |
522 | |
523 | m_isBoundaryGuard = TRUE; |
524 | } |
525 | |
526 | DEBUG_NOINLINE void Push(); |
527 | DEBUG_NOINLINE void Pop(); |
528 | |
529 | DEBUG_NOINLINE void SetNoExceptionNoPop() |
530 | { |
531 | SCAN_SCOPE_END; |
532 | SetNoException(); |
533 | } |
534 | |
535 | }; |
536 | |
// Derived version; adds a dtor that automatically calls Pop. More convenient, but can't be used with SEH.
538 | class AutoCleanupBoundaryStackGuard : public BoundaryStackGuard |
539 | { |
540 | protected: |
541 | AutoCleanupBoundaryStackGuard() |
542 | { |
543 | _ASSERTE(!"No default construction allowed" ); |
544 | } |
545 | |
546 | public: |
547 | DEBUG_NOINLINE AutoCleanupBoundaryStackGuard(const char *szFunction, const char *szFile, unsigned int lineNum) : |
548 | BoundaryStackGuard(szFunction, szFile, lineNum) |
549 | { |
550 | SCAN_SCOPE_BEGIN; |
551 | ANNOTATION_FN_SO_TOLERANT; |
552 | } |
553 | |
554 | DEBUG_NOINLINE ~AutoCleanupBoundaryStackGuard() |
555 | { |
556 | SCAN_SCOPE_END; |
557 | Pop(); |
558 | } |
559 | }; |
560 | |
561 | |
562 | class DebugSOTolerantTransitionHandler |
563 | { |
564 | BOOL m_prevSOTolerantState; |
565 | ClrDebugState* m_clrDebugState; |
566 | |
567 | public: |
568 | void EnterSOTolerantCode(Thread *pThread); |
569 | void ReturnFromSOTolerantCode(); |
570 | }; |
571 | |
572 | class AutoCleanupDebugSOTolerantTransitionHandler : DebugSOTolerantTransitionHandler |
573 | { |
574 | BOOL m_prevSOTolerantState; |
575 | ClrDebugState* m_clrDebugState; |
576 | |
577 | public: |
578 | DEBUG_NOINLINE AutoCleanupDebugSOTolerantTransitionHandler(Thread *pThread) |
579 | { |
580 | SCAN_SCOPE_BEGIN; |
581 | ANNOTATION_FN_SO_INTOLERANT; |
582 | |
583 | EnterSOTolerantCode(pThread); |
584 | } |
585 | DEBUG_NOINLINE ~AutoCleanupDebugSOTolerantTransitionHandler() |
586 | { |
587 | SCAN_SCOPE_END; |
588 | |
589 | ReturnFromSOTolerantCode(); |
590 | } |
591 | }; |
592 | |
593 | |
594 | // When we enter SO-tolerant code, we |
595 | // 1) probe to make sure that we will have enough stack to run our backout code. We don't |
596 | // need to check that the cookie was overrun because we only care that we had enough stack. |
// But we do anyway, to pop off the guard.
// The backout code infrastructure ensures that we stay below the BACKOUT_CODE_STACK_LIMIT.
599 | // 2) Install a boundary guard, which will preserve our cookie and prevent spurious checks if |
600 | // we call back into the EE. |
601 | // 3) Formally transition into SO-tolerant code so that we can make sure we are probing if we call |
602 | // back into the EE. |
603 | // |
604 | |
605 | #undef OPTIONAL_SO_CLEANUP_UNWIND |
606 | #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) |
607 | |
608 | #define BSTC_RECOVER_STACK 0x1 |
609 | #define BSTC_IS_SO 0x2 |
610 | #define BSTC_IS_SOFT_SO 0x4 |
611 | #define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8 |
612 | |
613 | #define BEGIN_SO_TOLERANT_CODE(pThread) \ |
614 | { /* add an outer scope so that we'll restore our state as soon as we return */ \ |
615 | Thread * const __pThread = pThread; \ |
616 | DWORD __dwFlags = 0; \ |
617 | Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \ |
618 | SCAN_BLOCKMARKER(); \ |
619 | SCAN_BLOCKMARKER_MARK(); \ |
620 | BoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
621 | boundary_guard_XXX.Push(); \ |
622 | DebugSOTolerantTransitionHandler __soTolerantTransitionHandler; \ |
623 | __soTolerantTransitionHandler.EnterSOTolerantCode(__pThread); \ |
624 | __try \ |
625 | { \ |
626 | SCAN_EHMARKER(); \ |
627 | __try \ |
628 | { \ |
629 | SCAN_EHMARKER_TRY(); \ |
630 | DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE) \ |
631 | __try \ |
632 | { |
633 | |
634 | |
635 | // We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code. |
// Also, we can't allow a hard SO to propagate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
// So install a filter, catch the hard SO, and rethrow a C++ SO. Note that we don't check the host policy here; it only applies to exceptions
638 | // that will leak back into managed code. |
639 | #define END_SO_TOLERANT_CODE \ |
640 | } \ |
641 | __finally \ |
642 | { \ |
643 | STATIC_CONTRACT_SO_TOLERANT; \ |
644 | if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \ |
645 | { \ |
646 | OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \ |
647 | } \ |
648 | } \ |
649 | DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \ |
650 | boundary_guard_XXX.SetNoException(); \ |
651 | SCAN_EHMARKER_END_TRY(); \ |
652 | } \ |
653 | __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \ |
654 | { \ |
655 | SCAN_EHMARKER_CATCH(); \ |
656 | __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \ |
657 | SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \ |
658 | SCAN_EHMARKER_END_CATCH(); \ |
659 | } \ |
660 | /* This will correctly set the annotation back to SOIntolerant if needed */ \ |
661 | SCAN_BLOCKMARKER_USE(); \ |
662 | if (__dwFlags & BSTC_RECOVER_STACK) \ |
663 | { \ |
664 | SOTolerantCode_RecoverStack(__dwFlags); \ |
665 | } \ |
666 | } \ |
667 | __finally \ |
668 | { \ |
669 | __soTolerantTransitionHandler.ReturnFromSOTolerantCode(); \ |
670 | boundary_guard_XXX.Pop(); \ |
671 | } \ |
672 | /* This is actually attached to the SCAN_BLOCKMARKER_USE() in the try scope */ \ |
673 | /* but should hopefully chain the right annotations for a call to a __finally */ \ |
674 | SCAN_BLOCKMARKER_END_USE(); \ |
675 | } |
676 | |
677 | extern unsigned __int64 getTimeStamp(); |
678 | |
679 | INDEBUG(void AddHostCallsStaticMarker();) |
680 | |
// This is used for calling into the host.
// We only need to install the boundary guard and transition into SO-tolerant code.
683 | #define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \ |
684 | { \ |
685 | ULONGLONG __entryTime = 0; \ |
686 | __int64 __entryTimeStamp = 0; \ |
687 | _ASSERTE(CanThisThreadCallIntoHost()); \ |
688 | _ASSERTE((pThread == NULL) || \ |
689 | (pThread->GetClrDebugState() == NULL) || \ |
690 | ((pThread->GetClrDebugState()->ViolationMask() & \ |
691 | (HostViolation|BadDebugState)) != 0) || \ |
692 | (pThread->GetClrDebugState()->IsHostCaller())); \ |
693 | INDEBUG(AddHostCallsStaticMarker();) \ |
694 | _ASSERTE(pThread == NULL || !pThread->IsInForbidSuspendRegion()); \ |
695 | { \ |
696 | AutoCleanupBoundaryStackGuard boundary_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \ |
697 | boundary_guard_XXX.Push(); \ |
698 | AutoCleanupDebugSOTolerantTransitionHandler __soTolerantTransitionHandler(pThread); \ |
699 | DEBUG_ASSURE_NO_RETURN_BEGIN(STACK_PROBE); \ |
700 | |
701 | #define END_SO_TOLERANT_CODE_CALLING_HOST \ |
702 | DEBUG_ASSURE_NO_RETURN_END(STACK_PROBE) \ |
703 | boundary_guard_XXX.SetNoExceptionNoPop(); \ |
704 | } \ |
705 | } |
706 | |
707 | //----------------------------------------------------------------------------- |
708 | // Startup & Shutdown stack guard subsystem |
709 | //----------------------------------------------------------------------------- |
710 | void InitStackProbes(); |
711 | void TerminateStackProbes(); |
712 | |
713 | #elif defined(STACK_GUARDS_RELEASE) |
714 | //============================================================================= |
// Release - really streamlined.
716 | //============================================================================= |
717 | |
718 | void InitStackProbesRetail(); |
719 | inline void InitStackProbes() |
720 | { |
721 | InitStackProbesRetail(); |
722 | } |
723 | |
724 | inline void TerminateStackProbes() |
725 | { |
726 | LIMITED_METHOD_CONTRACT; |
727 | } |
728 | |
729 | |
730 | //============================================================================= |
731 | // Macros for transition into SO_INTOLERANT code |
732 | //============================================================================= |
733 | |
734 | FORCEINLINE DWORD DefaultEntryProbeAmount() { return DEFAULT_ENTRY_PROBE_AMOUNT; } |
735 | |
736 | #define BEGIN_SO_INTOLERANT_CODE(pThread) \ |
737 | { \ |
738 | if (IsStackProbingEnabled()) DefaultRetailStackProbeWorker(pThread); \ |
739 | /* match with the else used in other macros */ \ |
740 | if (true) { \ |
741 | SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \ |
742 | /* work around unreachable code warning */ \ |
743 | if (true) { \ |
744 | DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT) |
745 | |
746 | #define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) \ |
747 | { \ |
748 | if (IsStackProbingEnabled()) RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \ |
749 | /* match with the else used in other macros */ \ |
750 | if (true) { \ |
751 | SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \ |
752 | /* work around unreachable code warning */ \ |
753 | if (true) { \ |
754 | DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT) |
755 | |
756 | #define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \ |
757 | { \ |
758 | if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \ |
759 | { \ |
760 | ActionOnSO; \ |
761 | } else { \ |
762 | SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \ |
763 | /* work around unreachable code warning */ \ |
764 | if (true) { \ |
765 | DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT) |
766 | |
767 | |
// This is defined just for use in the InternalSetupForComCall macro, which
// doesn't have a corresponding end macro because no exception will pass through it.
770 | // It should not be used in any situation where an exception could pass through |
771 | // the transition. |
772 | #define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) \ |
773 | if (IsStackProbingEnabled() && !RetailStackProbeNoThrowWorker(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread)) \ |
774 | { \ |
775 | ActionOnSO; \ |
776 | } \ |
777 | |
778 | #define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) \ |
779 | if (IsStackProbingEnabled()) \ |
780 | { \ |
781 | Thread *__pThread = pThread; \ |
782 | if (__pThread && ! __pThread->IsStackSpaceAvailable(MINIMUM_STACK_REQUIREMENT)) \ |
783 | { \ |
784 | ReportStackOverflow(); \ |
785 | } \ |
786 | } |
787 | |
788 | #define DECLARE_INTERIOR_STACK_PROBE |
789 | |
790 | |
791 | #define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) \ |
792 | if (IsStackProbingEnabled()) \ |
793 | { \ |
794 | RetailStackProbeWorker(ADJUST_PROBE(n), pThread); \ |
795 | } |
796 | |
797 | #define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \ |
798 | if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \ |
799 | { \ |
800 | RetailStackProbeWorker(ADJUST_PROBE(n), GetThread()); \ |
801 | } |
802 | |
803 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \ |
804 | if (IsStackProbingEnabled()) \ |
805 | { \ |
806 | if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), pThread)) \ |
807 | { \ |
808 | ActionOnSO; \ |
809 | } \ |
810 | } |
811 | |
812 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \ |
813 | if (IsStackProbingEnabled() && ShouldProbeOnThisThread()) \ |
814 | { \ |
815 | if (!RetailStackProbeNoThrowWorker(ADJUST_PROBE(n), GetThread())) \ |
816 | { \ |
817 | ActionOnSO; \ |
818 | } \ |
819 | } |
820 | |
821 | |
822 | #define INTERIOR_STACK_PROBE_FOR(pThread, n) \ |
823 | DECLARE_INTERIOR_STACK_PROBE; \ |
824 | DO_INTERIOR_STACK_PROBE_FOR(pThread, n) |
825 | |
826 | #define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) \ |
827 | DECLARE_INTERIOR_STACK_PROBE; \ |
828 | DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) |
829 | |
830 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) \ |
831 | DECLARE_INTERIOR_STACK_PROBE; \ |
832 | DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) |
833 | |
834 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) \ |
835 | DECLARE_INTERIOR_STACK_PROBE; \ |
836 | DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) |
837 | |
838 | |
839 | #define INTERIOR_STACK_PROBE(pThread) \ |
840 | INTERIOR_STACK_PROBE_FOR(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT) |
841 | |
842 | #define INTERIOR_STACK_PROBE_CHECK_THREAD \ |
843 | INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT) |
844 | |
845 | #define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) \ |
846 | INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO) |
847 | |
848 | #define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) \ |
849 | INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(DEFAULT_INTERIOR_PROBE_AMOUNT, ActionOnSO) |
850 | |
851 | |
852 | #define END_INTERIOR_STACK_PROBE |
853 | |
854 | #define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x) |
855 | |
856 | |
// This is used for EH code where we are about to throw.
// To avoid taking an SO during EH processing, we want to include it in our probe limits,
// so we will just do a big probe and then throw.
860 | #define STACK_PROBE_FOR_THROW(pThread) \ |
861 | if (pThread != NULL) \ |
862 | { \ |
863 | RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread); \ |
864 | } \ |
865 | |
866 | #define PUSH_STACK_PROBE_FOR_THROW(pThread) \ |
867 | RetailStackProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT), pThread); |
868 | |
869 | #define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard) |
870 | |
871 | #define POP_STACK_PROBE_FOR_THROW(pGuard) |
872 | |
873 | |
874 | //============================================================================= |
875 | // Macros for transition into SO_TOLERANT code |
876 | //============================================================================= |
877 | |
878 | #undef OPTIONAL_SO_CLEANUP_UNWIND |
879 | #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) |
880 | |
881 | #define BSTC_RECOVER_STACK 0x1 |
882 | #define BSTC_IS_SO 0x2 |
883 | #define BSTC_IS_SOFT_SO 0x4 |
884 | #define BSTC_TRIGGERING_UNWIND_FOR_SO 0x8 |
885 | |
886 | |
887 | #define BEGIN_SO_TOLERANT_CODE(pThread) \ |
888 | { \ |
889 | Thread * __pThread = pThread; \ |
890 | DWORD __dwFlags = 0; \ |
891 | Frame * __pSafeForSOFrame = __pThread ? __pThread->GetFrame() : NULL; \ |
892 | SCAN_BLOCKMARKER(); \ |
893 | SCAN_BLOCKMARKER_MARK(); \ |
894 | SCAN_EHMARKER(); \ |
895 | __try \ |
896 | { \ |
897 | SCAN_EHMARKER_TRY() \ |
898 | __try \ |
899 | { |
900 | |
901 | // We need to catch any hard SO that comes through in order to get our stack back and make sure that we can run our backout code. |
// Also, we can't allow a hard SO to propagate into SO-intolerant code, as we can't tell where it came from and would have to rip the process.
// So install a filter, catch the hard SO, and rethrow a C++ SO.
904 | #define END_SO_TOLERANT_CODE \ |
905 | } \ |
906 | __finally \ |
907 | { \ |
908 | STATIC_CONTRACT_SO_TOLERANT; \ |
909 | if (__dwFlags & BSTC_TRIGGERING_UNWIND_FOR_SO) \ |
910 | { \ |
911 | OPTIONAL_SO_CLEANUP_UNWIND(__pThread, __pSafeForSOFrame) \ |
912 | } \ |
913 | } \ |
914 | SCAN_EHMARKER_END_TRY(); \ |
915 | } \ |
916 | __except(SOTolerantBoundaryFilter(GetExceptionInformation(), &__dwFlags)) \ |
917 | { \ |
918 | SCAN_EHMARKER_CATCH(); \ |
919 | SOTolerantCode_ExceptBody(&__dwFlags, __pSafeForSOFrame); \ |
920 | SCAN_EHMARKER_END_CATCH(); \ |
921 | } \ |
922 | SCAN_BLOCKMARKER_USE(); \ |
923 | if (__dwFlags & BSTC_RECOVER_STACK) \ |
924 | { \ |
925 | SOTolerantCode_RecoverStack(__dwFlags); \ |
926 | } \ |
927 | SCAN_BLOCKMARKER_END_USE(); \ |
928 | } |
929 | |
930 | #define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \ |
931 | { \ |
932 | |
933 | #define END_SO_TOLERANT_CODE_CALLING_HOST \ |
934 | } |
935 | |
936 | #endif |
937 | |
938 | #else // FEATURE_STACK_PROBE && !DACCESS_COMPILE |
939 | |
940 | inline void InitStackProbes() |
941 | { |
942 | LIMITED_METHOD_CONTRACT; |
943 | } |
944 | |
945 | inline void TerminateStackProbes() |
946 | { |
947 | LIMITED_METHOD_CONTRACT; |
948 | } |
949 | |
950 | #define BEGIN_SO_INTOLERANT_CODE(pThread) |
951 | #define BEGIN_SO_INTOLERANT_CODE_FOR(pThread, n) |
952 | #define BEGIN_SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) |
953 | #define SO_INTOLERANT_CODE_NOTHROW(pThread, ActionOnSO) |
954 | #define MINIMAL_STACK_PROBE_CHECK_THREAD(pThread) |
955 | |
956 | #define DECLARE_INTERIOR_STACK_PROBE |
957 | |
958 | #define DO_INTERIOR_STACK_PROBE_FOR(pThread, n) |
959 | #define DO_INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) |
960 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) |
961 | #define DO_INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) |
962 | |
963 | #define INTERIOR_STACK_PROBE_FOR(pThread, n) |
964 | #define INTERIOR_STACK_PROBE_FOR_CHECK_THREAD(n) |
965 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW(pThread, n, ActionOnSO) |
966 | #define INTERIOR_STACK_PROBE_FOR_NOTHROW_CHECK_THREAD(n, ActionOnSO) |
967 | |
968 | #define INTERIOR_STACK_PROBE(pThread) |
969 | #define INTERIOR_STACK_PROBE_CHECK_THREAD |
970 | #define INTERIOR_STACK_PROBE_NOTHROW(pThread, ActionOnSO) |
971 | #define INTERIOR_STACK_PROBE_NOTHROW_CHECK_THREAD(ActionOnSO) |
972 | |
973 | #define END_INTERIOR_STACK_PROBE |
974 | #define RETURN_FROM_INTERIOR_PROBE(x) RETURN(x) |
975 | |
976 | #define STACK_PROBE_FOR_THROW(pThread) |
977 | #define PUSH_STACK_PROBE_FOR_THROW(pThread) |
978 | #define SAVE_ADDRESS_OF_STACK_PROBE_FOR_THROW(pGuard) |
979 | #define POP_STACK_PROBE_FOR_THROW(pGuard) |
980 | |
981 | #define BEGIN_SO_TOLERANT_CODE(pThread) |
982 | #define END_SO_TOLERANT_CODE |
983 | #define RETURN_FROM_SO_TOLERANT_CODE_HAS_CATCH |
984 | #define BEGIN_SO_TOLERANT_CODE_CALLING_HOST(pThread) \ |
985 | _ASSERTE(CanThisThreadCallIntoHost()); |
986 | |
987 | #define END_SO_TOLERANT_CODE_CALLING_HOST |
988 | |
989 | #endif // FEATURE_STACK_PROBE && !DACCESS_COMPILE |
990 | |
991 | #endif // __STACKPROBE_h__ |
992 | |