// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//

//
//-----------------------------------------------------------------------------
// Generic Stack Probe Code
// Used to set up stack guards and probes outside the VM tree
//-----------------------------------------------------------------------------

#ifndef __GENERICSTACKPROBE_h__
#define __GENERICSTACKPROBE_h__

#include "staticcontract.h"
#include "predeftlsslot.h"

#if defined(DISABLE_CONTRACTS)
#undef FEATURE_STACK_PROBE
#endif

#if defined(FEATURE_STACK_PROBE)
#ifdef _DEBUG
#define STACK_GUARDS_DEBUG
#else
#define STACK_GUARDS_RELEASE
#endif
#endif

#ifdef FEATURE_STACK_PROBE
#define SO_INFRASTRUCTURE_CODE(x) x
#define NO_SO_INFRASTRUCTURE_CODE_ASSERTE(x)
#else
#define SO_INFRASTRUCTURE_CODE(x)
#define NO_SO_INFRASTRUCTURE_CODE_ASSERTE(x) _ASSERTE(x);
#endif

/* This macro is redefined in stackprobe.h
 * so that code expanded using this macro is present only for files
 * within the VM directory. See stackprobe.h for more details.
 */
#define VM_NO_SO_INFRASTRUCTURE_CODE(x)

// The types of stack validation we support in holders.
enum HolderStackValidation
{
    HSV_NoValidation,
    HSV_ValidateMinimumStackReq,
    HSV_ValidateNormalStackReq,
};

// Used to track transitions into the profiler
#define REMOVE_STACK_GUARD_FOR_PROFILER_CALL \
    REMOVE_STACK_GUARD

// For AMD64, the page size is 4K, the same as x86, but pointers are 64-bit, so
// the stack tends to grow a lot faster than on x86.
#ifdef _TARGET_AMD64_
#define ADJUST_PROBE(n) (2 * (n))
#else
#define ADJUST_PROBE(n) (n)
#endif
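
// For example, with the DEFAULT_ENTRY_PROBE_SIZE of 12 pages defined below,
// ADJUST_PROBE(DEFAULT_ENTRY_PROBE_SIZE) requests 24 pages on AMD64 and 12 pages
// on other targets.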

#if defined(FEATURE_STACK_PROBE)

#ifdef STACK_GUARDS_DEBUG // DAC and non-DAC - all data structures referenced in DAC'ized code
                          // must be included so we can calculate layout. SO probes are not
                          // active in the DAC but the SO probe structures contribute to layout.


// This class is used to place a marker upstack and verify that it was not overrun. It is
// different from the full-blown stack probes in that it does not chain with other probes or
// test for stack overflow. Its sole purpose is to verify stack consumption.
// It is effectively an implicit probe, though, because we are guaranteeing that we have
// enough stack to run and will not take an SO. So we enter SO-intolerant code when
// we install one of these.

class StackMarkerStack;
struct ClrDebugState;

class BaseStackMarker
{
    friend StackMarkerStack;

    ClrDebugState  *m_pDebugState;
    BOOL            m_prevWasSOTolerant;    // Were we SO-tolerant when we came in?
    BOOL            m_fMarkerSet;           // Has the marker been set?
    BOOL            m_fTemporarilyDisabled; // Has the marker been temporarily disabled?
    BOOL            m_fAddedToStack;        // Has this BaseStackMarker been added to the stack of markers for the thread?
    float           m_numPages;
    UINT_PTR       *m_pMarker;              // Pointer to where to put our marker cookie on the stack.
    BaseStackMarker *m_pPrevious;
    BOOL            m_fProtectedStackPage;
    BOOL            m_fAllowDisabling;

    BaseStackMarker() {};   // no default construction allowed

    // These should only be called by the ClrDebugState.
    void RareDisableMarker();
    void RareReEnableMarker();

public:
    BaseStackMarker(float numPages, BOOL fAllowDisabling);

    // We have this so that the check of the global can be inlined and we don't
    // make the call to CheckMarker unless we need to.
    void CheckForBackoutViolation();

    void SetMarker(float numPages);
    void CheckMarker();

    void ProtectMarkerPageInDebugger();
    void UndoPageProtectionInDebugger();

};
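
// Illustrative usage sketch (not part of this header): verifying the stack
// consumption of a block of code with a marker. The function name below is
// hypothetical; in practice the VALIDATE_BACKOUT_STACK_CONSUMPTION* macros
// defined later wrap this pattern in an AutoCleanupStackMarker so the check
// runs from a destructor.
//
//   void SomeBackoutRoutine()   // hypothetical
//   {
//       // Plant the cookie BACKOUT_CODE_STACK_LIMIT pages (adjusted for the
//       // target) below the current stack pointer.
//       BaseStackMarker marker(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT), TRUE);
//
//       /* ... backout work that must stay within the budgeted stack ... */
//
//       // Reports a violation if the cookie was overwritten, i.e. the code
//       // above consumed more stack than it claimed.
//       marker.CheckForBackoutViolation();
//   }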

class StackMarkerStack
{
public:
    // Since this is used from the ClrDebugState, which can't have a default constructor,
    // we need to provide an Init method to initialize the instance instead of having a constructor.
    void Init()
    {
        m_pTopStackMarker = NULL;
        m_fDisabled = FALSE;
    }

    void PushStackMarker(BaseStackMarker *pStackMarker);
    BaseStackMarker *PopStackMarker();

    BOOL IsEmpty()
    {
        return (m_pTopStackMarker == NULL);
    }
    BOOL IsDisabled()
    {
        return m_fDisabled;
    }

    void RareDisableStackMarkers();
    void RareReEnableStackMarkers();

private:
    BaseStackMarker *m_pTopStackMarker;     // The top of the stack of stack markers for the current thread.
    BOOL             m_fDisabled;
};

#endif // STACK_GUARDS_DEBUG

#if !defined(DACCESS_COMPILE)

// In debug builds, we redefine DEFAULT_ENTRY_PROBE_AMOUNT to a global static
// so that we can tune the entry point probe size at runtime.
#define DEFAULT_ENTRY_PROBE_SIZE 12
#define DEFAULT_ENTRY_PROBE_AMOUNT DEFAULT_ENTRY_PROBE_SIZE

#define BACKOUT_CODE_STACK_LIMIT 4.0
#define HOLDER_CODE_NORMAL_STACK_LIMIT BACKOUT_CODE_STACK_LIMIT
#define HOLDER_CODE_MINIMUM_STACK_LIMIT 0.25

void DontCallDirectlyForceStackOverflow();
void SOBackoutViolation(const char *szFunction, const char *szFile, int lineNum);
typedef void *EEThreadHandle;
class SOIntolerantTransitionHandler;
extern bool g_StackProbingEnabled;
extern void (*g_fpCheckForSOInSOIntolerantCode)();
extern void (*g_fpSetSOIntolerantTransitionMarker)();
extern BOOL (*g_fpDoProbe)(unsigned int n);
extern void (*g_fpHandleSoftStackOverflow)(BOOL fSkipDebugger);

// Once we enter SO-intolerant code, we can never take a hard SO as we will be
// in an unknown state. SOIntolerantTransitionHandler is used to detect a hard SO in SO-intolerant
// code and to raise a Fatal Error if one occurs.
class SOIntolerantTransitionHandler
{
private:
    bool   m_exceptionOccurred;
    void * m_pPreviousHandler;

public:
    FORCEINLINE SOIntolerantTransitionHandler()
    {
        if (g_StackProbingEnabled)
        {
            CtorImpl();
        }
    }

    FORCEINLINE ~SOIntolerantTransitionHandler()
    {
        if (g_StackProbingEnabled)
        {
            DtorImpl();
        }
    }

    NOINLINE void CtorImpl();
    NOINLINE void DtorImpl();

    void SetNoException()
    {
        m_exceptionOccurred = false;
    }

    bool DidExceptionOccur()
    {
        return m_exceptionOccurred;
    }
};
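
// Illustrative usage sketch (not part of this header): the handler brackets an
// SO-intolerant region, and SetNoException() must be reached on the success path
// so the destructor can tell a normal exit from an exceptional unwind.
// DoIntolerantWork() is a hypothetical placeholder.
//
//   {
//       SOIntolerantTransitionHandler handler;
//       DoIntolerantWork();         // must not take a hard SO in here
//       handler.SetNoException();   // skipped if an exception unwinds past us
//   }   // destructor runs DtorImpl() when stack probing is enabled
//
// The BEGIN/END_SO_INTOLERANT_CODE* macros later in this header expand to this
// pattern.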


extern void (*g_fpHandleStackOverflowAfterCatch)();
void HandleStackOverflowAfterCatch();

#if defined(STACK_GUARDS_DEBUG)

#ifdef _WIN64
#define STACK_COOKIE_VALUE 0x0123456789ABCDEF
#define DISABLED_STACK_COOKIE_VALUE 0xDCDCDCDCDCDCDCDC
#else
#define STACK_COOKIE_VALUE 0x01234567
#define DISABLED_STACK_COOKIE_VALUE 0xDCDCDCDC
#endif

// This allows us to adjust the probe amount at run-time in checked builds
#undef DEFAULT_ENTRY_PROBE_AMOUNT
#define DEFAULT_ENTRY_PROBE_AMOUNT g_EntryPointProbeAmount

class BaseStackGuardGeneric;
class BaseStackGuard;

extern void (*g_fpRestoreCurrentStackGuard)(BOOL fDisabled);
extern BOOL (*g_fp_BaseStackGuard_RequiresNStackPages)(BaseStackGuardGeneric *pGuard, unsigned int n, BOOL fThrowOnSO);
extern void (*g_fp_BaseStackGuard_CheckStack)(BaseStackGuardGeneric *pGuard);
extern BOOL (*g_fpCheckNStackPagesAvailable)(unsigned int n);
extern BOOL g_ProtectStackPagesInDebugger;
void RestoreSOToleranceState();
void EnsureSOTolerant();

extern BOOL g_EnableBackoutStackValidation;
extern DWORD g_EntryPointProbeAmount;

//-----------------------------------------------------------------------------
// Check if a cookie is still at the given marker
//-----------------------------------------------------------------------------
inline BOOL IsMarkerOverrun(UINT_PTR *pMarker)
{
    return (*pMarker != STACK_COOKIE_VALUE);
}
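
// Illustrative sketch (not part of this header): the marker machinery plants
// STACK_COOKIE_VALUE at the chosen address and later checks it with
// IsMarkerOverrun. The local variable below is hypothetical.
//
//   UINT_PTR *pMarker = /* address numPages below the current stack pointer */;
//   *pMarker = STACK_COOKIE_VALUE;                  // plant the cookie
//   /* ... run the code whose stack consumption is being validated ... */
//   if (IsMarkerOverrun(pMarker))
//       SOBackoutViolation(__FUNCTION__, __FILE__, __LINE__);   // report the overrun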

class AutoCleanupStackMarker : public BaseStackMarker
{
public:
    DEBUG_NOINLINE AutoCleanupStackMarker(float numPages) :
        BaseStackMarker(numPages, TRUE)
    {
        SCAN_SCOPE_BEGIN;
        ANNOTATION_FN_SO_INTOLERANT;
    }

    DEBUG_NOINLINE ~AutoCleanupStackMarker()
    {
        SCAN_SCOPE_END;
        CheckForBackoutViolation();
    }
};

#define VALIDATE_BACKOUT_STACK_CONSUMPTION \
    AutoCleanupStackMarker __stackMarker(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT));

#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR(numPages) \
    AutoCleanupStackMarker __stackMarker(ADJUST_PROBE(numPages));

#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE \
    BaseStackMarker __stackMarkerNoDisable(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT), FALSE);

#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages) \
    BaseStackMarker __stackMarkerNoDisable(ADJUST_PROBE(numPages), FALSE);

#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE \
    __stackMarkerNoDisable.CheckForBackoutViolation();

#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType) \
    _ASSERTE(validationType != HSV_NoValidation); \
    AutoCleanupStackMarker __stackMarker( \
        ADJUST_PROBE(validationType == HSV_ValidateNormalStackReq ? HOLDER_CODE_NORMAL_STACK_LIMIT : HOLDER_CODE_MINIMUM_STACK_LIMIT));
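
// Illustrative usage sketch (not part of this header): a holder's release path
// validating its own stack consumption. SomeHolder is a hypothetical type; the
// validation type selects between the normal and minimum page budgets above.
//
//   void SomeHolder::DoRelease()   // hypothetical
//   {
//       VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(HSV_ValidateMinimumStackReq);
//       /* ... release work limited to HOLDER_CODE_MINIMUM_STACK_LIMIT pages ... */
//   }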

class AutoCleanupDisableBackoutStackValidation
{
public:
    AutoCleanupDisableBackoutStackValidation();
    ~AutoCleanupDisableBackoutStackValidation();

private:
    BOOL m_fAlreadyDisabled;

};

// This macro disables the backout stack validation in the current scope. It should
// only be used in very rare situations. If you think you might have such a situation,
// please talk to the stack overflow devs before using it.
#define DISABLE_BACKOUT_STACK_VALIDATION \
    AutoCleanupDisableBackoutStackValidation __disableBackoutStackValidation;

// In debug mode, we want to do a little more work on this transition to note the transition in the thread.
class DebugSOIntolerantTransitionHandler : public SOIntolerantTransitionHandler
{
    BOOL m_prevSOTolerantState;
    ClrDebugState* m_clrDebugState;

public:
    DebugSOIntolerantTransitionHandler();
    ~DebugSOIntolerantTransitionHandler();
};

// This is the base class structure for our probe infrastructure. We declare it here
// so that we can properly declare instances outside of the VM tree. But we only do the
// probes when we have a managed thread.
class BaseStackGuardGeneric
{
public:
    enum
    {
        cPartialInit,   // Not yet initialized
        cInit,          // Initialized and installed
        cUnwound,       // Unwound on a normal path (used for debugging)
        cEHUnwound      // Unwound on an exception path (used for debugging)
    } m_eInitialized;

    // *** Following fields must not move. The fault injection framework depends on them.
    BaseStackGuard *m_pPrevGuard;       // Previous guard for this thread.
    UINT_PTR       *m_pMarker;          // Pointer to where to put our marker cookie on the stack.
    unsigned int    m_numPages;         // space needed, specified in number of pages
    BOOL            m_isBoundaryGuard;  // used to mark when we've left the EE
    BOOL            m_fDisabled;        // Used to enable/disable stack guard


    // *** End of fault injection-dependent fields

    // The following fields are really here to provide us with some nice debugging information.
    const char     *m_szFunction;
    const char     *m_szFile;
    unsigned int    m_lineNum;
    const char     *m_szNextFunction;   // Name of the probe that came after us.
    const char     *m_szNextFile;
    unsigned int    m_nextLineNum;
    DWORD           m_UniqueId;
    unsigned int    m_depth;            // How deep is this guard in the list of guards for this thread?
    BOOL            m_fProtectedStackPage;  // TRUE if we protected a stack page with PAGE_NOACCESS.
    BOOL            m_fEHInProgress;    // Is an EH in progress? This is cleared on a catch.
    BOOL            m_exceptionOccurred;    // Did an exception occur through this probe?

protected:
    BaseStackGuardGeneric()
    {
    }

public:
    BaseStackGuardGeneric(const char *szFunction, const char *szFile, unsigned int lineNum) :
        m_pPrevGuard(NULL), m_pMarker(NULL),
        m_szFunction(szFunction), m_szFile(szFile), m_lineNum(lineNum),
        m_szNextFunction(NULL), m_szNextFile(NULL), m_nextLineNum(0),
        m_fProtectedStackPage(FALSE), m_UniqueId(-1), m_numPages(0),
        m_eInitialized(cPartialInit), m_fDisabled(FALSE),
        m_isBoundaryGuard(FALSE),
        m_fEHInProgress(FALSE),
        m_exceptionOccurred(FALSE)
    {
        STATIC_CONTRACT_LEAF;
    }

    BOOL RequiresNStackPages(unsigned int n, BOOL fThrowOnSO = TRUE)
    {
        if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
        {
            return TRUE;
        }
        return g_fp_BaseStackGuard_RequiresNStackPages(this, n, fThrowOnSO);
    }

    BOOL RequiresNStackPagesThrowing(unsigned int n)
    {
        if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
        {
            return TRUE;
        }
        return g_fp_BaseStackGuard_RequiresNStackPages(this, n, TRUE);
    }

    BOOL RequiresNStackPagesNoThrow(unsigned int n)
    {
        if (g_fp_BaseStackGuard_RequiresNStackPages == NULL)
        {
            return TRUE;
        }
        return g_fp_BaseStackGuard_RequiresNStackPages(this, n, FALSE);
    }

    void CheckStack()
    {
        if (m_eInitialized == cInit)
        {
            g_fp_BaseStackGuard_CheckStack(this);
        }
    }

    void SetNoException()
    {
        m_exceptionOccurred = FALSE;
    }

    BOOL DidExceptionOccur()
    {
        return m_exceptionOccurred;
    }

    BOOL Enabled()
    {
        return !m_fDisabled;
    }

    void DisableGuard()
    {
        // As long as we don't have threads mucking with other threads' stack
        // guards, we don't need to synchronize this.
        m_fDisabled = TRUE;
    }

    void EnableGuard()
    {
        // As long as we don't have threads mucking with other threads' stack
        // guards, we don't need to synchronize this.
        m_fDisabled = FALSE;
    }


};
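
// Illustrative usage sketch (not part of this header): using the base guard
// directly, probing for stack up front and checking the guard on the way out.
// The failure action is hypothetical; the BEGIN_SO_INTOLERANT_CODE* macros below
// wrap this pattern using the auto-cleanup variant.
//
//   BaseStackGuardGeneric guard(__FUNCTION__, __FILE__, __LINE__);
//   if (!guard.RequiresNStackPagesNoThrow(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT)))
//   {
//       return E_OUTOFMEMORY;    // hypothetical action on a failed probe
//   }
//   /* ... guarded work ... */
//   guard.CheckStack();          // verify the marker and retire the probe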

class StackGuardDisabler
{
    BOOL m_fDisabledGuard;

public:
    StackGuardDisabler();
    ~StackGuardDisabler();
    void NeverRestoreGuard();

};



// Derived version: adds a dtor that automatically calls CheckStack. More convenient, but can't be used with SEH.
class AutoCleanupStackGuardGeneric : public BaseStackGuardGeneric
{
protected:
    AutoCleanupStackGuardGeneric()
    {
    }

public:
    AutoCleanupStackGuardGeneric(const char *szFunction, const char *szFile, unsigned int lineNum) :
        BaseStackGuardGeneric(szFunction, szFile, lineNum)
    {
        STATIC_CONTRACT_LEAF;
    }

    ~AutoCleanupStackGuardGeneric()
    {
        STATIC_CONTRACT_WRAPPER;
        CheckStack();
    }
};


// Used to remove the stack guard... (kind of like a poor man's BEGIN_SO_TOLERANT).
#define REMOVE_STACK_GUARD \
    StackGuardDisabler __guardDisable;

// Used to transition into intolerant code when handling an SO
#define BEGIN_SO_INTOLERANT_CODE_NOPROBE \
    { \
        DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
        /* work around unreachable code warning */ \
        if (true) \
        { \
            DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)

#define END_SO_INTOLERANT_CODE_NOPROBE \
            ; \
            DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
        } \
        __soIntolerantTransitionHandler.SetNoException(); \
    }



#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO) \
    { \
        AutoCleanupStackGuardGeneric stack_guard_XXX(__FUNCTION__, __FILE__, __LINE__); \
        if (! stack_guard_XXX.RequiresNStackPagesNoThrow(ADJUST_PROBE(g_EntryPointProbeAmount))) \
        { \
            ActionOnSO; \
        } \
        else \
        { \
            DebugSOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
            ANNOTATION_SO_PROBE_BEGIN(DEFAULT_ENTRY_PROBE_AMOUNT); \
            if (true) \
            { \
                DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)


#define END_SO_INTOLERANT_CODE \
                ; \
                DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
            } \
            ANNOTATION_SO_PROBE_END; \
            __soIntolerantTransitionHandler.SetNoException(); \
            stack_guard_XXX.SetNoException(); \
        } \
    }

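// Illustrative usage sketch (not part of this header): probing at a non-throwing
// entry point. The callback and the HRESULT policy are hypothetical; ActionOnSO
// runs only if the requested number of pages is not available.
//
//   HRESULT hr = S_OK;
//   BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(hr = COR_E_STACKOVERFLOW);
//   hr = DoSomeWork();   // hypothetical work inside the probed region
//   END_SO_INTOLERANT_CODE;
//   return hr;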

#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO() \
    EnsureSOTolerant(); \
    BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow());


// Restores the SO-tolerance state and the marker for the current guard if any
#define RESTORE_SO_TOLERANCE_STATE \
    RestoreSOToleranceState();

#define HANDLE_STACKOVERFLOW_AFTER_CATCH \
    HandleStackOverflowAfterCatch()

#elif defined(STACK_GUARDS_RELEASE)

#define VALIDATE_BACKOUT_STACK_CONSUMPTION
#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR
#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages)
#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType)
#define RESTORE_SO_TOLERANCE_STATE
#define HANDLE_STACKOVERFLOW_AFTER_CATCH \
    HandleStackOverflowAfterCatch()
#define DISABLE_BACKOUT_STACK_VALIDATION
#define BACKOUT_STACK_VALIDATION_VIOLATION
#define BEGIN_SO_INTOLERANT_CODE_NOPROBE
#define END_SO_INTOLERANT_CODE_NOPROBE
#define REMOVE_STACK_GUARD

#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO) \
    { \
        if (g_StackProbingEnabled && !g_fpDoProbe(ADJUST_PROBE(DEFAULT_ENTRY_PROBE_AMOUNT))) \
        { \
            ActionOnSO; \
        } else { \
            SOIntolerantTransitionHandler __soIntolerantTransitionHandler; \
            /* work around unreachable code warning */ \
            if (true) \
            { \
                DEBUG_ASSURE_NO_RETURN_BEGIN(SO_INTOLERANT)

#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO() \
    BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(DontCallDirectlyForceStackOverflow());

#define END_SO_INTOLERANT_CODE \
                ; \
                DEBUG_ASSURE_NO_RETURN_END(SO_INTOLERANT) \
            } \
            __soIntolerantTransitionHandler.SetNoException(); \
        } \
    }

#endif

#endif // !DACCESS_COMPILE
#endif // FEATURE_STACK_PROBE

// if the feature is off or we are compiling for DAC, disable all the probes
#if !defined(FEATURE_STACK_PROBE) || defined(DACCESS_COMPILE)

#define VALIDATE_BACKOUT_STACK_CONSUMPTION
#define VALIDATE_BACKOUT_STACK_CONSUMPTION_FOR
#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
#define UNSAFE_BEGIN_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE_FOR(numPages)
#define UNSAFE_END_VALIDATE_BACKOUT_STACK_CONSUMPTION_NO_DISABLE
#define VALIDATE_HOLDER_STACK_CONSUMPTION_FOR_TYPE(validationType)
#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD(ActionOnSO)
#define BEGIN_SO_INTOLERANT_CODE_NO_THROW_CHECK_THREAD_FORCE_SO()
#define END_SO_INTOLERANT_CODE
#define RESTORE_SO_TOLERANCE_STATE

#define HANDLE_STACKOVERFLOW_AFTER_CATCH

#define DISABLE_BACKOUT_STACK_VALIDATION
#define BACKOUT_STACK_VALIDATION_VIOLATION
#define BEGIN_SO_INTOLERANT_CODE_NOPROBE
#define END_SO_INTOLERANT_CODE_NOPROBE
#define REMOVE_STACK_GUARD

// Probe size is 0 as Stack Overflow probing is not enabled
#define DEFAULT_ENTRY_PROBE_AMOUNT 0

#define BACKOUT_CODE_STACK_LIMIT 0

#endif // !FEATURE_STACK_PROBE || DACCESS_COMPILE

#endif // __GENERICSTACKPROBE_h__