// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//

//

#include "common.h"

#ifdef WIN64EXCEPTIONS
#include "exceptionhandling.h"
#include "dbginterface.h"
#include "asmconstants.h"
#include "eetoprofinterfacewrapper.inl"
#include "eedbginterfaceimpl.inl"
#include "perfcounters.h"
#include "eventtrace.h"
#include "virtualcallstub.h"

#if defined(_TARGET_X86_)
#define USE_CURRENT_CONTEXT_IN_FILTER
#endif // _TARGET_X86_

#if defined(_TARGET_ARM_) || defined(_TARGET_X86_)
#define VSD_STUB_CAN_THROW_AV
#endif // _TARGET_ARM_ || _TARGET_X86_

#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
// ARM/ARM64 uses Caller-SP to locate PSPSym in the funclet frame.
#define USE_CALLER_SP_IN_FUNCLET
#endif // _TARGET_ARM_ || _TARGET_ARM64_

#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) || defined(_TARGET_X86_)
#define ADJUST_PC_UNWOUND_TO_CALL
#define STACK_RANGE_BOUNDS_ARE_CALLER_SP
#define USE_FUNCLET_CALL_HELPER
// For ARM/ARM64, EstablisherFrame is Caller-SP (SP just before executing call instruction).
// This has been confirmed by AaronGi from the kernel team for Windows.
//
// For x86/Linux, RtlVirtualUnwind sets EstablisherFrame as Caller-SP.
#define ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
#endif // _TARGET_ARM_ || _TARGET_ARM64_ || _TARGET_X86_

#ifndef FEATURE_PAL
void __declspec(noinline)
ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord,
            UINT_PTR          ReturnValue,
            UINT_PTR          TargetIP,
            UINT_PTR          TargetFrameSp);
#endif // !FEATURE_PAL

#ifdef USE_CURRENT_CONTEXT_IN_FILTER
inline void CaptureNonvolatileRegisters(PKNONVOLATILE_CONTEXT pNonvolatileContext, PCONTEXT pContext)
{
#define CALLEE_SAVED_REGISTER(reg) pNonvolatileContext->reg = pContext->reg;
    ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}

inline void RestoreNonvolatileRegisters(PCONTEXT pContext, PKNONVOLATILE_CONTEXT pNonvolatileContext)
{
#define CALLEE_SAVED_REGISTER(reg) pContext->reg = pNonvolatileContext->reg;
    ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}

inline void RestoreNonvolatileRegisterPointers(PT_KNONVOLATILE_CONTEXT_POINTERS pContextPointers, PKNONVOLATILE_CONTEXT pNonvolatileContext)
{
#define CALLEE_SAVED_REGISTER(reg) pContextPointers->reg = &pNonvolatileContext->reg;
    ENUM_CALLEE_SAVED_REGISTERS();
#undef CALLEE_SAVED_REGISTER
}
#endif
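
// For illustration only: ENUM_CALLEE_SAVED_REGISTERS() expands the locally
// defined CALLEE_SAVED_REGISTER macro once per callee-saved register. On x86
// (the only target that defines USE_CURRENT_CONTEXT_IN_FILTER; the register
// set shown here is an assumption taken from the x86 UPDATEREG list later in
// this file), the body of CaptureNonvolatileRegisters is therefore roughly:
//
//     pNonvolatileContext->Ebx = pContext->Ebx;
//     pNonvolatileContext->Esi = pContext->Esi;
//     pNonvolatileContext->Edi = pContext->Edi;
//     pNonvolatileContext->Ebp = pContext->Ebp;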
#ifndef DACCESS_COMPILE

// o Functions and funclets are tightly associated.  In fact, they are laid out in contiguous memory.
//   They also present some interesting issues with respect to EH because we will see callstacks with
//   both functions and funclets, but need to logically treat them as the single original IL function
//   that described them.
//
// o All funclets are ripped out of line from the main function.  Finally clauses are pulled out of
//   line and replaced by calls to the funclets.  Catch clauses, however, are simply pulled out of
//   line.  !!!This causes a loss of nesting information in clause offsets.!!!  A canonical example of
//   two different functions which look identical due to clause removal is shown in the code
//   snippets below.  The reason they look identical in the face of out-of-line funclets is that the
//   region bounds for the "try A" region collapse and become identical to the region bounds for
//   region "try B".  This will look identical to the region information for Bar because Bar must
//   have a separate entry for each catch clause, both of which will have the same try-region bounds.
//
//      void Foo()                  void Bar()
//      {                           {
//          try A                       try C
//          {                           {
//              try B                       BAR_BLK_1
//              {                       }
//                  FOO_BLK_1           catch C
//              }                       {
//              catch B                     BAR_BLK_2
//              {                       }
//                  FOO_BLK_2           catch D
//              }                       {
//          }                               BAR_BLK_3
//          catch A                     }
//          {                       }
//              FOO_BLK_3
//          }
//      }
//
// o The solution is to duplicate all clauses that logically cover the funclet in its parent
//   method, but with the try-region covering the entire out-of-line funclet code range.  This
//   differentiates the canonical example above because the CatchB funclet will have a try-clause
//   covering it whose associated handler is CatchA.  In Bar, there is no such duplication of any
//   clauses.  (A sketch of the resulting clause table follows this comment block.)
//
// o The behavior of the personality routine depends upon the JIT to properly order the clauses from
//   inside-out.  This allows us to properly handle a situation where our control PC is covered by clauses
//   that should not be considered because a more nested clause will catch the exception and resume within
//   the scope of the outer clauses.
//
// o This sort of clause duplication for funclets should be done for all clause types, not just catches.
//   Unfortunately, I cannot articulate why at the moment.
//
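// The following is an illustrative sketch, not runtime data: the clause names
// and table layout are assumptions made for exposition (the real EH info is
// encoded in the JIT's EH clause tables).  Conceptually, after funclet
// extraction Foo carries three clauses while Bar carries only two:
//
//      try-region covered              handler     origin
//      ------------------              -------     ------
//      [FOO_BLK_1 bounds]              CatchB      original (innermost)
//      [FOO_BLK_1 bounds]              CatchA      original (outer; bounds collapsed)
//      [CatchB funclet code range]     CatchA      duplicated to cover the funclet
//
// The third, duplicated entry is what distinguishes Foo from Bar, whose two
// entries share identical try-region bounds and have no duplicated clause.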
#ifdef _DEBUG
void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc);
static void DoEHLog(DWORD lvl, __in_z const char *fmt, ...);
#define EH_LOG(expr)  { DoEHLog expr ; }
#else
#define EH_LOG(expr)
#endif

TrackerAllocator    g_theTrackerAllocator;

bool FixNonvolatileRegisters(UINT_PTR  uOriginalSP,
                             Thread*   pThread,
                             CONTEXT*  pContextRecord,
                             bool      fAborting
                             );

void FixContext(PCONTEXT pContextRecord)
{
#define FIXUPREG(reg, value)                                                \
    do {                                                                    \
        STRESS_LOG2(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p\n", \
                pContextRecord->reg,                                        \
                (value));                                                   \
        pContextRecord->reg = (value);                                      \
    } while (0)

#ifdef _TARGET_X86_
    size_t resumeSp = EECodeManager::GetResumeSp(pContextRecord);
    FIXUPREG(Esp, resumeSp);
#endif // _TARGET_X86_

#undef FIXUPREG
}

MethodDesc * GetUserMethodForILStub(Thread * pThread, UINT_PTR uStubSP, MethodDesc * pILStubMD, Frame ** ppFrameOut);

#ifdef FEATURE_PAL
BOOL HandleHardwareException(PAL_SEHException* ex);
BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord);
#endif // FEATURE_PAL

static ExceptionTracker* GetTrackerMemory()
{
    CONTRACTL
    {
        GC_TRIGGERS;
        NOTHROW;
        MODE_ANY;
    }
    CONTRACTL_END;

    return g_theTrackerAllocator.GetTrackerMemory();
}

void FreeTrackerMemory(ExceptionTracker* pTracker, TrackerMemoryType mem)
{
    CONTRACTL
    {
        GC_NOTRIGGER;
        NOTHROW;
        MODE_ANY;
    }
    CONTRACTL_END;

    if (mem & memManaged)
    {
        pTracker->ReleaseResources();
    }

    if (mem & memUnmanaged)
    {
        g_theTrackerAllocator.FreeTrackerMemory(pTracker);
    }
}

static inline void UpdatePerformanceMetrics(CrawlFrame *pcfThisFrame, BOOL bIsRethrownException, BOOL bIsNewException)
{
    WRAPPER_NO_CONTRACT;
    COUNTER_ONLY(GetPerfCounters().m_Excep.cThrown++);

    // Fire an exception thrown ETW event when an exception occurs
    ETW::ExceptionLog::ExceptionThrown(pcfThisFrame, bIsRethrownException, bIsNewException);
}

void ShutdownEEAndExitProcess()
{
    ForceEEShutdown(SCA_ExitProcessWhenShutdownComplete);
}

void InitializeExceptionHandling()
{
    EH_LOG((LL_INFO100, "InitializeExceptionHandling(): ExceptionTracker size: 0x%x bytes\n", sizeof(ExceptionTracker)));

    InitSavedExceptionInfo();

    CLRAddVectoredHandlers();

    g_theTrackerAllocator.Init();

    // Initialize the lock used for synchronizing access to the stacktrace in the exception object
    g_StackTraceArrayLock.Init(LOCK_TYPE_DEFAULT, TRUE);

#ifdef FEATURE_PAL
    // Register handler of hardware exceptions like null reference in PAL
    PAL_SetHardwareExceptionHandler(HandleHardwareException, IsSafeToHandleHardwareException);

    // Register handler for determining whether the specified IP has code that is a GC marker for GCCover
    PAL_SetGetGcMarkerExceptionCode(GetGcMarkerExceptionCode);

    // Register handler for termination requests (e.g. SIGTERM)
    PAL_SetTerminationRequestHandler(ShutdownEEAndExitProcess);
#endif // FEATURE_PAL
}

struct UpdateObjectRefInResumeContextCallbackState
{
    UINT_PTR uResumeSP;
    Frame *pHighestFrameWithRegisters;
    TADDR uResumeFrameFP;
    TADDR uICFCalleeSavedFP;

#ifdef _DEBUG
    UINT nFrames;
    bool fFound;
#endif
};

// Stack unwind callback for UpdateObjectRefInResumeContext().
StackWalkAction UpdateObjectRefInResumeContextCallback(CrawlFrame* pCF, LPVOID pData)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    UpdateObjectRefInResumeContextCallbackState *pState = (UpdateObjectRefInResumeContextCallbackState*)pData;
    CONTEXT* pSrcContext = pCF->GetRegisterSet()->pCurrentContext;

    INDEBUG(pState->nFrames++);

    // Check to see if we have reached the resume frame.
    if (pCF->IsFrameless())
    {
        // At this point, we are trying to find the managed frame containing the catch handler to be invoked.
        // This is done by comparing the SP of the managed frame for which this callback was invoked with the
        // SP the OS passed to our personality routine for the current managed frame. If they match, then we have
        // reached the target frame.
        //
        // It is possible that a managed frame may execute a PInvoke after performing a stackalloc:
        //
        // 1) The ARM JIT will always inline the PInvoke in the managed frame, whether or not the frame
        //    contains EH. As a result, the ICF will live in the same frame which performs stackalloc.
        //
        // 2) JIT64 will only inline the PInvoke in the managed frame if the frame *does not* contain EH. If it does,
        //    then the pinvoke will be performed via an ILStub and thus, stackalloc will be performed in a frame different
        //    from the one (ILStub) that contains the ICF.
        //
        // Thus, for the scenario where the catch handler lives in the frame that performed stackalloc, in case of
        // the ARM JIT, the SP returned by the OS will be the SP *after* the stackalloc has happened. However,
        // the stackwalker will invoke this callback with the CrawlFrame SP that was initialized at the time the ICF was
        // setup, i.e., it will be the SP after the prolog has executed (refer to InlinedCallFrame::UpdateRegDisplay).
        //
        // Thus, checking only the SP will not work for this scenario when using the ARM JIT.
        //
        // To address this case, the callback data also contains the frame pointer (FP) passed by the OS. This will
        // be the value that is saved in the "CalleeSavedFP" field of the InlinedCallFrame during ICF
        // initialization. When the stackwalker sees an ICF and invokes this callback, we copy the value of "CalleeSavedFP"
        // into the data structure passed to this callback.
        //
        // Later, when the stackwalker invokes the callback for the managed frame containing the ICF, and the check
        // for SP comparison fails, we will compare the FP value we got from the ICF with the FP value the OS passed
        // to us. If they match, then we have reached the resume frame.
        //
        // Note: This problem/scenario is not applicable to JIT64 since it does not perform pinvoke inlining if the
        // method containing the pinvoke also contains EH. Thus, the SP check will never fail for it.
        if (pState->uResumeSP == GetSP(pSrcContext))
        {
            INDEBUG(pState->fFound = true);

            return SWA_ABORT;
        }

        // Perform the FP check, as explained above.
        if ((pState->uICFCalleeSavedFP != 0) && (pState->uICFCalleeSavedFP == pState->uResumeFrameFP))
        {
            // FP from ICF is the one that was also copied to the FP register in InlinedCallFrame::UpdateRegDisplay.
            _ASSERTE(pState->uICFCalleeSavedFP == GetFP(pSrcContext));

            INDEBUG(pState->fFound = true);

            return SWA_ABORT;
        }

        // Reset the ICF FP in callback data
        pState->uICFCalleeSavedFP = 0;
    }
    else
    {
        Frame *pFrame = pCF->GetFrame();

        if (pFrame->NeedsUpdateRegDisplay())
        {
            CONSISTENCY_CHECK(pFrame >= pState->pHighestFrameWithRegisters);
            pState->pHighestFrameWithRegisters = pFrame;

            // Is this an InlinedCallFrame?
            if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())
            {
                // If we are here, then the ICF is expected to be active.
                _ASSERTE(InlinedCallFrame::FrameHasActiveCall(pFrame));

                // Copy the CalleeSavedFP to the data structure that is passed to this callback
                // by the stackwalker. This is the value of the frame pointer when the ICF was setup
                // in a managed frame.
                //
                // Setting this value here is based upon the assumption (which holds true on X64 and ARM) that
                // the stackwalker invokes the callback for explicit frames before their
                // container/corresponding managed frame.
                pState->uICFCalleeSavedFP = ((PTR_InlinedCallFrame)pFrame)->GetCalleeSavedFP();
            }
            else
            {
                // For any other frame, simply reset the uICFCalleeSavedFP field
                pState->uICFCalleeSavedFP = 0;
            }
        }
    }

    return SWA_CONTINUE;
}


//
// Locates the locations of the nonvolatile registers.  This will be used to
// retrieve the latest values of the object references before we resume
// execution from an exception.
//
//static
bool ExceptionTracker::FindNonvolatileRegisterPointers(Thread* pThread, UINT_PTR uOriginalSP, REGDISPLAY* pRegDisplay, TADDR uResumeFrameFP)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    //
    // Find the highest frame below the resume frame that will update the
    // REGDISPLAY.  A normal StackWalkFrames will RtlVirtualUnwind through all
    // managed frames on the stack, so this avoids some unnecessary work.  The
    // frame we find will have all of the nonvolatile registers/other state
    // needed to start a managed unwind from that point.
    //
    Frame *pHighestFrameWithRegisters = NULL;
    Frame *pFrame = pThread->GetFrame();

    while ((UINT_PTR)pFrame < uOriginalSP)
    {
        if (pFrame->NeedsUpdateRegDisplay())
            pHighestFrameWithRegisters = pFrame;

        pFrame = pFrame->Next();
    }

    //
    // Do a stack walk from this frame.  This may find a higher frame within
    // the resume frame (ex. inlined pinvoke frame).  This will also update
    // the REGDISPLAY pointers if any intervening managed frames saved
    // nonvolatile registers.
    //

    UpdateObjectRefInResumeContextCallbackState state;

    state.uResumeSP = uOriginalSP;
    state.uResumeFrameFP = uResumeFrameFP;
    state.uICFCalleeSavedFP = 0;
    state.pHighestFrameWithRegisters = pHighestFrameWithRegisters;

    INDEBUG(state.nFrames = 0);
    INDEBUG(state.fFound = false);

    pThread->StackWalkFramesEx(pRegDisplay, &UpdateObjectRefInResumeContextCallback, &state, 0, pHighestFrameWithRegisters);

    // For managed exceptions, we should at least find a HelperMethodFrame (the one we put in IL_Throw()).
    // For native exceptions such as AVs, we should at least find the FaultingExceptionFrame.
    // If we don't find anything, then we must have hit an SO while trying to erect an HMF.
    // Bail out in such situations.
    //
    // Note that pinvoke frames may be inlined in a managed method, so we cannot use the child SP (a.k.a. the current SP)
    // to check for explicit frames "higher" on the stack ("higher" here means closer to the leaf frame). The stackwalker
    // knows how to deal with inlined pinvoke frames, and it issues callbacks for them before issuing the callback for the
    // containing managed method. So we have to do this check after we are done with the stackwalk.
    pHighestFrameWithRegisters = state.pHighestFrameWithRegisters;
    if (pHighestFrameWithRegisters == NULL)
    {
        return false;
    }

    CONSISTENCY_CHECK(state.nFrames);
    CONSISTENCY_CHECK(state.fFound);
    CONSISTENCY_CHECK(NULL != pHighestFrameWithRegisters);

    //
    // Now the REGDISPLAY has been unwound to the resume frame.  The
    // nonvolatile registers will either point into pHighestFrameWithRegisters,
    // an inlined pinvoke frame, or into calling managed frames.
    //

    return true;
}


//static
void ExceptionTracker::UpdateNonvolatileRegisters(CONTEXT *pContextRecord, REGDISPLAY *pRegDisplay, bool fAborting)
{
    CONTEXT* pAbortContext = NULL;
    if (fAborting)
    {
        pAbortContext = GetThread()->GetAbortContext();
    }

#ifndef FEATURE_PAL
#define HANDLE_NULL_CONTEXT_POINTER _ASSERTE(false)
#else // FEATURE_PAL
#define HANDLE_NULL_CONTEXT_POINTER
#endif // FEATURE_PAL

#define UPDATEREG(reg)                                                                  \
    do {                                                                                \
        if (pRegDisplay->pCurrentContextPointers->reg != NULL)                          \
        {                                                                               \
            STRESS_LOG3(LF_GCROOTS, LL_INFO100, "Updating " #reg " %p to %p from %p\n", \
                    pContextRecord->reg,                                                \
                    *pRegDisplay->pCurrentContextPointers->reg,                         \
                    pRegDisplay->pCurrentContextPointers->reg);                         \
            pContextRecord->reg = *pRegDisplay->pCurrentContextPointers->reg;           \
        }                                                                               \
        else                                                                            \
        {                                                                               \
            HANDLE_NULL_CONTEXT_POINTER;                                                \
        }                                                                               \
        if (pAbortContext)                                                              \
        {                                                                               \
            pAbortContext->reg = pContextRecord->reg;                                   \
        }                                                                               \
    } while (0)

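// For illustration only (a sketch, not compiled code): ignoring the logging,
// UPDATEREG(Rbx) on AMD64 expands to roughly the following, where the context
// pointer refers to the location in which the unwind found that nonvolatile
// register:
//
//     if (pRegDisplay->pCurrentContextPointers->Rbx != NULL)
//         pContextRecord->Rbx = *pRegDisplay->pCurrentContextPointers->Rbx;
//     else
//         HANDLE_NULL_CONTEXT_POINTER;
//     if (pAbortContext)
//         pAbortContext->Rbx = pContextRecord->Rbx;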

#if defined(_TARGET_X86_)

    UPDATEREG(Ebx);
    UPDATEREG(Esi);
    UPDATEREG(Edi);
    UPDATEREG(Ebp);

#elif defined(_TARGET_AMD64_)

    UPDATEREG(Rbx);
    UPDATEREG(Rbp);
#ifndef UNIX_AMD64_ABI
    UPDATEREG(Rsi);
    UPDATEREG(Rdi);
#endif
    UPDATEREG(R12);
    UPDATEREG(R13);
    UPDATEREG(R14);
    UPDATEREG(R15);

#elif defined(_TARGET_ARM_)

    UPDATEREG(R4);
    UPDATEREG(R5);
    UPDATEREG(R6);
    UPDATEREG(R7);
    UPDATEREG(R8);
    UPDATEREG(R9);
    UPDATEREG(R10);
    UPDATEREG(R11);

#elif defined(_TARGET_ARM64_)

    UPDATEREG(X19);
    UPDATEREG(X20);
    UPDATEREG(X21);
    UPDATEREG(X22);
    UPDATEREG(X23);
    UPDATEREG(X24);
    UPDATEREG(X25);
    UPDATEREG(X26);
    UPDATEREG(X27);
    UPDATEREG(X28);
    UPDATEREG(Fp);

#else
    PORTABILITY_ASSERT("ExceptionTracker::UpdateNonvolatileRegisters");
#endif

#undef UPDATEREG
}


#ifndef _DEBUG
#define DebugLogExceptionRecord(pExceptionRecord)
#else // _DEBUG
#define LOG_FLAG(name)  \
    if (flags & name) \
    { \
        LOG((LF_EH, LL_INFO100, "" #name " ")); \
    } \

void DebugLogExceptionRecord(EXCEPTION_RECORD* pExceptionRecord)
{
    ULONG flags = pExceptionRecord->ExceptionFlags;

    EH_LOG((LL_INFO100, ">>exr: %p, code: %08x, addr: %p, flags: 0x%02x ", pExceptionRecord, pExceptionRecord->ExceptionCode, pExceptionRecord->ExceptionAddress, flags));

    LOG_FLAG(EXCEPTION_NONCONTINUABLE);
    LOG_FLAG(EXCEPTION_UNWINDING);
    LOG_FLAG(EXCEPTION_EXIT_UNWIND);
    LOG_FLAG(EXCEPTION_STACK_INVALID);
    LOG_FLAG(EXCEPTION_NESTED_CALL);
    LOG_FLAG(EXCEPTION_TARGET_UNWIND);
    LOG_FLAG(EXCEPTION_COLLIDED_UNWIND);

    LOG((LF_EH, LL_INFO100, "\n"));

}

LPCSTR DebugGetExceptionDispositionName(EXCEPTION_DISPOSITION disp)
{

    switch (disp)
    {
    case ExceptionContinueExecution:    return "ExceptionContinueExecution";
    case ExceptionContinueSearch:       return "ExceptionContinueSearch";
    case ExceptionNestedException:      return "ExceptionNestedException";
    case ExceptionCollidedUnwind:       return "ExceptionCollidedUnwind";
    default:
        UNREACHABLE_MSG("Invalid EXCEPTION_DISPOSITION!");
    }
}
#endif // _DEBUG

bool ExceptionTracker::IsStackOverflowException()
{
    if (m_pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException)
    {
        return true;
    }

    return false;
}

UINT_PTR ExceptionTracker::CallCatchHandler(CONTEXT* pContextRecord, bool* pfAborting /*= NULL*/)
{
    CONTRACTL
    {
        MODE_COOPERATIVE;
        GC_TRIGGERS;
        THROWS;

        PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
    }
    CONTRACTL_END;

    UINT_PTR    uResumePC = 0;
    ULONG_PTR   ulRelOffset;
    StackFrame  sfStackFp = m_sfResumeStackFrame;
    Thread*     pThread = m_pThread;
    MethodDesc* pMD = m_pMethodDescOfCatcher;
    bool        fIntercepted = false;

    ThreadExceptionState* pExState = pThread->GetExceptionState();

#if defined(DEBUGGING_SUPPORTED)

    // If the exception is intercepted, use the information stored in the DebuggerExState to resume the
    // exception instead of calling the catch clause (there may not even be one).
    if (pExState->GetFlags()->DebuggerInterceptInfo())
    {
        _ASSERTE(pMD != NULL);

        // retrieve the interception information
        pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&(sfStackFp.SP), &ulRelOffset, NULL);

        PCODE pStartAddress = pMD->GetNativeCode();

        EECodeInfo codeInfo(pStartAddress);
        _ASSERTE(codeInfo.IsValid());

        // Note that the value returned for ulRelOffset is actually the offset,
        // so we need to adjust it to get the actual IP.
        _ASSERTE(FitsIn<DWORD>(ulRelOffset));
        uResumePC = codeInfo.GetJitManager()->GetCodeAddressForRelOffset(codeInfo.GetMethodToken(), static_cast<DWORD>(ulRelOffset));

        // Either we haven't set m_sfResumeStackFrame (for unhandled managed exceptions), or we have set it
        // and it equals sfStackFp.
        _ASSERTE(m_sfResumeStackFrame.IsNull() || m_sfResumeStackFrame == sfStackFp);

        fIntercepted = true;
    }
#endif // DEBUGGING_SUPPORTED

    _ASSERTE(!sfStackFp.IsNull());

    m_sfResumeStackFrame.Clear();
    m_pMethodDescOfCatcher = NULL;

    _ASSERTE(pContextRecord);

    //
    // call the handler
    //
    EH_LOG((LL_INFO100, " calling catch at 0x%p\n", m_uCatchToCallPC));

    // do not call the catch clause if the exception is intercepted
    if (!fIntercepted)
    {
        _ASSERTE(m_uCatchToCallPC != 0 && m_pClauseForCatchToken != NULL);
        uResumePC = CallHandler(m_uCatchToCallPC, sfStackFp, &m_ClauseForCatch, pMD, Catch X86_ARG(pContextRecord) ARM_ARG(pContextRecord) ARM64_ARG(pContextRecord));
    }
    else
    {
        // Since the exception has been intercepted and we could be resuming execution at any
        // user-specified arbitrary location, reset the EH clause index and EstablisherFrame
        // we may have saved for addressing any potential ThreadAbort raise.
        //
        // This is done since the saved EH clause index is related to the catch block executed,
        // which does not happen during interception. As the user specifies where we resume execution,
        // we let that behaviour override the index and pretend as if we have no index available.
        m_dwIndexClauseForCatch = 0;
        m_sfEstablisherOfActualHandlerFrame.Clear();
        m_sfCallerOfActualHandlerFrame.Clear();
    }

    EH_LOG((LL_INFO100, " resume address should be 0x%p\n", uResumePC));

    //
    // Our tracker may have gone away at this point, don't reference it.
    //

    return FinishSecondPass(pThread, uResumePC, sfStackFp, pContextRecord, this, pfAborting);
}

// static
UINT_PTR ExceptionTracker::FinishSecondPass(
            Thread* pThread,
            UINT_PTR uResumePC,
            StackFrame sf,
            CONTEXT* pContextRecord,
            ExceptionTracker* pTracker,
            bool* pfAborting /*= NULL*/)
{
    CONTRACTL
    {
        MODE_COOPERATIVE;
        GC_NOTRIGGER;
        NOTHROW;
        PRECONDITION(CheckPointer(pThread, NULL_NOT_OK));
        PRECONDITION(CheckPointer((void*)uResumePC, NULL_NOT_OK));
        PRECONDITION(CheckPointer(pContextRecord, NULL_OK));
    }
    CONTRACTL_END;

    // Between the time when we pop the ExceptionTracker for the current exception and the time
    // when we actually resume execution, it is unsafe to start a funclet-skipping stackwalk.
    // So we set a flag here to indicate that we are in this time window. The only user of this
    // information right now is the profiler.
    ThreadExceptionFlagHolder tefHolder(ThreadExceptionState::TEF_InconsistentExceptionState);

#ifdef DEBUGGING_SUPPORTED
    // This must be done before we pop the trackers.
    BOOL fIntercepted = pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo();
#endif // DEBUGGING_SUPPORTED

    // Since we may [re]raise ThreadAbort post the catch block execution,
    // save the index, and Establisher, of the EH clause corresponding to the handler
    // we just executed before we release the tracker. This will be used to ensure that reraise
    // proceeds forward and does not get stuck in a loop. Refer to
    // ExceptionTracker::ProcessManagedCallFrame for details.
    DWORD ehClauseCurrentHandlerIndex = pTracker->GetCatchHandlerExceptionClauseIndex();
    StackFrame sfEstablisherOfActualHandlerFrame = pTracker->GetEstablisherOfActualHandlingFrame();

    EH_LOG((LL_INFO100, "second pass finished\n"));
    EH_LOG((LL_INFO100, "cleaning up ExceptionTracker state\n"));

    // Release the exception trackers till the current (specified) frame.
    ExceptionTracker::PopTrackers(sf, true);

    // This will set the last thrown object to be either null if we have handled all the exceptions in the nested chain or
    // to whatever the current exception is.
    //
    // In a case when we're nested inside another catch block, the domain in which we're executing may not be the
    // same as the domain of the throwable that was just made the current throwable above. Therefore, we
    // make a special effort to preserve the domain of the throwable as we update the last thrown object.
    //
    // If an exception is active, we don't want to reset the LastThrownObject to NULL as the active exception
    // might be represented by a tracker created in the second pass (refer to
    // CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass to understand how exception trackers can be
    // created in the 2nd pass on 64bit) that does not have a throwable attached to it. Thus, if this exception
    // is caught in the VM and it attempts to get the LastThrownObject using the GET_THROWABLE macro, then it should be available.
    //
    // But, if the active exception tracker remains consistent in the 2nd pass (which will happen if the exception is caught
    // in managed code), then the call to SafeUpdateLastThrownObject below will automatically update the LTO as per the
    // active exception.
    if (!pThread->GetExceptionState()->IsExceptionInProgress())
    {
        pThread->SafeSetLastThrownObject(NULL);
    }

    // Sync managed exception state, for the managed thread, based upon any active exception tracker
    pThread->SyncManagedExceptionState(false);

    //
    // If we are aborting, we should not resume execution.  Instead, we raise another
    // exception.  However, we do this by resuming execution at our thread redirecter
    // function (RedirectForThrowControl), which is the same process we use for async
    // thread stops.  This redirecter function will cover the stack frame and register
    // stack frame and then throw an exception.  When we first see the exception thrown
    // by this redirecter, we fixup the context for the thread stackwalk by copying
    // pThread->m_OSContext into the dispatcher context and restarting the exception
    // dispatch.  As a result, we need to save off the "correct" resume context before
    // we resume so the exception processing can work properly after redirect.  A side
    // benefit of this mechanism is that it makes synchronous and async thread abort
    // use exactly the same codepaths.
    //
    UINT_PTR uAbortAddr = 0;

#if defined(DEBUGGING_SUPPORTED)
    // Don't honour thread abort requests at this time for intercepted exceptions.
    if (fIntercepted)
    {
        uAbortAddr = 0;
    }
    else
#endif // !DEBUGGING_SUPPORTED
    {
        CopyOSContext(pThread->m_OSContext, pContextRecord);
        SetIP(pThread->m_OSContext, (PCODE)uResumePC);
        uAbortAddr = (UINT_PTR)COMPlusCheckForAbort(uResumePC);
    }

    if (uAbortAddr)
    {
        if (pfAborting != NULL)
        {
            *pfAborting = true;
        }

        EH_LOG((LL_INFO100, "thread abort in progress, resuming thread under control...\n"));

        // We are aborting, so keep the reference to the current EH clause index.
        // We will use this when the exception is reraised and we commence
        // exception dispatch. This is done in ExceptionTracker::ProcessOSExceptionNotification.
        //
        // The "if" condition below can be false if the exception has been intercepted (refer to
        // ExceptionTracker::CallCatchHandler for details)
        if ((ehClauseCurrentHandlerIndex > 0) && (!sfEstablisherOfActualHandlerFrame.IsNull()))
        {
            pThread->m_dwIndexClauseForCatch = ehClauseCurrentHandlerIndex;
            pThread->m_sfEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame;
        }

        CONSISTENCY_CHECK(CheckPointer(pContextRecord));

        STRESS_LOG1(LF_EH, LL_INFO10, "resume under control: ip: %p\n", uResumePC);

#ifdef _TARGET_AMD64_
        pContextRecord->Rcx = uResumePC;
#elif defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
        // On ARM & ARM64, we save off the original PC in Lr. This is the same as done
        // in HandleManagedFault for H/W generated exceptions.
        pContextRecord->Lr = uResumePC;
#endif

        uResumePC = uAbortAddr;
    }

    CONSISTENCY_CHECK(pThread->DetermineIfGuardPagePresent());

    EH_LOG((LL_INFO100, "FinishSecondPass complete, uResumePC = %p, current SP = %p\n", uResumePC, GetCurrentSP()));
    return uResumePC;
}

// On CoreARM, the MemoryStackFp is ULONG when passed by RtlDispatchException,
// unlike its 64-bit counterparts, where it is ULONG64.
EXTERN_C EXCEPTION_DISPOSITION
ProcessCLRException(IN     PEXCEPTION_RECORD   pExceptionRecord
          WIN64_ARG(IN     ULONG64             MemoryStackFp)
      NOT_WIN64_ARG(IN     ULONG               MemoryStackFp),
                    IN OUT PCONTEXT            pContextRecord,
                    IN OUT PDISPATCHER_CONTEXT pDispatcherContext
                    )
{
    //
    // This method doesn't always return, so it will leave its
    // state on the thread if using dynamic contracts.
    //
    STATIC_CONTRACT_MODE_ANY;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_THROWS;

    // We must preserve this so that GCStress=4 EH processing doesn't kill the last error.
    DWORD dwLastError = GetLastError();

    EXCEPTION_DISPOSITION returnDisposition = ExceptionContinueSearch;

    STRESS_LOG5(LF_EH, LL_INFO10, "Processing exception at establisher=%p, ip=%p disp->cxr: %p, sp: %p, cxr @ exception: %p\n",
                MemoryStackFp, pDispatcherContext->ControlPc,
                pDispatcherContext->ContextRecord,
                GetSP(pDispatcherContext->ContextRecord), pContextRecord);
    AMD64_ONLY(STRESS_LOG3(LF_EH, LL_INFO10, " rbx=%p, rsi=%p, rdi=%p\n", pContextRecord->Rbx, pContextRecord->Rsi, pContextRecord->Rdi));

    // sample flags early on because we may change pExceptionRecord below
    // if we are seeing a STATUS_UNWIND_CONSOLIDATE
    DWORD dwExceptionFlags = pExceptionRecord->ExceptionFlags;
    Thread* pThread = GetThread();

    // Stack Overflow is handled specially by the CLR EH mechanism. In fact
    // there are cases where we aren't in managed code, but aren't quite in
    // known unmanaged code yet either...
    //
    // These "boundary code" cases include:
    //  - in JIT helper methods which don't have a frame
    //  - in JIT helper methods before/during frame setup
    //  - in FCALL before/during frame setup
    //
    // In those cases on x86 we take special care to start our unwind looking
    // for a handler which is below the last explicit frame which has been
    // established on the stack as it can't reliably crawl the stack frames
    // above that.
    // NOTE: see code in the CLRVectoredExceptionHandler() routine.
    //
    // From the perspective of the EH subsystem, we can handle unwind correctly
    // even without erecting a transition frame on WIN64. However, since the GC
    // uses the stackwalker to update object references, and since the stackwalker
    // relies on transition frames, we still cannot let an exception be handled
    // by an unprotected managed frame.
    //
    // The code below checks to see if an SO has occurred outside of managed code.
    // If it has, and if we don't have a transition frame higher up the stack, then
    // we don't handle the SO.
    if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
    {
        if (IsSOExceptionCode(pExceptionRecord->ExceptionCode))
        {
            // We don't need to unwind the frame chain here because we have backstop
            // personality routines at the U2M boundary to do that. They are
            // the personality routines of CallDescrWorker() and UMThunkStubCommon().
            //
            // See VSW 471619 for more information.

            // We should be in cooperative mode if we are going to handle the SO.
            // We track SO state for the thread.
            EEPolicy::HandleStackOverflow(SOD_ManagedFrameHandler, (void*)MemoryStackFp);
            FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);
            return ExceptionContinueSearch;
        }
        else
        {
#ifdef FEATURE_STACK_PROBE
            if (GetEEPolicy()->GetActionOnFailure(FAIL_StackOverflow) == eRudeUnloadAppDomain)
            {
                RetailStackProbe(static_cast<unsigned int>(ADJUST_PROBE(BACKOUT_CODE_STACK_LIMIT)), pThread);
            }
#endif
        }
    }
    else
    {
        DWORD exceptionCode = pExceptionRecord->ExceptionCode;

        if (exceptionCode == STATUS_UNWIND)
        {
            // If exceptionCode is STATUS_UNWIND, RtlUnwind is called with a NULL ExceptionRecord,
            // so the OS uses a faked ExceptionRecord with the STATUS_UNWIND code. In that case we
            // need to look at our saved exception code.
            exceptionCode = GetCurrentExceptionCode();
        }

        if (IsSOExceptionCode(exceptionCode))
        {
            return ExceptionContinueSearch;
        }
    }

    BEGIN_CONTRACT_VIOLATION(SOToleranceViolation);

    StackFrame sf((UINT_PTR)MemoryStackFp);


    {
        GCX_COOP();
        // Update the current establisher frame
        if (dwExceptionFlags & EXCEPTION_UNWINDING)
        {
            ExceptionTracker *pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker();
            if (pCurrentTracker != NULL)
            {
                pCurrentTracker->SetCurrentEstablisherFrame(sf);
            }
        }

#ifdef _DEBUG
        Thread::ObjectRefFlush(pThread);
#endif // _DEBUG
    }


    //
    // begin Early Processing
    //
    {
#ifndef USE_REDIRECT_FOR_GCSTRESS
        if (IsGcMarker(pContextRecord, pExceptionRecord))
        {
            returnDisposition = ExceptionContinueExecution;
            goto lExit;
        }
#endif // !USE_REDIRECT_FOR_GCSTRESS

        EH_LOG((LL_INFO100, "..................................................................................\n"));
        EH_LOG((LL_INFO100, "ProcessCLRException enter, sp = 0x%p, ControlPc = 0x%p\n", MemoryStackFp, pDispatcherContext->ControlPc));
        DebugLogExceptionRecord(pExceptionRecord);

        if (STATUS_UNWIND_CONSOLIDATE == pExceptionRecord->ExceptionCode)
        {
            EH_LOG((LL_INFO100, "STATUS_UNWIND_CONSOLIDATE, retrieving stored exception record\n"));
            _ASSERTE(pExceptionRecord->NumberParameters >= 7);
            pExceptionRecord = (EXCEPTION_RECORD*)pExceptionRecord->ExceptionInformation[6];
            DebugLogExceptionRecord(pExceptionRecord);
        }

        CONSISTENCY_CHECK_MSG(!DebugIsEECxxException(pExceptionRecord), "EE C++ Exception leaked into managed code!!\n");
    }
    //
    // end Early Processing (tm) -- we're now into really processing an exception for managed code
    //

    if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
    {
        // If the exception is a breakpoint, but outside of the runtime or managed code,
        // let it go.  It is not ours, so someone else will handle it, or we'll see
        // it again as an unhandled exception.
        if ((pExceptionRecord->ExceptionCode == STATUS_BREAKPOINT) ||
            (pExceptionRecord->ExceptionCode == STATUS_SINGLE_STEP))
        {
            // It is a breakpoint; is it from the runtime or managed code?
            PCODE ip = GetIP(pContextRecord); // IP of the fault.

            BOOL fExternalException = FALSE;

            BEGIN_SO_INTOLERANT_CODE_NOPROBE;

            fExternalException = (!ExecutionManager::IsManagedCode(ip) &&
                                  !IsIPInModule(g_pMSCorEE, ip));

            END_SO_INTOLERANT_CODE_NOPROBE;

            if (fExternalException)
            {
                // The breakpoint was not ours.  Someone else can handle it.  (Or if not, we'll get it again as
                // an unhandled exception.)
                returnDisposition = ExceptionContinueSearch;
                goto lExit;
            }
        }
    }

    {
        BOOL bAsynchronousThreadStop = IsThreadHijackedForThreadStop(pThread, pExceptionRecord);

        // we already fixed the context in HijackHandler, so let's
        // just clear the thread state.
        pThread->ResetThrowControlForThread();

        ExceptionTracker::StackTraceState STState;

        ExceptionTracker*   pTracker = ExceptionTracker::GetOrCreateTracker(
            pDispatcherContext->ControlPc,
            sf,
            pExceptionRecord,
            pContextRecord,
            bAsynchronousThreadStop,
            !(dwExceptionFlags & EXCEPTION_UNWINDING),
            &STState);

#ifdef FEATURE_CORRUPTING_EXCEPTIONS
        // Only setup the Corruption Severity in the first pass
        if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
        {
            // Switch to COOP mode
            GCX_COOP();

            if (pTracker && pTracker->GetThrowable() != NULL)
            {
                // Setup the state in the current exception tracker indicating the corruption severity
                // of the active exception.
                CEHelper::SetupCorruptionSeverityForActiveException((STState == ExceptionTracker::STS_FirstRethrowFrame), (pTracker->GetPreviousExceptionTracker() != NULL),
                                                                    CEHelper::ShouldTreatActiveExceptionAsNonCorrupting());
            }

            // Failfast if the exception indicates a corrupted process state
            if (pTracker->GetCorruptionSeverity() == ProcessCorrupting)
                EEPOLICY_HANDLE_FATAL_ERROR(pExceptionRecord->ExceptionCode);
        }
#endif // FEATURE_CORRUPTING_EXCEPTIONS

        {
            // Switch to COOP mode since we are going to work
            // with the throwable
            GCX_COOP();
            if (pTracker->GetThrowable() != NULL)
            {
                BOOL fIsThrownExceptionAV = FALSE;
                OBJECTREF oThrowable = NULL;
                GCPROTECT_BEGIN(oThrowable);
                oThrowable = pTracker->GetThrowable();

                // Check if we are dealing with an AV or not and if we are,
                // ensure that this is a real AV and not a managed AV exception
                if ((pExceptionRecord->ExceptionCode == STATUS_ACCESS_VIOLATION) &&
                    (MscorlibBinder::GetException(kAccessViolationException) == oThrowable->GetMethodTable()))
                {
                    // It's an AV - set the flag
                    fIsThrownExceptionAV = TRUE;
                }

                GCPROTECT_END();

                // Did we get an AV?
                if (fIsThrownExceptionAV == TRUE)
                {
                    // Get the escalation policy action for handling AV
                    EPolicyAction actionAV = GetEEPolicy()->GetActionOnFailure(FAIL_AccessViolation);

                    // Valid actions are: eNoAction (default behaviour) or eRudeExitProcess
                    _ASSERTE(((actionAV == eNoAction) || (actionAV == eRudeExitProcess)));
                    if (actionAV == eRudeExitProcess)
                    {
                        LOG((LF_EH, LL_INFO100, "ProcessCLRException: AccessViolation handler found and doing RudeExitProcess due to escalation policy (eRudeExitProcess)\n"));

                        // EEPolicy::HandleFatalError will help us RudeExit the process.
                        // RudeExitProcess due to AV is to prevent a security risk - we are ripping
                        // at the boundary, without looking for the handlers.
                        EEPOLICY_HANDLE_FATAL_ERROR(COR_E_SECURITY);
                    }
                }
            }
        }

#ifndef FEATURE_PAL // Watson is on Windows only
        // Setup bucketing details for nested exceptions (rethrow and non-rethrow) only if we are in the first pass
        if (!(dwExceptionFlags & EXCEPTION_UNWINDING))
        {
            ExceptionTracker *pPrevEHTracker = pTracker->GetPreviousExceptionTracker();
            if (pPrevEHTracker != NULL)
            {
                SetStateForWatsonBucketing((STState == ExceptionTracker::STS_FirstRethrowFrame), pPrevEHTracker->GetThrowableAsHandle());
            }
        }
#endif //!FEATURE_PAL

        CLRUnwindStatus status;

#ifdef USE_PER_FRAME_PINVOKE_INIT
        // Refer to comment in ProcessOSExceptionNotification about ICF and codegen difference.
        InlinedCallFrame *pICFSetAsLimitFrame = NULL;
#endif // USE_PER_FRAME_PINVOKE_INIT

        status = pTracker->ProcessOSExceptionNotification(
            pExceptionRecord,
            pContextRecord,
            pDispatcherContext,
            dwExceptionFlags,
            sf,
            pThread,
            STState
#ifdef USE_PER_FRAME_PINVOKE_INIT
            , (PVOID)pICFSetAsLimitFrame
#endif // USE_PER_FRAME_PINVOKE_INIT
            );

        if (FirstPassComplete == status)
        {
            EH_LOG((LL_INFO100, "first pass finished, found handler, TargetFrameSp = %p\n",
                        pDispatcherContext->EstablisherFrame));

            SetLastError(dwLastError);

#ifndef FEATURE_PAL
            //
            // At this point (the end of the 1st pass) we don't know where
            // we are going to resume to. So, we pass in an address, which
            // lies in the NULL pointer partition of memory, as the target IP.
            //
            // Once we reach the target frame in the second pass unwind, we call
            // the catch funclet that caused us to resume execution and it
            // tells us where we are resuming to. At that point, we patch
            // the context record with the resume IP and RtlUnwind2 finishes
            // by restoring our context at the right spot.
            //
            // If we are unable to set the resume PC for some reason, then
            // the OS will try to resume at the NULL partition address and the
            // attempt will fail due to an AV, resulting in a failfast, helping us
            // isolate problems in patching the IP.

            ClrUnwindEx(pExceptionRecord,
                (UINT_PTR)pThread,
                INVALID_RESUME_ADDRESS,
                pDispatcherContext->EstablisherFrame);

            UNREACHABLE();
            //
            // doesn't return
            //
#else
            // On Unix, we will return ExceptionStackUnwind back to the custom
            // exception dispatch system. When it sees this disposition, it will
            // know that we want to handle the exception and will commence unwind
            // via the custom unwinder.
            return ExceptionStackUnwind;

#endif // FEATURE_PAL
        }
        else if (SecondPassComplete == status)
        {
            bool     fAborting = false;
            UINT_PTR uResumePC = (UINT_PTR)-1;
            UINT_PTR uOriginalSP = GetSP(pContextRecord);

            Frame* pLimitFrame = pTracker->GetLimitFrame();

            pDispatcherContext->ContextRecord = pContextRecord;

            // We may be in COOP mode at this point - the indefinite switch was done
            // in ExceptionTracker::ProcessManagedCallFrame.
            //
            // However, if a finally was invoked non-exceptionally and raised an exception
            // that was caught in its parent method, unwind will result in invoking any applicable termination
            // handlers in the finally funclet and thus, also switching the mode to COOP indefinitely.
            //
            // Since the catch block to be executed will lie in the parent method,
            // we will skip frames till we reach the parent and in the process, switch back to PREEMP mode
            // as control goes back to the OS.
            //
            // Upon reaching the target of unwind, we won't call ExceptionTracker::ProcessManagedCallFrame (since any
            // handlers in the finally or surrounding it will be invoked when we unwind the finally funclet). Thus,
            // we may not be in COOP mode.
            //
            // Since CallCatchHandler expects to be in COOP mode, perform the switch here.
            GCX_COOP_NO_DTOR();
            uResumePC = pTracker->CallCatchHandler(pContextRecord, &fAborting);

            {
                //
                // GC must NOT occur after the handler has returned until
                // we resume at the new address because the stackwalker
                // EnumGcRefs would try and report things as live from the
                // try body, that were probably reported dead from the
                // handler body.
                //
                // GC must NOT occur once the frames have been popped because
                // the values in the unwound CONTEXT are not GC-protected.
                //
                GCX_FORBID();

                CONSISTENCY_CHECK((UINT_PTR)-1 != uResumePC);

                // Ensure we are not resuming to the invalid target IP we had set at the end of
                // the first pass
                _ASSERTE_MSG(INVALID_RESUME_ADDRESS != uResumePC, "CallCatchHandler returned invalid resume PC!");

                //
                // CallCatchHandler freed the tracker.
                //
                INDEBUG(pTracker = (ExceptionTracker*)POISONC);

                // Note that we should only fail to fix up for SO.
                bool fFixedUp = FixNonvolatileRegisters(uOriginalSP, pThread, pContextRecord, fAborting);
                _ASSERTE(fFixedUp || (pExceptionRecord->ExceptionCode == STATUS_STACK_OVERFLOW));


                CONSISTENCY_CHECK(pLimitFrame > dac_cast<PTR_VOID>(GetSP(pContextRecord)));
#ifdef USE_PER_FRAME_PINVOKE_INIT
                if (pICFSetAsLimitFrame != NULL)
                {
                    _ASSERTE(pICFSetAsLimitFrame == pLimitFrame);

                    // Mark the ICF as inactive (by setting the return address as NULL).
                    // It will be marked as active at the next PInvoke callsite.
                    //
                    // This ensures that any stackwalk post the catch handler but before
                    // the next pinvoke callsite does not see the frame as active.
                    pICFSetAsLimitFrame->Reset();
                }
#endif // USE_PER_FRAME_PINVOKE_INIT

                pThread->SetFrame(pLimitFrame);

                FixContext(pContextRecord);

                SetIP(pContextRecord, (PCODE)uResumePC);
            }

#ifdef STACK_GUARDS_DEBUG
            // We are transitioning back to managed code, so ensure that we are in
            // SO-tolerant mode before we do so.
            RestoreSOToleranceState();
#endif
            RESET_CONTRACT_VIOLATION();
            ExceptionTracker::ResumeExecution(pContextRecord,
                                              NULL
                                              );
            UNREACHABLE();
        }
    }

lExit: ;

    EH_LOG((LL_INFO100, "returning %s\n", DebugGetExceptionDispositionName(returnDisposition)));
    CONSISTENCY_CHECK( !((dwExceptionFlags & EXCEPTION_TARGET_UNWIND) && (ExceptionContinueSearch == returnDisposition)));

    if ((ExceptionContinueSearch == returnDisposition))
    {
        GCX_PREEMP_NO_DTOR();
    }

    END_CONTRACT_VIOLATION;

    SetLastError(dwLastError);

    return returnDisposition;
}

// When we hit a native exception such as an AV in managed code, we put up a FaultingExceptionFrame which saves all the
// non-volatile registers. The GC may update these registers if they contain object references. However, the CONTEXT
// with which we are going to resume execution doesn't have these updated values. Thus, we need to fix up the non-volatile
// registers in the CONTEXT with the updated ones stored in the FaultingExceptionFrame. To do so properly, we need
// to perform a full stackwalk.
bool FixNonvolatileRegisters(UINT_PTR  uOriginalSP,
                             Thread*   pThread,
                             CONTEXT*  pContextRecord,
                             bool      fAborting
                             )
{
    CONTRACTL
    {
        MODE_COOPERATIVE;
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    CONTEXT _ctx = {0};

    // Ctor will initialize it to NULL
    REGDISPLAY regdisp;

    pThread->FillRegDisplay(&regdisp, &_ctx);

    bool fFound = ExceptionTracker::FindNonvolatileRegisterPointers(pThread, uOriginalSP, &regdisp, GetFP(pContextRecord));
    if (!fFound)
    {
        return false;
    }

    {
        //
        // GC must NOT occur once the frames have been popped because
        // the values in the unwound CONTEXT are not GC-protected.
        //
        GCX_FORBID();

        ExceptionTracker::UpdateNonvolatileRegisters(pContextRecord, &regdisp, fAborting);
    }

    return true;
}




// static
void ExceptionTracker::InitializeCrawlFrameForExplicitFrame(CrawlFrame* pcfThisFrame, Frame* pFrame, MethodDesc *pMD)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;

        PRECONDITION(pFrame != FRAME_TOP);
    }
    CONTRACTL_END;

    INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));

    pcfThisFrame->isFrameless = false;
    pcfThisFrame->pFrame = pFrame;
    pcfThisFrame->pFunc = pFrame->GetFunction();

    if (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr() &&
        !InlinedCallFrame::FrameHasActiveCall(pFrame))
    {
        // Inactive ICFs in IL stubs contain the true interop MethodDesc which must be
        // reported in the stack trace.
        if (pMD->IsILStub() && pMD->AsDynamicMethodDesc()->HasMDContextArg())
        {
            // Report interop MethodDesc
            pcfThisFrame->pFunc = ((InlinedCallFrame *)pFrame)->GetActualInteropMethodDesc();
            _ASSERTE(pcfThisFrame->pFunc != NULL);
            _ASSERTE(pcfThisFrame->pFunc->SanityCheck());
        }
    }

    pcfThisFrame->pFirstGSCookie = NULL;
    pcfThisFrame->pCurGSCookie = NULL;
}

// This method will initialize the RegDisplay in the CrawlFrame with the correct state for the current and caller contexts.
// See the long description of contexts and their validity in ExceptionTracker::InitializeCrawlFrame for details.
void ExceptionTracker::InitializeCurrentContextForCrawlFrame(CrawlFrame* pcfThisFrame, PT_DISPATCHER_CONTEXT pDispatcherContext, StackFrame sfEstablisherFrame)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;
        PRECONDITION(IsInFirstPass());
    }
    CONTRACTL_END;

    if (IsInFirstPass())
    {
        REGDISPLAY *pRD = pcfThisFrame->pRD;

#ifndef USE_CURRENT_CONTEXT_IN_FILTER
        INDEBUG(memset(pRD->pCurrentContext, 0xCC, sizeof(*(pRD->pCurrentContext))));
        // Ensure that clients can tell the current context isn't valid.
        SetIP(pRD->pCurrentContext, 0);
#else // !USE_CURRENT_CONTEXT_IN_FILTER
        RestoreNonvolatileRegisters(pRD->pCurrentContext, pDispatcherContext->CurrentNonVolatileContextRecord);
        RestoreNonvolatileRegisterPointers(pRD->pCurrentContextPointers, pDispatcherContext->CurrentNonVolatileContextRecord);
#endif // USE_CURRENT_CONTEXT_IN_FILTER

        *(pRD->pCallerContext) = *(pDispatcherContext->ContextRecord);
        pRD->IsCallerContextValid = TRUE;

        pRD->SP = sfEstablisherFrame.SP;
        pRD->ControlPC = pDispatcherContext->ControlPc;

#ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP
        pcfThisFrame->pRD->IsCallerSPValid = TRUE;

        // Assert our first pass assumptions for ARM/ARM64
        _ASSERTE(sfEstablisherFrame.SP == GetSP(pDispatcherContext->ContextRecord));
#endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP

    }

    EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCurrentContextForCrawlFrame: DispatcherContext->ControlPC = %p; IP in DispatcherContext->ContextRecord = %p.\n",
            pDispatcherContext->ControlPc, GetIP(pDispatcherContext->ContextRecord)));
}

// static
void ExceptionTracker::InitializeCrawlFrame(CrawlFrame* pcfThisFrame, Thread* pThread, StackFrame sf, REGDISPLAY* pRD,
                                            PDISPATCHER_CONTEXT pDispatcherContext, DWORD_PTR ControlPCForEHSearch,
                                            UINT_PTR* puMethodStartPC,
                                            ExceptionTracker *pCurrentTracker)
{
    CONTRACTL
    {
        MODE_ANY;
        NOTHROW;
        GC_NOTRIGGER;
    }
    CONTRACTL_END;

    INDEBUG(memset(pcfThisFrame, 0xCC, sizeof(*pcfThisFrame)));
    pcfThisFrame->pRD = pRD;

#ifdef FEATURE_INTERPRETER
    pcfThisFrame->pFrame = NULL;
#endif // FEATURE_INTERPRETER
1411 | |
1412 | // Initialize the RegDisplay from DC->ContextRecord. DC->ControlPC always contains the IP |
1413 | // in the frame for which the personality routine was invoked. |
1414 | // |
1415 | // <AMD64> |
1416 | // |
1417 | // During 1st pass, DC->ContextRecord contains the context of the caller of the frame for which personality |
1418 | // routine was invoked. On the other hand, in the 2nd pass, it contains the context of the frame for which |
1419 | // personality routine was invoked. |
1420 | // |
1421 | // </AMD64> |
1422 | // |
1423 | // <ARM and ARM64> |
1424 | // |
1425 | // In the first pass on ARM & ARM64: |
1426 | // |
1427 | // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time |
1428 | // the current managed method was invoked and thus, is the SP of the caller. This is |
1429 | // the value of DispatcherContext->EstablisherFrame as well. |
1430 | // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality |
1431 | // routine has been invoked. |
1432 | // 3) DispatcherContext->ContextRecord contains the context record of the caller (and thus, IP |
// in the caller). Most of the time, these values will be distinct. However, recursion
// may result in them being the same (case "run2" of baseservices\Regression\V1\Threads\functional\CS_TryFinally.exe
// is an example). In such a case, we ensure that the EstablisherFrame value is the same as
// the SP in DispatcherContext->ContextRecord (which is (1) above).
1437 | // |
1438 | // In second pass on ARM & ARM64: |
1439 | // |
1440 | // 1) EstablisherFrame (passed as 'sf' to this method) represents the SP at the time |
1441 | // the current managed method was invoked and thus, is the SP of the caller. This is |
1442 | // the value of DispatcherContext->EstablisherFrame as well. |
1443 | // 2) DispatcherContext->ControlPC is the pc in the current managed method for which personality |
1444 | // routine has been invoked. |
1445 | // 3) DispatcherContext->ContextRecord contains the context record of the current managed method |
1446 | // for which the personality routine is invoked. |
1447 | // |
1448 | // </ARM and ARM64> |
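//
// In short (summarizing the notes above):
//
//                   1st pass                      2nd pass
//    AMD64:         ContextRecord = caller        ContextRecord = current frame
//    ARM/ARM64:     ContextRecord = caller        ContextRecord = current frame
//
// On ARM/ARM64, EstablisherFrame ('sf') is the Caller-SP in both passes.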
1449 | pThread->InitRegDisplay(pcfThisFrame->pRD, pDispatcherContext->ContextRecord, true); |
1450 | |
1451 | bool fAdjustRegdisplayControlPC = false; |
1452 | |
1453 | // The "if" check below is trying to determine when we have a valid current context in DC->ContextRecord and whether, or not, |
1454 | // RegDisplay needs to be fixed up to set SP and ControlPC to have the values for the current frame for which personality routine |
1455 | // is invoked. |
1456 | // |
// We do this based upon the current pass for the exception tracker, since this will also handle the case when the current frame
// and its caller have the same return address (i.e. ControlPc). This can happen when, due to certain JIT optimizations, the following callstack
//
// A -> B -> A -> C
//
// gets transformed to the one below once B is inlined into the first (left-most) A, resulting in:
//
// A -> A -> C
//
// In this case, during the 1st pass, when the personality routine is invoked for the second A, DC->ControlPc could have the same
// value as DC->ContextRecord->Rip even though DC->ContextRecord actually represents the caller context (of the first A).
// As a result, we would not initialize the SP and ControlPC in the RegDisplay for the current frame (the frame for
// which the personality routine was invoked - the second A in the optimized scenario above), causing frame-specific lookups (e.g.
// GenericArgType) to happen incorrectly (against the first A).
1471 | // |
1472 | // Thus, we should always use the pass identification in ExceptionTracker to determine when we need to perform the fixup below. |
1473 | if (pCurrentTracker->IsInFirstPass()) |
1474 | { |
1475 | pCurrentTracker->InitializeCurrentContextForCrawlFrame(pcfThisFrame, pDispatcherContext, sf); |
1476 | } |
1477 | else |
1478 | { |
1479 | #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) |
1480 | // See the comment above the call to InitRegDisplay for this assertion. |
1481 | _ASSERTE(pDispatcherContext->ControlPc == GetIP(pDispatcherContext->ContextRecord)); |
1482 | #endif // _TARGET_ARM_ || _TARGET_ARM64_ |
1483 | |
1484 | #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
// Simply set up the CallerSP during the second pass in the caller context.
1486 | // This is used in setting up the "EnclosingClauseCallerSP" in ExceptionTracker::ProcessManagedCallFrame |
1487 | // when the termination handlers are invoked. |
1488 | ::SetSP(pcfThisFrame->pRD->pCallerContext, sf.SP); |
1489 | pcfThisFrame->pRD->IsCallerSPValid = TRUE; |
1490 | #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
1491 | } |
1492 | |
1493 | #ifdef ADJUST_PC_UNWOUND_TO_CALL |
// Further below, we will adjust the ControlPC based upon whether we are at a callsite or not.
// We need to do this for the "RegDisplay.ControlPC" field as well so that when data structures like
// EECodeInfo initialize themselves using this field, they will have the correct absolute value
// that is in sync with the "relOffset" we calculate below.
1498 | // |
1499 | // However, we do this *only* when "ControlPCForEHSearch" is the same as "DispatcherContext->ControlPC", |
1500 | // indicating we are not using the thread-abort reraise loop prevention logic. |
1501 | // |
1502 | if (pDispatcherContext->ControlPc == ControlPCForEHSearch) |
1503 | { |
1504 | // Since DispatcherContext->ControlPc is used to initialize the |
1505 | // RegDisplay.ControlPC field, assert that it is the same |
1506 | // as the ControlPC we are going to use to initialize the CrawlFrame |
1507 | // with as well. |
1508 | _ASSERTE(pcfThisFrame->pRD->ControlPC == ControlPCForEHSearch); |
1509 | fAdjustRegdisplayControlPC = true; |
1510 | |
1511 | } |
1512 | #endif // ADJUST_PC_UNWOUND_TO_CALL |
1513 | |
1514 | #if defined(_TARGET_ARM_) |
1515 | // Remove the Thumb bit |
1516 | ControlPCForEHSearch = ThumbCodeToDataPointer<DWORD_PTR, DWORD_PTR>(ControlPCForEHSearch); |
1517 | #endif |
1518 | |
1519 | #ifdef ADJUST_PC_UNWOUND_TO_CALL |
// If the OS indicated that the IP is at a callsite, then adjust the ControlPC by decrementing it
// by two. This is done because unwinding at a callsite will make ControlPC point to the
// instruction after the callsite. If a protected region ends "at" the callsite, then
// not doing this adjustment will result in an off-by-one error that can result in us not finding
// a handler.
//
// For async exceptions (e.g. AV), this will be false.
//
// We decrement by two to match what the kernel does.
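//
// For illustration, a hypothetical code layout (not taken from any actual method):
//
//      try-region start
//          ...
//          bl   Foo            ; call - the protected region ends at this callsite
//      try-region end          ; the return address of the call lies here
//          mov  r0, r1         ; first instruction outside the protected region
//
// After unwinding, ControlPC holds the return address (pointing at "mov r0, r1"), which
// is outside the protected region; decrementing it moves it back inside so the EH clause
// lookup finds the correct handler.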
1529 | if (pDispatcherContext->ControlPcIsUnwound) |
1530 | { |
1531 | ControlPCForEHSearch -= STACKWALK_CONTROLPC_ADJUST_OFFSET; |
1532 | if (fAdjustRegdisplayControlPC == true) |
1533 | { |
1534 | // Once the check above is removed, the assignment below should |
1535 | // be done unconditionally. |
1536 | pcfThisFrame->pRD->ControlPC = ControlPCForEHSearch; |
1537 | // On ARM & ARM64, the IP is either at the callsite (post the adjustment above) |
1538 | // or at the instruction at which async exception took place. |
1539 | pcfThisFrame->isIPadjusted = true; |
1540 | } |
1541 | } |
1542 | #endif // ADJUST_PC_UNWOUND_TO_CALL |
1543 | |
1544 | pcfThisFrame->codeInfo.Init(ControlPCForEHSearch); |
1545 | |
1546 | if (pcfThisFrame->codeInfo.IsValid()) |
1547 | { |
1548 | pcfThisFrame->isFrameless = true; |
1549 | pcfThisFrame->pFunc = pcfThisFrame->codeInfo.GetMethodDesc(); |
1550 | |
1551 | *puMethodStartPC = pcfThisFrame->codeInfo.GetStartAddress(); |
1552 | } |
1553 | else |
1554 | { |
1555 | pcfThisFrame->isFrameless = false; |
1556 | pcfThisFrame->pFunc = NULL; |
1557 | |
1558 | *puMethodStartPC = NULL; |
1559 | } |
1560 | |
1561 | pcfThisFrame->pThread = pThread; |
1562 | pcfThisFrame->hasFaulted = false; |
1563 | |
1564 | Frame* pTopFrame = pThread->GetFrame(); |
1565 | pcfThisFrame->isIPadjusted = (FRAME_TOP != pTopFrame) && (pTopFrame->GetVTablePtr() != FaultingExceptionFrame::GetMethodFrameVPtr()); |
1566 | if (pcfThisFrame->isFrameless && (pcfThisFrame->isIPadjusted == false) && (pcfThisFrame->GetRelOffset() == 0)) |
1567 | { |
// If we are here, then either a hardware generated exception happened at the first instruction
// of a managed method or an exception was thrown at that location.
1570 | // |
1571 | // Adjusting IP in such a case will lead us into unknown code - it could be native code or some |
1572 | // other JITted code. |
1573 | // |
1574 | // Hence, we will flag that the IP is already adjusted. |
1575 | pcfThisFrame->isIPadjusted = true; |
1576 | |
1577 | EH_LOG((LL_INFO100, "ExceptionTracker::InitializeCrawlFrame: Exception at offset zero of the method (MethodDesc %p); setting IP as adjusted.\n" , |
1578 | pcfThisFrame->pFunc)); |
1579 | } |
1580 | |
1581 | pcfThisFrame->pFirstGSCookie = NULL; |
1582 | pcfThisFrame->pCurGSCookie = NULL; |
1583 | |
1584 | pcfThisFrame->isFilterFuncletCached = FALSE; |
1585 | } |
1586 | |
1587 | bool ExceptionTracker::UpdateScannedStackRange(StackFrame sf, bool fIsFirstPass) |
1588 | { |
1589 | CONTRACTL |
1590 | { |
// Since this function will modify the scanned stack range, which is also accessed during the GC stackwalk,
// we invoke it in COOP mode so that access to the range is synchronized.
1593 | MODE_COOPERATIVE; |
1594 | GC_TRIGGERS; |
1595 | THROWS; |
1596 | } |
1597 | CONTRACTL_END; |
1598 | |
1599 | // |
1600 | // collapse trackers if a nested exception passes a previous exception |
1601 | // |
1602 | |
1603 | HandleNestedExceptionEscape(sf, fIsFirstPass); |
1604 | |
1605 | // |
1606 | // update stack bounds |
1607 | // |
1608 | BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame(); |
1609 | |
1610 | if (m_ScannedStackRange.Contains(sf)) |
1611 | { |
1612 | // If we're unwinding to find the resume frame and we're examining the topmost previously scanned frame, |
1613 | // then we can't ignore it because we could resume here due to an escaped nested exception. |
1614 | if (!fUnwindingToFindResumeFrame || (m_ScannedStackRange.GetUpperBound() != sf)) |
1615 | { |
1616 | // been there, done that. |
1617 | EH_LOG((LL_INFO100, " IGNOREFRAME: This frame has been processed already\n" )); |
1618 | return false; |
1619 | } |
1620 | } |
1621 | else |
1622 | { |
1623 | if (sf < m_ScannedStackRange.GetLowerBound()) |
1624 | { |
1625 | m_ScannedStackRange.ExtendLowerBound(sf); |
1626 | } |
1627 | |
1628 | if (sf > m_ScannedStackRange.GetUpperBound()) |
1629 | { |
1630 | m_ScannedStackRange.ExtendUpperBound(sf); |
1631 | } |
1632 | |
1633 | DebugLogTrackerRanges(" C" ); |
1634 | } |
1635 | |
1636 | return true; |
1637 | } |
1638 | |
1639 | void CheckForRudeAbort(Thread* pThread, bool fIsFirstPass) |
1640 | { |
1641 | if (fIsFirstPass && pThread->IsRudeAbort()) |
1642 | { |
1643 | GCX_COOP(); |
1644 | OBJECTREF rudeAbortThrowable = CLRException::GetPreallocatedRudeThreadAbortException(); |
1645 | if (pThread->GetThrowable() != rudeAbortThrowable) |
1646 | { |
1647 | pThread->SafeSetThrowables(rudeAbortThrowable); |
1648 | } |
1649 | |
1650 | if (!pThread->IsRudeAbortInitiated()) |
1651 | { |
1652 | pThread->PreWorkForThreadAbort(); |
1653 | } |
1654 | } |
1655 | } |
1656 | |
1657 | void ExceptionTracker::FirstPassIsComplete() |
1658 | { |
1659 | m_ExceptionFlags.ResetUnwindingToFindResumeFrame(); |
1660 | m_pSkipToParentFunctionMD = NULL; |
1661 | } |
1662 | |
1663 | void ExceptionTracker::SecondPassIsComplete(MethodDesc* pMD, StackFrame sfResumeStackFrame) |
1664 | { |
1665 | EH_LOG((LL_INFO100, " second pass unwind completed\n" )); |
1666 | |
1667 | m_pMethodDescOfCatcher = pMD; |
1668 | m_sfResumeStackFrame = sfResumeStackFrame; |
1669 | } |
1670 | |
1671 | CLRUnwindStatus ExceptionTracker::ProcessOSExceptionNotification( |
1672 | PEXCEPTION_RECORD pExceptionRecord, |
1673 | PCONTEXT pContextRecord, |
1674 | PDISPATCHER_CONTEXT pDispatcherContext, |
1675 | DWORD dwExceptionFlags, |
1676 | StackFrame sf, |
1677 | Thread* pThread, |
1678 | StackTraceState STState |
1679 | #ifdef USE_PER_FRAME_PINVOKE_INIT |
1680 | , PVOID pICFSetAsLimitFrame |
1681 | #endif // USE_PER_FRAME_PINVOKE_INIT |
1682 | ) |
1683 | { |
1684 | CONTRACTL |
1685 | { |
1686 | MODE_ANY; |
1687 | GC_TRIGGERS; |
1688 | THROWS; |
1689 | } |
1690 | CONTRACTL_END; |
1691 | |
1692 | CLRUnwindStatus status = UnwindPending; |
1693 | |
1694 | CrawlFrame cfThisFrame; |
1695 | REGDISPLAY regdisp; |
1696 | UINT_PTR uMethodStartPC; |
1697 | UINT_PTR uCallerSP; |
1698 | |
1699 | DWORD_PTR ControlPc = pDispatcherContext->ControlPc; |
1700 | |
1701 | ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, ®disp, pDispatcherContext, ControlPc, &uMethodStartPC, this); |
1702 | |
1703 | #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
1704 | uCallerSP = EECodeManager::GetCallerSp(cfThisFrame.pRD); |
1705 | #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
1706 | uCallerSP = sf.SP; |
1707 | #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
1708 | |
1709 | EH_LOG((LL_INFO100, "ProcessCrawlFrame: PSP: " FMT_ADDR " EstablisherFrame: " FMT_ADDR "\n" , DBG_ADDR(uCallerSP), DBG_ADDR(sf.SP))); |
1710 | |
1711 | bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING); |
1712 | bool fTargetUnwind = !!(dwExceptionFlags & EXCEPTION_TARGET_UNWIND); |
1713 | |
1714 | // If a thread abort was raised after a catch block's execution, we would have saved |
1715 | // the index and EstablisherFrame of the EH clause corresponding to the handler that executed. |
1716 | // Fetch that locally and reset the state against the thread if we are in the unwind pass. |
1717 | // |
// It should be kept in mind that, by virtue of copying the information below, we will
1719 | // have it available for the first frame seen during the unwind pass (which will be the |
1720 | // frame where ThreadAbort was raised after the catch block) for us to skip any termination |
1721 | // handlers that may be present prior to the EH clause whose index we saved. |
1722 | DWORD dwTACatchHandlerClauseIndex = pThread->m_dwIndexClauseForCatch; |
1723 | StackFrame sfEstablisherOfActualHandlerFrame = pThread->m_sfEstablisherOfActualHandlerFrame; |
1724 | if (!fIsFirstPass) |
1725 | { |
1726 | pThread->m_dwIndexClauseForCatch = 0; |
1727 | pThread->m_sfEstablisherOfActualHandlerFrame.Clear(); |
1728 | } |
1729 | |
1730 | bool fProcessThisFrame = false; |
1731 | bool fCrawlFrameIsDirty = false; |
1732 | |
1733 | // <GC_FUNCLET_REFERENCE_REPORTING> |
1734 | // |
1735 | // Refer to the detailed comment in ExceptionTracker::ProcessManagedCallFrame for more context. |
1736 | // In summary, if we have reached the target of the unwind, then we need to fix CallerSP (for |
1737 | // GC reference reporting) if we have been asked to. |
1738 | // |
1739 | // This will be done only when we reach the frame that is handling the exception. |
1740 | // |
1741 | // </GC_FUNCLET_REFERENCE_REPORTING> |
1742 | if (fTargetUnwind && (m_fFixupCallerSPForGCReporting == true)) |
1743 | { |
1744 | m_fFixupCallerSPForGCReporting = false; |
1745 | this->m_EnclosingClauseInfoForGCReporting.SetEnclosingClauseCallerSP(uCallerSP); |
1746 | } |
1747 | |
1748 | #ifdef USE_PER_FRAME_PINVOKE_INIT |
// Refer to the detailed comment below.
1750 | PTR_Frame pICFForUnwindTarget = NULL; |
1751 | #endif // USE_PER_FRAME_PINVOKE_INIT |
1752 | |
1753 | CheckForRudeAbort(pThread, fIsFirstPass); |
1754 | |
1755 | bool fIsFrameLess = cfThisFrame.IsFrameless(); |
1756 | GSCookie* pGSCookie = NULL; |
1757 | bool fSetLastUnwoundEstablisherFrame = false; |
1758 | |
1759 | // |
1760 | // process any frame since the last frame we've seen |
1761 | // |
1762 | { |
1763 | GCX_COOP_THREAD_EXISTS(pThread); |
1764 | |
1765 | // UpdateScannedStackRange needs to be invoked in COOP mode since |
1766 | // the stack range can also be accessed during GC stackwalk. |
1767 | fProcessThisFrame = UpdateScannedStackRange(sf, fIsFirstPass); |
1768 | |
1769 | MethodDesc *pMD = cfThisFrame.GetFunction(); |
1770 | |
1771 | Frame* pFrame = GetLimitFrame(); // next frame to process |
1772 | if (pFrame != FRAME_TOP) |
1773 | { |
1774 | // The following function call sets the GS cookie pointers and checks the cookie. |
1775 | cfThisFrame.SetCurGSCookie(Frame::SafeGetGSCookiePtr(pFrame)); |
1776 | } |
1777 | |
1778 | while (((UINT_PTR)pFrame) < uCallerSP) |
1779 | { |
1780 | #ifdef USE_PER_FRAME_PINVOKE_INIT |
1781 | // InlinedCallFrames (ICF) are allocated, initialized and linked to the Frame chain |
1782 | // by the code generated by the JIT for a method containing a PInvoke. |
1783 | // |
// On X64, the JIT generates code to dynamically link and unlink the ICF around
// each PInvoke call. On ARM, on the other hand, the JIT's codegen, in the context of the ICF,
// is more in line with X86 and thus, it links in the ICF at the start of the method
// and unlinks it towards the method end. Thus, the ICF is present on the Frame chain
// at any given point so long as the method containing the PInvoke is on the stack.
1789 | // |
1790 | // Now, if the method containing ICF catches an exception, we will reset the Frame chain |
1791 | // with the LimitFrame, that is computed below, after the catch handler returns. Since this |
1792 | // computation is done relative to the CallerSP (on both X64 and ARM), we will end up |
1793 | // removing the ICF from the Frame chain as that will always be below (stack growing down) |
1794 | // the CallerSP since it lives in the stack space of the current managed frame. |
1795 | // |
1796 | // As a result, if there is another PInvoke call after the catch block, it will expect |
1797 | // the ICF to be present and without one, execution will go south. |
1798 | // |
1799 | // To account for this ICF codegen difference, in the EH system we check if the current |
1800 | // Frame is an ICF or not. If it is and lies inside the current managed method, we |
1801 | // keep a reference to it and reset the LimitFrame to this saved reference before we |
1802 | // return back to invoke the catch handler. |
1803 | // |
1804 | // Thus, if there is another PInvoke call post the catch handler, it will find ICF as expected. |
1805 | // |
1806 | // This is based upon the following assumptions: |
1807 | // |
1808 | // 1) There will be no other explicit Frame inserted above the ICF inside the |
1809 | // managed method containing ICF. That is, ICF is the top-most explicit frame |
1810 | // in the managed method (and thus, lies in the current managed frame). |
1811 | // |
1812 | // 2) There is only one ICF per managed method containing one (or more) PInvoke(s). |
1813 | // |
1814 | // 3) We only do this if the current frame is the one handling the exception. This is to |
1815 | // address the scenario of keeping any ICF from frames lower in the stack active. |
1816 | // |
1817 | // 4) The ExceptionUnwind method of the ICF is a no-op. As noted above, we save a reference |
1818 | // to the ICF and yet continue to process the frame chain. During unwind, this implies |
1819 | // that we will end up invoking the ExceptionUnwind methods of all frames that lie |
1820 | // below the caller SP of the managed frame handling the exception. And since the handling |
1821 | // managed frame contains an ICF, it will be the topmost frame that will lie |
1822 | // below the callerSP for which we will invoke ExceptionUnwind. |
1823 | // |
1824 | // Thus, ICF::ExceptionUnwind should not do anything significant. If any of these assumptions |
1825 | // break, then the next best thing will be to make the JIT link/unlink the frame dynamically. |
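//
// To illustrate assumption (1), a hypothetical stack layout for the managed frame
// containing the ICF (stack growing down):
//
//      CallerSP of the managed frame   <- the LimitFrame is normally computed relative to this
//          ICF                         <- lives in the managed frame's stack space, below CallerSP
//          other locals
//      SP of the managed frame
//
// Resetting the Frame chain against a LimitFrame at (or above) CallerSP would therefore
// pop the ICF, which is why we save a reference to it and restore it as the LimitFrame
// before invoking the catch handler.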
1826 | |
1827 | if (fTargetUnwind && (pFrame->GetVTablePtr() == InlinedCallFrame::GetMethodFrameVPtr())) |
1828 | { |
1829 | PTR_InlinedCallFrame pICF = (PTR_InlinedCallFrame)pFrame; |
1830 | // Does it live inside the current managed method? It will iff: |
1831 | // |
1832 | // 1) ICF address is higher than the current frame's SP (which we get from DispatcherContext), AND |
1833 | // 2) ICF address is below callerSP. |
1834 | if ((GetSP(pDispatcherContext->ContextRecord) < (TADDR)pICF) && |
1835 | ((UINT_PTR)pICF < uCallerSP)) |
1836 | { |
1837 | pICFForUnwindTarget = pFrame; |
1838 | } |
1839 | } |
#endif // USE_PER_FRAME_PINVOKE_INIT
1841 | |
1842 | cfThisFrame.CheckGSCookies(); |
1843 | |
1844 | if (fProcessThisFrame) |
1845 | { |
1846 | ExceptionTracker::InitializeCrawlFrameForExplicitFrame(&cfThisFrame, pFrame, pMD); |
1847 | fCrawlFrameIsDirty = true; |
1848 | |
1849 | status = ProcessExplicitFrame( |
1850 | &cfThisFrame, |
1851 | sf, |
1852 | fIsFirstPass, |
1853 | STState); |
1854 | cfThisFrame.CheckGSCookies(); |
1855 | } |
1856 | |
1857 | if (!fIsFirstPass) |
1858 | { |
1859 | // |
1860 | // notify Frame of unwind |
1861 | // |
1862 | pFrame->ExceptionUnwind(); |
1863 | |
1864 | // If we have not yet set the initial explicit frame processed by this tracker, then |
1865 | // set it now. |
1866 | if (m_pInitialExplicitFrame == NULL) |
1867 | { |
1868 | m_pInitialExplicitFrame = pFrame; |
1869 | } |
1870 | } |
1871 | |
1872 | pFrame = pFrame->Next(); |
1873 | m_pLimitFrame = pFrame; |
1874 | |
1875 | if (UnwindPending != status) |
1876 | { |
1877 | goto lExit; |
1878 | } |
1879 | } |
1880 | |
1881 | if (fCrawlFrameIsDirty) |
1882 | { |
// If the CrawlFrame is dirty, it implies that it got modified as part of explicit frame processing. Thus, we shall
// reinitialize it here.
1885 | ExceptionTracker::InitializeCrawlFrame(&cfThisFrame, pThread, sf, ®disp, pDispatcherContext, ControlPc, &uMethodStartPC, this); |
1886 | } |
1887 | |
1888 | if (fIsFrameLess) |
1889 | { |
1890 | pGSCookie = (GSCookie*)cfThisFrame.GetCodeManager()->GetGSCookieAddr(cfThisFrame.pRD, |
1891 | &cfThisFrame.codeInfo, |
1892 | &cfThisFrame.codeManState); |
1893 | if (pGSCookie) |
1894 | { |
1895 | // The following function call sets the GS cookie pointers and checks the cookie. |
1896 | cfThisFrame.SetCurGSCookie(pGSCookie); |
1897 | } |
1898 | |
1899 | status = HandleFunclets(&fProcessThisFrame, fIsFirstPass, |
1900 | cfThisFrame.GetFunction(), cfThisFrame.IsFunclet(), sf); |
1901 | } |
1902 | |
1903 | if ((!fIsFirstPass) && (!fProcessThisFrame)) |
1904 | { |
1905 | // If we are unwinding and not processing the current frame, it implies that |
1906 | // this frame has been unwound for one of the following reasons: |
1907 | // |
// 1) We have already seen it due to nested exception processing, OR
// 2) We are skipping frames to find a funclet's parent and thus, it has already been
//    unwound.
//
// If the current frame is NOT the target of unwind, update the last unwound
// establisher frame. We don't do this for the target of unwind since it has the catch handler (for a
// duplicate EH clause reported in the funclet) that needs to be invoked and thus may have valid
// references to report for GC reporting.
1916 | // |
1917 | // If we are not skipping the managed frame, then LastUnwoundEstablisherFrame will be updated later in this method, |
1918 | // just before we return back to our caller. |
1919 | if (!fTargetUnwind) |
1920 | { |
1921 | SetLastUnwoundEstablisherFrame(sf); |
1922 | fSetLastUnwoundEstablisherFrame = true; |
1923 | } |
1924 | } |
1925 | |
// GCX_COOP_THREAD_EXISTS ends here and we may switch to preemptive mode now (if applicable).
1927 | } |
1928 | |
1929 | // |
1930 | // now process managed call frame if needed |
1931 | // |
1932 | if (fIsFrameLess) |
1933 | { |
1934 | if (fProcessThisFrame) |
1935 | { |
1936 | status = ProcessManagedCallFrame( |
1937 | &cfThisFrame, |
1938 | sf, |
1939 | StackFrame::FromEstablisherFrame(pDispatcherContext->EstablisherFrame), |
1940 | pExceptionRecord, |
1941 | STState, |
1942 | uMethodStartPC, |
1943 | dwExceptionFlags, |
1944 | dwTACatchHandlerClauseIndex, |
1945 | sfEstablisherOfActualHandlerFrame); |
1946 | |
1947 | if (pGSCookie) |
1948 | { |
1949 | cfThisFrame.CheckGSCookies(); |
1950 | } |
1951 | } |
1952 | |
1953 | if (fTargetUnwind && (UnwindPending == status)) |
1954 | { |
1955 | SecondPassIsComplete(cfThisFrame.GetFunction(), sf); |
1956 | status = SecondPassComplete; |
1957 | } |
1958 | } |
1959 | |
1960 | lExit: |
1961 | |
1962 | // If we are unwinding and have returned successfully from unwinding the frame, then mark it as the last unwound frame for the current |
1963 | // exception. We don't do this if the frame is target of unwind (i.e. handling the exception) since catch block invocation may have references to be |
1964 | // reported (if a GC happens during catch block invocation). |
1965 | // |
1966 | // If an exception escapes out of a funclet (this is only possible for fault/finally/catch clauses), then we will not return here. |
1967 | // Since this implies that the funclet no longer has any valid references to report, we will need to set the LastUnwoundEstablisherFrame |
1968 | // close to the point we detect the exception has escaped the funclet. This is done in ExceptionTracker::CallHandler and marks the |
1969 | // frame that invoked (and thus, contained) the funclet as the LastUnwoundEstablisherFrame. |
1970 | // |
// Note: Do not add any GC triggering code between the return from ProcessManagedCallFrame and the setting of the LastUnwoundEstablisherFrame
1972 | if ((!fIsFirstPass) && (!fTargetUnwind) && (!fSetLastUnwoundEstablisherFrame)) |
1973 | { |
1974 | GCX_COOP(); |
1975 | SetLastUnwoundEstablisherFrame(sf); |
1976 | } |
1977 | |
1978 | if (FirstPassComplete == status) |
1979 | { |
1980 | FirstPassIsComplete(); |
1981 | } |
1982 | |
1983 | if (fTargetUnwind && (status == SecondPassComplete)) |
1984 | { |
1985 | #ifdef USE_PER_FRAME_PINVOKE_INIT |
// If we have an ICF to set as the LimitFrame, do that now.
1987 | // The Frame chain is still intact and would be updated using |
1988 | // the LimitFrame (done after the catch handler returns). |
1989 | // |
1990 | // NOTE: This should be done as the last thing before we return |
1991 | // back to invoke the catch handler. |
1992 | if (pICFForUnwindTarget != NULL) |
1993 | { |
1994 | m_pLimitFrame = pICFForUnwindTarget; |
1995 | pICFSetAsLimitFrame = (PVOID)pICFForUnwindTarget; |
1996 | } |
1997 | #endif // USE_PER_FRAME_PINVOKE_INIT |
1998 | |
1999 | // Since second pass is complete and we have reached |
2000 | // the frame containing the catch funclet, reset the enclosing |
2001 | // clause SP for the catch funclet, if applicable, to be the CallerSP of the |
2002 | // current frame. |
2003 | // |
2004 | // Refer to the detailed comment about this code |
2005 | // in ExceptionTracker::ProcessManagedCallFrame. |
2006 | if (m_fResetEnclosingClauseSPForCatchFunclet) |
2007 | { |
2008 | #ifdef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
2009 | // DispatcherContext->EstablisherFrame's value |
2010 | // represents the CallerSP of the current frame. |
2011 | UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)pDispatcherContext->EstablisherFrame; |
2012 | #else // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
2013 | // Extract the CallerSP from RegDisplay |
2014 | REGDISPLAY *pRD = cfThisFrame.GetRegisterSet(); |
2015 | _ASSERTE(pRD->IsCallerContextValid || pRD->IsCallerSPValid); |
2016 | UINT_PTR EnclosingClauseCallerSP = (UINT_PTR)GetSP(pRD->pCallerContext); |
2017 | #endif // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
2018 | m_EnclosingClauseInfo = EnclosingClauseInfo(false, cfThisFrame.GetRelOffset(), EnclosingClauseCallerSP); |
2019 | } |
2020 | m_fResetEnclosingClauseSPForCatchFunclet = FALSE; |
2021 | } |
2022 | |
2023 | // If we are unwinding and the exception was not caught in managed code and we have reached the |
2024 | // topmost frame we saw in the first pass, then reset thread abort state if this is the last managed |
2025 | // code personality routine on the stack. |
2026 | if ((fIsFirstPass == false) && (this->GetTopmostStackFrameFromFirstPass() == sf) && (GetCatchToCallPC() == NULL)) |
2027 | { |
2028 | ExceptionTracker::ResetThreadAbortStatus(pThread, &cfThisFrame, sf); |
2029 | } |
2030 | |
2031 | // |
2032 | // fill in the out parameter |
2033 | // |
2034 | return status; |
2035 | } |
2036 | |
2037 | // static |
2038 | void ExceptionTracker::DebugLogTrackerRanges(__in_z const char *pszTag) |
2039 | { |
2040 | #ifdef _DEBUG |
2041 | CONTRACTL |
2042 | { |
2043 | MODE_ANY; |
2044 | GC_NOTRIGGER; |
2045 | NOTHROW; |
2046 | } |
2047 | CONTRACTL_END; |
2048 | |
2049 | Thread* pThread = GetThread(); |
2050 | ExceptionTracker* pTracker = pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL; |
2051 | |
2052 | int i = 0; |
2053 | |
2054 | while (pTracker) |
2055 | { |
2056 | EH_LOG((LL_INFO100, "%s:|%02d| %p: (%p %p) %s\n" , pszTag, i, pTracker, pTracker->m_ScannedStackRange.GetLowerBound().SP, pTracker->m_ScannedStackRange.GetUpperBound().SP, |
2057 | pTracker->IsInFirstPass() ? "1st pass" : "2nd pass" |
2058 | )); |
2059 | pTracker = pTracker->m_pPrevNestedInfo; |
2060 | i++; |
2061 | } |
2062 | #endif // _DEBUG |
2063 | } |
2064 | |
2065 | |
2066 | bool ExceptionTracker::HandleNestedExceptionEscape(StackFrame sf, bool fIsFirstPass) |
2067 | { |
2068 | CONTRACTL |
2069 | { |
2070 | // Since this function can modify the scanned stack range, which is also accessed during the GC stackwalk, |
// we invoke it in COOP mode so that access to the range is synchronized.
2072 | MODE_COOPERATIVE; |
2073 | GC_NOTRIGGER; |
2074 | NOTHROW; |
2075 | } |
2076 | CONTRACTL_END; |
2077 | |
2078 | bool fResult = false; |
2079 | |
2080 | DebugLogTrackerRanges(" A" ); |
2081 | |
2082 | ExceptionTracker* pPreviousTracker = m_pPrevNestedInfo; |
2083 | |
2084 | while (pPreviousTracker && pPreviousTracker->m_ScannedStackRange.IsSupersededBy(sf)) |
2085 | { |
2086 | // |
2087 | // If the previous tracker (representing exception E1 and whose scanned stack range is superseded by the current frame) |
// is in the first pass AND the current tracker (representing exception E2) has not seen the current frame AND we are here,
2089 | // it implies that we had a nested exception while the previous tracker was in the first pass. |
2090 | // |
2091 | // This can happen in the following scenarios: |
2092 | // |
2093 | // 1) An exception escapes a managed filter (which are invoked in the first pass). However, |
2094 | // that is not possible since any exception escaping them is swallowed by the runtime. |
2095 | // If someone does longjmp from within the filter, then that is illegal and unsupported. |
2096 | // |
// 2) While processing an exception (E1), either we or native code caught it, triggering unwind. However, before the
//    first managed frame was processed for unwind, another native frame (below the first managed frame on the stack)
//    did a longjmp to go past us or raised another exception from one of its termination handlers.
2100 | // |
2101 | // Thus, we will never get a chance to switch our tracker for E1 to 2nd pass (which would be done when |
2102 | // ExceptionTracker::GetOrCreateTracker will be invoked for the first managed frame) since the longjmp, or the |
2103 | // new-exception would result in a new tracker being setup. |
2104 | // |
2105 | // Below is an example of such a case that does longjmp |
2106 | // ---------------------------------------------------- |
2107 | // |
2108 | // NativeA (does setjmp) -> ManagedFunc -> NativeB |
2109 | // |
2110 | // |
2111 | // NativeB could be implemented as: |
2112 | // |
2113 | // __try { // raise exception } __finally { longjmp(jmp1, 1); } |
2114 | // |
2115 | // "jmp1" is the jmp_buf setup by NativeA by calling setjmp. |
2116 | // |
2117 | // ManagedFunc could be implemented as: |
2118 | // |
2119 | // try { |
2120 | // try { NativeB(); } |
2121 | // finally { Console.WriteLine("Finally in ManagedFunc"); } |
2122 | // } |
// catch(Exception ex) { Console.WriteLine("Caught"); }
2124 | // |
2125 | // |
2126 | // In case of nested exception, we combine the stack range (see below) since we have already seen those frames |
// in the specified pass for the previous tracker. However, in the example above, the current tracker (in 2nd pass)
// has not seen the frames which the previous tracker (which is in the first pass) has seen.
2129 | // |
2130 | // On a similar note, the __finally in the example above could also do a "throw 1;". In such a case, we would expect |
2131 | // that the catch in ManagedFunc would catch the exception (since "throw 1;" would be represented as SEHException in |
2132 | // the runtime). However, during first pass, when the exception enters ManagedFunc, the current tracker would not have |
2133 | // processed the ManagedFunc frame, while the previous tracker (for E1) would have. If we proceed to combine the stack |
2134 | // ranges, we will omit examining the catch clause in ManagedFunc. |
2135 | // |
2136 | // Thus, we cannot combine the stack range yet and must let each frame, already scanned by the previous |
2137 | // tracker, be also processed by the current (longjmp) tracker if not already done. |
2138 | // |
2139 | // Note: This is not a concern if the previous tracker (for exception E1) is in the second pass since any escaping exception (E2) |
2140 | // would come out of a finally/fault funclet and the runtime's funclet skipping logic will deal with it correctly. |
2141 | |
2142 | if (pPreviousTracker->IsInFirstPass() && (!this->m_ScannedStackRange.Contains(sf))) |
2143 | { |
2144 | // Allow all stackframes seen by previous tracker to be seen by the current |
2145 | // tracker as well. |
2146 | if (sf <= pPreviousTracker->m_ScannedStackRange.GetUpperBound()) |
2147 | { |
2148 | EH_LOG((LL_INFO100, " - not updating current tracker bounds for escaped exception since\n" )); |
2149 | EH_LOG((LL_INFO100, " - active tracker (%p; %s) has not seen the current frame [" , this, this->IsInFirstPass()?"FirstPass" :"SecondPass" )); |
2150 | EH_LOG((LL_INFO100, " - SP = %p" , sf.SP)); |
2151 | EH_LOG((LL_INFO100, "]\n" )); |
2152 | EH_LOG((LL_INFO100, " - which the previous (%p) tracker has processed.\n" , pPreviousTracker)); |
2153 | return fResult; |
2154 | } |
2155 | } |
2156 | |
2157 | EH_LOG((LL_INFO100, " nested exception ESCAPED\n" )); |
2158 | EH_LOG((LL_INFO100, " - updating current tracker stack bounds\n" )); |
2159 | m_ScannedStackRange.CombineWith(sf, &pPreviousTracker->m_ScannedStackRange); |
2160 | |
2161 | // |
2162 | // Only the topmost tracker can be in the first pass. |
2163 | // |
2164 | // (Except in the case where we have an exception thrown in a filter, |
2165 | // which should never escape the filter, and thus, will never supersede |
2166 | // the previous exception. This is why we cannot walk the entire list |
2167 | // of trackers to assert that they're all in the right mode.) |
2168 | // |
2169 | // CONSISTENCY_CHECK(!pPreviousTracker->IsInFirstPass()); |
2170 | |
// If our modes don't match, don't actually delete the superseded exception.
// If we did, we would lose valuable state on which frames have been scanned
2173 | // on the second pass if an exception is thrown during the 2nd pass. |
2174 | |
2175 | // Advance the current tracker pointer now, since it may be deleted below. |
2176 | pPreviousTracker = pPreviousTracker->m_pPrevNestedInfo; |
2177 | |
2178 | if (!fIsFirstPass) |
2179 | { |
2180 | |
// During unwind, at each frame we collapse exception trackers only once, i.e., there cannot be multiple
// exception trackers that are collapsed at each frame. Store the information of the collapsed exception
// tracker in the current tracker to be able to find the parent frame when a nested exception escapes.
2184 | m_csfEHClauseOfCollapsedTracker = m_pPrevNestedInfo->m_EHClauseInfo.GetCallerStackFrameForEHClause(); |
2185 | m_EnclosingClauseInfoOfCollapsedTracker = m_pPrevNestedInfo->m_EnclosingClauseInfoForGCReporting; |
2186 | |
2187 | EH_LOG((LL_INFO100, " - removing previous tracker\n" )); |
2188 | |
2189 | ExceptionTracker* pTrackerToFree = m_pPrevNestedInfo; |
2190 | m_pPrevNestedInfo = pTrackerToFree->m_pPrevNestedInfo; |
2191 | |
2192 | #if defined(DEBUGGING_SUPPORTED) |
2193 | if (g_pDebugInterface != NULL) |
2194 | { |
2195 | g_pDebugInterface->DeleteInterceptContext(pTrackerToFree->m_DebuggerExState.GetDebuggerInterceptContext()); |
2196 | } |
2197 | #endif // DEBUGGING_SUPPORTED |
2198 | |
2199 | CONSISTENCY_CHECK(pTrackerToFree->IsValid()); |
2200 | FreeTrackerMemory(pTrackerToFree, memBoth); |
2201 | } |
2202 | |
2203 | DebugLogTrackerRanges(" B" ); |
2204 | } |
2205 | |
2206 | return fResult; |
2207 | } |
2208 | |
2209 | CLRUnwindStatus ExceptionTracker::ProcessExplicitFrame( |
2210 | CrawlFrame* pcfThisFrame, |
2211 | StackFrame sf, |
2212 | BOOL fIsFirstPass, |
2213 | StackTraceState& STState |
2214 | ) |
2215 | { |
2216 | CONTRACTL |
2217 | { |
2218 | MODE_COOPERATIVE; |
2219 | GC_TRIGGERS; |
2220 | THROWS; |
2221 | PRECONDITION(!pcfThisFrame->IsFrameless()); |
2222 | PRECONDITION(pcfThisFrame->GetFrame() != FRAME_TOP); |
2223 | } |
2224 | CONTRACTL_END; |
2225 | |
2226 | Frame* pFrame = pcfThisFrame->GetFrame(); |
2227 | |
2228 | EH_LOG((LL_INFO100, " [ ProcessExplicitFrame: pFrame: " FMT_ADDR " pMD: " FMT_ADDR " %s PASS ]\n" , DBG_ADDR(pFrame), DBG_ADDR(pFrame->GetFunction()), fIsFirstPass ? "FIRST" : "SECOND" )); |
2229 | |
2230 | if (FRAME_TOP == pFrame) |
2231 | { |
2232 | goto lExit; |
2233 | } |
2234 | |
2235 | if (!m_ExceptionFlags.UnwindingToFindResumeFrame()) |
2236 | { |
2237 | // |
2238 | // update our exception stacktrace |
2239 | // |
2240 | |
2241 | BOOL bReplaceStack = FALSE; |
2242 | BOOL bSkipLastElement = FALSE; |
2243 | |
2244 | if (STS_FirstRethrowFrame == STState) |
2245 | { |
2246 | bSkipLastElement = TRUE; |
2247 | } |
2248 | else |
2249 | if (STS_NewException == STState) |
2250 | { |
2251 | bReplaceStack = TRUE; |
2252 | } |
2253 | |
2254 | // Normally, we need to notify the profiler in two cases: |
2255 | // 1) a brand new exception is thrown, and |
2256 | // 2) an exception is rethrown. |
2257 | // However, in this case, if the explicit frame doesn't correspond to a MD, we don't set STState to STS_Append, |
2258 | // so the next managed call frame we process will give another ExceptionThrown() callback to the profiler. |
2259 | // So we give the callback below, only in the case when we append to the stack trace. |
2260 | |
2261 | MethodDesc* pMD = pcfThisFrame->GetFunction(); |
2262 | if (pMD) |
2263 | { |
2264 | Thread* pThread = m_pThread; |
2265 | |
2266 | if (fIsFirstPass) |
2267 | { |
2268 | // |
2269 | // notify profiler of new/rethrown exception |
2270 | // |
2271 | if (bSkipLastElement || bReplaceStack) |
2272 | { |
2273 | GCX_COOP(); |
2274 | EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread); |
2275 | UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack); |
2276 | } |
2277 | |
2278 | // |
2279 | // Update stack trace |
2280 | // |
2281 | m_StackTraceInfo.AppendElement(CanAllocateMemory(), NULL, sf.SP, pMD, pcfThisFrame); |
2282 | m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement); |
2283 | |
2284 | // |
2285 | // make callback to debugger and/or profiler |
2286 | // |
2287 | #if defined(DEBUGGING_SUPPORTED) |
2288 | if (ExceptionTracker::NotifyDebuggerOfStub(pThread, sf, pFrame)) |
2289 | { |
2290 | // Deliver the FirstChanceNotification after the debugger, if not already delivered. |
2291 | if (!this->DeliveredFirstChanceNotification()) |
2292 | { |
2293 | ExceptionNotifications::DeliverFirstChanceNotification(); |
2294 | } |
2295 | } |
2296 | #endif // DEBUGGING_SUPPORTED |
2297 | |
2298 | STState = STS_Append; |
2299 | } |
2300 | } |
2301 | } |
2302 | |
2303 | lExit: |
2304 | return UnwindPending; |
2305 | } |
2306 | |
2307 | CLRUnwindStatus ExceptionTracker::HandleFunclets(bool* pfProcessThisFrame, bool fIsFirstPass, |
2308 | MethodDesc * pMD, bool fFunclet, StackFrame sf) |
2309 | { |
2310 | CONTRACTL |
2311 | { |
2312 | MODE_ANY; |
2313 | GC_NOTRIGGER; |
2314 | NOTHROW; |
2315 | } |
2316 | CONTRACTL_END; |
2317 | |
2318 | BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame(); |
2319 | |
2320 | // |
2321 | // handle out-of-line finallys |
2322 | // |
2323 | |
2324 | // In the second pass, we always want to execute this code. |
2325 | // In the first pass, we only execute this code if we are not unwinding to find the resume frame. |
2326 | // We do this to avoid calling the same filter more than once. Search for "UnwindingToFindResumeFrame" |
2327 | // to find a more elaborate comment in ProcessManagedCallFrame(). |
2328 | |
2329 | // If we are in the first pass and we are unwinding to find the resume frame, then make sure the flag is cleared. |
2330 | if (fIsFirstPass && fUnwindingToFindResumeFrame) |
2331 | { |
2332 | m_pSkipToParentFunctionMD = NULL; |
2333 | } |
2334 | else |
2335 | { |
2336 | // <TODO> |
2337 | // this 'skip to parent function MD' code only seems to be needed |
2338 | // in the case where we call a finally funclet from the normal |
2339 | // execution codepath. Is there a better way to achieve the same |
2340 | // goal? Also, will recursion break us in any corner cases? |
2341 | // [ThrowInFinallyNestedInTryTest] |
2342 | // [GoryManagedPresentTest] |
2343 | // </TODO> |
2344 | |
2345 | // <TODO> |
// this was done for AMD64, but I don't understand why AMD64 needed the workaround...
// (the workaround is the "double call on parent method" part.)
2348 | // </TODO> |
2349 | |
2350 | // |
2351 | // If we encounter a funclet, we need to skip all call frames up |
2352 | // to and including its parent method call frame. The reason |
2353 | // behind this is that a funclet is logically part of the parent |
2354 | // method has all the clauses that covered its logical location |
2355 | // in the parent covering its body. |
2356 | // |
2357 | if (((UINT_PTR)m_pSkipToParentFunctionMD) & 1) |
2358 | { |
2359 | EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: skipping to parent\n" )); |
2360 | *pfProcessThisFrame = false; |
2361 | if ((((UINT_PTR)pMD) == (((UINT_PTR)m_pSkipToParentFunctionMD) & ~((UINT_PTR)1))) && !fFunclet) |
2362 | { |
2363 | EH_LOG((LL_INFO100, " SKIPTOPARENT: found parent for funclet pMD = %p, sf.SP = %p, will stop skipping frames\n" , pMD, sf.SP)); |
2364 | _ASSERTE(0 == (((UINT_PTR)sf.SP) & 1)); |
2365 | m_pSkipToParentFunctionMD = (MethodDesc*)sf.SP; |
2366 | |
2367 | _ASSERTE(!fUnwindingToFindResumeFrame); |
2368 | } |
2369 | } |
2370 | else if (fFunclet) |
2371 | { |
2372 | EH_LOG((LL_INFO100, " SKIPTOPARENT: found funclet pMD = %p, will start skipping frames\n" , pMD)); |
2373 | _ASSERTE(0 == (((UINT_PTR)pMD) & 1)); |
2374 | m_pSkipToParentFunctionMD = (MethodDesc*)(((UINT_PTR)pMD) | 1); |
2375 | } |
2376 | else |
2377 | { |
2378 | if (sf.SP == ((UINT_PTR)m_pSkipToParentFunctionMD)) |
2379 | { |
2380 | EH_LOG((LL_INFO100, " IGNOREFRAME: SKIPTOPARENT: got double call on parent method\n" )); |
2381 | *pfProcessThisFrame = false; |
2382 | } |
2383 | else if (m_pSkipToParentFunctionMD && (sf.SP > ((UINT_PTR)m_pSkipToParentFunctionMD))) |
2384 | { |
2385 | EH_LOG((LL_INFO100, " SKIPTOPARENT: went past parent method\n" )); |
2386 | m_pSkipToParentFunctionMD = NULL; |
2387 | } |
2388 | } |
2389 | } |
2390 | |
2391 | return UnwindPending; |
2392 | } |
2393 | |
2394 | CLRUnwindStatus ExceptionTracker::ProcessManagedCallFrame( |
2395 | CrawlFrame* pcfThisFrame, |
2396 | StackFrame sf, |
2397 | StackFrame sfEstablisherFrame, |
2398 | EXCEPTION_RECORD* pExceptionRecord, |
2399 | StackTraceState STState, |
2400 | UINT_PTR uMethodStartPC, |
2401 | DWORD dwExceptionFlags, |
2402 | DWORD dwTACatchHandlerClauseIndex, |
2403 | StackFrame sfEstablisherOfActualHandlerFrame |
2404 | ) |
2405 | { |
2406 | CONTRACTL |
2407 | { |
2408 | MODE_ANY; |
2409 | GC_TRIGGERS; |
2410 | THROWS; |
2411 | PRECONDITION(pcfThisFrame->IsFrameless()); |
2412 | } |
2413 | CONTRACTL_END; |
2414 | |
2415 | UINT_PTR uControlPC = (UINT_PTR)GetControlPC(pcfThisFrame->GetRegisterSet()); |
2416 | CLRUnwindStatus ReturnStatus = UnwindPending; |
2417 | |
2418 | MethodDesc* pMD = pcfThisFrame->GetFunction(); |
2419 | |
2420 | bool fIsFirstPass = !(dwExceptionFlags & EXCEPTION_UNWINDING); |
2421 | bool fIsFunclet = pcfThisFrame->IsFunclet(); |
2422 | |
2423 | CONSISTENCY_CHECK(IsValid()); |
2424 | CONSISTENCY_CHECK(ThrowableIsValid() || !fIsFirstPass); |
2425 | CONSISTENCY_CHECK(pMD != 0); |
2426 | |
2427 | EH_LOG((LL_INFO100, " [ ProcessManagedCallFrame this=%p, %s PASS ]\n" , this, (fIsFirstPass ? "FIRST" : "SECOND" ))); |
2428 | |
2429 | EH_LOG((LL_INFO100, " [ method: %s%s, %s ]\n" , |
2430 | (fIsFunclet ? "FUNCLET of " : "" ), |
2431 | pMD->m_pszDebugMethodName, pMD->m_pszDebugClassName)); |
2432 | |
2433 | Thread *pThread = GetThread(); |
2434 | _ASSERTE (pThread); |
2435 | |
2436 | INDEBUG( DumpClauses(pcfThisFrame->GetJitManager(), pcfThisFrame->GetMethodToken(), uMethodStartPC, uControlPC) ); |
2437 | |
2438 | bool fIsILStub = pMD->IsILStub(); |
2439 | bool fGiveDebuggerAndProfilerNotification = !fIsILStub; |
2440 | BOOL fUnwindingToFindResumeFrame = m_ExceptionFlags.UnwindingToFindResumeFrame(); |
2441 | |
2442 | bool fIgnoreThisFrame = false; |
2443 | bool fProcessThisFrameToFindResumeFrameOnly = false; |
2444 | |
2445 | MethodDesc * pUserMDForILStub = NULL; |
2446 | Frame * pILStubFrame = NULL; |
2447 | if (fIsILStub && !fIsFunclet) // only make this callback on the main method body of IL stubs |
2448 | pUserMDForILStub = GetUserMethodForILStub(pThread, sf.SP, pMD, &pILStubFrame); |
2449 | |
2450 | #ifdef FEATURE_CORRUPTING_EXCEPTIONS |
2451 | BOOL fCanMethodHandleException = TRUE; |
2452 | CorruptionSeverity currentSeverity = NotCorrupting; |
2453 | { |
2454 | // Switch to COOP mode since we are going to request throwable |
2455 | GCX_COOP(); |
2456 | |
2457 | // We must defer to the MethodDesc of the user method instead of the IL stub |
2458 | // itself because the user can specify the policy on a per-method basis and |
2459 | // that won't be reflected via the IL stub's MethodDesc. |
2460 | MethodDesc * pMDWithCEAttribute = (pUserMDForILStub != NULL) ? pUserMDForILStub : pMD; |
2461 | |
// Check if the exception can be delivered to the method. This checks whether the exception
// is a CE and, if it is, whether the method can process it.
2464 | currentSeverity = pThread->GetExceptionState()->GetCurrentExceptionTracker()->GetCorruptionSeverity(); |
2465 | fCanMethodHandleException = CEHelper::CanMethodHandleException(currentSeverity, pMDWithCEAttribute); |
2466 | } |
2467 | #endif // FEATURE_CORRUPTING_EXCEPTIONS |
2468 | |
// If a rude abort is in progress, skip all code outside constrained execution regions.
// When a rude abort is initiated, we cannot intercept any exceptions.
2471 | if ((pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame))) |
2472 | { |
2473 | // If we are unwinding to find the real resume frame, then we cannot ignore frames yet. |
2474 | // We need to make sure we find the correct resume frame before starting to ignore frames. |
2475 | if (fUnwindingToFindResumeFrame) |
2476 | { |
2477 | fProcessThisFrameToFindResumeFrameOnly = true; |
2478 | } |
2479 | else |
2480 | { |
2481 | EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort/CE\n" )); |
2482 | fIgnoreThisFrame = true; |
2483 | } |
2484 | } |
2485 | |
2486 | // |
2487 | // BEGIN resume frame processing code |
2488 | // |
// Oftentimes, we'll run into the situation where the actual resume call frame
2490 | // is not the same call frame that we see the catch clause in. The reason for this |
2491 | // is that catch clauses get duplicated down to cover funclet code ranges. When we |
2492 | // see a catch clause covering our control PC, but it is marked as a duplicate, we |
2493 | // need to continue to unwind until we find the same clause that isn't marked as a |
2494 | // duplicate. This will be the correct resume frame. |
2495 | // |
2496 | // We actually achieve this skipping by observing that if we are catching at a |
2497 | // duplicated clause, all the call frames we should be skipping have already been |
2498 | // processed by a previous exception dispatch. So if we allow the unwind to |
2499 | // continue, we will immediately bump into the ExceptionTracker for the previous |
2500 | // dispatch, and our resume frame will be the last frame seen by that Tracker. |
2501 | // |
2502 | // Note that we will have visited all the EH clauses for a particular method when we |
2503 | // see its first funclet (the funclet which is closest to the leaf). We need to make |
2504 | // sure we don't process any EH clause again when we see other funclets or the parent |
2505 | // method until we get to the real resume frame. The real resume frame may be another |
2506 | // funclet, which is why we can't blindly skip all funclets until we see the parent |
2507 | // method frame. |
2508 | // |
2509 | // If the exception is handled by the method, then UnwindingToFindResumeFrame takes |
2510 | // care of the skipping. We basically skip everything when we are unwinding to find |
2511 | // the resume frame. If the exception is not handled by the method, then we skip all the |
2512 | // funclets until we get to the parent method. The logic to handle this is in |
2513 | // HandleFunclets(). In the first pass, HandleFunclets() only kicks |
2514 | // in if we are not unwinding to find the resume frame. |
2515 | // |
2516 | // Then on the second pass, we need to process frames up to the initial place where |
// we saw the catch clause, which means up to and including part of the resume stack
2518 | // frame. Then we need to skip the call frames up to the real resume stack frame |
2519 | // and resume. |
2520 | // |
2521 | // In the second pass, we have the same problem with skipping funclets as in the first |
2522 | // pass. However, in this case, we know exactly which frame is our target unwind frame |
2523 | // (EXCEPTION_TARGET_UNWIND will be set). So we blindly unwind until we see the parent |
2524 | // method, or until the target unwind frame. |
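//
// For illustration, here is a hypothetical second-pass stack (leaf frame at the bottom;
// note that the real resume frame may itself be another funclet):
//
//      ParentMethod        <- same catch clause, NOT marked as a duplicate: the real resume frame
//      Funclet2            <- duplicated clause; keep unwinding
//      Funclet1            <- catch clause found here, but marked as a duplicate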
2525 | PTR_EXCEPTION_CLAUSE_TOKEN pLimitClauseToken = NULL; |
2526 | if (!fIgnoreThisFrame && !fIsFirstPass && !m_sfResumeStackFrame.IsNull() && (sf >= m_sfResumeStackFrame)) |
2527 | { |
2528 | EH_LOG((LL_INFO100, " RESUMEFRAME: sf is %p and m_sfResumeStackFrame: %p\n" , sf.SP, m_sfResumeStackFrame.SP)); |
2529 | EH_LOG((LL_INFO100, " RESUMEFRAME: %s initial resume frame: %p\n" , (sf == m_sfResumeStackFrame) ? "REACHED" : "PASSED" , m_sfResumeStackFrame.SP)); |
2530 | |
2531 | // process this frame to call handlers |
2532 | EH_LOG((LL_INFO100, " RESUMEFRAME: Found last frame to process finallys in, need to process only part of call frame\n" )); |
2533 | EH_LOG((LL_INFO100, " RESUMEFRAME: Limit clause token: %p\n" , m_pClauseForCatchToken)); |
2534 | pLimitClauseToken = m_pClauseForCatchToken; |
2535 | |
2536 | // The limit clause is the same as the clause we're catching at. It is used |
// as the last clause we process in the "initial resume frame". Anything further
2538 | // down the list of clauses is skipped along with all call frames up to the actual |
2539 | // resume frame. |
2540 | CONSISTENCY_CHECK_MSG(sf == m_sfResumeStackFrame, "Passed initial resume frame and fIgnoreThisFrame wasn't set!" ); |
2541 | } |
2542 | // |
2543 | // END resume frame code |
2544 | // |
2545 | |
2546 | if (!fIgnoreThisFrame) |
2547 | { |
2548 | BOOL fFoundHandler = FALSE; |
2549 | DWORD_PTR dwHandlerStartPC = NULL; |
2550 | |
2551 | BOOL bReplaceStack = FALSE; |
2552 | BOOL bSkipLastElement = FALSE; |
2553 | bool fUnwindFinished = false; |
2554 | |
2555 | if (STS_FirstRethrowFrame == STState) |
2556 | { |
2557 | bSkipLastElement = TRUE; |
2558 | } |
2559 | else |
2560 | if (STS_NewException == STState) |
2561 | { |
2562 | bReplaceStack = TRUE; |
2563 | } |
2564 | |
2565 | // We need to notify the profiler on the first pass in two cases: |
2566 | // 1) a brand new exception is thrown, and |
2567 | // 2) an exception is rethrown. |
2568 | if (fIsFirstPass && (bSkipLastElement || bReplaceStack)) |
2569 | { |
2570 | GCX_COOP(); |
2571 | EEToProfilerExceptionInterfaceWrapper::ExceptionThrown(pThread); |
2572 | UpdatePerformanceMetrics(pcfThisFrame, bSkipLastElement, bReplaceStack); |
2573 | } |
2574 | |
2575 | if (!fUnwindingToFindResumeFrame) |
2576 | { |
2577 | // |
2578 | // update our exception stacktrace, ignoring IL stubs |
2579 | // |
2580 | if (fIsFirstPass && !pMD->IsILStub()) |
2581 | { |
2582 | GCX_COOP(); |
2583 | |
2584 | m_StackTraceInfo.AppendElement(CanAllocateMemory(), uControlPC, sf.SP, pMD, pcfThisFrame); |
2585 | m_StackTraceInfo.SaveStackTrace(CanAllocateMemory(), m_hThrowable, bReplaceStack, bSkipLastElement); |
2586 | } |
2587 | |
2588 | // |
2589 | // make callback to debugger and/or profiler |
2590 | // |
2591 | if (fGiveDebuggerAndProfilerNotification) |
2592 | { |
2593 | if (fIsFirstPass) |
2594 | { |
2595 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionEnter(pMD); |
2596 | |
// Notify the debugger that we are on the first pass for a managed exception.
2598 | // Note that this callback is made for every managed frame. |
2599 | EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, uControlPC, sf.SP); |
2600 | |
2601 | #if defined(DEBUGGING_SUPPORTED) |
2602 | _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker); |
2603 | |
2604 | // check if the current exception has been intercepted. |
2605 | if (m_ExceptionFlags.DebuggerInterceptInfo()) |
2606 | { |
2607 | // According to the x86 implementation, we don't need to call the ExceptionSearchFunctionLeave() |
2608 | // profiler callback. |
2609 | StackFrame sfInterceptStackFrame; |
2610 | m_DebuggerExState.GetDebuggerInterceptInfo(NULL, NULL, |
2611 | reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)), |
2612 | NULL, NULL); |
2613 | |
2614 | // Save the target unwind frame just like we do when we find a catch clause. |
2615 | m_sfResumeStackFrame = sfInterceptStackFrame; |
2616 | ReturnStatus = FirstPassComplete; |
2617 | goto lExit; |
2618 | } |
2619 | #endif // DEBUGGING_SUPPORTED |
2620 | |
2621 | // Attempt to deliver the first chance notification to the AD only *AFTER* the debugger |
2622 | // has done that, provided we have not already delivered it. |
2623 | if (!this->DeliveredFirstChanceNotification()) |
2624 | { |
2625 | ExceptionNotifications::DeliverFirstChanceNotification(); |
2626 | } |
2627 | } |
2628 | else |
2629 | { |
2630 | #if defined(DEBUGGING_SUPPORTED) |
2631 | _ASSERTE(this == pThread->GetExceptionState()->m_pCurrentTracker); |
2632 | |
2633 | // check if the exception is intercepted. |
2634 | if (m_ExceptionFlags.DebuggerInterceptInfo()) |
2635 | { |
2636 | MethodDesc* pInterceptMD = NULL; |
2637 | StackFrame sfInterceptStackFrame; |
2638 | |
2639 | // check if we have reached the interception point yet |
2640 | m_DebuggerExState.GetDebuggerInterceptInfo(&pInterceptMD, NULL, |
2641 | reinterpret_cast<PBYTE *>(&(sfInterceptStackFrame.SP)), |
2642 | NULL, NULL); |
2643 | |
2644 | // If the exception has gone unhandled in the first pass, we wouldn't have a chance |
2645 | // to set the target unwind frame. Check for this case now. |
2646 | if (m_sfResumeStackFrame.IsNull()) |
2647 | { |
2648 | m_sfResumeStackFrame = sfInterceptStackFrame; |
2649 | } |
2650 | _ASSERTE(m_sfResumeStackFrame == sfInterceptStackFrame); |
2651 | |
2652 | if ((pInterceptMD == pMD) && |
2653 | (sfInterceptStackFrame == sf)) |
2654 | { |
2655 | // If we have reached the stack frame at which the exception is intercepted, |
2656 | // then finish the second pass prematurely. |
2657 | SecondPassIsComplete(pMD, sf); |
2658 | ReturnStatus = SecondPassComplete; |
2659 | goto lExit; |
2660 | } |
2661 | } |
2662 | #endif // DEBUGGING_SUPPORTED |
2663 | |
2664 | // According to the x86 implementation, we don't need to call the ExceptionUnwindFunctionEnter() |
2665 | // profiler callback when an exception is intercepted. |
2666 | EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionEnter(pMD); |
2667 | } |
2668 | } |
2669 | |
2670 | } |
2671 | |
2672 | #ifdef FEATURE_STACK_PROBE |
2673 | // Don't call a handler if we're within a certain distance of the end of the stack. Could end up here via probe, in |
2674 | // which case guard page is intact, or via hard SO, in which case guard page won't be. So don't check for presence of |
2675 | // guard page, just check for sufficient space on stack. |
2676 | if ( IsStackOverflowException() |
2677 | && !pThread->CanResetStackTo((void*)sf.SP)) |
2678 | { |
2679 | EH_LOG((LL_INFO100, " STACKOVERFLOW: IGNOREFRAME: stack frame too close to guard page: sf.SP: %p\n" , sf.SP)); |
2680 | } |
2681 | else |
2682 | #endif // FEATURE_STACK_PROBE |
2683 | { |
2684 | IJitManager* pJitMan = pcfThisFrame->GetJitManager(); |
2685 | const METHODTOKEN& MethToken = pcfThisFrame->GetMethodToken(); |
2686 | |
2687 | EH_CLAUSE_ENUMERATOR EnumState; |
2688 | unsigned EHCount; |
2689 | |
2690 | #ifdef FEATURE_CORRUPTING_EXCEPTIONS |
// If the method cannot handle the exception (e.g. it cannot handle the CE), then simply bail out
// without examining the EH clauses in it.
2693 | if (!fCanMethodHandleException) |
2694 | { |
2695 | LOG((LF_EH, LL_INFO100, "ProcessManagedCallFrame - CEHelper decided not to look for exception handlers in the method(MD:%p).\n" , pMD)); |
2696 | |
2697 | // Set the flag to skip this frame since the CE cannot be delivered |
2698 | _ASSERTE(currentSeverity == ProcessCorrupting); |
2699 | |
2700 | // Force EHClause count to be zero |
2701 | EHCount = 0; |
2702 | } |
2703 | else |
2704 | #endif // FEATURE_CORRUPTING_EXCEPTIONS |
2705 | { |
2706 | EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState); |
2707 | } |
2708 | |
2709 | |
2710 | if (!fIsFirstPass) |
2711 | { |
2712 | // For a method that may have nested funclets, it is possible that a reference may be |
2713 | // dead at the point where control flow left the method but may become active once |
2714 | // a funclet is executed. |
2715 | // |
2716 | // Upon returning from the funclet but before the next funclet is invoked, a GC |
2717 | // may happen if we are in preemptive mode. Since the GC stackwalk will commence |
2718 | // at the original IP at which control left the method, it can result in the reference |
2719 | // not being updated (since it was dead at the point control left the method) if the object |
2720 | // is moved during GC. |
2721 | // |
2722 | // To address this, we will indefinitely switch to COOP mode while enumerating, and invoking, |
2723 | // funclets. |
2724 | // |
// This switch is also required for another scenario: we may be in the unwind phase and the current frame
// may not have any termination handlers to be invoked (i.e. it may have zero EH clauses applicable to
// the unwind phase). If we do not switch to COOP mode for such a frame, we could remain in preemptive mode.
// Upon returning back from ProcessOSExceptionNotification in ProcessCLRException, when we attempt to
// switch to COOP mode to update the LastUnwoundEstablisherFrame, we could get blocked due to an
// active GC, prior to performing the update.
//
// In this case, if the GC stackwalk encounters the current frame and attempts to check whether it has
// been unwound by an exception, it will not recognize the frame as unwound, even though logically it
// has been (especially since it had no termination handlers). Thus, the GC stackwalk would attempt to
// report references from it, which is incorrect.
2736 | // |
2737 | // Thus, when unwinding, we will always switch to COOP mode indefinitely, irrespective of whether |
2738 | // the frame has EH clauses to be processed or not. |
2739 | GCX_COOP_NO_DTOR(); |
2740 | |
2741 | // We will also forbid any GC to happen between successive funclet invocations. |
2742 | // This will be automatically undone when the contract goes off the stack as the method |
2743 | // returns back to its caller. |
2744 | BEGINFORBIDGC(); |
2745 | } |
2746 | |
2747 | for (unsigned i = 0; i < EHCount; i++) |
2748 | { |
2749 | EE_ILEXCEPTION_CLAUSE EHClause; |
2750 | PTR_EXCEPTION_CLAUSE_TOKEN pEHClauseToken = pJitMan->GetNextEHClause(&EnumState, &EHClause); |
2751 | |
2752 | EH_LOG((LL_INFO100, " considering %s clause [%x,%x), ControlPc is %s clause (offset %x)" , |
2753 | (IsFault(&EHClause) ? "fault" : |
2754 | (IsFinally(&EHClause) ? "finally" : |
2755 | (IsFilterHandler(&EHClause) ? "filter" : |
2756 | (IsTypedHandler(&EHClause) ? "typed" : "unknown" )))), |
2757 | EHClause.TryStartPC, |
2758 | EHClause.TryEndPC, |
2759 | (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset()) ? "inside" : "outside" ), |
2760 | pcfThisFrame->GetRelOffset() |
2761 | )); |
2762 | |
2763 | LOG((LF_EH, LL_INFO100, "\n" )); |
2764 | |
2765 | // If we have a valid EstablisherFrame for the managed frame where |
2766 | // ThreadAbort was raised after the catch block, then see if we |
2767 | // have reached that frame during the exception dispatch. If we |
2768 | // have, then proceed to skip applicable EH clauses. |
2769 | if ((!sfEstablisherOfActualHandlerFrame.IsNull()) && (sfEstablisherFrame == sfEstablisherOfActualHandlerFrame)) |
2770 | { |
// We should have a valid index of the EH clause (corresponding to a catch block) after
// which the thread abort was raised.
2773 | _ASSERTE(dwTACatchHandlerClauseIndex > 0); |
2774 | { |
// Since we have the index, check if the current EH clause index
// is less than the saved index. If it is, then it implies that
// we are evaluating clauses that lie "before" the EH clause
// for the catch block "after" which the thread abort was raised.
2779 | // |
2780 | // Since ThreadAbort has to make forward progress, we will |
2781 | // skip evaluating any such EH clauses. Two things can happen: |
2782 | // |
2783 | // 1) We will find clauses representing handlers beyond the |
2784 | // catch block after which ThreadAbort was raised. Since this is |
2785 | // what we want, we evaluate them. |
2786 | // |
// 2) There won't be any more clauses, implying that the catch block
2788 | // after which the exception was raised was the outermost |
2789 | // handler in the method. Thus, the exception will escape out, |
2790 | // which is semantically the correct thing to happen. |
2791 | // |
2792 | // The premise of this check is based upon a JIT compiler's implementation |
2793 | // detail: when it generates EH clauses, JIT compiler will order them from |
2794 | // top->bottom (when reading a method) and inside->out when reading nested |
2795 | // clauses. |
2796 | // |
2797 | // This assumption is not new since the basic EH type-matching is reliant |
2798 | // on this very assumption. However, now we have one more candidate that |
2799 | // gets to rely on it. |
2800 | // |
2801 | // Eventually, this enables forward progress of thread abort exception. |
if (i <= (dwTACatchHandlerClauseIndex - 1))
2803 | { |
2804 | EH_LOG((LL_INFO100, " skipping the evaluation of EH clause (index=%d) since we cannot process an exception in a handler\n" , i)); |
2805 | EH_LOG((LL_INFO100, " that exists prior to the one (index=%d) after which ThreadAbort was [re]raised.\n" , dwTACatchHandlerClauseIndex)); |
2806 | continue; |
2807 | } |
2808 | } |
2809 | } |
2810 | |
2811 | |
2812 | // see comment above where we set pLimitClauseToken |
2813 | if (pEHClauseToken == pLimitClauseToken) |
2814 | { |
2815 | EH_LOG((LL_INFO100, " found limit clause, stopping clause enumeration\n" )); |
2816 | |
2817 | // <GC_FUNCLET_REFERENCE_REPORTING> |
2818 | // |
2819 | // If we are here, the exception has been identified to be handled by a duplicate catch clause |
// that is protecting the current funclet. The call to SetEnclosingClauseInfo (below)
// will setup the CallerSP (for GC reference reporting) to be the SP of the
// caller of the current funclet (where the exception has happened, or is escaping from).
2823 | // |
2824 | // However, we need the CallerSP to be set as the SP of the caller of the |
2825 | // actual frame that will contain (and invoke) the catch handler corresponding to |
2826 | // the duplicate clause. But that isn't available right now and we can only know |
2827 | // once we unwind upstack to reach the target frame. |
2828 | // |
2829 | // Thus, upon reaching the target frame and before invoking the catch handler, |
2830 | // we will fix up the CallerSP (for GC reporting) to be that of the caller of the |
2831 | // target frame that will be invoking the actual catch handler. |
2832 | // |
2833 | // </GC_FUNCLET_REFERENCE_REPORTING> |
2834 | // |
2835 | // for catch clauses |
2836 | SetEnclosingClauseInfo(fIsFunclet, |
2837 | pcfThisFrame->GetRelOffset(), |
2838 | GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext)); |
2839 | fUnwindFinished = true; |
2840 | m_fFixupCallerSPForGCReporting = true; |
2841 | break; |
2842 | } |
2843 | |
2844 | BOOL fTermHandler = IsFaultOrFinally(&EHClause); |
2845 | fFoundHandler = FALSE; |
2846 | |
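// The first (search) pass only evaluates filter and typed (catch) handlers; the
// second (unwind) pass only evaluates fault and finally handlers. The check below
// skips clauses that do not apply to the current pass.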
2847 | if (( fIsFirstPass && fTermHandler) || |
2848 | (!fIsFirstPass && !fTermHandler)) |
2849 | { |
2850 | continue; |
2851 | } |
2852 | |
2853 | if (ClauseCoversPC(&EHClause, pcfThisFrame->GetRelOffset())) |
2854 | { |
2855 | EH_LOG((LL_INFO100, " clause covers ControlPC\n" )); |
2856 | |
2857 | dwHandlerStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.HandlerStartPC); |
2858 | |
2859 | if (fUnwindingToFindResumeFrame) |
2860 | { |
2861 | CONSISTENCY_CHECK(fIsFirstPass); |
2862 | if (!fTermHandler) |
2863 | { |
2864 | // m_pClauseForCatchToken can only be NULL for continuable exceptions, but we should never |
2865 | // get here if we are handling continuable exceptions. fUnwindingToFindResumeFrame is |
2866 | // only true at the end of the first pass. |
2867 | _ASSERTE(m_pClauseForCatchToken != NULL); |
2868 | |
2869 | // handlers match and not duplicate? |
2870 | EH_LOG((LL_INFO100, " RESUMEFRAME: catch handler: [%x,%x], this handler: [%x,%x] %s\n" , |
2871 | m_ClauseForCatch.HandlerStartPC, |
2872 | m_ClauseForCatch.HandlerEndPC, |
2873 | EHClause.HandlerStartPC, |
2874 | EHClause.HandlerEndPC, |
2875 | IsDuplicateClause(&EHClause) ? "[duplicate]" : "" )); |
2876 | |
2877 | if ((m_ClauseForCatch.HandlerStartPC == EHClause.HandlerStartPC) && |
2878 | (m_ClauseForCatch.HandlerEndPC == EHClause.HandlerEndPC)) |
2879 | { |
2880 | EH_LOG((LL_INFO100, " RESUMEFRAME: found clause with same handler as catch\n" )); |
2881 | if (!IsDuplicateClause(&EHClause)) |
2882 | { |
2883 | CONSISTENCY_CHECK(fIsFirstPass); |
2884 | |
2885 | if (fProcessThisFrameToFindResumeFrameOnly) |
2886 | { |
2887 | EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame, \ |
2888 | but rude thread abort is initiated: %p\n" , sf.SP)); |
2889 | |
2890 | // We have found the real resume frame. However, rude thread abort |
2891 | // has been initiated. Thus, we need to continue the first pass |
2892 | // as if we have not found a handler yet. To do so, we need to |
// reset all the information we saved when we found the handler.
2894 | m_ExceptionFlags.ResetUnwindingToFindResumeFrame(); |
2895 | |
2896 | m_uCatchToCallPC = NULL; |
2897 | m_pClauseForCatchToken = NULL; |
2898 | |
2899 | m_sfResumeStackFrame.Clear(); |
2900 | ReturnStatus = UnwindPending; |
2901 | } |
2902 | else |
2903 | { |
2904 | EH_LOG((LL_INFO100, " RESUMEFRAME: identified real resume frame: %p\n" , sf.SP)); |
2905 | |
2906 | // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler |
2907 | // that decided to handle the exception. We may need it |
2908 | // if a ThreadAbort is raised after the catch block |
2909 | // executes. |
2910 | m_dwIndexClauseForCatch = i + 1; |
2911 | m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame; |
2912 | #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
2913 | m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD); |
2914 | #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
// On ARM & ARM64, the EstablisherFrame is the value of SP at the time a function was called and before its prolog
// executed. Effectively, it is the SP of the caller.
2917 | m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP; |
2918 | #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
2919 | |
2920 | ReturnStatus = FirstPassComplete; |
2921 | } |
2922 | } |
2923 | break; |
2924 | } |
2925 | } |
2926 | } |
2927 | else if (IsFilterHandler(&EHClause)) |
2928 | { |
2929 | DWORD_PTR dwResult = EXCEPTION_CONTINUE_SEARCH; |
2930 | DWORD_PTR dwFilterStartPC; |
2931 | |
2932 | dwFilterStartPC = pJitMan->GetCodeAddressForRelOffset(MethToken, EHClause.FilterOffset); |
2933 | |
2934 | EH_LOG((LL_INFO100, " calling filter\n" )); |
2935 | |
// @todo : If user code throws a StackOverflowException and we have plenty of stack,
// we probably don't want to be so strict in not calling handlers.
2938 | if (! IsStackOverflowException()) |
2939 | { |
// Save the current EHClause Index and Establisher of the clause post which
// ThreadAbort was raised. This is done because an exception handled inside a filter
// resets the state that was set up before the filter was invoked.
//
// We don't have to do this for finally/fault clauses since they execute
// in the second pass and by that time, we have already skipped the required
// EH clauses in the applicable stackframe.
2947 | DWORD dwPreFilterTACatchHandlerClauseIndex = dwTACatchHandlerClauseIndex; |
2948 | StackFrame sfPreFilterEstablisherOfActualHandlerFrame = sfEstablisherOfActualHandlerFrame; |
2949 | |
2950 | EX_TRY |
2951 | { |
2952 | // We want to call filters even if the thread is aborting, so suppress abort |
2953 | // checks while the filter runs. |
2954 | ThreadPreventAsyncHolder preventAbort(TRUE); |
2955 | |
2956 | // for filter clauses |
2957 | SetEnclosingClauseInfo(fIsFunclet, |
2958 | pcfThisFrame->GetRelOffset(), |
2959 | GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext)); |
2960 | #ifdef USE_FUNCLET_CALL_HELPER |
// On ARM & ARM64, the OS passes us the CallerSP for the frame for which the personality routine has been invoked.
// Since IL filters are invoked in the first pass, we pass this CallerSP to the filter funclet, which will
// then look up the actual frame pointer value using it, since we don't have a frame pointer to pass to it
// directly.
2965 | // |
2966 | // Assert our invariants (we had set them up in InitializeCrawlFrame): |
2967 | REGDISPLAY *pCurRegDisplay = pcfThisFrame->GetRegisterSet(); |
2968 | |
2969 | CONTEXT *pContext = NULL; |
2970 | #ifndef USE_CURRENT_CONTEXT_IN_FILTER |
// 1) In the first pass, we don't have a valid current context IP
2972 | _ASSERTE(GetIP(pCurRegDisplay->pCurrentContext) == 0); |
2973 | pContext = pCurRegDisplay->pCallerContext; |
2974 | #else |
2975 | pContext = pCurRegDisplay->pCurrentContext; |
2976 | #endif // !USE_CURRENT_CONTEXT_IN_FILTER |
2977 | #ifdef USE_CALLER_SP_IN_FUNCLET |
2978 | // 2) Our caller context and caller SP are valid |
2979 | _ASSERTE(pCurRegDisplay->IsCallerContextValid && pCurRegDisplay->IsCallerSPValid); |
2980 | // 3) CallerSP is intact |
2981 | _ASSERTE(GetSP(pCurRegDisplay->pCallerContext) == GetRegdisplaySP(pCurRegDisplay)); |
2982 | #endif // USE_CALLER_SP_IN_FUNCLET |
2983 | #endif // USE_FUNCLET_CALL_HELPER |
2984 | { |
2985 | // CallHandler expects to be in COOP mode. |
2986 | GCX_COOP(); |
2987 | dwResult = CallHandler(dwFilterStartPC, sf, &EHClause, pMD, Filter X86_ARG(pContext) ARM_ARG(pContext) ARM64_ARG(pContext)); |
2988 | } |
2989 | } |
2990 | EX_CATCH |
2991 | { |
2992 | // We had an exception in filter invocation that remained unhandled. |
2993 | |
2994 | // Sync managed exception state, for the managed thread, based upon the active exception tracker. |
2995 | pThread->SyncManagedExceptionState(false); |
2996 | |
2997 | // we've returned from the filter abruptly, now out of managed code |
2998 | m_EHClauseInfo.SetManagedCodeEntered(FALSE); |
2999 | |
3000 | EH_LOG((LL_INFO100, " filter threw an exception\n" )); |
3001 | |
3002 | // notify profiler |
3003 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave(); |
3004 | m_EHClauseInfo.ResetInfo(); |
3005 | |
3006 | // continue search |
3007 | } |
3008 | EX_END_CATCH(SwallowAllExceptions); |
3009 | |
3010 | // Reset the EH clause Index and Establisher of the TA reraise clause |
3011 | pThread->m_dwIndexClauseForCatch = dwPreFilterTACatchHandlerClauseIndex; |
3012 | pThread->m_sfEstablisherOfActualHandlerFrame = sfPreFilterEstablisherOfActualHandlerFrame; |
3013 | |
3014 | if (pThread->IsRudeAbortInitiated() && !pThread->IsWithinCer(pcfThisFrame)) |
3015 | { |
3016 | EH_LOG((LL_INFO100, " IGNOREFRAME: rude abort\n" )); |
3017 | goto lExit; |
3018 | } |
3019 | } |
3020 | else |
3021 | { |
3022 | EH_LOG((LL_INFO100, " STACKOVERFLOW: filter not called due to lack of guard page\n" )); |
3023 | // continue search |
3024 | } |
3025 | |
3026 | if (EXCEPTION_EXECUTE_HANDLER == dwResult) |
3027 | { |
3028 | fFoundHandler = TRUE; |
3029 | } |
3030 | else if (EXCEPTION_CONTINUE_SEARCH != dwResult) |
3031 | { |
3032 | // |
3033 | // Behavior is undefined according to the spec. Let's not execute the handler. |
3034 | // |
3035 | } |
3036 | EH_LOG((LL_INFO100, " filter returned %s\n" , (fFoundHandler ? "EXCEPTION_EXECUTE_HANDLER" : "EXCEPTION_CONTINUE_SEARCH" ))); |
3037 | } |
3038 | else if (IsTypedHandler(&EHClause)) |
3039 | { |
3040 | GCX_COOP(); |
3041 | |
3042 | TypeHandle thrownType = TypeHandle(); |
3043 | OBJECTREF oThrowable = m_pThread->GetThrowable(); |
3044 | if (oThrowable != NULL) |
3045 | { |
3046 | oThrowable = PossiblyUnwrapThrowable(oThrowable, pcfThisFrame->GetAssembly()); |
3047 | thrownType = oThrowable->GetTrueTypeHandle(); |
3048 | } |
3049 | |
3050 | if (!thrownType.IsNull()) |
3051 | { |
3052 | if (EHClause.ClassToken == mdTypeRefNil) |
3053 | { |
3054 | // this is a catch(...) |
3055 | fFoundHandler = TRUE; |
3056 | } |
3057 | else |
3058 | { |
3059 | TypeHandle typeHnd = pJitMan->ResolveEHClause(&EHClause, pcfThisFrame); |
3060 | |
3061 | EH_LOG((LL_INFO100, |
3062 | " clause type = %s\n" , |
3063 | (!typeHnd.IsNull() ? typeHnd.GetMethodTable()->GetDebugClassName() |
3064 | : "<couldn't resolve>" ))); |
3065 | EH_LOG((LL_INFO100, |
3066 | " thrown type = %s\n" , |
3067 | thrownType.GetMethodTable()->GetDebugClassName())); |
3068 | |
3069 | fFoundHandler = !typeHnd.IsNull() && ExceptionIsOfRightType(typeHnd, thrownType); |
3070 | } |
3071 | } |
3072 | } |
3073 | else |
3074 | { |
3075 | _ASSERTE(fTermHandler); |
3076 | fFoundHandler = TRUE; |
3077 | } |
3078 | |
3079 | if (fFoundHandler) |
3080 | { |
3081 | if (fIsFirstPass) |
3082 | { |
3083 | _ASSERTE(IsFilterHandler(&EHClause) || IsTypedHandler(&EHClause)); |
3084 | |
3085 | EH_LOG((LL_INFO100, " found catch at 0x%p, sp = 0x%p\n" , dwHandlerStartPC, sf.SP)); |
3086 | m_uCatchToCallPC = dwHandlerStartPC; |
3087 | m_pClauseForCatchToken = pEHClauseToken; |
3088 | m_ClauseForCatch = EHClause; |
3089 | |
3090 | m_sfResumeStackFrame = sf; |
3091 | |
3092 | #if defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED) |
3093 | // |
3094 | // notify the debugger and profiler |
3095 | // |
3096 | if (fGiveDebuggerAndProfilerNotification) |
3097 | { |
3098 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchCatcherFound(pMD); |
3099 | } |
3100 | |
3101 | if (fIsILStub) |
3102 | { |
3103 | // |
3104 | // NotifyOfCHFFilter has two behaviors |
// * Notify debugger, get interception info and unwind (function will not return)
// In this case, m_sfResumeStackFrame is expected to be NULL or the frame of interception.
// We NULL it out because we get the interception event after this point.
// * Notify debugger and return.
// In this case the normal EH proceeds and we need to reset m_sfResumeStackFrame back to the sf of the catch handler.
3110 | // TODO: remove this call and try to report the IL catch handler in the IL stub itself. |
3111 | m_sfResumeStackFrame.Clear(); |
3112 | EEToDebuggerExceptionInterfaceWrapper::NotifyOfCHFFilter((EXCEPTION_POINTERS*)&m_ptrs, pILStubFrame); |
3113 | m_sfResumeStackFrame = sf; |
3114 | } |
3115 | else |
3116 | { |
3117 | // We don't need to do anything special for continuable exceptions after calling |
3118 | // this callback. We are going to start unwinding anyway. |
3119 | EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedExceptionCatcherFound(pThread, pMD, (TADDR) uMethodStartPC, sf.SP, |
3120 | &EHClause); |
3121 | } |
3122 | |
3123 | // If the exception is intercepted, then the target unwind frame may not be the |
3124 | // stack frame we are currently processing, so clear it now. We'll set it |
3125 | // later in second pass. |
3126 | if (pThread->GetExceptionState()->GetFlags()->DebuggerInterceptInfo()) |
3127 | { |
3128 | m_sfResumeStackFrame.Clear(); |
3129 | } |
3130 | #endif //defined(DEBUGGING_SUPPORTED) || defined(PROFILING_SUPPORTED) |
3131 | |
3132 | // |
3133 | // BEGIN resume frame code |
3134 | // |
3135 | EH_LOG((LL_INFO100, " RESUMEFRAME: initial resume stack frame: %p\n" , sf.SP)); |
3136 | |
3137 | if (IsDuplicateClause(&EHClause)) |
3138 | { |
3139 | EH_LOG((LL_INFO100, " RESUMEFRAME: need to unwind to find real resume frame\n" )); |
3140 | m_ExceptionFlags.SetUnwindingToFindResumeFrame(); |
3141 | |
3142 | // This is a duplicate catch funclet. As a result, we will continue to let the |
3143 | // exception dispatch proceed upstack to find the actual frame where the |
3144 | // funclet lives. |
3145 | // |
3146 | // At the same time, we also need to save the CallerSP of the frame containing |
3147 | // the catch funclet (like we do for other funclets). If the current frame |
3148 | // represents a funclet that was invoked by JITted code, then we will save |
3149 | // the caller SP of the current frame when we see it during the 2nd pass - |
3150 | // refer to the use of "pLimitClauseToken" in the code above. |
3151 | // |
3152 | // However, that is not the callerSP of the frame containing the catch funclet |
3153 | // as the actual frame containing the funclet (and where it will be executed) |
3154 | // is the one that will be the target of unwind during the first pass. |
3155 | // |
3156 | // To correctly get that, we will determine if the current frame is a funclet |
3157 | // and if it was invoked from JITted code. If this is true, then current frame |
3158 | // represents a finally funclet invoked non-exceptionally (from its parent frame |
3159 | // or yet another funclet). In such a case, we will set a flag indicating that |
3160 | // we need to reset the enclosing clause SP for the catch funclet and later, |
3161 | // when 2nd pass reaches the actual frame containing the catch funclet to be |
3162 | // executed, we will update the enclosing clause SP if the |
3163 | // "m_fResetEnclosingClauseSPForCatchFunclet" flag is set, just prior to |
3164 | // invoking the catch funclet. |
3165 | if (fIsFunclet) |
3166 | { |
3167 | REGDISPLAY* pCurRegDisplay = pcfThisFrame->GetRegisterSet(); |
3168 | _ASSERTE(pCurRegDisplay->IsCallerContextValid); |
3169 | TADDR adrReturnAddressFromFunclet = PCODEToPINSTR(GetIP(pCurRegDisplay->pCallerContext)) - STACKWALK_CONTROLPC_ADJUST_OFFSET; |
3170 | m_fResetEnclosingClauseSPForCatchFunclet = ExecutionManager::IsManagedCode(adrReturnAddressFromFunclet); |
3171 | } |
3172 | |
3173 | ReturnStatus = UnwindPending; |
3174 | break; |
3175 | } |
3176 | |
3177 | EH_LOG((LL_INFO100, " RESUMEFRAME: no extra unwinding required, real resume frame: %p\n" , sf.SP)); |
3178 | |
3179 | // Save off the index and the EstablisherFrame of the EH clause of the non-duplicate handler |
3180 | // that decided to handle the exception. We may need it |
3181 | // if a ThreadAbort is raised after the catch block |
3182 | // executes. |
3183 | m_dwIndexClauseForCatch = i + 1; |
3184 | m_sfEstablisherOfActualHandlerFrame = sfEstablisherFrame; |
3185 | |
3186 | #ifndef ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
3187 | m_sfCallerOfActualHandlerFrame = EECodeManager::GetCallerSp(pcfThisFrame->pRD); |
3188 | #else // !ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
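// As in the RESUMEFRAME code above, the EstablisherFrame on these targets is the SP
// of the caller of the frame containing the handler.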
3189 | m_sfCallerOfActualHandlerFrame = sfEstablisherFrame.SP; |
3190 | #endif // ESTABLISHER_FRAME_ADDRESS_IS_CALLER_SP |
3191 | // |
3192 | // END resume frame code |
3193 | // |
3194 | |
3195 | ReturnStatus = FirstPassComplete; |
3196 | break; |
3197 | } |
3198 | else |
3199 | { |
3200 | EH_LOG((LL_INFO100, " found finally/fault at 0x%p\n" , dwHandlerStartPC)); |
3201 | _ASSERTE(fTermHandler); |
3202 | |
// @todo : If user code throws a StackOverflowException and we have plenty of stack,
// we probably don't want to be so strict in not calling handlers.
3205 | if (!IsStackOverflowException()) |
3206 | { |
3207 | DWORD_PTR dwStatus; |
3208 | |
3209 | // for finally clauses |
3210 | SetEnclosingClauseInfo(fIsFunclet, |
3211 | pcfThisFrame->GetRelOffset(), |
3212 | GetSP(pcfThisFrame->GetRegisterSet()->pCallerContext)); |
3213 | |
// We have switched to indefinite COOP mode just before this loop started.
// Since we also forbid GC during the second pass, end the forbid region now,
// since invocation of managed code can result in a GC.
3217 | ENDFORBIDGC(); |
3218 | dwStatus = CallHandler(dwHandlerStartPC, sf, &EHClause, pMD, FaultFinally X86_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext) ARM64_ARG(pcfThisFrame->GetRegisterSet()->pCurrentContext)); |
3219 | |
3220 | // Once we return from a funclet, forbid GC again (refer to comment before start of the loop for details) |
3221 | BEGINFORBIDGC(); |
3222 | } |
3223 | else |
3224 | { |
3225 | EH_LOG((LL_INFO100, " STACKOVERFLOW: finally not called due to lack of guard page\n" )); |
3226 | // continue search |
3227 | } |
3228 | |
3229 | // |
3230 | // will continue to find next fault/finally in this call frame |
3231 | // |
3232 | } |
3233 | } // if fFoundHandler |
3234 | } // if clause covers PC |
3235 | } // foreach eh clause |
3236 | } // if stack frame is far enough away from guard page |
3237 | |
3238 | // |
3239 | // notify the profiler |
3240 | // |
3241 | if (fGiveDebuggerAndProfilerNotification) |
3242 | { |
3243 | if (fIsFirstPass) |
3244 | { |
3245 | if (!fUnwindingToFindResumeFrame) |
3246 | { |
3247 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFunctionLeave(pMD); |
3248 | } |
3249 | } |
3250 | else |
3251 | { |
3252 | if (!fUnwindFinished) |
3253 | { |
3254 | EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFunctionLeave(pMD); |
3255 | } |
3256 | } |
3257 | } |
3258 | } // fIgnoreThisFrame |
3259 | |
3260 | lExit: |
3261 | return ReturnStatus; |
3262 | } |
3263 | |
3264 | // <64bit_And_Arm_Specific> |
3265 | |
3266 | // For funclets, add support for unwinding frame chain during SO. These definitions will be automatically picked up by |
3267 | // BEGIN_SO_TOLERANT_CODE/END_SO_TOLERANT_CODE usage in ExceptionTracker::CallHandler below. |
3268 | // |
// This is required since funclet invocation is the only case of calling managed code from the VM that is not wrapped by
// an assembly helper with an associated personality routine. The personality routine will invoke CleanupForSecondPass to
3271 | // release exception trackers and unwind frame chain. |
3272 | // |
3273 | // We need to do the same work as CleanupForSecondPass for funclet invocation in the face of SO. Thus, we redefine OPTIONAL_SO_CLEANUP_UNWIND |
3274 | // below. This will perform frame chain unwind inside the "__finally" block that is part of the END_SO_TOLERANT_CODE macro only in the face |
3275 | // of an SO. |
3276 | // |
// The second part of the work, releasing exception trackers, is done inside the "__except" block, also part of
// END_SO_TOLERANT_CODE, by invoking ClearExceptionStateAfterSO.
3279 | // |
3280 | // </64bit_And_Arm_Specific> |
3281 | |
3282 | #undef OPTIONAL_SO_CLEANUP_UNWIND |
3283 | |
3284 | #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) if (pThread->GetFrame() < pFrame) { UnwindFrameChain(pThread, pFrame); } |
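// Note: the "<" comparison works because the explicit Frame chain lives on the stack,
// which grows towards lower addresses; pThread->GetFrame() < pFrame means there are
// Frames more recent than pFrame that must be unwound before the SO propagates past
// this point.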
3285 | |
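// Rough shape of a funclet as invoked from the VM: the first argument locates the
// parent frame (establisher frame / frame pointer) so the funclet can access the
// parent's locals, and the second is the exception object (used by catch funclets).
// Catch funclets return the PC at which execution should resume; filter funclets
// return an EXCEPTION_* disposition (see the filter invocation in the first pass above).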
3286 | typedef DWORD_PTR (HandlerFn)(UINT_PTR uStackFrame, Object* pExceptionObj); |
3287 | |
3288 | #ifdef USE_FUNCLET_CALL_HELPER |
3289 | // This is an assembly helper that enables us to call into EH funclets. |
3290 | EXTERN_C DWORD_PTR STDCALL CallEHFunclet(Object *pThrowable, UINT_PTR pFuncletToInvoke, UINT_PTR *pFirstNonVolReg, UINT_PTR *pFuncletCallerSP); |
3291 | |
3292 | // This is an assembly helper that enables us to call into EH filter funclets. |
3293 | EXTERN_C DWORD_PTR STDCALL CallEHFilterFunclet(Object *pThrowable, TADDR CallerSP, UINT_PTR pFuncletToInvoke, UINT_PTR *pFuncletCallerSP); |
3294 | |
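// Returns the address to call for a funclet. On ARM, managed code executes in Thumb
// mode, so the target address must have its low bit set; DataPointerToThumbCode
// takes care of that.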
3295 | static inline UINT_PTR CastHandlerFn(HandlerFn *pfnHandler) |
3296 | { |
3297 | #ifdef _TARGET_ARM_ |
3298 | return DataPointerToThumbCode<UINT_PTR, HandlerFn *>(pfnHandler); |
3299 | #else |
3300 | return (UINT_PTR)pfnHandler; |
3301 | #endif |
3302 | } |
3303 | |
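// Returns the address of the first callee-saved register slot in the given context.
// The assembly helper uses this block to make the parent frame's non-volatile
// register state visible to the funclet before transferring control to it.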
3304 | static inline UINT_PTR *GetFirstNonVolatileRegisterAddress(PCONTEXT pContextRecord) |
3305 | { |
3306 | #if defined(_TARGET_ARM_) |
3307 | return (UINT_PTR*)&(pContextRecord->R4); |
3308 | #elif defined(_TARGET_ARM64_) |
3309 | return (UINT_PTR*)&(pContextRecord->X19); |
3310 | #elif defined(_TARGET_X86_) |
3311 | return (UINT_PTR*)&(pContextRecord->Edi); |
3312 | #else |
3313 | PORTABILITY_ASSERT("GetFirstNonVolatileRegisterAddress" ); |
3314 | return NULL; |
3315 | #endif |
3316 | } |
3317 | |
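// Returns the value the filter funclet uses to recover its parent frame: the
// (caller) SP on ARM/ARM64, or EBP on x86, where the parent's locals are
// frame-pointer relative.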
3318 | static inline TADDR GetFrameRestoreBase(PCONTEXT pContextRecord) |
3319 | { |
3320 | #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) |
3321 | return GetSP(pContextRecord); |
3322 | #elif defined(_TARGET_X86_) |
3323 | return pContextRecord->Ebp; |
3324 | #else |
3325 | PORTABILITY_ASSERT("GetFrameRestoreBase" ); |
3326 | return NULL; |
3327 | #endif |
3328 | } |
3329 | |
3330 | #endif // USE_FUNCLET_CALL_HELPER |
3331 | |
3332 | DWORD_PTR ExceptionTracker::CallHandler( |
3333 | UINT_PTR uHandlerStartPC, |
3334 | StackFrame sf, |
3335 | EE_ILEXCEPTION_CLAUSE* pEHClause, |
3336 | MethodDesc* pMD, |
3337 | EHFuncletType funcletType |
3338 | X86_ARG(PCONTEXT pContextRecord) |
3339 | ARM_ARG(PCONTEXT pContextRecord) |
3340 | ARM64_ARG(PCONTEXT pContextRecord) |
3341 | ) |
3342 | { |
3343 | STATIC_CONTRACT_THROWS; |
3344 | STATIC_CONTRACT_GC_TRIGGERS; |
3345 | STATIC_CONTRACT_MODE_COOPERATIVE; |
3346 | |
3347 | DWORD_PTR dwResumePC; |
3348 | OBJECTREF throwable; |
3349 | HandlerFn* pfnHandler = (HandlerFn*)uHandlerStartPC; |
3350 | |
3351 | EH_LOG((LL_INFO100, " calling handler at 0x%p, sp = 0x%p\n" , uHandlerStartPC, sf.SP)); |
3352 | |
3353 | Thread* pThread = GetThread(); |
3354 | |
3355 | // The first parameter specifies whether we want to make callbacks before (true) or after (false) |
3356 | // calling the handler. |
3357 | MakeCallbacksRelatedToHandler(true, pThread, pMD, pEHClause, uHandlerStartPC, sf); |
3358 | |
3359 | _ASSERTE(pThread->DetermineIfGuardPagePresent()); |
3360 | |
3361 | throwable = PossiblyUnwrapThrowable(pThread->GetThrowable(), pMD->GetAssembly()); |
3362 | |
3363 | // We probe for stack space before attempting to call a filter, finally, or catch clause. The path from |
3364 | // here to the actual managed code is very short. We must probe, however, because the JIT does not generate a |
3365 | // probe for us upon entry to the handler. This probe ensures we have enough stack space to actually make it |
3366 | // into the managed code. |
3367 | // |
// In case an SO happens, this macro will also unwind the frame chain before continuing to dispatch the SO
// upstack (look at the macro implementation for details).
3370 | BEGIN_SO_TOLERANT_CODE(pThread); |
3371 | |
3372 | // Stores the current SP and BSP, which will be the caller SP and BSP for the funclet. |
3373 | // Note that we are making the assumption here that the SP and BSP don't change from this point |
3374 | // forward until we actually make the call to the funclet. If it's not the case then we will need |
3375 | // some sort of assembly wrappers to help us out. |
3376 | CallerStackFrame csfFunclet = CallerStackFrame((UINT_PTR)GetCurrentSP()); |
3377 | this->m_EHClauseInfo.SetManagedCodeEntered(TRUE); |
3378 | this->m_EHClauseInfo.SetCallerStackFrame(csfFunclet); |
3379 | |
3380 | switch(funcletType) |
3381 | { |
3382 | case EHFuncletType::Filter: |
3383 | ETW::ExceptionLog::ExceptionFilterBegin(pMD, (PVOID)uHandlerStartPC); |
3384 | break; |
3385 | case EHFuncletType::FaultFinally: |
3386 | ETW::ExceptionLog::ExceptionFinallyBegin(pMD, (PVOID)uHandlerStartPC); |
3387 | break; |
3388 | case EHFuncletType::Catch: |
3389 | ETW::ExceptionLog::ExceptionCatchBegin(pMD, (PVOID)uHandlerStartPC); |
3390 | break; |
3391 | } |
3392 | |
3393 | #ifdef USE_FUNCLET_CALL_HELPER |
3394 | // Invoke the funclet. We pass throwable only when invoking the catch block. |
3395 | // Since the actual caller of the funclet is the assembly helper, pass the reference |
3396 | // to the CallerStackFrame instance so that it can be updated. |
3397 | CallerStackFrame* pCallerStackFrame = this->m_EHClauseInfo.GetCallerStackFrameForEHClauseReference(); |
3398 | UINT_PTR *pFuncletCallerSP = &(pCallerStackFrame->SP); |
3399 | if (funcletType != EHFuncletType::Filter) |
3400 | { |
3401 | dwResumePC = CallEHFunclet((funcletType == EHFuncletType::Catch)?OBJECTREFToObject(throwable):(Object *)NULL, |
3402 | CastHandlerFn(pfnHandler), |
3403 | GetFirstNonVolatileRegisterAddress(pContextRecord), |
3404 | pFuncletCallerSP); |
3405 | } |
3406 | else |
3407 | { |
// When invoking an IL filter funclet, we pass the CallerSP, which the funclet
// uses to retrieve the frame pointer for accessing the locals in the parent
// method.
3411 | dwResumePC = CallEHFilterFunclet(OBJECTREFToObject(throwable), |
3412 | GetFrameRestoreBase(pContextRecord), |
3413 | CastHandlerFn(pfnHandler), |
3414 | pFuncletCallerSP); |
3415 | } |
3416 | #else // USE_FUNCLET_CALL_HELPER |
3417 | // |
3418 | // Invoke the funclet. |
3419 | // |
3420 | dwResumePC = pfnHandler(sf.SP, OBJECTREFToObject(throwable)); |
3421 | #endif // !USE_FUNCLET_CALL_HELPER |
3422 | |
3423 | switch(funcletType) |
3424 | { |
3425 | case EHFuncletType::Filter: |
3426 | ETW::ExceptionLog::ExceptionFilterEnd(); |
3427 | break; |
3428 | case EHFuncletType::FaultFinally: |
3429 | ETW::ExceptionLog::ExceptionFinallyEnd(); |
3430 | break; |
3431 | case EHFuncletType::Catch: |
3432 | ETW::ExceptionLog::ExceptionCatchEnd(); |
3433 | ETW::ExceptionLog::ExceptionThrownEnd(); |
3434 | break; |
3435 | } |
3436 | |
3437 | this->m_EHClauseInfo.SetManagedCodeEntered(FALSE); |
3438 | |
3439 | END_SO_TOLERANT_CODE; |
3440 | |
3441 | // The first parameter specifies whether we want to make callbacks before (true) or after (false) |
3442 | // calling the handler. |
3443 | MakeCallbacksRelatedToHandler(false, pThread, pMD, pEHClause, uHandlerStartPC, sf); |
3444 | |
3445 | return dwResumePC; |
3446 | } |
3447 | |
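// CallHandler above is the only place in this file that needs the SO cleanup unwind;
// restore the no-op definition for the code that follows.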
3448 | #undef OPTIONAL_SO_CLEANUP_UNWIND |
3449 | #define OPTIONAL_SO_CLEANUP_UNWIND(pThread, pFrame) |
3450 | |
3451 | |
3452 | // |
// This must be done after the second pass has run; it does not
// reference anything on the stack, so it is safe to run in an
// SEH __except clause as well as a C++ catch clause.
3456 | // |
3457 | // static |
3458 | void ExceptionTracker::PopTrackers( |
3459 | void* pStackFrameSP |
3460 | ) |
3461 | { |
3462 | CONTRACTL |
3463 | { |
3464 | MODE_ANY; |
3465 | GC_NOTRIGGER; |
3466 | NOTHROW; |
3467 | } |
3468 | CONTRACTL_END; |
3469 | |
3470 | StackFrame sf((UINT_PTR)pStackFrameSP); |
3471 | |
// Only call into PopTrackers if we have a managed thread and an exception in progress.
// Otherwise, the call below (to PopTrackers) is a no-op. If this ever changes, then this short-circuit needs to be fixed.
3474 | Thread *pCurThread = GetThread(); |
3475 | if ((pCurThread != NULL) && (pCurThread->GetExceptionState()->IsExceptionInProgress())) |
3476 | { |
3477 | // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException |
3478 | // for details on the usage of this COOP switch. |
3479 | GCX_COOP(); |
3480 | |
3481 | PopTrackers(sf, false); |
3482 | } |
3483 | } |
3484 | |
3485 | // |
3486 | // during the second pass, an exception might escape out to |
3487 | // unmanaged code where it is swallowed (or potentially rethrown). |
3488 | // The current tracker is abandoned in this case, and if a rethrow |
3489 | // does happen in unmanaged code, this is unfortunately treated as |
3490 | // a brand new exception. This is unavoidable because if two |
3491 | // exceptions escape out to unmanaged code in this manner, a subsequent |
3492 | // rethrow cannot be disambiguated as corresponding to the nested vs. |
3493 | // the original exception. |
3494 | void ExceptionTracker::PopTrackerIfEscaping( |
3495 | void* pStackPointer |
3496 | ) |
3497 | { |
3498 | CONTRACTL |
3499 | { |
3500 | MODE_ANY; |
3501 | GC_NOTRIGGER; |
3502 | NOTHROW; |
3503 | } |
3504 | CONTRACTL_END; |
3505 | |
3506 | Thread* pThread = GetThread(); |
3507 | ThreadExceptionState* pExState = pThread->GetExceptionState(); |
3508 | ExceptionTracker* pTracker = pExState->m_pCurrentTracker; |
3509 | CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid()); |
3510 | |
3511 | // If we are resuming in managed code (albeit further up the stack) we will still need this |
3512 | // tracker. Otherwise we are either propagating into unmanaged code -- with the rethrow |
3513 | // issues mentioned above -- or we are going unhandled. |
3514 | // |
3515 | // Note that we don't distinguish unmanaged code in the EE vs. unmanaged code outside the |
3516 | // EE. We could use the types of the Frames above us to make this distinction. Without |
3517 | // this, the technique of EX_TRY/EX_CATCH/EX_RETHROW inside the EE will lose its tracker |
3518 | // and have to rely on LastThrownObject in the rethrow. Along the same lines, unhandled |
3519 | // exceptions only have access to LastThrownObject. |
3520 | // |
3521 | // There may not be a current tracker if, for instance, UMThunk has dispatched into managed |
3522 | // code via CallDescr. In that case, CallDescr may pop the tracker, leaving UMThunk with |
3523 | // nothing to do. |
3524 | |
3525 | if (pTracker && pTracker->m_sfResumeStackFrame.IsNull()) |
3526 | { |
3527 | StackFrame sf((UINT_PTR)pStackPointer); |
3528 | StackFrame sfTopMostStackFrameFromFirstPass = pTracker->GetTopmostStackFrameFromFirstPass(); |
3529 | |
3530 | // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException |
3531 | // for details on the usage of this COOP switch. |
3532 | GCX_COOP(); |
3533 | ExceptionTracker::PopTrackers(sf, true); |
3534 | } |
3535 | } |
3536 | |
3537 | // |
3538 | // static |
3539 | void ExceptionTracker::PopTrackers( |
3540 | StackFrame sfResumeFrame, |
3541 | bool fPopWhenEqual |
3542 | ) |
3543 | { |
3544 | CONTRACTL |
3545 | { |
3546 | // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException |
3547 | // for details on the mode being COOP here. |
3548 | MODE_COOPERATIVE; |
3549 | GC_NOTRIGGER; |
3550 | NOTHROW; |
3551 | } |
3552 | CONTRACTL_END; |
3553 | |
3554 | Thread* pThread = GetThread(); |
3555 | ExceptionTracker* pTracker = (pThread ? pThread->GetExceptionState()->m_pCurrentTracker : NULL); |
3556 | |
3557 | // NOTE: |
3558 | // |
// This method is a no-op when there is no managed Thread object. We detect such a case and short circuit out in ExceptionTracker::PopTrackers.
3560 | // If this ever changes, then please revisit that method and fix it up appropriately. |
3561 | |
// If this tracker does not have valid stack ranges and it is in the first pass,
// then we likely came here when the tracker was being set up
// and an exception took place.
3565 | // |
3566 | // In such a case, we will not pop off the tracker |
3567 | if (pTracker && pTracker->m_ScannedStackRange.IsEmpty() && pTracker->IsInFirstPass()) |
3568 | { |
3569 | // skip any others with empty ranges... |
3570 | do |
3571 | { |
3572 | pTracker = pTracker->m_pPrevNestedInfo; |
3573 | } |
3574 | while (pTracker && pTracker->m_ScannedStackRange.IsEmpty()); |
3575 | |
// pTracker is now the first non-empty one; make sure it doesn't need popping.
// If it does, then someone let an exception propagate out of the exception dispatch code.
3578 | |
3579 | _ASSERTE(!pTracker || (pTracker->m_ScannedStackRange.GetUpperBound() > sfResumeFrame)); |
3580 | return; |
3581 | } |
3582 | |
3583 | #if defined(DEBUGGING_SUPPORTED) |
3584 | DWORD_PTR dwInterceptStackFrame = 0; |
3585 | |
3586 | // This method may be called on an unmanaged thread, in which case no interception can be done. |
3587 | if (pTracker) |
3588 | { |
3589 | ThreadExceptionState* pExState = pThread->GetExceptionState(); |
3590 | |
3591 | // If the exception is intercepted, then pop trackers according to the stack frame at which |
3592 | // the exception is intercepted. We must retrieve the frame pointer before we start popping trackers. |
3593 | if (pExState->GetFlags()->DebuggerInterceptInfo()) |
3594 | { |
3595 | pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, (PBYTE*)&dwInterceptStackFrame, |
3596 | NULL, NULL); |
3597 | } |
3598 | } |
3599 | #endif // DEBUGGING_SUPPORTED |
3600 | |
3601 | while (pTracker) |
3602 | { |
3603 | #ifndef FEATURE_PAL |
// When we are about to pop off a tracker, it should
// have a stack range set up.
// This is not true on PAL, where the scanned stack range needs to
// be reset after unwinding a sequence of native frames.
3608 | _ASSERTE(!pTracker->m_ScannedStackRange.IsEmpty()); |
3609 | #endif // FEATURE_PAL |
3610 | |
3611 | ExceptionTracker* pPrev = pTracker->m_pPrevNestedInfo; |
3612 | |
3613 | // <TODO> |
3614 | // with new tracker collapsing code, we will only ever pop one of these at a time |
3615 | // at the end of the 2nd pass. However, CLRException::HandlerState::SetupCatch |
3616 | // still uses this function and we still need to revisit how it interacts with |
3617 | // ExceptionTrackers |
3618 | // </TODO> |
3619 | |
3620 | if ((fPopWhenEqual && (pTracker->m_ScannedStackRange.GetUpperBound() == sfResumeFrame)) || |
3621 | (pTracker->m_ScannedStackRange.GetUpperBound() < sfResumeFrame)) |
3622 | { |
3623 | #if defined(DEBUGGING_SUPPORTED) |
3624 | if (g_pDebugInterface != NULL) |
3625 | { |
3626 | if (pTracker->m_ScannedStackRange.GetUpperBound().SP < dwInterceptStackFrame) |
3627 | { |
3628 | g_pDebugInterface->DeleteInterceptContext(pTracker->m_DebuggerExState.GetDebuggerInterceptContext()); |
3629 | } |
3630 | else |
3631 | { |
3632 | _ASSERTE(dwInterceptStackFrame == 0 || |
3633 | ( dwInterceptStackFrame == sfResumeFrame.SP && |
3634 | dwInterceptStackFrame == pTracker->m_ScannedStackRange.GetUpperBound().SP )); |
3635 | } |
3636 | } |
3637 | #endif // DEBUGGING_SUPPORTED |
3638 | |
3639 | ExceptionTracker* pTrackerToFree = pTracker; |
3640 | EH_LOG((LL_INFO100, "Unlinking ExceptionTracker object 0x%p, thread = 0x%p\n" , pTrackerToFree, pTrackerToFree->m_pThread)); |
3641 | CONSISTENCY_CHECK(pTracker->IsValid()); |
3642 | pTracker = pPrev; |
3643 | |
3644 | // free managed tracker resources causing notification -- do this before unlinking the tracker |
3645 | // this is necessary so that we know an exception is still in flight while we give the notification |
3646 | FreeTrackerMemory(pTrackerToFree, memManaged); |
3647 | |
3648 | // unlink the tracker from the thread |
3649 | pThread->GetExceptionState()->m_pCurrentTracker = pTracker; |
3650 | CONSISTENCY_CHECK((NULL == pTracker) || pTracker->IsValid()); |
3651 | |
3652 | // free unmanaged tracker resources |
3653 | FreeTrackerMemory(pTrackerToFree, memUnmanaged); |
3654 | } |
3655 | else |
3656 | { |
3657 | break; |
3658 | } |
3659 | } |
3660 | } |
3661 | |
3662 | // |
3663 | // static |
3664 | ExceptionTracker* ExceptionTracker::GetOrCreateTracker( |
3665 | UINT_PTR ControlPc, |
3666 | StackFrame sf, |
3667 | EXCEPTION_RECORD* pExceptionRecord, |
3668 | CONTEXT* pContextRecord, |
3669 | BOOL bAsynchronousThreadStop, |
3670 | bool fIsFirstPass, |
3671 | StackTraceState* pStackTraceState |
3672 | ) |
3673 | { |
3674 | CONTRACT(ExceptionTracker*) |
3675 | { |
3676 | MODE_ANY; |
3677 | GC_TRIGGERS; |
3678 | NOTHROW; |
3679 | PRECONDITION(CheckPointer(pStackTraceState)); |
3680 | POSTCONDITION(CheckPointer(RETVAL)); |
3681 | } |
3682 | CONTRACT_END; |
3683 | |
3684 | Thread* pThread = GetThread(); |
3685 | ThreadExceptionState* pExState = pThread->GetExceptionState(); |
3686 | ExceptionTracker* pTracker = pExState->m_pCurrentTracker; |
3687 | CONSISTENCY_CHECK((NULL == pTracker) || (pTracker->IsValid())); |
3688 | |
3689 | bool fCreateNewTracker = false; |
3690 | bool fIsRethrow = false; |
3691 | bool fTransitionFromSecondToFirstPass = false; |
3692 | |
3693 | // Initialize the out parameter. |
3694 | *pStackTraceState = STS_Append; |
3695 | |
3696 | if (NULL != pTracker) |
3697 | { |
3698 | fTransitionFromSecondToFirstPass = fIsFirstPass && !pTracker->IsInFirstPass(); |
3699 | |
3700 | #ifndef FEATURE_PAL |
3701 | // We don't check this on PAL where the scanned stack range needs to |
3702 | // be reset after unwinding a sequence of native frames. |
3703 | CONSISTENCY_CHECK(!pTracker->m_ScannedStackRange.IsEmpty()); |
3704 | #endif // FEATURE_PAL |
3705 | |
3706 | if (pTracker->m_ExceptionFlags.IsRethrown()) |
3707 | { |
3708 | EH_LOG((LL_INFO100, ">>continued processing of RETHROWN exception\n" )); |
3709 | // this is the first time we've seen a rethrown exception, reuse the tracker and reset some state |
3710 | |
3711 | fCreateNewTracker = true; |
3712 | fIsRethrow = true; |
3713 | } |
3714 | else |
3715 | if ((pTracker->m_ptrs.ExceptionRecord != pExceptionRecord) && fIsFirstPass) |
3716 | { |
3717 | EH_LOG((LL_INFO100, ">>NEW exception (exception records do not match)\n" )); |
3718 | fCreateNewTracker = true; |
3719 | } |
3720 | else |
3721 | if (sf >= pTracker->m_ScannedStackRange.GetUpperBound()) |
3722 | { |
3723 | // We can't have a transition from 1st pass to 2nd pass in this case. |
3724 | _ASSERTE( ( sf == pTracker->m_ScannedStackRange.GetUpperBound() ) || |
3725 | ( fIsFirstPass || !pTracker->IsInFirstPass() ) ); |
3726 | |
3727 | if (fTransitionFromSecondToFirstPass) |
3728 | { |
// We just transitioned from the 2nd pass to the 1st pass without knowing it.
// This means that some unmanaged frame outside of the EE caught the previous exception,
// so we should trash the current tracker and create a new one.
3732 | EH_LOG((LL_INFO100, ">>NEW exception (the previous second pass finishes at some unmanaged frame outside of the EE)\n" )); |
3733 | { |
3734 | GCX_COOP(); |
3735 | ExceptionTracker::PopTrackers(sf, false); |
3736 | } |
3737 | |
3738 | fCreateNewTracker = true; |
3739 | } |
3740 | else |
3741 | { |
3742 | EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception\n" )); |
3743 | // previously seen exception, reuse the tracker |
3744 | |
3745 | *pStackTraceState = STS_Append; |
3746 | } |
3747 | } |
3748 | else |
3749 | if (pTracker->m_ScannedStackRange.Contains(sf)) |
3750 | { |
3751 | EH_LOG((LL_INFO100, ">>continued processing of PREVIOUS exception (revisiting previously processed frames)\n" )); |
3752 | } |
3753 | else |
3754 | { |
3755 | // nested exception |
3756 | EH_LOG((LL_INFO100, ">>new NESTED exception\n" )); |
3757 | fCreateNewTracker = true; |
3758 | } |
3759 | } |
3760 | else |
3761 | { |
3762 | EH_LOG((LL_INFO100, ">>NEW exception\n" )); |
3763 | fCreateNewTracker = true; |
3764 | } |
3765 | |
3766 | if (fCreateNewTracker) |
3767 | { |
3768 | #ifdef _DEBUG |
3769 | if (STATUS_STACK_OVERFLOW == pExceptionRecord->ExceptionCode) |
3770 | { |
3771 | CONSISTENCY_CHECK(pExceptionRecord->NumberParameters >= 2); |
3772 | UINT_PTR uFaultAddress = pExceptionRecord->ExceptionInformation[1]; |
3773 | UINT_PTR uStackLimit = (UINT_PTR)pThread->GetCachedStackLimit(); |
3774 | |
3775 | EH_LOG((LL_INFO100, "STATUS_STACK_OVERFLOW accessing address %p %s\n" , |
3776 | uFaultAddress)); |
3777 | |
3778 | UINT_PTR uDispatchStackAvailable; |
3779 | |
3780 | uDispatchStackAvailable = uFaultAddress - uStackLimit - HARD_GUARD_REGION_SIZE; |
3781 | |
3782 | EH_LOG((LL_INFO100, "%x bytes available for SO processing\n" , uDispatchStackAvailable)); |
3783 | } |
3784 | else if ((IsComPlusException(pExceptionRecord)) && |
3785 | (pThread->GetThrowableAsHandle() == g_pPreallocatedStackOverflowException)) |
3786 | { |
3787 | EH_LOG((LL_INFO100, "STACKOVERFLOW: StackOverflowException manually thrown\n" )); |
3788 | } |
3789 | #endif // _DEBUG |
3790 | |
3791 | ExceptionTracker* pNewTracker; |
3792 | |
3793 | pNewTracker = GetTrackerMemory(); |
3794 | if (!pNewTracker) |
3795 | { |
3796 | if (NULL != pExState->m_OOMTracker.m_pThread) |
3797 | { |
3798 | // Fatal error: we spun and could not allocate another tracker |
3799 | // and our existing emergency tracker is in use. |
3800 | EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
3801 | } |
3802 | |
3803 | pNewTracker = &pExState->m_OOMTracker; |
3804 | } |
3805 | |
3806 | new (pNewTracker) ExceptionTracker(ControlPc, |
3807 | pExceptionRecord, |
3808 | pContextRecord); |
3809 | |
3810 | CONSISTENCY_CHECK(pNewTracker->IsValid()); |
3811 | CONSISTENCY_CHECK(pThread == pNewTracker->m_pThread); |
3812 | |
3813 | EH_LOG((LL_INFO100, "___________________________________________\n" )); |
3814 | EH_LOG((LL_INFO100, "creating new tracker object 0x%p, thread = 0x%p\n" , pNewTracker, pThread)); |
3815 | |
3816 | GCX_COOP(); |
3817 | |
3818 | // We always create a throwable in the first pass when we first see an exception. |
3819 | // |
3820 | // On 64bit, every time the exception passes beyond a boundary (e.g. RPInvoke call, or CallDescrWorker call), |
3821 | // the exception trackers that were created below (stack growing down) that boundary are released, during the 2nd pass, |
3822 | // if the exception was not caught in managed code. This is because the catcher is in native code and managed exception |
3823 | // data structures are for use of VM only when the exception is caught in managed code. Also, passing by such |
3824 | // boundaries is our only opportunity to release such internal structures and not leak the memory. |
3825 | // |
// However, in certain cases, releasing exception trackers at each boundary can prove to be a bit aggressive.
3827 | // Take the example below where "VM" prefix refers to a VM frame and "M" prefix refers to a managed frame on the stack. |
3828 | // |
3829 | // VM1 -> M1 - VM2 - (via RPinvoke) -> M2 |
3830 | // |
3831 | // Let M2 throw E2 that remains unhandled in managed code (i.e. M1 also does not catch it) but is caught in VM1. |
// Note that the act of throwing an exception also sets it as the LastThrownObject (LTO) against the thread.
3833 | // |
3834 | // Since this is native code (as mentioned in the comments above, there is no distinction made between VM native |
3835 | // code and external native code) that caught the exception, when the unwind goes past the "Reverse Pinvoke" boundary, |
3836 | // its personality routine will release the tracker for E2. Thus, only the LTO (which is off the Thread object and not |
3837 | // the exception tracker) is indicative of type of the last exception thrown. |
3838 | // |
3839 | // As the unwind goes up the stack, we come across M1 and, since the original tracker was released, we create a new |
3840 | // tracker in the 2nd pass that does not contain details like the active exception object. A managed finally executes in M1 |
3841 | // that throws and catches E1 inside the finally block. Thus, LTO is updated to indicate E1 as the last exception thrown. |
3842 | // When the exception is caught in VM1 and VM attempts to get LTO, it gets E1, which is incorrect as it was handled within the finally. |
3843 | // Semantically, it should have got E2 as the LTO. |
3844 | // |
// To address this, we will *also* create a throwable during the second pass for most exceptions,
// since most of them have had the corresponding first pass. If we are processing
3847 | // an exception's second pass, we would have processed its first pass as well and thus, already |
3848 | // created a throwable that would be setup as the LastThrownObject (LTO) against the Thread. |
3849 | // |
// The only exception to this rule is the longjump - this exception only has a second pass.
// Thus, if we are in the second pass and the exception in question is a longjump, then do not create a throwable.
3852 | // |
3853 | // In the case of the scenario above, when we attempt to create a new exception tracker, during the unwind, |
3854 | // for M1, we will also setup E2 as the throwable in the tracker. As a result, when the finally in M1 throws |
3855 | // and catches the exception, the LTO is correctly updated against the thread (see SafeUpdateLastThrownObject) |
3856 | // and thus, when VM requests for the LTO, it gets E2 as expected. |
3857 | bool fCreateThrowableForCurrentPass = true; |
3858 | if (pExceptionRecord->ExceptionCode == STATUS_LONGJUMP) |
3859 | { |
3860 | // Long jump is only in second pass of exception dispatch |
3861 | _ASSERTE(!fIsFirstPass); |
3862 | fCreateThrowableForCurrentPass = false; |
3863 | } |
3864 | |
// When dealing with a SQL-hosting-like scenario, a real SO
// may be caught in native code. As a result, the CRT will perform
// a STATUS_UNWIND_CONSOLIDATE that will result in replacing
3868 | // the exception record in ProcessCLRException. This replaced |
3869 | // exception record will point to the exception record for original |
3870 | // SO for which we will not have created a throwable in the first pass |
3871 | // due to the SO-specific early exit code in ProcessCLRException. |
3872 | // |
3873 | // Thus, if we see that we are here for SO in the 2nd pass, then |
3874 | // we shouldn't attempt to create a throwable. |
3875 | if ((!fIsFirstPass) && (IsSOExceptionCode(pExceptionRecord->ExceptionCode))) |
3876 | { |
3877 | fCreateThrowableForCurrentPass = false; |
3878 | } |
3879 | |
3880 | #ifdef _DEBUG |
3881 | if ((!fIsFirstPass) && (fCreateThrowableForCurrentPass == true)) |
3882 | { |
3883 | // We should have a LTO available if we are creating |
3884 | // a throwable during second pass. |
3885 | _ASSERTE(pThread->LastThrownObjectHandle() != NULL); |
3886 | } |
3887 | #endif // _DEBUG |
3888 | |
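// Create a throwable if this pass warrants one (see above), or if this is an
// asynchronous thread stop and async aborts are not currently prevented on this thread.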
3889 | bool fCreateThrowable = (fCreateThrowableForCurrentPass || (bAsynchronousThreadStop && !pThread->IsAsyncPrevented())); |
3890 | OBJECTREF oThrowable = NULL; |
3891 | |
3892 | if (fCreateThrowable) |
3893 | { |
3894 | if (fIsRethrow) |
3895 | { |
3896 | oThrowable = ObjectFromHandle(pTracker->m_hThrowable); |
3897 | } |
3898 | else |
3899 | { |
3900 | // this can take a nested exception |
3901 | oThrowable = CreateThrowable(pExceptionRecord, bAsynchronousThreadStop); |
3902 | } |
3903 | } |
3904 | |
3905 | GCX_FORBID(); // we haven't protected oThrowable |
3906 | |
3907 | if (pExState->m_pCurrentTracker != pNewTracker) // OOM can make this false |
3908 | { |
3909 | pNewTracker->m_pPrevNestedInfo = pExState->m_pCurrentTracker; |
3910 | pTracker = pNewTracker; |
3911 | pThread->GetExceptionState()->m_pCurrentTracker = pTracker; |
3912 | } |
3913 | |
3914 | if (fCreateThrowable) |
3915 | { |
3916 | CONSISTENCY_CHECK(oThrowable != NULL); |
3917 | CONSISTENCY_CHECK(NULL == pTracker->m_hThrowable); |
3918 | |
3919 | pThread->SafeSetThrowables(oThrowable); |
3920 | |
3921 | if (pTracker->CanAllocateMemory()) |
3922 | { |
3923 | pTracker->m_StackTraceInfo.AllocateStackTrace(); |
3924 | } |
3925 | } |
3926 | INDEBUG(oThrowable = NULL); |
3927 | |
3928 | if (fIsRethrow) |
3929 | { |
3930 | *pStackTraceState = STS_FirstRethrowFrame; |
3931 | } |
3932 | else |
3933 | { |
3934 | *pStackTraceState = STS_NewException; |
3935 | } |
3936 | |
3937 | _ASSERTE(pTracker->m_pLimitFrame == NULL); |
3938 | pTracker->ResetLimitFrame(); |
3939 | } |
3940 | |
3941 | if (!fIsFirstPass) |
3942 | { |
3943 | { |
3944 | // Refer to the comment around ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException |
3945 | // for details on the usage of this COOP switch. |
3946 | GCX_COOP(); |
3947 | |
3948 | if (pTracker->IsInFirstPass()) |
3949 | { |
3950 | CONSISTENCY_CHECK_MSG(fCreateNewTracker || pTracker->m_ScannedStackRange.Contains(sf), |
3951 | "Tracker did not receive a first pass!" ); |
3952 | |
3953 | // Save the topmost StackFrame the tracker saw in the first pass before we reset the |
3954 | // scanned stack range. |
3955 | pTracker->m_sfFirstPassTopmostFrame = pTracker->m_ScannedStackRange.GetUpperBound(); |
3956 | |
3957 | // We have to detect this transition because otherwise we break when unmanaged code |
3958 | // catches our exceptions. |
3959 | EH_LOG((LL_INFO100, ">>tracker transitioned to second pass\n" )); |
3960 | pTracker->m_ScannedStackRange.Reset(); |
3961 | |
3962 | pTracker->m_ExceptionFlags.SetUnwindHasStarted(); |
3963 | if (pTracker->m_ExceptionFlags.UnwindingToFindResumeFrame()) |
3964 | { |
3965 | // UnwindingToFindResumeFrame means that in the first pass, we determine that a method |
3966 | // catches the exception, but the method frame we are inspecting is a funclet method frame |
3967 | // and is not the correct frame to resume execution. We need to resume to the correct |
3968 | // method frame before starting the second pass. The correct method frame is most likely |
3969 | // the parent method frame, but it can also be another funclet method frame. |
3970 | // |
3971 | // If the exception transitions from first pass to second pass before we find the parent |
3972 | // method frame, there is only one possibility: some other thread has initiated a rude |
3973 | // abort on the current thread, causing us to skip processing of all method frames. |
3974 | _ASSERTE(pThread->IsRudeAbortInitiated()); |
3975 | } |
// Err on the safe side and just reset everything unconditionally.
3977 | pTracker->FirstPassIsComplete(); |
3978 | |
3979 | EEToDebuggerExceptionInterfaceWrapper::ManagedExceptionUnwindBegin(pThread); |
3980 | |
3981 | pTracker->ResetLimitFrame(); |
3982 | } |
3983 | else |
3984 | { |
3985 | // In the second pass, there's a possibility that UMThunkUnwindFrameChainHandler() has |
3986 | // popped some frames off the frame chain underneath us. Check for this case here. |
3987 | if (pTracker->m_pLimitFrame < pThread->GetFrame()) |
3988 | { |
3989 | pTracker->ResetLimitFrame(); |
3990 | } |
3991 | } |
3992 | } |
3993 | |
3994 | #ifdef FEATURE_CORRUPTING_EXCEPTIONS |
3995 | if (fCreateNewTracker) |
3996 | { |
3997 | // Exception tracker should be in the 2nd pass right now |
3998 | _ASSERTE(!pTracker->IsInFirstPass()); |
3999 | |
4000 | // The corruption severity of a newly created tracker is NotSet |
4001 | _ASSERTE(pTracker->GetCorruptionSeverity() == NotSet); |
4002 | |
4003 | // See comment in CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass for details |
4004 | CEHelper::SetupCorruptionSeverityForActiveExceptionInUnwindPass(pThread, pTracker, FALSE, pExceptionRecord->ExceptionCode); |
4005 | } |
4006 | #endif // FEATURE_CORRUPTING_EXCEPTIONS |
4007 | } |
4008 | |
4009 | _ASSERTE(pTracker->m_pLimitFrame >= pThread->GetFrame()); |
4010 | |
4011 | RETURN pTracker; |
4012 | } |
4013 | |
4014 | void ExceptionTracker::ResetLimitFrame() |
4015 | { |
4016 | WRAPPER_NO_CONTRACT; |
4017 | |
4018 | m_pLimitFrame = m_pThread->GetFrame(); |
4019 | } |
4020 | |
4021 | // |
4022 | // static |
4023 | void ExceptionTracker::ResumeExecution( |
4024 | CONTEXT* pContextRecord, |
4025 | EXCEPTION_RECORD* pExceptionRecord |
4026 | ) |
4027 | { |
4028 | // |
4029 | // This method never returns, so it will leave its |
// state on the thread if using dynamic contracts.
4031 | // |
4032 | STATIC_CONTRACT_MODE_COOPERATIVE; |
4033 | STATIC_CONTRACT_GC_NOTRIGGER; |
4034 | STATIC_CONTRACT_NOTHROW; |
4035 | |
4036 | AMD64_ONLY(STRESS_LOG4(LF_GCROOTS, LL_INFO100, "Resuming after exception at %p, rbx=%p, rsi=%p, rdi=%p\n" , |
4037 | GetIP(pContextRecord), |
4038 | pContextRecord->Rbx, |
4039 | pContextRecord->Rsi, |
4040 | pContextRecord->Rdi)); |
4041 | |
4042 | EH_LOG((LL_INFO100, "resuming execution at 0x%p\n" , GetIP(pContextRecord))); |
4043 | EH_LOG((LL_INFO100, "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n" )); |
4044 | |
4045 | RtlRestoreContext(pContextRecord, pExceptionRecord); |
4046 | |
4047 | UNREACHABLE(); |
4048 | // |
4049 | // doesn't return |
4050 | // |
4051 | } |
4052 | |
4053 | // |
4054 | // static |
4055 | OBJECTREF ExceptionTracker::CreateThrowable( |
4056 | PEXCEPTION_RECORD pExceptionRecord, |
4057 | BOOL bAsynchronousThreadStop |
4058 | ) |
4059 | { |
4060 | CONTRACTL |
4061 | { |
4062 | MODE_COOPERATIVE; |
4063 | GC_TRIGGERS; |
4064 | NOTHROW; |
4065 | } |
4066 | CONTRACTL_END; |
4067 | |
4068 | OBJECTREF oThrowable = NULL; |
4069 | Thread* pThread = GetThread(); |
4070 | |
4071 | |
4072 | if ((!bAsynchronousThreadStop) && IsComPlusException(pExceptionRecord)) |
4073 | { |
4074 | oThrowable = pThread->LastThrownObject(); |
4075 | } |
4076 | else |
4077 | { |
4078 | oThrowable = CreateCOMPlusExceptionObject(pThread, pExceptionRecord, bAsynchronousThreadStop); |
4079 | } |
4080 | |
4081 | return oThrowable; |
4082 | } |
4083 | |
4084 | // |
// static
4086 | BOOL ExceptionTracker::ClauseCoversPC( |
4087 | EE_ILEXCEPTION_CLAUSE* pEHClause, |
4088 | DWORD dwOffset |
4089 | ) |
4090 | { |
4091 | // TryStartPC and TryEndPC are offsets relative to the start |
4092 | // of the method so we can just compare them to the offset returned |
4093 | // by JitCodeToMethodInfo. |
4094 | // |
4095 | return ((pEHClause->TryStartPC <= dwOffset) && (dwOffset < pEHClause->TryEndPC)); |
4096 | } |
4097 | |
4098 | #if defined(DEBUGGING_SUPPORTED) |
4099 | BOOL ExceptionTracker::NotifyDebuggerOfStub(Thread* pThread, StackFrame sf, Frame* pCurrentFrame) |
4100 | { |
4101 | LIMITED_METHOD_CONTRACT; |
4102 | |
4103 | BOOL fDeliveredFirstChanceNotification = FALSE; |
4104 | |
4105 | // <TODO> |
4106 | // Remove this once SIS is fully enabled. |
4107 | // </TODO> |
4108 | extern bool g_EnableSIS; |
4109 | |
4110 | if (g_EnableSIS) |
4111 | { |
4112 | _ASSERTE(GetThread() == pThread); |
4113 | |
4114 | GCX_COOP(); |
4115 | |
4116 | // For debugger, we may want to notify 1st chance exceptions if they're coming out of a stub. |
4117 | // We recognize stubs as Frames with a M2U transition type. The debugger's stackwalker also |
4118 | // recognizes these frames and publishes ICorDebugInternalFrames in the stackwalk. It's |
4119 | // important to use pFrame as the stack address so that the Exception callback matches up |
// w/ the ICorDebugInternalFrame stack range.
4121 | if (CORDebuggerAttached()) |
4122 | { |
4123 | if (pCurrentFrame->GetTransitionType() == Frame::TT_M2U) |
4124 | { |
4125 | // Use -1 for the backing store pointer whenever we use the address of a frame as the stack pointer. |
4126 | EEToDebuggerExceptionInterfaceWrapper::FirstChanceManagedException(pThread, |
4127 | (SIZE_T)0, |
4128 | (SIZE_T)pCurrentFrame); |
4129 | fDeliveredFirstChanceNotification = TRUE; |
4130 | } |
4131 | } |
4132 | } |
4133 | |
4134 | return fDeliveredFirstChanceNotification; |
4135 | } |
4136 | |
4137 | bool ExceptionTracker::IsFilterStartOffset(EE_ILEXCEPTION_CLAUSE* pEHClause, DWORD_PTR dwHandlerStartPC) |
4138 | { |
4139 | EECodeInfo codeInfo((PCODE)dwHandlerStartPC); |
4140 | _ASSERTE(codeInfo.IsValid()); |
4141 | |
4142 | return pEHClause->FilterOffset == codeInfo.GetRelOffset(); |
4143 | } |
4144 | |
4145 | void ExceptionTracker::MakeCallbacksRelatedToHandler( |
4146 | bool fBeforeCallingHandler, |
4147 | Thread* pThread, |
4148 | MethodDesc* pMD, |
4149 | EE_ILEXCEPTION_CLAUSE* pEHClause, |
4150 | DWORD_PTR dwHandlerStartPC, |
4151 | StackFrame sf |
4152 | ) |
4153 | { |
4154 | // Here we need to make an extra check for filter handlers because we could be calling the catch handler |
4155 | // associated with a filter handler and yet the EH clause we have saved is for the filter handler. |
4156 | BOOL fIsFilterHandler = IsFilterHandler(pEHClause) && ExceptionTracker::IsFilterStartOffset(pEHClause, dwHandlerStartPC); |
4157 | BOOL fIsFaultOrFinallyHandler = IsFaultOrFinally(pEHClause); |
4158 | |
4159 | if (fBeforeCallingHandler) |
4160 | { |
4161 | StackFrame sfToStore = sf; |
4162 | if ((this->m_pPrevNestedInfo != NULL) && |
4163 | (this->m_pPrevNestedInfo->m_EnclosingClauseInfo == this->m_EnclosingClauseInfo)) |
4164 | { |
4165 | // If this is a nested exception which has the same enclosing clause as the previous exception, |
4166 | // we should just propagate the clause info from the previous exception. |
4167 | sfToStore = this->m_pPrevNestedInfo->m_EHClauseInfo.GetStackFrameForEHClause(); |
4168 | } |
4169 | m_EHClauseInfo.SetInfo(COR_PRF_CLAUSE_NONE, (UINT_PTR)dwHandlerStartPC, sfToStore); |
4170 | |
4171 | if (pMD->IsILStub()) |
4172 | { |
4173 | return; |
4174 | } |
4175 | |
4176 | if (fIsFilterHandler) |
4177 | { |
4178 | m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FILTER); |
4179 | EEToDebuggerExceptionInterfaceWrapper::ExceptionFilter(pMD, (TADDR) dwHandlerStartPC, pEHClause->FilterOffset, (BYTE*)sf.SP); |
4180 | |
4181 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterEnter(pMD); |
4182 | |
4183 | COUNTER_ONLY(GetPerfCounters().m_Excep.cFiltersExecuted++); |
4184 | } |
4185 | else |
4186 | { |
4187 | EEToDebuggerExceptionInterfaceWrapper::ExceptionHandle(pMD, (TADDR) dwHandlerStartPC, pEHClause->HandlerStartPC, (BYTE*)sf.SP); |
4188 | |
4189 | if (fIsFaultOrFinallyHandler) |
4190 | { |
4191 | m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_FINALLY); |
4192 | EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyEnter(pMD); |
4193 | COUNTER_ONLY(GetPerfCounters().m_Excep.cFinallysExecuted++); |
4194 | } |
4195 | else |
4196 | { |
4197 | m_EHClauseInfo.SetEHClauseType(COR_PRF_CLAUSE_CATCH); |
4198 | EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherEnter(pThread, pMD); |
4199 | |
4200 | DACNotify::DoExceptionCatcherEnterNotification(pMD, pEHClause->HandlerStartPC); |
4201 | } |
4202 | } |
4203 | } |
4204 | else |
4205 | { |
4206 | if (pMD->IsILStub()) |
4207 | { |
4208 | return; |
4209 | } |
4210 | |
4211 | if (fIsFilterHandler) |
4212 | { |
4213 | EEToProfilerExceptionInterfaceWrapper::ExceptionSearchFilterLeave(); |
4214 | } |
4215 | else |
4216 | { |
4217 | if (fIsFaultOrFinallyHandler) |
4218 | { |
4219 | EEToProfilerExceptionInterfaceWrapper::ExceptionUnwindFinallyLeave(); |
4220 | } |
4221 | else |
4222 | { |
4223 | EEToProfilerExceptionInterfaceWrapper::ExceptionCatcherLeave(); |
4224 | } |
4225 | } |
4226 | m_EHClauseInfo.ResetInfo(); |
4227 | } |
4228 | } |
4229 | |
4230 | #ifdef DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED |
4231 | //--------------------------------------------------------------------------------------- |
4232 | // |
4233 | // This function is called by DefaultCatchHandler() to intercept an exception and start an unwind. |
4234 | // |
4235 | // Arguments: |
4236 | // pCurrentEstablisherFrame - unused on WIN64 |
4237 | // pExceptionRecord - EXCEPTION_RECORD of the exception being intercepted |
4238 | // |
4239 | // Return Value: |
4240 | // ExceptionContinueSearch if the exception cannot be intercepted |
4241 | // |
4242 | // Notes: |
4243 | // If the exception is intercepted, this function never returns. |
4244 | // |
4245 | |
4246 | EXCEPTION_DISPOSITION ClrDebuggerDoUnwindAndIntercept(X86_FIRST_ARG(EXCEPTION_REGISTRATION_RECORD* pCurrentEstablisherFrame) |
4247 | EXCEPTION_RECORD* pExceptionRecord) |
4248 | { |
4249 | if (!CheckThreadExceptionStateForInterception()) |
4250 | { |
4251 | return ExceptionContinueSearch; |
4252 | } |
4253 | |
4254 | Thread* pThread = GetThread(); |
4255 | ThreadExceptionState* pExState = pThread->GetExceptionState(); |
4256 | |
4257 | UINT_PTR uInterceptStackFrame = 0; |
4258 | |
4259 | pExState->GetDebuggerState()->GetDebuggerInterceptInfo(NULL, NULL, |
4260 | (PBYTE*)&uInterceptStackFrame, |
4261 | NULL, NULL); |
4262 | |
4263 | ClrUnwindEx(pExceptionRecord, (UINT_PTR)pThread, INVALID_RESUME_ADDRESS, uInterceptStackFrame); |
4264 | |
4265 | UNREACHABLE(); |
4266 | } |
4267 | #endif // DEBUGGER_EXCEPTION_INTERCEPTION_SUPPORTED |
4268 | #endif // DEBUGGING_SUPPORTED |
4269 | |
4270 | #ifdef _DEBUG |
4271 | inline bool ExceptionTracker::IsValid() |
4272 | { |
4273 | bool fRetVal = false; |
4274 | |
4275 | EX_TRY |
4276 | { |
4277 | Thread* pThisThread = GetThread(); |
4278 | if (m_pThread == pThisThread) |
4279 | { |
4280 | fRetVal = true; |
4281 | } |
4282 | } |
4283 | EX_CATCH |
4284 | { |
4285 | } |
4286 | EX_END_CATCH(SwallowAllExceptions); |
4287 | |
4288 | if (!fRetVal) |
4289 | { |
4290 | EH_LOG((LL_ERROR, "ExceptionTracker::IsValid() failed! this = 0x%p\n" , this)); |
4291 | } |
4292 | |
4293 | return fRetVal; |
4294 | } |
4295 | BOOL ExceptionTracker::ThrowableIsValid() |
4296 | { |
4297 | GCX_COOP(); |
4298 | CONSISTENCY_CHECK(IsValid()); |
4299 | |
4300 | BOOL isValid = FALSE; |
4301 | |
4302 | |
4303 | isValid = (m_pThread->GetThrowable() != NULL); |
4304 | |
4305 | return isValid; |
4306 | } |
4307 | // |
4308 | // static |
4309 | UINT_PTR ExceptionTracker::DebugComputeNestingLevel() |
4310 | { |
4311 | UINT_PTR uNestingLevel = 0; |
4312 | Thread* pThread = GetThread(); |
4313 | |
4314 | if (pThread) |
4315 | { |
4316 | ExceptionTracker* pTracker; |
4317 | pTracker = pThread->GetExceptionState()->m_pCurrentTracker; |
4318 | |
4319 | while (pTracker) |
4320 | { |
4321 | uNestingLevel++; |
4322 | pTracker = pTracker->m_pPrevNestedInfo; |
4323 | }; |
4324 | } |
4325 | |
4326 | return uNestingLevel; |
4327 | } |
4328 | void DumpClauses(IJitManager* pJitMan, const METHODTOKEN& MethToken, UINT_PTR uMethodStartPC, UINT_PTR dwControlPc) |
4329 | { |
4330 | EH_CLAUSE_ENUMERATOR EnumState; |
4331 | unsigned EHCount; |
4332 | |
4333 | EH_LOG((LL_INFO1000, " | uMethodStartPC: %p, ControlPc at offset %x\n" , uMethodStartPC, dwControlPc - uMethodStartPC)); |
4334 | |
4335 | EHCount = pJitMan->InitializeEHEnumeration(MethToken, &EnumState); |
4336 | for (unsigned i = 0; i < EHCount; i++) |
4337 | { |
4338 | EE_ILEXCEPTION_CLAUSE EHClause; |
4339 | pJitMan->GetNextEHClause(&EnumState, &EHClause); |
4340 | |
4341 | EH_LOG((LL_INFO1000, " | %s clause [%x, %x], handler: [%x, %x] %s" , |
4342 | (IsFault(&EHClause) ? "fault" : |
4343 | (IsFinally(&EHClause) ? "finally" : |
4344 | (IsFilterHandler(&EHClause) ? "filter" : |
4345 | (IsTypedHandler(&EHClause) ? "typed" : "unknown" )))), |
4346 | EHClause.TryStartPC , // + uMethodStartPC, |
4347 | EHClause.TryEndPC , // + uMethodStartPC, |
4348 | EHClause.HandlerStartPC , // + uMethodStartPC, |
4349 | EHClause.HandlerEndPC , // + uMethodStartPC |
4350 | (IsDuplicateClause(&EHClause) ? "[duplicate]" : "" ) |
4351 | )); |
4352 | |
4353 | if (IsFilterHandler(&EHClause)) |
4354 | { |
4355 | LOG((LF_EH, LL_INFO1000, " filter: [%x, ...]" , |
4356 | EHClause.FilterOffset));// + uMethodStartPC |
4357 | } |
4358 | |
4359 | LOG((LF_EH, LL_INFO1000, "\n" )); |
4360 | } |
4361 | |
4362 | } |
4363 | |
4364 | #define STACK_ALLOC_ARRAY(numElements, type) \ |
4365 | ((type *)_alloca((numElements)*(sizeof(type)))) |
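
// Illustrative usage (this mirrors DoEHLog below); the buffer lives on the
// caller's stack frame and must not outlive it:
//
//   char* pPadding = STACK_ALLOC_ARRAY(cch + 1, char);   // cch + 1 bytes via _alloca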
4366 | |
4367 | static void DoEHLog( |
4368 | DWORD lvl, |
4369 | __in_z const char *fmt, |
4370 | ... |
4371 | ) |
4372 | { |
4373 | if (!LoggingOn(LF_EH, lvl)) |
4374 | return; |
4375 | |
4376 | va_list args; |
4377 | va_start(args, fmt); |
4378 | |
4379 | UINT_PTR nestinglevel = ExceptionTracker::DebugComputeNestingLevel(); |
4380 | if (nestinglevel) |
4381 | { |
4382 | _ASSERTE(FitsIn<UINT_PTR>(2 * nestinglevel)); |
4383 | UINT_PTR cch = 2 * nestinglevel; |
4384 | char* pPadding = STACK_ALLOC_ARRAY(cch + 1, char); |
4385 | memset(pPadding, '.', cch); |
4386 | pPadding[cch] = 0; |
4387 | |
4388 | LOG((LF_EH, lvl, pPadding)); |
4389 | } |
4390 | |
4391 | LogSpewValist(LF_EH, lvl, fmt, args); |
4392 | va_end(args); |
4393 | } |
4394 | #endif // _DEBUG |
4395 | |
4396 | #ifdef FEATURE_PAL |
4397 | |
4398 | //--------------------------------------------------------------------------------------- |
4399 | // |
// This function performs the unwind (second) pass for a managed exception. The stack is unwound
// until the target frame is reached. For each frame we use its PC value to find
// a handler using information that has been built by the JIT.
4403 | // |
4404 | // Arguments: |
4405 | // ex - the PAL_SEHException representing the managed exception |
4406 | // unwindStartContext - the context that the unwind should start at. Either the original exception |
4407 | // context (when the exception didn't cross native frames) or the first managed |
4408 | // frame after crossing native frames. |
4409 | // |
4410 | VOID UnwindManagedExceptionPass2(PAL_SEHException& ex, CONTEXT* unwindStartContext) |
4411 | { |
4412 | UINT_PTR controlPc; |
4413 | PVOID sp; |
4414 | EXCEPTION_DISPOSITION disposition; |
4415 | CONTEXT* currentFrameContext; |
4416 | CONTEXT* callerFrameContext; |
4417 | CONTEXT contextStorage; |
4418 | DISPATCHER_CONTEXT dispatcherContext; |
4419 | EECodeInfo codeInfo; |
UINT_PTR establisherFrame = 0;
4421 | PVOID handlerData; |
4422 | |
4423 | // Indicate that we are performing second pass. |
4424 | ex.GetExceptionRecord()->ExceptionFlags = EXCEPTION_UNWINDING; |
4425 | |
4426 | currentFrameContext = unwindStartContext; |
4427 | callerFrameContext = &contextStorage; |
4428 | |
4429 | memset(&dispatcherContext, 0, sizeof(DISPATCHER_CONTEXT)); |
4430 | disposition = ExceptionContinueSearch; |
4431 | |
4432 | do |
4433 | { |
4434 | controlPc = GetIP(currentFrameContext); |
4435 | |
4436 | codeInfo.Init(controlPc); |
4437 | |
4438 | dispatcherContext.FunctionEntry = codeInfo.GetFunctionEntry(); |
4439 | dispatcherContext.ControlPc = controlPc; |
4440 | dispatcherContext.ImageBase = codeInfo.GetModuleBase(); |
4441 | #ifdef ADJUST_PC_UNWOUND_TO_CALL |
4442 | dispatcherContext.ControlPcIsUnwound = !!(currentFrameContext->ContextFlags & CONTEXT_UNWOUND_TO_CALL); |
4443 | #endif |
4444 | // Check whether we have a function table entry for the current controlPC. |
4445 | // If yes, then call RtlVirtualUnwind to get the establisher frame pointer. |
4446 | if (dispatcherContext.FunctionEntry != NULL) |
4447 | { |
4448 | // Create a copy of the current context because we don't want |
4449 | // the current context record to be updated by RtlVirtualUnwind. |
4450 | memcpy(callerFrameContext, currentFrameContext, sizeof(CONTEXT)); |
4451 | RtlVirtualUnwind(UNW_FLAG_EHANDLER, |
4452 | dispatcherContext.ImageBase, |
4453 | dispatcherContext.ControlPc, |
4454 | dispatcherContext.FunctionEntry, |
4455 | callerFrameContext, |
4456 | &handlerData, |
4457 | &establisherFrame, |
4458 | NULL); |
4459 | |
4460 | // Make sure that the establisher frame pointer is within stack boundaries |
4461 | // and we did not go below that target frame. |
4462 | // TODO: make sure the establisher frame is properly aligned. |
4463 | if (!Thread::IsAddressInCurrentStack((void*)establisherFrame) || establisherFrame > ex.TargetFrameSp) |
4464 | { |
4465 | // TODO: add better error handling |
4466 | UNREACHABLE(); |
4467 | } |
4468 | |
4469 | dispatcherContext.EstablisherFrame = establisherFrame; |
4470 | dispatcherContext.ContextRecord = currentFrameContext; |
4471 | |
4472 | EXCEPTION_RECORD* exceptionRecord = ex.GetExceptionRecord(); |
4473 | |
4474 | if (establisherFrame == ex.TargetFrameSp) |
4475 | { |
4476 | // We have reached the frame that will handle the exception. |
4477 | ex.GetExceptionRecord()->ExceptionFlags |= EXCEPTION_TARGET_UNWIND; |
4478 | ExceptionTracker* pTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); |
4479 | pTracker->TakeExceptionPointersOwnership(&ex); |
4480 | } |
4481 | |
4482 | // Perform unwinding of the current frame |
4483 | disposition = ProcessCLRException(exceptionRecord, |
4484 | establisherFrame, |
4485 | currentFrameContext, |
4486 | &dispatcherContext); |
4487 | |
4488 | if (disposition == ExceptionContinueSearch) |
4489 | { |
4490 | // Exception handler not found. Try the parent frame. |
4491 | CONTEXT* temp = currentFrameContext; |
4492 | currentFrameContext = callerFrameContext; |
4493 | callerFrameContext = temp; |
4494 | } |
4495 | else |
4496 | { |
4497 | UNREACHABLE(); |
4498 | } |
4499 | } |
4500 | else |
4501 | { |
4502 | Thread::VirtualUnwindLeafCallFrame(currentFrameContext); |
4503 | } |
4504 | |
4505 | controlPc = GetIP(currentFrameContext); |
4506 | sp = (PVOID)GetSP(currentFrameContext); |
4507 | |
4508 | // Check whether we are crossing managed-to-native boundary |
4509 | if (!ExecutionManager::IsManagedCode(controlPc)) |
4510 | { |
// Return to UnwindManagedExceptionPass1 and let it unwind the native frames
4512 | { |
4513 | GCX_COOP(); |
4514 | // Pop all frames that are below the block of native frames and that would be |
4515 | // in the unwound part of the stack when UnwindManagedExceptionPass2 is resumed |
4516 | // at the next managed frame. |
4517 | |
4518 | UnwindFrameChain(GetThread(), sp); |
4519 | // We are going to reclaim the stack range that was scanned by the exception tracker |
4520 | // until now. We need to reset the explicit frames range so that if GC fires before |
4521 | // we recreate the tracker at the first managed frame after unwinding the native |
4522 | // frames, it doesn't attempt to scan the reclaimed stack range. |
4523 | // We also need to reset the scanned stack range since the scanned frames will be |
4524 | // obsolete after the unwind of the native frames completes. |
4525 | ExceptionTracker* pTracker = GetThread()->GetExceptionState()->GetCurrentExceptionTracker(); |
4526 | pTracker->CleanupBeforeNativeFramesUnwind(); |
4527 | } |
4528 | |
4529 | // Now we need to unwind the native frames until we reach managed frames again or the exception is |
4530 | // handled in the native code. |
4531 | STRESS_LOG2(LF_EH, LL_INFO100, "Unwinding native frames starting at IP = %p, SP = %p \n" , controlPc, sp); |
4532 | PAL_ThrowExceptionFromContext(currentFrameContext, &ex); |
4533 | UNREACHABLE(); |
4534 | } |
4535 | |
4536 | } while (Thread::IsAddressInCurrentStack(sp) && (establisherFrame != ex.TargetFrameSp)); |
4537 | |
4538 | _ASSERTE(!"UnwindManagedExceptionPass2: Unwinding failed. Reached the end of the stack" ); |
4539 | EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
4540 | } |
4541 | |
4542 | //--------------------------------------------------------------------------------------- |
4543 | // |
// This function performs the dispatch (first) pass for a managed exception.
// It tries to find an exception handler by examining each frame in the call stack.
// The search starts from the managed frame that caused the exception to be thrown.
// For each frame we use its PC value to find a handler using information that
// has been built by the JIT. If an exception handler is found, this function initiates
// the second pass to unwind the stack and execute the handler.
4550 | // |
4551 | // Arguments: |
4552 | // ex - a PAL_SEHException that stores information about the managed |
4553 | // exception that needs to be dispatched. |
4554 | // frameContext - the context of the first managed frame of the exception call stack |
4555 | // |
4556 | VOID DECLSPEC_NORETURN UnwindManagedExceptionPass1(PAL_SEHException& ex, CONTEXT* frameContext) |
4557 | { |
4558 | CONTEXT unwindStartContext; |
4559 | EXCEPTION_DISPOSITION disposition; |
4560 | DISPATCHER_CONTEXT dispatcherContext; |
4561 | EECodeInfo codeInfo; |
4562 | UINT_PTR controlPc; |
UINT_PTR establisherFrame = 0;
4564 | PVOID handlerData; |
4565 | |
4566 | #ifdef FEATURE_HIJACK |
4567 | GetThread()->UnhijackThread(); |
4568 | #endif |
4569 | |
4570 | controlPc = GetIP(frameContext); |
4571 | unwindStartContext = *frameContext; |
4572 | |
4573 | if (!ExecutionManager::IsManagedCode(GetIP(ex.GetContextRecord()))) |
4574 | { |
// This is the first time we see the managed exception; set its context to the managed frame that caused
// the exception to be thrown
4577 | *ex.GetContextRecord() = *frameContext; |
4578 | ex.GetExceptionRecord()->ExceptionAddress = (VOID*)controlPc; |
4579 | } |
4580 | |
4581 | ex.GetExceptionRecord()->ExceptionFlags = 0; |
4582 | |
4583 | memset(&dispatcherContext, 0, sizeof(DISPATCHER_CONTEXT)); |
4584 | disposition = ExceptionContinueSearch; |
4585 | |
4586 | do |
4587 | { |
4588 | codeInfo.Init(controlPc); |
4589 | dispatcherContext.FunctionEntry = codeInfo.GetFunctionEntry(); |
4590 | dispatcherContext.ControlPc = controlPc; |
4591 | dispatcherContext.ImageBase = codeInfo.GetModuleBase(); |
4592 | #ifdef ADJUST_PC_UNWOUND_TO_CALL |
4593 | dispatcherContext.ControlPcIsUnwound = !!(frameContext->ContextFlags & CONTEXT_UNWOUND_TO_CALL); |
4594 | #endif |
4595 | |
4596 | // Check whether we have a function table entry for the current controlPC. |
4597 | // If yes, then call RtlVirtualUnwind to get the establisher frame pointer |
4598 | // and then check whether an exception handler exists for the frame. |
4599 | if (dispatcherContext.FunctionEntry != NULL) |
4600 | { |
4601 | #ifdef USE_CURRENT_CONTEXT_IN_FILTER |
4602 | KNONVOLATILE_CONTEXT currentNonVolatileContext; |
CaptureNonvolatileRegisters(&currentNonVolatileContext, frameContext);
4604 | #endif // USE_CURRENT_CONTEXT_IN_FILTER |
4605 | |
4606 | RtlVirtualUnwind(UNW_FLAG_EHANDLER, |
4607 | dispatcherContext.ImageBase, |
4608 | dispatcherContext.ControlPc, |
4609 | dispatcherContext.FunctionEntry, |
4610 | frameContext, |
4611 | &handlerData, |
4612 | &establisherFrame, |
4613 | NULL); |
4614 | |
4615 | // Make sure that the establisher frame pointer is within stack boundaries. |
4616 | // TODO: make sure the establisher frame is properly aligned. |
4617 | if (!Thread::IsAddressInCurrentStack((void*)establisherFrame)) |
4618 | { |
4619 | // TODO: add better error handling |
4620 | UNREACHABLE(); |
4621 | } |
4622 | |
4623 | dispatcherContext.EstablisherFrame = establisherFrame; |
4624 | #ifdef USE_CURRENT_CONTEXT_IN_FILTER |
dispatcherContext.CurrentNonVolatileContextRecord = &currentNonVolatileContext;
4626 | #endif // USE_CURRENT_CONTEXT_IN_FILTER |
4627 | dispatcherContext.ContextRecord = frameContext; |
4628 | |
4629 | // Find exception handler in the current frame |
4630 | disposition = ProcessCLRException(ex.GetExceptionRecord(), |
4631 | establisherFrame, |
4632 | ex.GetContextRecord(), |
4633 | &dispatcherContext); |
4634 | |
4635 | if (disposition == ExceptionContinueSearch) |
4636 | { |
4637 | // Exception handler not found. Try the parent frame. |
4638 | controlPc = GetIP(frameContext); |
4639 | } |
4640 | else if (disposition == ExceptionStackUnwind) |
4641 | { |
4642 | // The first pass is complete. We have found the frame that |
4643 | // will handle the exception. Start the second pass. |
4644 | ex.TargetFrameSp = establisherFrame; |
4645 | UnwindManagedExceptionPass2(ex, &unwindStartContext); |
4646 | } |
4647 | else |
4648 | { |
// TODO: This needs to be implemented. Make it fail for now.
4650 | UNREACHABLE(); |
4651 | } |
4652 | } |
4653 | else |
4654 | { |
4655 | controlPc = Thread::VirtualUnwindLeafCallFrame(frameContext); |
4656 | } |
4657 | |
4658 | // Check whether we are crossing managed-to-native boundary |
4659 | while (!ExecutionManager::IsManagedCode(controlPc)) |
4660 | { |
4661 | UINT_PTR sp = GetSP(frameContext); |
4662 | |
4663 | BOOL success = PAL_VirtualUnwind(frameContext, NULL); |
4664 | if (!success) |
4665 | { |
4666 | _ASSERTE(!"UnwindManagedExceptionPass1: PAL_VirtualUnwind failed" ); |
4667 | EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
4668 | } |
4669 | |
4670 | controlPc = GetIP(frameContext); |
4671 | |
4672 | STRESS_LOG2(LF_EH, LL_INFO100, "Processing exception at native frame: IP = %p, SP = %p \n" , controlPc, sp); |
4673 | |
4674 | if (controlPc == 0) |
4675 | { |
4676 | if (!GetThread()->HasThreadStateNC(Thread::TSNC_ProcessedUnhandledException)) |
4677 | { |
4678 | LONG disposition = InternalUnhandledExceptionFilter_Worker(&ex.ExceptionPointers); |
4679 | _ASSERTE(disposition == EXCEPTION_CONTINUE_SEARCH); |
4680 | } |
4681 | TerminateProcess(GetCurrentProcess(), 1); |
4682 | UNREACHABLE(); |
4683 | } |
4684 | |
4685 | UINT_PTR parentSp = GetSP(frameContext); |
4686 | |
4687 | // Find all holders on this frame that are in scopes embedded in each other and call their filters. |
4688 | NativeExceptionHolderBase* holder = nullptr; |
4689 | while ((holder = NativeExceptionHolderBase::FindNextHolder(holder, (void*)sp, (void*)parentSp)) != nullptr) |
4690 | { |
4691 | EXCEPTION_DISPOSITION disposition = holder->InvokeFilter(ex); |
4692 | if (disposition == EXCEPTION_EXECUTE_HANDLER) |
4693 | { |
4694 | // Switch to pass 2 |
4695 | STRESS_LOG1(LF_EH, LL_INFO100, "First pass finished, found native handler, TargetFrameSp = %p\n" , sp); |
4696 | |
4697 | ex.TargetFrameSp = sp; |
4698 | UnwindManagedExceptionPass2(ex, &unwindStartContext); |
4699 | UNREACHABLE(); |
4700 | } |
4701 | |
4702 | // The EXCEPTION_CONTINUE_EXECUTION is not supported and should never be returned by a filter |
4703 | _ASSERTE(disposition == EXCEPTION_CONTINUE_SEARCH); |
4704 | } |
4705 | } |
4706 | |
4707 | } while (Thread::IsAddressInCurrentStack((void*)GetSP(frameContext))); |
4708 | |
4709 | _ASSERTE(!"UnwindManagedExceptionPass1: Failed to find a handler. Reached the end of the stack" ); |
4710 | EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
4711 | } |
4712 | |
4713 | VOID DECLSPEC_NORETURN DispatchManagedException(PAL_SEHException& ex, bool isHardwareException) |
4714 | { |
4715 | do |
4716 | { |
4717 | try |
4718 | { |
4719 | // Unwind the context to the first managed frame |
4720 | CONTEXT frameContext; |
4721 | |
// If the exception is a hardware exception, we use the exception's context record directly
4723 | if (isHardwareException) |
4724 | { |
4725 | frameContext = *ex.GetContextRecord(); |
4726 | } |
4727 | else |
4728 | { |
4729 | RtlCaptureContext(&frameContext); |
4730 | UINT_PTR currentSP = GetSP(&frameContext); |
4731 | |
4732 | if (Thread::VirtualUnwindToFirstManagedCallFrame(&frameContext) == 0) |
4733 | { |
4734 | // There are no managed frames on the stack, so we need to continue unwinding using C++ exception |
4735 | // handling |
4736 | break; |
4737 | } |
4738 | |
4739 | UINT_PTR firstManagedFrameSP = GetSP(&frameContext); |
4740 | |
4741 | // Check if there is any exception holder in the skipped frames. If there is one, we need to unwind them |
4742 | // using the C++ handling. This is a special case when the UNINSTALL_MANAGED_EXCEPTION_DISPATCHER was |
4743 | // not at the managed to native boundary. |
4744 | if (NativeExceptionHolderBase::FindNextHolder(nullptr, (void*)currentSP, (void*)firstManagedFrameSP) != nullptr) |
4745 | { |
4746 | break; |
4747 | } |
4748 | } |
4749 | |
4750 | if (ex.IsFirstPass()) |
4751 | { |
4752 | UnwindManagedExceptionPass1(ex, &frameContext); |
4753 | } |
4754 | else |
4755 | { |
4756 | // This is a continuation of pass 2 after native frames unwinding. |
4757 | UnwindManagedExceptionPass2(ex, &frameContext); |
4758 | } |
4759 | UNREACHABLE(); |
4760 | } |
4761 | catch (PAL_SEHException& ex2) |
4762 | { |
4763 | isHardwareException = false; |
4764 | ex = std::move(ex2); |
4765 | } |
4766 | |
4767 | } |
4768 | while (true); |
4769 | |
4770 | // Ensure that the corruption severity is set for exceptions that didn't pass through managed frames |
4771 | // yet and so there is no exception tracker. |
4772 | if (ex.IsFirstPass()) |
4773 | { |
4774 | // Get the thread and the thread exception state - they must exist at this point |
4775 | Thread *pCurThread = GetThread(); |
4776 | _ASSERTE(pCurThread != NULL); |
4777 | |
4778 | ThreadExceptionState * pCurTES = pCurThread->GetExceptionState(); |
4779 | _ASSERTE(pCurTES != NULL); |
4780 | |
4781 | #ifdef FEATURE_CORRUPTING_EXCEPTIONS |
4782 | ExceptionTracker* pEHTracker = pCurTES->GetCurrentExceptionTracker(); |
4783 | if (pEHTracker == NULL) |
4784 | { |
4785 | CorruptionSeverity severity = NotCorrupting; |
4786 | if (CEHelper::IsProcessCorruptedStateException(ex.GetExceptionRecord()->ExceptionCode)) |
4787 | { |
4788 | severity = ProcessCorrupting; |
4789 | } |
4790 | |
4791 | pCurTES->SetLastActiveExceptionCorruptionSeverity(severity); |
4792 | } |
4793 | #endif // FEATURE_CORRUPTING_EXCEPTIONS |
4794 | } |
4795 | |
4796 | throw std::move(ex); |
4797 | } |
4798 | |
4799 | #if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) |
4800 | |
4801 | /*++ |
4802 | Function : |
4803 | GetRegisterAddressByIndex |
4804 | |
4805 | Get address of a register in a context |
4806 | |
4807 | Parameters: |
4808 | PCONTEXT pContext : context containing the registers |
4809 | UINT index : index of the register (Rax=0 .. R15=15) |
4810 | |
4811 | Return value : |
Pointer to the context member representing the register
4813 | --*/ |
4814 | VOID* GetRegisterAddressByIndex(PCONTEXT pContext, UINT index) |
4815 | { |
4816 | return getRegAddr(index, pContext); |
4817 | } |
4818 | |
4819 | /*++ |
4820 | Function : |
4821 | GetRegisterValueByIndex |
4822 | |
4823 | Get value of a register in a context |
4824 | |
4825 | Parameters: |
4826 | PCONTEXT pContext : context containing the registers |
4827 | UINT index : index of the register (Rax=0 .. R15=15) |
4828 | |
4829 | Return value : |
Value of the context member representing the register
4831 | --*/ |
4832 | DWORD64 GetRegisterValueByIndex(PCONTEXT pContext, UINT index) |
4833 | { |
4834 | _ASSERTE(index < 16); |
4835 | return *(DWORD64*)GetRegisterAddressByIndex(pContext, index); |
4836 | } |
4837 | |
4838 | /*++ |
4839 | Function : |
4840 | GetModRMOperandValue |
4841 | |
4842 | Get value of an instruction operand represented by the ModR/M field |
4843 | |
4844 | Parameters: |
4845 | BYTE rex : REX prefix, 0 if there was none |
4846 | BYTE* ip : instruction pointer pointing to the ModR/M field |
4847 | PCONTEXT pContext : context containing the registers |
4848 | bool is8Bit : true if the operand size is 8 bit |
4849 | bool hasOpSizePrefix : true if the instruction has op size prefix (0x66) |
4850 | |
4851 | Return value : |
Value of the context member representing the register
4853 | --*/ |
4854 | DWORD64 GetModRMOperandValue(BYTE rex, BYTE* ip, PCONTEXT pContext, bool is8Bit, bool hasOpSizePrefix) |
4855 | { |
4856 | DWORD64 result; |
4857 | |
4858 | BYTE rex_b = (rex & 0x1); // high bit to modrm r/m field or SIB base field |
4859 | BYTE rex_x = (rex & 0x2) >> 1; // high bit to sib index field |
4860 | BYTE rex_r = (rex & 0x4) >> 2; // high bit to modrm reg field |
4861 | BYTE rex_w = (rex & 0x8) >> 3; // 1 = 64 bit operand size, 0 = operand size determined by hasOpSizePrefix |
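
// For example (illustrative): a REX prefix of 0x4C (0100 1100b) yields
// rex_w = 1 (64 bit operand size) and rex_r = 1 (reg field extended to 8..15),
// with rex_x = rex_b = 0.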
4862 | |
4863 | BYTE modrm = *ip++; |
4864 | |
4865 | _ASSERTE(modrm != 0); |
4866 | |
4867 | BYTE mod = (modrm & 0xC0) >> 6; |
4868 | BYTE reg = (modrm & 0x38) >> 3; |
4869 | BYTE rm = (modrm & 0x07); |
4870 | |
4871 | reg |= (rex_r << 3); |
4872 | BYTE rmIndex = rm | (rex_b << 3); |
4873 | |
// 8 bit idiv without the REX prefix uses registers AH, CH, DH, BH for rm 4..7,
// which is an exception to the regular register indexes.
4876 | bool isAhChDhBh = is8Bit && (rex == 0) && (rm >= 4); |
4877 | |
4878 | // See: Tables A-15,16,17 in AMD Dev Manual 3 for information |
4879 | // about how the ModRM/SIB/REX bytes interact. |
4880 | |
4881 | switch (mod) |
4882 | { |
4883 | case 0: |
4884 | case 1: |
4885 | case 2: |
4886 | if (rm == 4) // we have an SIB byte following |
4887 | { |
4888 | // |
4889 | // Get values from the SIB byte |
4890 | // |
4891 | BYTE sib = *ip++; |
4892 | |
4893 | _ASSERTE(sib != 0); |
4894 | |
4895 | BYTE ss = (sib & 0xC0) >> 6; |
4896 | BYTE index = (sib & 0x38) >> 3; |
4897 | BYTE base = (sib & 0x07); |
4898 | |
4899 | index |= (rex_x << 3); |
4900 | base |= (rex_b << 3); |
4901 | |
4902 | // |
4903 | // Get starting value |
4904 | // |
4905 | if ((mod == 0) && (base == 5)) |
4906 | { |
4907 | result = 0; |
4908 | } |
4909 | else |
4910 | { |
4911 | result = GetRegisterValueByIndex(pContext, base); |
4912 | } |
4913 | |
4914 | // |
4915 | // Add in the [index] |
4916 | // |
4917 | if (index != 4) |
4918 | { |
4919 | result += GetRegisterValueByIndex(pContext, index) << ss; |
4920 | } |
4921 | |
4922 | // |
4923 | // Finally add in the offset |
4924 | // |
4925 | if (mod == 0) |
4926 | { |
4927 | if (base == 5) |
4928 | { |
4929 | result += *((INT32*)ip); |
4930 | } |
4931 | } |
4932 | else if (mod == 1) |
4933 | { |
4934 | result += *((INT8*)ip); |
4935 | } |
4936 | else // mod == 2 |
4937 | { |
4938 | result += *((INT32*)ip); |
4939 | } |
4940 | |
4941 | } |
4942 | else |
4943 | { |
4944 | // |
4945 | // Get the value we need from the register. |
4946 | // |
4947 | |
// Check for RIP-relative addressing mode on AMD64,
// or displacement-only addressing mode on x86.
4950 | if ((mod == 0) && (rm == 5)) |
4951 | { |
4952 | #if defined(_TARGET_AMD64_) |
4953 | result = (DWORD64)ip + sizeof(INT32) + *(INT32*)ip; |
4954 | #else |
4955 | result = (DWORD64)(*(DWORD*)ip); |
4956 | #endif // _TARGET_AMD64_ |
4957 | } |
4958 | else |
4959 | { |
4960 | result = GetRegisterValueByIndex(pContext, rmIndex); |
4961 | |
4962 | if (mod == 1) |
4963 | { |
4964 | result += *((INT8*)ip); |
4965 | } |
4966 | else if (mod == 2) |
4967 | { |
4968 | result += *((INT32*)ip); |
4969 | } |
4970 | } |
4971 | } |
4972 | |
4973 | break; |
4974 | |
4975 | case 3: |
4976 | default: |
4977 | // The operand is stored in a register. |
4978 | if (isAhChDhBh) |
4979 | { |
// 8 bit idiv without the REX prefix uses registers AH, CH, DH or BH for rm 4..7.
4981 | // So we shift the register index to get the real register index. |
4982 | rmIndex -= 4; |
4983 | } |
4984 | |
4985 | result = (DWORD64)GetRegisterAddressByIndex(pContext, rmIndex); |
4986 | |
4987 | if (isAhChDhBh) |
4988 | { |
4989 | // Move one byte higher to get an address of the AH, CH, DH or BH |
4990 | result++; |
4991 | } |
4992 | |
4993 | break; |
4994 | |
4995 | } |
4996 | |
4997 | // |
// Now dereference through the result to get the resulting value.
4999 | // |
5000 | if (is8Bit) |
5001 | { |
5002 | result = *((BYTE*)result); |
5003 | } |
5004 | else if (rex_w != 0) |
5005 | { |
5006 | result = *((DWORD64*)result); |
5007 | } |
5008 | else if (hasOpSizePrefix) |
5009 | { |
5010 | result = *((USHORT*)result); |
5011 | } |
5012 | else |
5013 | { |
5014 | result = *((UINT32*)result); |
5015 | } |
5016 | |
5017 | return result; |
5018 | } |
5019 | |
5020 | /*++ |
5021 | Function : |
5022 | SkipPrefixes |
5023 | |
5024 | Skip all prefixes until the instruction code or the REX prefix is found |
5025 | |
5026 | Parameters: |
5027 | BYTE** ip : Pointer to the current instruction pointer. Updated |
5028 | as the function walks the codes. |
bool* hasOpSizePrefix : Pointer to bool, on exit set to true if an operand size prefix
5030 | was found. |
5031 | |
5032 | Return value : |
5033 | Code of the REX prefix or the instruction code after the prefixes. |
5034 | --*/ |
5035 | BYTE SkipPrefixes(BYTE **ip, bool* hasOpSizePrefix) |
5036 | { |
5037 | *hasOpSizePrefix = false; |
5038 | |
5039 | while (true) |
5040 | { |
5041 | BYTE code = *(*ip)++; |
5042 | |
5043 | switch (code) |
5044 | { |
5045 | case 0x66: // Operand-Size |
5046 | *hasOpSizePrefix = true; |
5047 | break; |
5048 | |
5049 | // Segment overrides |
5050 | case 0x26: // ES |
5051 | case 0x2E: // CS |
5052 | case 0x36: // SS |
5053 | case 0x3E: // DS |
5054 | case 0x64: // FS |
5055 | case 0x65: // GS |
5056 | |
5057 | // Size overrides |
5058 | case 0x67: // Address-Size |
5059 | |
5060 | // Lock |
5061 | case 0xf0: |
5062 | |
5063 | // String REP prefixes |
5064 | case 0xf2: // REPNE/REPNZ |
5065 | case 0xf3: |
5066 | break; |
5067 | |
5068 | default: |
// Return the first non-prefix code byte
5070 | return code; |
5071 | } |
5072 | } |
5073 | } |
5074 | |
5075 | /*++ |
5076 | Function : |
5077 | IsDivByZeroAnIntegerOverflow |
5078 | |
Check if a division by zero exception is in fact a division overflow. The
x64 processor generates the same exception in both cases for the IDIV / DIV
instruction, so we need to decode the instruction argument and check
whether it was zero or not.
5083 | |
5084 | Parameters: |
5085 | PCONTEXT pContext : context containing the registers |
5086 | PEXCEPTION_RECORD pExRecord : exception record of the exception |
5087 | |
5088 | Return value : |
5089 | true if the division error was an overflow |
5090 | --*/ |
5091 | bool IsDivByZeroAnIntegerOverflow(PCONTEXT pContext) |
5092 | { |
5093 | BYTE * ip = (BYTE *)GetIP(pContext); |
5094 | BYTE rex = 0; |
5095 | bool hasOpSizePrefix = false; |
5096 | |
5097 | BYTE code = SkipPrefixes(&ip, &hasOpSizePrefix); |
5098 | |
// The REX prefix must directly precede the instruction code
5100 | if ((code & 0xF0) == 0x40) |
5101 | { |
5102 | rex = code; |
5103 | code = *ip++; |
5104 | } |
5105 | |
5106 | DWORD64 divisor = 0; |
5107 | |
5108 | // Check if the instruction is IDIV or DIV. The instruction code includes the three |
5109 | // 'reg' bits in the ModRM byte. These are 7 for IDIV and 6 for DIV |
5110 | BYTE regBits = (*ip & 0x38) >> 3; |
5111 | if ((code == 0xF7 || code == 0xF6) && (regBits == 7 || regBits == 6)) |
5112 | { |
5113 | bool is8Bit = (code == 0xF6); |
5114 | divisor = GetModRMOperandValue(rex, ip, pContext, is8Bit, hasOpSizePrefix); |
5115 | } |
5116 | else |
5117 | { |
5118 | _ASSERTE(!"Invalid instruction (expected IDIV or DIV)" ); |
5119 | } |
5120 | |
5121 | // If the division operand is zero, it was division by zero. Otherwise the failure |
5122 | // must have been an overflow. |
5123 | return divisor != 0; |
5124 | } |
5125 | #endif // _TARGET_AMD64_ || _TARGET_X86_ |
5126 | |
5127 | BOOL IsSafeToCallExecutionManager() |
5128 | { |
5129 | Thread *pThread = GetThread(); |
5130 | |
// It is safe to call ExecutionManager::IsManagedCode only if the current thread is in
5132 | // the cooperative mode. Otherwise ExecutionManager::IsManagedCode could deadlock if |
5133 | // the exception happened when the thread was holding the ExecutionManager's writer lock. |
5134 | // When the thread is in preemptive mode, we know for sure that it is not executing managed code. |
5135 | // Unfortunately, when running GC stress mode that invokes GC after every jitted or NGENed |
5136 | // instruction, we need to relax that to enable instrumentation of PInvoke stubs that switch to |
5137 | // preemptive GC mode at some point. |
5138 | return ((pThread != NULL) && pThread->PreemptiveGCDisabled()) || |
5139 | GCStress<cfg_instr_jit>::IsEnabled() || |
5140 | GCStress<cfg_instr_ngen>::IsEnabled(); |
5141 | } |
5142 | |
5143 | #ifdef VSD_STUB_CAN_THROW_AV |
// Returns TRUE if the given IP is inside a virtual call stub
5145 | static BOOL IsIPinVirtualStub(PCODE f_IP) |
5146 | { |
5147 | LIMITED_METHOD_CONTRACT; |
5148 | |
5149 | Thread * pThread = GetThread(); |
5150 | |
5151 | // We may not have a managed thread object. Example is an AV on the helper thread. |
5152 | // (perhaps during StubManager::IsStub) |
5153 | if (pThread == NULL) |
5154 | { |
5155 | return FALSE; |
5156 | } |
5157 | |
5158 | VirtualCallStubManager::StubKind sk; |
5159 | VirtualCallStubManager::FindStubManager(f_IP, &sk, FALSE /* usePredictStubKind */); |
5160 | |
return (sk == VirtualCallStubManager::SK_DISPATCH) ||
       (sk == VirtualCallStubManager::SK_RESOLVE);
5173 | } |
5174 | #endif // VSD_STUB_CAN_THROW_AV |
5175 | |
5176 | BOOL IsSafeToHandleHardwareException(PCONTEXT contextRecord, PEXCEPTION_RECORD exceptionRecord) |
5177 | { |
5178 | PCODE controlPc = GetIP(contextRecord); |
5179 | return g_fEEStarted && ( |
5180 | exceptionRecord->ExceptionCode == STATUS_BREAKPOINT || |
5181 | exceptionRecord->ExceptionCode == STATUS_SINGLE_STEP || |
5182 | (IsSafeToCallExecutionManager() && ExecutionManager::IsManagedCode(controlPc)) || |
5183 | #ifdef VSD_STUB_CAN_THROW_AV |
5184 | IsIPinVirtualStub(controlPc) || // access violation comes from DispatchStub of Interface call |
5185 | #endif // VSD_STUB_CAN_THROW_AV |
5186 | IsIPInMarkedJitHelper(controlPc)); |
5187 | } |
5188 | |
5189 | #ifdef _TARGET_ARM_ |
5190 | static inline BOOL HandleArmSingleStep(PCONTEXT pContext, PEXCEPTION_RECORD pExceptionRecord, Thread *pThread) |
5191 | { |
5192 | #ifdef __linux__ |
// On ARM Linux the exception points to the break instruction,
// but the rest of the code expects it to point to the instruction after the break
5195 | if (pExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT) |
5196 | { |
5197 | SetIP(pContext, GetIP(pContext) + CORDbg_BREAK_INSTRUCTION_SIZE); |
5198 | pExceptionRecord->ExceptionAddress = (void *)GetIP(pContext); |
5199 | } |
5200 | #endif |
5201 | // On ARM we don't have any reliable hardware support for single stepping so it is emulated in software. |
5202 | // The implementation will end up throwing an EXCEPTION_BREAKPOINT rather than an EXCEPTION_SINGLE_STEP |
5203 | // and leaves other aspects of the thread context in an invalid state. Therefore we use this opportunity |
5204 | // to fixup the state before any other part of the system uses it (we do it here since only the debugger |
5205 | // uses single step functionality). |
5206 | |
// First ask the emulation itself whether this exception occurred while single stepping was enabled. If so,
// it will fix up the context to be consistent again and return true. If it was, and the exception was
// EXCEPTION_BREAKPOINT, then we translate it to EXCEPTION_SINGLE_STEP (otherwise we leave it be, e.g. the
// instruction being stepped caused an access violation).
5211 | if (pThread->HandleSingleStep(pContext, pExceptionRecord->ExceptionCode) && (pExceptionRecord->ExceptionCode == EXCEPTION_BREAKPOINT)) |
5212 | { |
5213 | pExceptionRecord->ExceptionCode = EXCEPTION_SINGLE_STEP; |
5214 | pExceptionRecord->ExceptionAddress = (void *)GetIP(pContext); |
5215 | return TRUE; |
5216 | } |
5217 | return FALSE; |
5218 | } |
5219 | #endif // _TARGET_ARM_ |
5220 | |
5221 | BOOL HandleHardwareException(PAL_SEHException* ex) |
5222 | { |
5223 | _ASSERTE(IsSafeToHandleHardwareException(ex->GetContextRecord(), ex->GetExceptionRecord())); |
5224 | |
5225 | if (ex->GetExceptionRecord()->ExceptionCode != STATUS_BREAKPOINT && ex->GetExceptionRecord()->ExceptionCode != STATUS_SINGLE_STEP) |
5226 | { |
// A hardware exception is handled only if it happened in jitted code or
5228 | // in one of the JIT helper functions (JIT_MemSet, ...) |
5229 | PCODE controlPc = GetIP(ex->GetContextRecord()); |
5230 | if (ExecutionManager::IsManagedCode(controlPc) && IsGcMarker(ex->GetContextRecord(), ex->GetExceptionRecord())) |
5231 | { |
5232 | // Exception was handled, let the signal handler return to the exception context. Some registers in the context can |
5233 | // have been modified by the GC. |
5234 | return TRUE; |
5235 | } |
5236 | |
5237 | #if defined(_TARGET_AMD64_) || defined(_TARGET_X86_) |
5238 | // It is possible that an overflow was mapped to a divide-by-zero exception. |
5239 | // This happens when we try to divide the maximum negative value of a |
5240 | // signed integer with -1. |
5241 | // |
5242 | // Thus, we will attempt to decode the instruction @ RIP to determine if that |
5243 | // is the case using the faulting context. |
5244 | if ((ex->GetExceptionRecord()->ExceptionCode == EXCEPTION_INT_DIVIDE_BY_ZERO) && |
5245 | IsDivByZeroAnIntegerOverflow(ex->GetContextRecord())) |
5246 | { |
5247 | // The exception was an integer overflow, so augment the exception code. |
5248 | ex->GetExceptionRecord()->ExceptionCode = EXCEPTION_INT_OVERFLOW; |
5249 | } |
5250 | #endif // _TARGET_AMD64_ || _TARGET_X86_ |
5251 | |
5252 | // Create frame necessary for the exception handling |
5253 | FrameWithCookie<FaultingExceptionFrame> fef; |
5254 | *((&fef)->GetGSCookiePtr()) = GetProcessGSCookie(); |
5255 | { |
5256 | GCX_COOP(); // Must be cooperative to modify frame chain. |
5257 | if (IsIPInMarkedJitHelper(controlPc)) |
5258 | { |
5259 | // For JIT helpers, we need to set the frame to point to the |
5260 | // managed code that called the helper, otherwise the stack |
// walker would skip all the managed frames up to the next
5262 | // explicit frame. |
5263 | PAL_VirtualUnwind(ex->GetContextRecord(), NULL); |
5264 | ex->GetExceptionRecord()->ExceptionAddress = (PVOID)GetIP(ex->GetContextRecord()); |
5265 | } |
5266 | #ifdef VSD_STUB_CAN_THROW_AV |
5267 | else if (IsIPinVirtualStub(controlPc)) |
5268 | { |
5269 | AdjustContextForVirtualStub(ex->GetExceptionRecord(), ex->GetContextRecord()); |
5270 | } |
5271 | #endif // VSD_STUB_CAN_THROW_AV |
5272 | fef.InitAndLink(ex->GetContextRecord()); |
5273 | } |
5274 | |
5275 | DispatchManagedException(*ex, true /* isHardwareException */); |
5276 | UNREACHABLE(); |
5277 | } |
5278 | else |
5279 | { |
5280 | // This is a breakpoint or single step stop, we report it to the debugger. |
5281 | Thread *pThread = GetThread(); |
5282 | if (pThread != NULL && g_pDebugInterface != NULL) |
5283 | { |
5284 | #ifdef _TARGET_ARM_ |
5285 | HandleArmSingleStep(ex->GetContextRecord(), ex->GetExceptionRecord(), pThread); |
5286 | #endif |
5287 | if (ex->GetExceptionRecord()->ExceptionCode == STATUS_BREAKPOINT) |
5288 | { |
// If this is a breakpoint context, it is set up to point to the instruction after the break instruction.
// But the debugger expects to see a context that points to the break instruction, which is why we correct it.
5291 | SetIP(ex->GetContextRecord(), GetIP(ex->GetContextRecord()) - CORDbg_BREAK_INSTRUCTION_SIZE); |
5292 | ex->GetExceptionRecord()->ExceptionAddress = (void *)GetIP(ex->GetContextRecord()); |
5293 | } |
5294 | |
5295 | if (g_pDebugInterface->FirstChanceNativeException(ex->GetExceptionRecord(), |
5296 | ex->GetContextRecord(), |
5297 | ex->GetExceptionRecord()->ExceptionCode, |
5298 | pThread)) |
5299 | { |
5300 | // Exception was handled, let the signal handler return to the exception context. Some registers in the context can |
5301 | // have been modified by the debugger. |
5302 | return TRUE; |
5303 | } |
5304 | } |
5305 | } |
5306 | |
5307 | return FALSE; |
5308 | } |
5309 | |
5310 | #endif // FEATURE_PAL |
5311 | |
5312 | #ifndef FEATURE_PAL |
5313 | void ClrUnwindEx(EXCEPTION_RECORD* pExceptionRecord, UINT_PTR ReturnValue, UINT_PTR TargetIP, UINT_PTR TargetFrameSp) |
5314 | { |
5315 | PVOID TargetFrame = (PVOID)TargetFrameSp; |
5316 | |
5317 | CONTEXT ctx; |
5318 | RtlUnwindEx(TargetFrame, |
5319 | (PVOID)TargetIP, |
5320 | pExceptionRecord, |
5321 | (PVOID)ReturnValue, // ReturnValue |
5322 | &ctx, |
5323 | NULL); // HistoryTable |
5324 | |
5325 | // doesn't return |
5326 | UNREACHABLE(); |
5327 | } |
5328 | #endif // !FEATURE_PAL |
5329 | |
5330 | void TrackerAllocator::Init() |
5331 | { |
5332 | void* pvFirstPage = (void*)new BYTE[TRACKER_ALLOCATOR_PAGE_SIZE]; |
5333 | |
5334 | ZeroMemory(pvFirstPage, TRACKER_ALLOCATOR_PAGE_SIZE); |
5335 | |
5336 | m_pFirstPage = (Page*)pvFirstPage; |
5337 | |
5338 | _ASSERTE(NULL == m_pFirstPage->m_header.m_pNext); |
5339 | _ASSERTE(0 == m_pFirstPage->m_header.m_idxFirstFree); |
5340 | |
5341 | m_pCrst = new Crst(CrstException, CRST_UNSAFE_ANYMODE); |
5342 | |
5343 | EH_LOG((LL_INFO100, "TrackerAllocator::Init() succeeded..\n" )); |
5344 | } |
5345 | |
5346 | void TrackerAllocator::Terminate() |
5347 | { |
5348 | Page* pPage = m_pFirstPage; |
5349 | |
5350 | while (pPage) |
5351 | { |
5352 | Page* pDeleteMe = pPage; |
5353 | pPage = pPage->m_header.m_pNext; |
5354 | delete [] pDeleteMe; |
5355 | } |
5356 | delete m_pCrst; |
5357 | } |
5358 | |
5359 | ExceptionTracker* TrackerAllocator::GetTrackerMemory() |
5360 | { |
5361 | CONTRACT(ExceptionTracker*) |
5362 | { |
5363 | GC_TRIGGERS; |
5364 | NOTHROW; |
5365 | MODE_ANY; |
5366 | POSTCONDITION(CheckPointer(RETVAL, NULL_OK)); |
5367 | } |
5368 | CONTRACT_END; |
5369 | |
5370 | _ASSERTE(NULL != m_pFirstPage); |
5371 | |
5372 | Page* pPage = m_pFirstPage; |
5373 | |
5374 | ExceptionTracker* pTracker = NULL; |
5375 | |
5376 | for (int i = 0; i < TRACKER_ALLOCATOR_MAX_OOM_SPINS; i++) |
5377 | { |
5378 | { // open lock scope |
5379 | CrstHolder ch(m_pCrst); |
5380 | |
5381 | while (pPage) |
5382 | { |
5383 | int idx; |
5384 | for (idx = 0; idx < NUM_TRACKERS_PER_PAGE; idx++) |
5385 | { |
5386 | pTracker = &(pPage->m_rgTrackers[idx]); |
5387 | if (pTracker->m_pThread == NULL) |
5388 | { |
5389 | break; |
5390 | } |
5391 | } |
5392 | |
5393 | if (idx < NUM_TRACKERS_PER_PAGE) |
5394 | { |
5395 | break; |
5396 | } |
5397 | else |
5398 | { |
5399 | if (NULL == pPage->m_header.m_pNext) |
5400 | { |
5401 | Page* pNewPage = (Page*) new (nothrow) BYTE[TRACKER_ALLOCATOR_PAGE_SIZE]; |
5402 | |
5403 | if (pNewPage) |
5404 | { |
5405 | STRESS_LOG0(LF_EH, LL_INFO10, "TrackerAllocator: allocated page\n" ); |
5406 | pPage->m_header.m_pNext = pNewPage; |
5407 | ZeroMemory(pPage->m_header.m_pNext, TRACKER_ALLOCATOR_PAGE_SIZE); |
5408 | } |
5409 | else |
5410 | { |
5411 | STRESS_LOG0(LF_EH, LL_WARNING, "TrackerAllocator: failed to allocate a page\n" ); |
5412 | pTracker = NULL; |
5413 | } |
5414 | } |
5415 | |
5416 | pPage = pPage->m_header.m_pNext; |
5417 | } |
5418 | } |
5419 | |
5420 | if (pTracker) |
5421 | { |
5422 | Thread* pThread = GetThread(); |
5423 | _ASSERTE(NULL != pPage); |
5424 | ZeroMemory(pTracker, sizeof(*pTracker)); |
5425 | pTracker->m_pThread = pThread; |
5426 | EH_LOG((LL_INFO100, "TrackerAllocator: allocating tracker 0x%p, thread = 0x%p\n" , pTracker, pTracker->m_pThread)); |
5427 | break; |
5428 | } |
5429 | } // end lock scope |
5430 | |
5431 | // |
5432 | // We could not allocate a new page of memory. This is a fatal error if it happens twice (nested) |
5433 | // on the same thread because we have only one m_OOMTracker. We will spin hoping for another thread |
5434 | // to give back to the pool or for the allocation to succeed. |
5435 | // |
5436 | |
5437 | ClrSleepEx(TRACKER_ALLOCATOR_OOM_SPIN_DELAY, FALSE); |
5438 | STRESS_LOG1(LF_EH, LL_WARNING, "TrackerAllocator: retry #%d\n" , i); |
5439 | } |
5440 | |
5441 | RETURN pTracker; |
5442 | } |
5443 | |
5444 | void TrackerAllocator::FreeTrackerMemory(ExceptionTracker* pTracker) |
5445 | { |
5446 | CONTRACTL |
5447 | { |
5448 | GC_NOTRIGGER; |
5449 | NOTHROW; |
5450 | MODE_ANY; |
5451 | } |
5452 | CONTRACTL_END; |
5453 | |
5454 | // mark this entry as free |
5455 | EH_LOG((LL_INFO100, "TrackerAllocator: freeing tracker 0x%p, thread = 0x%p\n" , pTracker, pTracker->m_pThread)); |
5456 | CONSISTENCY_CHECK(pTracker->IsValid()); |
5457 | FastInterlockExchangePointer(&(pTracker->m_pThread), NULL); |
5458 | } |
5459 | |
5460 | #ifndef FEATURE_PAL |
// This is a Windows-specific implementation, as it is based upon the notion of collided unwind that is specific
// to 64-bit Windows.
5463 | // |
5464 | // If pContext is not NULL, then this function copies pContext to pDispatcherContext->ContextRecord. If pContext |
5465 | // is NULL, then this function assumes that pDispatcherContext->ContextRecord has already been fixed up. In any |
5466 | // case, this function then starts to update the various fields in pDispatcherContext. |
5467 | // |
5468 | // In order to redirect the unwind, the OS requires us to provide a personality routine for the code at the |
5469 | // new context we are providing. If RtlVirtualUnwind can't determine the personality routine and using |
5470 | // the default managed code personality routine isn't appropriate (maybe you aren't returning to managed code) |
5471 | // specify pUnwindPersonalityRoutine. For instance the debugger uses this to unwind from ExceptionHijack back |
5472 | // to RaiseException in win32 and specifies an empty personality routine. For more details about this |
5473 | // see the comments in the code below. |
5474 | // |
5475 | // <AMD64-specific> |
5476 | // AMD64 is more "advanced", in that the DISPATCHER_CONTEXT contains a field for the TargetIp. So we don't have |
5477 | // to use the control PC in pDispatcherContext->ContextRecord to indicate the target IP for the unwind. However, |
5478 | // this also means that pDispatcherContext->ContextRecord is expected to be consistent. |
5479 | // </AMD64-specific> |
5480 | // |
5481 | // For more information, refer to vctools\crt\crtw32\misc\{ia64|amd64}\chandler.c for __C_specific_handler() and |
5482 | // nt\base\ntos\rtl\{ia64|amd64}\exdsptch.c for RtlUnwindEx(). |
5483 | void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, LPVOID originalControlPC, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine) |
5484 | { |
5485 | if (pContext) |
5486 | { |
5487 | STRESS_LOG1(LF_EH, LL_INFO10, "FDC: pContext: %p\n", pContext);
5488 | CopyOSContext(pDispatcherContext->ContextRecord, pContext); |
5489 | } |
5490 | |
5491 | pDispatcherContext->ControlPc = (UINT_PTR) GetIP(pDispatcherContext->ContextRecord); |
5492 | |
5493 | #if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_) |
5494 | // Since this routine is used to fixup contexts for async exceptions, |
5495 | // clear the CONTEXT_UNWOUND_TO_CALL flag since, semantically, frames |
5496 | // where such exceptions have happened do not have callsites. On a similar |
5497 | // note, also clear out the ControlPcIsUnwound field. Following discussion with
5498 | // AaronGi from the kernel team, it's safe for us to have both of these |
5499 | // cleared. |
5500 | // |
5501 | // The OS will pick this up with the rest of the DispatcherContext state |
5502 | // when it processes collided unwind and thus, when our managed personality |
5503 | // routine is invoked, ExceptionTracker::InitializeCrawlFrame will adjust |
5504 | // ControlPC correctly. |
5505 | pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_UNWOUND_TO_CALL; |
5506 | pDispatcherContext->ControlPcIsUnwound = FALSE; |
5507 | |
5508 | // Also, clear out the debug-registers flag so that when this context is used by the |
5509 | // OS, it does not end up setting bogus access breakpoints. The kernel team will also |
5510 | // be fixing it at their end, in their implementation of collided unwind. |
5511 | pDispatcherContext->ContextRecord->ContextFlags &= ~CONTEXT_DEBUG_REGISTERS; |
5512 | |
5513 | #ifdef _TARGET_ARM_ |
5514 | // But keep the architecture flag set (it's part of CONTEXT_DEBUG_REGISTERS)
5515 | pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM; |
5516 | #else // _TARGET_ARM64_ |
5517 | // But keep the architecture flag set (it's part of CONTEXT_DEBUG_REGISTERS)
5518 | pDispatcherContext->ContextRecord->ContextFlags |= CONTEXT_ARM64; |
5519 | #endif // _TARGET_ARM_ |
5520 | |
5521 | #endif // _TARGET_ARM_ || _TARGET_ARM64_ |
5522 | |
5523 | INDEBUG(pDispatcherContext->FunctionEntry = (PT_RUNTIME_FUNCTION)INVALID_POINTER_CD); |
5524 | INDEBUG(pDispatcherContext->ImageBase = INVALID_POINTER_CD); |
5525 | |
5526 | pDispatcherContext->FunctionEntry = RtlLookupFunctionEntry(pDispatcherContext->ControlPc, |
5527 | &(pDispatcherContext->ImageBase), |
5528 | NULL |
5529 | ); |
5530 | |
5531 | _ASSERTE(((PT_RUNTIME_FUNCTION)INVALID_POINTER_CD) != pDispatcherContext->FunctionEntry); |
5532 | _ASSERTE(INVALID_POINTER_CD != pDispatcherContext->ImageBase); |
5533 | |
5534 | // |
5535 | // need to find the establisher frame by virtually unwinding |
5536 | // |
5537 | CONTEXT tempContext; |
5538 | PVOID HandlerData; |
5539 | |
5540 | CopyOSContext(&tempContext, pDispatcherContext->ContextRecord); |
5541 | |
5542 | // RtlVirtualUnwind returns the language specific handler for the ControlPC in question |
5543 | // on ARM and AMD64. |
5544 | pDispatcherContext->LanguageHandler = RtlVirtualUnwind( |
5545 | NULL, // HandlerType |
5546 | pDispatcherContext->ImageBase, |
5547 | pDispatcherContext->ControlPc, |
5548 | pDispatcherContext->FunctionEntry, |
5549 | &tempContext, |
5550 | &HandlerData, |
5551 | &(pDispatcherContext->EstablisherFrame), |
5552 | NULL); |
5553 | |
5554 | pDispatcherContext->HandlerData = NULL; |
5555 | pDispatcherContext->HistoryTable = NULL; |
5556 | |
5557 | |
5558 | // Why does the OS consider it invalid to have a NULL personality routine (or, why does |
5559 | // the OS assume that DispatcherContext returned from ExceptionCollidedUnwind will always |
5560 | // have a valid personality routine)? |
5561 | // |
5562 | // |
5563 | // We force the OS to pickup the DispatcherContext (that we fixed above) by returning |
5564 | // ExceptionCollidedUnwind. Per Dave Cutler, the only entity which is allowed to return |
5565 | // this exception disposition is the personality routine of the assembly helper which is used |
5566 | // to invoke the user (stack-based) personality routines. For such invocations made by the |
5567 | // OS assembly helper, the DispatcherContext it saves before invoking the user personality routine |
5568 | // will always have a valid personality routine reference and thus, when a real collided unwind happens |
5569 | // and this exception disposition is returned, OS exception dispatch will have a valid personality routine |
5570 | // to invoke. |
5571 | // |
5572 | // By using this exception disposition to make the OS walk stacks we broke (for async exceptions), we are |
5573 | // simply abusing the semantics of this disposition. However, since we must use it, we should also check
5574 | // that we are returning a valid personality routine reference back to the OS. |
5575 | if(pDispatcherContext->LanguageHandler == NULL) |
5576 | { |
5577 | if (pUnwindPersonalityRoutine != NULL) |
5578 | { |
5579 | pDispatcherContext->LanguageHandler = pUnwindPersonalityRoutine; |
5580 | } |
5581 | else |
5582 | { |
5583 | // We would be here only for fixing up context for an async exception in managed code. |
5584 | // This implies that we should have had a personality routine returned from the call to
5585 | // RtlVirtualUnwind above. |
5586 | // |
5587 | // However, if the ControlPC happened to be in the prolog or epilog of a managed method, |
5588 | // then RtlVirtualUnwind will always return NULL. We cannot return this NULL back to the |
5589 | // OS as it is an invalid value which the OS does not expect (and attempting to do so will |
5590 | // result in the kernel exception dispatch going haywire). |
5591 | #if defined(_DEBUG) |
5592 | // We should be in jitted code |
5593 | TADDR adrRedirectedIP = PCODEToPINSTR(pDispatcherContext->ControlPc); |
5594 | _ASSERTE(ExecutionManager::IsManagedCode(adrRedirectedIP)); |
5595 | #endif // _DEBUG |
5596 | |
5597 | // Set the personality routine to be returned as the one which is conventionally |
5598 | // invoked for exception dispatch. |
5599 | pDispatcherContext->LanguageHandler = (PEXCEPTION_ROUTINE)GetEEFuncEntryPoint(ProcessCLRException); |
5600 | STRESS_LOG1(LF_EH, LL_INFO10, "FDC: ControlPC was in prolog/epilog, so setting DC->LanguageHandler to %p\n", pDispatcherContext->LanguageHandler);
5601 | } |
5602 | } |
5603 | |
5604 | _ASSERTE(pDispatcherContext->LanguageHandler != NULL); |
5605 | } |
5606 | |
5607 | |
5608 | // See the comment above for the overloaded version of this function. |
5609 | void FixupDispatcherContext(DISPATCHER_CONTEXT* pDispatcherContext, CONTEXT* pContext, CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL) |
5610 | { |
5611 | _ASSERTE(pOriginalContext != NULL); |
5612 | FixupDispatcherContext(pDispatcherContext, pContext, (LPVOID)::GetIP(pOriginalContext), pUnwindPersonalityRoutine); |
5613 | } |
5614 | |
5615 | |
5616 | BOOL FirstCallToHandler ( |
5617 | DISPATCHER_CONTEXT *pDispatcherContext, |
5618 | CONTEXT **ppContextRecord) |
5619 | { |
5620 | CONTRACTL |
5621 | { |
5622 | NOTHROW; |
5623 | GC_NOTRIGGER; |
5624 | MODE_ANY; |
5625 | SO_TOLERANT; |
5626 | } |
5627 | CONTRACTL_END; |
5628 | |
5629 | FaultingExceptionFrame *pFrame = GetFrameFromRedirectedStubStackFrame(pDispatcherContext); |
5630 | |
5631 | BOOL *pfFilterExecuted = pFrame->GetFilterExecutedFlag(); |
5632 | BOOL fFilterExecuted = *pfFilterExecuted; |
5633 | |
5634 | STRESS_LOG4(LF_EH, LL_INFO10, "FirstCallToHandler: Fixing exception context for redirect stub, sp %p, establisher %p, flag %p -> %u\n",
5635 | GetSP(pDispatcherContext->ContextRecord), |
5636 | pDispatcherContext->EstablisherFrame, |
5637 | pfFilterExecuted, |
5638 | fFilterExecuted); |
5639 | |
5640 | *ppContextRecord = pFrame->GetExceptionContext(); |
5641 | *pfFilterExecuted = TRUE; |
5642 | |
5643 | return !fFilterExecuted; |
5644 | } |
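     | 
     | // Illustrative sketch (comment only): FirstCallToHandler is a one-shot latch
     | // keyed off the FaultingExceptionFrame's filter-executed flag. A personality
     | // routine built on top of it follows this shape, of which HijackHandler and
     | // FixContextHandler below are the two real instances:
     | //
     | //     CONTEXT* pNewContext = NULL;
     | //     if (FirstCallToHandler(pDispatcherContext, &pNewContext))
     | //     {
     | //         // first visit only: fix up the context the stub captured
     | //     }
     | //     FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord);
     | //     return ExceptionCollidedUnwind;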
5645 | |
5646 | |
5647 | EXTERN_C EXCEPTION_DISPOSITION |
5648 | HijackHandler(IN PEXCEPTION_RECORD pExceptionRecord |
5649 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5650 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5651 | IN OUT PCONTEXT pContextRecord, |
5652 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5653 | ) |
5654 | { |
5655 | CONTRACTL |
5656 | { |
5657 | GC_NOTRIGGER; |
5658 | NOTHROW; |
5659 | MODE_ANY; |
5660 | SO_TOLERANT; |
5661 | } |
5662 | CONTRACTL_END; |
5663 | |
5664 | STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: establisher: %p, disp->cxr: %p, sp %p, cxr @ exception: %p\n",
5665 | pDispatcherContext->EstablisherFrame, |
5666 | pDispatcherContext->ContextRecord, |
5667 | GetSP(pDispatcherContext->ContextRecord), |
5668 | pContextRecord); |
5669 | |
5670 | Thread* pThread = GetThread(); |
5671 | CONTEXT *pNewContext = NULL; |
5672 | |
5673 | VALIDATE_BACKOUT_STACK_CONSUMPTION; |
5674 | |
5675 | if (FirstCallToHandler(pDispatcherContext, &pNewContext)) |
5676 | { |
5677 | // |
5678 | // We've pushed a Frame, but it is not initialized yet, so we |
5679 | // must not be in preemptive mode |
5680 | // |
5681 | CONSISTENCY_CHECK(pThread->PreemptiveGCDisabled()); |
5682 | |
5683 | // |
5684 | // AdjustContextForThreadStop will reset the ThrowControlForThread state |
5685 | // on the thread, but we don't want to do that just yet. We need that |
5686 | // information in our personality routine, so we will reset it back to |
5687 | // InducedThreadStop and then clear it in our personality routine. |
5688 | // |
5689 | CONSISTENCY_CHECK(IsThreadHijackedForThreadStop(pThread, pExceptionRecord)); |
5690 | AdjustContextForThreadStop(pThread, pNewContext); |
5691 | pThread->SetThrowControlForThread(Thread::InducedThreadStop); |
5692 | } |
5693 | |
5694 | FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord); |
5695 | |
5696 | STRESS_LOG4(LF_EH, LL_INFO10, "HijackHandler: new establisher: %p, disp->cxr: %p, new ip: %p, new sp: %p\n",
5697 | pDispatcherContext->EstablisherFrame, |
5698 | pDispatcherContext->ContextRecord, |
5699 | GetIP(pDispatcherContext->ContextRecord), |
5700 | GetSP(pDispatcherContext->ContextRecord)); |
5701 | |
5702 | // Returning ExceptionCollidedUnwind will cause the OS to take our new context record |
5703 | // and dispatcher context and restart the exception dispatching on this call frame, |
5704 | // which is exactly the behavior we want in order to restore our thread's unwindability |
5705 | // (which was broken when we whacked the IP to get control over the thread) |
5706 | return ExceptionCollidedUnwind; |
5707 | } |
5708 | |
5709 | |
5710 | EXTERN_C VOID FixContextForFaultingExceptionFrame ( |
5711 | EXCEPTION_RECORD* pExceptionRecord, |
5712 | CONTEXT *pContextRecord); |
5713 | |
5714 | EXTERN_C EXCEPTION_DISPOSITION |
5715 | FixContextHandler(IN PEXCEPTION_RECORD pExceptionRecord |
5716 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5717 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5718 | IN OUT PCONTEXT pContextRecord, |
5719 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5720 | ) |
5721 | { |
5722 | CONTEXT* pNewContext = NULL; |
5723 | |
5724 | VALIDATE_BACKOUT_STACK_CONSUMPTION; |
5725 | |
5726 | // Our backout validation should ensure that we don't SO here. |
5727 | BEGIN_CONTRACT_VIOLATION(SOToleranceViolation); |
5728 | |
5729 | if (FirstCallToHandler(pDispatcherContext, &pNewContext)) |
5730 | { |
5731 | // |
5732 | // We've pushed a Frame, but it is not initialized yet, so we |
5733 | // must not be in preemptive mode |
5734 | // |
5735 | CONSISTENCY_CHECK(GetThread()->PreemptiveGCDisabled()); |
5736 | |
5737 | FixContextForFaultingExceptionFrame(pExceptionRecord, pNewContext); |
5738 | } |
5739 | |
5740 | FixupDispatcherContext(pDispatcherContext, pNewContext, pContextRecord); |
5741 | |
5742 | END_CONTRACT_VIOLATION; |
5743 | |
5744 | // Returning ExceptionCollidedUnwind will cause the OS to take our new context record |
5745 | // and dispatcher context and restart the exception dispatching on this call frame, |
5746 | // which is exactly the behavior we want in order to restore our thread's unwindability |
5747 | // (which was broken when we whacked the IP to get control over the thread) |
5748 | return ExceptionCollidedUnwind; |
5749 | } |
5750 | #endif // !FEATURE_PAL |
5751 | |
5752 | #ifdef _DEBUG |
5753 | // IsSafeToUnwindFrameChain: |
5754 | // Arguments: |
5755 | // pThread the Thread* being unwound |
5756 | // MemoryStackFpForFrameChain the stack limit to unwind the Frames |
5757 | // Returns |
5758 | // FALSE if the value MemoryStackFpForFrameChain falls between an M2U transition frame
5759 | // and its corresponding managed method stack pointer |
5760 | // TRUE otherwise. |
5761 | // |
5762 | // If the managed method will *NOT* be unwound by the current exception |
5763 | // pass we have an error: with no Frame on the stack to report it, the |
5764 | // managed method will not be included in the next stack walk. |
5765 | // An example of running into this issue was DDBug 1133, where |
5766 | // TransparentProxyStubIA64 had a personality routine that removed a |
5767 | // transition frame. As a consequence the managed method did not |
5768 | // participate in the stack walk until the exception handler was called. At |
5769 | // that time the stack walking code was able to see the managed method again |
5770 | // but by this time all references from this managed method were stale. |
5771 | BOOL IsSafeToUnwindFrameChain(Thread* pThread, LPVOID MemoryStackFpForFrameChain) |
5772 | { |
5773 | // Look for the last Frame to be removed that marks a managed-to-unmanaged transition |
5774 | Frame* pLastFrameOfInterest = FRAME_TOP; |
5775 | for (Frame* pf = pThread->m_pFrame; pf < MemoryStackFpForFrameChain; pf = pf->PtrNextFrame()) |
5776 | { |
5777 | PCODE retAddr = pf->GetReturnAddress(); |
5778 | if (retAddr != NULL && ExecutionManager::IsManagedCode(retAddr)) |
5779 | { |
5780 | pLastFrameOfInterest = pf; |
5781 | } |
5782 | } |
5783 | |
5784 | // If there is none it's safe to remove all these Frames |
5785 | if (pLastFrameOfInterest == FRAME_TOP) |
5786 | { |
5787 | return TRUE; |
5788 | } |
5789 | |
5790 | // Otherwise "unwind" to managed method |
5791 | REGDISPLAY rd; |
5792 | CONTEXT ctx; |
5793 | SetIP(&ctx, 0); |
5794 | SetSP(&ctx, 0); |
5795 | FillRegDisplay(&rd, &ctx); |
5796 | pLastFrameOfInterest->UpdateRegDisplay(&rd); |
5797 | |
5798 | // We're safe only if the managed method will be unwound also |
5799 | LPVOID managedSP = dac_cast<PTR_VOID>(GetRegdisplaySP(&rd)); |
5800 | |
5801 | if (managedSP < MemoryStackFpForFrameChain) |
5802 | { |
5803 | return TRUE; |
5804 | } |
5805 | else |
5806 | { |
5807 | return FALSE; |
5808 | } |
5809 | |
5810 | } |
5811 | #endif // _DEBUG |
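     | 
     | // Illustrative sketch (comment only) of the layout IsSafeToUnwindFrameChain
     | // rejects. The stack grows toward lower addresses, so an unwind limit that
     | // separates a transition Frame from the managed frame it reports is unsafe:
     | //
     | //     lower addresses  (newer frames)
     | //         transition Frame            <- pf < MemoryStackFpForFrameChain: will be popped
     | //         MemoryStackFpForFrameChain  <- unwind limit
     | //         managedSP                   <- managed method survives, but loses its reporting Frame
     | //     higher addresses (older frames)
     | //
     | // The safe case is managedSP < MemoryStackFpForFrameChain, i.e. the managed
     | // method is unwound along with its Frame.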
5812 | |
5813 | |
5814 | void CleanUpForSecondPass(Thread* pThread, bool fIsSO, LPVOID MemoryStackFpForFrameChain, LPVOID MemoryStackFp) |
5815 | { |
5816 | WRAPPER_NO_CONTRACT; |
5817 | |
5818 | EH_LOG((LL_INFO100, "Exception is going into unmanaged code, unwinding frame chain to %p\n", MemoryStackFpForFrameChain));
5819 | |
5820 | // On AMD64 the establisher pointer is the live stack pointer, but on |
5821 | // IA64 and ARM it's the caller's stack pointer. It makes no difference, since there |
5822 | // is no Frame anywhere in CallDescrWorker's region of stack. |
5823 | |
5824 | // First make sure that unwinding the frame chain does not remove any transition frames |
5825 | // that report managed methods that will not be unwound. |
5826 | // If this assert fires, it's probably the personality routine of some assembly code that
5827 | // incorrectly removed a transition frame (more details in IsSafeToUnwindFrameChain).
5828 | // [Do not perform the IsSafeToUnwindFrameChain() check in the SO case, since |
5829 | // IsSafeToUnwindFrameChain() requires a large amount of stack space.] |
5830 | _ASSERTE(fIsSO || IsSafeToUnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain)); |
5831 | |
5832 | UnwindFrameChain(pThread, (Frame*)MemoryStackFpForFrameChain); |
5833 | |
5834 | // Only pop the trackers if this is not an SO. It's not safe to pop the trackers during EH for an SO. |
5835 | // Instead, we rely on the END_SO_TOLERANT_CODE macro to call ClearExceptionStateAfterSO(). Of course, |
5836 | // we may leak in the UMThunkStubCommon() case where we don't have this macro lower on the stack |
5837 | // (stack grows up). |
5838 | if (!fIsSO) |
5839 | { |
5840 | ExceptionTracker::PopTrackerIfEscaping((void*)MemoryStackFp); |
5841 | } |
5842 | } |
5843 | |
5844 | #ifdef FEATURE_PAL |
5845 | |
5846 | // This is a personality routine for TheUMEntryPrestub and UMThunkStub Unix asm stubs. |
5847 | // An exception propagating through these stubs is an unhandled exception. |
5848 | // This function dumps managed stack trace and terminates the current process. |
5849 | EXTERN_C _Unwind_Reason_Code |
5850 | UnhandledExceptionHandlerUnix( |
5851 | IN int version, |
5852 | IN _Unwind_Action action, |
5853 | IN uint64_t exceptionClass, |
5854 | IN struct _Unwind_Exception *exception, |
5855 | IN struct _Unwind_Context *context |
5856 | ) |
5857 | { |
5858 | // Unhandled exception happened, so dump the managed stack trace and terminate the process |
5859 | |
5860 | DefaultCatchHandler(NULL /*pExceptionInfo*/, NULL /*Throwable*/, TRUE /*useLastThrownObject*/, |
5861 | TRUE /*isTerminating*/, FALSE /*isThreadBaseFilter*/, FALSE /*sendAppDomainEvents*/);
5862 | |
5863 | EEPOLICY_HANDLE_FATAL_ERROR(COR_E_EXECUTIONENGINE); |
5864 | return _URC_FATAL_PHASE1_ERROR; |
5865 | } |
5866 | |
5867 | #else // FEATURE_PAL |
5868 | |
5869 | EXTERN_C EXCEPTION_DISPOSITION |
5870 | UMThunkUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord |
5871 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5872 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5873 | IN OUT PCONTEXT pContextRecord, |
5874 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5875 | ) |
5876 | { |
5877 | Thread* pThread = GetThread(); |
5878 | if (pThread == NULL) { |
5879 | return ExceptionContinueSearch; |
5880 | } |
5881 | |
5882 | bool fIsSO = IsSOExceptionCode(pExceptionRecord->ExceptionCode);
5884 | |
5885 | VALIDATE_BACKOUT_STACK_CONSUMPTION; |
5886 | |
5887 | if (IS_UNWINDING(pExceptionRecord->ExceptionFlags)) |
5888 | { |
5889 | if (fIsSO) |
5890 | { |
5891 | if (!pThread->PreemptiveGCDisabled()) |
5892 | { |
5893 | pThread->DisablePreemptiveGC(); |
5894 | } |
5895 | } |
5896 | // The VALIDATE_BACKOUT_STACK_CONSUMPTION makes sure that this function does not use more stack than the backout limit.
5897 | CONTRACT_VIOLATION(SOToleranceViolation); |
5898 | CleanUpForSecondPass(pThread, fIsSO, (void*)MemoryStackFp, (void*)MemoryStackFp); |
5899 | } |
5900 | |
5901 | // The asm stub put us into COOP mode, but we're about to scan unmanaged call frames,
5902 | // so unmanaged filters/handlers/etc. can run and we must be in preemptive (PREEMP) mode for that.
5903 | if (pThread->PreemptiveGCDisabled()) |
5904 | { |
5905 | if (fIsSO) |
5906 | { |
5907 | // We don't have enough stack to do the full-version EnablePreemptiveGC.
5908 | FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);
5909 | } |
5910 | else |
5911 | { |
5912 | pThread->EnablePreemptiveGC(); |
5913 | } |
5914 | } |
5915 | |
5916 | return ExceptionContinueSearch; |
5917 | } |
5918 | |
5919 | EXTERN_C EXCEPTION_DISPOSITION |
5920 | UMEntryPrestubUnwindFrameChainHandler( |
5921 | IN PEXCEPTION_RECORD pExceptionRecord |
5922 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5923 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5924 | IN OUT PCONTEXT pContextRecord, |
5925 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5926 | ) |
5927 | { |
5928 | EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler( |
5929 | pExceptionRecord, |
5930 | MemoryStackFp, |
5931 | pContextRecord, |
5932 | pDispatcherContext |
5933 | ); |
5934 | |
5935 | return disposition; |
5936 | } |
5937 | |
5938 | EXTERN_C EXCEPTION_DISPOSITION |
5939 | UMThunkStubUnwindFrameChainHandler( |
5940 | IN PEXCEPTION_RECORD pExceptionRecord |
5941 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5942 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5943 | IN OUT PCONTEXT pContextRecord, |
5944 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5945 | ) |
5946 | { |
5947 | |
5948 | #ifdef _DEBUG |
5949 | // If the exception is escaping the last CLR personality routine on the stack, |
5950 | // then set a flag on the thread to indicate so.
5951 | //
5952 | // We check for the thread object since this function is the personality routine of the UMThunk,
5953 | // and we can end up here even when thread creation (within the thunk) fails.
5954 | if (GetThread() != NULL) |
5955 | { |
5956 | SetReversePInvokeEscapingUnhandledExceptionStatus(IS_UNWINDING(pExceptionRecord->ExceptionFlags), |
5957 | MemoryStackFp |
5958 | ); |
5959 | } |
5960 | #endif // _DEBUG |
5961 | |
5962 | EXCEPTION_DISPOSITION disposition = UMThunkUnwindFrameChainHandler( |
5963 | pExceptionRecord, |
5964 | MemoryStackFp, |
5965 | pContextRecord, |
5966 | pDispatcherContext |
5967 | ); |
5968 | |
5969 | return disposition; |
5970 | } |
5971 | |
5972 | |
5973 | // This is the personality routine setup for the assembly helper (CallDescrWorker) that calls into |
5974 | // managed code. |
5975 | EXTERN_C EXCEPTION_DISPOSITION |
5976 | CallDescrWorkerUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord |
5977 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
5978 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
5979 | IN OUT PCONTEXT pContextRecord, |
5980 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
5981 | ) |
5982 | { |
5983 | |
5984 | Thread* pThread = GetThread(); |
5985 | _ASSERTE(pThread); |
5986 | |
5987 | if (IsSOExceptionCode(pExceptionRecord->ExceptionCode)) |
5988 | { |
5989 | if (IS_UNWINDING(pExceptionRecord->ExceptionFlags)) |
5990 | { |
5991 | GCX_COOP_NO_DTOR(); |
5992 | CleanUpForSecondPass(pThread, true, (void*)MemoryStackFp, (void*)MemoryStackFp); |
5993 | } |
5994 | |
5995 | FastInterlockAnd(&pThread->m_fPreemptiveGCDisabled, 0);
5996 | // We'll let the SO infrastructure handle this exception... at that point, we |
5997 | // know that we'll have enough stack to do it. |
5998 | return ExceptionContinueSearch; |
5999 | } |
6000 | |
6001 | EXCEPTION_DISPOSITION retVal = ProcessCLRException(pExceptionRecord, |
6002 | MemoryStackFp, |
6003 | pContextRecord, |
6004 | pDispatcherContext); |
6005 | |
6006 | // Our backout validation should ensure that we don't SO here. Add a |
6007 | // backout validation here. |
6008 | BEGIN_CONTRACT_VIOLATION(SOToleranceViolation); |
6009 | |
6010 | if (retVal == ExceptionContinueSearch) |
6011 | { |
6012 | |
6013 | if (IS_UNWINDING(pExceptionRecord->ExceptionFlags)) |
6014 | { |
6015 | CleanUpForSecondPass(pThread, false, (void*)MemoryStackFp, (void*)MemoryStackFp); |
6016 | } |
6017 | |
6018 | // We're scanning out from CallDescr and potentially through the EE and out to unmanaged. |
6019 | // So switch to preemptive mode. |
6020 | GCX_PREEMP_NO_DTOR(); |
6021 | } |
6022 | |
6023 | END_CONTRACT_VIOLATION; |
6024 | |
6025 | return retVal; |
6026 | } |
6027 | |
6028 | #endif // FEATURE_PAL |
6029 | |
6030 | #ifdef FEATURE_COMINTEROP |
6031 | EXTERN_C EXCEPTION_DISPOSITION |
6032 | ReverseComUnwindFrameChainHandler(IN PEXCEPTION_RECORD pExceptionRecord |
6033 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
6034 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
6035 | IN OUT PCONTEXT pContextRecord, |
6036 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
6037 | ) |
6038 | { |
6039 | if (IS_UNWINDING(pExceptionRecord->ExceptionFlags)) |
6040 | { |
6041 | ComMethodFrame::DoSecondPassHandlerCleanup(GetThread()->GetFrame()); |
6042 | } |
6043 | return ExceptionContinueSearch; |
6044 | } |
6045 | #endif // FEATURE_COMINTEROP |
6046 | |
6047 | #ifndef FEATURE_PAL |
6048 | EXTERN_C EXCEPTION_DISPOSITION |
6049 | FixRedirectContextHandler( |
6050 | IN PEXCEPTION_RECORD pExceptionRecord |
6051 | WIN64_ARG(IN ULONG64 MemoryStackFp) |
6052 | NOT_WIN64_ARG(IN ULONG MemoryStackFp), |
6053 | IN OUT PCONTEXT pContextRecord, |
6054 | IN OUT PDISPATCHER_CONTEXT pDispatcherContext |
6055 | ) |
6056 | { |
6057 | CONTRACTL |
6058 | { |
6059 | GC_NOTRIGGER; |
6060 | NOTHROW; |
6061 | MODE_ANY; |
6062 | SO_TOLERANT; |
6063 | } |
6064 | CONTRACTL_END; |
6065 | |
6066 | STRESS_LOG4(LF_EH, LL_INFO10, "FixRedirectContextHandler: sp %p, establisher %p, cxr: %p, disp cxr: %p\n",
6067 | GetSP(pDispatcherContext->ContextRecord), |
6068 | pDispatcherContext->EstablisherFrame, |
6069 | pContextRecord, |
6070 | pDispatcherContext->ContextRecord); |
6071 | |
6072 | VALIDATE_BACKOUT_STACK_CONSUMPTION; |
6073 | |
6074 | CONTEXT *pRedirectedContext = GetCONTEXTFromRedirectedStubStackFrame(pDispatcherContext); |
6075 | |
6076 | FixupDispatcherContext(pDispatcherContext, pRedirectedContext, pContextRecord); |
6077 | |
6078 | // Returning ExceptionCollidedUnwind will cause the OS to take our new context record |
6079 | // and dispatcher context and restart the exception dispatching on this call frame, |
6080 | // which is exactly the behavior we want in order to restore our thread's unwindability |
6081 | // (which was broken when we whacked the IP to get control over the thread) |
6082 | return ExceptionCollidedUnwind; |
6083 | } |
6084 | #endif // !FEATURE_PAL |
6085 | #endif // DACCESS_COMPILE |
6086 | |
6087 | void ExceptionTracker::StackRange::Reset() |
6088 | { |
6089 | LIMITED_METHOD_CONTRACT; |
6090 | |
6091 | m_sfLowBound.SetMaxVal(); |
6092 | m_sfHighBound.Clear(); |
6093 | } |
6094 | |
6095 | bool ExceptionTracker::StackRange::IsEmpty() |
6096 | { |
6097 | LIMITED_METHOD_CONTRACT; |
6098 | return (m_sfLowBound.IsMaxVal() && |
6099 | m_sfHighBound.IsNull()); |
6100 | } |
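     | 
     | // Illustrative note (comment only): Reset/IsEmpty encode "empty" as an
     | // inverted sentinel pair rather than a separate flag:
     | //
     | //     StackRange range;
     | //     range.Reset();              // m_sfLowBound = MaxVal, m_sfHighBound = Null
     | //     _ASSERTE(range.IsEmpty());  // both sentinels still in place
     | //
     | // IsConsistent() (debug-only, further below) treats these sentinels as legal
     | // states, alongside a properly ordered low <= high range.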
6101 | |
6102 | bool ExceptionTracker::StackRange::IsSupersededBy(StackFrame sf) |
6103 | { |
6104 | LIMITED_METHOD_CONTRACT; |
6105 | CONSISTENCY_CHECK(IsConsistent()); |
6106 | |
6107 | return (sf >= m_sfLowBound); |
6108 | } |
6109 | |
6110 | void ExceptionTracker::StackRange::CombineWith(StackFrame sfCurrent, StackRange* pPreviousRange) |
6111 | { |
6112 | LIMITED_METHOD_CONTRACT; |
6113 | |
6114 | if ((pPreviousRange->m_sfHighBound < sfCurrent) && IsEmpty()) |
6115 | { |
6116 | // This case comes from an unusual situation. It is possible for a new nested tracker to start its |
6117 | // first pass at a higher SP than any previously scanned frame in the previous "enclosing" tracker. |
6118 | // Typically this doesn't happen because the ProcessCLRException callback is made multiple times for |
6119 | // the frame where the nesting first occurs and that will ensure that the stack range of the new |
6120 | // nested exception is extended to contain the scan range of the previous tracker's scan. However, |
6121 | // if the exception dispatch calls a C++ handler (e.g. a finally) and then that handler tries to |
6122 | // reverse-pinvoke into the runtime, AND we trigger an exception (e.g. ThreadAbort) |
6123 | // before we reach another managed frame (which would have the CLR personality |
6124 | // routine associated with it), the first callback to ProcessCLRException for this new exception |
6125 | // will occur on a frame that has never been seen before by the current tracker. |
6126 | // |
6127 | // So in this case, we'll see an sfCurrent that is larger than the previous tracker's high bound and
6128 | // we'll have an empty scan range for the current tracker. And we'll just need to pre-init the
6129 | // scanned stack range for the new tracker to the previous tracker's range. This maintains the
6130 | // invariant that the scanned range for nested trackers completely covers the scanned range of their
6131 | // previous tracker once they "escape" the previous tracker.
6132 | STRESS_LOG3(LF_EH, LL_INFO100, |
6133 | "Initializing current StackRange with previous tracker's StackRange. sfCurrent: %p, prev low: %p, prev high: %p\n" , |
6134 | sfCurrent.SP, pPreviousRange->m_sfLowBound.SP, pPreviousRange->m_sfHighBound.SP); |
6135 | |
6136 | *this = *pPreviousRange; |
6137 | } |
6138 | else |
6139 | { |
6140 | #ifdef FEATURE_PAL |
6141 | // When the current range is empty, copy the low bound too. Otherwise a degenerate range would get |
6142 | // created, and tests for a stack frame in the stack range would always fail.
6143 | // TODO: Check if we could enable it for non-PAL as well. |
6144 | if (IsEmpty()) |
6145 | { |
6146 | m_sfLowBound = pPreviousRange->m_sfLowBound; |
6147 | } |
6148 | #endif // FEATURE_PAL |
6149 | m_sfHighBound = pPreviousRange->m_sfHighBound; |
6150 | } |
6151 | } |
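     | 
     | // Illustrative walkthrough (comment only) of CombineWith, with SPs as small
     | // numbers. Suppose the previous tracker scanned [low=10, high=40]:
     | //
     | //   - If the current range is empty and sfCurrent = 50 (above the previous
     | //     high bound), the whole [10, 40] range is inherited via *this = *pPreviousRange.
     | //   - Otherwise only m_sfHighBound is pulled up to 40 (plus, on PAL, the low
     | //     bound when the current range is empty), so the combined range still
     | //     covers everything the previous tracker scanned.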
6152 | |
6153 | bool ExceptionTracker::StackRange::Contains(StackFrame sf) |
6154 | { |
6155 | LIMITED_METHOD_CONTRACT; |
6156 | CONSISTENCY_CHECK(IsConsistent()); |
6157 | |
6158 | return ((m_sfLowBound <= sf) && |
6159 | (sf <= m_sfHighBound)); |
6160 | } |
6161 | |
6162 | void ExceptionTracker::StackRange::ExtendUpperBound(StackFrame sf) |
6163 | { |
6164 | LIMITED_METHOD_CONTRACT; |
6165 | CONSISTENCY_CHECK(IsConsistent()); |
6166 | CONSISTENCY_CHECK(sf > m_sfHighBound); |
6167 | |
6168 | m_sfHighBound = sf; |
6169 | } |
6170 | |
6171 | void ExceptionTracker::StackRange::ExtendLowerBound(StackFrame sf) |
6172 | { |
6173 | LIMITED_METHOD_CONTRACT; |
6174 | CONSISTENCY_CHECK(IsConsistent()); |
6175 | CONSISTENCY_CHECK(sf < m_sfLowBound); |
6176 | |
6177 | m_sfLowBound = sf; |
6178 | } |
6179 | |
6180 | void ExceptionTracker::StackRange::TrimLowerBound(StackFrame sf) |
6181 | { |
6182 | LIMITED_METHOD_CONTRACT; |
6183 | CONSISTENCY_CHECK(IsConsistent()); |
6184 | CONSISTENCY_CHECK(sf >= m_sfLowBound); |
6185 | |
6186 | m_sfLowBound = sf; |
6187 | } |
6188 | |
6189 | StackFrame ExceptionTracker::StackRange::GetLowerBound() |
6190 | { |
6191 | LIMITED_METHOD_CONTRACT; |
6192 | CONSISTENCY_CHECK(IsConsistent()); |
6193 | |
6194 | return m_sfLowBound; |
6195 | } |
6196 | |
6197 | StackFrame ExceptionTracker::StackRange::GetUpperBound() |
6198 | { |
6199 | LIMITED_METHOD_CONTRACT; |
6200 | CONSISTENCY_CHECK(IsConsistent()); |
6201 | |
6202 | return m_sfHighBound; |
6203 | } |
6204 | |
6205 | #ifdef _DEBUG |
6206 | bool ExceptionTracker::StackRange::IsDisjointWithAndLowerThan(StackRange* pOtherRange) |
6207 | { |
6208 | CONSISTENCY_CHECK(IsConsistent()); |
6209 | CONSISTENCY_CHECK(pOtherRange->IsConsistent()); |
6210 | |
6211 | return m_sfHighBound < pOtherRange->m_sfLowBound; |
6212 | } |
6213 | |
6214 | #endif // _DEBUG |
6215 | |
6216 | |
6217 | #ifdef _DEBUG |
6218 | bool ExceptionTracker::StackRange::IsConsistent() |
6219 | { |
6220 | LIMITED_METHOD_CONTRACT; |
6221 | if (m_sfLowBound.IsMaxVal() || |
6222 | m_sfHighBound.IsNull()) |
6223 | { |
6224 | return true; |
6225 | } |
6226 | |
6227 | if (m_sfLowBound <= m_sfHighBound) |
6228 | { |
6229 | return true; |
6230 | } |
6231 | |
6232 | LOG((LF_EH, LL_ERROR, "sp: low: %p high: %p\n", m_sfLowBound.SP, m_sfHighBound.SP));
6233 | |
6234 | return false; |
6235 | } |
6236 | #endif // _DEBUG |
6237 | |
6238 | // Determine if the given StackFrame is in the stack region unwound by the specified ExceptionTracker. |
6239 | // This is used by the stackwalker to skip funclets. Refer to the calls to this method in StackWalkFramesEx() |
6240 | // for more information. |
6241 | // |
6242 | // Effectively, this will make the stackwalker skip all the frames until it reaches the frame |
6243 | // containing the funclet. Details of the skipping logic are described in the method implementation. |
6244 | // |
6245 | // static |
6246 | bool ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(CrawlFrame * pCF, PTR_ExceptionTracker pExceptionTracker) |
6247 | { |
6248 | LIMITED_METHOD_CONTRACT; |
6249 | |
6250 | _ASSERTE(pCF != NULL); |
6251 | |
6252 | // The tracker must be in the second pass, and its stack range must not be empty. |
6253 | if ( (pExceptionTracker == NULL) || |
6254 | pExceptionTracker->IsInFirstPass() || |
6255 | pExceptionTracker->m_ScannedStackRange.IsEmpty()) |
6256 | { |
6257 | return false; |
6258 | } |
6259 | |
6260 | CallerStackFrame csfToCheck; |
6261 | if (pCF->IsFrameless()) |
6262 | { |
6263 | csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet()); |
6264 | } |
6265 | else |
6266 | { |
6267 | csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame()); |
6268 | } |
6269 | |
6270 | StackFrame sfLowerBound = pExceptionTracker->m_ScannedStackRange.GetLowerBound(); |
6271 | StackFrame sfUpperBound = pExceptionTracker->m_ScannedStackRange.GetUpperBound(); |
6272 | |
6273 | // |
6274 | // Let's take an example callstack that grows from left->right: |
6275 | // |
6276 | // M5 (50) -> M4 (40) -> M3 (30) -> M2 (20) -> M1 (10) -> throw
6277 | // |
6278 | // These are all managed frames, where M1 throws and the exception is caught |
6279 | // in M4. The numbers in the brackets are the values of the stack pointer after |
6280 | // the prolog is executed (or, in case of dynamic allocation, its SP after |
6281 | // dynamic allocation) and will be the SP at the time the callee function |
6282 | // is invoked. |
6283 | // |
6284 | // When the stackwalker is asked to skip funclets during the stackwalk, |
6285 | // it will skip all the frames on the stack until it reaches the frame |
6286 | // containing the funclet after it has identified the funclet from |
6287 | // which the skipping of frames needs to commence. |
6288 | // |
6289 | // At such a point, the exception tracker's scanned stack range's |
6290 | // lower bound will correspond to the frame that had the exception
6291 | // and the upper bound will correspond to the frame that had the funclet. |
6292 | // For scenarios like security stackwalk that may be triggered out of a |
6293 | // funclet (e.g. a catch block), skipping funclets and frames in this fashion |
6294 | // is expected to lead us to the parent frame containing the funclet as it |
6295 | // will contain an object of interest (e.g. security descriptor). |
6296 | // |
6297 | // The check below ensures that we skip the frames from the one that |
6298 | // had exception to the one that is the callee of the method containing |
6299 | // the funclet of interest. In the example above, this would mean skipping |
6300 | // from M1 to M3. |
6301 | // |
6302 | // We use CallerSP of a given CrawlFrame to perform such a skip. On AMD64, |
6303 | // the first frame where CallerSP will be greater than SP of the frame |
6304 | // itself will be when we reach the lowest frame itself (i.e. M1). On a similar |
6305 | // note, the only time when CallerSP of a given CrawlFrame will be equal to the |
6306 | // upper bound is when we reach the callee of the frame containing the funclet. |
6307 | // Thus, our check for the skip range is done by the following clause: |
6308 | // |
6309 | // if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound)) |
6310 | // |
6311 | // On ARM and ARM64, while the lower and upper bounds are populated using the Establisher |
6312 | // frame given by the OS during exception dispatch, they actually correspond to the |
6313 | // SP of the caller of a given frame, instead of being the SP of the given frame. |
6314 | // Thus, in the example, we will have lowerBound as 20 (corresponding to M1) and |
6315 | // upperBound as 50 (corresponding to M4 which contains the catch funclet). |
6316 | // |
6317 | // Thus, to skip frames on ARM and ARM64 until we reach the frame containing the funclet of
6318 | // interest, the skipping will be done by the following clause:
6319 | // |
6320 | // if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound)) |
6321 | // |
6322 | // The first time the CallerSP of a given CrawlFrame will be the same as the lowerBound
6323 | // is when we reach the first frame to be skipped. Likewise, the last frame whose
6324 | // CallerSP will be less than the upperBound will be the callee of the frame
6325 | // containing the funclet. When CallerSP is equal to the upperBound, we have reached |
6326 | // the frame containing the funclet and DO NOT want to skip it. Hence, "<" |
6327 | // in the 2nd part of the clause. |
6328 | |
6329 | // Remember that sfLowerBound and sfUpperBound are in the "OS format". |
6330 | // Refer to the comment for CallerStackFrame for more information. |
6331 | #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6332 | if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound)) |
6333 | #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6334 | if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound)) |
6335 | #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6336 | { |
6337 | return true; |
6338 | } |
6339 | else |
6340 | { |
6341 | return false; |
6342 | } |
6343 | } |
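     | 
     | // Illustrative check (comment only) of the two clauses against the M5..M1
     | // example above. On x64 the bounds are frame SPs (low=10 for M1, high=40 for
     | // M4); on ARM/ARM64 they are caller SPs (low=20, high=50). csfToCheck is the
     | // CallerSP of the frame being examined (M1->20, M2->30, M3->40, M4->50):
     | //
     | //     x64:        (10 <  csf) && (csf <= 40)  -> true for M1, M2, M3 only
     | //     ARM/ARM64:  (20 <= csf) && (csf <  50)  -> true for M1, M2, M3 only
     | //
     | // Both forms therefore skip exactly M1..M3 and stop at M4, the frame that
     | // contains the funclet.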
6344 | |
6345 | // Returns a bool indicating if the specified CrawlFrame has been unwound by the active exception. |
6346 | bool ExceptionTracker::IsInStackRegionUnwoundByCurrentException(CrawlFrame * pCF) |
6347 | { |
6348 | LIMITED_METHOD_CONTRACT; |
6349 | |
6350 | Thread * pThread = pCF->pThread; |
6351 | PTR_ExceptionTracker pCurrentTracker = pThread->GetExceptionState()->GetCurrentExceptionTracker(); |
6352 | return ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException(pCF, pCurrentTracker); |
6353 | } |
6354 | |
6355 | |
6356 | |
6357 | // Returns a bool indicating if the specified CrawlFrame has been unwound by any active (e.g. nested) exceptions. |
6358 | // |
6359 | // This method uses various fields of the ExceptionTracker data structure to do its work. Since this code runs on the thread |
6360 | // performing the GC stackwalk, it must be ensured that these fields are not updated on another thread in parallel. Thus, |
6361 | // any access to the fields in question that may result in updating them should happen in COOP mode. This provides a high-level |
6362 | // synchronization with the GC thread since when GC stackwalk is active, attempt to enter COOP mode will result in the thread blocking |
6363 | // and thus, attempts to update such fields will be synchronized. |
6364 | // |
6365 | // Currently, the following fields are used below: |
6366 | // |
6367 | // m_ExceptionFlags, m_ScannedStackRange, m_sfCurrentEstablisherFrame, m_sfLastUnwoundEstablisherFrame, |
6368 | // m_pInitialExplicitFrame, m_pLimitFrame, m_pPrevNestedInfo. |
6369 | // |
6370 | bool ExceptionTracker::HasFrameBeenUnwoundByAnyActiveException(CrawlFrame * pCF) |
6371 | { |
6372 | LIMITED_METHOD_CONTRACT; |
6373 | |
6374 | _ASSERTE(pCF != NULL); |
6375 | |
6376 | // Enumerate all (nested) exception trackers and see if any of them has unwound the |
6377 | // specified CrawlFrame. |
6378 | Thread * pTargetThread = pCF->pThread; |
6379 | PTR_ExceptionTracker pTopTracker = pTargetThread->GetExceptionState()->GetCurrentExceptionTracker(); |
6380 | PTR_ExceptionTracker pCurrentTracker = pTopTracker; |
6381 | |
6382 | bool fHasFrameBeenUnwound = false; |
6383 | |
6384 | while (pCurrentTracker != NULL) |
6385 | { |
6386 | bool fSkipCurrentTracker = false; |
6387 | |
6388 | // The tracker must be in the second pass, and its stack range must not be empty. |
6389 | if (pCurrentTracker->IsInFirstPass() || |
6390 | pCurrentTracker->m_ScannedStackRange.IsEmpty()) |
6391 | { |
6392 | fSkipCurrentTracker = true; |
6393 | } |
6394 | |
6395 | if (!fSkipCurrentTracker) |
6396 | { |
6397 | CallerStackFrame csfToCheck; |
6398 | bool fFrameless = false; |
6399 | if (pCF->IsFrameless()) |
6400 | { |
6401 | csfToCheck = CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet()); |
6402 | fFrameless = true; |
6403 | } |
6404 | else |
6405 | { |
6406 | csfToCheck = CallerStackFrame((UINT_PTR)pCF->GetFrame()); |
6407 | } |
6408 | |
6409 | STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "CrawlFrame (%p): Frameless: %s %s: %p\n",
6410 | pCF, fFrameless ? "Yes" : "No", fFrameless ? "CallerSP" : "Address", csfToCheck.SP);
6411 | |
6412 | StackFrame sfLowerBound = pCurrentTracker->m_ScannedStackRange.GetLowerBound(); |
6413 | StackFrame sfUpperBound = pCurrentTracker->m_ScannedStackRange.GetUpperBound(); |
6414 | StackFrame sfCurrentEstablisherFrame = pCurrentTracker->GetCurrentEstablisherFrame(); |
6415 | StackFrame sfLastUnwoundEstablisherFrame = pCurrentTracker->GetLastUnwoundEstablisherFrame(); |
6416 | |
6417 | STRESS_LOG4(LF_EH|LF_GCROOTS, LL_INFO100, "LowerBound/UpperBound/CurrentEstablisherFrame/LastUnwoundManagedFrame: %p/%p/%p/%p\n",
6418 | sfLowerBound.SP, sfUpperBound.SP, sfCurrentEstablisherFrame.SP, sfLastUnwoundEstablisherFrame.SP); |
6419 | |
6420 | // Refer to the detailed comment in ExceptionTracker::IsInStackRegionUnwoundBySpecifiedException on the nature |
6421 | // of this check. |
6422 | // |
6423 | #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6424 | if ((sfLowerBound < csfToCheck) && (csfToCheck <= sfUpperBound)) |
6425 | #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6426 | if ((sfLowerBound <= csfToCheck) && (csfToCheck < sfUpperBound)) |
6427 | #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6428 | { |
6429 | fHasFrameBeenUnwound = true; |
6430 | break; |
6431 | } |
6432 | |
6433 | // |
6434 | // The frame in question was not found to be covered by the scanned stack range of the exception tracker. |
6435 | // If the frame is managed, then it is possible that it forms the upper bound of the scanned stack range. |
6436 | // |
6437 | // The scanned stack range is updated by our personality routine once ExceptionTracker::ProcessOSExceptionNotification is invoked. |
6438 | // However, it is possible that we have unwound a frame and returned back to the OS (in preemptive mode) and: |
6439 | // |
6440 | // 1) Either our personality routine has been invoked for the subsequent upstack managed frame but it has not yet got a chance to update |
6441 | // the scanned stack range, OR |
6442 | // 2) We have simply returned to the kernel exception dispatch and yet to be invoked for a subsequent frame. |
6443 | // |
6444 | // In such a window, if we have been asked to check whether the frame forming the upper bound of the scanned stack range has been unwound,
6445 | // then perform the necessary validations.
6446 | // |
6447 | // This is applicable to managed frames only. |
6448 | if (fFrameless) |
6449 | { |
6450 | #ifndef STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6451 | // On X64, if the SP of the managed frame indicates that the frame is forming the upper bound, |
6452 | // then: |
6453 | // |
6454 | // For case (1) above, sfCurrentEstablisherFrame will be the same as the callerSP of the managed frame. |
6455 | // For case (2) above, sfLastUnwoundEstablisherFrame would be the same as the managed frame's SP (or upper bound).
6456 | // |
6457 | // For these scenarios, the frame is considered unwound. |
6458 | |
6459 | // For most cases which satisfy the above condition, GetRegdisplaySP(pCF->GetRegisterSet()) will be equal to sfUpperBound.SP.
6460 | // However, for frames where the SP is modified after the prolog (e.g. localloc), this might not be the case. For those scenarios,
6461 | // we need to check if sfUpperBound.SP is in between GetRegdisplaySP(pCF->GetRegisterSet()) and callerSp.
6462 | if (GetRegdisplaySP(pCF->GetRegisterSet()) <= sfUpperBound.SP && sfUpperBound < csfToCheck) |
6463 | { |
6464 | if (csfToCheck == sfCurrentEstablisherFrame) |
6465 | { |
6466 | fHasFrameBeenUnwound = true; |
6467 | break; |
6468 | } |
6469 | else if (sfUpperBound == sfLastUnwoundEstablisherFrame) |
6470 | { |
6471 | fHasFrameBeenUnwound = true; |
6472 | break; |
6473 | } |
6474 | } |
6475 | #else // !STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6476 | // On ARM, if the callerSP of the managed frame is the same as the upper bound, then:
6477 | //
6478 | // For case (1), sfCurrentEstablisherFrame will be above the callerSP of the managed frame (since the EstablisherFrame is the caller SP for a given frame on ARM).
6479 | // For case (2), the upper bound will be the same as sfLastUnwoundEstablisherFrame.
6480 | // |
6481 | // For these scenarios, the frame is considered unwound. |
6482 | if (sfUpperBound == csfToCheck) |
6483 | { |
6484 | if (csfToCheck < sfCurrentEstablisherFrame) |
6485 | { |
6486 | fHasFrameBeenUnwound = true; |
6487 | break; |
6488 | } |
6489 | else if (sfLastUnwoundEstablisherFrame == sfUpperBound) |
6490 | { |
6491 | fHasFrameBeenUnwound = true; |
6492 | break; |
6493 | } |
6494 | } |
6495 | #endif // STACK_RANGE_BOUNDS_ARE_CALLER_SP |
6496 | } |
6497 | |
6498 | // The frame in question does not appear in the current tracker's scanned stack range (of managed frames). |
6499 | // If the frame is an explicit frame, then check if it is equal to (or greater than) the initial explicit frame
6500 | // of the tracker. We can do this equality comparison because explicit frames are stack allocated. |
6501 | // |
6502 | // Do keep in mind that InitialExplicitFrame is only set in the 2nd (unwind) pass, which works |
6503 | // fine for the purpose of this method since it operates on exception trackers in the second pass only. |
6504 | if (!fFrameless) |
6505 | { |
6506 | PTR_Frame pInitialExplicitFrame = pCurrentTracker->GetInitialExplicitFrame(); |
6507 | PTR_Frame pLimitFrame = pCurrentTracker->GetLimitFrame(); |
6508 | |
6509 | #if !defined(DACCESS_COMPILE) |
6510 | STRESS_LOG2(LF_EH|LF_GCROOTS, LL_INFO100, "InitialExplicitFrame: %p, LimitFrame: %p\n", pInitialExplicitFrame, pLimitFrame);
6511 | #endif // !defined(DACCESS_COMPILE) |
6512 | |
6513 | // Ideally, we would like to perform a comparison check to determine if the |
6514 | // frame has been unwound. This, however, is based upon the premise that |
6515 | // each explicit frame that is added to the frame chain is at a lower |
6516 | // address than its predecessor.
6517 | // |
6518 | // This works for frames across function calls but if we have multiple |
6519 | // explicit frames in the same function, then the compiler is free to |
6520 | // assign an address it deems fit. Thus, it's entirely possible for a
6521 | // frame at the head of the frame chain to be at a higher address than
6522 | // its predecessor. This has been observed to be true with the VC++ compiler
6523 | // in the CLR ret build. |
6524 | // |
6525 | // To address this, we loop starting from the InitialExplicitFrame until we reach |
6526 | // the LimitFrame. Since all frames starting from the InitialExplicitFrame, and prior |
6527 | // to the LimitFrame, have been unwound, we break out of the loop if we find |
6528 | // the frame we are looking for, setting a flag indicating that the frame in question |
6529 | // was unwound. |
6530 | |
6531 | /*if ((sfInitialExplicitFrame <= csfToCheck) && (csfToCheck < sfLimitFrame)) |
6532 | { |
6533 | // The explicit frame falls in the range of explicit frames unwound by this tracker. |
6534 | fHasFrameBeenUnwound = true; |
6535 | break; |
6536 | }*/ |
6537 | |
6538 | // The pInitialExplicitFrame can be NULL on Unix right after we've unwound a sequence |
6539 | // of native frames in the second pass of exception unwinding, since the pInitialExplicitFrame |
6540 | // is cleared to make sure that it doesn't point to a frame that was destroyed during the |
6541 | // native frames unwinding. At that point, the csfToCheck could not have been unwound, |
6542 | // so we don't need to do any check. |
6543 | if (pInitialExplicitFrame != NULL) |
6544 | { |
6545 | PTR_Frame pFrameToCheck = (PTR_Frame)csfToCheck.SP; |
6546 | PTR_Frame pCurrentFrame = pInitialExplicitFrame; |
6547 | |
6548 | { |
6549 | while((pCurrentFrame != FRAME_TOP) && (pCurrentFrame != pLimitFrame)) |
6550 | { |
6551 | if (pCurrentFrame == pFrameToCheck) |
6552 | { |
6553 | fHasFrameBeenUnwound = true; |
6554 | break; |
6555 | } |
6556 | |
6557 | pCurrentFrame = pCurrentFrame->PtrNextFrame(); |
6558 | } |
6559 | } |
6560 | |
6561 | if (fHasFrameBeenUnwound)
6562 | { |
6563 | break; |
6564 | } |
6565 | } |
6566 | } |
6567 | } |
6568 | |
6569 | // Move to the next (previous) tracker |
6570 | pCurrentTracker = pCurrentTracker->GetPreviousExceptionTracker(); |
6571 | } |
6572 | |
6573 | if (fHasFrameBeenUnwound) |
6574 | STRESS_LOG0(LF_EH|LF_GCROOTS, LL_INFO100, "Has already been unwound\n");
6575 | |
6576 | return fHasFrameBeenUnwound; |
6577 | } |
6578 | |
6579 | //--------------------------------------------------------------------------------------- |
6580 | // |
6581 | // Given the CrawlFrame of the current frame, return a StackFrame representing the current frame. |
6582 | // This StackFrame should only be used in a check to see if the current frame is the parent method frame |
6583 | // of a particular funclet. Don't use the returned StackFrame in any other way except to pass it back to |
6584 | // ExceptionTracker::IsUnwoundToTargetParentFrame(). The comparison logic is very platform-dependent. |
6585 | // |
6586 | // Arguments: |
6587 | // pCF - the CrawlFrame for the current frame |
6588 | // |
6589 | // Return Value: |
6590 | // Return a StackFrame for parent frame check |
6591 | // |
6592 | // Notes: |
6593 | // Don't use the returned StackFrame in any other way. |
6594 | // |
6595 | |
6596 | //static |
6597 | StackFrame ExceptionTracker::GetStackFrameForParentCheck(CrawlFrame * pCF) |
6598 | { |
6599 | WRAPPER_NO_CONTRACT; |
6600 | |
6601 | StackFrame sfResult; |
6602 | |
6603 | // Returns the CrawlFrame's caller's SP - this is used to determine if we have |
6604 | // reached the intended CrawlFrame (or not).
6605 | |
6606 | // sfParent is returned by the EH subsystem, which uses the OS format, i.e. the initial SP before |
6607 | // any dynamic stack allocation. The stackwalker uses the current SP, i.e. the SP after all |
6608 | // dynamic stack allocations. Thus, we cannot do an equality check. Instead, we get the |
6609 | // CallerStackFrame, which is the caller SP. |
6610 | sfResult = (StackFrame)CallerStackFrame::FromRegDisplay(pCF->GetRegisterSet()); |
6611 | |
6612 | return sfResult; |
6613 | } |
6614 | |
6615 | //--------------------------------------------------------------------------------------- |
6616 | // |
6617 | // Given the StackFrame of a parent method frame, determine if we have unwound to it during stackwalking yet. |
6618 | // The StackFrame should be the return value of one of the FindParentStackFrameFor*() functions. |
6619 | // Refer to the comment for UnwindStackFrame for more information. |
6620 | // |
6621 | // Arguments: |
6622 | // pCF - the CrawlFrame of the current frame |
6623 | // sfParent - the StackFrame of the target parent method frame, |
6624 | // returned by one of the FindParentStackFrameFor*() functions |
6625 | // |
6626 | // Return Value: |
6627 | // whether we have unwound to the target parent method frame |
6628 | // |
6629 | |
6630 | // static |
6631 | bool ExceptionTracker::IsUnwoundToTargetParentFrame(CrawlFrame * pCF, StackFrame sfParent) |
6632 | { |
6633 | CONTRACTL |
6634 | { |
6635 | NOTHROW; |
6636 | GC_NOTRIGGER; |
6637 | MODE_ANY; |
6638 | PRECONDITION( CheckPointer(pCF, NULL_NOT_OK) ); |
6639 | PRECONDITION( pCF->IsFrameless() ); |
6640 | PRECONDITION( pCF->GetRegisterSet()->IsCallerContextValid || pCF->GetRegisterSet()->IsCallerSPValid ); |
6641 | } |
6642 | CONTRACTL_END; |
6643 | |
6644 | StackFrame sfToCheck = GetStackFrameForParentCheck(pCF); |
6645 | return IsUnwoundToTargetParentFrame(sfToCheck, sfParent); |
6646 | } |
6647 | |
6648 | // static |
6649 | bool ExceptionTracker::IsUnwoundToTargetParentFrame(StackFrame sfToCheck, StackFrame sfParent) |
6650 | { |
6651 | LIMITED_METHOD_CONTRACT; |
6652 | |
6653 | return (sfParent == sfToCheck); |
6654 | } |
6655 | |
6656 | // Given the CrawlFrame for a funclet frame, return the frame pointer of the enclosing funclet frame. |
6657 | // For filter funclet frames and normal method frames, this function returns a NULL StackFrame. |
6658 | // |
6659 | // <WARNING> |
6660 | // It is not valid to call this function on an arbitrary funclet. You have to be doing a full stackwalk from |
6661 | // the leaf frame and skipping method frames as indicated by the return value of this function. This function |
6662 | // relies on the ExceptionTrackers, which are collapsed in the second pass when a nested exception escapes. |
6663 | // When this happens, we'll lose information on the funclet represented by the collapsed tracker. |
6664 | // </WARNING> |
6665 | // |
6666 | // Return Value: |
6667 | // StackFrame.IsNull() - no skipping is necessary |
6668 | // StackFrame.IsMaxVal() - skip one frame and then ask again |
6669 | // Anything else - skip to the method frame indicated by the return value and ask again |
6670 | // |
6671 | // static |
6672 | StackFrame ExceptionTracker::FindParentStackFrameForStackWalk(CrawlFrame* pCF, bool fForGCReporting /*= false */) |
6673 | { |
6674 | WRAPPER_NO_CONTRACT; |
6675 | |
6676 | // We should never skip filter funclets. However, if we are stackwalking for GC reference |
6677 | // reporting, then we need to get the stackframe of the parent frame (where the filter was |
6678 | // invoked from) so that when we reach it, we can indicate that the filter has already |
6679 | // performed the reporting. |
6680 | // |
6681 | // Thus, for GC reporting purposes, get filter's parent frame. |
6682 | if (pCF->IsFilterFunclet() && (!fForGCReporting)) |
6683 | { |
6684 | return StackFrame(); |
6685 | } |
6686 | else |
6687 | { |
6688 | return FindParentStackFrameHelper(pCF, NULL, NULL, NULL, fForGCReporting); |
6689 | } |
6690 | } |
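     | 
     | // Illustrative sketch (comment only): a stackwalker consuming the return-value
     | // protocol documented above would branch roughly like this:
     | //
     | //     StackFrame sfParent = ExceptionTracker::FindParentStackFrameForStackWalk(pCF);
     | //     if (sfParent.IsNull())        { /* no skipping; process this frame */ }
     | //     else if (sfParent.IsMaxVal()) { /* skip one frame, then ask again  */ }
     | //     else                          { /* skip frames until the CallerSP matches sfParent */ }
     | //
     | // The real consumer is the stackwalker's funclet-skipping logic.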
6691 | |
6692 | // Given the CrawlFrame for a filter funclet frame, return the frame pointer of the parent method frame. |
6693 | // It also returns the relative offset and the caller SP of the parent method frame. |
6694 | // |
6695 | // <WARNING> |
6696 | // The same warning for FindParentStackFrameForStackWalk() also applies here. Moreover, although
6697 | // this function seems to be more convenient, it may potentially trigger a full stackwalk! Do not |
6698 | // call this unless you know absolutely what you are doing. In most cases FindParentStackFrameForStackWalk() |
6699 | // is what you need. |
6700 | // </WARNING> |
6701 | // |
6702 | // Return Value: |
6703 | // StackFrame.IsNull() - no skipping is necessary |
6704 | // Anything else - the StackFrame of the parent method frame |
6705 | // |
6706 | // static |
6707 | StackFrame ExceptionTracker::FindParentStackFrameEx(CrawlFrame* pCF, |
6708 | DWORD* pParentOffset, |
6709 | UINT_PTR* pParentCallerSP) |
6710 | { |
6711 | CONTRACTL |
6712 | { |
6713 | NOTHROW; |
6714 | GC_NOTRIGGER; |
6715 | MODE_ANY; |
6716 | PRECONDITION( pCF != NULL ); |
6717 | PRECONDITION( pCF->IsFilterFunclet() ); |
6718 | } |
6719 | CONTRACTL_END; |
6720 | |
6721 | bool fRealParent = false; |
6722 | StackFrame sfResult = ExceptionTracker::FindParentStackFrameHelper(pCF, &fRealParent, pParentOffset, pParentCallerSP); |
6723 | |
6724 | if (fRealParent) |
6725 | { |
6726 | // If the enclosing method is the parent method, then we are done. |
6727 | return sfResult; |
6728 | } |
6729 | else |
6730 | { |
6731 | // Otherwise we need to do a full stackwalk to find the parent method frame. |
6732 | // This should only happen if we are calling a filter inside a funclet. |
6733 | return ExceptionTracker::RareFindParentStackFrame(pCF, pParentOffset, pParentCallerSP); |
6734 | } |
6735 | } |
6736 | |
6737 | // static |
6738 | StackFrame ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(CrawlFrame *pCF) |
6739 | { |
6740 | CONTRACTL |
6741 | { |
6742 | NOTHROW; |
6743 | GC_NOTRIGGER; |
6744 | MODE_ANY; |
6745 | PRECONDITION(pCF != NULL); |
6746 | PRECONDITION(pCF->IsFunclet() && (!pCF->IsFilterFunclet())); |
6747 | } |
6748 | CONTRACTL_END; |
6749 | |
6750 | PREGDISPLAY pRD = pCF->GetRegisterSet(); |
6751 | |
6752 | // Ensure that the caller Context is valid. |
6753 | _ASSERTE(pRD->IsCallerContextValid); |
6754 | |
6755 | // Make a copy of the caller context |
6756 | T_CONTEXT tempContext; |
6757 | CopyOSContext(&tempContext, pRD->pCallerContext); |
6758 | |
6759 | // Now unwind it to get the context of the caller's caller. |
6760 | EECodeInfo codeInfo(dac_cast<PCODE>(GetIP(pRD->pCallerContext))); |
6761 | Thread::VirtualUnwindCallFrame(&tempContext, NULL, &codeInfo); |
6762 | |
6763 | StackFrame sfRetVal = StackFrame((UINT_PTR)(GetSP(&tempContext))); |
6764 | _ASSERTE(!sfRetVal.IsNull() && !sfRetVal.IsMaxVal()); |
6765 | |
6766 | return sfRetVal; |
6767 | } |
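     | 
     | // Illustrative sketch (comment only) of the two hops performed above for a
     | // non-exceptionally invoked funclet F called directly from its parent P:
     | //
     | //     F (funclet)  --caller context-->          P (parent)
     | //     P (parent)   --VirtualUnwindCallFrame-->  SP of P's caller == CallerSP of P
     | //
     | // That CallerSP is what identifies P for the parent-frame checks, matching the
     | // caller-SP convention used by IsUnwoundToTargetParentFrame above.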
6768 | |
6769 | // static |
6770 | StackFrame ExceptionTracker::FindParentStackFrameHelper(CrawlFrame* pCF, |
6771 | bool* pfRealParent, |
6772 | DWORD* pParentOffset, |
6773 | UINT_PTR* pParentCallerSP, |
6774 | bool fForGCReporting /* = false */) |
6775 | { |
6776 | CONTRACTL |
6777 | { |
6778 | NOTHROW; |
6779 | GC_NOTRIGGER; |
6780 | MODE_ANY; |
6781 | PRECONDITION( pCF != NULL ); |
6782 | PRECONDITION( pCF->IsFunclet() ); |
6783 | PRECONDITION( CheckPointer(pfRealParent, NULL_OK) ); |
6784 | PRECONDITION( CheckPointer(pParentOffset, NULL_OK) ); |
6785 | PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) ); |
6786 | } |
6787 | CONTRACTL_END; |
6788 | |
6789 | StackFrame sfResult; |
6790 | REGDISPLAY* pRegDisplay = pCF->GetRegisterSet(); |
6791 | |
6792 | // At this point, we need a valid caller SP, and CallerStackFrame::FromRegDisplay
6793 | // asserts that the RegDisplay contains one. |
6794 | CallerStackFrame csfCurrent = CallerStackFrame::FromRegDisplay(pRegDisplay); |
6795 | ExceptionTracker *pCurrentTracker = NULL; |
6796 | bool fIsFilterFunclet = pCF->IsFilterFunclet(); |
6797 | |
6798 | // We can't do this on an unmanaged thread. |
6799 | Thread* pThread = pCF->pThread; |
6800 | if (pThread == NULL) |
6801 | { |
6802 | _ASSERTE(!"FindParentStackFrame() called on an unmanaged thread" ); |
6803 | goto lExit; |
6804 | } |
6805 | |
6806 | // Check for out-of-line finally funclets. Filter funclets can't be out-of-line. |
6807 | if (!fIsFilterFunclet) |
6808 | { |
6809 | if (pRegDisplay->IsCallerContextValid) |
6810 | { |
6811 | PCODE callerIP = dac_cast<PCODE>(GetIP(pRegDisplay->pCallerContext)); |
6812 | BOOL fIsCallerInVM = FALSE; |
6813 | |
6814 | // Check if the caller IP is in mscorwks. If it is not, then it is an out-of-line finally. |
6815 | // Normally, the caller of a finally is ExceptionTracker::CallHandler(). |
6816 | #ifdef FEATURE_PAL |
6817 | fIsCallerInVM = !ExecutionManager::IsManagedCode(callerIP); |
6818 | #else |
6819 | #if defined(DACCESS_COMPILE) |
6820 | HMODULE_TGT hEE = DacGlobalBase(); |
6821 | #else // !DACCESS_COMPILE |
6822 | HMODULE_TGT hEE = g_pMSCorEE; |
6823 | #endif // !DACCESS_COMPILE |
6824 | fIsCallerInVM = IsIPInModule(hEE, callerIP); |
6825 | #endif // FEATURE_PAL |
6826 | |
6827 | if (!fIsCallerInVM) |
6828 | { |
6829 | if (!fForGCReporting) |
6830 | { |
6831 | sfResult.SetMaxVal(); |
6832 | goto lExit; |
6833 | } |
6834 | else |
6835 | { |
6836 | // We have run into a non-exceptionally invoked finally funclet (aka out-of-line finally funclet). |
6837 | // Since these funclets are invoked from JITted code, we will not find their EnclosingClauseCallerSP |
6838 | // in an exception tracker as one does not exist (remember, these funclets are invoked "non"-exceptionally). |
6839 | // |
6840 | // At this point, the caller context is that of the parent frame of the funclet. All we need is the CallerSP |
6841 | // of that parent. We leverage a helper function that will perform an unwind against the caller context |
6842 | // and return us the SP (of the caller of the funclet's parent). |
6843 | StackFrame sfCallerSPOfFuncletParent = ExceptionTracker::GetCallerSPOfParentOfNonExceptionallyInvokedFunclet(pCF); |
6844 | return sfCallerSPOfFuncletParent; |
6845 | } |
6846 | } |
6847 | } |
6848 | } |
6849 | |
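    // Walk the thread's exception trackers from the most recent (innermost) to the
    // oldest, looking for the tracker whose EH clause was invoked with the current
    // caller SP.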
6850 | for (pCurrentTracker = pThread->GetExceptionState()->m_pCurrentTracker; |
6851 | pCurrentTracker != NULL; |
6852 | pCurrentTracker = pCurrentTracker->m_pPrevNestedInfo) |
6853 | { |
6854 | // Check if the tracker has just been created. |
6855 | if (pCurrentTracker->m_ScannedStackRange.IsEmpty()) |
6856 | { |
6857 | continue; |
6858 | } |
6859 | |
6860 | // Since the current frame is a non-filter funclet, determine if its caller is the same one |
6861 | // as was saved against the exception tracker before the funclet was invoked in ExceptionTracker::CallHandler. |
6862 | CallerStackFrame csfFunclet = pCurrentTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause(); |
6863 | if (csfCurrent == csfFunclet) |
6864 | { |
6865 | // The EnclosingClauseCallerSP is initialized in ExceptionTracker::ProcessManagedCallFrame, just before |
6866 | // invoking the funclets. Basically, we are using the SP of the caller of the frame containing the funclet |
6867 | // to determine if we have reached the frame containing the funclet. |
6868 | EnclosingClauseInfo srcEnclosingClause = (fForGCReporting) ? pCurrentTracker->m_EnclosingClauseInfoForGCReporting |
6869 | : pCurrentTracker->m_EnclosingClauseInfo; |
6870 | sfResult = (StackFrame)(CallerStackFrame(srcEnclosingClause.GetEnclosingClauseCallerSP())); |
6871 | |
6872 | // Check whether the tracker has called any funclet yet. |
6873 | if (sfResult.IsNull()) |
6874 | { |
6875 | continue; |
6876 | } |
6877 | |
6878 | // Set the relevant information. |
6879 | if (pfRealParent != NULL) |
6880 | { |
6881 | *pfRealParent = !srcEnclosingClause.EnclosingClauseIsFunclet(); |
6882 | } |
6883 | if (pParentOffset != NULL) |
6884 | { |
6885 | *pParentOffset = srcEnclosingClause.GetEnclosingClauseOffset(); |
6886 | } |
6887 | if (pParentCallerSP != NULL) |
6888 | { |
6889 | *pParentCallerSP = srcEnclosingClause.GetEnclosingClauseCallerSP(); |
6890 | } |
6891 | |
6892 | break; |
6893 | } |
        // Check if this tracker was collapsed with another tracker and, if so, whether the
        // caller of the funclet's EH clause for the collapsed tracker matches.
6895 | else if (fForGCReporting && !(pCurrentTracker->m_csfEHClauseOfCollapsedTracker.IsNull()) && csfCurrent == pCurrentTracker->m_csfEHClauseOfCollapsedTracker) |
6896 | { |
6897 | EnclosingClauseInfo srcEnclosingClause = pCurrentTracker->m_EnclosingClauseInfoOfCollapsedTracker; |
6898 | sfResult = (StackFrame)(CallerStackFrame(srcEnclosingClause.GetEnclosingClauseCallerSP())); |
6899 | |
6900 | _ASSERTE(!sfResult.IsNull()); |
6901 | |
            break;
        }
6905 | } |
6906 | |
6907 | lExit: ; |
6908 | |
    STRESS_LOG3(LF_EH|LF_GCROOTS, LL_INFO100, "Returning 0x%p as the parent stack frame for %s 0x%p\n",
        sfResult.SP, fIsFilterFunclet ? "filter funclet" : "funclet", csfCurrent.SP);
6911 | |
6912 | return sfResult; |
6913 | } |
6914 | |
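// State passed to RareFindParentStackFrameCallback() during the stackwalk.
// m_sfTarget is the funclet frame to locate; once it has been found
// (m_fFoundTarget), the callback performs the normal funclet skipping until it
// reaches the parent method frame, recording it in m_sfParent along with the
// parent's relative offset and CallerSP.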
6915 | struct RareFindParentStackFrameCallbackState |
6916 | { |
6917 | StackFrame m_sfTarget; |
6918 | StackFrame m_sfParent; |
6919 | bool m_fFoundTarget; |
6920 | DWORD m_dwParentOffset; |
6921 | UINT_PTR m_uParentCallerSP; |
6922 | }; |
6923 | |
6924 | // This is the callback for the stackwalk to get the parent stack frame for a filter funclet. |
6925 | // |
6926 | // static |
6927 | StackWalkAction ExceptionTracker::RareFindParentStackFrameCallback(CrawlFrame* pCF, LPVOID pData) |
6928 | { |
6929 | CONTRACTL |
6930 | { |
6931 | NOTHROW; |
6932 | GC_NOTRIGGER; |
6933 | MODE_ANY; |
6934 | } |
6935 | CONTRACTL_END; |
6936 | |
6937 | RareFindParentStackFrameCallbackState* pState = (RareFindParentStackFrameCallbackState*)pData; |
6938 | |
    // In all cases, we don't care about explicit frames.
6940 | if (!pCF->IsFrameless()) |
6941 | { |
6942 | return SWA_CONTINUE; |
6943 | } |
6944 | |
6945 | REGDISPLAY* pRegDisplay = pCF->GetRegisterSet(); |
6946 | StackFrame sfCurrent = StackFrame::FromRegDisplay(pRegDisplay); |
6947 | |
6948 | // Check if we have reached the target already. |
6949 | if (!pState->m_fFoundTarget) |
6950 | { |
6951 | if (sfCurrent != pState->m_sfTarget) |
6952 | { |
6953 | return SWA_CONTINUE; |
6954 | } |
6955 | |
6956 | pState->m_fFoundTarget = true; |
6957 | } |
6958 | |
    // We have reached the target; now do the normal frame skipping.
6960 | if (!pState->m_sfParent.IsNull()) |
6961 | { |
6962 | if (pState->m_sfParent.IsMaxVal() || IsUnwoundToTargetParentFrame(pCF, pState->m_sfParent)) |
6963 | { |
6964 | // We have reached the specified method frame to skip to. |
6965 | // Now clear the flag and ask again. |
6966 | pState->m_sfParent.Clear(); |
6967 | } |
6968 | } |
6969 | |
6970 | if (pState->m_sfParent.IsNull() && pCF->IsFunclet()) |
6971 | { |
6972 | pState->m_sfParent = ExceptionTracker::FindParentStackFrameHelper(pCF, NULL, NULL, NULL); |
6973 | } |
6974 | |
6975 | // If we still need to skip, then continue the stackwalk. |
6976 | if (!pState->m_sfParent.IsNull()) |
6977 | { |
6978 | return SWA_CONTINUE; |
6979 | } |
6980 | |
6981 | // At this point, we are done. |
6982 | pState->m_sfParent = ExceptionTracker::GetStackFrameForParentCheck(pCF); |
6983 | pState->m_dwParentOffset = pCF->GetRelOffset(); |
6984 | |
6985 | _ASSERTE(pRegDisplay->IsCallerContextValid); |
6986 | pState->m_uParentCallerSP = GetSP(pRegDisplay->pCallerContext); |
6987 | |
6988 | return SWA_ABORT; |
6989 | } |
6990 | |
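// Performs a full stackwalk, starting at the funclet frame described by pCF, to
// locate the parent method frame when the fast path in FindParentStackFrameHelper()
// cannot determine it (i.e. when a filter is invoked inside a funclet).
//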
6991 | // static |
6992 | StackFrame ExceptionTracker::RareFindParentStackFrame(CrawlFrame* pCF, |
6993 | DWORD* pParentOffset, |
6994 | UINT_PTR* pParentCallerSP) |
6995 | { |
6996 | CONTRACTL |
6997 | { |
6998 | NOTHROW; |
6999 | GC_NOTRIGGER; |
7000 | MODE_ANY; |
7001 | PRECONDITION( pCF != NULL ); |
7002 | PRECONDITION( pCF->IsFunclet() ); |
7003 | PRECONDITION( CheckPointer(pParentOffset, NULL_OK) ); |
7004 | PRECONDITION( CheckPointer(pParentCallerSP, NULL_OK) ); |
7005 | } |
7006 | CONTRACTL_END; |
7007 | |
7008 | Thread* pThread = pCF->pThread; |
7009 | |
7010 | RareFindParentStackFrameCallbackState state; |
7011 | state.m_sfParent.Clear(); |
7012 | state.m_sfTarget = StackFrame::FromRegDisplay(pCF->GetRegisterSet()); |
7013 | state.m_fFoundTarget = false; |
7014 | |
7015 | PTR_Frame pFrame = pCF->pFrame; |
7016 | T_CONTEXT ctx; |
7017 | REGDISPLAY rd; |
7018 | CopyRegDisplay((const PREGDISPLAY)pCF->GetRegisterSet(), &rd, &ctx); |
7019 | |
7020 | pThread->StackWalkFramesEx(&rd, &ExceptionTracker::RareFindParentStackFrameCallback, &state, 0, pFrame); |
7021 | |
7022 | if (pParentOffset != NULL) |
7023 | { |
7024 | *pParentOffset = state.m_dwParentOffset; |
7025 | } |
7026 | if (pParentCallerSP != NULL) |
7027 | { |
7028 | *pParentCallerSP = state.m_uParentCallerSP; |
7029 | } |
7030 | return state.m_sfParent; |
7031 | } |
7032 | |
7033 | ExceptionTracker::StackRange::StackRange() |
7034 | { |
7035 | WRAPPER_NO_CONTRACT; |
7036 | |
7037 | #ifndef DACCESS_COMPILE |
7038 | Reset(); |
#endif // !DACCESS_COMPILE
7040 | } |
7041 | |
7042 | ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo() |
7043 | { |
7044 | LIMITED_METHOD_CONTRACT; |
7045 | |
7046 | m_fEnclosingClauseIsFunclet = false; |
7047 | m_dwEnclosingClauseOffset = 0; |
7048 | m_uEnclosingClauseCallerSP = 0; |
7049 | } |
7050 | |
7051 | ExceptionTracker::EnclosingClauseInfo::EnclosingClauseInfo(bool fEnclosingClauseIsFunclet, |
7052 | DWORD dwEnclosingClauseOffset, |
7053 | UINT_PTR uEnclosingClauseCallerSP) |
7054 | { |
7055 | LIMITED_METHOD_CONTRACT; |
7056 | |
7057 | m_fEnclosingClauseIsFunclet = fEnclosingClauseIsFunclet; |
7058 | m_dwEnclosingClauseOffset = dwEnclosingClauseOffset; |
7059 | m_uEnclosingClauseCallerSP = uEnclosingClauseCallerSP; |
7060 | } |
7061 | |
7062 | bool ExceptionTracker::EnclosingClauseInfo::EnclosingClauseIsFunclet() |
7063 | { |
7064 | LIMITED_METHOD_CONTRACT; |
7065 | return m_fEnclosingClauseIsFunclet; |
7066 | } |
7067 | |
7068 | DWORD ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseOffset() |
7069 | { |
7070 | LIMITED_METHOD_CONTRACT; |
7071 | return m_dwEnclosingClauseOffset; |
7072 | } |
7073 | |
7074 | UINT_PTR ExceptionTracker::EnclosingClauseInfo::GetEnclosingClauseCallerSP() |
7075 | { |
7076 | LIMITED_METHOD_CONTRACT; |
7077 | return m_uEnclosingClauseCallerSP; |
7078 | } |
7079 | |
7080 | void ExceptionTracker::EnclosingClauseInfo::SetEnclosingClauseCallerSP(UINT_PTR callerSP) |
7081 | { |
7082 | LIMITED_METHOD_CONTRACT; |
7083 | m_uEnclosingClauseCallerSP = callerSP; |
7084 | } |
7085 | |
7086 | bool ExceptionTracker::EnclosingClauseInfo::operator==(const EnclosingClauseInfo & rhs) |
7087 | { |
7088 | LIMITED_METHOD_CONTRACT; |
7089 | SUPPORTS_DAC; |
7090 | |
7091 | return ((this->m_fEnclosingClauseIsFunclet == rhs.m_fEnclosingClauseIsFunclet) && |
7092 | (this->m_dwEnclosingClauseOffset == rhs.m_dwEnclosingClauseOffset) && |
7093 | (this->m_uEnclosingClauseCallerSP == rhs.m_uEnclosingClauseCallerSP)); |
7094 | } |
7095 | |
7096 | void ExceptionTracker::ReleaseResources() |
7097 | { |
7098 | #ifndef DACCESS_COMPILE |
7099 | if (m_hThrowable) |
7100 | { |
7101 | if (!CLRException::IsPreallocatedExceptionHandle(m_hThrowable)) |
7102 | { |
7103 | DestroyHandle(m_hThrowable); |
7104 | } |
7105 | m_hThrowable = NULL; |
7106 | } |
7107 | m_StackTraceInfo.FreeStackTrace(); |
7108 | |
7109 | #ifndef FEATURE_PAL |
7110 | // Clear any held Watson Bucketing details |
7111 | GetWatsonBucketTracker()->ClearWatsonBucketDetails(); |
7112 | #else // !FEATURE_PAL |
7113 | if (m_fOwnsExceptionPointers) |
7114 | { |
7115 | PAL_FreeExceptionRecords(m_ptrs.ExceptionRecord, m_ptrs.ContextRecord); |
7116 | m_fOwnsExceptionPointers = FALSE; |
7117 | } |
7118 | #endif // !FEATURE_PAL |
#endif // !DACCESS_COMPILE
7120 | } |
7121 | |
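// Records the enclosing clause information for this tracker. The current frame's
// details are always preserved first for GC reporting. For m_EnclosingClauseInfo,
// if this is a nested exception whose enclosing clause caller SP matches the
// previous tracker's EH clause caller SP, the previous tracker's enclosing clause
// info is simply propagated.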
7122 | void ExceptionTracker::SetEnclosingClauseInfo(bool fEnclosingClauseIsFunclet, |
7123 | DWORD dwEnclosingClauseOffset, |
7124 | UINT_PTR uEnclosingClauseCallerSP) |
7125 | { |
7126 | // Preserve the details of the current frame for GC reporting before |
7127 | // we apply the nested exception logic below. |
7128 | this->m_EnclosingClauseInfoForGCReporting = EnclosingClauseInfo(fEnclosingClauseIsFunclet, |
7129 | dwEnclosingClauseOffset, |
7130 | uEnclosingClauseCallerSP); |
7131 | if (this->m_pPrevNestedInfo != NULL) |
7132 | { |
7133 | PTR_ExceptionTracker pPrevTracker = this->m_pPrevNestedInfo; |
7134 | CallerStackFrame csfPrevEHClause = pPrevTracker->m_EHClauseInfo.GetCallerStackFrameForEHClause(); |
7135 | |
7136 | // Just propagate the information if this is a nested exception. |
7137 | if (csfPrevEHClause.SP == uEnclosingClauseCallerSP) |
7138 | { |
7139 | this->m_EnclosingClauseInfo = pPrevTracker->m_EnclosingClauseInfo; |
7140 | return; |
7141 | } |
7142 | } |
7143 | |
7144 | this->m_EnclosingClauseInfo = EnclosingClauseInfo(fEnclosingClauseIsFunclet, |
7145 | dwEnclosingClauseOffset, |
7146 | uEnclosingClauseCallerSP); |
7147 | } |
7148 | |
7149 | |
7150 | #ifdef DACCESS_COMPILE |
7151 | void ExceptionTracker::EnumMemoryRegions(CLRDataEnumMemoryFlags flags) |
7152 | { |
7153 | // ExInfo is embedded so don't enum 'this'. |
7154 | OBJECTHANDLE_EnumMemoryRegions(m_hThrowable); |
7155 | m_ptrs.ExceptionRecord.EnumMem(); |
7156 | m_ptrs.ContextRecord.EnumMem(); |
7157 | } |
7158 | #endif // DACCESS_COMPILE |
7159 | |
7160 | #ifndef DACCESS_COMPILE |
// This is a thin wrapper around ResetThreadAbortState. It is primarily used to
// instantiate a CrawlFrame, when required, for walking the stack on IA64.
//
// The "when required" part refers to the set of conditions checked prior to the call to
// this method in ExceptionTracker::ProcessOSExceptionNotification (and asserted in
// ResetThreadAbortState).
7167 | // |
7168 | // Also, since CrawlFrame ctor is protected, it can only be instantiated by friend |
7169 | // types (which ExceptionTracker is). |
7170 | |
7171 | // static |
7172 | void ExceptionTracker::ResetThreadAbortStatus(PTR_Thread pThread, CrawlFrame *pCf, StackFrame sfCurrentStackFrame) |
7173 | { |
7174 | CONTRACTL |
7175 | { |
7176 | NOTHROW; |
7177 | GC_NOTRIGGER; |
7178 | MODE_ANY; |
7179 | PRECONDITION(pThread != NULL); |
7180 | PRECONDITION(pCf != NULL); |
7181 | PRECONDITION(!sfCurrentStackFrame.IsNull()); |
7182 | } |
7183 | CONTRACTL_END; |
7184 | |
7185 | if (pThread->IsAbortRequested()) |
7186 | { |
7187 | ResetThreadAbortState(pThread, pCf, sfCurrentStackFrame); |
7188 | } |
7189 | } |
7190 | #endif //!DACCESS_COMPILE |
7191 | |
7192 | #endif // WIN64EXCEPTIONS |
7193 | |
7194 | |