1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | |
5 | // ==++== |
6 | // |
7 | |
8 | // |
9 | // ==--== |
10 | // **************************************************************************** |
11 | // File: controller.cpp |
12 | // |
13 | |
14 | // |
15 | // controller.cpp: Debugger execution control routines |
16 | // |
17 | // **************************************************************************** |
// Putting code, #includes, #defines, etc., before the stdafx.h will
// cause them to be silently ignored.
20 | #include "stdafx.h" |
21 | #include "openum.h" |
22 | #include "../inc/common.h" |
23 | #include "eeconfig.h" |
24 | |
25 | #include "../../vm/methoditer.h" |
26 | |
27 | const char *GetTType( TraceType tt); |
28 | |
#define IsSingleStep(exception) ((exception) == EXCEPTION_SINGLE_STEP)
30 | |
31 | |
32 | |
33 | |
34 | |
35 | // ------------------------------------------------------------------------- |
36 | // DebuggerController routines |
37 | // ------------------------------------------------------------------------- |
38 | |
39 | SPTR_IMPL_INIT(DebuggerPatchTable, DebuggerController, g_patches, NULL); |
40 | SVAL_IMPL_INIT(BOOL, DebuggerController, g_patchTableValid, FALSE); |
41 | |
42 | #if !defined(DACCESS_COMPILE) |
43 | |
44 | DebuggerController *DebuggerController::g_controllers = NULL; |
45 | DebuggerControllerPage *DebuggerController::g_protections = NULL; |
46 | CrstStatic DebuggerController::g_criticalSection; |
47 | int DebuggerController::g_cTotalMethodEnter = 0; |
48 | |
49 | |
// Is this patch at a position at which it's safe to take a stack trace?
51 | bool DebuggerControllerPatch::IsSafeForStackTrace() |
52 | { |
53 | LIMITED_METHOD_CONTRACT; |
54 | |
55 | TraceType tt = this->trace.GetTraceType(); |
56 | Module *module = this->key.module; |
57 | BOOL managed = this->IsManagedPatch(); |
58 | |
59 | // Patches placed by MgrPush can come at lots of illegal spots. Can't take a stack trace here. |
60 | if ((module == NULL) && managed && (tt == TRACE_MGR_PUSH)) |
61 | { |
62 | return false; |
63 | } |
64 | |
65 | // Consider everything else legal. |
66 | // This is a little shady for TRACE_FRAME_PUSH. But TraceFrame() needs a stackInfo |
67 | // to get a RegDisplay (though almost nobody uses it, so perhaps it could be removed). |
68 | return true; |
69 | |
70 | } |
71 | |
72 | #ifndef _TARGET_ARM_ |
// Returns a pointer to the shared buffer. Each call will AddRef() the object
// before returning it, so callers only need to Release() when they're finished with it.
75 | SharedPatchBypassBuffer* DebuggerControllerPatch::GetOrCreateSharedPatchBypassBuffer() |
76 | { |
77 | CONTRACTL |
78 | { |
79 | THROWS; |
80 | GC_NOTRIGGER; |
81 | } |
82 | CONTRACTL_END; |
83 | |
84 | if (m_pSharedPatchBypassBuffer == NULL) |
85 | { |
86 | m_pSharedPatchBypassBuffer = new (interopsafeEXEC) SharedPatchBypassBuffer(); |
87 | _ASSERTE(m_pSharedPatchBypassBuffer); |
88 | TRACE_ALLOC(m_pSharedPatchBypassBuffer); |
89 | } |
90 | |
91 | m_pSharedPatchBypassBuffer->AddRef(); |
92 | |
93 | return m_pSharedPatchBypassBuffer; |
94 | } |
95 | #endif // _TARGET_ARM_ |
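
// Illustrative (hypothetical) caller pattern for the buffer above. Each call
// to GetOrCreateSharedPatchBypassBuffer must be balanced with a Release(),
// since the getter AddRef()s before returning:
//
//     SharedPatchBypassBuffer *pBuffer = patch->GetOrCreateSharedPatchBypassBuffer();
//     // ... use pBuffer to set up the patch bypass ...
//     pBuffer->Release();   // drop the reference taken by the getter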
96 | |
97 | // @todo - remove all this splicing trash |
98 | // This Sort/Splice stuff just reorders the patches within a particular chain such |
99 | // that when we iterate through by calling GetPatch() and GetNextPatch(DebuggerControllerPatch), |
100 | // we'll get patches in increasing order of DebuggerControllerTypes. |
101 | // Practically, this means that calling GetPatch() will return EnC patches before stepping patches. |
102 | // |
103 | #if 1 |
104 | void DebuggerPatchTable::SortPatchIntoPatchList(DebuggerControllerPatch **ppPatch) |
105 | { |
106 | LOG((LF_CORDB, LL_EVERYTHING, "DPT::SPIPL called.\n" )); |
107 | #ifdef _DEBUG |
108 | DebuggerControllerPatch *patchFirst |
109 | = (DebuggerControllerPatch *) Find(Hash((*ppPatch)), Key((*ppPatch))); |
110 | _ASSERTE(patchFirst == (*ppPatch)); |
111 | _ASSERTE((*ppPatch)->controller->GetDCType() != DEBUGGER_CONTROLLER_STATIC); |
112 | #endif //_DEBUG |
113 | DebuggerControllerPatch *patchNext = GetNextPatch((*ppPatch)); |
114 | LOG((LF_CORDB, LL_EVERYTHING, "DPT::SPIPL GetNextPatch passed\n" )); |
    // List contains one (sorted) element
116 | if (patchNext == NULL) |
117 | { |
118 | LOG((LF_CORDB, LL_INFO10000, |
119 | "DPT::SPIPL: Patch 0x%x is a sorted singleton\n" , (*ppPatch))); |
120 | return; |
121 | } |
122 | |
123 | // If we decide to reorder the list, we'll need to keep the element |
124 | // indexed by the hash function as the (sorted)first item. Everything else |
    // chains off this element, and can thus stay put.
126 | // Thus, either the element we just added is already sorted, or else we'll |
127 | // have to move it elsewhere in the list, meaning that we'll have to swap |
128 | // the second item & the new item, so that the index points to the proper |
129 | // first item in the list. |
130 | |
131 | //use Cur ptr for case where patch gets appended to list |
132 | DebuggerControllerPatch *patchCur = patchNext; |
133 | |
134 | while (patchNext != NULL && |
135 | ((*ppPatch)->controller->GetDCType() > |
136 | patchNext->controller->GetDCType()) ) |
137 | { |
138 | patchCur = patchNext; |
139 | patchNext = GetNextPatch(patchNext); |
140 | } |
141 | |
142 | if (patchNext == GetNextPatch((*ppPatch))) |
143 | { |
144 | LOG((LF_CORDB, LL_INFO10000, |
145 | "DPT::SPIPL: Patch 0x%x is already sorted\n" , (*ppPatch))); |
146 | return; //already sorted |
147 | } |
148 | |
149 | LOG((LF_CORDB, LL_INFO10000, |
150 | "DPT::SPIPL: Patch 0x%x will be moved \n" , (*ppPatch))); |
151 | |
152 | //remove it from the list |
153 | SpliceOutOfList((*ppPatch)); |
154 | |
155 | // the kinda neat thing is: since we put it originally at the front of the list, |
156 | // and it's not in order, then it must be behind another element of this list, |
157 | // so we don't have to write any 'SpliceInFrontOf' code. |
158 | |
159 | _ASSERTE(patchCur != NULL); |
160 | SpliceInBackOf((*ppPatch), patchCur); |
161 | |
162 | LOG((LF_CORDB, LL_INFO10000, |
163 | "DPT::SPIPL: Patch 0x%x is now sorted\n" , (*ppPatch))); |
164 | } |
165 | |
166 | // This can leave the list empty, so don't do this unless you put |
167 | // the patch back somewhere else. |
168 | void DebuggerPatchTable::SpliceOutOfList(DebuggerControllerPatch *patch) |
169 | { |
170 | // We need to get iHash, the index of the ptr within |
    // m_piBuckets, i.e., its entry in the hashtable.
172 | ULONG iHash = Hash(patch) % m_iBuckets; |
173 | ULONG iElement = m_piBuckets[iHash]; |
174 | DebuggerControllerPatch *patchFirst |
175 | = (DebuggerControllerPatch *) EntryPtr(iElement); |
176 | |
177 | // Fix up pointers to chain |
178 | if (patchFirst == patch) |
179 | { |
180 | // The first patch shouldn't have anything behind it. |
181 | _ASSERTE(patch->entry.iPrev == DPT_INVALID_SLOT); |
182 | |
183 | if (patch->entry.iNext != DPT_INVALID_SLOT) |
184 | { |
185 | m_piBuckets[iHash] = patch->entry.iNext; |
186 | } |
187 | else |
188 | { |
189 | m_piBuckets[iHash] = DPT_INVALID_SLOT; |
190 | } |
191 | } |
192 | |
193 | if (patch->entry.iNext != DPT_INVALID_SLOT) |
194 | { |
195 | EntryPtr(patch->entry.iNext)->iPrev = patch->entry.iPrev; |
196 | } |
197 | |
    if (patch->entry.iPrev != DPT_INVALID_SLOT)
    {
        EntryPtr(patch->entry.iPrev)->iNext = patch->entry.iNext;
    }
202 | |
203 | patch->entry.iNext = DPT_INVALID_SLOT; |
204 | patch->entry.iPrev = DPT_INVALID_SLOT; |
205 | } |
206 | |
207 | void DebuggerPatchTable::SpliceInBackOf(DebuggerControllerPatch *patchAppend, |
208 | DebuggerControllerPatch *patchEnd) |
209 | { |
210 | ULONG iAppend = ItemIndex((HASHENTRY*)patchAppend); |
211 | ULONG iEnd = ItemIndex((HASHENTRY*)patchEnd); |
212 | |
213 | patchAppend->entry.iPrev = iEnd; |
214 | patchAppend->entry.iNext = patchEnd->entry.iNext; |
215 | |
216 | if (patchAppend->entry.iNext != DPT_INVALID_SLOT) |
217 | EntryPtr(patchAppend->entry.iNext)->iPrev = iAppend; |
218 | |
219 | patchEnd->entry.iNext = iAppend; |
220 | } |
221 | #endif |
222 | |
223 | //----------------------------------------------------------------------------- |
224 | // Stack safety rules. |
225 | // In general, we're safe to crawl whenever we're in preemptive mode. |
// We also must be safe at any spot the thread could get synchronized,
227 | // because that means that the thread will be stopped to let the debugger shell |
228 | // inspect it and that can definitely take stack traces. |
// Basically the only unsafe spot is in the middle of a goofy stub with a
// partially constructed frame while in coop mode.
231 | //----------------------------------------------------------------------------- |
232 | |
233 | // Safe if we're at certain types of patches. |
234 | // See Patch::IsSafeForStackTrace for details. |
235 | StackTraceTicket::StackTraceTicket(DebuggerControllerPatch * patch) |
236 | { |
237 | _ASSERTE(patch != NULL); |
238 | _ASSERTE(patch->IsSafeForStackTrace()); |
239 | } |
240 | |
241 | // Safe if there was already another stack trace at this spot. (Grandfather clause) |
// This is commonly used for StepOut, which runs stack traces to crawl up
243 | // the stack to find a place to patch. |
244 | StackTraceTicket::StackTraceTicket(ControllerStackInfo * info) |
245 | { |
246 | _ASSERTE(info != NULL); |
247 | |
248 | // Ensure that the other stack info object actually executed (and thus was |
249 | // actually valid). |
250 | _ASSERTE(info->m_dbgExecuted); |
251 | } |
252 | |
253 | // Safe b/c the context shows we're in native managed code. |
254 | // This must be safe because we could always set a managed breakpoint by native |
255 | // offset and thus synchronize the shell at this spot. So this is |
256 | // a specific example of the Synchronized case. The fact that we don't actually |
257 | // synchronize doesn't make us any less safe. |
258 | StackTraceTicket::StackTraceTicket(const BYTE * ip) |
259 | { |
260 | _ASSERTE(g_pEEInterface->IsManagedNativeCode(ip)); |
261 | } |
262 | |
// Safe if we're at a Synchronized point.
264 | StackTraceTicket::StackTraceTicket(Thread * pThread) |
265 | { |
266 | _ASSERTE(pThread != NULL); |
267 | |
268 | // If we're synchronized, the debugger should be stopped. |
269 | // That means all threads are synced and must be safe to take a stacktrace. |
270 | // Thus we don't even need to do a thread-specific check. |
271 | _ASSERTE(g_pDebugger->IsStopped()); |
272 | } |
273 | |
274 | // DebuggerUserBreakpoint has a special case of safety. See that ctor for details. |
275 | StackTraceTicket::StackTraceTicket(DebuggerUserBreakpoint * p) |
276 | { |
277 | _ASSERTE(p != NULL); |
278 | } |
279 | |
280 | //void ControllerStackInfo::GetStackInfo(): GetStackInfo |
281 | // is invoked by the user to trigger the stack walk. This will |
282 | // cause the stack walk detailed in the class description to happen. |
283 | // Thread* thread: The thread to do the stack walk on. |
284 | // void* targetFP: Can be either NULL (meaning that the bottommost |
// frame is the target), or a frame pointer, meaning that the
// caller wants information about a specific frame.
// CONTEXT* pContext: A pointer to a CONTEXT structure. Can be NULL,
// in which case we use our temp context.
289 | // bool suppressUMChainFromComPlusMethodFrameGeneric - A ridiculous flag that is trying to narrowly |
290 | // target a fix for issue 650903. |
291 | // StackTraceTicket - ticket to ensure that we actually have permission for this stacktrace |
292 | void ControllerStackInfo::GetStackInfo( |
293 | StackTraceTicket ticket, |
294 | Thread *thread, |
295 | FramePointer targetFP, |
296 | CONTEXT *pContext, |
297 | bool suppressUMChainFromComPlusMethodFrameGeneric |
298 | ) |
299 | { |
300 | _ASSERTE(thread != NULL); |
301 | |
302 | BOOL contextValid = (pContext != NULL); |
303 | if (!contextValid) |
304 | { |
305 | // We're assuming the thread is protected w/ a frame (which includes the redirection |
306 | // case). The stackwalker will use that protection to prime the context. |
307 | pContext = &this->m_tempContext; |
308 | } |
309 | else |
310 | { |
311 | // If we provided an explicit context for this thread, it better not be redirected. |
312 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
313 | } |
314 | |
315 | // Mark this stackwalk as valid so that it can in turn be used to grandfather |
316 | // in other stackwalks. |
317 | INDEBUG(m_dbgExecuted = true); |
318 | |
319 | m_activeFound = false; |
320 | m_returnFound = false; |
321 | m_bottomFP = LEAF_MOST_FRAME; |
322 | m_targetFP = targetFP; |
323 | m_targetFrameFound = (m_targetFP == LEAF_MOST_FRAME); |
324 | m_specialChainReason = CHAIN_NONE; |
325 | m_suppressUMChainFromComPlusMethodFrameGeneric = suppressUMChainFromComPlusMethodFrameGeneric; |
326 | |
327 | int result = DebuggerWalkStack(thread, |
328 | LEAF_MOST_FRAME, |
329 | pContext, |
330 | contextValid, |
331 | WalkStack, |
332 | (void *) this, |
333 | FALSE); |
334 | |
335 | _ASSERTE(m_activeFound); // All threads have at least one unmanaged frame |
336 | |
337 | if (result == SWA_DONE) |
338 | { |
339 | _ASSERTE(!m_returnFound); |
340 | m_returnFrame = m_activeFrame; |
341 | } |
342 | } |
343 | |
344 | //--------------------------------------------------------------------------------------- |
345 | // |
346 | // This function "undoes" an unwind, i.e. it takes the active frame (the current frame) |
347 | // and sets it to be the return frame (the caller frame). Currently it is only used by |
348 | // the stepper to step out of an LCG method. See DebuggerStepper::DetectHandleLCGMethods() |
349 | // for more information. |
350 | // |
351 | // Assumptions: |
352 | // The current frame is valid on entry. |
353 | // |
354 | // Notes: |
355 | // After this function returns, the active frame on this instance of ControllerStackInfo will no longer be valid. |
356 | // |
// This function is specifically for DebuggerStepper::DetectHandleLCGMethods(). Using it in other scenarios may
358 | // require additional changes. |
359 | // |
360 | |
361 | void ControllerStackInfo::SetReturnFrameWithActiveFrame() |
362 | { |
363 | // Copy the active frame into the return frame. |
364 | m_returnFound = true; |
365 | m_returnFrame = m_activeFrame; |
366 | |
367 | // Invalidate the active frame. |
368 | m_activeFound = false; |
369 | memset(&(m_activeFrame), 0, sizeof(m_activeFrame)); |
370 | m_activeFrame.fp = LEAF_MOST_FRAME; |
371 | } |
372 | |
373 | // Fill in a controller-stack info. |
374 | StackWalkAction ControllerStackInfo::WalkStack(FrameInfo *pInfo, void *data) |
375 | { |
376 | LIMITED_METHOD_CONTRACT; |
377 | |
378 | _ASSERTE(!pInfo->HasStubFrame()); // we didn't ask for stub frames. |
379 | |
380 | ControllerStackInfo *i = (ControllerStackInfo *) data; |
381 | |
382 | //save this info away for later use |
383 | if (i->m_bottomFP == LEAF_MOST_FRAME) |
384 | i->m_bottomFP = pInfo->fp; |
385 | |
    // This is part of the targeted fix for issue 650903. (See the other
    // parts in code:TrackUMChain and code:DebuggerStepper::TrapStepOut.)
388 | // pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric has been |
389 | // set by TrackUMChain to help us remember that the current frame we're looking at is |
390 | // ComPlusMethodFrameGeneric (we can't rely on looking at pInfo->frame to check |
391 | // this), and i->m_suppressUMChainFromComPlusMethodFrameGeneric has been set by the |
392 | // dude initiating this walk to remind us that our goal in life is to do a Step Out |
393 | // during managed-only debugging. These two things together tell us we should ignore |
394 | // this frame, rather than erroneously identifying it as the target frame. |
395 | #ifdef FEATURE_COMINTEROP |
396 | if(i->m_suppressUMChainFromComPlusMethodFrameGeneric && |
397 | (pInfo->chainReason == CHAIN_ENTER_UNMANAGED) && |
398 | (pInfo->fIgnoreThisFrameIfSuppressingUMChainFromComPlusMethodFrameGeneric)) |
399 | { |
400 | return SWA_CONTINUE; |
401 | } |
402 | #endif // FEATURE_COMINTEROP |
403 | |
404 | //have we reached the correct frame yet? |
405 | if (!i->m_targetFrameFound && |
406 | IsEqualOrCloserToLeaf(i->m_targetFP, pInfo->fp)) |
407 | { |
408 | i->m_targetFrameFound = true; |
409 | } |
410 | |
411 | if (i->m_targetFrameFound ) |
412 | { |
413 | // Ignore Enter-managed chains. |
414 | if (pInfo->chainReason == CHAIN_ENTER_MANAGED) |
415 | { |
416 | return SWA_CONTINUE; |
417 | } |
418 | |
419 | if (i->m_activeFound ) |
420 | { |
421 | // We care if the current frame is unmanaged (in case a managed stepper is initiated |
422 | // on a thread currently in unmanaged code). But since we can't step-out to UM frames, |
423 | // we can just skip them in the stack walk. |
424 | if (!pInfo->managed) |
425 | { |
426 | return SWA_CONTINUE; |
427 | } |
428 | |
429 | if (pInfo->chainReason == CHAIN_CLASS_INIT) |
430 | i->m_specialChainReason = pInfo->chainReason; |
431 | |
432 | if (pInfo->fp != i->m_activeFrame.fp) // avoid dups |
433 | { |
434 | i->m_returnFrame = *pInfo; |
435 | |
436 | #if defined(WIN64EXCEPTIONS) |
437 | CopyREGDISPLAY(&(i->m_returnFrame.registers), &(pInfo->registers)); |
438 | #endif // WIN64EXCEPTIONS |
439 | |
440 | i->m_returnFound = true; |
441 | |
442 | return SWA_ABORT; |
443 | } |
444 | } |
445 | else |
446 | { |
447 | i->m_activeFrame = *pInfo; |
448 | |
449 | #if defined(WIN64EXCEPTIONS) |
450 | CopyREGDISPLAY(&(i->m_activeFrame.registers), &(pInfo->registers)); |
451 | #endif // WIN64EXCEPTIONS |
452 | |
453 | i->m_activeFound = true; |
454 | |
455 | return SWA_CONTINUE; |
456 | } |
457 | } |
458 | |
459 | return SWA_CONTINUE; |
460 | } |
461 | |
462 | |
463 | // |
464 | // Note that patches may be reallocated - do not keep a pointer to a patch. |
465 | // |
466 | DebuggerControllerPatch *DebuggerPatchTable::AddPatchForMethodDef(DebuggerController *controller, |
467 | Module *module, |
468 | mdMethodDef md, |
469 | MethodDesc* pMethodDescFilter, |
470 | size_t offset, |
471 | BOOL offsetIsIL, |
472 | DebuggerPatchKind kind, |
473 | FramePointer fp, |
474 | AppDomain *pAppDomain, |
475 | SIZE_T masterEnCVersion, |
476 | DebuggerJitInfo *dji) |
477 | { |
478 | CONTRACTL |
479 | { |
480 | THROWS; |
481 | MODE_ANY; |
482 | GC_NOTRIGGER; |
483 | } |
484 | CONTRACTL_END; |
485 | |
486 | |
487 | |
488 | LOG( (LF_CORDB,LL_INFO10000,"DCP:AddPatchForMethodDef unbound " |
489 | "relative in methodDef 0x%x with dji 0x%x " |
490 | "controller:0x%x AD:0x%x\n" , md, |
491 | dji, controller, pAppDomain)); |
492 | |
493 | DebuggerFunctionKey key; |
494 | |
495 | key.module = module; |
496 | key.md = md; |
497 | |
498 | // Get a new uninitialized patch object |
499 | DebuggerControllerPatch *patch = |
500 | (DebuggerControllerPatch *) Add(HashKey(&key)); |
501 | if (patch == NULL) |
502 | { |
503 | ThrowOutOfMemory(); |
504 | } |
505 | #ifndef _TARGET_ARM_ |
506 | patch->Initialize(); |
507 | #endif |
508 | |
509 | //initialize the patch data structure. |
510 | InitializePRD(&(patch->opcode)); |
511 | patch->controller = controller; |
512 | patch->key.module = module; |
513 | patch->key.md = md; |
514 | patch->pMethodDescFilter = pMethodDescFilter; |
515 | patch->offset = offset; |
516 | patch->offsetIsIL = offsetIsIL; |
517 | patch->address = NULL; |
518 | patch->fp = fp; |
519 | patch->trace.Bad_SetTraceType(DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER |
520 | patch->refCount = 1; // AddRef() |
521 | patch->fSaveOpcode = false; |
522 | patch->pAppDomain = pAppDomain; |
523 | patch->pid = m_pid++; |
524 | |
525 | if (kind == PATCH_KIND_IL_MASTER) |
526 | { |
527 | _ASSERTE(dji == NULL); |
528 | patch->encVersion = masterEnCVersion; |
529 | } |
530 | else |
531 | { |
532 | patch->dji = dji; |
533 | } |
534 | patch->kind = kind; |
535 | |
536 | if (dji) |
537 | LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ version 0x%04x, " |
538 | "pid:0x%x\n" , dji->m_encVersion, patch->pid)); |
539 | else if (kind == PATCH_KIND_IL_MASTER) |
540 | LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ version 0x%04x, " |
541 | "pid:0x%x\n" , masterEnCVersion,patch->pid)); |
542 | else |
543 | LOG((LF_CORDB,LL_INFO10000,"AddPatchForMethodDef w/ no dji or dmi, pid:0x%x\n" ,patch->pid)); |
544 | |
545 | |
546 | // This patch is not yet bound or activated |
547 | _ASSERTE( !patch->IsBound() ); |
548 | _ASSERTE( !patch->IsActivated() ); |
549 | |
550 | // The only kind of patch with IL offset is the IL master patch. |
551 | _ASSERTE(patch->IsILMasterPatch() || patch->offsetIsIL == FALSE); |
552 | |
553 | // The only kind of patch that allows a MethodDescFilter is the IL master patch |
554 | _ASSERTE(patch->IsILMasterPatch() || patch->pMethodDescFilter == NULL); |
555 | |
556 | // Zero is the only native offset that we allow to bind across different jitted |
557 | // code bodies. There isn't any sensible meaning to binding at some other native offset. |
558 | // Even if all the code bodies had an instruction that started at that offset there is |
559 | // no guarantee those instructions represent a semantically equivalent point in the |
560 | // method's execution. |
561 | _ASSERTE(!(patch->IsILMasterPatch() && !patch->offsetIsIL && patch->offset != 0)); |
562 | |
563 | return patch; |
564 | } |
565 | |
566 | // Create and bind a patch to the specified address |
567 | // The caller should immediately activate the patch since we typically expect bound patches |
568 | // will always be activated. |
569 | DebuggerControllerPatch *DebuggerPatchTable::AddPatchForAddress(DebuggerController *controller, |
570 | MethodDesc *fd, |
571 | size_t offset, |
572 | DebuggerPatchKind kind, |
573 | CORDB_ADDRESS_TYPE *address, |
574 | FramePointer fp, |
575 | AppDomain *pAppDomain, |
576 | DebuggerJitInfo *dji, |
577 | SIZE_T pid, |
578 | TraceType traceType) |
579 | |
580 | { |
581 | CONTRACTL |
582 | { |
583 | THROWS; |
584 | MODE_ANY; |
585 | GC_NOTRIGGER; |
586 | } |
587 | CONTRACTL_END; |
588 | |
589 | |
590 | _ASSERTE(kind == PATCH_KIND_NATIVE_MANAGED || kind == PATCH_KIND_NATIVE_UNMANAGED); |
591 | LOG((LF_CORDB,LL_INFO10000,"DCP:AddPatchForAddress bound " |
592 | "absolute to 0x%x with dji 0x%x (mdDef:0x%x) " |
593 | "controller:0x%x AD:0x%x\n" , |
594 | address, dji, (fd!=NULL?fd->GetMemberDef():0), controller, |
595 | pAppDomain)); |
596 | |
597 | // get new uninitialized patch object |
598 | DebuggerControllerPatch *patch = |
599 | (DebuggerControllerPatch *) Add(HashAddress(address)); |
600 | |
601 | if (patch == NULL) |
602 | { |
603 | ThrowOutOfMemory(); |
604 | } |
605 | #ifndef _TARGET_ARM_ |
606 | patch->Initialize(); |
607 | #endif |
608 | |
609 | // initialize the patch data structure |
610 | InitializePRD(&(patch->opcode)); |
611 | patch->controller = controller; |
612 | |
613 | if (fd == NULL) |
614 | { |
615 | patch->key.module = NULL; |
616 | patch->key.md = mdTokenNil; |
617 | } |
618 | else |
619 | { |
620 | patch->key.module = g_pEEInterface->MethodDescGetModule(fd); |
621 | patch->key.md = fd->GetMemberDef(); |
622 | } |
623 | patch->pMethodDescFilter = NULL; |
624 | patch->offset = offset; |
625 | patch->offsetIsIL = FALSE; |
626 | patch->address = address; |
627 | patch->fp = fp; |
628 | patch->trace.Bad_SetTraceType(traceType); |
629 | patch->refCount = 1; // AddRef() |
630 | patch->fSaveOpcode = false; |
631 | patch->pAppDomain = pAppDomain; |
632 | if (pid == DCP_PID_INVALID) |
633 | patch->pid = m_pid++; |
634 | else |
635 | patch->pid = pid; |
636 | |
637 | patch->dji = dji; |
638 | patch->kind = kind; |
639 | |
640 | if (dji == NULL) |
641 | LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version with no dji, pid:0x%x\n" , patch->pid)); |
642 | else |
643 | { |
644 | LOG((LF_CORDB,LL_INFO10000,"AddPatchForAddress w/ version 0x%04x, " |
645 | "pid:0x%x\n" , dji->m_methodInfo->GetCurrentEnCVersion(), patch->pid)); |
646 | |
647 | _ASSERTE( fd==NULL || fd == dji->m_fd ); |
648 | } |
649 | |
650 | SortPatchIntoPatchList(&patch); |
651 | |
652 | // This patch is bound but not yet activated |
653 | _ASSERTE( patch->IsBound() ); |
654 | _ASSERTE( !patch->IsActivated() ); |
655 | |
656 | // The only kind of patch with IL offset is the IL master patch. |
657 | _ASSERTE(patch->IsILMasterPatch() || patch->offsetIsIL == FALSE); |
658 | return patch; |
659 | } |
660 | |
661 | // Set the native address for this patch. |
662 | void DebuggerPatchTable::BindPatch(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address) |
663 | { |
664 | _ASSERTE(patch != NULL); |
665 | _ASSERTE(address != NULL); |
666 | _ASSERTE( !patch->IsILMasterPatch() ); |
667 | _ASSERTE(!patch->IsBound() ); |
668 | |
669 | //Since the actual patch doesn't move, we don't have to worry about |
    //zeroing out the opcode field (see lengthy comment above)
671 | // Since the patch is double-hashed based off Address, if we change the address, |
672 | // we must remove and reinsert the patch. |
673 | CHashTable::Delete(HashKey(&patch->key), ItemIndex((HASHENTRY*)patch)); |
674 | |
675 | patch->address = address; |
676 | |
677 | CHashTable::Add(HashAddress(address), ItemIndex((HASHENTRY*)patch)); |
678 | |
679 | SortPatchIntoPatchList(&patch); |
680 | |
681 | _ASSERTE(patch->IsBound() ); |
682 | _ASSERTE(!patch->IsActivated() ); |
683 | } |
684 | |
685 | // Disassociate a patch from a specific code address. |
686 | void DebuggerPatchTable::UnbindPatch(DebuggerControllerPatch *patch) |
687 | { |
688 | _ASSERTE(patch != NULL); |
689 | _ASSERTE(patch->kind != PATCH_KIND_IL_MASTER); |
690 | _ASSERTE(patch->IsBound() ); |
691 | _ASSERTE(!patch->IsActivated() ); |
692 | |
693 | //<REVISIT_TODO>@todo We're hosed if the patch hasn't been primed with |
694 | // this info & we can't get it...</REVISIT_TODO> |
695 | if (patch->key.module == NULL || |
696 | patch->key.md == mdTokenNil) |
697 | { |
698 | MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc( |
699 | dac_cast<PCODE>(patch->address)); |
700 | _ASSERTE( fd != NULL ); |
701 | patch->key.module = g_pEEInterface->MethodDescGetModule(fd); |
702 | patch->key.md = fd->GetMemberDef(); |
703 | } |
704 | |
    // Update its index entry in the table to use its unbound key
706 | // Since the patch is double-hashed based off Address, if we change the address, |
707 | // we must remove and reinsert the patch. |
708 | CHashTable::Delete( HashAddress(patch->address), |
709 | ItemIndex((HASHENTRY*)patch)); |
710 | |
711 | patch->address = NULL; // we're no longer bound to this address |
712 | |
713 | CHashTable::Add( HashKey(&patch->key), |
714 | ItemIndex((HASHENTRY*)patch)); |
715 | |
716 | _ASSERTE(!patch->IsBound() ); |
717 | |
718 | } |
719 | |
720 | void DebuggerPatchTable::RemovePatch(DebuggerControllerPatch *patch) |
721 | { |
722 | // Since we're deleting this patch, it must not be activated (i.e. it must not have a stored opcode) |
723 | _ASSERTE( !patch->IsActivated() ); |
724 | #ifndef _TARGET_ARM_ |
725 | patch->DoCleanup(); |
726 | #endif |
727 | |
728 | // |
729 | // Because of the implementation of CHashTable, we can safely |
730 | // delete elements while iterating through the table. This |
731 | // behavior is relied upon - do not change to a different |
732 | // implementation without considering this fact. |
733 | // |
734 | Delete(Hash(patch), (HASHENTRY *) patch); |
735 | |
736 | } |
737 | |
738 | DebuggerControllerPatch *DebuggerPatchTable::GetNextPatch(DebuggerControllerPatch *prev) |
739 | { |
740 | ULONG iNext; |
741 | HASHENTRY *psEntry; |
742 | |
743 | // Start at the next entry in the chain. |
744 | // @todo - note that: EntryPtr(ItemIndex(x)) == x |
745 | iNext = EntryPtr(ItemIndex((HASHENTRY*)prev))->iNext; |
746 | |
747 | // Search until we hit the end. |
748 | while (iNext != UINT32_MAX) |
749 | { |
750 | // Compare the keys. |
751 | psEntry = EntryPtr(iNext); |
752 | |
753 | // Careful here... we can hash the entries in this table |
754 | // by two types of keys. In this type of search, the type |
755 | // of the second key (psEntry) does not necessarily |
756 | // indicate the type of the first key (prev), so we have |
757 | // to check for sure. |
758 | DebuggerControllerPatch *pc2 = (DebuggerControllerPatch*)psEntry; |
759 | |
        if (((pc2->address == NULL) && (prev->address == NULL)) ||
            ((pc2->address != NULL) && (prev->address != NULL)))
        {
            if (!Cmp(Key(prev), psEntry))
                return pc2;
        }
764 | |
765 | // Advance to the next item in the chain. |
766 | iNext = psEntry->iNext; |
767 | } |
768 | |
769 | return NULL; |
770 | } |
771 | |
772 | #ifdef _DEBUG_PATCH_TABLE |
// DEBUG An internal debugging routine; it iterates
// through the hashtable, stopping at every
// single entry, no matter what its state. For this to
// compile, you're going to have to add friend status
// for this class to CHashTableAndData in
// $\Com99\Src\inc\UtilCode.h
779 | void DebuggerPatchTable::CheckPatchTable() |
780 | { |
781 | if (NULL != m_pcEntries) |
782 | { |
783 | DebuggerControllerPatch *dcp; |
        for (ULONG i = 0; i < m_iEntries; i++)
786 | { |
787 | dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]); |
788 | if (dcp->opcode != 0 ) |
789 | { |
790 | LOG((LF_CORDB,LL_INFO1000, "dcp->addr:0x%8x " |
791 | "mdMD:0x%8x, offset:0x%x, native:%d\n" , |
792 | dcp->address, dcp->key.md, dcp->offset, |
793 | dcp->IsNativePatch())); |
794 | } |
795 | } |
796 | } |
797 | } |
798 | |
799 | #endif // _DEBUG_PATCH_TABLE |
800 | |
801 | // Count how many patches are in the table. |
802 | // Use for asserts |
803 | int DebuggerPatchTable::GetNumberOfPatches() |
804 | { |
805 | int total = 0; |
806 | |
807 | if (NULL != m_pcEntries) |
808 | { |
809 | DebuggerControllerPatch *dcp; |
        for (ULONG i = 0; i < m_iEntries; i++)
813 | { |
814 | dcp = (DebuggerControllerPatch*)&(((DebuggerControllerPatch *)m_pcEntries)[i]); |
815 | |
816 | if (dcp->IsActivated() || !dcp->IsFree()) |
817 | total++; |
818 | } |
819 | } |
820 | return total; |
821 | } |
822 | |
823 | #if defined(_DEBUG) |
824 | //----------------------------------------------------------------------------- |
825 | // Debug check that we only have 1 thread-starter per thread. |
826 | // pNew - the new DTS. We'll make sure there's not already a DTS on this thread. |
827 | //----------------------------------------------------------------------------- |
828 | void DebuggerController::EnsureUniqueThreadStarter(DebuggerThreadStarter * pNew) |
829 | { |
830 | // This lock should be safe to take since our base class ctor takes it. |
831 | ControllerLockHolder lockController; |
832 | DebuggerController * pExisting = g_controllers; |
833 | while(pExisting != NULL) |
834 | { |
835 | if (pExisting->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER) |
836 | { |
837 | if (pExisting != pNew) |
838 | { |
839 | // If we have 2 thread starters, they'd better be on different threads. |
840 | _ASSERTE((pExisting->GetThread() != pNew->GetThread())); |
841 | } |
842 | } |
843 | pExisting = pExisting->m_next; |
844 | } |
845 | } |
846 | #endif |
847 | |
848 | //----------------------------------------------------------------------------- |
// If we have a thread-starter on the given EE thread, make sure it's canceled.
850 | // Thread-Starters normally delete themselves when they fire. But if the EE |
851 | // destroys the thread before it fires, then we'd still have an active DTS. |
852 | //----------------------------------------------------------------------------- |
853 | void DebuggerController::CancelOutstandingThreadStarter(Thread * pThread) |
854 | { |
855 | _ASSERTE(pThread != NULL); |
856 | LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter - checking on thread =0x%p\n" , pThread)); |
857 | |
858 | ControllerLockHolder lockController; |
859 | DebuggerController * p = g_controllers; |
860 | while(p != NULL) |
861 | { |
862 | if (p->GetDCType() == DEBUGGER_CONTROLLER_THREAD_STARTER) |
863 | { |
864 | if (p->GetThread() == pThread) |
865 | { |
                LOG((LF_CORDB, LL_EVERYTHING, "DC:CancelOutstandingThreadStarter, pThread=0x%p, Found=0x%p\n", pThread, p));
867 | |
868 | // There's only 1 DTS per thread, so once we find it, we can quit. |
869 | p->Delete(); |
870 | p = NULL; |
871 | break; |
872 | } |
873 | } |
874 | p = p->m_next; |
875 | } |
876 | // The common case is that our DTS hit its patch and did a SendEvent (and |
877 | // deleted itself). So usually we'll get through the whole list w/o deleting anything. |
878 | |
879 | } |
880 | |
// void DebuggerController::Initialize()  Sets up the static
// variables for the DebuggerController class.
// How: initializes the critical section and allocates the patch table
884 | HRESULT DebuggerController::Initialize() |
885 | { |
886 | CONTRACT(HRESULT) |
887 | { |
888 | THROWS; |
889 | GC_NOTRIGGER; |
        // This can be called in an "early attach" case, so DebuggerIsInvolved()
        // will be false b/c we don't realize the debugger's attaching to us.
892 | //PRECONDITION(DebuggerIsInvolved()); |
893 | POSTCONDITION(CheckPointer(g_patches)); |
894 | POSTCONDITION(RETVAL == S_OK); |
895 | } |
896 | CONTRACT_END; |
897 | |
898 | if (g_patches == NULL) |
899 | { |
900 | ZeroMemory(&g_criticalSection, sizeof(g_criticalSection)); // Init() expects zero-init memory. |
901 | |
902 | // NOTE: CRST_UNSAFE_ANYMODE prevents a GC mode switch when entering this crst. |
903 | // If you remove this flag, we will switch to preemptive mode when entering |
904 | // g_criticalSection, which means all functions that enter it will become |
905 | // GC_TRIGGERS. (This includes all uses of ControllerLockHolder.) So be sure |
906 | // to update the contracts if you remove this flag. |
907 | g_criticalSection.Init(CrstDebuggerController, |
908 | (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD)); |
909 | |
910 | g_patches = new (interopsafe) DebuggerPatchTable(); |
911 | _ASSERTE(g_patches != NULL); // throws on oom |
912 | |
913 | HRESULT hr = g_patches->Init(); |
914 | |
915 | if (FAILED(hr)) |
916 | { |
917 | DeleteInteropSafe(g_patches); |
918 | ThrowHR(hr); |
919 | } |
920 | |
921 | g_patchTableValid = TRUE; |
922 | TRACE_ALLOC(g_patches); |
923 | } |
924 | |
925 | _ASSERTE(g_patches != NULL); |
926 | |
927 | RETURN (S_OK); |
928 | } |
929 | |
930 | |
931 | //--------------------------------------------------------------------------------------- |
932 | // |
933 | // Constructor for a controller |
934 | // |
935 | // Arguments: |
// pThread - thread that controller has affinity to. NULL if no thread affinity.
937 | // pAppdomain - appdomain that controller has affinity to. NULL if no AD affinity. |
938 | // |
939 | // |
940 | // Notes: |
941 | // "Affinity" is per-controller specific. Affinity is generally passed on to |
942 | // any patches the controller creates. So if a controller has affinity to Thread X, |
943 | // then any patches it creates will only fire on Thread-X. |
944 | // |
945 | //--------------------------------------------------------------------------------------- |
946 | |
947 | DebuggerController::DebuggerController(Thread * pThread, AppDomain * pAppDomain) |
948 | : m_pAppDomain(pAppDomain), |
949 | m_thread(pThread), |
950 | m_singleStep(false), |
951 | m_exceptionHook(false), |
952 | m_traceCall(0), |
953 | m_traceCallFP(ROOT_MOST_FRAME), |
954 | m_unwindFP(LEAF_MOST_FRAME), |
955 | m_eventQueuedCount(0), |
956 | m_deleted(false), |
957 | m_fEnableMethodEnter(false) |
958 | { |
959 | CONTRACTL |
960 | { |
961 | SO_NOT_MAINLINE; |
962 | NOTHROW; |
963 | GC_NOTRIGGER; |
964 | CONSTRUCTOR_CHECK; |
965 | } |
966 | CONTRACTL_END; |
967 | |
968 | LOG((LF_CORDB, LL_INFO10000, "DC: 0x%x m_eventQueuedCount to 0 - DC::DC\n" , this)); |
969 | ControllerLockHolder lockController; |
970 | { |
971 | m_next = g_controllers; |
972 | g_controllers = this; |
973 | } |
974 | } |
975 | |
976 | //--------------------------------------------------------------------------------------- |
977 | // |
// DebuggerController::DeleteAllControllers - deletes all debugger controllers
979 | // |
980 | // Arguments: |
981 | // None |
982 | // |
983 | // Return Value: |
984 | // None |
985 | // |
986 | // Notes: |
987 | // This is used at detach time to remove all DebuggerControllers. This will remove all |
988 | // patches and do whatever other cleanup individual DebuggerControllers consider |
989 | // necessary to allow the debugger to detach and the process to run normally. |
990 | // |
991 | |
992 | void DebuggerController::DeleteAllControllers() |
993 | { |
994 | CONTRACTL |
995 | { |
996 | SO_NOT_MAINLINE; |
997 | NOTHROW; |
998 | GC_NOTRIGGER; |
999 | } |
1000 | CONTRACTL_END; |
1001 | |
1002 | ControllerLockHolder lockController; |
1003 | DebuggerController * pDebuggerController = g_controllers; |
1004 | DebuggerController * pNextDebuggerController = NULL; |
1005 | |
1006 | while (pDebuggerController != NULL) |
1007 | { |
1008 | pNextDebuggerController = pDebuggerController->m_next; |
1009 | pDebuggerController->DebuggerDetachClean(); |
1010 | pDebuggerController->Delete(); |
1011 | pDebuggerController = pNextDebuggerController; |
1012 | } |
1013 | } |
1014 | |
1015 | DebuggerController::~DebuggerController() |
1016 | { |
1017 | CONTRACTL |
1018 | { |
1019 | SO_NOT_MAINLINE; |
1020 | NOTHROW; |
1021 | GC_NOTRIGGER; |
1022 | DESTRUCTOR_CHECK; |
1023 | } |
1024 | CONTRACTL_END; |
1025 | |
1026 | ControllerLockHolder lockController; |
1027 | |
1028 | _ASSERTE(m_eventQueuedCount == 0); |
1029 | |
1030 | DisableAll(); |
1031 | |
1032 | // |
1033 | // Remove controller from list |
1034 | // |
1035 | |
1036 | DebuggerController **c; |
1037 | |
1038 | c = &g_controllers; |
1039 | while (*c != this) |
1040 | c = &(*c)->m_next; |
1041 | |
1042 | *c = m_next; |
1043 | |
1044 | } |
1045 | |
1046 | // void DebuggerController::Delete() |
// What: Marks an instance as deletable. If its ref count
// (see Enqueue, Dequeue) is currently zero, it actually gets deleted
1049 | // How: Set m_deleted to true. If m_eventQueuedCount==0, delete this |
1050 | void DebuggerController::Delete() |
1051 | { |
1052 | CONTRACTL |
1053 | { |
1054 | SO_NOT_MAINLINE; |
1055 | NOTHROW; |
1056 | GC_NOTRIGGER; |
1057 | } |
1058 | CONTRACTL_END; |
1059 | |
1060 | if (m_eventQueuedCount == 0) |
1061 | { |
1062 | LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: actual delete of this:0x%x!\n" , this)); |
1063 | TRACE_FREE(this); |
1064 | DeleteInteropSafe(this); |
1065 | } |
1066 | else |
1067 | { |
1068 | LOG((LF_CORDB|LF_ENC, LL_INFO100000, "DC::Delete: marked for " |
1069 | "future delete of this:0x%x!\n" , this)); |
1070 | LOG((LF_CORDB|LF_ENC, LL_INFO10000, "DC:0x%x m_eventQueuedCount at 0x%x\n" , |
1071 | this, m_eventQueuedCount)); |
1072 | m_deleted = true; |
1073 | } |
1074 | } |
1075 | |
1076 | void DebuggerController::DebuggerDetachClean() |
1077 | { |
1078 | //do nothing here |
1079 | } |
1080 | |
1081 | //static |
1082 | void DebuggerController::AddRef(DebuggerControllerPatch *patch) |
1083 | { |
1084 | patch->refCount++; |
1085 | } |
1086 | |
1087 | //static |
1088 | void DebuggerController::Release(DebuggerControllerPatch *patch) |
1089 | { |
1090 | patch->refCount--; |
1091 | if (patch->refCount == 0) |
1092 | { |
1093 | LOG((LF_CORDB, LL_INFO10000, "DCP::R: patch deleted, deactivating\n" )); |
1094 | DeactivatePatch(patch); |
1095 | GetPatchTable()->RemovePatch(patch); |
1096 | } |
1097 | } |
1098 | |
1099 | // void DebuggerController::DisableAll() DisableAll removes |
1100 | // all control from the controller. This includes all patches & page |
1101 | // protection. This will invoke Disable* for unwind,singlestep, |
1102 | // exceptionHook, and tracecall. It will also go through the patch table & |
1103 | // attempt to remove any and all patches that belong to this controller. |
1104 | // If the patch is currently triggering, then a Dispatch* method expects the |
1105 | // patch to be there after we return, so we instead simply mark the patch |
1106 | // itself as deleted. |
1107 | void DebuggerController::DisableAll() |
1108 | { |
1109 | CONTRACTL |
1110 | { |
1111 | SO_NOT_MAINLINE; |
1112 | NOTHROW; |
1113 | GC_NOTRIGGER; |
1114 | MODE_ANY; |
1115 | } |
1116 | CONTRACTL_END; |
1117 | |
1118 | LOG((LF_CORDB,LL_INFO1000, "DC::DisableAll\n" )); |
1119 | _ASSERTE(g_patches != NULL); |
1120 | |
1121 | ControllerLockHolder ch; |
1122 | { |
1123 | // |
1124 | // Remove controller's patches from list. |
1125 | // Don't do this on shutdown because the shutdown thread may have killed another thread asynchronously |
1126 | // thus leaving the patchtable in an inconsistent state such that we may fail trying to walk it. |
1127 | // Since we're exiting anyways, leaving int3 in the code can't harm anybody. |
1128 | // |
1129 | if (!g_fProcessDetach) |
1130 | { |
1131 | HASHFIND f; |
1132 | for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f); |
1133 | patch != NULL; |
1134 | patch = g_patches->GetNextPatch(&f)) |
1135 | { |
1136 | if (patch->controller == this) |
1137 | { |
1138 | Release(patch); |
1139 | } |
1140 | } |
1141 | } |
1142 | |
1143 | if (m_singleStep) |
1144 | DisableSingleStep(); |
1145 | if (m_exceptionHook) |
1146 | DisableExceptionHook(); |
1147 | if (m_unwindFP != LEAF_MOST_FRAME) |
1148 | DisableUnwind(); |
1149 | if (m_traceCall) |
1150 | DisableTraceCall(); |
1151 | if (m_fEnableMethodEnter) |
1152 | DisableMethodEnter(); |
1153 | } |
1154 | } |
1155 | |
1156 | // void DebuggerController::Enqueue() What: Does |
1157 | // reference counting so we don't toast a |
1158 | // DebuggerController while it's in a Dispatch queue. |
1159 | // Why: In DispatchPatchOrSingleStep, we can't hold locks when going |
1160 | // into PreEmptiveGC mode b/c we'll create a deadlock. |
1161 | // So we have to UnLock() prior to |
1162 | // EnablePreEmptiveGC(). But somebody else can show up and delete the |
1163 | // DebuggerControllers since we no longer have the lock. So we have to |
1164 | // do this reference counting thing to make sure that the controllers |
1165 | // don't get toasted as we're trying to invoke SendEvent on them. We have to |
// reacquire the lock before invoking Dequeue because Dequeue may
1167 | // result in the controller being deleted, which would change the global |
1168 | // controller list. |
// How: increment m_eventQueuedCount
1170 | void DebuggerController::Enqueue() |
1171 | { |
1172 | LIMITED_METHOD_CONTRACT; |
1173 | |
1174 | m_eventQueuedCount++; |
1175 | LOG((LF_CORDB, LL_INFO10000, "DC::Enq DC:0x%x m_eventQueuedCount at 0x%x\n" , |
1176 | this, m_eventQueuedCount)); |
1177 | } |
1178 | |
1179 | // void DebuggerController::Dequeue() What: Does |
1180 | // reference counting so we don't toast a |
1181 | // DebuggerController while it's in a Dispatch queue. |
// How: decrement m_eventQueuedCount; delete this if
// m_eventQueuedCount == 0 AND m_deleted has been set to true
1184 | void DebuggerController::Dequeue() |
1185 | { |
1186 | CONTRACTL |
1187 | { |
1188 | SO_NOT_MAINLINE; |
1189 | NOTHROW; |
1190 | GC_NOTRIGGER; |
1191 | } |
1192 | CONTRACTL_END; |
1193 | |
1194 | LOG((LF_CORDB, LL_INFO10000, "DC::Deq DC:0x%x m_eventQueuedCount at 0x%x\n" , |
1195 | this, m_eventQueuedCount)); |
1196 | if (--m_eventQueuedCount == 0) |
1197 | { |
1198 | if (m_deleted) |
1199 | { |
1200 | TRACE_FREE(this); |
1201 | DeleteInteropSafe(this); |
1202 | } |
1203 | } |
1204 | } |
1205 | |
1206 | |
// bool DebuggerController::BindPatch()  If the method has
// been JITted and isn't hashed by address already, then hash
// it into the hashtable by address and not DebuggerFunctionKey.
// If the patch->address field is nonzero, we're done.
// Otherwise obtain the code's start address (from the DJI if we have one,
// else from g_pEEInterface->GetFunctionAddress), compute the patch address
// from the native offset, and invoke g_patches->BindPatch.
// Returns: true if the patch was bound to a raw address.
// false if the patch can't be bound right now; that may be a transient
// failure (eg, the method hasn't been JITted yet) or a permanent one.

// Have the following outcomes:
// 1) Succeeded in binding the patch to a raw address. patch->address is set.
//    (Note we still must apply the patch to put the int 3 in.)
//    returns true
//
// 2) Fails to bind, but a future attempt may succeed. Obvious ex, for an IL-only
//    patch on an unjitted method.
//    returns false
//
// 3) Fails to bind because something's wrong. Ex: bad IL offset, no DJI to do a
//    mapping with. Future calls will fail too.
//    returns false
1231 | bool DebuggerController::BindPatch(DebuggerControllerPatch *patch, |
1232 | MethodDesc *fd, |
1233 | CORDB_ADDRESS_TYPE *startAddr) |
1234 | { |
1235 | CONTRACTL |
1236 | { |
1237 | SO_NOT_MAINLINE; |
1238 | THROWS; // from GetJitInfo |
1239 | GC_NOTRIGGER; |
1240 | MODE_ANY; // don't really care what mode we're in. |
1241 | |
1242 | PRECONDITION(ThisMaybeHelperThread()); |
1243 | } |
1244 | CONTRACTL_END; |
1245 | |
1246 | _ASSERTE(patch != NULL); |
1247 | _ASSERTE(!patch->IsILMasterPatch()); |
1248 | _ASSERTE(fd != NULL); |
1249 | |
1250 | // |
1251 | // Translate patch to address, if it hasn't been already. |
1252 | // |
1253 | |
1254 | if (patch->address != NULL) |
1255 | { |
1256 | return true; |
1257 | } |
1258 | |
1259 | if (startAddr == NULL) |
1260 | { |
1261 | if (patch->HasDJI() && patch->GetDJI()->m_jitComplete) |
1262 | { |
1263 | startAddr = (CORDB_ADDRESS_TYPE *) CORDB_ADDRESS_TO_PTR(patch->GetDJI()->m_addrOfCode); |
1264 | _ASSERTE(startAddr != NULL); |
1265 | } |
1266 | if (startAddr == NULL) |
1267 | { |
            // Should not be trying to place patches on MethodDescs for stubs.
1269 | // These stubs will never get jitted. |
1270 | CONSISTENCY_CHECK_MSGF(!fd->IsWrapperStub(), ("Can't place patch at stub md %p, %s::%s" , |
1271 | fd, fd->m_pszDebugClassName, fd->m_pszDebugMethodName)); |
1272 | |
1273 | startAddr = (CORDB_ADDRESS_TYPE *)g_pEEInterface->GetFunctionAddress(fd); |
1274 | // |
1275 | // Code is not available yet to patch. The prestub should |
1276 | // notify us when it is executed. |
1277 | // |
1278 | if (startAddr == NULL) |
1279 | { |
1280 | LOG((LF_CORDB, LL_INFO10000, |
1281 | "DC::BP:Patch at 0x%x not bindable yet.\n" , patch->offset)); |
1282 | |
1283 | return false; |
1284 | } |
1285 | } |
1286 | } |
1287 | |
1288 | _ASSERTE(!g_pEEInterface->IsStub((const BYTE *)startAddr)); |
1289 | |
1290 | // If we've jitted, map to a native offset. |
1291 | DebuggerJitInfo *info = g_pDebugger->GetJitInfo(fd, (const BYTE *)startAddr); |
1292 | |
1293 | #ifdef LOGGING |
1294 | if (info == NULL) |
1295 | { |
1296 | LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: For startAddr 0x%x, didn't find a DJI\n" , startAddr)); |
1297 | } |
1298 | #endif //LOGGING |
1299 | if (info != NULL) |
1300 | { |
1301 | // There is a strange case with prejitted code and unjitted trace patches. We can enter this function |
1302 | // with no DebuggerJitInfo created, then have the call just above this actually create the |
1303 | // DebuggerJitInfo, which causes JitComplete to be called, which causes all patches to be bound! If this |
        // happens, then we don't need to continue here (it's already been done recursively) and we don't need to
        // re-activate the patch, so we return false from right here. We can check this by seeing if we suddenly
1306 | // have the address in the patch set. |
1307 | if (patch->address != NULL) |
1308 | { |
            LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: patch bound recursively by GetJitInfo, bailing...\n"));
1310 | return false; |
1311 | } |
1312 | |
1313 | LOG((LF_CORDB,LL_INFO10000, "DC::BindPa: For startAddr 0x%x, got DJI " |
1314 | "0x%x, from 0x%x size: 0x%x\n" , startAddr, info, info->m_addrOfCode, info->m_sizeOfCode)); |
1315 | } |
1316 | |
1317 | LOG((LF_CORDB, LL_INFO10000, "DC::BP:Trying to bind patch in %s::%s version %d\n" , |
1318 | fd->m_pszDebugClassName, fd->m_pszDebugMethodName, info ? info->m_encVersion : (SIZE_T)-1)); |
1319 | |
1320 | _ASSERTE(g_patches != NULL); |
1321 | |
1322 | CORDB_ADDRESS_TYPE *addr = (CORDB_ADDRESS_TYPE *) |
1323 | CodeRegionInfo::GetCodeRegionInfo(NULL, NULL, startAddr).OffsetToAddress(patch->offset); |
1324 | g_patches->BindPatch(patch, addr); |
1325 | |
1326 | LOG((LF_CORDB, LL_INFO10000, "DC::BP:Binding patch at 0x%x(off:%x)\n" , addr, patch->offset)); |
1327 | |
1328 | return true; |
1329 | } |
1330 | |
1331 | // bool DebuggerController::ApplyPatch() applies |
1332 | // the patch described to the code, and |
1333 | // remembers the replaced opcode. Note that the same address |
1334 | // cannot be patched twice at the same time. |
1335 | // Grabs the opcode & stores in patch, then sets a break |
1336 | // instruction for either native or IL. |
1337 | // VirtualProtect & some macros. Returns false if anything |
1338 | // went bad. |
1339 | // DebuggerControllerPatch *patch: The patch, indicates where |
1340 | // to set the INT3 instruction |
1341 | // Returns: true if the user break instruction was successfully |
1342 | // placed into the code-stream, false otherwise |
1343 | bool DebuggerController::ApplyPatch(DebuggerControllerPatch *patch) |
1344 | { |
1345 | LOG((LF_CORDB, LL_INFO10000, "DC::ApplyPatch at addr 0x%p\n" , |
1346 | patch->address)); |
1347 | |
    // If we try to apply an already applied patch, we'll override our saved opcode
    // with the break opcode and end up getting a break in our patch bypass buffer.
1350 | _ASSERTE(!patch->IsActivated() ); |
1351 | _ASSERTE(patch->IsBound()); |
1352 | |
1353 | // Note we may be patching at certain "blessed" points in mscorwks. |
1354 | // This is very dangerous b/c we can't be sure patch->Address is blessed or not. |
1355 | |
1356 | |
1357 | // |
1358 | // Apply the patch. |
1359 | // |
1360 | _ASSERTE(!(g_pConfig->GetGCStressLevel() & (EEConfig::GCSTRESS_INSTR_JIT|EEConfig::GCSTRESS_INSTR_NGEN)) |
1361 | && "Debugger does not work with GCSTRESS 4" ); |
1362 | |
1363 | if (patch->IsNativePatch()) |
1364 | { |
1365 | if (patch->fSaveOpcode) |
1366 | { |
1367 | // We only used SaveOpcode for when we've moved code, so |
1368 | // the patch should already be there. |
1369 | patch->opcode = patch->opcodeSaved; |
1370 | _ASSERTE( AddressIsBreakpoint(patch->address) ); |
1371 | return true; |
1372 | } |
1373 | |
1374 | #if _DEBUG |
1375 | VerifyExecutableAddress((BYTE*)patch->address); |
1376 | #endif |
1377 | |
1378 | LPVOID baseAddress = (LPVOID)(patch->address); |
1379 | |
1380 | DWORD oldProt; |
1381 | |
1382 | if (!VirtualProtect(baseAddress, |
1383 | CORDbg_BREAK_INSTRUCTION_SIZE, |
1384 | PAGE_EXECUTE_READWRITE, &oldProt)) |
1385 | { |
1386 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1387 | return false; |
1388 | } |
1389 | |
1390 | patch->opcode = CORDbgGetInstruction(patch->address); |
1391 | |
1392 | CORDbgInsertBreakpoint((CORDB_ADDRESS_TYPE *)patch->address); |
1393 | LOG((LF_CORDB, LL_EVERYTHING, "Breakpoint was inserted at %p for opcode %x\n" , patch->address, patch->opcode)); |
1394 | |
1395 | if (!VirtualProtect(baseAddress, |
1396 | CORDbg_BREAK_INSTRUCTION_SIZE, |
1397 | oldProt, &oldProt)) |
1398 | { |
1399 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1400 | return false; |
1401 | } |
1402 | } |
1403 | // TODO: : determine if this is needed for AMD64 |
1404 | #if defined(_TARGET_X86_) //REVISIT_TODO what is this?! |
1405 | else |
1406 | { |
1407 | DWORD oldProt; |
1408 | |
1409 | // |
        // !!! IL patch logic assumes reference instruction encoding
1411 | // |
1412 | if (!VirtualProtect((void *) patch->address, 2, |
1413 | PAGE_EXECUTE_READWRITE, &oldProt)) |
1414 | { |
1415 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1416 | return false; |
1417 | } |
1418 | patch->opcode = |
1419 | (unsigned int) *(unsigned short*)(patch->address+1); |
1420 | |
1421 | _ASSERTE(patch->opcode != CEE_BREAK); |
1422 | |
1423 | *(unsigned short *) (patch->address+1) = CEE_BREAK; |
1424 | |
1425 | if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt)) |
1426 | { |
1427 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1428 | return false; |
1429 | } |
1430 | } |
1431 | #endif //_TARGET_X86_ |
1432 | |
1433 | return true; |
1434 | } |
1435 | |
1436 | // bool DebuggerController::UnapplyPatch() |
1437 | // UnapplyPatch removes the patch described by the patch. |
1438 | // (CopyOpcodeFromAddrToPatch, in reverse.) |
1439 | // Looks a lot like CopyOpcodeFromAddrToPatch, except that we use a macro to |
1440 | // copy the instruction back to the code-stream & immediately set the |
1441 | // opcode field to 0 so ReadMemory,WriteMemory will work right. |
1442 | // Note that it's very important to zero out the opcode field, as it |
1443 | // is used by the right side to determine if a patch is |
1444 | // valid or not. |
1445 | // NO LOCKING |
1446 | // DebuggerControllerPatch * patch: Patch to remove |
1447 | // Returns: true if the patch was unapplied, false otherwise |
1448 | bool DebuggerController::UnapplyPatch(DebuggerControllerPatch *patch) |
1449 | { |
1450 | _ASSERTE(patch->address != NULL); |
1451 | _ASSERTE(patch->IsActivated() ); |
1452 | |
1453 | LOG((LF_CORDB,LL_INFO1000, "DC::UP unapply patch at addr 0x%p\n" , |
1454 | patch->address)); |
1455 | |
1456 | if (patch->IsNativePatch()) |
1457 | { |
1458 | if (patch->fSaveOpcode) |
1459 | { |
1460 | // We're doing this for MoveCode, and we don't want to |
1461 | // overwrite something if we don't get moved far enough. |
1462 | patch->opcodeSaved = patch->opcode; |
1463 | InitializePRD(&(patch->opcode)); |
1464 | _ASSERTE( !patch->IsActivated() ); |
1465 | return true; |
1466 | } |
1467 | |
1468 | LPVOID baseAddress = (LPVOID)(patch->address); |
1469 | |
1470 | DWORD oldProt; |
1471 | |
1472 | if (!VirtualProtect(baseAddress, |
1473 | CORDbg_BREAK_INSTRUCTION_SIZE, |
1474 | PAGE_EXECUTE_READWRITE, &oldProt)) |
1475 | { |
1476 | // |
1477 | // We may be trying to remove a patch from memory |
1478 | // which has been unmapped. We can ignore the |
1479 | // error in this case. |
1480 | // |
1481 | InitializePRD(&(patch->opcode)); |
1482 | return false; |
1483 | } |
1484 | |
1485 | CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patch->address, patch->opcode); |
1486 | |
1487 | //VERY IMPORTANT to zero out opcode, else we might mistake |
        //this patch for an active one on ReadMem/WriteMem (see
1489 | //header file comment) |
1490 | InitializePRD(&(patch->opcode)); |
1491 | |
1492 | if (!VirtualProtect(baseAddress, |
1493 | CORDbg_BREAK_INSTRUCTION_SIZE, |
1494 | oldProt, &oldProt)) |
1495 | { |
1496 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1497 | return false; |
1498 | } |
1499 | } |
1500 | else |
1501 | { |
1502 | DWORD oldProt; |
1503 | |
1504 | if (!VirtualProtect((void *) patch->address, 2, |
1505 | PAGE_EXECUTE_READWRITE, &oldProt)) |
1506 | { |
1507 | // |
1508 | // We may be trying to remove a patch from memory |
1509 | // which has been unmapped. We can ignore the |
1510 | // error in this case. |
1511 | // |
1512 | InitializePRD(&(patch->opcode)); |
1513 | return false; |
1514 | } |
1515 | |
1516 | // |
1517 | // !!! IL patch logic assumes reference encoding |
1518 | // |
1519 | // TODO: : determine if this is needed for AMD64 |
1520 | #if defined(_TARGET_X86_) |
1521 | _ASSERTE(*(unsigned short*)(patch->address+1) == CEE_BREAK); |
1522 | |
1523 | *(unsigned short *) (patch->address+1) |
1524 | = (unsigned short) patch->opcode; |
1525 | #endif //this makes no sense on anything but X86 |
1526 | //VERY IMPORTANT to zero out opcode, else we might mistake |
        //this patch for an active one on ReadMem/WriteMem (see
        //header file comment)
1529 | InitializePRD(&(patch->opcode)); |
1530 | |
1531 | if (!VirtualProtect((void *) patch->address, 2, oldProt, &oldProt)) |
1532 | { |
1533 | _ASSERTE(!"VirtualProtect of code page failed" ); |
1534 | return false; |
1535 | } |
1536 | } |
1537 | |
1538 | _ASSERTE( !patch->IsActivated() ); |
1539 | _ASSERTE( patch->IsBound() ); |
1540 | return true; |
1541 | } |
1542 | |
1543 | // void DebuggerController::UnapplyPatchAt() |
1544 | // NO LOCKING |
1545 | // UnapplyPatchAt removes the patch from a copy of the patched code. |
1546 | // Like UnapplyPatch, except that we don't bother checking |
// memory permissions, and instead replace the breakpoint instruction
// with the saved opcode at an arbitrary memory address.
1549 | void DebuggerController::UnapplyPatchAt(DebuggerControllerPatch *patch, |
1550 | CORDB_ADDRESS_TYPE *address) |
1551 | { |
1552 | _ASSERTE(patch->IsBound() ); |
1553 | |
1554 | if (patch->IsNativePatch()) |
1555 | { |
1556 | CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)address, patch->opcode); |
1557 | //note that we don't have to zero out opcode field |
1558 | //since we're unapplying at something other than |
1559 | //the original spot. We assert this is true: |
1560 | _ASSERTE( patch->address != address ); |
1561 | } |
1562 | else |
1563 | { |
1564 | // |
1565 | // !!! IL patch logic assumes reference encoding |
1566 | // |
// TODO: determine if this is needed for AMD64
1568 | #ifdef _TARGET_X86_ |
1569 | _ASSERTE(*(unsigned short*)(address+1) == CEE_BREAK); |
1570 | |
1571 | *(unsigned short *) (address+1) |
1572 | = (unsigned short) patch->opcode; |
1573 | _ASSERTE( patch->address != address ); |
1574 | #endif // this makes no sense on anything but X86 |
1575 | } |
1576 | } |
1577 | |
1578 | // bool DebuggerController::IsPatched() Is there a patch at addr? |
// How: returns true if native && the instruction at addr is the break
// instruction for this platform.
1581 | bool DebuggerController::IsPatched(CORDB_ADDRESS_TYPE *address, BOOL native) |
1582 | { |
1583 | LIMITED_METHOD_CONTRACT; |
1584 | |
1585 | if (native) |
1586 | { |
1587 | return AddressIsBreakpoint(address); |
1588 | } |
1589 | else |
1590 | return false; |
1591 | } |
1592 | |
// PRD_TYPE DebuggerController::GetPatchedOpcode() Gets the opcode
// at addr, 'looking underneath' any patches if needed.
// GetPatchedOpcode is a function for the EE to call to "see through"
// a patch to the opcode that was patched over.
// How: Lock() grab opcode directly unless there's a patch, in
// which case grab it out of the patch table.
// CORDB_ADDRESS_TYPE * address: The address that we want to 'see through'
// Returns: PRD_TYPE value, that is, the opcode that would really be there
// if we hadn't placed a patch there. If we haven't placed a patch
// there, then we'll see the actual opcode at that address.
1603 | PRD_TYPE DebuggerController::GetPatchedOpcode(CORDB_ADDRESS_TYPE *address) |
1604 | { |
1605 | _ASSERTE(g_patches != NULL); |
1606 | |
1607 | PRD_TYPE opcode; |
1608 | ZeroMemory(&opcode, sizeof(opcode)); |
1609 | |
1610 | ControllerLockHolder lockController; |
1611 | |
1612 | // |
1613 | // Look for a patch at the address |
1614 | // |
1615 | |
1616 | DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)address); |
1617 | |
1618 | if (patch != NULL) |
1619 | { |
// Since we got the patch at this address, it must by definition be bound to that address
_ASSERTE( patch->IsBound() );
_ASSERTE( patch->address == address );
// If we're going to be returning its opcode, then the patch must also be activated
1624 | _ASSERTE( patch->IsActivated() ); |
1625 | opcode = patch->opcode; |
1626 | } |
1627 | else |
1628 | { |
1629 | // |
1630 | // Patch was not found - it either is not our patch, or it has |
1631 | // just been removed. In either case, just return the current |
1632 | // opcode. |
1633 | // |
1634 | |
1635 | if (g_pEEInterface->IsManagedNativeCode((const BYTE *)address)) |
1636 | { |
1637 | opcode = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)address); |
1638 | } |
// <REVISIT_TODO>
// TODO: determine if this is needed for AMD64
// </REVISIT_TODO>
#ifdef _TARGET_X86_
1643 | else |
1644 | { |
1645 | // |
1646 | // !!! IL patch logic assumes reference encoding |
1647 | // |
1648 | |
1649 | opcode = *(unsigned short*)(address+1); |
1650 | } |
1651 | #endif //_TARGET_X86_ |
1652 | |
1653 | } |
1654 | |
1655 | return opcode; |
1656 | } |
1657 | |
1658 | // Holding the controller lock, this will check if an address is patched, |
// and if so will then set the PRD_TYPE out parameter to the unpatched value.
1660 | BOOL DebuggerController::CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, |
1661 | /*OUT*/ PRD_TYPE *pOpcode) |
1662 | { |
1663 | CONTRACTL |
1664 | { |
1665 | SO_NOT_MAINLINE; // take Controller lock. |
1666 | NOTHROW; |
1667 | GC_NOTRIGGER; |
1668 | } |
1669 | CONTRACTL_END; |
1670 | |
1671 | _ASSERTE(g_patches != NULL); |
1672 | |
1673 | BOOL res; |
1674 | |
1675 | ControllerLockHolder lockController; |
1676 | |
1677 | // |
1678 | // Look for a patch at the address |
1679 | // |
1680 | |
1681 | if (IsAddressPatched(address)) |
1682 | { |
1683 | *pOpcode = GetPatchedOpcode(address); |
1684 | res = TRUE; |
1685 | } |
1686 | else |
1687 | { |
1688 | InitializePRD(pOpcode); |
1689 | res = FALSE; |
1690 | } |
1691 | |
1692 | |
1693 | return res; |
1694 | } |
1695 | |
1696 | // void DebuggerController::ActivatePatch() Place a breakpoint |
1697 | // so that threads will trip over this patch. |
// If there are any patches at the address already, then copy
// their opcode into this one & return. Otherwise,
// call ApplyPatch(patch). There is an implicit list of patches at this
// address by virtue of the fact that we can iterate through all the
// patches in the patch table with the same address.
1703 | // DebuggerControllerPatch *patch: The patch to activate |
1704 | /* static */ void DebuggerController::ActivatePatch(DebuggerControllerPatch *patch) |
1705 | { |
1706 | _ASSERTE(g_patches != NULL); |
1707 | _ASSERTE(patch != NULL); |
1708 | _ASSERTE(patch->IsBound() ); |
1709 | _ASSERTE(!patch->IsActivated() ); |
1710 | |
1711 | bool fApply = true; |
1712 | |
1713 | // |
1714 | // See if we already have an active patch at this address. |
1715 | // |
1716 | for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address); |
1717 | p != NULL; |
1718 | p = g_patches->GetNextPatch(p)) |
1719 | { |
1720 | if (p != patch) |
1721 | { |
1722 | // If we're going to skip activating 'patch' because 'p' already exists at the same address |
1723 | // then 'p' must be activated. We expect that all bound patches are activated. |
1724 | _ASSERTE( p->IsActivated() ); |
1725 | patch->opcode = p->opcode; |
1726 | fApply = false; |
1727 | break; |
1728 | } |
1729 | } |
1730 | |
1731 | // |
1732 | // This is the only patch at this address - apply the patch |
1733 | // to the code. |
1734 | // |
1735 | if (fApply) |
1736 | { |
1737 | ApplyPatch(patch); |
1738 | } |
1739 | |
1740 | _ASSERTE(patch->IsActivated() ); |
1741 | } |
1742 | |
1743 | // void DebuggerController::DeactivatePatch() Make sure that a |
1744 | // patch won't be hit. |
1745 | // How: If this patch is the last one at this address, then |
1746 | // UnapplyPatch. The caller should then invoke RemovePatch to remove the |
1747 | // patch from the patch table. |
1748 | // DebuggerControllerPatch *patch: Patch to deactivate |
1749 | void DebuggerController::DeactivatePatch(DebuggerControllerPatch *patch) |
1750 | { |
1751 | _ASSERTE(g_patches != NULL); |
1752 | |
1753 | if( !patch->IsBound() ) { |
1754 | // patch is not bound, nothing to do |
1755 | return; |
1756 | } |
1757 | |
1758 | // We expect that all bound patches are also activated. |
// One exception to this is if the shutdown thread killed another thread right after
// it deactivated a patch but before it got to remove it.
1761 | _ASSERTE(patch->IsActivated() ); |
1762 | |
1763 | bool fUnapply = true; |
1764 | |
1765 | // |
1766 | // See if we already have an active patch at this address. |
1767 | // |
1768 | for (DebuggerControllerPatch *p = g_patches->GetPatch(patch->address); |
1769 | p != NULL; |
1770 | p = g_patches->GetNextPatch(p)) |
1771 | { |
1772 | if (p != patch) |
1773 | { |
// There is another patch at this address, so don't restore the original instruction.
// However, clear this patch's opcode so that we no longer consider it activated.
1776 | fUnapply = false; |
1777 | InitializePRD(&(patch->opcode)); |
1778 | break; |
1779 | } |
1780 | } |
1781 | |
1782 | if (fUnapply) |
1783 | { |
1784 | UnapplyPatch(patch); |
1785 | } |
1786 | |
1787 | _ASSERTE(!patch->IsActivated() ); |
1788 | |
1789 | // |
1790 | // Patch must now be removed from the table. |
1791 | // |
1792 | } |
1793 | |
1794 | // AddILMasterPatch: record a patch on IL code but do not bind it or activate it. The master b.p. |
1795 | // is associated with a module/token pair. It is used later |
1796 | // (e.g. in MapAndBindFunctionPatches) to create one or more "slave" |
1797 | // breakpoints which are associated with particular MethodDescs/JitInfos. |
1798 | // |
1799 | // Rationale: For generic code a single IL patch (e.g a breakpoint) |
1800 | // may give rise to several patches, one for each JITting of |
1801 | // the IL (i.e. generic code may be JITted multiple times for |
1802 | // different instantiations). |
1803 | // |
1804 | // So we keep one patch which describes |
1805 | // the breakpoint but which is never actually bound or activated. |
1806 | // This is then used to apply new "slave" patches to all copies of |
1807 | // JITted code associated with the method. |
1808 | // |
1809 | // <REVISIT_TODO>In theory we could bind and apply the master patch when the |
1810 | // code is known not to be generic (as used to happen to all breakpoint |
1811 | // patches in V1). However this seems like a premature |
1812 | // optimization.</REVISIT_TODO> |
1813 | DebuggerControllerPatch *DebuggerController::AddILMasterPatch(Module *module, |
1814 | mdMethodDef md, |
1815 | MethodDesc *pMethodDescFilter, |
1816 | SIZE_T offset, |
1817 | BOOL offsetIsIL, |
1818 | SIZE_T encVersion) |
1819 | { |
1820 | CONTRACTL |
1821 | { |
1822 | THROWS; |
1823 | MODE_ANY; |
1824 | GC_NOTRIGGER; |
1825 | } |
1826 | CONTRACTL_END; |
1827 | |
1828 | _ASSERTE(g_patches != NULL); |
1829 | |
1830 | ControllerLockHolder ch; |
1831 | |
1832 | |
1833 | DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef(this, |
1834 | module, |
1835 | md, |
1836 | pMethodDescFilter, |
1837 | offset, |
1838 | offsetIsIL, |
1839 | PATCH_KIND_IL_MASTER, |
1840 | LEAF_MOST_FRAME, |
1841 | NULL, |
1842 | encVersion, |
1843 | NULL); |
1844 | |
1845 | LOG((LF_CORDB, LL_INFO10000, |
1846 | "DC::AP: Added IL master patch 0x%p for mdTok 0x%x, desc 0x%p at %s offset %d encVersion %d\n" , |
1847 | patch, md, pMethodDescFilter, offsetIsIL ? "il" : "native" , offset, encVersion)); |
1848 | |
1849 | return patch; |
1850 | } |
1851 | |
1852 | // See notes above on AddILMasterPatch |
1853 | BOOL DebuggerController::AddBindAndActivateILSlavePatch(DebuggerControllerPatch *master, |
1854 | DebuggerJitInfo *dji) |
1855 | { |
1856 | _ASSERTE(g_patches != NULL); |
1857 | _ASSERTE(master->IsILMasterPatch()); |
1858 | _ASSERTE(dji != NULL); |
1859 | |
1860 | BOOL result = FALSE; |
1861 | |
1862 | if (!master->offsetIsIL) |
1863 | { |
1864 | // Zero is the only native offset that we allow to bind across different jitted |
1865 | // code bodies. |
1866 | _ASSERTE(master->offset == 0); |
1867 | INDEBUG(BOOL fOk = ) |
1868 | AddBindAndActivatePatchForMethodDesc(dji->m_fd, dji, |
1869 | 0, PATCH_KIND_IL_SLAVE, |
1870 | LEAF_MOST_FRAME, m_pAppDomain); |
1871 | _ASSERTE(fOk); |
1872 | result = TRUE; |
1873 | } |
1874 | else // bind by IL offset |
1875 | { |
1876 | // Do not dereference the "master" pointer in the loop! The loop may add more patches, |
1877 | // causing the patch table to grow and move. |
1878 | SIZE_T masterILOffset = master->offset; |
1879 | |
// Loop through all the native offsets mapped to the given IL offset. On x86 the mapping
// should be 1:1. On WIN64, because there are funclets, we may have a 1:N mapping.
1882 | DebuggerJitInfo::ILToNativeOffsetIterator it; |
1883 | for (dji->InitILToNativeOffsetIterator(it, masterILOffset); !it.IsAtEnd(); it.Next()) |
1884 | { |
1885 | BOOL fExact; |
1886 | SIZE_T offsetNative = it.Current(&fExact); |
1887 | |
1888 | // We special case offset 0, which is when a breakpoint is set |
1889 | // at the beginning of a method that hasn't been jitted yet. In |
1890 | // that case it's possible that offset 0 has been optimized out, |
1891 | // but we still want to set the closest breakpoint to that. |
1892 | if (!fExact && (masterILOffset != 0)) |
1893 | { |
1894 | LOG((LF_CORDB, LL_INFO10000, "DC::BP:Failed to bind patch at IL offset 0x%p in %s::%s\n" , |
1895 | masterILOffset, dji->m_fd->m_pszDebugClassName, dji->m_fd->m_pszDebugMethodName)); |
1896 | |
1897 | continue; |
1898 | } |
1899 | else |
1900 | { |
1901 | result = TRUE; |
1902 | } |
1903 | |
1904 | INDEBUG(BOOL fOk = ) |
1905 | AddBindAndActivatePatchForMethodDesc(dji->m_fd, dji, |
1906 | offsetNative, PATCH_KIND_IL_SLAVE, |
1907 | LEAF_MOST_FRAME, m_pAppDomain); |
1908 | _ASSERTE(fOk); |
1909 | } |
1910 | } |
1911 | |
1912 | // As long as we have successfully bound at least one patch, we consider the operation successful. |
1913 | return result; |
1914 | } |
1915 | |
1916 | |
1917 | |
1918 | // This routine places a patch that is conceptually a patch on the IL code. |
1919 | // The IL code may be jitted multiple times, e.g. due to generics. |
1920 | // This routine ensures that both present and subsequent JITtings of code will |
1921 | // also be patched. |
1922 | // |
1923 | // This routine will return FALSE only if we will _never_ be able to |
1924 | // place the patch in any native code corresponding to the given offset. |
1925 | // Otherwise it will: |
1926 | // (a) record a "master" patch |
1927 | // (b) apply as many slave patches as it can to existing copies of code |
1928 | // that have debugging information |
1929 | BOOL DebuggerController::AddILPatch(AppDomain * pAppDomain, Module *module, |
1930 | mdMethodDef md, |
1931 | MethodDesc *pMethodDescFilter, |
1932 | SIZE_T encVersion, // what encVersion does this apply to? |
1933 | SIZE_T offset, |
1934 | BOOL offsetIsIL) |
1935 | { |
1936 | _ASSERTE(g_patches != NULL); |
1937 | _ASSERTE(md != NULL); |
1938 | _ASSERTE(module != NULL); |
1939 | |
1940 | BOOL fOk = FALSE; |
1941 | |
1942 | DebuggerMethodInfo *dmi = g_pDebugger->GetOrCreateMethodInfo(module, md); // throws |
1943 | if (dmi == NULL) |
1944 | { |
1945 | return false; |
1946 | } |
1947 | |
1948 | EX_TRY |
1949 | { |
// OK, we either have (a) no code at all or (b) we have both JIT information and code.
// Either way, lay down the MasterPatch.
1953 | // |
1954 | // MapAndBindFunctionPatches will take care of any instantiations that haven't |
1955 | // finished JITting, by making a copy of the master breakpoint. |
1956 | DebuggerControllerPatch *master = AddILMasterPatch(module, md, pMethodDescFilter, offset, offsetIsIL, encVersion); |
1957 | |
1958 | // We have to keep the index here instead of the pointer. The loop below adds more patches, |
1959 | // which may cause the patch table to grow and move. |
1960 | ULONG masterIndex = g_patches->GetItemIndex((HASHENTRY*)master); |
1961 | |
1962 | // Iterate through every existing NativeCodeBlob (with the same EnC version). |
1963 | // This includes generics + prejitted code. |
1964 | DebuggerMethodInfo::DJIIterator it; |
1965 | dmi->IterateAllDJIs(pAppDomain, NULL /* module filter */, pMethodDescFilter, &it); |
1966 | |
1967 | if (it.IsAtEnd()) |
1968 | { |
1969 | // It is okay if we don't have any DJIs yet. It just means that the method hasn't been jitted. |
1970 | fOk = TRUE; |
1971 | } |
1972 | else |
1973 | { |
1974 | // On the other hand, if the method has been jitted, then we expect to be able to bind at least |
1975 | // one breakpoint. The exception is when we have multiple EnC versions of the method, in which |
1976 | // case it is ok if we don't bind any breakpoint. One scenario is when a method has been updated |
1977 | // via EnC but it's not yet jitted. We need to allow a debugger to put a breakpoint on the new |
1978 | // version of the method, but the new version won't have a DJI yet. |
1979 | BOOL fVersionMatch = FALSE; |
1980 | while(!it.IsAtEnd()) |
1981 | { |
1982 | DebuggerJitInfo *dji = it.Current(); |
1983 | _ASSERTE(dji->m_jitComplete); |
1984 | if (dji->m_encVersion == encVersion && |
1985 | (pMethodDescFilter == NULL || pMethodDescFilter == dji->m_fd)) |
1986 | { |
1987 | fVersionMatch = TRUE; |
1988 | |
1989 | master = (DebuggerControllerPatch *)g_patches->GetEntryPtr(masterIndex); |
1990 | |
1991 | // <REVISIT_TODO> If we're missing JIT info for any then |
1992 | // we won't have applied the bp to every instantiation. That should probably be reported |
1993 | // as a new kind of condition to the debugger, i.e. report "bp only partially applied". It would be |
// a shame to completely fail just because one instantiation is missing debug info: e.g. just because
1995 | // one component hasn't been prejitted with debugging information.</REVISIT_TODO> |
1996 | fOk = (AddBindAndActivateILSlavePatch(master, dji) || fOk); |
1997 | } |
1998 | it.Next(); |
1999 | } |
2000 | |
2001 | // This is the exceptional case referred to in the comment above. If we fail to put a breakpoint |
2002 | // because we don't have a matching version of the method, we need to return TRUE. |
2003 | if (fVersionMatch == FALSE) |
2004 | { |
2005 | fOk = TRUE; |
2006 | } |
2007 | } |
2008 | } |
2009 | EX_CATCH |
2010 | { |
2011 | fOk = FALSE; |
2012 | } |
2013 | EX_END_CATCH(SwallowAllExceptions) |
2014 | return fOk; |
2015 | } |
2016 | |
2017 | // Add a patch at native-offset 0 in the latest version of the method. |
2018 | // This is used by step-in. |
2019 | // Calls to new methods always go to the latest version, so EnC is not an issue here. |
// The method may not be jitted yet, or it may be prejitted.
2021 | void DebuggerController::AddPatchToStartOfLatestMethod(MethodDesc * fd) |
2022 | { |
2023 | CONTRACTL |
2024 | { |
2025 | SO_NOT_MAINLINE; |
2026 | THROWS; // from GetJitInfo |
2027 | GC_NOTRIGGER; |
2028 | MODE_ANY; // don't really care what mode we're in. |
2029 | |
2030 | PRECONDITION(ThisMaybeHelperThread()); |
2031 | PRECONDITION(CheckPointer(fd)); |
2032 | } |
2033 | CONTRACTL_END; |
2034 | |
2035 | _ASSERTE(g_patches != NULL); |
2036 | Module* pModule = fd->GetModule(); |
2037 | mdToken defToken = fd->GetMemberDef(); |
2038 | DebuggerMethodInfo* pDMI = g_pDebugger->GetOrCreateMethodInfo(pModule, defToken); |
2039 | DebuggerController::AddILPatch(GetAppDomain(), pModule, defToken, fd, pDMI->GetCurrentEnCVersion(), 0, FALSE); |
2040 | return; |
2041 | } |
2042 | |
2043 | |
2044 | // Place patch in method at native offset. |
2045 | BOOL DebuggerController::AddBindAndActivateNativeManagedPatch(MethodDesc * fd, |
2046 | DebuggerJitInfo *dji, |
2047 | SIZE_T offsetNative, |
2048 | FramePointer fp, |
2049 | AppDomain *pAppDomain) |
2050 | { |
2051 | CONTRACTL |
2052 | { |
2053 | SO_NOT_MAINLINE; |
2054 | THROWS; // from GetJitInfo |
2055 | GC_NOTRIGGER; |
2056 | MODE_ANY; // don't really care what mode we're in. |
2057 | |
2058 | PRECONDITION(ThisMaybeHelperThread()); |
2059 | PRECONDITION(CheckPointer(fd)); |
2060 | PRECONDITION(fd->IsDynamicMethod() || (dji != NULL)); |
2061 | } |
2062 | CONTRACTL_END; |
2063 | |
2064 | // For non-dynamic methods, we always expect to have a DJI, but just in case, we don't want the assert to AV. |
2065 | _ASSERTE((dji == NULL) || (fd == dji->m_fd)); |
2066 | _ASSERTE(g_patches != NULL); |
2067 | return DebuggerController::AddBindAndActivatePatchForMethodDesc(fd, dji, offsetNative, PATCH_KIND_NATIVE_MANAGED, fp, pAppDomain); |
2068 | } |
2069 | |
2070 | // Adds a breakpoint at a specific native offset in a particular jitted code version |
2071 | BOOL DebuggerController::AddBindAndActivatePatchForMethodDesc(MethodDesc *fd, |
2072 | DebuggerJitInfo *dji, |
2073 | SIZE_T nativeOffset, |
2074 | DebuggerPatchKind kind, |
2075 | FramePointer fp, |
2076 | AppDomain *pAppDomain) |
2077 | { |
2078 | CONTRACTL |
2079 | { |
2080 | SO_NOT_MAINLINE; |
2081 | THROWS; |
2082 | GC_NOTRIGGER; |
2083 | MODE_ANY; // don't really care what mode we're in. |
2084 | |
2085 | PRECONDITION(ThisMaybeHelperThread()); |
2086 | PRECONDITION(kind != PATCH_KIND_IL_MASTER); |
2087 | } |
2088 | CONTRACTL_END; |
2089 | |
2090 | BOOL ok = FALSE; |
2091 | ControllerLockHolder ch; |
2092 | |
2093 | LOG((LF_CORDB|LF_ENC,LL_INFO10000,"DC::AP: Add to %s::%s, at offs 0x%x " |
2094 | "fp:0x%x AD:0x%x\n" , fd->m_pszDebugClassName, |
2095 | fd->m_pszDebugMethodName, |
2096 | nativeOffset, fp.GetSPValue(), pAppDomain)); |
2097 | |
2098 | DebuggerControllerPatch *patch = g_patches->AddPatchForMethodDef( |
2099 | this, |
2100 | g_pEEInterface->MethodDescGetModule(fd), |
2101 | fd->GetMemberDef(), |
2102 | NULL, |
2103 | nativeOffset, |
2104 | FALSE, |
2105 | kind, |
2106 | fp, |
2107 | pAppDomain, |
2108 | NULL, |
2109 | dji); |
2110 | |
2111 | if (DebuggerController::BindPatch(patch, fd, NULL)) |
2112 | { |
2113 | LOG((LF_CORDB|LF_ENC,LL_INFO1000,"BindPatch went fine, doing ActivatePatch\n" )); |
2114 | DebuggerController::ActivatePatch(patch); |
2115 | ok = TRUE; |
2116 | } |
2117 | |
2118 | return ok; |
2119 | } |
2120 | |
2121 | |
2122 | // This version is particularly useful b/c it doesn't assume that the |
2123 | // patch is inside a managed method. |
2124 | DebuggerControllerPatch *DebuggerController::AddAndActivateNativePatchForAddress(CORDB_ADDRESS_TYPE *address, |
2125 | FramePointer fp, |
2126 | bool managed, |
2127 | TraceType traceType) |
2128 | { |
2129 | CONTRACTL |
2130 | { |
2131 | THROWS; |
2132 | MODE_ANY; |
2133 | GC_NOTRIGGER; |
2134 | |
2135 | PRECONDITION(g_patches != NULL); |
2136 | } |
2137 | CONTRACTL_END; |
2138 | |
2139 | |
2140 | ControllerLockHolder ch; |
2141 | |
2142 | DebuggerControllerPatch *patch |
2143 | = g_patches->AddPatchForAddress(this, |
2144 | NULL, |
2145 | 0, |
2146 | (managed? PATCH_KIND_NATIVE_MANAGED : PATCH_KIND_NATIVE_UNMANAGED), |
2147 | address, |
2148 | fp, |
2149 | NULL, |
2150 | NULL, |
2151 | DebuggerPatchTable::DCP_PID_INVALID, |
2152 | traceType); |
2153 | |
2154 | ActivatePatch(patch); |
2155 | |
2156 | return patch; |
2157 | } |
2158 | |
2159 | void DebuggerController::RemovePatchesFromModule(Module *pModule, AppDomain *pAppDomain ) |
2160 | { |
2161 | CONTRACTL |
2162 | { |
2163 | SO_NOT_MAINLINE; |
2164 | NOTHROW; |
2165 | GC_NOTRIGGER; |
2166 | } |
2167 | CONTRACTL_END; |
2168 | |
2169 | LOG((LF_CORDB, LL_INFO100000, "DPT::CPFM mod:0x%p (%S)\n" , |
2170 | pModule, pModule->GetDebugName())); |
2171 | |
2172 | // First find all patches of interest |
2173 | DebuggerController::ControllerLockHolder ch; |
2174 | HASHFIND f; |
2175 | for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f); |
2176 | patch != NULL; |
2177 | patch = g_patches->GetNextPatch(&f)) |
2178 | { |
2179 | // Skip patches not in the specified domain |
2180 | if ((pAppDomain != NULL) && (patch->pAppDomain != pAppDomain)) |
2181 | continue; |
2182 | |
2183 | BOOL fRemovePatch = FALSE; |
2184 | |
// Remove both native and IL patches that belong to this module
2186 | if (patch->HasDJI()) |
2187 | { |
2188 | DebuggerJitInfo * dji = patch->GetDJI(); |
2189 | |
2190 | _ASSERTE(patch->key.module == dji->m_fd->GetModule()); |
2191 | |
2192 | // It is not necessary to check for m_fd->GetModule() here. It will |
2193 | // be covered by other module unload notifications issued for the appdomain. |
2194 | if ( dji->m_pLoaderModule == pModule ) |
2195 | fRemovePatch = TRUE; |
2196 | } |
2197 | else |
2198 | if (patch->key.module == pModule) |
2199 | { |
2200 | fRemovePatch = TRUE; |
2201 | } |
2202 | |
2203 | if (fRemovePatch) |
2204 | { |
2205 | LOG((LF_CORDB, LL_EVERYTHING, "Removing patch 0x%p\n" , |
2206 | patch)); |
2207 | // we shouldn't be both hitting this patch AND |
2208 | // unloading the module it belongs to. |
2209 | _ASSERTE(!patch->IsTriggering()); |
2210 | Release( patch ); |
2211 | } |
2212 | } |
2213 | } |
2214 | |
2215 | #ifdef _DEBUG |
2216 | bool DebuggerController::ModuleHasPatches( Module* pModule ) |
2217 | { |
2218 | CONTRACTL |
2219 | { |
2220 | SO_NOT_MAINLINE; |
2221 | NOTHROW; |
2222 | GC_NOTRIGGER; |
2223 | } |
2224 | CONTRACTL_END; |
2225 | |
2226 | if( g_patches == NULL ) |
2227 | { |
2228 | // Patch table hasn't been initialized |
2229 | return false; |
2230 | } |
2231 | |
2232 | // First find all patches of interest |
2233 | HASHFIND f; |
2234 | for (DebuggerControllerPatch *patch = g_patches->GetFirstPatch(&f); |
2235 | patch != NULL; |
2236 | patch = g_patches->GetNextPatch(&f)) |
2237 | { |
2238 | // |
2239 | // This mirrors logic in code:DebuggerController::RemovePatchesFromModule |
2240 | // |
2241 | |
2242 | if (patch->HasDJI()) |
2243 | { |
2244 | DebuggerJitInfo * dji = patch->GetDJI(); |
2245 | |
2246 | _ASSERTE(patch->key.module == dji->m_fd->GetModule()); |
2247 | |
// It may be sufficient to just check m_pLoaderModule here. Since this is a debug-only
// check, we check m_fd->GetModule() as well to catch more potential problems.
2250 | if ( (dji->m_pLoaderModule == pModule) || (dji->m_fd->GetModule() == pModule) ) |
2251 | { |
2252 | return true; |
2253 | } |
2254 | } |
2255 | |
2256 | if (patch->key.module == pModule) |
2257 | { |
2258 | return true; |
2259 | } |
2260 | } |
2261 | |
2262 | return false; |
2263 | } |
2264 | #endif // _DEBUG |
2265 | |
2266 | // |
2267 | // Returns true if the given address is in an internal helper |
// function, false if it's not.
2269 | // |
2270 | // This is a temporary workaround function to avoid having us stop in |
2271 | // unmanaged code belonging to the Runtime during a StepIn operation. |
2272 | // |
2273 | static bool _AddrIsJITHelper(PCODE addr) |
2274 | { |
2275 | #if !defined(_WIN64) && !defined(FEATURE_PAL) |
2276 | // Is the address in the runtime dll (clr.dll or coreclr.dll) at all? (All helpers are in |
2277 | // that dll) |
2278 | if (g_runtimeLoadedBaseAddress <= addr && |
2279 | addr < g_runtimeLoadedBaseAddress + g_runtimeVirtualSize) |
2280 | { |
2281 | for (int i = 0; i < CORINFO_HELP_COUNT; i++) |
2282 | { |
2283 | if (hlpFuncTable[i].pfnHelper == (void*)addr) |
2284 | { |
2285 | LOG((LF_CORDB, LL_INFO10000, |
2286 | "_ANIM: address of helper function found: 0x%08x\n" , |
2287 | addr)); |
2288 | return true; |
2289 | } |
2290 | } |
2291 | |
2292 | for (unsigned d = 0; d < DYNAMIC_CORINFO_HELP_COUNT; d++) |
2293 | { |
2294 | if (hlpDynamicFuncTable[d].pfnHelper == (void*)addr) |
2295 | { |
2296 | LOG((LF_CORDB, LL_INFO10000, |
2297 | "_ANIM: address of helper function found: 0x%08x\n" , |
2298 | addr)); |
2299 | return true; |
2300 | } |
2301 | } |
2302 | |
2303 | LOG((LF_CORDB, LL_INFO10000, |
2304 | "_ANIM: address within runtime dll, but not a helper function " |
2305 | "0x%08x\n" , addr)); |
2306 | } |
2307 | #else // !defined(_WIN64) && !defined(FEATURE_PAL) |
2308 | // TODO: Figure out what we want to do here |
2309 | #endif // !defined(_WIN64) && !defined(FEATURE_PAL) |
2310 | |
2311 | return false; |
2312 | } |
2313 | |
2314 | // bool DebuggerController::PatchTrace() What: Invoke |
2315 | // AddPatch depending on the type of the given TraceDestination. |
2316 | // How: Invokes AddPatch based on the trace type: TRACE_OTHER will |
2317 | // return false, the others will obtain args for a call to an AddPatch |
2318 | // method & return true. |
2319 | // |
2320 | // Return true if we set a patch, else false |
2321 | bool DebuggerController::PatchTrace(TraceDestination *trace, |
2322 | FramePointer fp, |
2323 | bool fStopInUnmanaged) |
2324 | { |
2325 | CONTRACTL |
2326 | { |
2327 | THROWS; // Because AddPatch may throw on oom. We may want to convert this to nothrow and return false. |
2328 | MODE_ANY; |
2329 | DISABLED(GC_TRIGGERS); // @todo - what should this be? |
2330 | |
2331 | PRECONDITION(ThisMaybeHelperThread()); |
2332 | } |
2333 | CONTRACTL_END; |
2334 | DebuggerControllerPatch *dcp = NULL; |
2335 | SIZE_T nativeOffset = 0; |
2336 | |
2337 | switch (trace->GetTraceType()) |
2338 | { |
2339 | case TRACE_ENTRY_STUB: // fall through |
2340 | case TRACE_UNMANAGED: |
2341 | LOG((LF_CORDB, LL_INFO10000, |
2342 | "DC::PT: Setting unmanaged trace patch at 0x%p(%p)\n" , |
2343 | trace->GetAddress(), fp.GetSPValue())); |
2344 | |
2345 | if (fStopInUnmanaged && !_AddrIsJITHelper(trace->GetAddress())) |
2346 | { |
2347 | AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(), |
2348 | fp, |
2349 | FALSE, |
2350 | trace->GetTraceType()); |
2351 | return true; |
2352 | } |
2353 | else |
2354 | { |
2355 | LOG((LF_CORDB, LL_INFO10000, "DC::PT: decided to NOT " |
2356 | "place a patch in unmanaged code\n" )); |
2357 | return false; |
2358 | } |
2359 | |
2360 | case TRACE_MANAGED: |
2361 | LOG((LF_CORDB, LL_INFO10000, |
2362 | "Setting managed trace patch at 0x%p(%p)\n" , trace->GetAddress(), fp.GetSPValue())); |
2363 | |
2364 | MethodDesc *fd; |
2365 | fd = g_pEEInterface->GetNativeCodeMethodDesc(trace->GetAddress()); |
2366 | _ASSERTE(fd); |
2367 | |
2368 | DebuggerJitInfo *dji; |
2369 | dji = g_pDebugger->GetJitInfoFromAddr(trace->GetAddress()); |
2370 | //_ASSERTE(dji); //we'd like to assert this, but attach won't work |
2371 | |
2372 | nativeOffset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset((const BYTE *)trace->GetAddress()); |
2373 | |
// Code versioning allows calls to be redirected to alternate code potentially after this trace is complete but before
// execution reaches the call target. Rather than bind the breakpoint to the specific jitted code instance that is currently
// configured to receive execution, we need to prepare for that potential retargeting by binding all jitted code instances.
//
// Triggering this based off the native offset is a little subtle, but all of the stub managers follow a rule that if they
// trace across a call boundary into jitted code they either stop at offset zero of the new method, or they continue tracing
// out of that jitted code.
2381 | if (nativeOffset == 0) |
2382 | { |
2383 | AddPatchToStartOfLatestMethod(fd); |
2384 | } |
2385 | else |
2386 | { |
2387 | AddBindAndActivateNativeManagedPatch(fd, dji, nativeOffset, fp, NULL); |
2388 | } |
2389 | |
2390 | |
2391 | return true; |
2392 | |
2393 | case TRACE_UNJITTED_METHOD: |
// trace->address is actually a MethodDesc* of the method that we'll
// soon JIT, so put a breakpoint at native offset zero in it.
2396 | LOG((LF_CORDB, LL_INFO10000, |
2397 | "Setting unjitted method patch in MethodDesc 0x%p %s\n" , trace->GetMethodDesc(), trace->GetMethodDesc() ? trace->GetMethodDesc()->m_pszDebugMethodName : "" )); |
2398 | |
2399 | // Note: we have to make sure to bind here. If this function is prejitted, this may be our only chance to get a |
2400 | // DebuggerJITInfo and thereby cause a JITComplete callback. |
2401 | AddPatchToStartOfLatestMethod(trace->GetMethodDesc()); |
2402 | return true; |
2403 | |
2404 | case TRACE_FRAME_PUSH: |
2405 | LOG((LF_CORDB, LL_INFO10000, |
2406 | "Setting frame patch at 0x%p(%p)\n" , trace->GetAddress(), fp.GetSPValue())); |
2407 | |
2408 | AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(), |
2409 | fp, |
2410 | TRUE, |
2411 | TRACE_FRAME_PUSH); |
2412 | return true; |
2413 | |
2414 | case TRACE_MGR_PUSH: |
2415 | LOG((LF_CORDB, LL_INFO10000, |
2416 | "Setting frame patch (TRACE_MGR_PUSH) at 0x%p(%p)\n" , |
2417 | trace->GetAddress(), fp.GetSPValue())); |
2418 | |
2419 | dcp = AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)trace->GetAddress(), |
2420 | LEAF_MOST_FRAME, // But Mgr_push can't have fp affinity! |
2421 | TRUE, |
2422 | DPT_DEFAULT_TRACE_TYPE); // TRACE_OTHER |
2423 | // Now copy over the trace field since TriggerPatch will expect this |
2424 | // to be set for this case. |
2425 | if (dcp != NULL) |
2426 | { |
2427 | dcp->trace = *trace; |
2428 | } |
2429 | |
2430 | return true; |
2431 | |
2432 | case TRACE_OTHER: |
2433 | LOG((LF_CORDB, LL_INFO10000, |
2434 | "Can't set a trace patch for TRACE_OTHER...\n" )); |
2435 | return false; |
2436 | |
2437 | default: |
2438 | _ASSERTE(0); |
2439 | return false; |
2440 | } |
2441 | } |
2442 | |
2443 | //----------------------------------------------------------------------------- |
2444 | // Checks if the patch matches the context + thread. |
2445 | // Multiple patches can exist at a single address, so given a patch at the |
2446 | // Context's current address, this does additional patch-affinity checks like |
2447 | // thread, AppDomain, and frame-pointer. |
2448 | // thread - thread executing the given context that hit the patch |
2449 | // context - context of the thread that hit the patch |
// patch - candidate patch that we're checking for a match.
// Returns:
// True if the patch matches.
// False otherwise.
2454 | //----------------------------------------------------------------------------- |
2455 | bool DebuggerController::MatchPatch(Thread *thread, |
2456 | CONTEXT *context, |
2457 | DebuggerControllerPatch *patch) |
2458 | { |
2459 | LOG((LF_CORDB, LL_INFO100000, "DC::MP: EIP:0x%p\n" , GetIP(context))); |
2460 | |
2461 | // Caller should have already matched our addresses. |
2462 | if (patch->address != dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context))) |
2463 | { |
2464 | return false; |
2465 | } |
2466 | |
2467 | // <BUGNUM>RAID 67173 -</BUGNUM> we'll make sure that intermediate patches have NULL |
2468 | // pAppDomain so that we don't end up running to completion when |
2469 | // the appdomain switches halfway through a step. |
2470 | if (patch->pAppDomain != NULL) |
2471 | { |
2472 | AppDomain *pAppDomainCur = thread->GetDomain(); |
2473 | |
2474 | if (pAppDomainCur != patch->pAppDomain) |
2475 | { |
2476 | LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c of " |
2477 | "appdomains!\n" )); |
2478 | return false; |
2479 | } |
2480 | } |
2481 | |
2482 | if (patch->controller->m_thread != NULL && patch->controller->m_thread != thread) |
2483 | { |
2484 | LOG((LF_CORDB, LL_INFO10000, "DC::MP: patches didn't match b/c threads\n" )); |
2485 | return false; |
2486 | } |
2487 | |
2488 | if (patch->fp != LEAF_MOST_FRAME) |
2489 | { |
// If we specified a frame pointer, then it should have been safe to take a stack trace.
2491 | |
2492 | ControllerStackInfo info; |
2493 | StackTraceTicket ticket(patch); |
2494 | info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context); |
2495 | |
2496 | // !!! This check should really be != , but there is some ambiguity about which frame is the parent frame |
2497 | // in the destination returned from Frame::TraceFrame, so this allows some slop there. |
2498 | |
2499 | if (info.HasReturnFrame() && IsCloserToLeaf(info.m_returnFrame.fp, patch->fp)) |
2500 | { |
2501 | LOG((LF_CORDB, LL_INFO10000, "Patch hit but frame not matched at %p (current=%p, patch=%p)\n" , |
2502 | patch->address, info.m_returnFrame.fp.GetSPValue(), patch->fp.GetSPValue())); |
2503 | |
2504 | return false; |
2505 | } |
2506 | } |
2507 | |
2508 | LOG((LF_CORDB, LL_INFO100000, "DC::MP: Returning true" )); |
2509 | |
2510 | return true; |
2511 | } |
2512 | |
2513 | DebuggerPatchSkip *DebuggerController::ActivatePatchSkip(Thread *thread, |
2514 | const BYTE *PC, |
2515 | BOOL fForEnC) |
2516 | { |
2517 | #ifdef _DEBUG |
2518 | BOOL shouldBreak = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_ActivatePatchSkip); |
2519 | if (shouldBreak > 0) { |
2520 | _ASSERTE(!"ActivatePatchSkip" ); |
2521 | } |
2522 | #endif |
2523 | |
2524 | LOG((LF_CORDB,LL_INFO10000, "DC::APS thread=0x%p pc=0x%p fForEnc=%d\n" , |
2525 | thread, PC, fForEnC)); |
2526 | _ASSERTE(g_patches != NULL); |
2527 | |
2528 | // Previously, we assumed that if we got to this point & the patch |
2529 | // was still there that we'd have to skip the patch. SetIP changes |
2530 | // this like so: |
2531 | // A breakpoint is set, and hit (but not removed), and all the |
// EE threads come to a screeching halt. The Debugger RC thread
2533 | // continues along, and is told to SetIP of the thread that hit |
2534 | // the BP to whatever. Eventually the RC thread is told to continue, |
2535 | // and at that point the EE thread is released, finishes DispatchPatchOrSingleStep, |
2536 | // and shows up here. |
2537 | // At that point, if the thread's current PC is |
2538 | // different from the patch PC, then SetIP must have moved it elsewhere |
2539 | // & we shouldn't do this patch skip (which will put us back to where |
2540 | // we were, which is clearly wrong). If the PC _is_ the same, then |
2541 | // the thread hasn't been moved, the patch is still in the code stream, |
2542 | // and we want to do the patch skip thing in order to execute this |
2543 | // instruction w/o removing it from the code stream. |
2544 | |
2545 | DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)PC); |
2546 | DebuggerPatchSkip *skip = NULL; |
2547 | |
2548 | if (patch != NULL && patch->IsNativePatch()) |
2549 | { |
2550 | // |
2551 | // We adjust the thread's PC to someplace where we write |
2552 | // the next instruction, then |
2553 | // we single step over that, then we set the PC back here so |
2554 | // we don't let other threads race past here while we're stepping |
2555 | // this one. |
2556 | // |
2557 | // !!! check result |
2558 | LOG((LF_CORDB,LL_INFO10000, "DC::APS: About to skip from PC=0x%p\n" , PC)); |
2559 | skip = new (interopsafe) DebuggerPatchSkip(thread, patch, thread->GetDomain()); |
2560 | TRACE_ALLOC(skip); |
2561 | } |
2562 | |
2563 | return skip; |
2564 | } |
2565 | |
2566 | DPOSS_ACTION DebuggerController::ScanForTriggers(CORDB_ADDRESS_TYPE *address, |
2567 | Thread *thread, |
2568 | CONTEXT *context, |
2569 | DebuggerControllerQueue *pDcq, |
2570 | SCAN_TRIGGER stWhat, |
2571 | TP_RESULT *pTpr) |
2572 | { |
2573 | CONTRACTL |
2574 | { |
2575 | SO_NOT_MAINLINE; |
2576 | // @todo - should this throw or not? |
2577 | NOTHROW; |
2578 | |
2579 | // call Triggers which may invoke GC stuff... See comment in DispatchNativeException for why it's disabled. |
2580 | DISABLED(GC_TRIGGERS); |
2581 | PRECONDITION(!ThisIsHelperThreadWorker()); |
2582 | |
2583 | PRECONDITION(CheckPointer(address)); |
2584 | PRECONDITION(CheckPointer(thread)); |
2585 | PRECONDITION(CheckPointer(context)); |
2586 | PRECONDITION(CheckPointer(pDcq)); |
2587 | PRECONDITION(CheckPointer(pTpr)); |
2588 | } |
2589 | CONTRACTL_END; |
2590 | |
2591 | _ASSERTE(HasLock()); |
2592 | |
2593 | CONTRACT_VIOLATION(ThrowsViolation); |
2594 | |
2595 | LOG((LF_CORDB, LL_INFO10000, "DC::SFT: starting scan for addr:0x%p" |
2596 | " thread:0x%x\n" , address, thread)); |
2597 | |
2598 | _ASSERTE( pTpr != NULL ); |
2599 | DebuggerControllerPatch *patch = NULL; |
2600 | |
2601 | if (g_patches != NULL) |
2602 | patch = g_patches->GetPatch(address); |
2603 | |
2604 | ULONG iEvent = UINT32_MAX; |
2605 | ULONG iEventNext = UINT32_MAX; |
2606 | BOOL fDone = FALSE; |
2607 | |
2608 | // This is a debugger exception if there's a patch here, or |
2609 | // we're here for something like a single step. |
2610 | DPOSS_ACTION used = DPOSS_INVALID; |
2611 | if ((patch != NULL) || !IsPatched(address, TRUE)) |
2612 | { |
// we are sure that we care about this exception, but not sure
// if we will send an event to the RS
2615 | used = DPOSS_USED_WITH_NO_EVENT; |
2616 | } |
2617 | else |
2618 | { |
2619 | // initialize it to don't care for now |
2620 | used = DPOSS_DONT_CARE; |
2621 | } |
2622 | |
2623 | TP_RESULT tpr = TPR_IGNORE; |
2624 | |
2625 | while (stWhat & ST_PATCH && |
2626 | patch != NULL && |
2627 | !fDone) |
2628 | { |
2629 | _ASSERTE(IsInUsedAction(used) == true); |
2630 | |
2631 | DebuggerControllerPatch *patchNext |
2632 | = g_patches->GetNextPatch(patch); |
2633 | |
2634 | LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch 0x%x, patchNext 0x%x\n" , patch, patchNext)); |
2635 | |
2636 | // Annoyingly, TriggerPatch may add patches, which may cause |
2637 | // the patch table to move, which may, in turn, invalidate |
// the patch (and patchNext) pointers. Store indices, instead.
2639 | iEvent = g_patches->GetItemIndex( (HASHENTRY *)patch ); |
2640 | |
2641 | if (patchNext != NULL) |
2642 | { |
2643 | iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext); |
2644 | } |
2645 | |
2646 | if (MatchPatch(thread, context, patch)) |
2647 | { |
2648 | LOG((LF_CORDB, LL_INFO10000, "DC::SFT: patch matched\n" )); |
2649 | AddRef(patch); |
2650 | |
2651 | // We are hitting a patch at a virtual trace call target, so let's trigger trace call here. |
2652 | if (patch->trace.GetTraceType() == TRACE_ENTRY_STUB) |
2653 | { |
2654 | patch->controller->TriggerTraceCall(thread, dac_cast<PTR_CBYTE>(::GetIP(context))); |
2655 | tpr = TPR_IGNORE; |
2656 | } |
2657 | else |
2658 | { |
2659 | // Mark if we're at an unsafe place. |
2660 | AtSafePlaceHolder unsafePlaceHolder(thread); |
2661 | |
2662 | tpr = patch->controller->TriggerPatch(patch, |
2663 | thread, |
2664 | TY_NORMAL); |
2665 | } |
2666 | |
2667 | // Any patch may potentially send an event. |
2668 | // (Whereas some single-steps are "internal-only" and can |
// never send an event, such as a single step over an exception that
2670 | // lands us in la-la land.) |
2671 | used = DPOSS_USED_WITH_EVENT; |
2672 | |
2673 | if (tpr == TPR_TRIGGER || |
2674 | tpr == TPR_TRIGGER_ONLY_THIS || |
2675 | tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP) |
2676 | { |
2677 | // Make sure we've still got a valid pointer. |
2678 | patch = (DebuggerControllerPatch *) |
2679 | DebuggerController::g_patches->GetEntryPtr( iEvent ); |
2680 | |
2681 | pDcq->dcqEnqueue(patch->controller, TRUE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO> |
2682 | } |
2683 | |
2684 | // Make sure we've got a valid pointer in case TriggerPatch |
2685 | // returned false but still caused the table to move. |
2686 | patch = (DebuggerControllerPatch *) |
2687 | g_patches->GetEntryPtr( iEvent ); |
2688 | |
// A patch can be deleted as a result of its being triggered.
// The actual deletion of the patch is delayed until after
// the end of the trigger.
// Moreover, "patchNext" could have been deleted as a result of DisableAll()
// being called in TriggerPatch(). Thus, we should update our patchNext
// pointer now. We were just lucky before, because the now-deprecated
// "deleted" flag didn't get set when we iterated the patches in DisableAll().
2696 | patchNext = g_patches->GetNextPatch(patch); |
2697 | if (patchNext != NULL) |
2698 | iEventNext = g_patches->GetItemIndex((HASHENTRY *)patchNext); |
2699 | |
2700 | // Note that Release() actually removes the patch if its ref count |
2701 | // reaches 0 after the release. |
2702 | Release(patch); |
2703 | } |
2704 | |
2705 | if (tpr == TPR_IGNORE_AND_STOP || |
2706 | tpr == TPR_TRIGGER_ONLY_THIS || |
2707 | tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP) |
2708 | { |
2709 | #ifdef _DEBUG |
2710 | if (tpr == TPR_TRIGGER_ONLY_THIS || |
2711 | tpr == TPR_TRIGGER_ONLY_THIS_AND_LOOP) |
2712 | _ASSERTE(pDcq->dcqGetCount() == 1); |
2713 | #endif //_DEBUG |
2714 | |
2715 | fDone = TRUE; |
2716 | } |
2717 | else if (patchNext != NULL) |
2718 | { |
2719 | patch = (DebuggerControllerPatch *) |
2720 | g_patches->GetEntryPtr(iEventNext); |
2721 | } |
2722 | else |
2723 | { |
2724 | patch = NULL; |
2725 | } |
2726 | } |
2727 | |
2728 | #ifdef FEATURE_DATABREAKPOINT |
2729 | if (stWhat & ST_SINGLE_STEP && |
2730 | tpr != TPR_TRIGGER_ONLY_THIS && |
2731 | DebuggerDataBreakpoint::TriggerDataBreakpoint(thread, context)) |
2732 | { |
2733 | DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread); |
2734 | pDcq->dcqEnqueue(pDataBreakpoint, FALSE); |
2735 | } |
2736 | #endif |
2737 | |
2738 | if (stWhat & ST_SINGLE_STEP && |
2739 | tpr != TPR_TRIGGER_ONLY_THIS) |
2740 | { |
2741 | LOG((LF_CORDB, LL_INFO10000, "DC::SFT: Trigger controllers with single step\n" )); |
2742 | |
2743 | // |
2744 | // Now, go ahead & trigger all controllers with |
2745 | // single step events |
2746 | // |
2747 | |
2748 | DebuggerController *p; |
2749 | |
2750 | p = g_controllers; |
2751 | while (p != NULL) |
2752 | { |
2753 | DebuggerController *pNext = p->m_next; |
2754 | |
2755 | if (p->m_thread == thread && p->m_singleStep) |
2756 | { |
2757 | if (used == DPOSS_DONT_CARE) |
2758 | { |
2759 | // Debugger does care for this exception. |
2760 | used = DPOSS_USED_WITH_NO_EVENT; |
2761 | } |
2762 | |
2763 | if (p->TriggerSingleStep(thread, (const BYTE *)address)) |
2764 | { |
2765 | // by now, we should already know that we care for this exception. |
2766 | _ASSERTE(IsInUsedAction(used) == true); |
2767 | |
2768 | // now we are sure that we will send event to the RS |
2769 | used = DPOSS_USED_WITH_EVENT; |
2770 | pDcq->dcqEnqueue(p, FALSE); // <REVISIT_TODO>@todo Return value</REVISIT_TODO> |
2771 | |
2772 | } |
2773 | } |
2774 | |
2775 | p = pNext; |
2776 | } |
2777 | |
2778 | UnapplyTraceFlag(thread); |
2779 | |
2780 | // |
2781 | // See if we have any steppers still active for this thread, if so |
2782 | // re-apply the trace flag. |
2783 | // |
2784 | |
2785 | p = g_controllers; |
2786 | while (p != NULL) |
2787 | { |
2788 | if (p->m_thread == thread && p->m_singleStep) |
2789 | { |
2790 | ApplyTraceFlag(thread); |
2791 | break; |
2792 | } |
2793 | |
2794 | p = p->m_next; |
2795 | } |
2796 | } |
2797 | |
2798 | // Significant speed increase from single dereference, I bet :) |
2799 | (*pTpr) = tpr; |
2800 | |
2801 | LOG((LF_CORDB, LL_INFO10000, "DC::SFT returning 0x%x as used\n" ,used)); |
2802 | return used; |
2803 | } |
2804 | |
2805 | #ifdef EnC_SUPPORTED |
2806 | DebuggerControllerPatch *DebuggerController::IsXXXPatched(const BYTE *PC, |
2807 | DEBUGGER_CONTROLLER_TYPE dct) |
2808 | { |
2809 | _ASSERTE(g_patches != NULL); |
2810 | |
2811 | DebuggerControllerPatch *patch = g_patches->GetPatch((CORDB_ADDRESS_TYPE *)PC); |
2812 | |
2813 | while(patch != NULL && |
2814 | (int)patch->controller->GetDCType() <= (int)dct) |
2815 | { |
2816 | if (patch->IsNativePatch() && |
2817 | patch->controller->GetDCType()==dct) |
2818 | { |
2819 | return patch; |
2820 | } |
2821 | patch = g_patches->GetNextPatch(patch); |
2822 | } |
2823 | |
2824 | return NULL; |
2825 | } |
2826 | |
2827 | // This function will check for an EnC patch at the given address and return |
2828 | // it if one is there, otherwise it will return NULL. |
2829 | DebuggerControllerPatch *DebuggerController::GetEnCPatch(const BYTE *address) |
2830 | { |
2831 | _ASSERTE(address); |
2832 | |
2833 | if( g_pEEInterface->IsManagedNativeCode(address) ) |
2834 | { |
2835 | DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) address); |
2836 | if (dji == NULL) |
2837 | return NULL; |
2838 | |
// we can have two types of patches - one in code where the IL has been updated, to trigger
// the switch, and the other in the code we've switched to, to trigger the FunctionRemapComplete
// callback. If the version == default then it can't be the latter; otherwise, if we haven't
// handled the remap for this function yet, it is certainly the latter.
2843 | if (! dji->m_encBreakpointsApplied && |
2844 | (dji->m_encVersion == CorDB_DEFAULT_ENC_FUNCTION_VERSION)) |
2845 | { |
2846 | return NULL; |
2847 | } |
2848 | } |
2849 | return IsXXXPatched(address, DEBUGGER_CONTROLLER_ENC); |
2850 | } |
2851 | #endif //EnC_SUPPORTED |
2852 | |
2853 | // DebuggerController::DispatchPatchOrSingleStep - Ask any patches that are active at a given |
2854 | // address if they want to do anything about the exception that's occurred there. How: For the given |
2855 | // address, go through the list of patches & see if any of them are interested (by invoking their |
// DebuggerController's TriggerPatch). Put any DCs that are interested into a queue and then call
// SendEvent on each.
2858 | // Note that control will not return from this function in the case of EnC remap |
2859 | DPOSS_ACTION DebuggerController::DispatchPatchOrSingleStep(Thread *thread, CONTEXT *context, CORDB_ADDRESS_TYPE *address, SCAN_TRIGGER which) |
2860 | { |
2861 | CONTRACT(DPOSS_ACTION) |
2862 | { |
2863 | // @todo - should this throw or not? |
2864 | NOTHROW; |
2865 | DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event. See Comment in DispatchNativeException |
2866 | PRECONDITION(!ThisIsHelperThreadWorker()); |
2867 | |
2868 | PRECONDITION(CheckPointer(thread)); |
2869 | PRECONDITION(CheckPointer(context)); |
2870 | PRECONDITION(CheckPointer(address)); |
2871 | PRECONDITION(!HasLock()); |
2872 | |
2873 | POSTCONDITION(!HasLock()); // make sure we're not leaking the controller lock |
2874 | } |
2875 | CONTRACT_END; |
2876 | |
2877 | CONTRACT_VIOLATION(ThrowsViolation); |
2878 | |
2879 | LOG((LF_CORDB|LF_ENC,LL_INFO1000,"DC:DPOSS at 0x%p trigger:0x%x\n" , address, which)); |
2880 | |
2881 | // We should only have an exception if some managed thread was running. |
2882 | // Thus we should never be here when we're stopped. |
2883 | // @todo - this assert fires! Is that an issue, or is it invalid? |
2884 | //_ASSERTE(!g_pDebugger->IsStopped()); |
2885 | DPOSS_ACTION used = DPOSS_DONT_CARE; |
2886 | |
2887 | DebuggerControllerQueue dcq; |
2888 | if (!g_patchTableValid) |
2889 | { |
2890 | |
2891 | LOG((LF_CORDB|LF_ENC, LL_INFO1000, "DC::DPOSS returning, no patch table.\n" )); |
2892 | RETURN (used); |
2893 | } |
2894 | _ASSERTE(g_patches != NULL); |
2895 | |
2896 | CrstHolderWithState lockController(&g_criticalSection); |
2897 | |
2898 | TADDR originalAddress = 0; |
2899 | |
2900 | #ifdef EnC_SUPPORTED |
2901 | DebuggerControllerPatch *dcpEnCOriginal = NULL; |
2902 | |
// If this sequence point has an EnC patch, we want to process it ahead of any others. If the
// debugger wants to remap the function at this point, then we'll call ResumeInUpdatedFunction and
// not return; otherwise we will just continue with the regular patch-handling logic.
2906 | dcpEnCOriginal = GetEnCPatch(dac_cast<PTR_CBYTE>(GetIP(context))); |
2907 | |
2908 | if (dcpEnCOriginal) |
2909 | { |
2910 | LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC short-circuit\n" )); |
2911 | TP_RESULT tpres = |
2912 | dcpEnCOriginal->controller->TriggerPatch(dcpEnCOriginal, |
2913 | thread, |
2914 | TY_SHORT_CIRCUIT); |
2915 | |
// We will only come back here on a RemapOpportunity that wasn't taken, or on a RemapComplete.
// If we processed a RemapComplete (which returns TPR_IGNORE_AND_STOP), then we don't want to handle
// additional breakpoints on the current line because we've already effectively executed to that point
// and would have hit them already. If they are new, we also don't want to hit them because, e.g., if we are
// sitting on line 10 and add a breakpoint at line 10 and step,
// we don't expect to stop at line 10; we expect to go to line 11.
//
// The special case is if an EnC remap breakpoint exists in the function. This could only happen if the function was
// updated between the RemapOpportunity and the RemapComplete. In that case we don't want to skip the patches,
// and instead fall through to handle the remap breakpoint.
2926 | |
2927 | if (tpres == TPR_IGNORE_AND_STOP) |
2928 | { |
2929 | // It was a RemapComplete, so fall through. Set dcpEnCOriginal to NULL to indicate that any |
2930 | // EnC patch still there should be treated as a new patch. Any RemapComplete patch will have been |
2931 | // already removed by patch processing. |
2932 | dcpEnCOriginal = NULL; |
2933 | LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, exiting\n" )); |
2934 | used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch |
2935 | goto Exit; |
2936 | } |
2937 | |
2938 | _ASSERTE(tpres==TPR_IGNORE); |
2939 | LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS done EnC short-circuit, ignoring\n" )); |
2940 | // if we got here, then the EnC remap opportunity was not taken, so just continue on. |
2941 | } |
2942 | #endif // EnC_SUPPORTED |
2943 | |
2944 | TP_RESULT tpr; |
2945 | |
2946 | used = ScanForTriggers((CORDB_ADDRESS_TYPE *)address, thread, context, &dcq, which, &tpr); |
2947 | |
2948 | LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS ScanForTriggers called and returned.\n" )); |
2949 | |
2950 | |
// If we SetIP, then that will change the address in the context.
// Remember the old address so that we can compare it to the context's IP and see if it changed.
// If it did change, then don't dispatch our current event.
2954 | originalAddress = (TADDR) address; |
2955 | |
2956 | #ifdef _DEBUG |
// If we do a SetIP after this point, the value of address will be garbage. Set it to a distinctive pattern now, so
// we don't accidentally use what will (98% of the time) appear to be a valid value.
2959 | address = (CORDB_ADDRESS_TYPE *)(UINT_PTR)0xAABBCCFF; |
2960 | #endif //_DEBUG |
2961 | |
2962 | if (dcq.dcqGetCount()> 0) |
2963 | { |
2964 | lockController.Release(); |
2965 | |
2966 | // Mark if we're at an unsafe place. |
2967 | bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread); |
2968 | if (!atSafePlace) |
2969 | g_pDebugger->IncThreadsAtUnsafePlaces(); |
2970 | |
2971 | DWORD dwEvent = 0xFFFFFFFF; |
2972 | DWORD dwNumberEvents = 0; |
2973 | BOOL reabort = FALSE; |
2974 | |
2975 | SENDIPCEVENT_BEGIN(g_pDebugger, thread); |
2976 | |
2977 | // Now that we've resumed from blocking, check if somebody did a SetIp on us. |
2978 | bool fIpChanged = (originalAddress != GetIP(context)); |
2979 | |
2980 | // Send the events outside of the controller lock |
2981 | bool anyEventsSent = false; |
2982 | |
2983 | dwNumberEvents = dcq.dcqGetCount(); |
2984 | dwEvent = 0; |
2985 | |
2986 | while (dwEvent < dwNumberEvents) |
2987 | { |
2988 | DebuggerController *event = dcq.dcqGetElement(dwEvent); |
2989 | |
2990 | if (!event->m_deleted) |
2991 | { |
2992 | #ifdef DEBUGGING_SUPPORTED |
2993 | if (thread->GetDomain()->IsDebuggerAttached()) |
2994 | { |
2995 | if (event->SendEvent(thread, fIpChanged)) |
2996 | { |
2997 | anyEventsSent = true; |
2998 | } |
2999 | } |
3000 | #endif //DEBUGGING_SUPPORTED |
3001 | } |
3002 | |
3003 | dwEvent++; |
3004 | } |
3005 | |
// Trap all threads if necessary, but only if we actually sent an event up (i.e., all the queued events weren't
// deleted before we got a chance to get the EventSending lock.)
3008 | if (anyEventsSent) |
3009 | { |
3010 | LOG((LF_CORDB|LF_ENC, LL_EVERYTHING, "DC::DPOSS We sent an event\n" )); |
3011 | g_pDebugger->SyncAllThreads(SENDIPCEVENT_PtrDbgLockHolder); |
3012 | LOG((LF_CORDB,LL_INFO1000, "SAT called!\n" )); |
3013 | } |
3014 | |
3015 | |
// If we need to do a re-abort (see below), then save the current IP in the thread's context before we block and
3017 | // possibly let another func eval get setup. |
3018 | reabort = thread->m_StateNC & Thread::TSNC_DebuggerReAbort; |
3019 | SENDIPCEVENT_END; |
3020 | |
3021 | if (!atSafePlace) |
3022 | g_pDebugger->DecThreadsAtUnsafePlaces(); |
3023 | |
3024 | lockController.Acquire(); |
3025 | |
3026 | // Dequeue the events while we have the controller lock. |
3027 | dwEvent = 0; |
3028 | while (dwEvent < dwNumberEvents) |
3029 | { |
3030 | dcq.dcqDequeue(); |
3031 | dwEvent++; |
3032 | } |
3033 | // If a func eval completed with a ThreadAbortException, go ahead and setup the thread to re-abort itself now |
3034 | // that we're continuing the thread. Note: we make sure that the thread's IP hasn't changed between now and when |
3035 | // we blocked above. While blocked above, the debugger has a chance to setup another func eval on this |
3036 | // thread. If that happens, we don't want to setup the reabort just yet. |
3037 | if (reabort) |
3038 | { |
3039 | if ((UINT_PTR)GetEEFuncEntryPoint(::FuncEvalHijack) != (UINT_PTR)GetIP(context)) |
3040 | { |
3041 | HRESULT hr; |
3042 | hr = g_pDebugger->FuncEvalSetupReAbort(thread, Thread::TAR_Thread); |
3043 | _ASSERTE(SUCCEEDED(hr)); |
3044 | } |
3045 | } |
3046 | } |
3047 | |
3048 | #if defined EnC_SUPPORTED |
3049 | Exit: |
3050 | #endif |
3051 | |
3052 | // Note: if the thread filter context is NULL, then SetIP would have failed & thus we should do the |
3053 | // patch skip thing. |
3054 | // @todo - do we need to get the context again here? |
3055 | CONTEXT *pCtx = GetManagedLiveCtx(thread); |
3056 | |
3057 | #ifdef EnC_SUPPORTED |
3058 | DebuggerControllerPatch *dcpEnCCurrent = GetEnCPatch(dac_cast<PTR_CBYTE>((GetIP(context)))); |
3059 | |
// We have a new patch if the original was null and the current is non-null; otherwise we have an old
// patch. We want to skip old patches, but handle new patches.
3062 | if (dcpEnCOriginal == NULL && dcpEnCCurrent != NULL) |
3063 | { |
3064 | LOG((LF_CORDB|LF_ENC,LL_INFO10000, "DC::DPOSS EnC post-processing\n" )); |
3065 | dcpEnCCurrent->controller->TriggerPatch( dcpEnCCurrent, |
3066 | thread, |
3067 | TY_SHORT_CIRCUIT); |
3068 | used = DPOSS_USED_WITH_EVENT; // indicate that we handled a patch |
3069 | } |
3070 | #endif |
3071 | |
3072 | ActivatePatchSkip(thread, dac_cast<PTR_CBYTE>(GetIP(pCtx)), FALSE); |
3073 | |
3074 | lockController.Release(); |
3075 | |
3076 | |
// We pulse the GC mode here to cooperate with a thread trying to suspend the runtime. If we didn't pulse
// the GC, the odds of catching this thread in interruptible code may be very small (since this filter
// could be very large compared to the managed code this thread is running).
// Only do this if the exception was actually for the debugger. (We don't want to toggle the GC mode on every
// random exception.) We can't do this while holding any debugger locks.
3082 | if (used == DPOSS_USED_WITH_EVENT) |
3083 | { |
3084 | bool atSafePlace = g_pDebugger->IsThreadAtSafePlace(thread); |
3085 | if (!atSafePlace) |
3086 | { |
3087 | g_pDebugger->IncThreadsAtUnsafePlaces(); |
3088 | } |
3089 | |
3090 | // Always pulse the GC mode. This will allow an async break to complete even if we have a patch |
3091 | // at an unsafe place. |
3092 | // If we are at an unsafe place, then we can't do a GC. |
3093 | thread->PulseGCMode(); |
3094 | |
3095 | if (!atSafePlace) |
3096 | { |
3097 | g_pDebugger->DecThreadsAtUnsafePlaces(); |
3098 | } |
3099 | |
3100 | } |
3101 | |
3102 | RETURN used; |
3103 | } |
3104 | |
3105 | bool DebuggerController::IsSingleStepEnabled() |
3106 | { |
3107 | LIMITED_METHOD_CONTRACT; |
3108 | return m_singleStep; |
3109 | } |
3110 | |
3111 | void DebuggerController::EnableSingleStep() |
3112 | { |
3113 | CONTRACTL |
3114 | { |
3115 | SO_NOT_MAINLINE; |
3116 | NOTHROW; |
3117 | GC_NOTRIGGER; |
3118 | } |
3119 | CONTRACTL_END; |
3120 | |
3121 | #ifdef _DEBUG |
// Some controllers don't need to set the SS flag to do their job, and if they are setting it, it's likely an issue.
// So we assert here to catch them red-handed. This assert can always be updated to accommodate changes
// in a controller's behavior.
3125 | |
3126 | switch(GetDCType()) |
3127 | { |
3128 | case DEBUGGER_CONTROLLER_THREAD_STARTER: |
3129 | case DEBUGGER_CONTROLLER_BREAKPOINT: |
3130 | case DEBUGGER_CONTROLLER_USER_BREAKPOINT: |
3131 | case DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE: |
3132 | CONSISTENCY_CHECK_MSGF(false, ("Controller pThis=%p shouldn't be setting ss flag." , this)); |
3133 | break; |
default: // MinGW compilers require all enum cases to be handled in switch statement.
3135 | break; |
3136 | } |
3137 | #endif |
3138 | |
3139 | EnableSingleStep(m_thread); |
3140 | m_singleStep = true; |
3141 | } |
3142 | |
3143 | #ifdef EnC_SUPPORTED |
// Note that this doesn't tell us if single-stepping is currently enabled
// at the hardware level (i.e., for x86, whether (context->EFlags & 0x100) is set), but
// rather whether we WANT single-stepping enabled (pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping).
// This gets called from exactly one place - ActivatePatchSkipForEnC
3148 | BOOL DebuggerController::IsSingleStepEnabled(Thread *pThread) |
3149 | { |
3150 | CONTRACTL |
3151 | { |
3152 | SO_NOT_MAINLINE; |
3153 | NOTHROW; |
3154 | GC_NOTRIGGER; |
3155 | } |
3156 | CONTRACTL_END; |
3157 | |
// This should be an atomic operation, so we
// don't need to lock it.
3160 | if(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping) |
3161 | { |
3162 | _ASSERTE(pThread->m_StateNC & Thread::TSNC_DebuggerIsStepping); |
3163 | |
3164 | return TRUE; |
3165 | } |
3166 | else |
3167 | return FALSE; |
3168 | } |
3169 | #endif //EnC_SUPPORTED |
3170 | |
3171 | void DebuggerController::EnableSingleStep(Thread *pThread) |
3172 | { |
3173 | CONTRACTL |
3174 | { |
3175 | SO_NOT_MAINLINE; |
3176 | NOTHROW; |
3177 | GC_NOTRIGGER; |
3178 | } |
3179 | CONTRACTL_END; |
3180 | |
3181 | LOG((LF_CORDB,LL_INFO1000, "DC::EnableSingleStep\n" )); |
3182 | |
3183 | _ASSERTE(pThread != NULL); |
3184 | |
3185 | ControllerLockHolder lockController; |
3186 | |
3187 | ApplyTraceFlag(pThread); |
3188 | } |
3189 | |
// Disable single-stepping for this controller.
// If none of the controllers on this thread want single-stepping, then also
// ensure that it's disabled at the hardware level.
3193 | void DebuggerController::DisableSingleStep() |
3194 | { |
3195 | CONTRACTL |
3196 | { |
3197 | SO_NOT_MAINLINE; |
3198 | NOTHROW; |
3199 | GC_NOTRIGGER; |
3200 | } |
3201 | CONTRACTL_END; |
3202 | |
3203 | _ASSERTE(m_thread != NULL); |
3204 | |
3205 | LOG((LF_CORDB,LL_INFO1000, "DC::DisableSingleStep\n" )); |
3206 | |
3207 | ControllerLockHolder lockController; |
3208 | { |
3209 | DebuggerController *p = g_controllers; |
3210 | |
3211 | m_singleStep = false; |
3212 | |
3213 | while (p != NULL) |
3214 | { |
3215 | if (p->m_thread == m_thread |
3216 | && p->m_singleStep) |
3217 | break; |
3218 | |
3219 | p = p->m_next; |
3220 | } |
3221 | |
3222 | if (p == NULL) |
3223 | { |
3224 | UnapplyTraceFlag(m_thread); |
3225 | } |
3226 | } |
3227 | } |
3228 | |
3229 | |
3230 | // |
3231 | // ApplyTraceFlag sets the trace flag (i.e., turns on single-stepping) |
3232 | // for a thread. |
3233 | // |
3234 | void DebuggerController::ApplyTraceFlag(Thread *thread) |
3235 | { |
3236 | LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag thread:0x%x [0x%0x]\n" , thread, Debugger::GetThreadIdHelper(thread))); |
3237 | |
3238 | CONTEXT *context; |
3239 | if(thread->GetInteropDebuggingHijacked()) |
3240 | { |
3241 | context = GetManagedLiveCtx(thread); |
3242 | } |
3243 | else |
3244 | { |
3245 | context = GetManagedStoppedCtx(thread); |
3246 | } |
3247 | CONSISTENCY_CHECK_MSGF(context != NULL, ("Can't apply ss flag to thread 0x%p b/c it's not in a safe place.\n" , thread)); |
3248 | PREFIX_ASSUME(context != NULL); |
3249 | |
3250 | |
3251 | g_pEEInterface->MarkThreadForDebugStepping(thread, true); |
3252 | LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag marked thread for debug stepping\n" )); |
3253 | |
3254 | SetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread)); |
3255 | LOG((LF_CORDB,LL_INFO1000, "DC::ApplyTraceFlag Leaving, baby!\n" )); |
3256 | } |
3257 | |
3258 | // |
3259 | // UnapplyTraceFlag sets the trace flag for a thread. |
3260 | // Removes the hardware trace flag on this thread. |
3261 | // |
3262 | |
3263 | void DebuggerController::UnapplyTraceFlag(Thread *thread) |
3264 | { |
3265 | LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag thread:0x%x\n" , thread)); |
3266 | |
3267 | |
3268 | // Either this is the helper thread, or we're manipulating our own context. |
3269 | _ASSERTE( |
3270 | ThisIsHelperThreadWorker() || |
3271 | (thread == ::GetThread()) |
3272 | ); |
3273 | |
3274 | CONTEXT *context = GetManagedStoppedCtx(thread); |
3275 | |
3276 | // If there's no context available, then the thread shouldn't have the single-step flag |
3277 | // enabled and there's nothing for us to do. |
3278 | if (context == NULL) |
3279 | { |
3280 | // In theory, I wouldn't expect us to ever get here. |
3281 | // Even if we are here, our single-step flag should already be deactivated, |
3282 | // so there should be nothing to do. However, we still assert b/c we want to know how |
3283 | // we'd actually hit this. |
3284 | // @todo - is there a path if TriggerUnwind() calls DisableAll(). But why would |
CONSISTENCY_CHECK_MSGF(false, ("How did we get here? thread=%p\n" , thread));
3286 | LOG((LF_CORDB,LL_INFO1000, "DC::UnapplyTraceFlag couldn't get context.\n" )); |
3287 | return; |
3288 | } |
3289 | |
3290 | // Always need to unmark for stepping |
3291 | g_pEEInterface->MarkThreadForDebugStepping(thread, false); |
3292 | UnsetSSFlag(reinterpret_cast<DT_CONTEXT *>(context) ARM_ARG(thread)); |
3293 | } |
3294 | |
3295 | void DebuggerController::EnableExceptionHook() |
3296 | { |
3297 | CONTRACTL |
3298 | { |
3299 | SO_NOT_MAINLINE; |
3300 | NOTHROW; |
3301 | GC_NOTRIGGER; |
3302 | } |
3303 | CONTRACTL_END; |
3304 | |
3305 | _ASSERTE(m_thread != NULL); |
3306 | |
3307 | ControllerLockHolder lockController; |
3308 | |
3309 | m_exceptionHook = true; |
3310 | } |
3311 | |
3312 | void DebuggerController::DisableExceptionHook() |
3313 | { |
3314 | CONTRACTL |
3315 | { |
3316 | SO_NOT_MAINLINE; |
3317 | NOTHROW; |
3318 | GC_NOTRIGGER; |
3319 | } |
3320 | CONTRACTL_END; |
3321 | |
3322 | _ASSERTE(m_thread != NULL); |
3323 | |
3324 | ControllerLockHolder lockController; |
3325 | m_exceptionHook = false; |
3326 | } |
3327 | |
3328 | |
// BOOL DebuggerController::DispatchExceptionHook() Called before
// the switch statement in DispatchNativeException (therefore
// when any exception occurs); this allows patches to do something before the
// regular DispatchX methods.
// How: Iterate through the list of controllers. If m_exceptionHook
// is set & m_thread is either thread or NULL, then invoke TriggerExceptionHook()
3335 | BOOL DebuggerController::DispatchExceptionHook(Thread *thread, |
3336 | CONTEXT *context, |
3337 | EXCEPTION_RECORD *pException) |
3338 | { |
3339 | // ExceptionHook has restrictive contract b/c it could come from anywhere. |
3340 | // This can only modify controller's internal state. Can't send managed debug events. |
3341 | CONTRACTL |
3342 | { |
3343 | SO_NOT_MAINLINE; |
3344 | GC_NOTRIGGER; |
3345 | NOTHROW; |
3346 | MODE_ANY; |
3347 | |
3348 | // Filter context not set yet b/c we can only set it in COOP, and this may be in preemptive. |
3349 | PRECONDITION(thread == ::GetThread()); |
3350 | PRECONDITION((g_pEEInterface->GetThreadFilterContext(thread) == NULL)); |
3351 | PRECONDITION(CheckPointer(pException)); |
3352 | } |
3353 | CONTRACTL_END; |
3354 | |
3355 | LOG((LF_CORDB, LL_INFO1000, "DC:: DispatchExceptionHook\n" )); |
3356 | |
3357 | if (!g_patchTableValid) |
3358 | { |
3359 | LOG((LF_CORDB, LL_INFO1000, "DC::DEH returning, no patch table.\n" )); |
3360 | return (TRUE); |
3361 | } |
3362 | |
3363 | |
3364 | _ASSERTE(g_patches != NULL); |
3365 | |
3366 | ControllerLockHolder lockController; |
3367 | |
3368 | TP_RESULT tpr = TPR_IGNORE; |
3369 | DebuggerController *p; |
3370 | |
3371 | p = g_controllers; |
3372 | while (p != NULL) |
3373 | { |
3374 | DebuggerController *pNext = p->m_next; |
3375 | |
3376 | if (p->m_exceptionHook |
3377 | && (p->m_thread == NULL || p->m_thread == thread) && |
3378 | tpr != TPR_IGNORE_AND_STOP) |
3379 | { |
3380 | LOG((LF_CORDB, LL_INFO1000, "DC::DEH calling TEH...\n" )); |
3381 | tpr = p->TriggerExceptionHook(thread, context , pException); |
3382 | LOG((LF_CORDB, LL_INFO1000, "DC::DEH ... returned.\n" )); |
3383 | |
3384 | if (tpr == TPR_IGNORE_AND_STOP) |
3385 | { |
3386 | LOG((LF_CORDB, LL_INFO1000, "DC:: DEH: leaving early!\n" )); |
3387 | break; |
3388 | } |
3389 | } |
3390 | |
3391 | p = pNext; |
3392 | } |
3393 | |
3394 | LOG((LF_CORDB, LL_INFO1000, "DC:: DEH: returning 0x%x!\n" , tpr)); |
3395 | |
3396 | return (tpr != TPR_IGNORE_AND_STOP); |
3397 | } |
3398 | |
3399 | // |
3400 | // EnableUnwind enables an unwind event to be called when the stack is unwound |
3401 | // (via an exception) to or past the given pointer. |
3402 | // |
3403 | |
3404 | void DebuggerController::EnableUnwind(FramePointer fp) |
3405 | { |
3406 | CONTRACTL |
3407 | { |
3408 | SO_NOT_MAINLINE; |
3409 | NOTHROW; |
3410 | GC_NOTRIGGER; |
3411 | } |
3412 | CONTRACTL_END; |
3413 | |
3414 | ASSERT(m_thread != NULL); |
3415 | LOG((LF_CORDB,LL_EVERYTHING,"DC:EU EnableUnwind at 0x%x\n" , fp.GetSPValue())); |
3416 | |
3417 | ControllerLockHolder lockController; |
3418 | m_unwindFP = fp; |
3419 | } |
3420 | |
3421 | FramePointer DebuggerController::GetUnwind() |
3422 | { |
3423 | LIMITED_METHOD_CONTRACT; |
3424 | |
3425 | return m_unwindFP; |
3426 | } |
3427 | |
3428 | // |
3429 | // DisableUnwind disables the unwind event for the controller. |
3430 | // |
3431 | |
3432 | void DebuggerController::DisableUnwind() |
3433 | { |
3434 | CONTRACTL |
3435 | { |
3436 | NOTHROW; |
3437 | GC_NOTRIGGER; |
3438 | MODE_ANY; |
3439 | CAN_TAKE_LOCK; |
3440 | } |
3441 | CONTRACTL_END; |
3442 | |
3443 | ASSERT(m_thread != NULL); |
3444 | |
3445 | LOG((LF_CORDB,LL_INFO1000, "DC::DU\n" )); |
3446 | |
3447 | ControllerLockHolder lockController; |
3448 | |
3449 | m_unwindFP = LEAF_MOST_FRAME; |
3450 | } |
3451 | |
3452 | // |
3453 | // DispatchUnwind is called when an unwind happens. |
3454 | // the event to the appropriate controllers. |
3455 | // - handlerFP is the frame pointer that the handler will be invoked at. |
3456 | // - DJI is EnC-aware method that the handler is in. |
3457 | // - newOffset is the |
3458 | // |
3459 | bool DebuggerController::DispatchUnwind(Thread *thread, |
3460 | MethodDesc *fd, DebuggerJitInfo * pDJI, |
3461 | SIZE_T newOffset, |
3462 | FramePointer handlerFP, |
3463 | CorDebugStepReason unwindReason) |
3464 | { |
3465 | CONTRACTL |
3466 | { |
3467 | SO_NOT_MAINLINE; |
3468 | NOTHROW; |
3469 | GC_NOTRIGGER; // don't send IPC events |
3470 | MODE_COOPERATIVE; // TriggerUnwind always is coop |
3471 | |
3472 | PRECONDITION(!IsDbgHelperSpecialThread()); |
3473 | } |
3474 | CONTRACTL_END; |
3475 | |
3476 | |
3477 | CONTRACT_VIOLATION(ThrowsViolation); // trigger unwind throws |
3478 | |
3479 | _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER); |
3480 | |
3481 | bool used = false; |
3482 | |
3483 | LOG((LF_CORDB, LL_INFO10000, "DC: Dispatch Unwind\n" )); |
3484 | |
3485 | ControllerLockHolder lockController; |
3486 | { |
3487 | DebuggerController *p; |
3488 | |
3489 | p = g_controllers; |
3490 | |
3491 | while (p != NULL) |
3492 | { |
3493 | DebuggerController *pNext = p->m_next; |
3494 | |
3495 | if (p->m_thread == thread && p->m_unwindFP != LEAF_MOST_FRAME) |
3496 | { |
3497 | LOG((LF_CORDB, LL_INFO10000, "Dispatch Unwind: Found candidate\n" )); |
3498 | |
3499 | |
// Assumptions here:
// Functions with handlers are -ALWAYS- EBP-frame based (JIT assumption)
//
// handlerFP is the EBP for the handler
// p->m_unwindFP points to the stack slot with the return address of the function.
//
// For the interesting case, stepover, we want to know if the handler is in the same function
// as the stepper, above it (caller), or under it (callee), in order to know if we want
// to patch the handler or not.
//
// 3 cases:
//
// a) Handler is in a function under the function where the step happened. It therefore is
//    a stepover. We don't want to patch this handler. The handler will have an EBP frame,
//    so it will be at least 2 DWORDs away from the m_unwindFP of the controller
//    (1 DWORD for the pushed return address and 1 DWORD for the pushed EBP).
//
// b) Handler is in the same function as the stepper. We want to patch the handler. In this
//    case handlerFP will be the same as p->m_unwindFP-sizeof(void*). Why? p->m_unwindFP
//    stores a pointer to the return address of the function. As a function with a handler
//    is always EBP-frame based, it will have the following code in the prolog:
//
//    push ebp      <- ( sub esp, 4 ; mov [esp], ebp )
//    mov ebp, esp
//
//    Therefore EBP will be equal to &CallerReturnAddress-4.
//
// c) Handler is above the function where the stepper is. We want to patch the handler.
//    handlerFP will always be greater than the pointer to the return address of the
//    function where the stepper is.
//
3533 | |
3534 | if (IsEqualOrCloserToRoot(handlerFP, p->m_unwindFP)) |
3535 | { |
3536 | used = true; |
3537 | |
3538 | // |
3539 | // Assume that this isn't going to block us at all -- |
3540 | // other threads may be waiting to patch or unpatch something, |
3541 | // or to dispatch. |
3542 | // |
3543 | LOG((LF_CORDB, LL_INFO10000, |
3544 | "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n" , |
3545 | newOffset, handlerFP.GetSPValue(), unwindReason)); |
3546 | |
3547 | p->TriggerUnwind(thread, |
3548 | fd, pDJI, |
3549 | newOffset, |
3550 | handlerFP, |
3551 | unwindReason); |
3552 | } |
3553 | else |
3554 | { |
3555 | LOG((LF_CORDB, LL_INFO10000, |
3556 | "Unwind trigger at offset 0x%p; handlerFP: 0x%p unwindReason: 0x%x.\n" , |
3557 | newOffset, handlerFP.GetSPValue(), unwindReason)); |
3558 | } |
3559 | } |
3560 | |
3561 | p = pNext; |
3562 | } |
3563 | } |
3564 | |
3565 | return used; |
3566 | } |
3567 | |
3568 | // |
3569 | // EnableTraceCall enables a call event on the controller |
3570 | // maxFrame is the leaf-most frame that we want notifications for. |
3571 | // For step-in stuff, this will always be LEAF_MOST_FRAME. |
3572 | // for step-out, this will be the current frame because we don't |
3573 | // care if the current frame calls back into managed code when we're |
3574 | // only interested in our parent frames. |
3575 | // |
3576 | |
3577 | void DebuggerController::EnableTraceCall(FramePointer maxFrame) |
3578 | { |
3579 | CONTRACTL |
3580 | { |
3581 | SO_NOT_MAINLINE; |
3582 | NOTHROW; |
3583 | GC_NOTRIGGER; |
3584 | } |
3585 | CONTRACTL_END; |
3586 | |
3587 | ASSERT(m_thread != NULL); |
3588 | |
3589 | LOG((LF_CORDB,LL_INFO1000, "DC::ETC maxFrame=0x%x, thread=0x%x\n" , |
3590 | maxFrame.GetSPValue(), Debugger::GetThreadIdHelper(m_thread))); |
3591 | |
// A JMC stepper should never enable this. (It should enable MethodEnter instead.)
3593 | _ASSERTE((DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType()) || !"JMC stepper shouldn't enable trace-call" ); |
3594 | |
3595 | |
3596 | ControllerLockHolder lockController; |
3597 | { |
3598 | if (!m_traceCall) |
3599 | { |
3600 | m_traceCall = true; |
3601 | g_pEEInterface->EnableTraceCall(m_thread); |
3602 | } |
3603 | |
3604 | if (IsCloserToLeaf(maxFrame, m_traceCallFP)) |
3605 | m_traceCallFP = maxFrame; |
3606 | } |
3607 | } |
3608 | |
3609 | struct PatchTargetVisitorData |
3610 | { |
3611 | DebuggerController* controller; |
3612 | FramePointer maxFrame; |
3613 | }; |
3614 | |
3615 | VOID DebuggerController::PatchTargetVisitor(TADDR pVirtualTraceCallTarget, VOID* pUserData) |
3616 | { |
3617 | CONTRACTL |
3618 | { |
3619 | SO_NOT_MAINLINE; |
3620 | NOTHROW; |
3621 | GC_NOTRIGGER; |
3622 | } |
3623 | CONTRACTL_END; |
3624 | |
3625 | DebuggerController* controller = ((PatchTargetVisitorData*) pUserData)->controller; |
3626 | FramePointer maxFrame = ((PatchTargetVisitorData*) pUserData)->maxFrame; |
3627 | |
3628 | EX_TRY |
3629 | { |
3630 | CONTRACT_VIOLATION(GCViolation); // PatchTrace throws, which implies GC-triggers |
3631 | TraceDestination trace; |
3632 | trace.InitForUnmanagedStub(pVirtualTraceCallTarget); |
3633 | controller->PatchTrace(&trace, maxFrame, true); |
3634 | } |
3635 | EX_CATCH |
3636 | { |
3637 | // not much we can do here |
3638 | } |
3639 | EX_END_CATCH(SwallowAllExceptions) |
3640 | } |
3641 | |
3642 | // |
3643 | // DisableTraceCall disables call events on the controller |
3644 | // |
3645 | |
3646 | void DebuggerController::DisableTraceCall() |
3647 | { |
3648 | CONTRACTL |
3649 | { |
3650 | SO_NOT_MAINLINE; |
3651 | NOTHROW; |
3652 | GC_NOTRIGGER; |
3653 | } |
3654 | CONTRACTL_END; |
3655 | |
3656 | ASSERT(m_thread != NULL); |
3657 | |
3658 | ControllerLockHolder lockController; |
3659 | { |
3660 | if (m_traceCall) |
3661 | { |
3662 | LOG((LF_CORDB,LL_INFO1000, "DC::DTC thread=0x%x\n" , |
3663 | Debugger::GetThreadIdHelper(m_thread))); |
3664 | |
3665 | g_pEEInterface->DisableTraceCall(m_thread); |
3666 | |
3667 | m_traceCall = false; |
3668 | m_traceCallFP = ROOT_MOST_FRAME; |
3669 | } |
3670 | } |
3671 | } |
3672 | |
3673 | // Get a FramePointer for the leafmost frame on this thread's stacktrace. |
3674 | // It's tempting to create this off the head of the Frame chain, but that may |
3675 | // include internal EE Frames (like GCRoot frames) which a FrameInfo-stackwalk may skip over. |
// Thus using the Frame chain would err on the side of returning a FramePointer that is
// closer to the leaf.
3678 | FramePointer GetCurrentFramePointerFromStackTraceForTraceCall(Thread * thread) |
3679 | { |
3680 | _ASSERTE(thread != NULL); |
3681 | |
3682 | // Ensure this is really the same as CSI. |
3683 | ControllerStackInfo info; |
3684 | |
3685 | // It's possible this stackwalk may be done at an unsafe time. |
3686 | // this method may trigger a GC, for example, in |
3687 | // FramedMethodFrame::AskStubForUnmanagedCallSite |
3688 | // which will trash the incoming argument array |
3689 | // which is not gc-protected. |
3690 | |
3691 | // We could probably imagine a more specialized stackwalk that |
3692 | // avoids these calls and is thus GC_NOTRIGGER. |
3693 | CONTRACT_VIOLATION(GCViolation); |
3694 | |
3695 | // This is being run live, so there's no filter available. |
3696 | CONTEXT *context; |
3697 | context = g_pEEInterface->GetThreadFilterContext(thread); |
3698 | _ASSERTE(context == NULL); |
3699 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
3700 | |
3701 | // This is actually safe because we're coming from a TraceCall, which |
3702 | // means we're not in the middle of a stub. We don't have some partially |
3703 | // constructed frame, so we can safely traverse the stack. |
3704 | // However, we may still have a problem w/ the GC-violation. |
3705 | StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET); |
3706 | info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL); |
3707 | |
3708 | FramePointer fp = info.m_activeFrame.fp; |
3709 | |
3710 | return fp; |
3711 | } |
3712 | // |
3713 | // DispatchTraceCall is called when a call is traced in the EE |
3714 | // It dispatches the event to the appropriate controllers. |
3715 | // |
3716 | |
3717 | bool DebuggerController::DispatchTraceCall(Thread *thread, |
3718 | const BYTE *ip) |
3719 | { |
3720 | CONTRACTL |
3721 | { |
3722 | GC_NOTRIGGER; |
3723 | THROWS; |
3724 | } |
3725 | CONTRACTL_END; |
3726 | |
3727 | bool used = false; |
3728 | |
3729 | LOG((LF_CORDB, LL_INFO10000, |
3730 | "DC::DTC: TraceCall at 0x%x\n" , ip)); |
3731 | |
3732 | ControllerLockHolder lockController; |
3733 | { |
3734 | DebuggerController *p; |
3735 | |
3736 | p = g_controllers; |
3737 | while (p != NULL) |
3738 | { |
3739 | DebuggerController *pNext = p->m_next; |
3740 | |
3741 | if (p->m_thread == thread && p->m_traceCall) |
3742 | { |
3743 | bool trigger; |
3744 | |
3745 | if (p->m_traceCallFP == LEAF_MOST_FRAME) |
3746 | trigger = true; |
3747 | else |
3748 | { |
3749 | // We know we don't have a filter context, so get a frame pointer from our frame chain. |
3750 | FramePointer fpToCheck = GetCurrentFramePointerFromStackTraceForTraceCall(thread); |
3751 | |
3752 | |
3753 | // <REVISIT_TODO> |
3754 | // |
// Currently, we never ever put a patch in an IL stub, and as such, if the IL stub
// throws an exception after returning from unmanaged code, we would not trigger
// a trace call when we call the constructor of the exception. The following is
// kind of a workaround to make that work. If we ever make the change to stop in
// IL stubs (for example, if we start to share security IL stubs), then this can be
// removed.
3761 | // |
3762 | // </REVISIT_TODO> |
3763 | |
3764 | |
3765 | |
3766 | // It's possible this stackwalk may be done at an unsafe time. |
3767 | // this method may trigger a GC, for example, in |
3768 | // FramedMethodFrame::AskStubForUnmanagedCallSite |
3769 | // which will trash the incoming argument array |
3770 | // which is not gc-protected. |
3771 | ControllerStackInfo info; |
3772 | { |
3773 | CONTRACT_VIOLATION(GCViolation); |
3774 | #ifdef _DEBUG |
3775 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
3776 | #endif // _DEBUG |
3777 | _ASSERTE(context == NULL); |
3778 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
3779 | |
3780 | // See explanation in GetCurrentFramePointerFromStackTraceForTraceCall. |
3781 | StackTraceTicket ticket(StackTraceTicket::SPECIAL_CASE_TICKET); |
3782 | info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL); |
3783 | } |
3784 | |
3785 | if (info.m_activeFrame.chainReason == CHAIN_ENTER_UNMANAGED) |
3786 | { |
3787 | _ASSERTE(info.HasReturnFrame()); |
3788 | |
3789 | // This check makes sure that we don't do this logic for inlined frames. |
3790 | if (info.m_returnFrame.md->IsILStub()) |
3791 | { |
3792 | // Make sure that the frame pointer of the active frame is actually |
3793 | // the address of an exit frame. |
3794 | _ASSERTE( (static_cast<Frame*>(info.m_activeFrame.fp.GetSPValue()))->GetFrameType() |
3795 | == Frame::TYPE_EXIT ); |
3796 | _ASSERTE(!info.m_returnFrame.HasChainMarker()); |
3797 | fpToCheck = info.m_returnFrame.fp; |
3798 | } |
3799 | } |
3800 | |
3801 | // @todo - This comparison seems somewhat nonsensical. We don't have a filter context |
3802 | // in place, so what frame pointer is fpToCheck actually for? |
3803 | trigger = IsEqualOrCloserToRoot(fpToCheck, p->m_traceCallFP); |
3804 | } |
3805 | |
3806 | if (trigger) |
3807 | { |
3808 | used = true; |
3809 | |
3810 | // This can only update controller's state, can't actually send IPC events. |
3811 | p->TriggerTraceCall(thread, ip); |
3812 | } |
3813 | } |
3814 | |
3815 | p = pNext; |
3816 | } |
3817 | } |
3818 | |
3819 | return used; |
3820 | } |
3821 | |
3822 | bool DebuggerController::IsMethodEnterEnabled() |
3823 | { |
3824 | LIMITED_METHOD_CONTRACT; |
3825 | return m_fEnableMethodEnter; |
3826 | } |
3827 | |
3828 | |
// Notify dispatching logic that this controller wants to get TriggerMethodEnter callbacks.
3830 | // We keep a count of total controllers waiting for MethodEnter (in g_cTotalMethodEnter). |
3831 | // That way we know if any controllers want MethodEnter callbacks. If none do, |
3832 | // then we can set the JMC probe flag to false for all modules. |
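// For illustration: if two steppers each call EnableMethodEnter() once,
// g_cTotalMethodEnter == 2. Each matching DisableMethodEnter() decrements it,
// and UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0) is recomputed on every
// transition, so the JMC probes stay active only while at least one
// controller still wants TriggerMethodEnter callbacks.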
3833 | void DebuggerController::EnableMethodEnter() |
3834 | { |
3835 | CONTRACTL |
3836 | { |
3837 | SO_NOT_MAINLINE; |
3838 | NOTHROW; |
3839 | GC_NOTRIGGER; |
3840 | } |
3841 | CONTRACTL_END; |
3842 | |
3843 | ControllerLockHolder chController; |
3844 | Debugger::DebuggerDataLockHolder chInfo(g_pDebugger); |
3845 | |
3846 | // Both JMC + Traditional steppers may use MethodEnter. |
3847 | // For JMC, it's a core part of functionality. For Traditional steppers, we use it as a backstop |
3848 | // in case the stub-managers fail. |
3849 | _ASSERTE(g_cTotalMethodEnter >= 0); |
3850 | if (!m_fEnableMethodEnter) |
3851 | { |
3852 | LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, previously disabled\n" , this)); |
3853 | m_fEnableMethodEnter = true; |
3854 | |
3855 | g_cTotalMethodEnter++; |
3856 | } |
3857 | else |
3858 | { |
3859 | LOG((LF_CORDB, LL_INFO1000000, "DC::EnableME, this=%p, already set\n" , this)); |
3860 | } |
3861 | g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock |
3862 | } |
3863 | |
3864 | // Notify dispatching logic that this controller doesn't want to get |
3865 | // TriggerMethodEnter |
3866 | void DebuggerController::DisableMethodEnter() |
3867 | { |
3868 | CONTRACTL |
3869 | { |
3870 | SO_NOT_MAINLINE; |
3871 | NOTHROW; |
3872 | GC_NOTRIGGER; |
3873 | } |
3874 | CONTRACTL_END; |
3875 | |
3876 | ControllerLockHolder chController; |
3877 | Debugger::DebuggerDataLockHolder chInfo(g_pDebugger); |
3878 | |
3879 | if (m_fEnableMethodEnter) |
3880 | { |
3881 | LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, previously set\n" , this)); |
3882 | m_fEnableMethodEnter = false; |
3883 | |
3884 | g_cTotalMethodEnter--; |
3885 | _ASSERTE(g_cTotalMethodEnter >= 0); |
3886 | } |
3887 | else |
3888 | { |
3889 | LOG((LF_CORDB, LL_INFO1000000, "DC::DisableME, this=%p, already disabled\n" , this)); |
3890 | } |
3891 | |
3892 | g_pDebugger->UpdateAllModuleJMCFlag(g_cTotalMethodEnter != 0); // Needs JitInfo lock |
3893 | } |
3894 | |
3895 | // Loop through controllers and dispatch TriggerMethodEnter |
3896 | void DebuggerController::DispatchMethodEnter(void * pIP, FramePointer fp) |
3897 | { |
3898 | _ASSERTE(pIP != NULL); |
3899 | |
3900 | Thread * pThread = g_pEEInterface->GetThread(); |
3901 | _ASSERTE(pThread != NULL); |
3902 | |
3903 | // Lookup the DJI for this method & ip. |
3904 | // Since we create DJIs when we jit the code, and this code has been jitted |
3905 | // (that's where the probe's coming from!), we will have a DJI. |
3906 | DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP); |
3907 | |
3908 | // This includes the case where we have a LightWeight codegen method. |
3909 | if (dji == NULL) |
3910 | { |
3911 | return; |
3912 | } |
3913 | |
3914 | LOG((LF_CORDB, LL_INFO100000, "DC::DispatchMethodEnter for '%s::%s'\n" , |
3915 | dji->m_fd->m_pszDebugClassName, |
3916 | dji->m_fd->m_pszDebugMethodName)); |
3917 | |
3918 | ControllerLockHolder lockController; |
3919 | |
3920 | // For debug check, keep a count to make sure that g_cTotalMethodEnter |
3921 | // is actually the number of controllers w/ MethodEnter enabled. |
3922 | int count = 0; |
3923 | |
3924 | DebuggerController *p = g_controllers; |
3925 | while (p != NULL) |
3926 | { |
3927 | if (p->m_fEnableMethodEnter) |
3928 | { |
3929 | if ((p->GetThread() == NULL) || (p->GetThread() == pThread)) |
3930 | { |
3931 | ++count; |
3932 | p->TriggerMethodEnter(pThread, dji, (const BYTE *) pIP, fp); |
3933 | } |
3934 | } |
3935 | p = p->m_next; |
3936 | } |
3937 | |
3938 | _ASSERTE(g_cTotalMethodEnter == count); |
3939 | |
3940 | } |
3941 | |
3942 | // |
3943 | // AddProtection adds page protection to (at least) the given range of |
3944 | // addresses |
3945 | // |
3946 | |
3947 | void DebuggerController::AddProtection(const BYTE *start, const BYTE *end, |
3948 | bool readable) |
3949 | { |
3950 | // !!! |
3951 | _ASSERTE(!"Not implemented yet" ); |
3952 | } |
3953 | |
3954 | // |
3955 | // RemoveProtection removes page protection from the given |
3956 | // addresses. The parameters should match an earlier call to |
3957 | // AddProtection |
3958 | // |
3959 | |
3960 | void DebuggerController::RemoveProtection(const BYTE *start, const BYTE *end, |
3961 | bool readable) |
3962 | { |
3963 | // !!! |
3964 | _ASSERTE(!"Not implemented yet" ); |
3965 | } |
3966 | |
3967 | |
3968 | // Default implementations for FuncEvalEnter & Exit notifications. |
3969 | void DebuggerController::TriggerFuncEvalEnter(Thread * thread) |
3970 | { |
3971 | LOG((LF_CORDB, LL_INFO100000, "DC::TFEEnter, thead=%p, this=%p\n" , thread, this)); |
3972 | } |
3973 | |
3974 | void DebuggerController::TriggerFuncEvalExit(Thread * thread) |
3975 | { |
3976 | LOG((LF_CORDB, LL_INFO100000, "DC::TFEExit, thead=%p, this=%p\n" , thread, this)); |
3977 | } |
3978 | |
// TP_RESULT DebuggerController::TriggerPatch() What: Tells the
// static DC whether this patch should be activated now.
// Returns TPR_IGNORE if it shouldn't be.
// How: Base class implementation returns TPR_IGNORE. Derived classes may
// return other TP_RESULT values.
3984 | TP_RESULT DebuggerController::TriggerPatch(DebuggerControllerPatch *patch, |
3985 | Thread *thread, |
3986 | TRIGGER_WHY tyWhy) |
3987 | { |
3988 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerPatch\n" )); |
3989 | return TPR_IGNORE; |
3990 | } |
3991 | |
3992 | bool DebuggerController::TriggerSingleStep(Thread *thread, |
3993 | const BYTE *ip) |
3994 | { |
3995 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerSingleStep\n" )); |
3996 | return false; |
3997 | } |
3998 | |
3999 | void DebuggerController::TriggerUnwind(Thread *thread, |
4000 | MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset, |
4001 | FramePointer fp, |
4002 | CorDebugStepReason unwindReason) |
4003 | { |
4004 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerUnwind\n" )); |
4005 | } |
4006 | |
4007 | void DebuggerController::TriggerTraceCall(Thread *thread, |
4008 | const BYTE *ip) |
4009 | { |
4010 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerTraceCall\n" )); |
4011 | } |
4012 | |
4013 | TP_RESULT DebuggerController::TriggerExceptionHook(Thread *thread, CONTEXT * pContext, |
4014 | EXCEPTION_RECORD *exception) |
4015 | { |
4016 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default TriggerExceptionHook\n" )); |
4017 | return TPR_IGNORE; |
4018 | } |
4019 | |
4020 | void DebuggerController::TriggerMethodEnter(Thread * thread, |
4021 | DebuggerJitInfo * dji, |
4022 | const BYTE * ip, |
4023 | FramePointer fp) |
4024 | { |
4025 | LOG((LF_CORDB, LL_INFO10000, "DC::TME in default impl. dji=%p, addr=%p, fp=%p\n" , |
4026 | dji, ip, fp.GetSPValue())); |
4027 | } |
4028 | |
4029 | bool DebuggerController::SendEvent(Thread *thread, bool fIpChanged) |
4030 | { |
4031 | CONTRACTL |
4032 | { |
4033 | SO_NOT_MAINLINE; |
4034 | NOTHROW; |
4035 | SENDEVENT_CONTRACT_ITEMS; |
4036 | } |
4037 | CONTRACTL_END; |
4038 | |
4039 | LOG((LF_CORDB, LL_INFO10000, "DC::TP: in default SendEvent\n" )); |
4040 | |
// If any derived class triggers SendEvent, it should also override SendEvent.
4042 | _ASSERTE(false || !"Base DebuggerController sending an event?" ); |
4043 | return false; |
4044 | } |
4045 | |
4046 | |
// Dispatch Func-Eval Enter & Exit notifications.
4048 | void DebuggerController::DispatchFuncEvalEnter(Thread * thread) |
4049 | { |
4050 | LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalEnter for thread 0x%p\n" , thread)); |
4051 | |
4052 | ControllerLockHolder lockController; |
4053 | |
4054 | DebuggerController *p = g_controllers; |
4055 | while (p != NULL) |
4056 | { |
4057 | if ((p->GetThread() == NULL) || (p->GetThread() == thread)) |
4058 | { |
4059 | p->TriggerFuncEvalEnter(thread); |
4060 | } |
4061 | |
4062 | p = p->m_next; |
4063 | } |
4064 | |
4065 | |
4066 | } |
4067 | |
4068 | void DebuggerController::DispatchFuncEvalExit(Thread * thread) |
4069 | { |
4070 | LOG((LF_CORDB, LL_INFO100000, "DC::DispatchFuncEvalExit for thread 0x%p\n" , thread)); |
4071 | |
4072 | ControllerLockHolder lockController; |
4073 | |
4074 | DebuggerController *p = g_controllers; |
4075 | while (p != NULL) |
4076 | { |
4077 | if ((p->GetThread() == NULL) || (p->GetThread() == thread)) |
4078 | { |
4079 | p->TriggerFuncEvalExit(thread); |
4080 | } |
4081 | |
4082 | p = p->m_next; |
4083 | } |
4084 | |
4085 | |
4086 | } |
4087 | |
4088 | |
4089 | #ifdef _DEBUG |
4090 | // See comment in DispatchNativeException |
4091 | void ThisFunctionMayHaveTriggerAGC() |
4092 | { |
4093 | CONTRACTL |
4094 | { |
4095 | SO_NOT_MAINLINE; |
4096 | GC_TRIGGERS; |
4097 | NOTHROW; |
4098 | } |
4099 | CONTRACTL_END; |
4100 | } |
4101 | #endif |
4102 | |
4103 | // bool DebuggerController::DispatchNativeException() Figures out |
4104 | // if any debugger controllers will handle the exception. |
4105 | // DispatchNativeException should be called by the EE when a native exception |
4106 | // occurs. If it returns true, the exception was generated by a Controller and |
4107 | // should be ignored. |
4108 | // How: Calls DispatchExceptionHook to see if anything is |
4109 | // interested in ExceptionHook, then does a switch on dwCode: |
4110 | // EXCEPTION_BREAKPOINT means invoke DispatchPatchOrSingleStep(ST_PATCH). |
4111 | // EXCEPTION_SINGLE_STEP means DispatchPatchOrSingleStep(ST_SINGLE_STEP). |
4112 | // EXCEPTION_ACCESS_VIOLATION means invoke DispatchAccessViolation. |
4113 | // Returns true if the exception was actually meant for the debugger, |
4114 | // returns false otherwise. |
4115 | bool DebuggerController::DispatchNativeException(EXCEPTION_RECORD *pException, |
4116 | CONTEXT *pContext, |
4117 | DWORD dwCode, |
4118 | Thread *pCurThread) |
4119 | { |
4120 | CONTRACTL |
4121 | { |
4122 | SO_INTOLERANT; |
4123 | NOTHROW; |
4124 | |
4125 | // If this exception is for the debugger, then we may trigger a GC. |
4126 | // But we'll be called on _any_ exception, including ones in a GC-no-triggers region. |
4127 | // Our current contract system doesn't let us specify such conditions on GC_TRIGGERS. |
// So we disable it now, and if we find out the exception is meant for the debugger,
// we'll call ThisFunctionMayHaveTriggerAGC() to signal that we really are GC_TRIGGERS.
4130 | DISABLED(GC_TRIGGERS); // Only GC triggers if we send an event, |
4131 | PRECONDITION(!IsDbgHelperSpecialThread()); |
4132 | |
// If we're called from preemptive mode, then our caller has protected the stack.
4134 | // If we're in cooperative mode, then we need to protect the stack before toggling GC modes |
4135 | // (by setting the filter-context) |
4136 | MODE_ANY; |
4137 | |
4138 | PRECONDITION(CheckPointer(pException)); |
4139 | PRECONDITION(CheckPointer(pContext)); |
4140 | PRECONDITION(CheckPointer(pCurThread)); |
4141 | } |
4142 | CONTRACTL_END; |
4143 | |
4144 | LOG((LF_CORDB, LL_EVERYTHING, "DispatchNativeException was called\n" )); |
4145 | LOG((LF_CORDB, LL_INFO10000, "Native exception at 0x%p, code=0x%8x, context=0x%p, er=0x%p\n" , |
4146 | pException->ExceptionAddress, dwCode, pContext, pException)); |
4147 | |
4148 | |
4149 | bool fDebuggers; |
4150 | BOOL fDispatch; |
4151 | DPOSS_ACTION result = DPOSS_DONT_CARE; |
4152 | |
4153 | |
4154 | // We have a potentially ugly locking problem here. This notification is called on any exception, |
4155 | // but we have no idea what our locking context is at the time. Thus we may hold locks smaller |
4156 | // than the controller lock. |
4157 | // The debugger logic really only cares about exceptions directly in managed code (eg, hardware exceptions) |
4158 | // or in patch-skippers (since that's a copy of managed code running in a look-aside buffer). |
4159 | // That should exclude all C++ exceptions, which are the common case if Runtime code throws an internal ex. |
4160 | // So we ignore those to avoid the lock violation. |
4161 | if (pException->ExceptionCode == EXCEPTION_MSVC) |
4162 | { |
4163 | LOG((LF_CORDB, LL_INFO1000, "Debugger skipping for C++ exception.\n" )); |
4164 | return FALSE; |
4165 | } |
4166 | |
4167 | // The debugger really only cares about exceptions in managed code. Any exception that occurs |
4168 | // while the thread is redirected (such as EXCEPTION_HIJACK) is not of interest to the debugger. |
4169 | // Allowing this would be problematic because when an exception occurs while the thread is |
4170 | // redirected, we don't know which context (saved redirection context or filter context) |
4171 | // we should be operating on (see code:GetManagedStoppedCtx). |
4172 | if( ISREDIRECTEDTHREAD(pCurThread) ) |
4173 | { |
4174 | LOG((LF_CORDB, LL_INFO1000, "Debugger ignoring exception 0x%x on redirected thread.\n" , dwCode)); |
4175 | |
4176 | // We shouldn't be seeing debugging exceptions on a redirected thread. While a thread is |
4177 | // redirected we only call a few internal things (see code:Thread.RedirectedHandledJITCase), |
4178 | // and may call into the host. We can't call normal managed code or anything we'd want to debug. |
4179 | _ASSERTE(dwCode != EXCEPTION_BREAKPOINT); |
4180 | _ASSERTE(dwCode != EXCEPTION_SINGLE_STEP); |
4181 | |
4182 | return FALSE; |
4183 | } |
4184 | |
4185 | // It's possible we're here without a debugger (since we have to call the |
4186 | // patch skippers). The Debugger may detach anytime, |
4187 | // so remember the attach state now. |
4188 | #ifdef _DEBUG |
4189 | bool fWasAttached = false; |
4190 | #ifdef DEBUGGING_SUPPORTED |
4191 | fWasAttached = (CORDebuggerAttached() != 0); |
4192 | #endif //DEBUGGING_SUPPORTED |
4193 | #endif //_DEBUG |
4194 | |
4195 | { |
4196 | // If we're in cooperative mode, it's unsafe to do a GC until we've put a filter context in place. |
4197 | GCX_NOTRIGGER(); |
4198 | |
4199 | // If we know the debugger doesn't care about this exception, bail now. |
4200 | // Usually this is just if there's a debugger attached. |
4201 | // However, if a debugger detached but left outstanding controllers (like patch-skippers), |
4202 | // we still may care. |
4203 | // The only way a controller would get created outside of the helper thread is from |
4204 | // a patch skipper, so we always handle breakpoints. |
4205 | if (!CORDebuggerAttached() && (g_controllers == NULL) && (dwCode != EXCEPTION_BREAKPOINT)) |
4206 | { |
4207 | return false; |
4208 | } |
4209 | |
4210 | FireEtwDebugExceptionProcessingStart(); |
4211 | |
4212 | // We should never be here if the debugger was never involved. |
4213 | CONTEXT * pOldContext; |
4214 | pOldContext = pCurThread->GetFilterContext(); |
4215 | |
// In most cases it is an error to nest; however, in the patch-skipping logic we must
// copy an unknown amount of code into another buffer and it occasionally triggers
// an AV. This heuristic should filter that case out. See DDB 198093.
4219 | // Ensure we perform this exception nesting filtering even before the call to |
4220 | // DebuggerController::DispatchExceptionHook, otherwise the nesting will continue when |
4221 | // a contract check is triggered in DispatchExceptionHook and another BP exception is |
4222 | // raised. See Dev11 66058. |
4223 | if ((pOldContext != NULL) && pCurThread->AVInRuntimeImplOkay() && |
4224 | pException->ExceptionCode == STATUS_ACCESS_VIOLATION) |
4225 | { |
4226 | STRESS_LOG1(LF_CORDB, LL_INFO100, "DC::DNE Nested Access Violation at 0x%p is being ignored\n" , |
4227 | pException->ExceptionAddress); |
4228 | return false; |
4229 | } |
4230 | // Otherwise it is an error to nest at all |
4231 | _ASSERTE(pOldContext == NULL); |
4232 | |
4233 | fDispatch = DebuggerController::DispatchExceptionHook(pCurThread, |
4234 | pContext, |
4235 | pException); |
4236 | |
4237 | { |
4238 | // Must be in cooperative mode to set the filter context. We know there are times we'll be in preemptive mode, |
4239 | // (such as M2U handoff, or potentially patches in the middle of a stub, or various random exceptions) |
4240 | |
4241 | // @todo - We need to worry about GC-protecting our stack. If we're in preemptive mode, the caller did it for us. |
4242 | // If we're in cooperative, then we need to set the FilterContext *before* we toggle GC mode (since |
4243 | // the FC protects the stack). |
4244 | // If we're in preemptive, then we need to set the FilterContext *after* we toggle ourselves to Cooperative. |
4245 | // Also note it may not be possible to toggle GC mode at these times (such as in the middle of the stub). |
4246 | // |
4247 | // Part of the problem is that the Filter Context is serving 2 purposes here: |
4248 | // - GC protect the stack. (essential if we're in coop mode). |
4249 | // - provide info to controllers (such as current IP, and a place to set the Single-Step flag). |
4250 | // |
4251 | // This contract violation is mitigated in that we must have had the debugger involved to get to this point. |
4252 | CONTRACT_VIOLATION(ModeViolation); |
4253 | g_pEEInterface->SetThreadFilterContext(pCurThread, pContext); |
4254 | } |
4255 | // Now that we've set the filter context, we can let the GCX_NOTRIGGER expire. |
4256 | // It's still possible that we may be called from a No-trigger region. |
4257 | } |
4258 | |
4259 | |
4260 | if (fDispatch) |
4261 | { |
4262 | // Disable SingleStep for all controllers on this thread. This requires the filter context set. |
4263 | // This is what would disable the ss-flag when single-stepping over an AV. |
4264 | if (g_patchTableValid && (dwCode != EXCEPTION_SINGLE_STEP)) |
4265 | { |
4266 | LOG((LF_CORDB, LL_INFO1000, "DC::DNE non-single-step exception; check if any controller has ss turned on\n" )); |
4267 | |
4268 | ControllerLockHolder lockController; |
4269 | for (DebuggerController* p = g_controllers; p != NULL; p = p->m_next) |
4270 | { |
4271 | if (p->m_singleStep && (p->m_thread == pCurThread)) |
4272 | { |
4273 | LOG((LF_CORDB, LL_INFO1000, "DC::DNE turn off ss for controller 0x%p\n" , p)); |
4274 | p->DisableSingleStep(); |
4275 | } |
4276 | } |
4277 | // implicit controller lock release |
4278 | } |
4279 | |
4280 | CORDB_ADDRESS_TYPE * ip = dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(pContext)); |
4281 | |
4282 | switch (dwCode) |
4283 | { |
4284 | case EXCEPTION_BREAKPOINT: |
4285 | // EIP should be properly set up at this point. |
4286 | result = DebuggerController::DispatchPatchOrSingleStep(pCurThread, |
4287 | pContext, |
4288 | ip, |
4289 | ST_PATCH); |
4290 | LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE DispatchPatch call returned\n" )); |
4291 | |
4292 | // If we detached, we should remove all our breakpoints. So if we try |
4293 | // to handle this breakpoint, make sure that we're attached. |
4294 | if (IsInUsedAction(result) == true) |
4295 | { |
4296 | _ASSERTE(fWasAttached); |
4297 | } |
4298 | break; |
4299 | |
4300 | case EXCEPTION_SINGLE_STEP: |
4301 | LOG((LF_CORDB, LL_EVERYTHING, "DC::DNE SINGLE_STEP Exception\n" )); |
4302 | |
4303 | result = DebuggerController::DispatchPatchOrSingleStep(pCurThread, |
4304 | pContext, |
4305 | ip, |
4306 | (SCAN_TRIGGER)(ST_PATCH|ST_SINGLE_STEP)); |
4307 | // We pass patch | single step since single steps actually |
4308 | // do both (eg, you SS onto a breakpoint). |
4309 | break; |
4310 | |
4311 | default: |
4312 | break; |
4313 | } // end switch |
4314 | |
4315 | } |
4316 | #ifdef _DEBUG |
4317 | else |
4318 | { |
4319 | LOG((LF_CORDB, LL_INFO1000, "DC:: DNE step-around fDispatch:0x%x!\n" , fDispatch)); |
4320 | } |
4321 | #endif //_DEBUG |
4322 | |
4323 | fDebuggers = (fDispatch?(IsInUsedAction(result)?true:false):true); |
4324 | |
4325 | LOG((LF_CORDB, LL_INFO10000, "DC::DNE, returning 0x%x.\n" , fDebuggers)); |
4326 | |
4327 | #ifdef _DEBUG |
4328 | if (fDebuggers && (result == DPOSS_USED_WITH_EVENT)) |
4329 | { |
4330 | // If the exception belongs to the debugger, then we may have sent an event, |
4331 | // and thus we may have triggered a GC. |
4332 | ThisFunctionMayHaveTriggerAGC(); |
4333 | } |
4334 | #endif |
4335 | |
4336 | |
4337 | |
4338 | // Must restore the filter context. After the filter context is gone, we're |
4339 | // unprotected again and unsafe for a GC. |
4340 | { |
4341 | CONTRACT_VIOLATION(ModeViolation); |
4342 | g_pEEInterface->SetThreadFilterContext(pCurThread, NULL); |
4343 | } |
4344 | |
4345 | #ifdef _TARGET_ARM_ |
4346 | if (pCurThread->IsSingleStepEnabled()) |
4347 | pCurThread->ApplySingleStep(pContext); |
4348 | #endif |
4349 | |
4350 | FireEtwDebugExceptionProcessingEnd(); |
4351 | |
4352 | return fDebuggers; |
4353 | } |
4354 | |
4355 | // * ------------------------------------------------------------------------- |
4356 | // * DebuggerPatchSkip routines |
4357 | // * ------------------------------------------------------------------------- |
4358 | |
4359 | DebuggerPatchSkip::DebuggerPatchSkip(Thread *thread, |
4360 | DebuggerControllerPatch *patch, |
4361 | AppDomain *pAppDomain) |
4362 | : DebuggerController(thread, pAppDomain), |
4363 | m_address(patch->address) |
4364 | { |
4365 | LOG((LF_CORDB, LL_INFO10000, |
4366 | "DPS::DPS: Patch skip 0x%p\n" , patch->address)); |
4367 | |
4368 | // On ARM the single-step emulation already utilizes a per-thread execution buffer similar to the scheme |
4369 | // below. As a result we can skip most of the instruction parsing logic that's instead internalized into |
4370 | // the single-step emulation itself. |
4371 | #ifndef _TARGET_ARM_ |
4372 | |
// NOTE: in order to correctly single-step RIP-relative writes on multiple threads we need to set up
// a shared buffer with the instruction and a buffer for the RIP-relative value so that all threads
// are working on the same copy. As the single-steps complete, the modified data in the buffer is
// copied back to the real address to ensure proper execution of the program.
4377 | |
4378 | // |
4379 | // Create the shared instruction block. this will also create the shared RIP-relative buffer |
4380 | // |
4381 | |
4382 | m_pSharedPatchBypassBuffer = patch->GetOrCreateSharedPatchBypassBuffer(); |
4383 | BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass; |
4384 | |
4385 | // Copy the instruction block over to the patch skip |
4386 | // WARNING: there used to be an issue here because CopyInstructionBlock copied the breakpoint from the |
4387 | // jitted code stream into the patch buffer. Further below CORDbgSetInstruction would correct the |
4388 | // first instruction. This buffer is shared by all threads so if another thread executed the buffer |
4389 | // between this thread's execution of CopyInstructionBlock and CORDbgSetInstruction the wrong |
4390 | // code would be executed. The bug has been fixed by changing CopyInstructionBlock to only copy |
4391 | // the code bytes after the breakpoint. |
// You might be tempted to stop copying the code at all; however, that wouldn't work well with rejit.
// If we skip a breakpoint that is sitting at the beginning of a method, then the profiler rejits
// that method (causing a jump-stamp to be placed), and then we skip the breakpoint again, we need
// to make sure the 2nd skip executes the new jump-stamp code and not the original method prologue
// code. Copying the code every time ensures that we have the most up-to-date version of the code
// in the buffer.
4397 | _ASSERTE( patch->IsBound() ); |
4398 | CopyInstructionBlock(patchBypass, (const BYTE *)patch->address); |
4399 | |
4400 | // Technically, we could create a patch skipper for an inactive patch, but we rely on the opcode being |
4401 | // set here. |
4402 | _ASSERTE( patch->IsActivated() ); |
4403 | CORDbgSetInstruction((CORDB_ADDRESS_TYPE *)patchBypass, patch->opcode); |
4404 | |
4405 | LOG((LF_CORDB, LL_EVERYTHING, "SetInstruction was called\n" )); |
4406 | // |
4407 | // Look at instruction to get some attributes |
4408 | // |
4409 | |
4410 | NativeWalker::DecodeInstructionForPatchSkip(patchBypass, &(m_instrAttrib)); |
4411 | |
4412 | #if defined(_TARGET_AMD64_) |
4413 | |
4414 | |
// The code below handles RIP-relative addressing on AMD64. The original implementation made the assumption that
// we are only using RIP-relative addressing to access read-only data (see VSW 246145 for more information). This
// has since been expanded to handle RIP-relative writes as well.
4418 | if (m_instrAttrib.m_dwOffsetToDisp != 0) |
4419 | { |
4420 | _ASSERTE(m_instrAttrib.m_cbInstr != 0); |
4421 | |
4422 | // |
4423 | // Populate the RIP-relative buffer with the current value if needed |
4424 | // |
4425 | |
4426 | BYTE* bufferBypass = m_pSharedPatchBypassBuffer->BypassBuffer; |
4427 | |
4428 | // Overwrite the *signed* displacement. |
4429 | int dwOldDisp = *(int*)(&patchBypass[m_instrAttrib.m_dwOffsetToDisp]); |
4430 | int dwNewDisp = offsetof(SharedPatchBypassBuffer, BypassBuffer) - |
4431 | (offsetof(SharedPatchBypassBuffer, PatchBypass) + m_instrAttrib.m_cbInstr); |
4432 | *(int*)(&patchBypass[m_instrAttrib.m_dwOffsetToDisp]) = dwNewDisp; |
4433 | |
4434 | // This could be an LEA, which we'll just have to change into a MOV |
4435 | // and copy the original address |
4436 | if (((patchBypass[0] == 0x4C) || (patchBypass[0] == 0x48)) && (patchBypass[1] == 0x8d)) |
4437 | { |
4438 | patchBypass[1] = 0x8b; // MOV reg, mem |
4439 | _ASSERTE((int)sizeof(void*) <= SharedPatchBypassBuffer::cbBufferBypass); |
4440 | *(void**)bufferBypass = (void*)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp); |
4441 | } |
4442 | else |
4443 | { |
4444 | // Copy the data into our buffer. |
4445 | memcpy(bufferBypass, patch->address + m_instrAttrib.m_cbInstr + dwOldDisp, SharedPatchBypassBuffer::cbBufferBypass); |
4446 | |
4447 | if (m_instrAttrib.m_fIsWrite) |
4448 | { |
4449 | // save the actual destination address and size so when we TriggerSingleStep() we can update the value |
4450 | m_pSharedPatchBypassBuffer->RipTargetFixup = (UINT_PTR)(patch->address + m_instrAttrib.m_cbInstr + dwOldDisp); |
4451 | m_pSharedPatchBypassBuffer->RipTargetFixupSize = m_instrAttrib.m_cOperandSize; |
4452 | } |
4453 | } |
4454 | } |
4455 | #endif // _TARGET_AMD64_ |
4456 | |
4457 | #endif // !_TARGET_ARM_ |
4458 | |
// Signals our thread that the debugger will be manipulating the context
// during the patch skip operation. This effectively prevents other threads
// from suspending us until we have completed skipping the patch and restored
// a good context (see DDB 188816).
4463 | thread->BeginDebuggerPatchSkip(this); |
4464 | |
4465 | // |
4466 | // Set IP of context to point to patch bypass buffer |
4467 | // |
4468 | |
4469 | T_CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
4470 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
4471 | CONTEXT c; |
4472 | if (context == NULL) |
4473 | { |
4474 | // We can't play with our own context! |
4475 | #if _DEBUG |
4476 | if (g_pEEInterface->GetThread()) |
4477 | { |
// the current thread is a managed thread
4479 | _ASSERTE(Debugger::GetThreadIdHelper(thread) != Debugger::GetThreadIdHelper(g_pEEInterface->GetThread())); |
4480 | } |
4481 | #endif // _DEBUG |
4482 | |
4483 | c.ContextFlags = CONTEXT_CONTROL; |
4484 | |
4485 | thread->GetThreadContext(&c); |
4486 | context =(T_CONTEXT *) &c; |
4487 | |
4488 | ARM_ONLY(_ASSERTE(!"We should always have a filter context in DebuggerPatchSkip." )); |
4489 | } |
4490 | |
4491 | #ifdef _TARGET_ARM_ |
4492 | // Since we emulate all single-stepping on ARM using an instruction buffer and a breakpoint all we have to |
4493 | // do here is initiate a normal single-step except that we pass the instruction to be stepped explicitly |
4494 | // (calling EnableSingleStep() would infer this by looking at the PC in the context, which would pick up |
4495 | // the patch we're trying to skip). |
4496 | // |
4497 | // Ideally we'd refactor the EnableSingleStep to support this alternative calling sequence but since this |
4498 | // involves three levels of methods and is only applicable to ARM we've chosen to replicate the relevant |
4499 | // implementation here instead. |
4500 | { |
4501 | ControllerLockHolder lockController; |
4502 | g_pEEInterface->MarkThreadForDebugStepping(thread, true); |
4503 | WORD opcode2 = 0; |
4504 | |
4505 | if (Is32BitInstruction(patch->opcode)) |
4506 | { |
4507 | opcode2 = CORDbgGetInstruction((CORDB_ADDRESS_TYPE *)(((DWORD)patch->address) + 2)); |
4508 | } |
4509 | |
4510 | thread->BypassWithSingleStep((DWORD)patch->address, patch->opcode, opcode2); |
4511 | m_singleStep = true; |
4512 | } |
4513 | |
4514 | #else // _TARGET_ARM_ |
4515 | |
4516 | #ifdef _TARGET_ARM64_ |
4517 | patchBypass = NativeWalker::SetupOrSimulateInstructionForPatchSkip(context, m_pSharedPatchBypassBuffer, (const BYTE *)patch->address, patch->opcode); |
4518 | #endif //_TARGET_ARM64_ |
4519 | |
// Set the IP to point to the bypass buffer...
4521 | SetIP(context, (PCODE)patchBypass); |
4522 | |
4523 | if (context ==(T_CONTEXT*) &c) |
4524 | thread->SetThreadContext(&c); |
4525 | |
4526 | |
4527 | LOG((LF_CORDB, LL_INFO10000, "DPS::DPS Bypass at 0x%p for opcode %p \n" , patchBypass, patch->opcode)); |
4528 | |
4529 | // |
4530 | // Turn on single step (if the platform supports it) so we can |
4531 | // fix up state after the instruction is executed. |
4532 | // Also turn on exception hook so we can adjust IP in exceptions |
4533 | // |
4534 | |
4535 | EnableSingleStep(); |
4536 | |
4537 | #endif // _TARGET_ARM_ |
4538 | |
4539 | EnableExceptionHook(); |
4540 | } |
4541 | |
4542 | DebuggerPatchSkip::~DebuggerPatchSkip() |
4543 | { |
4544 | #ifndef _TARGET_ARM_ |
4545 | _ASSERTE(m_pSharedPatchBypassBuffer); |
4546 | m_pSharedPatchBypassBuffer->Release(); |
4547 | #endif |
4548 | } |
4549 | |
4550 | void DebuggerPatchSkip::DebuggerDetachClean() |
4551 | { |
// Since SharedPatchBypassBuffer doesn't exist on ARM, we don't have to do anything here.
4553 | #ifndef _TARGET_ARM_ |
// Fix for Bug 1176448
// When a debugger is detaching from the debuggee, we need to move the IP if it is pointing
// somewhere in the PatchBypassBuffer. All managed threads are suspended during detach, so changing
// the context without notifications is safe.
// Notice:
// THIS FIX IS INCOMPLETE! It attempts to update the IP in the cases we can easily detect. However,
// if a thread is in preemptive mode, and its filter context has been propagated to a VEH
// context, then the filter context we get will be NULL and this fix will not work. Our belief is
// that this scenario is rare enough that it doesn't justify the cost and risk associated with a
// complete fix, in which we would have to either:
// 1. Change the reference counting for DebuggerController and then change the exception handling
//    logic in the debuggee so that we can handle the debugger event after detach.
// 2. Create a "stack walking" implementation for native code and use it to get the current IP and
//    set the IP to the right place.
4568 | |
4569 | Thread *thread = GetThread(); |
4570 | if (thread != NULL) |
4571 | { |
4572 | BYTE *patchBypass = m_pSharedPatchBypassBuffer->PatchBypass; |
4573 | CONTEXT *context = thread->GetFilterContext(); |
4574 | if (patchBypass != NULL && |
4575 | context != NULL && |
4576 | (size_t)GetIP(context) >= (size_t)patchBypass && |
4577 | (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1)) |
4578 | { |
4579 | SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address))); |
4580 | } |
4581 | } |
4582 | #endif |
4583 | } |
4584 | |
4585 | |
4586 | // |
// We have to have a whole separate function for this because you
// can't use __try in a function that requires object unwinding...
4589 | // |
4590 | |
4591 | LONG FilterAccessViolation2(LPEXCEPTION_POINTERS ep, PVOID pv) |
4592 | { |
4593 | LIMITED_METHOD_CONTRACT; |
4594 | |
4595 | return (ep->ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) |
4596 | ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; |
4597 | } |
4598 | |
4599 | // This helper is required because the AVInRuntimeImplOkayHolder can not |
4600 | // be directly placed inside the scope of a PAL_TRY |
4601 | void _CopyInstructionBlockHelper(BYTE* to, const BYTE* from) |
4602 | { |
4603 | AVInRuntimeImplOkayHolder AVOkay; |
4604 | |
4605 | // This function only copies the portion of the instruction that follows the |
4606 | // breakpoint opcode, not the breakpoint itself |
4607 | to += CORDbg_BREAK_INSTRUCTION_SIZE; |
4608 | from += CORDbg_BREAK_INSTRUCTION_SIZE; |
4609 | |
4610 | // If an AV occurs because we walked off a valid page then we need |
4611 | // to be certain that all bytes on the previous page were copied. |
4612 | // We are certain that we copied enough bytes to contain the instruction |
4613 | // because it must have fit within the valid page. |
4614 | for (int i = 0; i < MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE; i++) |
4615 | { |
4616 | *to++ = *from++; |
4617 | } |
4618 | |
4619 | } |
4620 | |
4621 | // WARNING: this function skips copying the first CORDbg_BREAK_INSTRUCTION_SIZE bytes by design |
4622 | // See the comment at the callsite in DebuggerPatchSkip::DebuggerPatchSkip for more details on |
4623 | // this |
4624 | void DebuggerPatchSkip::CopyInstructionBlock(BYTE *to, const BYTE* from) |
4625 | { |
4626 | // We wrap the memcpy in an exception handler to handle the |
4627 | // extremely rare case where we're copying an instruction off the |
4628 | // end of a method that is also at the end of a page, and the next |
4629 | // page is unmapped. |
4630 | struct Param |
4631 | { |
4632 | BYTE *to; |
4633 | const BYTE* from; |
4634 | } param; |
4635 | param.to = to; |
4636 | param.from = from; |
4637 | PAL_TRY(Param *, pParam, ¶m) |
4638 | { |
4639 | _CopyInstructionBlockHelper(pParam->to, pParam->from); |
4640 | } |
4641 | PAL_EXCEPT_FILTER(FilterAccessViolation2) |
4642 | { |
        // The whole point is that if we copy up to the AV, then
4644 | // that's enough to execute, otherwise we would not have been |
4645 | // able to execute the code anyway. So we just ignore the |
4646 | // exception. |
4647 | LOG((LF_CORDB, LL_INFO10000, |
4648 | "DPS::DPS: AV copying instruction block ignored.\n" )); |
4649 | } |
4650 | PAL_ENDTRY |
4651 | |
4652 | // We just created a new buffer of code, but the CPU caches code and may |
4653 | // not be aware of our changes. This should force the CPU to dump any cached |
4654 | // instructions it has in this region and load the new ones from memory |
4655 | FlushInstructionCache(GetCurrentProcess(), to + CORDbg_BREAK_INSTRUCTION_SIZE, |
4656 | MAX_INSTRUCTION_LENGTH - CORDbg_BREAK_INSTRUCTION_SIZE); |
4657 | } |
4658 | |
4659 | TP_RESULT DebuggerPatchSkip::TriggerPatch(DebuggerControllerPatch *patch, |
4660 | Thread *thread, |
4661 | TRIGGER_WHY tyWhy) |
4662 | { |
4663 | ARM_ONLY(_ASSERTE(!"Should not have called DebuggerPatchSkip::TriggerPatch." )); |
4664 | LOG((LF_CORDB, LL_EVERYTHING, "DPS::TP called\n" )); |
4665 | |
4666 | #if defined(_DEBUG) && !defined(_TARGET_ARM_) |
4667 | CONTEXT *context = GetManagedLiveCtx(thread); |
4668 | |
4669 | LOG((LF_CORDB, LL_INFO1000, "DPS::TP: We've patched 0x%x (byPass:0x%x) " |
4670 | "for a skip after an EnC update!\n" , GetIP(context), |
4671 | GetBypassAddress())); |
4672 | _ASSERTE(g_patches != NULL); |
4673 | |
4674 | // We shouldn't have mucked with EIP, yet. |
4675 | _ASSERTE(dac_cast<PTR_CORDB_ADDRESS_TYPE>(GetIP(context)) == GetBypassAddress()); |
4676 | |
4677 | //We should be the _only_ patch here |
4678 | MethodDesc *md2 = dac_cast<PTR_MethodDesc>(GetIP(context)); |
4679 | DebuggerControllerPatch *patchCheck = g_patches->GetPatch(g_pEEInterface->MethodDescGetModule(md2),md2->GetMemberDef()); |
4680 | _ASSERTE(patchCheck == patch); |
4681 | _ASSERTE(patchCheck->controller == patch->controller); |
4682 | |
4683 | patchCheck = g_patches->GetNextPatch(patchCheck); |
4684 | _ASSERTE(patchCheck == NULL); |
4685 | #endif // _DEBUG |
4686 | |
4687 | DisableAll(); |
4688 | EnableExceptionHook(); |
4689 | EnableSingleStep(); //gets us back to where we want. |
4690 | return TPR_IGNORE; // don't actually want to stop here.... |
4691 | } |
4692 | |
4693 | TP_RESULT DebuggerPatchSkip::TriggerExceptionHook(Thread *thread, CONTEXT * context, |
4694 | EXCEPTION_RECORD *exception) |
4695 | { |
4696 | CONTRACTL |
4697 | { |
4698 | SO_NOT_MAINLINE; |
4699 | NOTHROW; |
4700 | GC_NOTRIGGER; |
4701 | // Patch skippers only operate on patches set in managed code. But the infrastructure may have |
4702 | // toggled the GC mode underneath us. |
4703 | MODE_ANY; |
4704 | |
4705 | PRECONDITION(GetThread() == thread); |
4706 | PRECONDITION(thread != NULL); |
4707 | PRECONDITION(CheckPointer(context)); |
4708 | } |
4709 | CONTRACTL_END; |
4710 | |
4711 | if (m_pAppDomain != NULL) |
4712 | { |
4713 | AppDomain *pAppDomainCur = thread->GetDomain(); |
4714 | |
4715 | if (pAppDomainCur != m_pAppDomain) |
4716 | { |
4717 | LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: Appdomain mismatch - not skiiping!\n" )); |
4718 | return TPR_IGNORE; |
4719 | } |
4720 | } |
4721 | |
4722 | LOG((LF_CORDB,LL_INFO10000, "DPS::TEH: doing the patch-skip thing\n" )); |
4723 | |
4724 | #if defined(_TARGET_ARM64_) |
4725 | |
4726 | if (!IsSingleStep(exception->ExceptionCode)) |
4727 | { |
4728 | LOG((LF_CORDB, LL_INFO10000, "Exception in patched Bypass instruction .\n" )); |
4729 | return (TPR_IGNORE_AND_STOP); |
4730 | } |
4731 | |
4732 | _ASSERTE(m_pSharedPatchBypassBuffer); |
4733 | BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass; |
4734 | PCODE targetIp; |
4735 | if (m_pSharedPatchBypassBuffer->RipTargetFixup) |
4736 | { |
4737 | targetIp = m_pSharedPatchBypassBuffer->RipTargetFixup; |
4738 | } |
4739 | else |
4740 | { |
4741 | targetIp = (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address)); |
4742 | } |
4743 | |
4744 | SetIP(context, targetIp); |
4745 | LOG((LF_CORDB, LL_ALWAYS, "Redirecting after Patch to 0x%p\n" , GetIP(context))); |
4746 | |
4747 | #elif defined (_TARGET_ARM_) |
4748 | //Do nothing |
4749 | #else |
4750 | _ASSERTE(m_pSharedPatchBypassBuffer); |
4751 | BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass; |
4752 | |
4753 | if (m_instrAttrib.m_fIsCall && IsSingleStep(exception->ExceptionCode)) |
4754 | { |
4755 | // Fixup return address on stack |
4756 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
4757 | SIZE_T *sp = (SIZE_T *) GetSP(context); |
4758 | |
4759 | LOG((LF_CORDB, LL_INFO10000, |
4760 | "Bypass call return address redirected from 0x%p\n" , *sp)); |
4761 | |
4762 | *sp -= patchBypass - (BYTE*)m_address; |
4763 | |
4764 | LOG((LF_CORDB, LL_INFO10000, "to 0x%p\n" , *sp)); |
4765 | #else |
4766 | PORTABILITY_ASSERT("DebuggerPatchSkip::TriggerExceptionHook -- return address fixup NYI" ); |
4767 | #endif |
4768 | } |
4769 | |
4770 | if (!m_instrAttrib.m_fIsAbsBranch || !IsSingleStep(exception->ExceptionCode)) |
4771 | { |
4772 | // Fixup IP |
4773 | |
4774 | LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected from 0x%p\n" , GetIP(context))); |
4775 | |
4776 | if (IsSingleStep(exception->ExceptionCode)) |
4777 | { |
4778 | #ifndef FEATURE_PAL |
4779 | // Check if the current IP is anywhere near the exception dispatcher logic. |
4780 | // If it is, ignore the exception, as the real exception is coming next. |
4781 | static FARPROC pExcepDispProc = NULL; |
4782 | |
4783 | if (!pExcepDispProc) |
4784 | { |
4785 | HMODULE hNtDll = WszGetModuleHandle(W("ntdll.dll" )); |
4786 | |
4787 | if (hNtDll != NULL) |
4788 | { |
4789 | pExcepDispProc = GetProcAddress(hNtDll, "KiUserExceptionDispatcher" ); |
4790 | |
4791 | if (!pExcepDispProc) |
4792 | pExcepDispProc = (FARPROC)(size_t)(-1); |
4793 | } |
4794 | else |
4795 | pExcepDispProc = (FARPROC)(size_t)(-1); |
4796 | } |
4797 | |
4798 | _ASSERTE(pExcepDispProc != NULL); |
4799 | |
4800 | if ((size_t)pExcepDispProc != (size_t)(-1)) |
4801 | { |
4802 | LPVOID pExcepDispEntryPoint = pExcepDispProc; |
4803 | |
4804 | if ((size_t)GetIP(context) > (size_t)pExcepDispEntryPoint && |
4805 | (size_t)GetIP(context) <= ((size_t)pExcepDispEntryPoint + MAX_INSTRUCTION_LENGTH * 2 + 1)) |
4806 | { |
4807 | LOG((LF_CORDB, LL_INFO10000, |
4808 | "Bypass instruction not redirected. Landed in exception dispatcher.\n" )); |
4809 | |
4810 | return (TPR_IGNORE_AND_STOP); |
4811 | } |
4812 | } |
4813 | #endif // FEATURE_PAL |
4814 | |
4815 | // If the IP is close to the skip patch start, or if we were skipping over a call, then assume the IP needs |
4816 | // adjusting. |
4817 | if (m_instrAttrib.m_fIsCall || |
4818 | ((size_t)GetIP(context) > (size_t)patchBypass && |
4819 | (size_t)GetIP(context) <= (size_t)(patchBypass + MAX_INSTRUCTION_LENGTH + 1))) |
4820 | { |
4821 | LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because still in skip area.\n" )); |
4822 | LOG((LF_CORDB, LL_INFO10000, "m_fIsCall = %d, patchBypass = 0x%x, m_address = 0x%x\n" , |
4823 | m_instrAttrib.m_fIsCall, patchBypass, m_address)); |
4824 | SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address))); |
4825 | } |
4826 | else |
4827 | { |
4828 | // Otherwise, need to see if the IP is something we recognize (either managed code |
4829 | // or stub code) - if not, we ignore the exception |
4830 | PCODE newIP = GetIP(context); |
4831 | newIP -= PCODE(patchBypass - (BYTE *)m_address); |
4832 | TraceDestination trace; |
4833 | |
4834 | if (g_pEEInterface->IsManagedNativeCode(dac_cast<PTR_CBYTE>(newIP)) || |
4835 | (g_pEEInterface->TraceStub(LPBYTE(newIP), &trace))) |
4836 | { |
4837 | LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because we landed in managed or stub code\n" )); |
4838 | SetIP(context, newIP); |
4839 | } |
4840 | |
4841 | // If we have no idea where things have gone, then we assume that the IP needs no adjusting (which |
4842 | // could happen if the instruction we were trying to patch skip caused an AV). In this case we want |
4843 | // to claim it as ours but ignore it and continue execution. |
4844 | else |
4845 | { |
4846 | LOG((LF_CORDB, LL_INFO10000, "Bypass instruction not redirected because we're not in managed or stub code.\n" )); |
4847 | return (TPR_IGNORE_AND_STOP); |
4848 | } |
4849 | } |
4850 | } |
4851 | else |
4852 | { |
4853 | LOG((LF_CORDB, LL_INFO10000, "Bypass instruction redirected because it wasn't a single step exception.\n" )); |
4854 | SetIP(context, (PCODE)((BYTE *)GetIP(context) - (patchBypass - (BYTE *)m_address))); |
4855 | } |
4856 | |
4857 | LOG((LF_CORDB, LL_ALWAYS, "to 0x%x\n" , GetIP(context))); |
4858 | |
4859 | } |
4860 | |
4861 | #endif |
4862 | |
4863 | |
    // Signals our thread that the debugger is done manipulating the context
    // during the patch skip operation. This effectively prevents other threads
    // from suspending us until we have completed skipping the patch and restored
    // a good context (See DDB 188816)
4868 | m_thread->EndDebuggerPatchSkip(); |
4869 | |
4870 | // Don't delete the controller yet if this is a single step exception, as the code will still want to dispatch to |
4871 | // our single step method, and if it doesn't find something to dispatch to we won't continue from the exception. |
4872 | // |
4873 | // (This is kind of broken behavior but is easily worked around here |
4874 | // by this test) |
4875 | if (!IsSingleStep(exception->ExceptionCode)) |
4876 | { |
4877 | Delete(); |
4878 | } |
4879 | |
4880 | DisableExceptionHook(); |
4881 | |
4882 | return TPR_TRIGGER; |
4883 | } |
4884 | |
4885 | bool DebuggerPatchSkip::TriggerSingleStep(Thread *thread, const BYTE *ip) |
4886 | { |
4887 | LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: basically a no-op\n" )); |
4888 | |
4889 | if (m_pAppDomain != NULL) |
4890 | { |
4891 | AppDomain *pAppDomainCur = thread->GetDomain(); |
4892 | |
4893 | if (pAppDomainCur != m_pAppDomain) |
4894 | { |
4895 | LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: Appdomain mismatch - " |
4896 | "not SingSteping!!\n" )); |
4897 | return false; |
4898 | } |
4899 | } |
4900 | #if defined(_TARGET_AMD64_) |
4901 | // Dev11 91932: for RIP-relative writes we need to copy the value that was written in our buffer to the actual address |
4902 | _ASSERTE(m_pSharedPatchBypassBuffer); |
4903 | if (m_pSharedPatchBypassBuffer->RipTargetFixup) |
4904 | { |
4905 | _ASSERTE(m_pSharedPatchBypassBuffer->RipTargetFixupSize); |
4906 | |
4907 | BYTE* bufferBypass = m_pSharedPatchBypassBuffer->BypassBuffer; |
4908 | BYTE fixupSize = m_pSharedPatchBypassBuffer->RipTargetFixupSize; |
4909 | UINT_PTR targetFixup = m_pSharedPatchBypassBuffer->RipTargetFixup; |
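
        // Illustrative sketch with a hypothetical instruction: for a
        // RIP-relative write such as
        //     mov qword ptr [rip+0x1234], rax
        // the instruction was executed out of the bypass buffer, so the store
        // landed in BypassBuffer rather than at its intended target.
        // RipTargetFixup holds the address the original instruction would have
        // written, and the switch below copies RipTargetFixupSize bytes from
        // the buffer to that real target.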
4910 | |
4911 | switch (fixupSize) |
4912 | { |
4913 | case 1: |
4914 | *(reinterpret_cast<BYTE*>(targetFixup)) = *(reinterpret_cast<BYTE*>(bufferBypass)); |
4915 | break; |
4916 | |
4917 | case 2: |
4918 | *(reinterpret_cast<WORD*>(targetFixup)) = *(reinterpret_cast<WORD*>(bufferBypass)); |
4919 | break; |
4920 | |
4921 | case 4: |
4922 | *(reinterpret_cast<DWORD*>(targetFixup)) = *(reinterpret_cast<DWORD*>(bufferBypass)); |
4923 | break; |
4924 | |
4925 | case 8: |
4926 | *(reinterpret_cast<ULONGLONG*>(targetFixup)) = *(reinterpret_cast<ULONGLONG*>(bufferBypass)); |
4927 | break; |
4928 | |
4929 | case 16: |
4930 | memcpy(reinterpret_cast<void*>(targetFixup), bufferBypass, 16); |
4931 | break; |
4932 | |
4933 | default: |
4934 | _ASSERTE(!"bad operand size" ); |
4935 | } |
4936 | } |
4937 | #endif |
4938 | LOG((LF_CORDB,LL_INFO10000, "DPS::TSS: triggered, about to delete\n" )); |
4939 | |
4940 | TRACE_FREE(this); |
4941 | Delete(); |
4942 | return false; |
4943 | } |
4944 | |
4945 | // * ------------------------------------------------------------------------- |
4946 | // * DebuggerBreakpoint routines |
4947 | // * ------------------------------------------------------------------------- |
4948 | // DebuggerBreakpoint::DebuggerBreakpoint() The constructor |
4949 | // invokes AddBindAndActivatePatch to set the breakpoint |
4950 | DebuggerBreakpoint::DebuggerBreakpoint(Module *module, |
4951 | mdMethodDef md, |
4952 | AppDomain *pAppDomain, |
4953 | SIZE_T offset, |
4954 | bool native, |
4955 | SIZE_T ilEnCVersion, // must give the EnC version for non-native bps |
4956 | MethodDesc *nativeMethodDesc, // use only when m_native |
4957 | DebuggerJitInfo *nativeJITInfo, // optional when m_native, null otherwise |
4958 | bool nativeCodeBindAllVersions, |
4959 | BOOL *pSucceed |
4960 | ) |
4961 | : DebuggerController(NULL, pAppDomain) |
4962 | { |
4963 | _ASSERTE(pSucceed != NULL); |
4964 | _ASSERTE((native == (nativeMethodDesc != NULL)) || nativeCodeBindAllVersions); |
4965 | _ASSERTE(native || nativeJITInfo == NULL); |
4966 | _ASSERTE(!nativeJITInfo || nativeJITInfo->m_jitComplete); // this is sent by the left-side, and it couldn't have got the code if the JIT wasn't complete |
4967 | |
4968 | if (native && !nativeCodeBindAllVersions) |
4969 | { |
4970 | (*pSucceed) = AddBindAndActivateNativeManagedPatch(nativeMethodDesc, nativeJITInfo, offset, LEAF_MOST_FRAME, pAppDomain); |
4971 | return; |
4972 | } |
4973 | else |
4974 | { |
4975 | _ASSERTE(!native || offset == 0); |
4976 | (*pSucceed) = AddILPatch(pAppDomain, module, md, NULL, ilEnCVersion, offset, !native); |
4977 | } |
4978 | } |
4979 | |
4980 | // TP_RESULT DebuggerBreakpoint::TriggerPatch() |
4981 | // What: This patch will always be activated. |
4982 | // How: return true. |
4983 | TP_RESULT DebuggerBreakpoint::TriggerPatch(DebuggerControllerPatch *patch, |
4984 | Thread *thread, |
4985 | TRIGGER_WHY tyWhy) |
4986 | { |
4987 | LOG((LF_CORDB, LL_INFO10000, "DB::TP\n" )); |
4988 | |
4989 | return TPR_TRIGGER; |
4990 | } |
4991 | |
4992 | // void DebuggerBreakpoint::SendEvent() What: Inform |
4993 | // the right side that the breakpoint was reached. |
4994 | // How: g_pDebugger->SendBreakpoint() |
4995 | bool DebuggerBreakpoint::SendEvent(Thread *thread, bool fIpChanged) |
4996 | { |
4997 | CONTRACTL |
4998 | { |
4999 | SO_NOT_MAINLINE; |
5000 | NOTHROW; |
5001 | SENDEVENT_CONTRACT_ITEMS; |
5002 | } |
5003 | CONTRACTL_END; |
5004 | |
5005 | |
5006 | LOG((LF_CORDB, LL_INFO10000, "DB::SE: in DebuggerBreakpoint's SendEvent\n" )); |
5007 | |
5008 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
5009 | |
    // If we got interrupted by SetIp, we just don't send the IPC event. Our triggers are still
5011 | // active so no harm done. |
5012 | if (!fIpChanged) |
5013 | { |
5014 | g_pDebugger->SendBreakpoint(thread, context, this); |
5015 | return true; |
5016 | } |
5017 | |
5018 | // Controller is still alive, will fire if we hit the breakpoint again. |
5019 | return false; |
5020 | } |
5021 | |
5022 | //* ------------------------------------------------------------------------- |
5023 | // * DebuggerStepper routines |
5024 | // * ------------------------------------------------------------------------- |
5025 | |
5026 | DebuggerStepper::DebuggerStepper(Thread *thread, |
5027 | CorDebugUnmappedStop rgfMappingStop, |
5028 | CorDebugIntercept interceptStop, |
5029 | AppDomain *appDomain) |
5030 | : DebuggerController(thread, appDomain), |
5031 | m_stepIn(false), |
5032 | m_reason(STEP_NORMAL), |
5033 | m_fpStepInto(LEAF_MOST_FRAME), |
5034 | m_rgfInterceptStop(interceptStop), |
5035 | m_rgfMappingStop(rgfMappingStop), |
5036 | m_range(NULL), |
5037 | m_rangeCount(0), |
5038 | m_realRangeCount(0), |
5039 | m_fp(LEAF_MOST_FRAME), |
5040 | #if defined(WIN64EXCEPTIONS) |
5041 | m_fpParentMethod(LEAF_MOST_FRAME), |
5042 | #endif // WIN64EXCEPTIONS |
5043 | m_fpException(LEAF_MOST_FRAME), |
5044 | m_fdException(0), |
5045 | m_cFuncEvalNesting(0) |
5046 | { |
5047 | #ifdef _DEBUG |
5048 | m_fReadyToSend = false; |
5049 | #endif |
5050 | } |
5051 | |
5052 | DebuggerStepper::~DebuggerStepper() |
5053 | { |
5054 | if (m_range != NULL) |
5055 | { |
5056 | TRACE_FREE(m_range); |
5057 | DeleteInteropSafe(m_range); |
5058 | } |
5059 | } |
5060 | |
5061 | // bool DebuggerStepper::ShouldContinueStep() Return true if |
5062 | // the stepper should not stop at this address. The stepper should not |
// stop here if the address is in the prolog, epilog, etc.,
// and the stepper is not interested in stopping there.
5065 | // We assume that this is being called in the frame which the stepper steps |
5066 | // through. Unless, of course, we're returning from a call, in which |
5067 | // case we want to stop in the epilog even if the user didn't say so, |
5068 | // to prevent stepping out of multiple frames at once. |
5069 | // <REVISIT_TODO>Possible optimization: GetJitInfo, then AddPatch @ end of prolog?</REVISIT_TODO> |
5070 | bool DebuggerStepper::ShouldContinueStep( ControllerStackInfo *info, |
5071 | SIZE_T nativeOffset) |
5072 | { |
5073 | LOG((LF_CORDB,LL_INFO10000, "DeSt::ShContSt: nativeOffset:0x%p \n" , nativeOffset)); |
5074 | if (m_rgfMappingStop != STOP_ALL && (m_reason != STEP_EXIT) ) |
5075 | { |
5076 | |
5077 | DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame(); |
5078 | |
5079 | if ( ji != NULL ) |
5080 | { |
5081 | LOG((LF_CORDB,LL_INFO10000,"DeSt::ShContSt: For code 0x%p, got " |
5082 | "DJI 0x%p, from 0x%p to 0x%p\n" , |
5083 | (const BYTE*)GetControlPC(&(info->m_activeFrame.registers)), |
5084 | ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode)); |
5085 | } |
5086 | else |
5087 | { |
5088 | LOG((LF_CORDB,LL_INFO10000,"DeSt::ShCoSt: For code 0x%p, didn't " |
5089 | "get DJI\n" ,(const BYTE*)GetControlPC(&(info->m_activeFrame.registers)))); |
5090 | |
5091 | return false; // Haven't a clue if we should continue, so |
5092 | // don't |
5093 | } |
5094 | CorDebugMappingResult map = MAPPING_UNMAPPED_ADDRESS; |
5095 | DWORD whichIDontCare; |
5096 | ji->MapNativeOffsetToIL( nativeOffset, &map, &whichIDontCare); |
5097 | unsigned int interestingMappings = |
5098 | (map & ~(MAPPING_APPROXIMATE | MAPPING_EXACT)); |
5099 | |
5100 | LOG((LF_CORDB,LL_INFO10000, |
5101 | "DeSt::ShContSt: interestingMappings:0x%x m_rgfMappingStop:%x\n" , |
5102 | interestingMappings,m_rgfMappingStop)); |
5103 | |
        // If we're in a prolog or epilog, then we may want to skip
5105 | // over it or stop |
5106 | if ( interestingMappings ) |
5107 | { |
5108 | if ( interestingMappings & m_rgfMappingStop ) |
5109 | return false; |
5110 | else |
5111 | return true; |
5112 | } |
5113 | } |
5114 | return false; |
5115 | } |
5116 | |
5117 | bool DebuggerStepper::IsRangeAppropriate(ControllerStackInfo *info) |
5118 | { |
5119 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info:0x%x \n" , info)); |
5120 | if (m_range == NULL) |
5121 | { |
5122 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_range == NULL, returning FALSE\n" )); |
5123 | return false; |
5124 | } |
5125 | |
5126 | FrameInfo *realFrame; |
5127 | |
5128 | #if defined(WIN64EXCEPTIONS) |
5129 | bool fActiveFrameIsFunclet = info->m_activeFrame.IsNonFilterFuncletFrame(); |
5130 | |
5131 | if (fActiveFrameIsFunclet) |
5132 | { |
5133 | realFrame = &(info->m_returnFrame); |
5134 | } |
5135 | else |
5136 | #endif // WIN64EXCEPTIONS |
5137 | { |
5138 | realFrame = &(info->m_activeFrame); |
5139 | } |
5140 | |
5141 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: info->m_activeFrame.fp:0x%x m_fp:0x%x\n" , info->m_activeFrame.fp, m_fp)); |
5142 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: m_fdException:0x%x realFrame->md:0x%x realFrame->fp:0x%x m_fpException:0x%x\n" , |
5143 | m_fdException, realFrame->md, realFrame->fp, m_fpException)); |
5144 | if ( (info->m_activeFrame.fp == m_fp) || |
5145 | ( (m_fdException != NULL) && (realFrame->md == m_fdException) && |
5146 | IsEqualOrCloserToRoot(realFrame->fp, m_fpException) ) ) |
5147 | { |
5148 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n" )); |
5149 | return true; |
5150 | } |
5151 | |
5152 | #if defined(WIN64EXCEPTIONS) |
5153 | // There are two scenarios which make this function more complicated on WIN64. |
5154 | // 1) We initiate a step in the parent method or a funclet but end up stepping into another funclet closer to the leaf. |
5155 | // a) start in the parent method |
5156 | // b) start in a funclet |
5157 | // 2) We initiate a step in a funclet but end up stepping out to the parent method or a funclet closer to the root. |
5158 | // a) end up in the parent method |
5159 | // b) end up in a funclet |
5160 | // In both cases the range of the stepper should still be appropriate. |
5161 | |
5162 | bool fValidParentMethodFP = (m_fpParentMethod != LEAF_MOST_FRAME); |
5163 | |
5164 | if (fActiveFrameIsFunclet) |
5165 | { |
5166 | // Scenario 1a |
5167 | if (m_fp == info->m_returnFrame.fp) |
5168 | { |
5169 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n" )); |
5170 | return true; |
5171 | } |
5172 | // Scenario 1b & 2b have the same condition |
5173 | else if (fValidParentMethodFP && (m_fpParentMethod == info->m_returnFrame.fp)) |
5174 | { |
5175 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n" )); |
5176 | return true; |
5177 | } |
5178 | } |
5179 | else |
5180 | { |
5181 | // Scenario 2a |
5182 | if (fValidParentMethodFP && (m_fpParentMethod == info->m_activeFrame.fp)) |
5183 | { |
5184 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning TRUE\n" )); |
5185 | return true; |
5186 | } |
5187 | } |
5188 | #endif // WIN64EXCEPTIONS |
5189 | |
5190 | LOG((LF_CORDB,LL_INFO10000, "DS::IRA: returning FALSE\n" )); |
5191 | return false; |
5192 | } |
5193 | |
5194 | // bool DebuggerStepper::IsInRange() Given the native offset ip, |
5195 | // returns true if ip falls within any of the native offset ranges specified |
5196 | // by the array of COR_DEBUG_STEP_RANGEs. |
5197 | // Returns true if ip falls within any of the ranges. Returns false |
5198 | // if ip doesn't, or if there are no ranges (rangeCount==0). Note that a |
5199 | // COR_DEBUG_STEP_RANGE with an endOffset of zero is interpreted as extending |
5200 | // from startOffset to the end of the method. |
5201 | // SIZE_T ip: Native offset, relative to the beginning of the method. |
5202 | // COR_DEBUG_STEP_RANGE *range: An array of ranges, which are themselves |
5203 | // native offsets, to compare against ip. |
5204 | // SIZE_T rangeCount: Number of elements in range |
5205 | bool DebuggerStepper::IsInRange(SIZE_T ip, COR_DEBUG_STEP_RANGE *range, SIZE_T rangeCount, |
5206 | ControllerStackInfo *pInfo) |
5207 | { |
5208 | LOG((LF_CORDB,LL_INFO10000,"DS::IIR: off=0x%x\n" , ip)); |
5209 | |
5210 | if (range == NULL) |
5211 | { |
5212 | LOG((LF_CORDB,LL_INFO10000,"DS::IIR: range == NULL -> not in range\n" )); |
5213 | return false; |
5214 | } |
5215 | |
5216 | if (pInfo && !IsRangeAppropriate(pInfo)) |
5217 | { |
5218 | LOG((LF_CORDB,LL_INFO10000,"DS::IIR: no pInfo or range not appropriate -> not in range\n" )); |
5219 | return false; |
5220 | } |
5221 | |
5222 | COR_DEBUG_STEP_RANGE *r = range; |
5223 | COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount; |
5224 | |
5225 | while (r < rEnd) |
5226 | { |
5227 | SIZE_T endOffset = r->endOffset ? r->endOffset : ~0; |
5228 | LOG((LF_CORDB,LL_INFO100000,"DS::IIR: so=0x%x, eo=0x%x\n" , |
5229 | r->startOffset, endOffset)); |
5230 | |
5231 | if (ip >= r->startOffset && ip < endOffset) |
5232 | { |
5233 | LOG((LF_CORDB,LL_INFO1000,"DS::IIR:this:0x%x Found native offset " |
5234 | "0x%x to be in the range" |
5235 | "[0x%x, 0x%x), index 0x%x\n\n" , this, ip, r->startOffset, |
                endOffset, (r - range) ));
5237 | return true; |
5238 | } |
5239 | |
5240 | r++; |
5241 | } |
5242 | |
5243 | LOG((LF_CORDB,LL_INFO10000,"DS::IIR: not in range\n" )); |
5244 | return false; |
5245 | } |
5246 | |
5247 | // bool DebuggerStepper::DetectHandleInterceptors() Return true if |
5248 | // the current execution takes place within an interceptor (that is, either |
5249 | // the current frame, or the parent frame is a framed frame whose |
5250 | // GetInterception method returns something other than INTERCEPTION_NONE), |
5251 | // and this stepper doesn't want to stop in an interceptor, and we successfully |
5252 | // set a breakpoint after the top-most interceptor in the stack. |
5253 | bool DebuggerStepper::DetectHandleInterceptors(ControllerStackInfo *info) |
5254 | { |
5255 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Start DetectHandleInterceptors\n" )); |
5256 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: active frame=0x%08x, has return frame=%d, return frame=0x%08x m_reason:%d\n" , |
5257 | info->m_activeFrame.frame, info->HasReturnFrame(), info->m_returnFrame.frame, m_reason)); |
5258 | |
5259 | // If this is a normal step, then we want to continue stepping, even if we |
5260 | // are in an interceptor. |
5261 | if (m_reason == STEP_NORMAL || m_reason == STEP_RETURN || m_reason == STEP_EXCEPTION_HANDLER) |
5262 | { |
5263 | LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false while stepping within function, finally!\n" )); |
5264 | return false; |
5265 | } |
5266 | |
5267 | bool fAttemptStepOut = false; |
5268 | |
5269 | if (m_rgfInterceptStop != INTERCEPT_ALL) // we may have to skip out of one |
5270 | { |
5271 | if (info->m_activeFrame.frame != NULL && |
5272 | info->m_activeFrame.frame != FRAME_TOP && |
5273 | info->m_activeFrame.frame->GetInterception() != Frame::INTERCEPTION_NONE) |
5274 | { |
5275 | if (!((CorDebugIntercept)info->m_activeFrame.frame->GetInterception() & Frame::Interception(m_rgfInterceptStop))) |
5276 | { |
5277 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded frame type:0x%x\n" , |
5278 | info->m_returnFrame. frame->GetInterception())); |
5279 | |
5280 | fAttemptStepOut = true; |
5281 | } |
5282 | else |
5283 | { |
5284 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: 0x%x set to STEP_INTERCEPT\n" , this)); |
5285 | |
5286 | m_reason = STEP_INTERCEPT; //remember why we've stopped |
5287 | } |
5288 | } |
5289 | |
5290 | if ((m_reason == STEP_EXCEPTION_FILTER) || |
5291 | (info->HasReturnFrame() && |
5292 | info->m_returnFrame.frame != NULL && |
5293 | info->m_returnFrame.frame != FRAME_TOP && |
5294 | info->m_returnFrame.frame->GetInterception() != Frame::INTERCEPTION_NONE)) |
5295 | { |
5296 | if (m_reason == STEP_EXCEPTION_FILTER) |
5297 | { |
5298 | // Exceptions raised inside of the EE by COMPlusThrow, FCThrow, etc will not |
5299 | // insert an ExceptionFrame, and hence info->m_returnFrame.frame->GetInterception() |
5300 | // will not be accurate. Hence we use m_reason instead |
5301 | |
5302 | if (!(Frame::INTERCEPTION_EXCEPTION & Frame::Interception(m_rgfInterceptStop))) |
5303 | { |
5304 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded INTERCEPTION_EXCEPTION\n" )); |
5305 | fAttemptStepOut = true; |
5306 | } |
5307 | } |
5308 | else if (!(info->m_returnFrame.frame->GetInterception() & Frame::Interception(m_rgfInterceptStop))) |
5309 | { |
5310 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI: Stepping out b/c of excluded return frame type:0x%x\n" , |
5311 | info->m_returnFrame.frame->GetInterception())); |
5312 | |
5313 | fAttemptStepOut = true; |
5314 | } |
5315 | |
5316 | if (!fAttemptStepOut) |
5317 | { |
5318 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n" , this)); |
5319 | |
5320 | m_reason = STEP_INTERCEPT; //remember why we've stopped |
5321 | } |
5322 | } |
5323 | else if (info->m_specialChainReason != CHAIN_NONE) |
5324 | { |
5325 | if(!(info->m_specialChainReason & CorDebugChainReason(m_rgfInterceptStop)) ) |
5326 | { |
5327 | LOG((LF_CORDB,LL_INFO10000, "DS::DHI: (special) Stepping out b/c of excluded return frame type:0x%x\n" , |
5328 | info->m_specialChainReason)); |
5329 | |
5330 | fAttemptStepOut = true; |
5331 | } |
5332 | else |
5333 | { |
5334 | LOG((LF_CORDB,LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n" , this)); |
5335 | |
5336 | m_reason = STEP_INTERCEPT; //remember why we've stopped |
5337 | } |
5338 | } |
5339 | else if (info->m_activeFrame.frame == NULL) |
5340 | { |
5341 | // Make sure we are not dealing with a chain here. |
5342 | if (info->m_activeFrame.HasMethodFrame()) |
5343 | { |
5344 | // Check whether we are executing in a class constructor. |
5345 | _ASSERTE(info->m_activeFrame.md != NULL); |
5346 | if (info->m_activeFrame.md->IsClassConstructor()) |
5347 | { |
5348 | // We are in a class constructor. Check whether we want to stop in it. |
5349 | if (!(CHAIN_CLASS_INIT & CorDebugChainReason(m_rgfInterceptStop))) |
5350 | { |
5351 | LOG((LF_CORDB, LL_INFO10000, "DS::DHI: Stepping out b/c of excluded cctor:0x%x\n" , |
5352 | CHAIN_CLASS_INIT)); |
5353 | |
5354 | fAttemptStepOut = true; |
5355 | } |
5356 | else |
5357 | { |
5358 | LOG((LF_CORDB, LL_INFO10000,"DS::DHI 0x%x set to STEP_INTERCEPT\n" , this)); |
5359 | |
5360 | m_reason = STEP_INTERCEPT; //remember why we've stopped |
5361 | } |
5362 | } |
5363 | } |
5364 | } |
5365 | } |
5366 | |
5367 | if (fAttemptStepOut) |
5368 | { |
5369 | LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Doing TSO!\n" )); |
5370 | |
        // TrapStepOut could alter the step reason if we're stepping out of an interceptor and it looks like we're
        // running off the top of the program. So hold onto it here, and if our step reason becomes STEP_EXIT, then
        // reset it to what it was.
5374 | CorDebugStepReason holdReason = m_reason; |
5375 | |
5376 | // @todo - should this be TrapStepNext??? But that may stop in a child... |
5377 | TrapStepOut(info); |
5378 | EnableUnwind(m_fp); |
5379 | |
5380 | if (m_reason == STEP_EXIT) |
5381 | { |
5382 | m_reason = holdReason; |
5383 | } |
5384 | |
5385 | return true; |
5386 | } |
5387 | |
5388 | // We're not in a special area of code, so we don't want to continue unless some other part of the code decides that |
5389 | // we should. |
5390 | LOG((LF_CORDB,LL_INFO1000,"DS::DHI: Returning false, finally!\n" )); |
5391 | |
5392 | return false; |
5393 | } |
5394 | |
5395 | |
5396 | //--------------------------------------------------------------------------------------- |
5397 | // |
5398 | // This function checks whether the given IP is in an LCG method. If so, it enables |
5399 | // JMC and does a step out. This effectively makes sure that we never stop in an LCG method. |
5400 | // |
// There are two common scenarios here:
5402 | // 1) We single-step into an LCG method from a managed method. |
5403 | // 2) We single-step off the end of a method called by an LCG method and end up in the calling LCG method. |
5404 | // |
5405 | // In both cases, we don't want to stop in the LCG method. If the LCG method directly or indirectly calls |
5406 | // another user method, we want to stop there. Otherwise, we just want to step out back to the caller of |
5407 | // LCG method. In other words, what we want is exactly the JMC behaviour. |
5408 | // |
5409 | // Arguments: |
5410 | // ip - the current IP where the thread is stopped at |
5411 | // pMD - This is the MethodDesc for the specified ip. This can be NULL, but if it's not, |
5412 | // then it has to match the specified IP. |
5413 | // pInfo - the ControllerStackInfo taken at the specified IP (see Notes below) |
5414 | // |
5415 | // Return Value: |
5416 | // Returns TRUE if the specified IP is indeed in an LCG method, in which case this function has already |
5417 | // enabled all the traps to catch the thread, including turning on JMC, enabling unwind callback, and |
5418 | // putting a patch in the caller. |
5419 | // |
5420 | // Notes: |
5421 | // LCG methods don't show up in stackwalks done by the ControllerStackInfo. So even if the specified IP |
//    is in an LCG method, the LCG method won't show up in the call stack. That's why we need to call
5423 | // ControllerStackInfo::SetReturnFrameWithActiveFrame() in this function before calling TrapStepOut(). |
5424 | // Otherwise TrapStepOut() will put a patch in the caller's caller (if there is one). |
5425 | // |
5426 | |
5427 | BOOL DebuggerStepper::DetectHandleLCGMethods(const PCODE ip, MethodDesc * pMD, ControllerStackInfo * pInfo) |
5428 | { |
5429 | // Look up the MethodDesc for the given IP. |
5430 | if (pMD == NULL) |
5431 | { |
5432 | if (g_pEEInterface->IsManagedNativeCode((const BYTE *)ip)) |
5433 | { |
5434 | pMD = g_pEEInterface->GetNativeCodeMethodDesc(ip); |
5435 | _ASSERTE(pMD != NULL); |
5436 | } |
5437 | } |
5438 | #if defined(_DEBUG) |
5439 | else |
5440 | { |
5441 | // If a MethodDesc is specified, it has to match the given IP. |
5442 | _ASSERTE(pMD == g_pEEInterface->GetNativeCodeMethodDesc(ip)); |
5443 | } |
5444 | #endif // _DEBUG |
5445 | |
5446 | // If the given IP is in unmanaged code, then we won't have a MethodDesc by this point. |
5447 | if (pMD != NULL) |
5448 | { |
5449 | if (pMD->IsLCGMethod()) |
5450 | { |
5451 | // Enable all the traps to catch the thread. |
5452 | EnableUnwind(m_fp); |
5453 | EnableJMCBackStop(pMD); |
5454 | |
5455 | pInfo->SetReturnFrameWithActiveFrame(); |
5456 | TrapStepOut(pInfo); |
5457 | return TRUE; |
5458 | } |
5459 | } |
5460 | |
5461 | return FALSE; |
5462 | } |
5463 | |
5464 | |
5465 | // Steppers override these so that they can skip func-evals. Note that steppers can |
5466 | // be created & used inside of func-evals (nested-break states). |
5467 | // On enter, we check for freezing the stepper. |
5468 | void DebuggerStepper::TriggerFuncEvalEnter(Thread * thread) |
5469 | { |
5470 | LOG((LF_CORDB, LL_INFO10000, "DS::TFEEnter, this=0x%p, old nest=%d\n" , this, m_cFuncEvalNesting)); |
5471 | |
5472 | // Since this is always called on the hijacking thread, we should be thread-safe |
5473 | _ASSERTE(thread == this->GetThread()); |
5474 | |
5475 | if (IsDead()) |
5476 | return; |
5477 | |
5478 | m_cFuncEvalNesting++; |
5479 | |
5480 | if (m_cFuncEvalNesting == 1) |
5481 | { |
5482 | // We're entering our 1st funceval, so freeze us. |
5483 | LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - freezing stepper\n" )); |
5484 | |
5485 | // Freeze the stepper by disabling all triggers |
5486 | m_bvFrozenTriggers = 0; |
5487 | |
5488 | // |
        // We don't explicitly disable single-stepping because the OS
5490 | // gives us a new thread context during an exception. Since |
5491 | // all func-evals are done inside exceptions, we should never |
5492 | // have this problem. |
5493 | // |
5494 | // Note: however, that if func-evals were no longer done in |
5495 | // exceptions, this would have to change. |
5496 | // |
5497 | |
5498 | |
5499 | if (IsMethodEnterEnabled()) |
5500 | { |
5501 | m_bvFrozenTriggers |= kMethodEnter; |
5502 | DisableMethodEnter(); |
5503 | } |
5504 | |
5505 | } |
5506 | else |
5507 | { |
5508 | LOG((LF_CORDB, LL_INFO100000, "DS::TFEEnter - new nest=%d\n" , m_cFuncEvalNesting)); |
5509 | } |
5510 | } |
5511 | |
5512 | // On Func-EvalExit, we check if the stepper is trying to step-out of a func-eval |
5513 | // (in which case we kill it) |
5514 | // or if we previously entered this func-eval and should thaw it now. |
5515 | void DebuggerStepper::TriggerFuncEvalExit(Thread * thread) |
5516 | { |
5517 | LOG((LF_CORDB, LL_INFO10000, "DS::TFEExit, this=0x%p, old nest=%d\n" , this, m_cFuncEvalNesting)); |
5518 | |
5519 | // Since this is always called on the hijacking thread, we should be thread-safe |
5520 | _ASSERTE(thread == this->GetThread()); |
5521 | |
5522 | if (IsDead()) |
5523 | return; |
5524 | |
5525 | m_cFuncEvalNesting--; |
5526 | |
5527 | if (m_cFuncEvalNesting == -1) |
5528 | { |
5529 | LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - disabling stepper\n" )); |
5530 | |
5531 | // we're exiting the func-eval session we were created in. So we just completely |
5532 | // disable ourselves so that we don't fire anything anymore. |
5533 | // The RS still has to free the stepper though. |
5534 | |
5535 | // This prevents us from stepping-out of a func-eval. For traditional steppers, |
5536 | // this is overkill since it won't have any outstanding triggers. (trap-step-out |
5537 | // won't patch if it crosses a func-eval frame). |
5538 | // But JMC-steppers have Method-Enter; and so this is the only place we have to |
5539 | // disable that. |
5540 | DisableAll(); |
5541 | } |
5542 | else if (m_cFuncEvalNesting == 0) |
5543 | { |
5544 | // We're back to our starting Func-eval session, we should have been frozen, |
5545 | // so now we thaw. |
5546 | LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - thawing stepper\n" )); |
5547 | |
5548 | // Thaw the stepper (reenable triggers) |
5549 | if ((m_bvFrozenTriggers & kMethodEnter) != 0) |
5550 | { |
5551 | EnableMethodEnter(); |
5552 | } |
5553 | m_bvFrozenTriggers = 0; |
5554 | |
5555 | } |
5556 | else |
5557 | { |
5558 | LOG((LF_CORDB, LL_INFO100000, "DS::TFEExit - new nest=%d\n" , m_cFuncEvalNesting)); |
5559 | } |
5560 | } |
5561 | |
5562 | |
5563 | // Return true iff we set a patch (which implies to caller that we should |
5564 | // let controller run free and hit that patch) |
5565 | bool DebuggerStepper::TrapStepInto(ControllerStackInfo *info, |
5566 | const BYTE *ip, |
5567 | TraceDestination *pTD) |
5568 | { |
5569 | _ASSERTE( pTD != NULL ); |
5570 | _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER); |
5571 | |
5572 | EnableTraceCall(LEAF_MOST_FRAME); |
5573 | if (IsCloserToRoot(info->m_activeFrame.fp, m_fpStepInto)) |
5574 | m_fpStepInto = info->m_activeFrame.fp; |
5575 | |
5576 | LOG((LF_CORDB, LL_INFO1000, "Ds::TSI this:0x%x m_fpStepInto:0x%x\n" , |
5577 | this, m_fpStepInto.GetSPValue())); |
5578 | |
5579 | TraceDestination trace; |
5580 | |
5581 | // Trace through the stubs. |
5582 | // If we're calling from managed code, this should either succeed |
5583 | // or become an ecall into mscorwks. |
5584 | // @Todo - what about stubs in mscorwks. |
    // @todo - if this fails, we want to provide as much info as possible.
5586 | if (!g_pEEInterface->TraceStub(ip, &trace) |
5587 | || !g_pEEInterface->FollowTrace(&trace)) |
5588 | { |
5589 | return false; |
5590 | } |
5591 | |
5592 | |
5593 | (*pTD) = trace; //bitwise copy |
5594 | |
5595 | // Step-in always operates at the leaf-most frame. Thus the frame pointer for any |
5596 | // patch for step-in should be LEAF_MOST_FRAME, regardless of whatever our current fp |
5597 | // is before the step-in. |
5598 | // Note that step-in may skip 'internal' frames (FrameInfo w/ internal=true) since |
5599 | // such frames may really just be a marker for an internal EE Frame on the stack. |
5600 | // However, step-out uses these frames b/c it may call frame->TraceFrame() on them. |
5601 | return PatchTrace(&trace, |
5602 | LEAF_MOST_FRAME, // step-in is always leaf-most frame. |
5603 | (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false)); |
5604 | } |
5605 | |
5606 | // Enable the JMC backstop for stepping on Step-In. |
// This activates the JMC probes, which will provide a safety net
5608 | // to stop a stepper if the StubManagers don't predict the call properly. |
5609 | // Ideally, this should never be necessary (because the SMs would do their job). |
5610 | void DebuggerStepper::EnableJMCBackStop(MethodDesc * pStartMethod) |
5611 | { |
5612 | // JMC steppers should not need the JMC backstop unless a thread inadvertently stops in an LCG method. |
5613 | //_ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType()); |
5614 | |
5615 | // Since we should never hit the JMC backstop (since it's really a SM issue), we'll assert if we actually do. |
5616 | // However, there's 1 corner case here. If we trace calls at the start of the method before the JMC-probe, |
5617 | // then we'll still hit the JMC backstop in our own method. |
5618 | // Record that starting method. That way, if we end up hitting our JMC backstop in our own method, |
// we don't over-aggressively fire the assert. (This won't work for recursive cases, but since this is just
5620 | // changing an assert, we don't care). |
5621 | |
5622 | #ifdef _DEBUG |
5623 | // May be NULL if we didn't start in a method. |
5624 | m_StepInStartMethod = pStartMethod; |
5625 | #endif |
5626 | |
5627 | // We don't want traditional steppers to rely on MethodEnter (b/c it's not guaranteed to be correct), |
5628 | // but it may be a useful last resort. |
5629 | this->EnableMethodEnter(); |
5630 | } |
5631 | |
5632 | // Return true if the stepper can run free. |
5633 | bool DebuggerStepper::TrapStepInHelper( |
5634 | ControllerStackInfo * pInfo, |
5635 | const BYTE * ipCallTarget, |
5636 | const BYTE * ipNext, |
5637 | bool fCallingIntoFunclet) |
5638 | { |
5639 | TraceDestination td; |
5640 | |
5641 | #ifdef _DEBUG |
5642 | // Begin logging the step-in activity in debug builds. |
5643 | StubManager::DbgBeginLog((TADDR) ipNext, (TADDR) ipCallTarget); |
5644 | #endif |
5645 | |
5646 | |
5647 | if (TrapStepInto(pInfo, ipCallTarget, &td)) |
5648 | { |
5649 | // If we placed a patch, see if we need to update our step-reason |
5650 | if (td.GetTraceType() == TRACE_MANAGED ) |
5651 | { |
5652 | // Possible optimization: Roll all of g_pEEInterface calls into |
            // one function so we don't repeatedly get the CodeMan, etc.
5654 | MethodDesc *md = NULL; |
5655 | _ASSERTE( g_pEEInterface->IsManagedNativeCode((const BYTE *)td.GetAddress()) ); |
5656 | md = g_pEEInterface->GetNativeCodeMethodDesc(td.GetAddress()); |
5657 | |
5658 | DebuggerJitInfo* pDJI = g_pDebugger->GetJitInfoFromAddr(td.GetAddress()); |
5659 | CodeRegionInfo code = CodeRegionInfo::GetCodeRegionInfo(pDJI, md); |
5660 | if (code.AddressToOffset((const BYTE *)td.GetAddress()) == 0) |
5661 | { |
5662 | |
5663 | LOG((LF_CORDB,LL_INFO1000,"\tDS::TS 0x%x m_reason = STEP_CALL" |
5664 | "@ip0x%x\n" , this, (BYTE*)GetControlPC(&(pInfo->m_activeFrame.registers)))); |
5665 | m_reason = STEP_CALL; |
5666 | } |
5667 | else |
5668 | { |
5669 | LOG((LF_CORDB, LL_INFO1000, "Didn't step: md:0x%x" |
5670 | "td.type:%s td.address:0x%p, hot code address:0x%p\n" , |
5671 | md, GetTType(td.GetTraceType()), td.GetAddress(), |
5672 | code.getAddrOfHotCode())); |
5673 | } |
5674 | } |
5675 | else |
5676 | { |
5677 | LOG((LF_CORDB,LL_INFO10000,"DS::TS else 0x%x m_reason = STEP_CALL\n" , |
5678 | this)); |
5679 | m_reason = STEP_CALL; |
5680 | } |
5681 | |
5682 | |
5683 | return true; |
5684 | } // end TrapStepIn |
5685 | else |
5686 | { |
5687 | // If we can't figure out where the stepper should call into (likely because we can't find a stub-manager), |
5688 | // then enable the JMC backstop. |
5689 | EnableJMCBackStop(pInfo->m_activeFrame.md); |
5690 | |
5691 | } |
5692 | |
5693 | // We ignore ipNext here. Instead we'll return false and let the caller (TrapStep) |
5694 | // set the patch for us. |
5695 | return false; |
5696 | } |
5697 | |
5698 | FORCEINLINE bool IsTailCall(const BYTE * pTargetIP) |
5699 | { |
5700 | return TailCallStubManager::IsTailCallStubHelper(reinterpret_cast<PCODE>(pTargetIP)); |
5701 | } |
5702 | |
// bool DebuggerStepper::TrapStep() TrapStep attempts to set a
5704 | // patch at the next IL instruction to be executed. If we're stepping in & |
5705 | // the next IL instruction is a call, then this'll set a breakpoint inside |
5706 | // the code that will be called. |
5707 | // How: There are a number of cases, depending on where the IP |
5708 | // currently is: |
5709 | // Unmanaged code: EnableTraceCall() & return false - try and get |
5710 | // it when it returns. |
5711 | // In a frame: if the <p in> param is true, then do an |
5712 | // EnableTraceCall(). If the frame isn't the top frame, also do |
5713 | // g_pEEInterface->TraceFrame(), g_pEEInterface->FollowTrace, and |
5714 | // PatchTrace. |
5715 | // Normal managed frame: create a Walker and walk the instructions until either |
5716 | // leave the provided range (AddPatch there, return true), or we don't know what the |
5717 | // next instruction is (say, after a call, or return, or branch - return false). |
5718 | // Returns a boolean indicating if we were able to set a patch successfully |
5719 | // in either this method, or (if in == true & the next instruction is a call) |
5720 | // inside a callee method. |
5721 | // true: Patch successfully placed either in this method or a callee, |
5722 | // so the stepping is taken care of. |
5723 | // false: Unable to place patch in either this method or any |
5724 | // applicable callee methods, so the only option the caller has to put |
5725 | // patch to control flow is to call TrapStepOut & try and place a patch |
5726 | // on the method that called the current frame's method. |
5727 | bool DebuggerStepper::TrapStep(ControllerStackInfo *info, bool in) |
5728 | { |
5729 | LOG((LF_CORDB,LL_INFO10000,"DS::TS: this:0x%x\n" , this)); |
5730 | if (!info->m_activeFrame.managed) |
5731 | { |
5732 | // |
5733 | // We're not in managed code. Patch up all paths back in. |
5734 | // |
5735 | |
5736 | LOG((LF_CORDB,LL_INFO10000, "DS::TS: not in managed code\n" )); |
5737 | |
5738 | if (in) |
5739 | { |
5740 | EnablePolyTraceCall(); |
5741 | } |
5742 | |
5743 | return false; |
5744 | } |
5745 | |
5746 | if (info->m_activeFrame.frame != NULL) |
5747 | { |
5748 | |
5749 | // |
5750 | // We're in some kind of weird frame. Patch further entry to the frame. |
5751 | // or if we can't, patch return from the frame |
5752 | // |
5753 | |
5754 | LOG((LF_CORDB,LL_INFO10000, "DS::TS: in a weird frame\n" )); |
5755 | |
5756 | if (in) |
5757 | { |
5758 | EnablePolyTraceCall(); |
5759 | |
5760 | // Only traditional steppers should patch a frame. JMC steppers will |
5761 | // just rely on TriggerMethodEnter. |
5762 | if (DEBUGGER_CONTROLLER_STEPPER == this->GetDCType()) |
5763 | { |
5764 | if (info->m_activeFrame.frame != FRAME_TOP) |
5765 | { |
5766 | TraceDestination trace; |
5767 | |
5768 | CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers |
5769 | |
5770 | // This could be anywhere, especially b/c step could be on non-leaf frame. |
5771 | if (g_pEEInterface->TraceFrame(this->GetThread(), |
5772 | info->m_activeFrame.frame, |
5773 | FALSE, &trace, |
5774 | &(info->m_activeFrame.registers)) |
5775 | && g_pEEInterface->FollowTrace(&trace) |
5776 | && PatchTrace(&trace, info->m_activeFrame.fp, |
5777 | (m_rgfMappingStop&STOP_UNMANAGED)? |
5778 | (true):(false))) |
5779 | |
5780 | { |
5781 | return true; |
5782 | } |
5783 | } |
5784 | } |
5785 | } |
5786 | |
5787 | return false; |
5788 | } |
5789 | |
5790 | #ifdef _TARGET_X86_ |
5791 | LOG((LF_CORDB,LL_INFO1000, "GetJitInfo for pc = 0x%x (addr of " |
5792 | "that value:0x%x)\n" , (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)), |
5793 | info->m_activeFrame.registers.PCTAddr)); |
5794 | #endif |
5795 | |
5796 | // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and |
5797 | // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged |
5798 | // function that had been called. |
5799 | DebuggerJitInfo *ji = info->m_activeFrame.GetJitInfoFromFrame(); |
5800 | if( ji != NULL ) |
5801 | { |
5802 | LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, got DJI 0x%p, " |
5803 | "from 0x%p to 0x%p\n" , |
5804 | (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)), |
5805 | ji, ji->m_addrOfCode, ji->m_addrOfCode+ji->m_sizeOfCode)); |
5806 | } |
5807 | else |
5808 | { |
5809 | LOG((LF_CORDB,LL_INFO10000,"DS::TS: For code 0x%p, " |
5810 | "didn't get a DJI \n" , |
5811 | (const BYTE*)(GetControlPC(&info->m_activeFrame.registers)))); |
5812 | } |
5813 | |
5814 | // |
5815 | // We're in a normal managed frame - walk the code |
5816 | // |
5817 | |
5818 | NativeWalker walker; |
5819 | |
5820 | LOG((LF_CORDB,LL_INFO1000, "DS::TS: &info->m_activeFrame.registers 0x%p\n" , &info->m_activeFrame.registers)); |
5821 | |
5822 | // !!! Eventually when using the fjit, we'll want |
5823 | // to walk the IL to get the next location, & then map |
5824 | // it back to native. |
5825 | walker.Init((BYTE*)GetControlPC(&(info->m_activeFrame.registers)), &info->m_activeFrame.registers); |
5826 | |
5827 | |
5828 | // Is the active frame really the active frame? |
5829 | // What if the thread is stopped at a managed debug event outside of a filter ctx? Eg, stopped |
5830 | // somewhere directly in mscorwks (like sending a LogMsg or ClsLoad event) or even at WaitForSingleObject. |
5831 | // ActiveFrame is either the stepper's initial frame or the frame of a filterctx. |
5832 | bool fIsActiveFrameLive = (info->m_activeFrame.fp == info->m_bottomFP); |
5833 | |
5834 | // If this thread isn't stopped in managed code, it can't be at the active frame. |
5835 | if (GetManagedStoppedCtx(this->GetThread()) == NULL) |
5836 | { |
5837 | fIsActiveFrameLive = false; |
5838 | } |
5839 | |
5840 | bool fIsJump = false; |
5841 | bool fCallingIntoFunclet = false; |
5842 | |
5843 | // If m_activeFrame is not the actual active frame, |
5844 | // we should skip this first switch - never single step, and |
5845 | // assume our context is bogus. |
5846 | if (fIsActiveFrameLive) |
5847 | { |
5848 | LOG((LF_CORDB,LL_INFO10000, "DC::TS: immediate?\n" )); |
5849 | |
5850 | // Note that by definition our walker must always be able to step |
5851 | // through a single instruction, so any return |
5852 | // of NULL IP's from those cases on the first step |
5853 | // means that an exception is going to be generated. |
5854 | // |
5855 | // (On future steps, it can also mean that the destination |
5856 | // simply can't be computed.) |
5857 | WALK_TYPE wt = walker.GetOpcodeWalkType(); |
5858 | { |
5859 | switch (wt) |
5860 | { |
5861 | case WALK_RETURN: |
5862 | { |
5863 | LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_RETURN\n" )); |
5864 | |
5865 | // Normally a 'ret' opcode means we're at the end of a function and doing a step-out. |
5866 | // But the jit is free to use a 'ret' opcode to implement various goofy constructs like |
5867 | // managed filters, in which case we may ret to the same function or we may ret to some |
5868 | // internal CLR stub code. |
5869 | // So we'll just ignore this and tell the Stepper to enable every notification it has |
5870 | // and let the thread run free. This will include TrapStepOut() and EnableUnwind() |
5871 | // to catch any potential filters. |
5872 | |
5873 | |
5874 | // Go ahead and enable the single-step flag too. We know it's safe. |
5875 | // If this lands in random code, then TriggerSingleStep will just ignore it. |
5876 | EnableSingleStep(); |
5877 | |
5878 | // Don't set step-reason yet. If another trigger gets hit, it will set the reason. |
5879 | return false; |
5880 | } |
5881 | |
5882 | case WALK_BRANCH: |
5883 | LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_BRANCH\n" )); |
                    // A branch can be handled just like a call. If the branch is within the current method, then we just
                    // fall down to WALK_UNKNOWN, otherwise we handle it just like a call. Note: we need to force in=true
5886 | // because for a jmp, in or over is the same thing, we're still going there, and the in==true case is |
5887 | // the case we want to use... |
5888 | fIsJump = true; |
5889 | |
5890 | // fall through... |
5891 | |
5892 | case WALK_CALL: |
5893 | LOG((LF_CORDB,LL_INFO10000, "DC::TS:Imm:WALK_CALL ip=%p nextip=%p\n" , walker.GetIP(), walker.GetNextIP())); |
5894 | |
5895 | // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL |
5896 | // instruction), then put the bp where we're going, NOT at the instruction following the call |
5897 | if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP())) |
5898 | { |
5899 | LOG((LF_CORDB, LL_INFO1000, "Walk call within method!" )); |
5900 | goto LWALK_UNKNOWN; |
5901 | } |
5902 | |
5903 | if (walker.GetNextIP() != NULL) |
5904 | { |
5905 | #ifdef WIN64EXCEPTIONS |
5906 | // There are 4 places we could be jumping: |
5907 | // 1) to the beginning of the same method (recursive call) |
5908 | // 2) somewhere in the same funclet, that isn't the method start |
5909 | // 3) somewhere in the same method but different funclet |
5910 | // 4) somewhere in a different method |
5911 | // |
5912 | // IsAddrWithinFrame ruled out option 2, IsAddrWithinMethodIncludingFunclet rules out option 4, |
                    // and checking the IP against the start address rules out option 1. That leaves only what we
                    // wanted, option #3.
5915 | fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP()) && |
5916 | ((CORDB_ADDRESS)(SIZE_T)walker.GetNextIP() != ji->m_addrOfCode); |
5917 | #endif |
5918 | // At this point, we know that the call/branch target is not in the current method. |
5919 | // So if the current instruction is a jump, this must be a tail call or possibly a jump to the finally. |
5920 | // So, check if the call/branch target is the JIT helper for handling tail calls if we are not calling |
5921 | // into the funclet. |
5922 | if ((fIsJump && !fCallingIntoFunclet) || IsTailCall(walker.GetNextIP())) |
5923 | { |
5924 | // A step-over becomes a step-out for a tail call. |
5925 | if (!in) |
5926 | { |
5927 | TrapStepOut(info); |
5928 | return true; |
5929 | } |
5930 | } |
5931 | |
5932 | // To preserve the old behaviour, if this is not a tail call, then we assume we want to |
5933 | // follow the call/jump. |
5934 | if (fIsJump) |
5935 | { |
5936 | in = true; |
5937 | } |
5938 | |
5939 | |
5940 | // There are two cases where we need to perform a step-in. One, if the step operation is |
5941 | // a step-in. Two, if the target address of the call is in a funclet of the current method. |
5942 | // In this case, we want to step into the funclet even if the step operation is a step-over. |
5943 | if (in || fCallingIntoFunclet) |
5944 | { |
5945 | if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet)) |
5946 | { |
5947 | return true; |
5948 | } |
5949 | } |
5950 | |
5951 | } |
5952 | if (walker.GetSkipIP() == NULL) |
5953 | { |
5954 | LOG((LF_CORDB,LL_INFO10000,"DS::TS 0x%x m_reason = STEP_CALL (skip)\n" , |
5955 | this)); |
5956 | m_reason = STEP_CALL; |
5957 | |
5958 | return true; |
5959 | } |
5960 | |
5961 | |
5962 | LOG((LF_CORDB,LL_INFO100000, "DC::TS:Imm:WALK_CALL Skip instruction\n" )); |
5963 | walker.Skip(); |
5964 | break; |
5965 | |
5966 | case WALK_UNKNOWN: |
5967 | LWALK_UNKNOWN: |
5968 | LOG((LF_CORDB,LL_INFO10000,"DS::TS:WALK_UNKNOWN - curIP:0x%x " |
5969 | "nextIP:0x%x skipIP:0x%x 1st byte of opcode:0x%x\n" , (BYTE*)GetControlPC(&(info->m_activeFrame. |
5970 | registers)), walker.GetNextIP(),walker.GetSkipIP(), |
5971 | *(BYTE*)GetControlPC(&(info->m_activeFrame.registers)))); |
5972 | |
5973 | EnableSingleStep(); |
5974 | |
5975 | return true; |
5976 | |
5977 | default: |
5978 | if (walker.GetNextIP() == NULL) |
5979 | { |
5980 | return true; |
5981 | } |
5982 | |
5983 | walker.Next(); |
5984 | } |
5985 | } |
5986 | } // if (fIsActiveFrameLive) |
5987 | |
5988 | // |
5989 | // Use our range, if we're in the original |
5990 | // frame. |
5991 | // |
5992 | |
5993 | COR_DEBUG_STEP_RANGE *range; |
5994 | SIZE_T rangeCount; |
5995 | |
5996 | if (info->m_activeFrame.fp == m_fp) |
5997 | { |
5998 | range = m_range; |
5999 | rangeCount = m_rangeCount; |
6000 | } |
6001 | else |
6002 | { |
6003 | range = NULL; |
6004 | rangeCount = 0; |
6005 | } |
6006 | |
6007 | // |
6008 | // Keep walking until either we're out of range, or |
6009 | // else we can't predict ahead any more. |
6010 | // |
6011 | |
6012 | while (TRUE) |
6013 | { |
6014 | const BYTE *ip = walker.GetIP(); |
6015 | |
6016 | SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(ip); |
6017 | |
6018 | LOG((LF_CORDB, LL_INFO1000, "Walking to ip 0x%p (natOff:0x%x)\n" ,ip,offset)); |
6019 | |
6020 | if (!IsInRange(offset, range, rangeCount) |
6021 | && !ShouldContinueStep( info, offset )) |
6022 | { |
6023 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6024 | ji, |
6025 | offset, |
6026 | info->m_returnFrame.fp, |
6027 | NULL); |
6028 | return true; |
6029 | } |
6030 | |
6031 | switch (walker.GetOpcodeWalkType()) |
6032 | { |
6033 | case WALK_RETURN: |
6034 | |
6035 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_RETURN Adding Patch.\n" )); |
6036 | |
6037 | // In the loop above, if we're at the return address, we'll check & see |
6038 | // if we're returning to elsewhere within the same method, and if so, |
6039 | // we'll single step rather than TrapStepOut. If we see a return in the |
6040 | // code stream, then we'll set a breakpoint there, so that we can |
6041 | // examine the return address, and decide whether to SS or TSO then |
6042 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6043 | ji, |
6044 | offset, |
6045 | info->m_returnFrame.fp, |
6046 | NULL); |
6047 | return true; |
6048 | |
6049 | case WALK_CALL: |
6050 | |
6051 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL.\n" )); |
6052 | |
6053 | // If we're doing some sort of intra-method jump (usually, to get EIP in a clever way, via the CALL |
6054 | // instruction), then put the bp where we're going, NOT at the instruction following the call |
6055 | if (IsAddrWithinFrame(ji, info->m_activeFrame.md, walker.GetIP(), walker.GetNextIP())) |
6056 | { |
6057 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL IsAddrWithinFrame, Adding Patch.\n" )); |
6058 | |
6059 | // How else to detect this? |
6060 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6061 | ji, |
6062 | CodeRegionInfo::GetCodeRegionInfo(ji, info->m_activeFrame.md).AddressToOffset(walker.GetNextIP()), |
6063 | info->m_returnFrame.fp, |
6064 | NULL); |
6065 | return true; |
6066 | } |
6067 | |
6068 | if (IsTailCall(walker.GetNextIP())) |
6069 | { |
6070 | if (!in) |
6071 | { |
6072 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6073 | ji, |
6074 | offset, |
6075 | info->m_returnFrame.fp, |
6076 | NULL); |
6077 | return true; |
6078 | } |
6079 | } |
6080 | |
6081 | #ifdef WIN64EXCEPTIONS |
6082 | fCallingIntoFunclet = IsAddrWithinMethodIncludingFunclet(ji, info->m_activeFrame.md, walker.GetNextIP()); |
6083 | #endif |
6084 | if (in || fCallingIntoFunclet) |
6085 | { |
6086 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL step in is true\n" )); |
6087 | if (walker.GetNextIP() == NULL) |
6088 | { |
6089 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL NextIP == NULL\n" )); |
6090 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6091 | ji, |
6092 | offset, |
6093 | info->m_returnFrame.fp, |
6094 | NULL); |
6095 | |
6096 | LOG((LF_CORDB,LL_INFO10000,"DS0x%x m_reason=STEP_CALL 2\n" , |
6097 | this)); |
6098 | m_reason = STEP_CALL; |
6099 | |
6100 | return true; |
6101 | } |
6102 | |
6103 | if (TrapStepInHelper(info, walker.GetNextIP(), walker.GetSkipIP(), fCallingIntoFunclet)) |
6104 | { |
6105 | return true; |
6106 | } |
6107 | |
6108 | } |
6109 | |
6110 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: WALK_CALL Calling GetSkipIP\n" )); |
6111 | if (walker.GetSkipIP() == NULL) |
6112 | { |
6113 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6114 | ji, |
6115 | offset, |
6116 | info->m_returnFrame.fp, |
6117 | NULL); |
6118 | |
6119 | LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason=STEP_CALL4\n" ,this)); |
6120 | m_reason = STEP_CALL; |
6121 | |
6122 | return true; |
6123 | } |
6124 | |
6125 | walker.Skip(); |
6126 | LOG((LF_CORDB, LL_INFO10000, "DS::TS: skipping over call.\n" )); |
6127 | break; |
6128 | |
6129 | default: |
6130 | if (walker.GetNextIP() == NULL) |
6131 | { |
6132 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6133 | ji, |
6134 | offset, |
6135 | info->m_returnFrame.fp, |
6136 | NULL); |
6137 | return true; |
6138 | } |
6139 | walker.Next(); |
6140 | break; |
6141 | } |
6142 | } |
6143 | LOG((LF_CORDB,LL_INFO1000,"Ending TrapStep\n" )); |
6144 | } |
6145 | |
6146 | bool DebuggerStepper::IsAddrWithinFrame(DebuggerJitInfo *dji, |
6147 | MethodDesc* pMD, |
6148 | const BYTE* currentAddr, |
6149 | const BYTE* targetAddr) |
6150 | { |
6151 | _ASSERTE(dji != NULL); |
6152 | |
6153 | bool result = IsAddrWithinMethodIncludingFunclet(dji, pMD, targetAddr); |
6154 | |
6155 | // We need to check if this is a recursive call. In RTM we should see if this method is really necessary, |
6156 | // since it looks like the X86 JIT doesn't emit intra-method jumps anymore. |
6157 | if (result) |
6158 | { |
6159 | if ((CORDB_ADDRESS)(SIZE_T)targetAddr == dji->m_addrOfCode) |
6160 | { |
6161 | result = false; |
6162 | } |
6163 | } |
6164 | |
6165 | #if defined(WIN64EXCEPTIONS) |
// On WIN64, we also check whether targetAddr and currentAddr are in the same funclet.
6167 | _ASSERTE(currentAddr != NULL); |
6168 | if (result) |
6169 | { |
6170 | int currentFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)currentAddr, DebuggerJitInfo::GFIM_BYADDRESS); |
6171 | int targetFuncletIndex = dji->GetFuncletIndex((CORDB_ADDRESS)targetAddr, DebuggerJitInfo::GFIM_BYADDRESS); |
6172 | result = (currentFuncletIndex == targetFuncletIndex); |
6173 | } |
6174 | #endif // WIN64EXCEPTIONS |
6175 | |
6176 | return result; |
6177 | } |
6178 | |
6179 | // x86 shouldn't need to call this method directly. We should call IsAddrWithinFrame() on x86 instead. |
6180 | // That's why I use a name with the word "funclet" in it to scare people off. |
6181 | bool DebuggerStepper::IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji, |
6182 | MethodDesc* pMD, |
6183 | const BYTE* targetAddr) |
6184 | { |
6185 | _ASSERTE(dji != NULL); |
6186 | return CodeRegionInfo::GetCodeRegionInfo(dji, pMD).IsMethodAddress(targetAddr); |
6187 | } |
6188 | |
6189 | void DebuggerStepper::TrapStepNext(ControllerStackInfo *info) |
6190 | { |
6191 | LOG((LF_CORDB, LL_INFO10000, "DS::TrapStepNext, this=%p\n" , this)); |
6192 | // StepNext for a Normal stepper is just a step-out |
6193 | TrapStepOut(info); |
6194 | |
6195 | // @todo -should we also EnableTraceCall?? |
6196 | } |
6197 | |
6198 | // Is this frame interesting? |
6199 | // For a traditional stepper, all frames are interesting. |
6200 | bool DebuggerStepper::IsInterestingFrame(FrameInfo * pFrame) |
6201 | { |
6202 | LIMITED_METHOD_CONTRACT; |
6203 | |
6204 | return true; |
6205 | } |
6206 | |
6207 | // Place a single patch somewhere up the stack to do a step-out |
6208 | void DebuggerStepper::TrapStepOut(ControllerStackInfo *info, bool fForceTraditional) |
6209 | { |
6210 | ControllerStackInfo returnInfo; |
6211 | DebuggerJitInfo *dji; |
6212 | |
6213 | LOG((LF_CORDB, LL_INFO10000, "DS::TSO this:0x%p\n" , this)); |
6214 | |
6215 | bool fReturningFromFinallyFunclet = false; |
6216 | |
6217 | #if defined(WIN64EXCEPTIONS) |
6218 | // When we step out of a funclet, we should do one of two things, depending |
6219 | // on the original stepping intention: |
6220 | // 1) If we originally want to step out, then we should skip the parent method. |
6221 | // 2) If we originally want to step in/over but we step off the end of the funclet, |
6222 | // then we should resume in the parent, if possible. |
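// For example, stepping over the last instruction of a finally that ran
// non-exceptionally should resume at the matching point in the parent
// method rather than stopping in the EH dispatch code.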
6223 | if (info->m_activeFrame.IsNonFilterFuncletFrame()) |
6224 | { |
6225 | // There should always be a frame for the parent method. |
6226 | _ASSERTE(info->HasReturnFrame()); |
6227 | |
6228 | #ifdef _TARGET_ARM_ |
6229 | while (info->HasReturnFrame() && info->m_activeFrame.md != info->m_returnFrame.md) |
6230 | { |
6231 | StackTraceTicket ticket(info); |
6232 | returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL); |
6233 | info = &returnInfo; |
6234 | } |
6235 | |
6236 | _ASSERTE(info->HasReturnFrame()); |
6237 | #endif |
6238 | |
6239 | _ASSERTE(info->m_activeFrame.md == info->m_returnFrame.md); |
6240 | |
6241 | if (m_eMode == cStepOut) |
6242 | { |
6243 | StackTraceTicket ticket(info); |
6244 | returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL); |
6245 | info = &returnInfo; |
6246 | } |
6247 | else |
6248 | { |
6249 | _ASSERTE(info->m_returnFrame.managed); |
6250 | _ASSERTE(info->m_returnFrame.frame == NULL); |
6251 | |
6252 | MethodDesc *md = info->m_returnFrame.md; |
6253 | dji = info->m_returnFrame.GetJitInfoFromFrame(); |
6254 | |
6255 | // The return value of a catch funclet is the control PC to resume to. |
6256 | // The return value of a finally funclet has no meaning, so we need to check |
6257 | // if the return value is in the main method. |
6258 | LPVOID resumePC = GetRegdisplayReturnValue(&(info->m_activeFrame.registers)); |
6259 | |
// For a finally funclet, there are two possible situations. Either the finally is
// called normally (i.e. no exception), in which case we simply fall through and
// let the normal loop do its work below, or the finally is called by the EH
// routines, in which case we need the unwind notification.
6264 | if (IsAddrWithinMethodIncludingFunclet(dji, md, (const BYTE *)resumePC)) |
6265 | { |
6266 | SIZE_T reloffset = dji->m_codeRegionInfo.AddressToOffset((BYTE*)resumePC); |
6267 | |
6268 | AddBindAndActivateNativeManagedPatch(info->m_returnFrame.md, |
6269 | dji, |
6270 | reloffset, |
6271 | info->m_returnFrame.fp, |
6272 | NULL); |
6273 | |
6274 | LOG((LF_CORDB, LL_INFO10000, |
6275 | "DS::TSO:normally managed code AddPatch" |
6276 | " in %s::%s, offset 0x%x, m_reason=%d\n" , |
6277 | info->m_returnFrame.md->m_pszDebugClassName, |
6278 | info->m_returnFrame.md->m_pszDebugMethodName, |
6279 | reloffset, m_reason)); |
6280 | |
6281 | // Do not set m_reason to STEP_RETURN here. Logically, the funclet and the parent method are the |
6282 | // same method, so we should not "return" to the parent method. |
6283 | LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n" )); |
6284 | |
6285 | return; |
6286 | } |
6287 | else |
6288 | { |
6289 | // This is the case where we step off the end of a finally funclet. |
6290 | fReturningFromFinallyFunclet = true; |
6291 | } |
6292 | } |
6293 | } |
6294 | #endif // WIN64EXCEPTIONS |
6295 | |
6296 | #ifdef _DEBUG |
6297 | FramePointer dbgLastFP; // for debug, make sure we're making progress through the stack. |
6298 | #endif |
6299 | |
6300 | while (info->HasReturnFrame()) |
6301 | { |
6302 | |
6303 | #ifdef _DEBUG |
6304 | dbgLastFP = info->m_activeFrame.fp; |
6305 | #endif |
6306 | |
6307 | // Continue walking up the stack & set a patch upon the next |
6308 | // frame up. We will eventually either hit managed code |
6309 | // (which we can set a definite patch in), or the top of the |
6310 | // stack. |
6311 | StackTraceTicket ticket(info); |
6312 | |
// The last parameter here is part of a really targeted (*cough* dirty) fix to
6314 | // disable getting an unwanted UMChain to fix issue 650903 (See |
6315 | // code:ControllerStackInfo::WalkStack and code:TrackUMChain for the other |
6316 | // parts.) In the case of managed step out we know that we aren't interested in |
6317 | // unmanaged frames, and generating that unmanaged frame causes the stackwalker |
6318 | // not to report the managed frame that was at the same SP. However the unmanaged |
6319 | // frame might be used in the mixed-mode step out case so I don't suppress it |
6320 | // there. |
6321 | returnInfo.GetStackInfo(ticket, GetThread(), info->m_returnFrame.fp, NULL, !(m_rgfMappingStop & STOP_UNMANAGED)); |
6322 | info = &returnInfo; |
6323 | |
6324 | #ifdef _DEBUG |
6325 | // If this assert fires, then it means that we're not making progress while |
// tracing up towards the root of the stack. Likely an issue in the Left-Side's
6327 | // stackwalker. |
6328 | _ASSERTE(IsCloserToLeaf(dbgLastFP, info->m_activeFrame.fp)); |
6329 | #endif |
6330 | |
6331 | #ifdef FEATURE_MULTICASTSTUB_AS_IL |
6332 | if (info->m_activeFrame.md != nullptr && info->m_activeFrame.md->IsILStub() && info->m_activeFrame.md->AsDynamicMethodDesc()->IsMulticastStub()) |
6333 | { |
6334 | LOG((LF_CORDB, LL_INFO10000, |
6335 | "DS::TSO: multicast frame.\n" )); |
6336 | |
6337 | // User break should always be called from managed code, so it should never actually hit this codepath. |
6338 | _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT); |
6339 | |
6340 | // JMC steppers shouldn't be patching stubs. |
6341 | if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType()) |
6342 | { |
6343 | LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n" )); |
6344 | continue; |
6345 | } |
6346 | |
6347 | TraceDestination trace; |
6348 | |
6349 | EnableTraceCall(info->m_activeFrame.fp); |
6350 | |
6351 | PCODE ip = GetControlPC(&(info->m_activeFrame.registers)); |
6352 | if (g_pEEInterface->TraceStub((BYTE*)ip, &trace) |
6353 | && g_pEEInterface->FollowTrace(&trace) |
6354 | && PatchTrace(&trace, info->m_activeFrame.fp, |
6355 | true)) |
6356 | break; |
6357 | } |
6358 | else |
6359 | #endif // FEATURE_MULTICASTSTUB_AS_IL |
6360 | if (info->m_activeFrame.managed) |
6361 | { |
6362 | LOG((LF_CORDB, LL_INFO10000, |
6363 | "DS::TSO: return frame is managed.\n" )); |
6364 | |
6365 | if (info->m_activeFrame.frame == NULL) |
6366 | { |
6367 | // Returning normally to managed code. |
6368 | _ASSERTE(info->m_activeFrame.md != NULL); |
6369 | |
6370 | // Polymorphic check to skip over non-interesting frames. |
6371 | if (!fForceTraditional && !this->IsInterestingFrame(&info->m_activeFrame)) |
6372 | continue; |
6373 | |
6374 | dji = info->m_activeFrame.GetJitInfoFromFrame(); |
6375 | _ASSERTE(dji != NULL); |
6376 | |
6377 | // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value |
6378 | // in that, and it was causing problems creating a stepper while sitting in ndirect stubs after we'd |
6379 | // returned from the unmanaged function that had been called. |
6380 | ULONG reloffset = info->m_activeFrame.relOffset; |
6381 | |
6382 | AddBindAndActivateNativeManagedPatch(info->m_activeFrame.md, |
6383 | dji, |
6384 | reloffset, |
6385 | info->m_returnFrame.fp, |
6386 | NULL); |
6387 | |
6388 | LOG((LF_CORDB, LL_INFO10000, |
6389 | "DS::TSO:normally managed code AddPatch" |
6390 | " in %s::%s, offset 0x%x, m_reason=%d\n" , |
6391 | info->m_activeFrame.md->m_pszDebugClassName, |
6392 | info->m_activeFrame.md->m_pszDebugMethodName, |
6393 | reloffset, m_reason)); |
6394 | |
6395 | |
// Don't set m_reason to STEP_RETURN if we are just stepping off the end of a finally
// funclet: logically, the funclet and the parent method are the same method, so we
// should not "return" to the parent method in that case.
6398 | if (!fReturningFromFinallyFunclet) |
6399 | { |
6400 | m_reason = STEP_RETURN; |
6401 | } |
6402 | break; |
6403 | } |
6404 | else if (info->m_activeFrame.frame == FRAME_TOP) |
6405 | { |
6406 | |
6407 | // Trad-stepper's step-out is actually like a step-next when we go off the top. |
6408 | // JMC-steppers do a true-step out. So for JMC-steppers, don't enable trace-call. |
6409 | if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType()) |
6410 | { |
6411 | LOG((LF_CORDB, LL_EVERYTHING, "DS::TSO: JMC stepper skipping exit-frame case.\n" )); |
6412 | break; |
6413 | } |
6414 | |
6415 | // User break should always be called from managed code, so it should never actually hit this codepath. |
6416 | _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT); |
6417 | |
6418 | |
// We're walking off the top of the stack. Note that if we call managed code again,
// this trace-call will cause our stepper to fire, so we'll actually do a
// step-next, not a true step-out.
6422 | EnableTraceCall(info->m_activeFrame.fp); |
6423 | |
6424 | LOG((LF_CORDB, LL_INFO1000, "DS::TSO: Off top of frame!\n" )); |
6425 | |
6426 | m_reason = STEP_EXIT; //we're on the way out.. |
6427 | |
6428 | // <REVISIT_TODO>@todo not that it matters since we don't send a |
6429 | // stepComplete message to the right side.</REVISIT_TODO> |
6430 | break; |
6431 | } |
6432 | else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_FUNC_EVAL) |
6433 | { |
6434 | // Note: we treat walking off the top of the stack and |
6435 | // walking off the top of a func eval the same way, |
6436 | // except that we don't enable trace call since we |
// know exactly where we're going.
6438 | |
6439 | LOG((LF_CORDB, LL_INFO1000, |
6440 | "DS::TSO: Off top of func eval!\n" )); |
6441 | |
6442 | m_reason = STEP_EXIT; |
6443 | break; |
6444 | } |
6445 | else if (info->m_activeFrame.frame->GetFrameType() == Frame::TYPE_SECURITY && |
6446 | info->m_activeFrame.frame->GetInterception() == Frame::INTERCEPTION_NONE) |
6447 | { |
// If we're stepping out of something that was protected by (declarative) security,
// the security subsystem may leave a frame on the stack to cache its computation.
// HOWEVER, this isn't a real frame, so we don't want to stop here. On the other
// hand, if we're in the security goop (security executes managed code to do its work),
// then we'll want to use the "returning to stub" case below. GetInterception()==NONE
// indicates that the frame is just a cache frame:
// skip it and keep on going.
6455 | |
6456 | LOG((LF_CORDB, LL_INFO10000, |
6457 | "DS::TSO: returning to a non-intercepting frame. Keep unwinding\n" )); |
6458 | continue; |
6459 | } |
6460 | else |
6461 | { |
6462 | LOG((LF_CORDB, LL_INFO10000, |
6463 | "DS::TSO: returning to a stub frame.\n" )); |
6464 | |
6465 | // User break should always be called from managed code, so it should never actually hit this codepath. |
6466 | _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT); |
6467 | |
6468 | // JMC steppers shouldn't be patching stubs. |
6469 | if (DEBUGGER_CONTROLLER_JMC_STEPPER == this->GetDCType()) |
6470 | { |
6471 | LOG((LF_CORDB, LL_INFO10000, "DS::TSO: JMC stepper skipping frame.\n" )); |
6472 | continue; |
6473 | } |
6474 | |
6475 | // We're returning to some funky frame. |
6476 | // (E.g. a security frame has called a native method.) |
6477 | |
// Patch the frame so we catch it entering other methods. This effectively gives the
// step-out a step-next behavior. For example, this can be useful for a step-out going
// between multicast delegates.
// This step-next could actually land us leaf-more on the callstack than we currently are!
// If we were a true step-out, we'd skip this and keep crawling
// up the callstack.
6483 | // |
6484 | // !!! For now, we assume that the TraceFrame entry |
6485 | // point is smart enough to tell where it is in the |
6486 | // calling sequence. We'll see how this holds up. |
6487 | TraceDestination trace; |
6488 | |
6489 | // We don't want notifications of trace-calls leaf-more than our current frame. |
6490 | // For eg, if our current frame calls out to unmanaged code and then back in, |
6491 | // we'll get a TraceCall notification. But since it's leaf-more than our current frame, |
6492 | // we don't care because we just want to step out of our current frame (and everything |
6493 | // our current frame may call). |
6494 | EnableTraceCall(info->m_activeFrame.fp); |
6495 | |
6496 | CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers |
6497 | |
6498 | if (g_pEEInterface->TraceFrame(GetThread(), |
6499 | info->m_activeFrame.frame, FALSE, |
6500 | &trace, &(info->m_activeFrame.registers)) |
6501 | && g_pEEInterface->FollowTrace(&trace) |
6502 | && PatchTrace(&trace, info->m_activeFrame.fp, |
6503 | true)) |
6504 | break; |
6505 | |
6506 | // !!! Problem: we don't know which return frame to use - |
6507 | // the TraceFrame patch may be in a frame below the return |
6508 | // frame, or in a frame parallel with it |
6509 | // (e.g. prestub popping itself & then calling.) |
6510 | // |
6511 | // For now, I've tweaked the FP comparison in the |
6512 | // patch dispatching code to allow either case. |
6513 | } |
6514 | } |
6515 | else |
6516 | { |
6517 | LOG((LF_CORDB, LL_INFO10000, |
6518 | "DS::TSO: return frame is not managed.\n" )); |
6519 | |
6520 | // Only step out to unmanaged code if we're actually |
// marked to stop in unmanaged code. Otherwise, just loop
6522 | // to get us past the unmanaged frames. |
6523 | if (m_rgfMappingStop & STOP_UNMANAGED) |
6524 | { |
6525 | LOG((LF_CORDB, LL_INFO10000, |
6526 | "DS::TSO: return to unmanaged code " |
6527 | "m_reason=STEP_RETURN\n" )); |
6528 | |
// Don't set m_reason to STEP_RETURN if we are just stepping off the end of a finally
// funclet: logically, the funclet and the parent method are the same method.
6531 | if (!fReturningFromFinallyFunclet) |
6532 | { |
6533 | m_reason = STEP_RETURN; |
6534 | } |
6535 | |
6536 | // We're stepping out into unmanaged code |
6537 | LOG((LF_CORDB, LL_INFO10000, |
6538 | "DS::TSO: Setting unmanaged trace patch at 0x%x(%x)\n" , |
6539 | GetControlPC(&(info->m_activeFrame.registers)), |
6540 | info->m_returnFrame.fp.GetSPValue())); |
6541 | |
6542 | |
6543 | AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE *)GetControlPC(&(info->m_activeFrame.registers)), |
6544 | info->m_returnFrame.fp, |
6545 | FALSE, |
6546 | TRACE_UNMANAGED); |
6547 | |
6548 | break; |
6549 | |
6550 | } |
6551 | } |
6552 | } |
6553 | |
6554 | // <REVISIT_TODO>If we get here, we may be stepping out of the last frame. Our thread |
6555 | // exit logic should catch this case. (@todo)</REVISIT_TODO> |
6556 | LOG((LF_CORDB, LL_INFO10000,"DS::TSO: done\n" )); |
6557 | } |
6558 | |
6559 | |
// void DebuggerStepper::StepOut()
// Called by Debugger::HandleIPCEvent (and, while live, by
// DebuggerUserBreakpoint) to set everything up so that the process will
// step out of the current frame correctly.
// How: Gets the current stack info, clears any leftover step ranges
// (ResetRange), remembers the active frame pointer in m_fp (and, inside a
// funclet, the parent method's frame pointer in m_fpParentMethod), sets
// m_eMode to cStepOut, then calls TrapStepOut to place a patch up the
// stack, and EnableUnwind(m_fp) to catch exception unwinds past that frame.
6575 | void DebuggerStepper::StepOut(FramePointer fp, StackTraceTicket ticket) |
6576 | { |
6577 | LOG((LF_CORDB, LL_INFO10000, "Attempting to step out, fp:0x%x this:0x%x" |
6578 | "\n" , fp.GetSPValue(), this )); |
6579 | |
6580 | Thread *thread = GetThread(); |
6581 | |
6582 | |
6583 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
6584 | ControllerStackInfo info; |
6585 | |
6586 | // We pass in the ticket b/c this is called both when we're live (via |
6587 | // DebuggerUserBreakpoint) and when we're stopped (via normal StepOut) |
6588 | info.GetStackInfo(ticket, thread, fp, context); |
6589 | |
6590 | |
6591 | ResetRange(); |
6592 | |
6593 | |
6594 | m_stepIn = FALSE; |
6595 | m_fp = info.m_activeFrame.fp; |
6596 | #if defined(WIN64EXCEPTIONS) |
6597 | // We need to remember the parent method frame pointer here so that we will recognize |
6598 | // the range of the stepper as being valid when we return to the parent method. |
6599 | if (info.m_activeFrame.IsNonFilterFuncletFrame()) |
6600 | { |
6601 | m_fpParentMethod = info.m_returnFrame.fp; |
6602 | } |
6603 | #endif // WIN64EXCEPTIONS |
6604 | |
6605 | m_eMode = cStepOut; |
6606 | |
6607 | _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) || (info.m_returnFrame.md != NULL)); |
6608 | |
6609 | TrapStepOut(&info); |
6610 | EnableUnwind(m_fp); |
6611 | } |
6612 | |
6613 | #define GROW_RANGES_IF_NECESSARY() \ |
6614 | if (rTo == rToEnd) \ |
6615 | { \ |
6616 | ULONG NewSize, OldSize; \ |
6617 | if (!ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)(realRangeCount*2), NewSize) || \ |
6618 | !ClrSafeInt<ULONG>::multiply(sizeof(COR_DEBUG_STEP_RANGE), (ULONG)realRangeCount, OldSize) || \ |
6619 | NewSize < OldSize) \ |
6620 | { \ |
6621 | DeleteInteropSafe(m_range); \ |
6622 | m_range = NULL; \ |
6623 | return false; \ |
6624 | } \ |
6625 | COR_DEBUG_STEP_RANGE *_pTmp = (COR_DEBUG_STEP_RANGE*) \ |
6626 | g_pDebugger->GetInteropSafeHeap()->Realloc(m_range, \ |
6627 | NewSize, \ |
6628 | OldSize); \ |
6629 | \ |
6630 | if (_pTmp == NULL) \ |
6631 | { \ |
6632 | DeleteInteropSafe(m_range); \ |
6633 | m_range = NULL; \ |
6634 | return false; \ |
6635 | } \ |
6636 | \ |
6637 | m_range = _pTmp; \ |
6638 | rTo = m_range + realRangeCount; \ |
6639 | rToEnd = m_range + (realRangeCount*2); \ |
6640 | realRangeCount *= 2; \ |
6641 | } |
6642 | |
6643 | //----------------------------------------------------------------------------- |
6644 | // Given a set of IL ranges, convert them to native and cache them. |
6645 | // Return true on success, false on error. |
6646 | //----------------------------------------------------------------------------- |
6647 | bool DebuggerStepper::SetRangesFromIL(DebuggerJitInfo *dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount) |
6648 | { |
6649 | CONTRACTL |
6650 | { |
6651 | SO_NOT_MAINLINE; |
6652 | WRAPPER(THROWS); |
6653 | GC_NOTRIGGER; |
PRECONDITION(ThisIsHelperThreadWorker()); // Only the helper thread initializes a stepper.
6655 | PRECONDITION(m_range == NULL); // shouldn't be set already. |
6656 | PRECONDITION(CheckPointer(ranges)); |
6657 | PRECONDITION(CheckPointer(dji)); |
6658 | } |
6659 | CONTRACTL_END; |
6660 | |
6661 | // Note: we used to pass in the IP from the active frame to GetJitInfo, but there seems to be no value in that, and |
6662 | // it was causing problems creating a stepper while sitting in ndirect stubs after we'd returned from the unmanaged |
6663 | // function that had been called. |
6664 | MethodDesc *fd = dji->m_fd; |
6665 | |
6666 | // The "+1" is for internal use, when we need to |
6667 | // set an intermediate patch in pitched code. Isn't |
6668 | // used unless the method is pitched & a patch is set |
6669 | // inside it. Thus we still pass cRanges as the |
6670 | // range count. |
6671 | m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount+1]; |
6672 | |
6673 | if (m_range == NULL) |
6674 | return false; |
6675 | |
6676 | TRACE_ALLOC(m_range); |
6677 | |
6678 | SIZE_T realRangeCount = rangeCount; |
6679 | |
6680 | if (dji != NULL) |
6681 | { |
6682 | LOG((LF_CORDB,LL_INFO10000,"DeSt::St: For code md=0x%x, got DJI 0x%x, from 0x%x to 0x%x\n" , |
6683 | fd, |
6684 | dji, dji->m_addrOfCode, (ULONG)dji->m_addrOfCode |
6685 | + (ULONG)dji->m_sizeOfCode)); |
6686 | |
6687 | // |
6688 | // Map ranges to native offsets for jitted code |
6689 | // |
6690 | COR_DEBUG_STEP_RANGE *r, *rEnd, *rTo, *rToEnd; |
6691 | |
6692 | r = ranges; |
6693 | rEnd = r + rangeCount; |
6694 | |
6695 | rTo = m_range; |
6696 | rToEnd = rTo + realRangeCount; |
6697 | |
6698 | // <NOTE> |
6699 | // rTo may also be incremented in the middle of the loop on WIN64 platforms. |
6700 | // </NOTE> |
6701 | for (/**/; r < rEnd; r++, rTo++) |
6702 | { |
6703 | // If we are already at the end of our allocated array, but there are still |
6704 | // more ranges to copy over, then grow the array. |
6705 | GROW_RANGES_IF_NECESSARY(); |
6706 | |
6707 | if (r->startOffset == 0 && r->endOffset == (ULONG) ~0) |
6708 | { |
6709 | // {0...-1} means use the entire method as the range |
6710 | // Code dup'd from below case. |
6711 | LOG((LF_CORDB, LL_INFO10000, "DS:Step: Have DJI, special (0,-1) entry\n" )); |
6712 | rTo->startOffset = 0; |
6713 | rTo->endOffset = (ULONG32)g_pEEInterface->GetFunctionSize(fd); |
6714 | } |
6715 | else |
6716 | { |
6717 | // |
6718 | // One IL range may consist of multiple |
6719 | // native ranges. |
6720 | // |
6721 | |
6722 | DebuggerILToNativeMap *mStart, *mEnd; |
6723 | |
6724 | dji->MapILRangeToMapEntryRange(r->startOffset, |
6725 | r->endOffset, |
6726 | &mStart, |
6727 | &mEnd); |
6728 | |
6729 | // Either mStart and mEnd are both NULL (we don't have any sequence point), |
6730 | // or they are both non-NULL. |
6731 | _ASSERTE( ((mStart == NULL) && (mEnd == NULL)) || |
6732 | ((mStart != NULL) && (mEnd != NULL)) ); |
6733 | |
6734 | if (mStart == NULL) |
6735 | { |
6736 | // <REVISIT_TODO>@todo Won't this result in us stepping across |
6737 | // the entire method?</REVISIT_TODO> |
6738 | rTo->startOffset = 0; |
6739 | rTo->endOffset = 0; |
6740 | } |
6741 | else if (mStart == mEnd) |
6742 | { |
6743 | rTo->startOffset = mStart->nativeStartOffset; |
6744 | rTo->endOffset = mStart->nativeEndOffset; |
6745 | } |
6746 | else |
6747 | { |
6748 | // Account for more than one continuous range here. |
6749 | |
6750 | // Move the pointer back to work with the loop increment below. |
6751 | // Don't dereference this pointer now! |
6752 | rTo--; |
6753 | |
6754 | for (DebuggerILToNativeMap* pMap = mStart; |
6755 | pMap <= mEnd; |
6756 | pMap = pMap + 1) |
6757 | { |
6758 | if ((pMap == mStart) || |
6759 | (pMap->nativeStartOffset != (pMap-1)->nativeEndOffset)) |
6760 | { |
6761 | rTo++; |
6762 | GROW_RANGES_IF_NECESSARY(); |
6763 | |
6764 | rTo->startOffset = pMap->nativeStartOffset; |
6765 | rTo->endOffset = pMap->nativeEndOffset; |
6766 | } |
6767 | else |
6768 | { |
6769 | // If we have continuous ranges, then lump them together. |
6770 | _ASSERTE(rTo->endOffset == pMap->nativeStartOffset); |
6771 | rTo->endOffset = pMap->nativeEndOffset; |
6772 | } |
6773 | } |
6774 | |
6775 | LOG((LF_CORDB, LL_INFO10000, "DS:Step: nat off:0x%x to 0x%x\n" , rTo->startOffset, rTo->endOffset)); |
6776 | } |
6777 | } |
6778 | } |
6779 | |
6780 | rangeCount = (int)((BYTE*)rTo - (BYTE*)m_range) / sizeof(COR_DEBUG_STEP_RANGE); |
6781 | } |
6782 | else |
6783 | { |
6784 | // Even if we don't have debug info, we'll be able to |
6785 | // step through the method |
6786 | SIZE_T functionSize = g_pEEInterface->GetFunctionSize(fd); |
6787 | |
6788 | COR_DEBUG_STEP_RANGE *r = ranges; |
6789 | COR_DEBUG_STEP_RANGE *rEnd = r + rangeCount; |
6790 | |
6791 | COR_DEBUG_STEP_RANGE *rTo = m_range; |
6792 | |
6793 | for(/**/; r < rEnd; r++, rTo++) |
6794 | { |
6795 | if (r->startOffset == 0 && r->endOffset == (ULONG) ~0) |
6796 | { |
6797 | LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, (0,-1) special entry\n" )); |
6798 | // Code dup'd from above case. |
6799 | // {0...-1} means use the entire method as the range |
6800 | rTo->startOffset = 0; |
6801 | rTo->endOffset = (ULONG32)functionSize; |
6802 | } |
6803 | else |
6804 | { |
6805 | LOG((LF_CORDB, LL_INFO10000, "DS:Step:No DJI, regular entry\n" )); |
// We can't just leave this IL entry - we have to
6807 | // get rid of it. |
6808 | // This will just be ignored |
6809 | rTo->startOffset = rTo->endOffset = (ULONG32)functionSize; |
6810 | } |
6811 | } |
6812 | } |
6813 | |
6814 | |
6815 | m_rangeCount = rangeCount; |
6816 | m_realRangeCount = rangeCount; |
6817 | |
6818 | return true; |
6819 | } |
6820 | |
6821 | |
6822 | // void DebuggerStepper::Step() Tells the stepper to step over |
6823 | // the provided ranges. |
6824 | // void *fp: frame pointer. |
6825 | // bool in: true if we want to step into a function within the range, |
6826 | // false if we want to step over functions within the range. |
// COR_DEBUG_STEP_RANGE *ranges: Assumed to be non-NULL; it will
//      always hold at least one element.
// SIZE_T rangeCount: One less than the true number of elements in
//      the ranges argument.
// bool rangeIL: true if the ranges are provided in IL (they'll be
//      converted to native before the DebuggerStepper uses them),
//      false if they are already native.
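// Usage sketch (illustrative; real calls come from the right side's IPC
// plumbing): to step over a single source line whose IL range is
// [0x10, 0x18), a caller would drive something like
//
//   COR_DEBUG_STEP_RANGE r = { 0x10, 0x18 };
//   pStepper->Step(fp, false /* step over */, &r, 1, true /* IL ranges */);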
6834 | bool DebuggerStepper::Step(FramePointer fp, bool in, |
6835 | COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount, |
6836 | bool rangeIL) |
6837 | { |
6838 | LOG((LF_CORDB, LL_INFO1000, "DeSt:Step this:0x%x " , this)); |
6839 | if (rangeCount>0) |
6840 | LOG((LF_CORDB,LL_INFO10000," start,end[0]:(0x%x,0x%x)\n" , |
6841 | ranges[0].startOffset, ranges[0].endOffset)); |
6842 | else |
6843 | LOG((LF_CORDB,LL_INFO10000," single step\n" )); |
6844 | |
6845 | Thread *thread = GetThread(); |
6846 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
6847 | |
6848 | // ControllerStackInfo doesn't report IL stubs, so if we are in an IL stub, we need |
6849 | // to handle the single-step specially. There are probably other problems when we stop |
6850 | // in an IL stub. We need to revisit this later. |
6851 | bool fIsILStub = false; |
6852 | if ((context != NULL) && |
6853 | g_pEEInterface->IsManagedNativeCode(reinterpret_cast<const BYTE *>(GetIP(context)))) |
6854 | { |
6855 | MethodDesc * pMD = g_pEEInterface->GetNativeCodeMethodDesc(GetIP(context)); |
6856 | if (pMD != NULL) |
6857 | { |
6858 | fIsILStub = pMD->IsILStub(); |
6859 | } |
6860 | } |
6861 | LOG((LF_CORDB, LL_INFO10000, "DS::S - fIsILStub = %d\n" , fIsILStub)); |
6862 | |
6863 | ControllerStackInfo info; |
6864 | |
6865 | |
6866 | StackTraceTicket ticket(thread); |
6867 | info.GetStackInfo(ticket, thread, fp, context); |
6868 | |
6869 | _ASSERTE((fp == LEAF_MOST_FRAME) || (info.m_activeFrame.md != NULL) || |
6870 | (info.m_returnFrame.md != NULL)); |
6871 | |
6872 | m_stepIn = in; |
6873 | |
6874 | DebuggerJitInfo *dji = info.m_activeFrame.GetJitInfoFromFrame(); |
6875 | |
6876 | if (dji == NULL) |
6877 | { |
6878 | // !!! ERROR range step in frame with no code |
6879 | ranges = NULL; |
6880 | rangeCount = 0; |
6881 | } |
6882 | |
6883 | |
6884 | if (m_range != NULL) |
6885 | { |
6886 | TRACE_FREE(m_range); |
6887 | DeleteInteropSafe(m_range); |
6888 | m_range = NULL; |
6889 | m_rangeCount = 0; |
6890 | m_realRangeCount = 0; |
6891 | } |
6892 | |
6893 | if (rangeCount > 0) |
6894 | { |
6895 | if (rangeIL) |
6896 | { |
6897 | // IL ranges supplied, we need to convert them to native ranges. |
6898 | bool fOk = SetRangesFromIL(dji, ranges, rangeCount); |
6899 | if (!fOk) |
6900 | { |
6901 | return false; |
6902 | } |
6903 | } |
6904 | else |
6905 | { |
6906 | // Native ranges, already supplied. Just copy them over. |
6907 | m_range = new (interopsafe) COR_DEBUG_STEP_RANGE[rangeCount]; |
6908 | |
6909 | if (m_range == NULL) |
6910 | { |
6911 | return false; |
6912 | } |
6913 | |
6914 | memcpy(m_range, ranges, sizeof(COR_DEBUG_STEP_RANGE) * rangeCount); |
6915 | m_realRangeCount = m_rangeCount = rangeCount; |
6916 | } |
6917 | _ASSERTE(m_range != NULL); |
6918 | _ASSERTE(m_rangeCount > 0); |
6919 | _ASSERTE(m_realRangeCount > 0); |
6920 | } |
6921 | else |
6922 | { |
6923 | // !!! ERROR cannot map IL ranges |
6924 | ranges = NULL; |
6925 | rangeCount = 0; |
6926 | } |
6927 | |
6928 | if (fIsILStub) |
6929 | { |
6930 | // Don't use the ControllerStackInfo if we are in an IL stub. |
6931 | m_fp = fp; |
6932 | } |
6933 | else |
6934 | { |
6935 | m_fp = info.m_activeFrame.fp; |
6936 | #if defined(WIN64EXCEPTIONS) |
6937 | // We need to remember the parent method frame pointer here so that we will recognize |
6938 | // the range of the stepper as being valid when we return to the parent method. |
6939 | if (info.m_activeFrame.IsNonFilterFuncletFrame()) |
6940 | { |
6941 | m_fpParentMethod = info.m_returnFrame.fp; |
6942 | } |
6943 | #endif // WIN64EXCEPTIONS |
6944 | } |
6945 | m_eMode = m_stepIn ? cStepIn : cStepOver; |
6946 | |
6947 | LOG((LF_CORDB,LL_INFO10000,"DS 0x%x STep: STEP_NORMAL\n" ,this)); |
6948 | m_reason = STEP_NORMAL; //assume it'll be a normal step & set it to |
6949 | //something else if we walk over it |
6950 | if (fIsILStub) |
6951 | { |
6952 | LOG((LF_CORDB, LL_INFO10000, "DS:Step: stepping in an IL stub\n" )); |
6953 | |
6954 | // Enable the right triggers if the user wants to step in. |
6955 | if (in) |
6956 | { |
6957 | if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER) |
6958 | { |
6959 | EnableTraceCall(info.m_activeFrame.fp); |
6960 | } |
6961 | else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER) |
6962 | { |
6963 | EnableMethodEnter(); |
6964 | } |
6965 | } |
6966 | |
6967 | // Also perform a step-out in case this IL stub is returning to managed code. |
6968 | // However, we must fix up the ControllerStackInfo first, since it doesn't |
6969 | // report IL stubs. The active frame reported by the ControllerStackInfo is |
6970 | // actually the return frame in this case. |
6971 | info.SetReturnFrameWithActiveFrame(); |
6972 | TrapStepOut(&info); |
6973 | } |
6974 | else if (!TrapStep(&info, in)) |
6975 | { |
6976 | LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS\n" )); |
6977 | m_stepIn = true; |
6978 | TrapStepNext(&info); |
6979 | } |
6980 | |
6981 | LOG((LF_CORDB,LL_INFO10000,"DS:Step: Did TS,TSO\n" )); |
6982 | |
6983 | EnableUnwind(m_fp); |
6984 | |
6985 | return true; |
6986 | } |
6987 | |
6988 | // TP_RESULT DebuggerStepper::TriggerPatch() |
6989 | // What: Triggers patch if we're not in a stub, and we're |
6990 | // outside of the stepping range. Otherwise sets another patch so as to |
6991 | // step out of the stub, or in the next instruction within the range. |
// How: If module==NULL & managed ==> we're in a stub:
// TrapStepOut() and return TPR_IGNORE. module==NULL & !managed ==> return
// TPR_TRIGGER. If m_range != NULL & execution is currently in the range,
// attempt a TrapStep (TrapStepNext otherwise) & return TPR_IGNORE. Otherwise,
// return TPR_TRIGGER.
6997 | TP_RESULT DebuggerStepper::TriggerPatch(DebuggerControllerPatch *patch, |
6998 | Thread *thread, |
6999 | TRIGGER_WHY tyWhy) |
7000 | { |
7001 | LOG((LF_CORDB, LL_INFO10000, "DeSt::TP\n" )); |
7002 | |
7003 | // If we're frozen, we may hit a patch but we just ignore it |
7004 | if (IsFrozen()) |
7005 | { |
7006 | LOG((LF_CORDB, LL_INFO1000000, "DS::TP, ignoring patch at %p during frozen state\n" , patch->address)); |
7007 | return TPR_IGNORE; |
7008 | } |
7009 | |
7010 | Module *module = patch->key.module; |
7011 | BOOL managed = patch->IsManagedPatch(); |
7012 | mdMethodDef md = patch->key.md; |
7013 | SIZE_T offset = patch->offset; |
7014 | |
7015 | _ASSERTE((this->GetThread() == thread) || !"Stepper should only get patches on its thread" ); |
7016 | |
7017 | // Note we can only run a stack trace if: |
7018 | // - the context is in managed code (eg, not a stub) |
7019 | // - OR we have a frame in place to prime the stackwalk. |
7020 | ControllerStackInfo info; |
7021 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
7022 | |
7023 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
7024 | |
7025 | // Context should always be from patch. |
7026 | _ASSERTE(context != NULL); |
7027 | |
7028 | bool fSafeToDoStackTrace = true; |
7029 | |
7030 | // If we're in a stub (module == NULL and still in managed code), then our context is off in lala-land |
7031 | // Then, it's only safe to do a stackwalk if the top frame is protecting us. That's only true for a |
7032 | // frame_push. If we're here on a manager_push, then we don't have any such protection, so don't do the |
7033 | // stackwalk. |
7034 | |
7035 | fSafeToDoStackTrace = patch->IsSafeForStackTrace(); |
7036 | |
7037 | |
7038 | if (fSafeToDoStackTrace) |
7039 | { |
7040 | StackTraceTicket ticket(patch); |
7041 | info.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, context); |
7042 | |
7043 | LOG((LF_CORDB, LL_INFO10000, "DS::TP: this:0x%p in %s::%s (fp:0x%p, " |
7044 | "off:0x%p md:0x%p), \n\texception source:%s::%s (fp:0x%p)\n" , |
7045 | this, |
7046 | info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugClassName:"Unknown" , |
7047 | info.m_activeFrame.md!=NULL?info.m_activeFrame.md->m_pszDebugMethodName:"Unknown" , |
7048 | info.m_activeFrame.fp.GetSPValue(), patch->offset, patch->key.md, |
7049 | m_fdException!=NULL?m_fdException->m_pszDebugClassName:"None" , |
7050 | m_fdException!=NULL?m_fdException->m_pszDebugMethodName:"None" , |
7051 | m_fpException.GetSPValue())); |
7052 | } |
7053 | |
7054 | DisableAll(); |
7055 | |
7056 | if (DetectHandleLCGMethods(dac_cast<PCODE>(patch->address), NULL, &info)) |
7057 | { |
7058 | return TPR_IGNORE; |
7059 | } |
7060 | |
7061 | if (module == NULL) |
7062 | { |
7063 | // JMC steppers should not be patching here... |
7064 | _ASSERTE(DEBUGGER_CONTROLLER_JMC_STEPPER != this->GetDCType()); |
7065 | |
7066 | if (managed) |
7067 | { |
7068 | |
7069 | LOG((LF_CORDB, LL_INFO10000, |
7070 | "Frame (stub) patch hit at offset 0x%x\n" , offset)); |
7071 | |
7072 | // This is a stub patch. If it was a TRACE_FRAME_PUSH that |
7073 | // got us here, then the stub's frame is pushed now, so we |
7074 | // tell the frame to apply the real patch. If we got here |
7075 | // via a TRACE_MGR_PUSH, however, then there is no frame |
7076 | // and we tell the stub manager that generated the |
7077 | // TRACE_MGR_PUSH to apply the real patch. |
7078 | TraceDestination trace; |
7079 | bool traceOk; |
7080 | FramePointer frameFP; |
7081 | PTR_BYTE traceManagerRetAddr = NULL; |
7082 | |
7083 | if (patch->trace.GetTraceType() == TRACE_MGR_PUSH) |
7084 | { |
7085 | _ASSERTE(context != NULL); |
7086 | CONTRACT_VIOLATION(GCViolation); |
7087 | traceOk = g_pEEInterface->TraceManager( |
7088 | thread, |
7089 | patch->trace.GetStubManager(), |
7090 | &trace, |
7091 | context, |
7092 | &traceManagerRetAddr); |
7093 | |
// We don't have an active frame here, so patch with a
// FP of NULL so anything will match.
7096 | // |
7097 | // <REVISIT_TODO>@todo: should we take Esp out of the context?</REVISIT_TODO> |
7098 | frameFP = LEAF_MOST_FRAME; |
7099 | } |
7100 | else |
7101 | { |
7102 | _ASSERTE(fSafeToDoStackTrace); |
7103 | CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers |
7104 | traceOk = g_pEEInterface->TraceFrame(thread, |
7105 | thread->GetFrame(), |
7106 | TRUE, |
7107 | &trace, |
7108 | &(info.m_activeFrame.registers)); |
7109 | |
7110 | frameFP = info.m_activeFrame.fp; |
7111 | } |
7112 | |
7113 | // Enable the JMC backstop for traditional steppers to catch us in case |
7114 | // we didn't predict the call target properly. |
7115 | EnableJMCBackStop(NULL); |
7116 | |
7117 | |
7118 | if (!traceOk |
7119 | || !g_pEEInterface->FollowTrace(&trace) |
7120 | || !PatchTrace(&trace, frameFP, |
7121 | (m_rgfMappingStop&STOP_UNMANAGED)? |
7122 | (true):(false))) |
7123 | { |
7124 | // |
7125 | // We can't set a patch in the frame -- we need |
7126 | // to trap returning from this frame instead. |
7127 | // |
7128 | // Note: if we're in the TRACE_MGR_PUSH case from |
7129 | // above, then we must place a patch where the |
7130 | // TraceManager function told us to, since we can't |
7131 | // actually unwind from here. |
7132 | // |
7133 | if (patch->trace.GetTraceType() != TRACE_MGR_PUSH) |
7134 | { |
7135 | _ASSERTE(fSafeToDoStackTrace); |
7136 | LOG((LF_CORDB,LL_INFO10000,"TSO for non TRACE_MGR_PUSH case\n" )); |
7137 | TrapStepOut(&info); |
7138 | } |
7139 | else |
7140 | { |
7141 | LOG((LF_CORDB, LL_INFO10000, |
7142 | "TSO for TRACE_MGR_PUSH case." )); |
7143 | |
7144 | // We'd better have a valid return address. |
7145 | _ASSERTE(traceManagerRetAddr != NULL); |
7146 | |
7147 | if (g_pEEInterface->IsManagedNativeCode(traceManagerRetAddr)) |
7148 | { |
7149 | // Grab the jit info for the method. |
7150 | DebuggerJitInfo *dji; |
7151 | dji = g_pDebugger->GetJitInfoFromAddr((TADDR) traceManagerRetAddr); |
7152 | |
7153 | MethodDesc * mdNative = (dji == NULL) ? |
7154 | g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(traceManagerRetAddr)) : dji->m_fd; |
7155 | _ASSERTE(mdNative != NULL); |
7156 | |
7157 | // Find the method that the return is to. |
7158 | _ASSERTE(g_pEEInterface->GetFunctionAddress(mdNative) != NULL); |
7159 | SIZE_T offsetRet = dac_cast<TADDR>(traceManagerRetAddr - |
7160 | g_pEEInterface->GetFunctionAddress(mdNative)); |
7161 | |
7162 | // Place the patch. |
7163 | AddBindAndActivateNativeManagedPatch(mdNative, |
7164 | dji, |
7165 | offsetRet, |
7166 | LEAF_MOST_FRAME, |
7167 | NULL); |
7168 | |
7169 | LOG((LF_CORDB, LL_INFO10000, |
7170 | "DS::TP: normally managed code AddPatch" |
7171 | " in %s::%s, offset 0x%x\n" , |
7172 | mdNative->m_pszDebugClassName, |
7173 | mdNative->m_pszDebugMethodName, |
7174 | offsetRet)); |
7175 | } |
7176 | else |
7177 | { |
7178 | // We're hitting this code path with MC++ assemblies |
7179 | // that have an unmanaged entry point so the stub returns to CallDescrWorker. |
7180 | _ASSERTE(g_pEEInterface->GetNativeCodeMethodDesc(dac_cast<PCODE>(patch->address))->IsILStub()); |
7181 | } |
7182 | |
7183 | } |
7184 | |
7185 | m_reason = STEP_NORMAL; //we tried to do a STEP_CALL, but since it didn't |
7186 | //work, we're doing what amounts to a normal step. |
7187 | LOG((LF_CORDB,LL_INFO10000,"DS 0x%x m_reason = STEP_NORMAL" |
7188 | "(attempted call thru stub manager, SM didn't know where" |
7189 | " we're going, so did a step out to original call\n" ,this)); |
7190 | } |
7191 | else |
7192 | { |
7193 | m_reason = STEP_CALL; |
7194 | } |
7195 | |
7196 | EnableTraceCall(LEAF_MOST_FRAME); |
7197 | EnableUnwind(m_fp); |
7198 | |
7199 | return TPR_IGNORE; |
7200 | } |
7201 | else |
7202 | { |
7203 | // @todo - when would we hit this codepath? |
7204 | // If we're not in managed, then we should have pushed a frame onto the Thread's frame chain, |
7205 | // and thus we should still safely be able to do a stackwalk here. |
7206 | _ASSERTE(fSafeToDoStackTrace); |
7207 | if (DetectHandleInterceptors(&info) ) |
7208 | { |
7209 | return TPR_IGNORE; //don't actually want to stop |
7210 | } |
7211 | |
7212 | LOG((LF_CORDB, LL_INFO10000, |
7213 | "Unmanaged step patch hit at 0x%x\n" , offset)); |
7214 | |
7215 | StackTraceTicket ticket(patch); |
7216 | PrepareForSendEvent(ticket); |
7217 | return TPR_TRIGGER; |
7218 | } |
7219 | } // end (module == NULL) |
7220 | |
// If we're inside an interceptor but don't want to be, then we'll set a
// patch outside the current function.
7223 | _ASSERTE(fSafeToDoStackTrace); |
7224 | if (DetectHandleInterceptors(&info) ) |
7225 | { |
7226 | return TPR_IGNORE; //don't actually want to stop |
7227 | } |
7228 | |
7229 | LOG((LF_CORDB,LL_INFO10000, "DS: m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n" , |
7230 | m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue())); |
7231 | |
7232 | if (IsInRange(offset, m_range, m_rangeCount, &info) || |
7233 | ShouldContinueStep( &info, offset)) |
7234 | { |
7235 | LOG((LF_CORDB, LL_INFO10000, |
7236 | "Intermediate step patch hit at 0x%x\n" , offset)); |
7237 | |
7238 | if (!TrapStep(&info, m_stepIn)) |
7239 | TrapStepNext(&info); |
7240 | |
7241 | EnableUnwind(m_fp); |
7242 | return TPR_IGNORE; |
7243 | } |
7244 | else |
7245 | { |
7246 | LOG((LF_CORDB, LL_INFO10000, "Step patch hit at 0x%x\n" , offset)); |
7247 | |
7248 | // For a JMC stepper, we have an additional constraint: |
7249 | // skip non-user code. So if we're still in non-user code, then |
7250 | // we've got to keep going |
7251 | DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(module, md); |
7252 | |
7253 | if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi)) |
7254 | { |
7255 | return TPR_IGNORE; |
7256 | } |
7257 | |
7258 | StackTraceTicket ticket(patch); |
7259 | PrepareForSendEvent(ticket); |
7260 | return TPR_TRIGGER; |
7261 | } |
7262 | } |
7263 | |
7264 | // Return true if this should be skipped. |
7265 | // For a non-jmc stepper, we don't care about non-user code, so we |
7266 | // don't skip it and so we always return false. |
7267 | bool DebuggerStepper::DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo) |
7268 | { |
7269 | LIMITED_METHOD_CONTRACT; |
7270 | |
7271 | return false; |
7272 | } |
7273 | |
7274 | // For regular steppers, trace-call is just a trace-call. |
7275 | void DebuggerStepper::EnablePolyTraceCall() |
7276 | { |
7277 | this->EnableTraceCall(LEAF_MOST_FRAME); |
7278 | } |
7279 | |
7280 | // Traditional steppers enable MethodEnter as a back-stop for step-in. |
7281 | // We hope that the stub-managers will predict the step-in for us, |
7282 | // but in case they don't the Method-Enter should catch us. |
7283 | // MethodEnter is not fully correct for traditional steppers for a few reasons: |
7284 | // - doesn't handle step-in to native |
7285 | // - stops us *after* the prolog (a traditional stepper can stop us before the prolog). |
7286 | // - only works for methods that have the JMC probe. That can exclude all optimized code. |
7287 | void DebuggerStepper::TriggerMethodEnter(Thread * thread, |
7288 | DebuggerJitInfo *dji, |
7289 | const BYTE * ip, |
7290 | FramePointer fp) |
7291 | { |
7292 | _ASSERTE(dji != NULL); |
7293 | _ASSERTE(thread != NULL); |
7294 | _ASSERTE(ip != NULL); |
7295 | |
7296 | |
7297 | |
7298 | _ASSERTE(this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER); |
7299 | |
7300 | _ASSERTE(!IsFrozen()); |
7301 | |
7302 | MethodDesc * pDesc = dji->m_fd; |
7303 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n" , |
7304 | pDesc, ip)); |
7305 | |
// Steppers don't stop in lightweight (no-metadata) methods, e.g. lightweight delegates. Just return & keep executing.
7307 | if (pDesc->IsNoMetadata()) |
7308 | { |
7309 | LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n" )); |
7310 | return; |
7311 | } |
7312 | |
7313 | // This is really just a heuristic. We don't want to trigger a JMC probe when we are |
7314 | // executing in an IL stub, or in one of the marshaling methods called by the IL stub. |
7315 | // The problem is that the IL stub can call into arbitrary code, including custom marshalers. |
7316 | // In that case the user has to put a breakpoint to stop in the code. |
7317 | if (g_pEEInterface->DetectHandleILStubs(thread)) |
7318 | { |
7319 | return; |
7320 | } |
7321 | |
7322 | #ifdef _DEBUG |
7323 | // To help trace down if a problem is related to a stubmanager, |
7324 | // we add a knob that lets us skip the MethodEnter checks. This lets tests directly |
7325 | // go against the Stub-managers w/o the MethodEnter check backstops. |
7326 | int fSkip = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_DbgSkipMEOnStep); |
7327 | if (fSkip) |
7328 | { |
7329 | return; |
7330 | } |
7331 | |
7332 | // See EnableJMCBackStop() for details here. This check just makes sure that we don't fire |
7333 | // the assert if we end up in the method we started in (which could happen if we trace call |
7334 | // instructions before the JMC probe). |
7335 | // m_StepInStartMethod may be null (if this step-in didn't start from managed code). |
if ((m_StepInStartMethod != NULL) &&
    (m_StepInStartMethod != pDesc) &&
    (!m_StepInStartMethod->IsLCGMethod()))
7338 | { |
7339 | // Since normal step-in should stop us at the prolog, and TME is after the prolog, |
7340 | // if a stub-manager did successfully find the address, we should get a TriggerPatch first |
7341 | // at native offset 0 (before the prolog) and before we get the TME. That means if |
7342 | // we do get the TME, then there was no stub-manager to find us. |
7343 | |
7344 | SString sLog; |
7345 | StubManager::DbgGetLog(&sLog); |
7346 | |
7347 | // Assert b/c the Stub-manager should have caught us first. |
7348 | // We don't want people relying on TriggerMethodEnter as the real implementation for Traditional Step-in |
7349 | // (see above for reasons why). However, using TME will provide a bandage for the final retail product |
7350 | // in cases where we are missing a stub-manager. |
7351 | CONSISTENCY_CHECK_MSGF(false, ( |
7352 | "\nThe Stubmanagers failed to identify and trace a stub on step-in. The stub-managers for this code-path path need to be fixed.\n" |
7353 | "See http://team/sites/clrdev/Devdocs/StubManagers.rtf for more information on StubManagers.\n" |
7354 | "Stepper this=0x%p, startMethod='%s::%s'\n" |
7355 | "---------------------------------\n" |
7356 | "Stub manager log:\n%S" |
7357 | "\n" |
7358 | "The thread is now in managed method '%s::%s'.\n" |
7359 | "---------------------------------\n" , |
7360 | this, |
7361 | ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugClassName), |
7362 | ((m_StepInStartMethod == NULL) ? "unknown" : m_StepInStartMethod->m_pszDebugMethodName), |
7363 | sLog.GetUnicode(), |
7364 | pDesc->m_pszDebugClassName, pDesc->m_pszDebugMethodName |
7365 | )); |
7366 | } |
7367 | #endif |
7368 | |
7369 | |
7370 | |
// Place a patch to stop us.
7372 | // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step. |
7373 | AddBindAndActivateNativeManagedPatch(pDesc, |
7374 | dji, |
7375 | CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip), |
7376 | fp, |
7377 | NULL // AppDomain |
7378 | ); |
7379 | |
7380 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n" )); |
7381 | |
7382 | // Once we resume, we'll go hit that patch (duh, we patched our return address) |
7383 | // Furthermore, we know the step will complete with reason = call, so set that now. |
7384 | m_reason = STEP_CALL; |
7385 | } |
7386 | |
7387 | |
7388 | // We may have single-stepped over a return statement to land us up a frame. |
7389 | // Or we may have single-stepped through a method. |
7390 | // We never single-step into calls (we place a patch at the call destination). |
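// For example, a step-over of a "ret" instruction lands the single-step
// exception at the caller's return address; the code below then decides
// whether that spot is still inside the stepper's ranges (keep stepping)
// or is a stopping point.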
7391 | bool DebuggerStepper::TriggerSingleStep(Thread *thread, const BYTE *ip) |
7392 | { |
7393 | LOG((LF_CORDB,LL_INFO10000,"DS:TSS this:0x%p, @ ip:0x%p\n" , this, ip)); |
7394 | |
7395 | _ASSERTE(!IsFrozen()); |
7396 | |
7397 | // User break should only do a step-out and never actually need a singlestep flag. |
7398 | _ASSERTE(GetDCType() != DEBUGGER_CONTROLLER_USER_BREAKPOINT); |
7399 | |
7400 | // |
7401 | // there's one weird case here - if the last instruction generated |
7402 | // a hardware exception, we may be in lala land. If so, rely on the unwind |
7403 | // handler to figure out what happened. |
7404 | // |
7405 | // <REVISIT_TODO>@todo this could be wrong when we have the incremental collector going</REVISIT_TODO> |
7406 | // |
7407 | |
7408 | if (!g_pEEInterface->IsManagedNativeCode(ip)) |
7409 | { |
7410 | LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n" )); |
7411 | DisableSingleStep(); |
7412 | return false; |
7413 | } |
7414 | |
7415 | // If we EnC the method, we'll blast the function address, |
// and so have to get it from the DJI that we'll have. If
7417 | // we haven't gotten debugger info about a regular function, then |
7418 | // we'll have to get the info from the EE, which will be valid |
7419 | // since we're standing in the function at this point, and |
7420 | // EnC couldn't have happened yet. |
7421 | MethodDesc *fd = g_pEEInterface->GetNativeCodeMethodDesc((PCODE)ip); |
7422 | |
7423 | SIZE_T offset; |
7424 | DebuggerJitInfo *dji = g_pDebugger->GetJitInfoFromAddr((TADDR) ip); |
7425 | offset = CodeRegionInfo::GetCodeRegionInfo(dji, fd).AddressToOffset(ip); |
7426 | |
7427 | ControllerStackInfo info; |
7428 | |
7429 | // Safe to stackwalk b/c we've already checked that our IP is in crawlable code. |
7430 | StackTraceTicket ticket(ip); |
7431 | info.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL); |
7432 | |
7433 | // This is a special case where we return from a managed method back to an IL stub. This can |
// only happen if there are no more managed method frames closer to the root and we want to perform
7435 | // a step out, or if we step-next off the end of a method called by an IL stub. In either case, |
7436 | // we'll get a single step in an IL stub, which we want to ignore. We also want to enable trace |
7437 | // call here, just in case this IL stub is about to call the managed target (in the reverse interop case). |
7438 | if (fd->IsILStub()) |
7439 | { |
7440 | LOG((LF_CORDB,LL_INFO10000, "DS::TSS: not in managed code, Returning false (case 0)!\n" )); |
7441 | if (this->GetDCType() == DEBUGGER_CONTROLLER_STEPPER) |
7442 | { |
7443 | EnableTraceCall(info.m_activeFrame.fp); |
7444 | } |
7445 | else if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER) |
7446 | { |
7447 | EnableMethodEnter(); |
7448 | } |
7449 | DisableSingleStep(); |
7450 | return false; |
7451 | } |
7452 | |
7453 | DisableAll(); |
7454 | |
7455 | LOG((LF_CORDB,LL_INFO10000, "DS::TSS m_fp:0x%p, activeFP:0x%p fpExc:0x%p\n" , |
7456 | m_fp.GetSPValue(), info.m_activeFrame.fp.GetSPValue(), m_fpException.GetSPValue())); |
7457 | |
7458 | if (DetectHandleLCGMethods((PCODE)ip, fd, &info)) |
7459 | { |
7460 | return false; |
7461 | } |
7462 | |
7463 | if (IsInRange(offset, m_range, m_rangeCount, &info) || |
7464 | ShouldContinueStep( &info, offset)) |
7465 | { |
7466 | if (!TrapStep(&info, m_stepIn)) |
7467 | TrapStepNext(&info); |
7468 | |
7469 | EnableUnwind(m_fp); |
7470 | |
7471 | LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning false Case 1!\n" )); |
7472 | return false; |
7473 | } |
7474 | else |
7475 | { |
7476 | LOG((LF_CORDB,LL_INFO10000, "DS::TSS: Returning true Case 2 for reason STEP_%02x!\n" , m_reason)); |
7477 | |
7478 | // @todo - when would a single-step (not a patch) land us in user-code? |
7479 | // For a JMC stepper, we have an additional constraint: |
7480 | // skip non-user code. So if we're still in non-user code, then |
7481 | // we've got to keep going |
7482 | DebuggerMethodInfo * dmi = g_pDebugger->GetOrCreateMethodInfo(fd->GetModule(), fd->GetMemberDef()); |
7483 | |
7484 | if ((dmi != NULL) && DetectHandleNonUserCode(&info, dmi)) |
7485 | return false; |
7486 | |
7487 | PrepareForSendEvent(ticket); |
7488 | return true; |
7489 | } |
7490 | } |
7491 | |
7492 | void DebuggerStepper::TriggerTraceCall(Thread *thread, const BYTE *ip) |
7493 | { |
7494 | LOG((LF_CORDB,LL_INFO10000,"DS:TTC this:0x%x, @ ip:0x%x\n" ,this,ip)); |
7495 | TraceDestination trace; |
7496 | |
7497 | if (IsFrozen()) |
7498 | { |
7499 | LOG((LF_CORDB,LL_INFO10000,"DS:TTC exit b/c of Frozen\n" )); |
7500 | return; |
7501 | } |
7502 | |
7503 | // This is really just a heuristic. We don't want to trigger a JMC probe when we are |
7504 | // executing in an IL stub, or in one of the marshaling methods called by the IL stub. |
7505 | // The problem is that the IL stub can call into arbitrary code, including custom marshalers. |
// In that case the user has to set a breakpoint to stop in the code.
7507 | if (g_pEEInterface->DetectHandleILStubs(thread)) |
7508 | { |
7509 | return; |
7510 | } |
7511 | |
7512 | if (g_pEEInterface->TraceStub(ip, &trace) |
7513 | && g_pEEInterface->FollowTrace(&trace) |
7514 | && PatchTrace(&trace, LEAF_MOST_FRAME, |
7515 | (m_rgfMappingStop&STOP_UNMANAGED)?(true):(false))) |
7516 | { |
7517 | // !!! We really want to know ahead of time if PatchTrace will succeed. |
7518 | DisableAll(); |
7519 | PatchTrace(&trace, LEAF_MOST_FRAME, (m_rgfMappingStop&STOP_UNMANAGED)? |
7520 | (true):(false)); |
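// Note: the condition above already ran PatchTrace once, and DisableAll()
// presumably removed whatever patch that first call placed; the second call
// here re-places it with everything else disabled. As the remark above says,
// it would be cleaner to know up front whether PatchTrace will succeed and
// only patch once.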
7521 | |
7522 | // If we're triggering a trace call, and we're following a trace into either managed code or unjitted managed |
7523 | // code, then we need to update our stepper's reason to STEP_CALL to reflect the fact that we're going to land |
7524 | // into a new function because of a call. |
7525 | if ((trace.GetTraceType() == TRACE_UNJITTED_METHOD) || (trace.GetTraceType() == TRACE_MANAGED)) |
7526 | { |
7527 | m_reason = STEP_CALL; |
7528 | } |
7529 | |
7530 | EnableUnwind(m_fp); |
7531 | |
7532 | LOG((LF_CORDB, LL_INFO10000, "DS::TTC potentially a step call!\n" )); |
7533 | } |
7534 | } |
7535 | |
7536 | void DebuggerStepper::TriggerUnwind(Thread *thread, |
7537 | MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset, |
7538 | FramePointer fp, |
7539 | CorDebugStepReason unwindReason) |
7540 | { |
7541 | CONTRACTL |
7542 | { |
7543 | SO_NOT_MAINLINE; |
7544 | THROWS; // from GetJitInfo |
7545 | GC_NOTRIGGER; // don't send IPC events |
7546 | MODE_COOPERATIVE; // TriggerUnwind always is coop |
7547 | |
7548 | PRECONDITION(!IsDbgHelperSpecialThread()); |
7549 | PRECONDITION(fd->IsDynamicMethod() || (pDJI != NULL)); |
7550 | } |
7551 | CONTRACTL_END; |
7552 | |
7553 | LOG((LF_CORDB,LL_INFO10000,"DS::TU this:0x%p, in %s::%s, offset 0x%p " |
7554 | "frame:0x%p unwindReason:0x%x\n" , this, fd->m_pszDebugClassName, |
7555 | fd->m_pszDebugMethodName, offset, fp.GetSPValue(), unwindReason)); |
7556 | |
7557 | _ASSERTE(unwindReason == STEP_EXCEPTION_FILTER || unwindReason == STEP_EXCEPTION_HANDLER); |
7558 | |
7559 | if (IsFrozen()) |
7560 | { |
7561 | LOG((LF_CORDB,LL_INFO10000,"DS:TTC exit b/c of Frozen\n" )); |
7562 | return; |
7563 | } |
7564 | |
7565 | if (IsCloserToRoot(fp, GetUnwind())) |
7566 | { |
// Handler is in a parent frame. For all steps (in,out,over)
7568 | // we want to stop in the handler. |
7569 | // This will be like a Step Out, so we don't need any range. |
7570 | ResetRange(); |
7571 | } |
7572 | else |
7573 | { |
7574 | // Handler/Filter is in the same frame as the stepper |
7575 | // For a step-in/over, we want to patch the handler/filter. |
7576 | // But for a step-out, we want to just continue executing (and don't change |
7577 | // the step-reason either). |
7578 | if (m_eMode == cStepOut) |
7579 | { |
7580 | LOG((LF_CORDB, LL_INFO10000, "DS::TU Step-out, returning for same-frame case.\n" )); |
7581 | return; |
7582 | } |
7583 | |
7584 | } |
7585 | |
7586 | // Remember the origin of the exception, so that if the step looks like |
7587 | // it's going to complete in a different frame, but the code comes from the |
7588 | // same frame as the one we're in, we won't stop twice in the "same" range |
7589 | m_fpException = fp; |
7590 | m_fdException = fd; |
7591 | |
7592 | // |
7593 | // An exception is exiting the step region. Set a patch on |
7594 | // the filter/handler. |
7595 | // |
7596 | |
7597 | DisableAll(); |
7598 | |
7599 | BOOL fOk; |
7600 | fOk = AddBindAndActivateNativeManagedPatch(fd, pDJI, offset, LEAF_MOST_FRAME, NULL); |
7601 | |
7602 | // Since we're unwinding to an already executed method, the method should already |
7603 | // be jitted and placing the patch should work. |
7604 | CONSISTENCY_CHECK_MSGF(fOk, ("Failed to place patch at TriggerUnwind.\npThis=0x%p md=0x%p, native offset=0x%x\n" , this, fd, offset)); |
7605 | |
7606 | LOG((LF_CORDB,LL_INFO100000,"Step reason:%s\n" , unwindReason==STEP_EXCEPTION_FILTER |
7607 | ? "STEP_EXCEPTION_FILTER" :"STEP_EXCEPTION_HANDLER" )); |
7608 | m_reason = unwindReason; |
7609 | } |
7610 | |
7611 | |
7612 | // Prepare for sending an event. |
7613 | // This is called 1:1 w/ SendEvent, but this guy can be called in a GC_TRIGGERABLE context |
7614 | // whereas SendEvent is pretty strict. |
7615 | // Caller ensures that it's safe to run a stack trace. |
7616 | void DebuggerStepper::PrepareForSendEvent(StackTraceTicket ticket) |
7617 | { |
7618 | #ifdef _DEBUG |
7619 | _ASSERTE(!m_fReadyToSend); |
7620 | m_fReadyToSend = true; |
7621 | #endif |
7622 | |
7623 | LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n" , m_fpStepInto.GetSPValue())); |
7624 | |
7625 | if (m_fpStepInto != LEAF_MOST_FRAME) |
7626 | { |
7627 | ControllerStackInfo csi; |
7628 | csi.GetStackInfo(ticket, GetThread(), LEAF_MOST_FRAME, NULL); |
7629 | |
7630 | if (csi.m_targetFrameFound && |
7631 | #if !defined(WIN64EXCEPTIONS) |
7632 | IsCloserToRoot(m_fpStepInto, csi.m_activeFrame.fp) |
7633 | #else |
7634 | IsCloserToRoot(m_fpStepInto, (csi.m_activeFrame.IsNonFilterFuncletFrame() ? csi.m_returnFrame.fp : csi.m_activeFrame.fp)) |
7635 | #endif // WIN64EXCEPTIONS |
7636 | ) |
7637 | |
7638 | { |
7639 | m_reason = STEP_CALL; |
7640 | LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x STEP_CALL!\n" , this)); |
7641 | } |
7642 | #ifdef _DEBUG |
7643 | else |
7644 | { |
7645 | LOG((LF_CORDB, LL_INFO10000, "DS::SE this:0x%x not a step call!\n" , this)); |
7646 | } |
7647 | #endif |
7648 | } |
7649 | |
7650 | #ifdef _DEBUG |
7651 | // Steppers should only stop in interesting code. |
7652 | if (this->GetDCType() == DEBUGGER_CONTROLLER_JMC_STEPPER) |
7653 | { |
7654 | // If we're at either a patch or SS, we'll have a context. |
7655 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(GetThread()); |
if (context != NULL)
7657 | { |
7658 | void * pIP = CORDbgGetIP(reinterpret_cast<DT_CONTEXT *>(context)); |
7659 | |
7660 | DebuggerJitInfo * dji = g_pDebugger->GetJitInfoFromAddr((TADDR) pIP); |
7661 | DebuggerMethodInfo * dmi = NULL; |
7662 | if (dji != NULL) |
7663 | { |
7664 | dmi = dji->m_methodInfo; |
7665 | |
7666 | CONSISTENCY_CHECK_MSGF(dmi->IsJMCFunction(), ("JMC stepper %p stopping in non-jmc method, MD=%p, '%s::%s'" , |
7667 | this, dji->m_fd, dji->m_fd->m_pszDebugClassName, dji->m_fd->m_pszDebugMethodName)); |
7668 | |
7669 | } |
7670 | |
7671 | |
7672 | } |
7673 | } |
7674 | |
7675 | #endif |
7676 | } |
7677 | |
7678 | bool DebuggerStepper::SendEvent(Thread *thread, bool fIpChanged) |
7679 | { |
7680 | CONTRACTL |
7681 | { |
7682 | SO_NOT_MAINLINE; |
7683 | NOTHROW; |
7684 | SENDEVENT_CONTRACT_ITEMS; |
7685 | } |
7686 | CONTRACTL_END; |
7687 | |
// We should practically never have a step interrupted by SetIp.
7689 | // We'll still go ahead and send the Step-complete event because we've already |
7690 | // deactivated our triggers by now and we haven't placed any new patches to catch us. |
7691 | // We assert here because we don't believe we'll ever be able to hit this scenario. |
7692 | // This is technically an issue, but we consider it benign enough to leave in. |
7693 | _ASSERTE(!fIpChanged || !"Stepper interupted by SetIp" ); |
7694 | |
7695 | LOG((LF_CORDB, LL_INFO10000, "DS::SE m_fpStepInto:0x%x\n" , m_fpStepInto.GetSPValue())); |
7696 | |
7697 | _ASSERTE(m_fReadyToSend); |
7698 | _ASSERTE(GetThread() == thread); |
7699 | |
7700 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(thread); |
7701 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
7702 | |
7703 | // We need to send the stepper and delete the controller because our stepper |
7704 | // no longer has any patches or other triggers that will let it send the step-complete event. |
7705 | g_pDebugger->SendStep(thread, context, this, m_reason); |
7706 | |
7707 | this->Delete(); |
7708 | |
7709 | #ifdef _DEBUG |
7710 | // Now that we've sent the event, we can stop recording information. |
7711 | StubManager::DbgFinishLog(); |
7712 | #endif |
7713 | |
7714 | return true; |
7715 | } |
7716 | |
7717 | void DebuggerStepper::ResetRange() |
7718 | { |
7719 | if (m_range) |
7720 | { |
7721 | TRACE_FREE(m_range); |
7722 | DeleteInteropSafe(m_range); |
7723 | |
7724 | m_range = NULL; |
7725 | } |
7726 | } |
7727 | |
7728 | //----------------------------------------------------------------------------- |
7729 | // Return true if this stepper is alive, but frozen. (we freeze when the stepper |
7730 | // enters a nested func-eval). |
7731 | //----------------------------------------------------------------------------- |
7732 | bool DebuggerStepper::IsFrozen() |
7733 | { |
7734 | return (m_cFuncEvalNesting > 0); |
7735 | } |
7736 | |
7737 | //----------------------------------------------------------------------------- |
7738 | // Returns true if this stepper is 'dead' - which happens if a non-frozen stepper |
7739 | // gets a func-eval exit. |
7740 | //----------------------------------------------------------------------------- |
7741 | bool DebuggerStepper::IsDead() |
7742 | { |
7743 | return (m_cFuncEvalNesting < 0); |
7744 | } |
7745 | |
7746 | // * ------------------------------------------------------------------------ |
7747 | // * DebuggerJMCStepper routines |
7748 | // * ------------------------------------------------------------------------ |
7749 | DebuggerJMCStepper::DebuggerJMCStepper(Thread *thread, |
7750 | CorDebugUnmappedStop rgfMappingStop, |
7751 | CorDebugIntercept interceptStop, |
7752 | AppDomain *appDomain) : |
7753 | DebuggerStepper(thread, rgfMappingStop, interceptStop, appDomain) |
7754 | { |
7755 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper ctor, this=%p\n" , this)); |
7756 | } |
7757 | |
7758 | DebuggerJMCStepper::~DebuggerJMCStepper() |
7759 | { |
7760 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper dtor, this=%p\n" , this)); |
7761 | } |
7762 | |
7763 | // If we're a JMC stepper, then don't stop in non-user code. |
7764 | bool DebuggerJMCStepper::IsInterestingFrame(FrameInfo * pFrame) |
7765 | { |
7766 | CONTRACTL |
7767 | { |
7768 | THROWS; |
7769 | MODE_ANY; |
7770 | GC_NOTRIGGER; |
7771 | } |
7772 | CONTRACTL_END; |
7773 | |
7774 | DebuggerMethodInfo *pInfo = pFrame->GetMethodInfoFromFrameOrThrow(); |
7775 | _ASSERTE(pInfo != NULL); // throws on failure |
7776 | |
7777 | bool fIsUserCode = pInfo->IsJMCFunction(); |
7778 | |
7779 | |
7780 | LOG((LF_CORDB, LL_INFO1000000, "DS::TSO, frame '%s::%s' is '%s' code\n" , |
7781 | pFrame->DbgGetClassName(), pFrame->DbgGetMethodName(), |
7782 | fIsUserCode ? "user" : "non-user" )); |
7783 | |
7784 | return fIsUserCode; |
7785 | } |
7786 | |
// A JMC stepper's step-next stops at the next piece of user code that runs.
// This may be a step-out, or any user code called before that.
// E.g., given the call tree A1 -> B1 -> { A2, B2 -> B3 -> A3 }
// (A* = user code, B* = non-user code),
// TrapStepNext at the end of A2 should land us in A3.
7791 | void DebuggerJMCStepper::TrapStepNext(ControllerStackInfo *info) |
7792 | { |
7793 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TrapStepNext, this=%p\n" , this)); |
7794 | EnableMethodEnter(); |
7795 | |
7796 | // This will place a patch up the stack and set m_reason = STEP_RETURN. |
7797 | // If we end up hitting JMC before that patch, we'll hit TriggerMethodEnter |
7798 | // and that will set our reason to STEP_CALL. |
7799 | TrapStepOut(info); |
7800 | } |
7801 | |
7802 | // ip - target address for call instruction |
7803 | bool DebuggerJMCStepper::TrapStepInHelper( |
7804 | ControllerStackInfo * pInfo, |
7805 | const BYTE * ipCallTarget, |
7806 | const BYTE * ipNext, |
7807 | bool fCallingIntoFunclet) |
7808 | { |
7809 | #ifndef WIN64EXCEPTIONS |
7810 | // There are no funclets on x86. |
7811 | _ASSERTE(!fCallingIntoFunclet); |
7812 | #endif |
7813 | |
7814 | // If we are calling into a funclet, then we can't rely on the JMC probe to stop us because there are no |
7815 | // JMC probes in funclets. Instead, we have to perform a traditional step-in here. |
7816 | if (fCallingIntoFunclet) |
7817 | { |
7818 | TraceDestination td; |
7819 | td.InitForManaged(reinterpret_cast<PCODE>(ipCallTarget)); |
7820 | PatchTrace(&td, LEAF_MOST_FRAME, false); |
7821 | |
7822 | // If this succeeds, then we still need to put a patch at the return address. This is done below. |
7823 | // If this fails, then we definitely need to put a patch at the return address to trap the thread. |
7824 | // So in either case, we have to execute the rest of this function. |
7825 | } |
7826 | |
7827 | MethodDesc * pDesc = pInfo->m_activeFrame.md; |
7828 | DebuggerJitInfo *dji = NULL; |
7829 | |
7830 | // We may not have a DJI if we're in an attach case. We should still be able to do a JMC-step in though. |
7831 | // So NULL is ok here. |
7832 | dji = g_pDebugger->GetJitInfo(pDesc, (const BYTE*) ipNext); |
7833 | |
7834 | |
7835 | // Place patch after call, which is at ipNext. Note we don't need an IL->Native map here |
7836 | // since we disassembled native code to find the ip after the call. |
7837 | SIZE_T offset = CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ipNext); |
7838 | |
7839 | |
7840 | LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TSIH, at '%s::%s', calling=0x%p, next=0x%p, offset=%d\n" , |
7841 | pDesc->m_pszDebugClassName, |
7842 | pDesc->m_pszDebugMethodName, |
7843 | ipCallTarget, ipNext, |
7844 | offset)); |
7845 | |
7846 | // Place a patch at the native address (inside the managed method). |
7847 | AddBindAndActivateNativeManagedPatch(pInfo->m_activeFrame.md, |
7848 | dji, |
7849 | offset, |
7850 | pInfo->m_returnFrame.fp, |
7851 | NULL); |
7852 | |
7853 | EnableMethodEnter(); |
7854 | |
7855 | // Return true means that we want to let the stepper run free. It will either |
7856 | // hit the patch after the call instruction or it will hit a TriggerMethodEnter. |
7857 | return true; |
7858 | } |
7859 | |
7860 | // For JMC-steppers, we don't enable trace-call; we enable Method-Enter. |
7861 | void DebuggerJMCStepper::EnablePolyTraceCall() |
7862 | { |
7863 | _ASSERTE(!IsFrozen()); |
7864 | |
7865 | this->EnableMethodEnter(); |
7866 | } |
7867 | |
7868 | // Return true if this is non-user code. This means we've setup the proper patches & |
7869 | // triggers, etc and so we expect the controller to just run free. |
7870 | // This is called when all other stepping criteria are met and we're about to |
7871 | // send a step-complete. For JMC, this is when we see if we're in non-user code |
// and if so, continue stepping instead of sending the step-complete.
7873 | // Return false if this is user-code. |
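// In sketch form, the three cases handled below are:
//   step-out  : TrapStepOut (stop at the next interesting frame up the stack)
//   step-in   : EnableMethodEnter + TrapStepOut (run to the next user code anywhere)
//   step-over : a traditional step-out of the current frame, then behave like a
//               step-in (m_stepIn is set to true)
// In every case we re-arm EnableUnwind(m_fp) and return true ("keep going").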
7874 | bool DebuggerJMCStepper::DetectHandleNonUserCode(ControllerStackInfo *pInfo, DebuggerMethodInfo * dmi) |
7875 | { |
7876 | _ASSERTE(dmi != NULL); |
7877 | bool fIsUserCode = dmi->IsJMCFunction(); |
7878 | |
7879 | if (!fIsUserCode) |
7880 | { |
7881 | LOG((LF_CORDB, LL_INFO10000, "JMC stepper stopped in non-user code, continuing.\n" )); |
7882 | // Not-user code, we want to skip through this. |
7883 | |
7884 | // We may be here while trying to step-out. |
7885 | // Step-out just means stop at the first interesting frame above us. |
7886 | // So JMC TrapStepOut won't patch a non-user frame. |
7887 | // But if we're skipping over other stuff (prolog, epilog, interceptors, |
7888 | // trace calls), then we may still be in the middle of non-user |
7889 | //_ASSERTE(m_eMode != cStepOut); |
7890 | |
7891 | if (m_eMode == cStepOut) |
7892 | { |
7893 | TrapStepOut(pInfo); |
7894 | } |
7895 | else if (m_stepIn) |
7896 | { |
7897 | EnableMethodEnter(); |
7898 | TrapStepOut(pInfo); |
// Run until we hit the next piece of user code.
}
else
{
// Do a traditional step-out since we just want to go up 1 frame.
TrapStepOut(pInfo, true); // force traditional step out.
7903 | |
7904 | |
7905 | // If we're not in the original frame anymore, then |
7906 | // If we did a Step-over at the end of a method, and that did a single-step over the return |
7907 | // then we may already be in our parent frame. In that case, we also want to behave |
7908 | // like a step-in and TriggerMethodEnter. |
7909 | if (this->m_fp != pInfo->m_activeFrame.fp) |
7910 | { |
7911 | // If we're a step-over, then we should only be stopped in a parent frame. |
7912 | _ASSERTE(m_stepIn || IsCloserToLeaf(this->m_fp, pInfo->m_activeFrame.fp)); |
7913 | EnableMethodEnter(); |
7914 | } |
7915 | |
7916 | // Step-over shouldn't stop in a frame below us in the same callstack. |
// So we do a traditional step-out of our current frame, which guarantees
7918 | // that. After that, we act just like a step-in. |
7919 | m_stepIn = true; |
7920 | } |
7921 | EnableUnwind(m_fp); |
7922 | |
7923 | // Must keep going... |
7924 | return true; |
7925 | } |
7926 | |
7927 | return false; |
7928 | } |
7929 | |
7930 | // Dispatched right after the prolog of a JMC function. |
7931 | // We may be blocking the GC here, so let's be fast! |
7932 | void DebuggerJMCStepper::TriggerMethodEnter(Thread * thread, |
7933 | DebuggerJitInfo *dji, |
7934 | const BYTE * ip, |
7935 | FramePointer fp) |
7936 | { |
7937 | _ASSERTE(dji != NULL); |
7938 | _ASSERTE(thread != NULL); |
7939 | _ASSERTE(ip != NULL); |
7940 | |
7941 | _ASSERTE(!IsFrozen()); |
7942 | |
7943 | MethodDesc * pDesc = dji->m_fd; |
7944 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, desc=%p, addr=%p\n" , |
7945 | pDesc, ip)); |
7946 | |
7947 | // JMC steppers won't stop in Lightweight delegates. Just return & keep executing. |
7948 | if (pDesc->IsNoMetadata()) |
7949 | { |
7950 | LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, skipping b/c it's lw-codegen\n" )); |
7951 | return; |
7952 | } |
7953 | |
7954 | // Is this user code? |
7955 | DebuggerMethodInfo * dmi = dji->m_methodInfo; |
7956 | bool fIsUserCode = dmi->IsJMCFunction(); |
7957 | |
7958 | |
7959 | LOG((LF_CORDB, LL_INFO100000, "DJMCStepper::TME, '%s::%s' is '%s' code\n" , |
7960 | pDesc->m_pszDebugClassName, |
7961 | pDesc->m_pszDebugMethodName, |
7962 | fIsUserCode ? "user" : "non-user" |
7963 | )); |
7964 | |
7965 | // If this isn't user code, then just return and continue executing. |
7966 | if (!fIsUserCode) |
7967 | return; |
7968 | |
7969 | // MethodEnter is only enabled when we want to stop in a JMC function. |
7970 | // And that's where we are now. So patch the ip and resume. |
7971 | // The stepper will hit the patch, and stop. |
7972 | |
7973 | // It's a good thing we have the fp passed in, because we have no other |
7974 | // way of getting it. We can't do a stack trace here (the stack trace |
// would start at the last pushed Frame, which would miss a lot of managed
7976 | // frames). |
7977 | |
7978 | // Don't bind to a particular AppDomain so that we can do a Cross-Appdomain step. |
7979 | AddBindAndActivateNativeManagedPatch(pDesc, |
7980 | dji, |
7981 | CodeRegionInfo::GetCodeRegionInfo(dji, pDesc).AddressToOffset(ip), |
7982 | fp, |
7983 | NULL // AppDomain |
7984 | ); |
7985 | |
7986 | LOG((LF_CORDB, LL_INFO10000, "DJMCStepper::TME, after setting patch to stop\n" )); |
7987 | |
7988 | // Once we resume, we'll go hit that patch (duh, we patched our return address) |
7989 | // Furthermore, we know the step will complete with reason = call, so set that now. |
7990 | m_reason = STEP_CALL; |
7991 | } |
7992 | |
7993 | |
7994 | |
7995 | //----------------------------------------------------------------------------- |
// Helper to convert from an EE Frame's interception enum to a CorDebugIntercept
// bitfield.
// The intercept value in an EE Frame is a 0-based enumeration (not a bitfield).
// The intercept value for ICorDebug is a bitfield.
8000 | //----------------------------------------------------------------------------- |
8001 | CorDebugIntercept ConvertFrameBitsToDbg(Frame::Interception i) |
8002 | { |
8003 | _ASSERTE(i >= 0 && i < Frame::INTERCEPTION_COUNT); |
8004 | |
8005 | // Since the ee frame is a 0-based enum, we can just use a map. |
8006 | const CorDebugIntercept map[Frame::INTERCEPTION_COUNT] = |
8007 | { |
8008 | // ICorDebug EE Frame |
8009 | INTERCEPT_NONE, // INTERCEPTION_NONE, |
8010 | INTERCEPT_CLASS_INIT, // INTERCEPTION_CLASS_INIT |
8011 | INTERCEPT_EXCEPTION_FILTER, // INTERCEPTION_EXCEPTION |
8012 | INTERCEPT_CONTEXT_POLICY, // INTERCEPTION_CONTEXT |
8013 | INTERCEPT_SECURITY, // INTERCEPTION_SECURITY |
8014 | INTERCEPT_INTERCEPTION, // INTERCEPTION_OTHER |
8015 | }; |
8016 | |
8017 | return map[i]; |
8018 | } |
8019 | |
8020 | //----------------------------------------------------------------------------- |
8021 | // This is a helper class to do a stack walk over a certain range and find all the interceptors. |
8022 | // This allows a JMC stepper to see if there are any interceptors it wants to skip over (though |
8023 | // there's nothing JMC-specific about this). |
8024 | // Note that we only want to walk the stack range that the stepper is operating in. |
8025 | // That's because we don't care about interceptors that happened _before_ the |
8026 | // stepper was created. |
8027 | //----------------------------------------------------------------------------- |
8028 | class InterceptorStackInfo |
8029 | { |
8030 | public: |
8031 | #ifdef _DEBUG |
8032 | InterceptorStackInfo() |
8033 | { |
8034 | // since this ctor just nulls out fpTop (which is already done in Init), we |
8035 | // only need it in debug. |
8036 | m_fpTop = LEAF_MOST_FRAME; |
8037 | } |
8038 | #endif |
8039 | |
8040 | // Get a CorDebugIntercept bitfield that contains a bit for each type of interceptor |
8041 | // if that interceptor is present within our stack-range. |
8042 | // Stack range is from leaf-most up to and including fp |
8043 | CorDebugIntercept GetInterceptorsInRange() |
8044 | { |
8045 | _ASSERTE(m_fpTop != LEAF_MOST_FRAME || !"Must call Init first" ); |
8046 | return (CorDebugIntercept) m_bits; |
8047 | } |
8048 | |
8049 | // Prime the stackwalk. |
8050 | void Init(FramePointer fpTop, Thread *thread, CONTEXT *pContext, BOOL contextValid) |
8051 | { |
8052 | _ASSERTE(fpTop != LEAF_MOST_FRAME); |
8053 | _ASSERTE(thread != NULL); |
8054 | |
8055 | m_bits = 0; |
8056 | m_fpTop = fpTop; |
8057 | |
8058 | LOG((LF_CORDB,LL_EVERYTHING, "ISI::Init - fpTop=%p, thread=%p, pContext=%p, contextValid=%d\n" , |
8059 | fpTop.GetSPValue(), thread, pContext, contextValid)); |
8060 | |
8061 | int result; |
8062 | result = DebuggerWalkStack( |
8063 | thread, |
8064 | LEAF_MOST_FRAME, |
8065 | pContext, |
8066 | contextValid, |
8067 | WalkStack, |
8068 | (void *) this, |
8069 | FALSE |
8070 | ); |
8071 | } |
8072 | |
8073 | |
8074 | protected: |
8075 | // This is a bitfield of all the interceptors we encounter in our stack-range |
8076 | int m_bits; |
8077 | |
8078 | // This is the top of our stack range. |
8079 | FramePointer m_fpTop; |
8080 | |
8081 | static StackWalkAction WalkStack(FrameInfo *pInfo, void *data) |
8082 | { |
8083 | _ASSERTE(pInfo != NULL); |
8084 | _ASSERTE(data != NULL); |
8085 | InterceptorStackInfo * pThis = (InterceptorStackInfo*) data; |
8086 | |
8087 | // If there's an interceptor frame here, then set those |
8088 | // bits in our bitfield. |
8089 | Frame::Interception i = Frame::INTERCEPTION_NONE; |
8090 | Frame * pFrame = pInfo->frame; |
8091 | if ((pFrame != NULL) && (pFrame != FRAME_TOP)) |
8092 | { |
8093 | i = pFrame->GetInterception(); |
8094 | if (i != Frame::INTERCEPTION_NONE) |
8095 | { |
8096 | pThis->m_bits |= (int) ConvertFrameBitsToDbg(i); |
8097 | } |
8098 | } |
8099 | else if (pInfo->HasMethodFrame()) |
8100 | { |
8101 | // Check whether we are executing in a class constructor. |
8102 | _ASSERTE(pInfo->md != NULL); |
8103 | |
8104 | // Need to be careful about an off-by-one error here! Imagine your stack looks like: |
8105 | // Foo.DoSomething() |
8106 | // Foo..cctor <--- step starts/ends in here |
8107 | // Bar.Bar(); |
8108 | // |
8109 | // and your code looks like this: |
8110 | // Foo..cctor() |
8111 | // { |
8112 | // Foo.DoSomething(); <-- JMC step started here |
8113 | // int x = 1; <-- step ends here |
8114 | // } |
8115 | // This stackwalk covers the inclusive range [Foo..cctor, Foo.DoSomething()] so we will see |
8116 | // the static cctor in this walk. However executing inside a static class constructor does not |
8117 | // count as an interceptor. You must start the step outside the static constructor and then call |
8118 | // into it to have an interceptor. Therefore only static constructors that aren't the outermost |
8119 | // frame should be treated as interceptors. |
8120 | if (pInfo->md->IsClassConstructor() && (pInfo->fp != pThis->m_fpTop)) |
8121 | { |
8122 | // We called a class constructor, add the appropriate flag |
8123 | pThis->m_bits |= (int) INTERCEPT_CLASS_INIT; |
8124 | } |
8125 | } |
8126 | LOG((LF_CORDB,LL_EVERYTHING,"ISI::WS- Frame=%p, fp=%p, Frame bits=%x, Cor bits=0x%x\n" , pInfo->frame, pInfo->fp.GetSPValue(), i, pThis->m_bits)); |
8127 | |
8128 | |
8129 | // We can stop once we hit the top frame. |
8130 | if (pInfo->fp == pThis->m_fpTop) |
8131 | { |
8132 | return SWA_ABORT; |
8133 | } |
8134 | else |
8135 | { |
8136 | return SWA_CONTINUE; |
8137 | } |
8138 | } |
8139 | }; |
8140 | |
8141 | |
8142 | |
8143 | |
8144 | // Skip interceptors for JMC steppers. |
8145 | // Return true if we patch something (and thus should keep stepping) |
8146 | // Return false if we're done. |
8147 | bool DebuggerJMCStepper::DetectHandleInterceptors(ControllerStackInfo * info) |
8148 | { |
8149 | LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Start DetectHandleInterceptors\n" )); |
8150 | |
8151 | // For JMC, we could stop very far way from an interceptor. |
8152 | // So we have to do a stack walk to search for interceptors... |
8153 | // If we find any in our stack range (from m_fp ... current fp), then we just do a trap-step-next. |
8154 | |
8155 | // Note that this logic should also work for regular steppers, but we've left that in |
// so as to keep that code-path unchanged.
8157 | |
8158 | // ControllerStackInfo only gives us the bottom 2 frames on the stack, so we ignore it and |
8159 | // have to do our own stack walk. |
8160 | |
8161 | // @todo - for us to properly skip filters, we need to make sure that filters show up in our chains. |
8162 | |
8163 | |
8164 | InterceptorStackInfo info2; |
8165 | CONTEXT *context = g_pEEInterface->GetThreadFilterContext(this->GetThread()); |
8166 | CONTEXT tempContext; |
8167 | |
8168 | _ASSERTE(!ISREDIRECTEDTHREAD(this->GetThread())); |
8169 | |
8170 | if (context == NULL) |
8171 | { |
8172 | info2.Init(this->m_fp, this->GetThread(), &tempContext, FALSE); |
8173 | } |
8174 | else |
8175 | { |
8176 | info2.Init(this->m_fp, this->GetThread(), context, TRUE); |
8177 | } |
8178 | |
8179 | // The following casts are safe on WIN64 platforms. |
8180 | int iOnStack = (int) info2.GetInterceptorsInRange(); |
8181 | int iSkip = ~((int) m_rgfInterceptStop); |
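// Illustrative bit math (assuming, say, m_rgfInterceptStop == INTERCEPT_CLASS_INIT):
// iSkip then has every bit set except INTERCEPT_CLASS_INIT, so (iOnStack & iSkip)
// is non-zero exactly when the stack contains some interceptor type the user did
// NOT ask to stop in -- and in that case we keep stepping past it.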
8182 | |
8183 | LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: iOnStack=%x, iSkip=%x\n" , iOnStack, iSkip)); |
8184 | |
8185 | // If the bits on the stack contain any interceptors we want to skip, then we need to keep going. |
8186 | if ((iOnStack & iSkip) != 0) |
8187 | { |
8188 | LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: keep going!\n" )); |
8189 | TrapStepNext(info); |
8190 | EnableUnwind(m_fp); |
8191 | return true; |
8192 | } |
8193 | |
8194 | LOG((LF_CORDB,LL_INFO10000,"DJMCStepper::DHI: Done!!\n" )); |
8195 | return false; |
8196 | } |
8197 | |
8198 | |
8199 | // * ------------------------------------------------------------------------ |
8200 | // * DebuggerThreadStarter routines |
8201 | // * ------------------------------------------------------------------------ |
8202 | |
8203 | DebuggerThreadStarter::DebuggerThreadStarter(Thread *thread) |
8204 | : DebuggerController(thread, NULL) |
8205 | { |
8206 | LOG((LF_CORDB, LL_INFO1000, "DTS::DTS: this:0x%x Thread:0x%x\n" , |
8207 | this, thread)); |
8208 | |
8209 | // Check to make sure we only have 1 ThreadStarter on a given thread. (Inspired by NDPWhidbey issue 16888) |
8210 | #if defined(_DEBUG) |
8211 | EnsureUniqueThreadStarter(this); |
8212 | #endif |
8213 | } |
8214 | |
// TP_RESULT DebuggerThreadStarter::TriggerPatch(): If we're in a
// stub (module == NULL && managed), do a PatchTrace up the stack and
// return TPR_IGNORE. Otherwise DisableAll and return TPR_TRIGGER.
8219 | TP_RESULT DebuggerThreadStarter::TriggerPatch(DebuggerControllerPatch *patch, |
8220 | Thread *thread, |
8221 | TRIGGER_WHY tyWhy) |
8222 | { |
8223 | Module *module = patch->key.module; |
8224 | BOOL managed = patch->IsManagedPatch(); |
8225 | |
8226 | LOG((LF_CORDB,LL_INFO1000, "DebuggerThreadStarter::TriggerPatch for thread 0x%x\n" , Debugger::GetThreadIdHelper(thread))); |
8227 | |
8228 | if (module == NULL && managed) |
8229 | { |
8230 | // This is a stub patch. If it was a TRACE_FRAME_PUSH that got us here, then the stub's frame is pushed now, so |
8231 | // we tell the frame to apply the real patch. If we got here via a TRACE_MGR_PUSH, however, then there is no |
8232 | // frame and we go back to the stub manager that generated the stub for where to patch next. |
8233 | TraceDestination trace; |
8234 | bool traceOk; |
8235 | if (patch->trace.GetTraceType() == TRACE_MGR_PUSH) |
8236 | { |
8237 | BYTE *dummy = NULL; |
8238 | CONTEXT *context = GetManagedLiveCtx(thread); |
8239 | CONTRACT_VIOLATION(GCViolation); |
8240 | traceOk = g_pEEInterface->TraceManager(thread, patch->trace.GetStubManager(), &trace, context, &dummy); |
8241 | } |
8242 | else if ((patch->trace.GetTraceType() == TRACE_FRAME_PUSH) && (thread->GetFrame()->IsTransitionToNativeFrame())) |
8243 | { |
8244 | // If we've got a frame that is transitioning to native, there's no reason to try to keep tracing. So we |
8245 | // bail early and save ourselves some effort. This also works around a problem where we deadlock trying to |
8246 | // do too much work to determine the destination of a ComPlusMethodFrame. (See issue 87103.) |
8247 | // |
8248 | // Note: trace call is still enabled, so we can just ignore this patch and wait for trace call to fire |
8249 | // again... |
8250 | return TPR_IGNORE; |
8251 | } |
8252 | else |
8253 | { |
// It's questionable whether TRACE_FRAME_PUSH is actually safe or not.
8255 | ControllerStackInfo csi; |
8256 | StackTraceTicket ticket(patch); |
8257 | csi.GetStackInfo(ticket, thread, LEAF_MOST_FRAME, NULL); |
8258 | |
8259 | CONTRACT_VIOLATION(GCViolation); // TraceFrame GC-triggers |
8260 | traceOk = g_pEEInterface->TraceFrame(thread, thread->GetFrame(), TRUE, &trace, &(csi.m_activeFrame.registers)); |
8261 | } |
8262 | |
8263 | if (traceOk && g_pEEInterface->FollowTrace(&trace)) |
8264 | { |
8265 | PatchTrace(&trace, LEAF_MOST_FRAME, TRUE); |
8266 | } |
8267 | |
8268 | return TPR_IGNORE; |
8269 | } |
8270 | else |
8271 | { |
8272 | // We've hit user code; trigger our event. |
8273 | DisableAll(); |
8274 | |
8275 | |
8276 | { |
8277 | |
8278 | // Give the helper thread a chance to get ready. The temporary helper can't handle |
8279 | // execution control well, and the RS won't do any execution control until it gets a |
8280 | // create Thread event, which it won't get until here. |
8281 | // So now's our best time to wait for the real helper thread. |
8282 | g_pDebugger->PollWaitingForHelper(); |
8283 | } |
8284 | |
8285 | return TPR_TRIGGER; |
8286 | } |
8287 | } |
8288 | |
8289 | void DebuggerThreadStarter::TriggerTraceCall(Thread *thread, const BYTE *ip) |
8290 | { |
8291 | LOG((LF_CORDB, LL_EVERYTHING, "DTS::TTC called\n" )); |
8292 | #ifdef DEBUGGING_SUPPORTED |
8293 | if (thread->GetDomain()->IsDebuggerAttached()) |
8294 | { |
8295 | TraceDestination trace; |
8296 | |
8297 | if (g_pEEInterface->TraceStub(ip, &trace) && g_pEEInterface->FollowTrace(&trace)) |
8298 | { |
8299 | PatchTrace(&trace, LEAF_MOST_FRAME, true); |
8300 | } |
8301 | } |
8302 | #endif //DEBUGGING_SUPPORTED |
8303 | |
8304 | } |
8305 | |
8306 | bool DebuggerThreadStarter::SendEvent(Thread *thread, bool fIpChanged) |
8307 | { |
8308 | CONTRACTL |
8309 | { |
8310 | SO_NOT_MAINLINE; |
8311 | NOTHROW; |
8312 | SENDEVENT_CONTRACT_ITEMS; |
8313 | } |
8314 | CONTRACTL_END; |
8315 | |
// This SendEvent can't be interrupted by a SetIp because until the client
8317 | // gets a ThreadStarter event, it doesn't even know the thread exists, so |
8318 | // it certainly can't change its ip. |
8319 | _ASSERTE(!fIpChanged); |
8320 | |
8321 | LOG((LF_CORDB, LL_INFO10000, "DTS::SE: in DebuggerThreadStarter's SendEvent\n" )); |
8322 | |
8323 | // Send the thread started event. |
8324 | g_pDebugger->ThreadStarted(thread); |
8325 | |
// We delete this now because it's no longer needed. We can call
// delete here because the queued count is above 0. This object
// will really be deleted when it's dequeued shortly after this
// call returns.
8330 | Delete(); |
8331 | |
8332 | return true; |
8333 | } |
8334 | |
8335 | // * ------------------------------------------------------------------------ |
8336 | // * DebuggerUserBreakpoint routines |
8337 | // * ------------------------------------------------------------------------ |
8338 | |
8339 | bool DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(FrameInfo * pFrame) |
8340 | { |
8341 | CONTRACTL |
8342 | { |
8343 | THROWS; |
8344 | MODE_ANY; |
8345 | GC_NOTRIGGER; |
8346 | } |
8347 | CONTRACTL_END; |
8348 | |
8349 | // Steppers ignore internal frames, so should only be called on real frames. |
8350 | _ASSERTE(pFrame->HasMethodFrame()); |
8351 | |
8352 | // Now get the namespace of the active frame |
8353 | MethodDesc *pMD = pFrame->md; |
8354 | |
8355 | if (pMD != NULL) |
8356 | { |
8357 | MethodTable * pMT = pMD->GetMethodTable(); |
8358 | |
8359 | LPCUTF8 szNamespace = NULL; |
8360 | LPCUTF8 szClassName = pMT->GetFullyQualifiedNameInfo(&szNamespace); |
8361 | |
8362 | if (szClassName != NULL && szNamespace != NULL) |
8363 | { |
8364 | MAKE_WIDEPTR_FROMUTF8(wszNamespace, szNamespace); // throw |
8365 | MAKE_WIDEPTR_FROMUTF8(wszClassName, szClassName); |
8366 | if (wcscmp(wszClassName, W("Debugger" )) == 0 && |
8367 | wcscmp(wszNamespace, W("System.Diagnostics" )) == 0) |
8368 | { |
8369 | // This will continue stepping |
8370 | return true; |
8371 | } |
8372 | } |
8373 | } |
8374 | return false; |
8375 | } |
8376 | |
// Helper to check if we're directly in a dynamic method (ignoring any chain goo
// or stuff in the Debugger namespace).
8379 | class IsLeafFrameDynamic |
8380 | { |
8381 | protected: |
8382 | static StackWalkAction WalkStackWrapper(FrameInfo *pInfo, void *data) |
8383 | { |
8384 | IsLeafFrameDynamic * pThis = reinterpret_cast<IsLeafFrameDynamic*> (data); |
8385 | return pThis->WalkStack(pInfo); |
8386 | } |
8387 | |
8388 | StackWalkAction WalkStack(FrameInfo *pInfo) |
8389 | { |
8390 | _ASSERTE(pInfo != NULL); |
8391 | |
8392 | // A FrameInfo may have both Method + Chain rolled into one. |
8393 | if (!pInfo->HasMethodFrame() && !pInfo->HasStubFrame()) |
8394 | { |
8395 | // We're a chain. Ignore it and keep looking. |
8396 | return SWA_CONTINUE; |
8397 | } |
8398 | |
8399 | // So now this is the first non-chain, non-Debugger namespace frame. |
8400 | // LW frames don't have a name, so we check if it's LW first. |
8401 | if (pInfo->eStubFrameType == STUBFRAME_LIGHTWEIGHT_FUNCTION) |
8402 | { |
8403 | m_fInLightWeightMethod = true; |
8404 | return SWA_ABORT; |
8405 | } |
8406 | |
8407 | // Ignore Debugger.Break() frames. |
8408 | // All Debugger.Break calls will have this on the stack. |
8409 | if (DebuggerUserBreakpoint::IsFrameInDebuggerNamespace(pInfo)) |
8410 | { |
8411 | return SWA_CONTINUE; |
8412 | } |
8413 | |
// We've now determined the leaf-most interesting frame, so stop stackwalking.
8415 | _ASSERTE(m_fInLightWeightMethod == false); |
8416 | return SWA_ABORT; |
8417 | } |
8418 | |
8419 | |
8420 | bool m_fInLightWeightMethod; |
8421 | |
8422 | // Need this context to do stack trace. |
8423 | CONTEXT m_tempContext; |
8424 | |
8425 | public: |
// Walks the current thread's stack and returns true if the leaf-most
// non-chain, non-Debugger-namespace frame is a lightweight (dynamic) method;
// returns false otherwise.
8430 | bool DoCheck(IN Thread * pThread) |
8431 | { |
8432 | CONTRACTL |
8433 | { |
8434 | GC_TRIGGERS; |
8435 | THROWS; |
8436 | MODE_ANY; |
8437 | |
8438 | PRECONDITION(CheckPointer(pThread)); |
8439 | } |
8440 | CONTRACTL_END; |
8441 | |
8442 | m_fInLightWeightMethod = false; |
8443 | |
8444 | |
8445 | DebuggerWalkStack( |
8446 | pThread, |
8447 | LEAF_MOST_FRAME, |
8448 | &m_tempContext, false, |
8449 | WalkStackWrapper, |
8450 | (void *) this, |
8451 | TRUE // includes everything |
8452 | ); |
8453 | |
8454 | // We don't care whether the stackwalk succeeds or not because the |
8455 | // callback sets our status via this field either way, so just return it. |
8456 | return m_fInLightWeightMethod; |
8457 | }; |
8458 | }; |
8459 | |
// Handle a Debugger.Break() notification.
// This may create a controller to step out of the Debugger.Break() call (so that
// we appear stopped at the callsite).
// If we can't step out (e.g., we're directly in a dynamic method), then send
// the debug event immediately.
8465 | void DebuggerUserBreakpoint::HandleDebugBreak(Thread * pThread) |
8466 | { |
8467 | bool fDoStepOut = true; |
8468 | |
8469 | // If the leaf frame is not a LW method, then step-out. |
8470 | IsLeafFrameDynamic info; |
8471 | fDoStepOut = !info.DoCheck(pThread); |
8472 | |
8473 | if (fDoStepOut) |
8474 | { |
8475 | // Create a controller that will step out for us. |
8476 | new (interopsafe) DebuggerUserBreakpoint(pThread); |
8477 | } |
8478 | else |
8479 | { |
8480 | // Send debug event immediately. |
8481 | g_pDebugger->SendUserBreakpointAndSynchronize(pThread); |
8482 | } |
8483 | } |
8484 | |
8485 | |
8486 | DebuggerUserBreakpoint::DebuggerUserBreakpoint(Thread *thread) |
8487 | : DebuggerStepper(thread, (CorDebugUnmappedStop) (STOP_ALL & ~STOP_UNMANAGED), INTERCEPT_ALL, NULL) |
8488 | { |
8489 | // Setup a step out from the current frame (which we know is |
8490 | // unmanaged, actually...) |
8491 | |
8492 | |
8493 | // This happens to be safe, but it's a very special case (so we have a special case ticket) |
8494 | // This is called while we're live (so no filter context) and from the fcall, |
// and we pushed a HelperMethodFrame to protect us. We also happen to know that we haven't
// done anything illegal or dangerous since then.
8497 | |
8498 | StackTraceTicket ticket(this); |
8499 | StepOut(LEAF_MOST_FRAME, ticket); |
8500 | } |
8501 | |
8502 | |
8503 | // Is this frame interesting? |
// Use this to skip all code in the "System.Diagnostics.Debugger" class.
8505 | bool DebuggerUserBreakpoint::IsInterestingFrame(FrameInfo * pFrame) |
8506 | { |
8507 | CONTRACTL |
8508 | { |
8509 | THROWS; |
8510 | MODE_ANY; |
8511 | GC_NOTRIGGER; |
8512 | } |
8513 | CONTRACTL_END; |
8514 | |
8515 | return !IsFrameInDebuggerNamespace(pFrame); |
8516 | } |
8517 | |
8518 | bool DebuggerUserBreakpoint::SendEvent(Thread *thread, bool fIpChanged) |
8519 | { |
8520 | CONTRACTL |
8521 | { |
8522 | SO_NOT_MAINLINE; |
8523 | NOTHROW; |
8524 | SENDEVENT_CONTRACT_ITEMS; |
8525 | } |
8526 | CONTRACTL_END; |
8527 | |
8528 | // See DebuggerStepper::SendEvent for why we assert here. |
8529 | // This is technically an issue, but it's too benign to fix. |
8530 | _ASSERTE(!fIpChanged); |
8531 | |
8532 | LOG((LF_CORDB, LL_INFO10000, |
8533 | "DUB::SE: in DebuggerUserBreakpoint's SendEvent\n" )); |
8534 | |
8535 | // Send the user breakpoint event. |
8536 | g_pDebugger->SendRawUserBreakpoint(thread); |
8537 | |
// We delete this now because it's no longer needed. We can call
// delete here because the queued count is above 0. This object
// will really be deleted when it's dequeued shortly after this
// call returns.
8542 | Delete(); |
8543 | |
8544 | return true; |
8545 | } |
8546 | |
8547 | // * ------------------------------------------------------------------------ |
8548 | // * DebuggerFuncEvalComplete routines |
8549 | // * ------------------------------------------------------------------------ |
8550 | |
8551 | DebuggerFuncEvalComplete::DebuggerFuncEvalComplete(Thread *thread, |
8552 | void *dest) |
8553 | : DebuggerController(thread, NULL) |
8554 | { |
8555 | #ifdef _TARGET_ARM_ |
8556 | m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(((DWORD)dest) & ~THUMB_CODE)->m_associatedDebuggerEval; |
8557 | #else |
8558 | m_pDE = reinterpret_cast<DebuggerEvalBreakpointInfoSegment*>(dest)->m_associatedDebuggerEval; |
8559 | #endif |
8560 | |
8561 | // Add an unmanaged patch at the destination. |
8562 | AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)dest, LEAF_MOST_FRAME, FALSE, TRACE_UNMANAGED); |
8563 | } |
8564 | |
8565 | TP_RESULT DebuggerFuncEvalComplete::TriggerPatch(DebuggerControllerPatch *patch, |
8566 | Thread *thread, |
8567 | TRIGGER_WHY tyWhy) |
8568 | { |
8569 | |
8570 | // It had better be an unmanaged patch... |
8571 | _ASSERTE((patch->key.module == NULL) && !patch->IsManagedPatch()); |
8572 | |
// Set the ThreadFilterContext back here because we need to make the stack
// crawlable in case a GC gets triggered.
8575 | |
8576 | // Restore the thread's context to what it was before we hijacked it for this func eval. |
8577 | CONTEXT *pCtx = GetManagedLiveCtx(thread); |
8578 | #ifdef FEATURE_DATABREAKPOINT |
8579 | #ifdef FEATURE_PAL |
8580 | #error Not supported |
8581 | #endif // FEATURE_PAL |
8582 | #if defined(_TARGET_X86_) || defined(_TARGET_AMD64_) |
// If a data breakpoint is set while we hit a breakpoint inside a FuncEval, this will make sure the data breakpoint stays set
8584 | m_pDE->m_context.Dr0 = pCtx->Dr0; |
8585 | m_pDE->m_context.Dr1 = pCtx->Dr1; |
8586 | m_pDE->m_context.Dr2 = pCtx->Dr2; |
8587 | m_pDE->m_context.Dr3 = pCtx->Dr3; |
8588 | m_pDE->m_context.Dr6 = pCtx->Dr6; |
8589 | m_pDE->m_context.Dr7 = pCtx->Dr7; |
8590 | #else |
8591 | #error Not supported |
8592 | #endif |
8593 | #endif |
8594 | CORDbgCopyThreadContext(reinterpret_cast<DT_CONTEXT *>(pCtx), |
8595 | reinterpret_cast<DT_CONTEXT *>(&(m_pDE->m_context))); |
8596 | |
8597 | // We've hit our patch, so simply disable all (which removes the |
8598 | // patch) and trigger the event. |
8599 | DisableAll(); |
8600 | return TPR_TRIGGER; |
8601 | } |
8602 | |
8603 | bool DebuggerFuncEvalComplete::SendEvent(Thread *thread, bool fIpChanged) |
8604 | { |
8605 | CONTRACTL |
8606 | { |
8607 | SO_NOT_MAINLINE; |
8608 | THROWS; |
8609 | SENDEVENT_CONTRACT_ITEMS; |
8610 | } |
8611 | CONTRACTL_END; |
8612 | |
8613 | |
// This should not ever be interrupted by a SetIp.
// The BP will be off in random native code for which SetIp would be illegal.
// However, the func-eval controller will restore the context from when we're at the patch,
8617 | // so that will look like the IP changed on us. |
8618 | _ASSERTE(fIpChanged); |
8619 | |
8620 | LOG((LF_CORDB, LL_INFO10000, "DFEC::SE: in DebuggerFuncEval's SendEvent\n" )); |
8621 | |
8622 | _ASSERTE(!ISREDIRECTEDTHREAD(thread)); |
8623 | |
8624 | // The DebuggerEval is at our faulting address. |
8625 | DebuggerEval *pDE = m_pDE; |
8626 | |
8627 | // Send the func eval complete (or exception) event. |
8628 | g_pDebugger->FuncEvalComplete(thread, pDE); |
8629 | |
// We delete this now because it's no longer needed. We can call
// delete here because the queued count is above 0. This object
// will really be deleted when it's dequeued shortly after this
// call returns.
8634 | Delete(); |
8635 | |
8636 | return true; |
8637 | } |
8638 | |
8639 | #ifdef EnC_SUPPORTED |
8640 | |
8641 | // * ------------------------------------------------------------------------ * |
8642 | // * DebuggerEnCBreakpoint routines |
8643 | // * ------------------------------------------------------------------------ * |
8644 | |
8645 | //--------------------------------------------------------------------------------------- |
8646 | // |
8647 | // DebuggerEnCBreakpoint constructor - creates and activates a new EnC breakpoint |
8648 | // |
8649 | // Arguments: |
8650 | // offset - native offset in the function to place the patch |
8651 | // jitInfo - identifies the function in which the breakpoint is being placed |
8652 | // fTriggerType - breakpoint type: either REMAP_PENDING or REMAP_COMPLETE |
8653 | // pAppDomain - the breakpoint applies to the specified AppDomain only |
8654 | // |
8655 | |
8656 | DebuggerEnCBreakpoint::DebuggerEnCBreakpoint(SIZE_T offset, |
8657 | DebuggerJitInfo *jitInfo, |
8658 | DebuggerEnCBreakpoint::TriggerType fTriggerType, |
8659 | AppDomain *pAppDomain) |
8660 | : DebuggerController(NULL, pAppDomain), |
8661 | m_fTriggerType(fTriggerType), |
8662 | m_jitInfo(jitInfo) |
8663 | { |
8664 | _ASSERTE( jitInfo != NULL ); |
8665 | // Add and activate the specified patch |
8666 | AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, offset, LEAF_MOST_FRAME, pAppDomain); |
8667 | LOG((LF_ENC,LL_INFO1000, "DEnCBPDEnCBP::adding %S patch!\n" , |
8668 | fTriggerType == REMAP_PENDING ? W("remap pending" ) : W("remap complete" ))); |
8669 | } |
8670 | |
8671 | |
8672 | //--------------------------------------------------------------------------------------- |
8673 | // |
8674 | // DebuggerEnCBreakpoint::TriggerPatch |
8675 | // called by the debugging infrastructure when the patch is hit. |
8676 | // |
8677 | // Arguments: |
8678 | // patch - specifies the patch that was hit |
8679 | // thread - identifies the thread on which the patch was hit |
8680 | // tyWhy - TY_SHORT_CIRCUIT for normal REMAP_PENDING EnC patches |
8681 | // |
8682 | // Return value: |
8683 | // TPR_IGNORE if the debugger chooses not to take a remap opportunity |
8684 | // TPR_IGNORE_AND_STOP when a remap-complete event is sent |
8685 | // Doesn't return at all if the debugger remaps execution to the new version of the method |
8686 | // |
8687 | TP_RESULT DebuggerEnCBreakpoint::TriggerPatch(DebuggerControllerPatch *patch, |
8688 | Thread *thread, |
8689 | TRIGGER_WHY tyWhy) |
8690 | { |
8691 | _ASSERTE(HasLock()); |
8692 | |
8693 | Module *module = patch->key.module; |
8694 | mdMethodDef md = patch->key.md; |
8695 | SIZE_T offset = patch->offset; |
8696 | |
8697 | // Map the current native offset back to the IL offset in the old |
8698 | // function. This will be mapped to the new native offset within |
8699 | // ResumeInUpdatedFunction |
8700 | CorDebugMappingResult map; |
8701 | DWORD which; |
8702 | SIZE_T currentIP = (SIZE_T)m_jitInfo->MapNativeOffsetToIL(offset, |
8703 | &map, &which); |
8704 | |
8705 | // We only lay DebuggerEnCBreakpoints at sequence points |
8706 | _ASSERTE(map == MAPPING_EXACT); |
8707 | |
8708 | LOG((LF_ENC, LL_ALWAYS, |
8709 | "DEnCBP::TP: triggered E&C %S breakpoint: tid=0x%x, module=0x%08x, " |
8710 | "method def=0x%08x, version=%d, native offset=0x%x, IL offset=0x%x\n this=0x%x\n" , |
8711 | m_fTriggerType == REMAP_PENDING ? W("ResumePending" ) : W("ResumeComplete" ), |
8712 | thread, module, md, m_jitInfo->m_encVersion, offset, currentIP, this)); |
8713 | |
8714 | // If this is a REMAP_COMPLETE patch, then dispatch the RemapComplete callback |
8715 | if (m_fTriggerType == REMAP_COMPLETE) |
8716 | { |
8717 | return HandleRemapComplete(patch, thread, tyWhy); |
8718 | } |
8719 | |
// This must be a REMAP_PENDING patch.
// Unless we got here on an explicit short-circuit, don't do any work.
8722 | if (tyWhy != TY_SHORT_CIRCUIT) |
8723 | { |
8724 | LOG((LF_ENC, LL_ALWAYS, "DEnCBP::TP: not short-circuit ... bailing\n" )); |
8725 | return TPR_IGNORE; |
8726 | } |
8727 | |
8728 | _ASSERTE(patch->IsManagedPatch()); |
8729 | |
8730 | // Grab the MethodDesc for this function. |
8731 | _ASSERTE(module != NULL); |
8732 | |
8733 | // GENERICS: @todo generics. This should be replaced by a similar loop |
8734 | // over the DJIs for the DMI as in BindPatch up above. |
8735 | MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(module, md); |
8736 | |
8737 | _ASSERTE(pFD != NULL); |
8738 | |
8739 | LOG((LF_ENC, LL_ALWAYS, |
8740 | "DEnCBP::TP: in %s::%s\n" , pFD->m_pszDebugClassName,pFD->m_pszDebugMethodName)); |
8741 | |
8742 | // Grab the jit info for the original copy of the method, which is |
8743 | // what we are executing right now. |
8744 | DebuggerJitInfo *pJitInfo = m_jitInfo; |
8745 | _ASSERTE(pJitInfo); |
8746 | _ASSERTE(pJitInfo->m_fd == pFD); |
8747 | |
8748 | // Grab the context for this thread. This is the context that was |
8749 | // passed to COMPlusFrameHandler. |
8750 | CONTEXT *pContext = GetManagedLiveCtx(thread); |
8751 | |
8752 | // We use the module the current function is in. |
8753 | _ASSERTE(module->IsEditAndContinueEnabled()); |
8754 | EditAndContinueModule *pModule = (EditAndContinueModule*)module; |
8755 | |
8756 | // Release the controller lock for the rest of this method |
8757 | CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection); |
8758 | |
8759 | // resumeIP is the native offset in the new version of the method the debugger wants |
8760 | // to resume to. We'll pass the address of this variable over to the right-side |
8761 | // and if it modifies the contents while we're stopped dispatching the RemapOpportunity, |
8762 | // then we know it wants a remap. |
8763 | // This form of side-channel communication seems like an error-prone workaround. Ideally the |
8764 | // remap IP (if any) would just be returned in a response event. |
8765 | SIZE_T resumeIP = (SIZE_T) -1; |
8766 | |
8767 | // Debugging code to enable a break after N RemapOpportunities |
8768 | #ifdef _DEBUG |
8769 | static int breakOnRemapOpportunity = -1; |
8770 | if (breakOnRemapOpportunity == -1) |
8771 | breakOnRemapOpportunity = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapOpportunity); |
8772 | |
8773 | static int remapOpportunityCount = 0; |
8774 | |
8775 | ++remapOpportunityCount; |
8776 | if (breakOnRemapOpportunity == 1 || breakOnRemapOpportunity == remapOpportunityCount) |
8777 | { |
8778 | _ASSERTE(!"BreakOnRemapOpportunity" ); |
8779 | } |
8780 | #endif |
8781 | |
8782 | // Send an event to the RS to call the RemapOpportunity callback, passing the address of resumeIP. |
8783 | // If the debugger responds with a call to RemapFunction, the supplied IP will be copied into resumeIP |
8784 | // and we will know to update the context and resume the function at the new IP. Otherwise we just do |
8785 | // nothing and try again on next RemapFunction breakpoint |
8786 | g_pDebugger->LockAndSendEnCRemapEvent(pJitInfo, currentIP, &resumeIP); |
8787 | |
8788 | LOG((LF_ENC, LL_ALWAYS, |
8789 | "DEnCBP::TP: resume IL offset is 0x%x\n" , resumeIP)); |
8790 | |
8791 | // Has the debugger requested a remap? |
8792 | if (resumeIP != (SIZE_T) -1) |
8793 | { |
8794 | // This will jit the function, update the context, and resume execution at the new location. |
8795 | g_pEEInterface->ResumeInUpdatedFunction(pModule, |
8796 | pFD, |
8797 | (void*)pJitInfo, |
8798 | resumeIP, |
8799 | pContext); |
8800 | _ASSERTE(!"Returned from ResumeInUpdatedFunction!" ); |
8801 | } |
8802 | |
8803 | LOG((LF_CORDB, LL_ALWAYS, "DEnCB::TP: We've returned from ResumeInUpd" |
8804 | "atedFunction, we're going to skip the EnC patch ####\n" )); |
8805 | |
// When we return, we'll have to re-acquire this lock. Be careful that we haven't kept any controllers/patches
// in the caller. They can move when we unlock, so when we release the lock and re-acquire it here, things might
// have changed underneath us.
// The inverseLock holder will re-acquire the lock.
8810 | |
8811 | return TPR_IGNORE; |
8812 | } |
8813 | |
8814 | // |
// HandleRemapComplete is called for an EnC patch in the newly updated function
// so that we can notify the debugger that the remap has completed and it can
// now remap its steppers or anything else that depends on the new code actually
// being on the stack. We return TPR_IGNORE_AND_STOP because it's possible that the
// function was edited after we handled remap complete, and we want to make sure we
// start a fresh call to TriggerPatch.
8821 | // |
8822 | TP_RESULT DebuggerEnCBreakpoint::HandleRemapComplete(DebuggerControllerPatch *patch, |
8823 | Thread *thread, |
8824 | TRIGGER_WHY tyWhy) |
8825 | { |
8826 | LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: HandleRemapComplete\n" )); |
8827 | |
8828 | // Debugging code to enable a break after N RemapCompletes |
8829 | #ifdef _DEBUG |
8830 | static int breakOnRemapComplete = -1; |
8831 | if (breakOnRemapComplete == -1) |
8832 | breakOnRemapComplete = CLRConfig::GetConfigValue(CLRConfig::INTERNAL_EnCBreakOnRemapComplete); |
8833 | |
8834 | static int remapCompleteCount = 0; |
8835 | ++remapCompleteCount; |
8836 | if (breakOnRemapComplete == 1 || breakOnRemapComplete == remapCompleteCount) |
8837 | { |
8838 | _ASSERTE(!"BreakOnRemapComplete" ); |
8839 | } |
8840 | #endif |
8841 | _ASSERTE(HasLock()); |
8842 | |
8843 | |
8844 | bool fApplied = m_jitInfo->m_encBreakpointsApplied; |
// Need to delete this before the unlock below so that if any other thread comes in
// after the unlock, it won't handle this patch.
8847 | Delete(); |
8848 | |
// We just deleted ourselves. Can't access any instance members after this point.
8850 | |
// If the function has somehow been updated before we resume into it, then just bail.
8852 | if (fApplied) |
8853 | { |
8854 | LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: function already updated, ignoring\n" )); |
8855 | return TPR_IGNORE_AND_STOP; |
8856 | } |
8857 | |
8858 | // GENERICS: @todo generics. This should be replaced by a similar loop |
8859 | // over the DJIs for the DMI as in BindPatch up above. |
8860 | MethodDesc *pFD = g_pEEInterface->FindLoadedMethodRefOrDef(patch->key.module, patch->key.md); |
8861 | |
8862 | LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: unlocking controller\n" )); |
8863 | |
8864 | // Unlock the controller lock and dispatch the remap complete event |
8865 | CrstBase::UnsafeCrstInverseHolder inverseLock(&g_criticalSection); |
8866 | |
8867 | LOG((LF_ENC, LL_ALWAYS, "DEnCBP::HRC: sending RemapCompleteEvent\n" )); |
8868 | |
8869 | g_pDebugger->LockAndSendEnCRemapCompleteEvent(pFD); |
8870 | |
8871 | // We're returning then we'll have to re-get this lock. Be careful that we haven't kept any controller/patches |
8872 | // in the caller. They can move when we unlock, so when we release the lock and reget it here, things might have |
8873 | // changed underneath us. |
8874 | // inverseLock holder will reacquire. |
8875 | |
8876 | return TPR_IGNORE_AND_STOP; |
8877 | } |
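
// For context: on the debugger side, the remap-complete event surfaces as
// ICorDebugManagedCallback2::FunctionRemapComplete, which is where a debugger
// would rebind its breakpoints against the new version of the code. A minimal
// sketch (hypothetical callback class and rebinding helper; not part of this file):
//
//   HRESULT MyCallback::FunctionRemapComplete(ICorDebugAppDomain *pAppDomain,
//                                             ICorDebugThread *pThread,
//                                             ICorDebugFunction *pFunction)
//   {
//       // The new version of pFunction is now actually on the stack, so it is
//       // safe to re-create breakpoints and steppers from the updated code.
//       RebindBreakpoints(pFunction);
//       return pAppDomain->Continue(FALSE);
//   }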
#endif //EnC_SUPPORTED

// continuable-exceptions
// * ------------------------------------------------------------------------ *
// * DebuggerContinuableExceptionBreakpoint routines
// * ------------------------------------------------------------------------ *


//---------------------------------------------------------------------------------------
//
// constructor
//
// Arguments:
//    pThread - the thread on which we are intercepting an exception
//    nativeOffset - the target native offset, i.e. where we are going to resume execution
//    jitInfo - the DebuggerJitInfo of the method in which we are intercepting
//    pAppDomain - the AppDomain in which the thread is executing
//

DebuggerContinuableExceptionBreakpoint::DebuggerContinuableExceptionBreakpoint(Thread *pThread,
                                                                               SIZE_T nativeOffset,
                                                                               DebuggerJitInfo *jitInfo,
                                                                               AppDomain *pAppDomain)
  : DebuggerController(pThread, pAppDomain)
{
    _ASSERTE( jitInfo != NULL );

    // Add a native patch at the specified native offset, which is where we are going to resume execution.
    AddBindAndActivateNativeManagedPatch(jitInfo->m_fd, jitInfo, nativeOffset, LEAF_MOST_FRAME, pAppDomain);
}
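
// For context: this controller is created in response to a debugger calling
// ICorDebugThread2::InterceptCurrentException. A minimal sketch of the
// debugger-side request (hypothetical usage, error handling elided; not part
// of this file):
//
//   // pFrame identifies the frame in which the exception should be intercepted;
//   // the left side then resumes execution at a sequence point in that frame,
//   // which is the native offset this breakpoint gets bound to.
//   ICorDebugThread2 *pThread2 = NULL;
//   if (SUCCEEDED(pThread->QueryInterface(IID_ICorDebugThread2, (void**)&pThread2)))
//   {
//       pThread2->InterceptCurrentException(pFrame);
//       pThread2->Release();
//   }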

//---------------------------------------------------------------------------------------
//
// This function is called when the patch added in the constructor is hit. At this point,
// we have already resumed execution, and the exception is no longer in flight.
//
// Arguments:
//    patch - the patch added in the constructor; unused
//    thread - the thread in question; unused
//    tyWhy - a flag which is only useful for EnC; unused
//
// Return Value:
//    This function always returns TPR_TRIGGER, meaning that it wants to send an event to notify the RS.
//

TP_RESULT DebuggerContinuableExceptionBreakpoint::TriggerPatch(DebuggerControllerPatch *patch,
                                                               Thread *thread,
                                                               TRIGGER_WHY tyWhy)
{
    LOG((LF_CORDB, LL_INFO10000, "DCEBP::TP\n"));

    //
    // Disable the patch
    //
    DisableAll();

    // We will send a notification to the RS when the patch is triggered.
    return TPR_TRIGGER;
}

//---------------------------------------------------------------------------------------
//
// This function is called when we want to notify the RS that an interception is complete.
// At this point, we have already resumed execution, and the exception is no longer in flight.
//
// Arguments:
//    thread - the thread in question
//    fIpChanged - whether the IP was changed by SetIP after the patch was hit but
//                 before this function was called
//

bool DebuggerContinuableExceptionBreakpoint::SendEvent(Thread *thread, bool fIpChanged)
{
    CONTRACTL
    {
        SO_NOT_MAINLINE;
        NOTHROW;
        SENDEVENT_CONTRACT_ITEMS;
    }
    CONTRACTL_END;

    LOG((LF_CORDB, LL_INFO10000,
         "DCEBP::SE: in DebuggerContinuableExceptionBreakpoint's SendEvent\n"));

    if (!fIpChanged)
    {
        g_pDebugger->SendInterceptExceptionComplete(thread);
    }

    // On WIN64, by the time we get here the DebuggerExState is gone already.
    // ExceptionTrackers are cleaned up before we resume execution for a handled exception.
#if !defined(WIN64EXCEPTIONS)
    thread->GetExceptionState()->GetDebuggerState()->SetDebuggerInterceptContext(NULL);
#endif // !WIN64EXCEPTIONS

    //
    // We delete this now because it's no longer needed. We can call
    // delete here because the queued count is above 0. This object
    // will really be deleted when it's dequeued shortly after this
    // call returns.
    //
    Delete();

    return true;
}
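
// For context: on the debugger side, the intercept-complete notification is
// expected to arrive as an ICorDebugManagedCallback2::ExceptionUnwind callback
// with dwEventType == DEBUG_EXCEPTION_INTERCEPTED (an assumption based on the
// public ICorDebug API; verify against the RS before relying on it). A minimal
// sketch of a handler (hypothetical callback class; not part of this file):
//
//   HRESULT MyCallback::ExceptionUnwind(ICorDebugAppDomain *pAppDomain,
//                                       ICorDebugThread *pThread,
//                                       CorDebugExceptionUnwindCallbackType dwEventType,
//                                       DWORD dwFlags)
//   {
//       if (dwEventType == DEBUG_EXCEPTION_INTERCEPTED)
//       {
//           // The exception is no longer in flight; execution resumes at the
//           // intercept point once we continue.
//       }
//       return pAppDomain->Continue(FALSE);
//   }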

#ifdef FEATURE_DATABREAKPOINT

/* static */ bool DebuggerDataBreakpoint::TriggerDataBreakpoint(Thread *thread, CONTEXT * pContext)
{
    LOG((LF_CORDB, LL_INFO10000, "D::DDBP: Doing TriggerDataBreakpoint...\n"));

    bool hitDataBp = false;
    bool result = false;

#ifdef FEATURE_PAL
#error Not supported
#endif // FEATURE_PAL

#if defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
    PDR6 pdr6 = (PDR6)&(pContext->Dr6);

    if (pdr6->B0 || pdr6->B1 || pdr6->B2 || pdr6->B3)
    {
        hitDataBp = true;
    }
#else // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)
#error Not supported
#endif // defined(_TARGET_X86_) || defined(_TARGET_AMD64_)

    if (hitDataBp)
    {
        if (g_pDebugger->IsThreadAtSafePlace(thread))
        {
            LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT...\n"));
            result = true;
        }
        else
        {
            CONTEXT contextToAdjust;
            BOOL adjustedContext = FALSE;
            memcpy(&contextToAdjust, pContext, sizeof(CONTEXT));
            adjustedContext = g_pEEInterface->AdjustContextForWriteBarrierForDebugger(&contextToAdjust);
            if (adjustedContext)
            {
                LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT INSIDE WRITE BARRIER...\n"));
                DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread);
                pDataBreakpoint->AddAndActivateNativePatchForAddress((CORDB_ADDRESS_TYPE*)GetIP(&contextToAdjust),
                                                                     FramePointer::MakeFramePointer(GetFP(&contextToAdjust)),
                                                                     true,
                                                                     DPT_DEFAULT_TRACE_TYPE);
            }
            else
            {
                LOG((LF_CORDB, LL_INFO10000, "D::DDBP: HIT DATA BREAKPOINT BUT STILL NEED TO ROLL...\n"));
                DebuggerDataBreakpoint *pDataBreakpoint = new (interopsafe) DebuggerDataBreakpoint(thread);
                pDataBreakpoint->EnableSingleStep();
            }
            result = false;
        }
    }
    else
    {
        LOG((LF_CORDB, LL_INFO10000, "D::DDBP: DIDN'T TRIGGER DATA BREAKPOINT...\n"));
        result = false;
    }

    return result;
}
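
// For context: the DR6 check above relies on the x86/x64 debug-register
// architecture. A hardware data breakpoint is armed by putting the watched
// address in one of DR0-DR3 and setting the matching enable/type/length bits
// in DR7; when it fires, the CPU sets the corresponding B0-B3 status bit in
// DR6, which is what TriggerDataBreakpoint tests. A minimal sketch of arming
// DR0 as a 4-byte write watch via the Win32 context APIs (illustrative usage
// only, error handling elided; not part of this file):
//
//   CONTEXT ctx = {};
//   ctx.ContextFlags = CONTEXT_DEBUG_REGISTERS;
//   GetThreadContext(hThread, &ctx);
//
//   ctx.Dr0 = (DWORD64)pWatchedAddress;   // linear address to watch
//   ctx.Dr7 |= 0x1;                       // L0: locally enable DR0
//   ctx.Dr7 |= (0x1 << 16);               // R/W0 = 01b: break on data writes
//   ctx.Dr7 |= (0x3 << 18);               // LEN0 = 11b: 4-byte-wide watch
//
//   SetThreadContext(hThread, &ctx);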

TP_RESULT DebuggerDataBreakpoint::TriggerPatch(DebuggerControllerPatch *patch, Thread *thread, TRIGGER_WHY tyWhy)
{
    if (g_pDebugger->IsThreadAtSafePlace(thread))
    {
        return TPR_TRIGGER;
    }
    else
    {
        LOG((LF_CORDB, LL_INFO10000, "D::DDBP: REACHED RETURN OF JIT HELPER BUT STILL NEED TO ROLL...\n"));
        this->EnableSingleStep();
        return TPR_IGNORE;
    }
}

bool DebuggerDataBreakpoint::TriggerSingleStep(Thread *thread, const BYTE *ip)
{
    if (g_pDebugger->IsThreadAtSafePlace(thread))
    {
        LOG((LF_CORDB, LL_INFO10000, "D::DDBP: Finally safe to stop, stop stepping\n"));
        this->DisableSingleStep();
        return true;
    }
    else
    {
        LOG((LF_CORDB, LL_INFO10000, "D::DDBP: Still not safe to stop, continue stepping\n"));
        return false;
    }
}
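
// For context: the single-stepping used here is the hardware flavor. On x86/x64
// it is driven by the trap flag (TF, bit 8 of EFLAGS): while TF is set, the CPU
// raises a single-step exception after each instruction, so this controller is
// called back repeatedly until the thread rolls forward to a safe place. An
// illustrative sketch of the underlying mechanism (the real work is done by
// EnableSingleStep/DisableSingleStep; not a substitute for them):
//
//   const DWORD EFLAGS_TF = 0x100;    // trap flag, bit 8
//   pContext->EFlags |= EFLAGS_TF;    // step: trap after the next instruction
//   ...
//   pContext->EFlags &= ~EFLAGS_TF;   // done: stop single-stepping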

#endif // FEATURE_DATABREAKPOINT

#endif // !DACCESS_COMPILE