1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4//*****************************************************************************
5// File: controller.h
6//
7
8//
9// Debugger control flow object
10//
11//*****************************************************************************
12
13#ifndef CONTROLLER_H_
14#define CONTROLLER_H_
15
16/* ========================================================================= */
17
18#if !defined(DACCESS_COMPILE)
19
20#include "frameinfo.h"
21
22/* ------------------------------------------------------------------------- *
23 * Forward declarations
24 * ------------------------------------------------------------------------- */
25
26class DebuggerPatchSkip;
27class DebuggerThreadStarter;
28class DebuggerController;
29class DebuggerControllerQueue;
30struct DebuggerControllerPatch;
31class DebuggerUserBreakpoint;
32class ControllerStackInfo;
33
// Layout of the x86/x64 DR6 debug status register. The processor sets these
// bits when a debug exception is raised so the handler can determine which
// hardware breakpoint (if any) fired. The bitfield order below mirrors the
// hardware layout and must not be changed.
typedef struct _DR6 *PDR6;
typedef struct _DR6 {
    DWORD B0 : 1;       // Breakpoint condition detected for DR0
    DWORD B1 : 1;       // Breakpoint condition detected for DR1
    DWORD B2 : 1;       // Breakpoint condition detected for DR2
    DWORD B3 : 1;       // Breakpoint condition detected for DR3
    DWORD Pad1 : 9;     // Reserved bits
    DWORD BD : 1;       // Debug-register access detected
    DWORD BS : 1;       // Single step (TF-induced) exception
    DWORD BT : 1;       // Task-switch exception
} DR6;
45
// Layout of the x86/x64 DR7 debug control register. The Ln/Gn bits
// locally/globally enable the corresponding debug address register DRn;
// the Rwen/Lenn pairs select the trigger condition (execute/write/IO/
// read-write) and the watched length for DRn. The bitfield order below
// mirrors the hardware layout and must not be changed.
typedef struct _DR7 *PDR7;
typedef struct _DR7 {
    DWORD L0 : 1;       // Local enable for DR0
    DWORD G0 : 1;       // Global enable for DR0
    DWORD L1 : 1;       // Local enable for DR1
    DWORD G1 : 1;       // Global enable for DR1
    DWORD L2 : 1;       // Local enable for DR2
    DWORD G2 : 1;       // Global enable for DR2
    DWORD L3 : 1;       // Local enable for DR3
    DWORD G3 : 1;       // Global enable for DR3
    DWORD LE : 1;       // Local exact breakpoint enable (legacy)
    DWORD GE : 1;       // Global exact breakpoint enable (legacy)
    DWORD Pad1 : 3;     // Reserved bits
    DWORD GD : 1;       // General detect enable (traps debug-register access)
    DWORD Pad2 : 1;     // Reserved
    DWORD Pad3 : 1;     // Reserved
    DWORD Rwe0 : 2;     // Read/write/execute condition for DR0
    DWORD Len0 : 2;     // Watched length for DR0
    DWORD Rwe1 : 2;     // Read/write/execute condition for DR1
    DWORD Len1 : 2;     // Watched length for DR1
    DWORD Rwe2 : 2;     // Read/write/execute condition for DR2
    DWORD Len2 : 2;     // Watched length for DR2
    DWORD Rwe3 : 2;     // Read/write/execute condition for DR3
    DWORD Len3 : 2;     // Watched length for DR3
} DR7;
71
72
// Ticket for ensuring that it's safe to get a stack trace.
// Stack walking is only legal at certain well-known stopping points; each
// constructor below encodes one rule under which a walk is known to be safe.
// Requiring a ticket forces callers to state (and lets reviewers audit)
// which rule they are relying on.
class StackTraceTicket
{
public:
    // Each ctor is a rule for why it's safe to run a stacktrace.

    // Safe if we're at certain types of patches.
    StackTraceTicket(DebuggerControllerPatch * patch);

    // Safe if there was already another stack trace at this spot. (Grandfather clause)
    StackTraceTicket(ControllerStackInfo * info);

    // Safe if we're at a Synchronized point.
    StackTraceTicket(Thread * pThread);

    // Safe b/c the context shows we're in native managed code
    StackTraceTicket(const BYTE * ip);

    // DebuggerUserBreakpoint has a special case of safety.
    StackTraceTicket(DebuggerUserBreakpoint * p);

    // This is like a contract violation.
    // Unsafe tickets. Use as:
    //     StackTraceTicket ticket(StackTraceTicket::UNSAFE_TICKET);
    enum EUNSAFE {
        // Ticket is unsafe. Potential issue.
        UNSAFE_TICKET = 0,

        // For some wacky reason, it's safe to take a stacktrace here, but
        // there's not an easily verifiable rule. Use this ticket very sparingly
        // because it's much more difficult to verify.
        SPECIAL_CASE_TICKET = 1
    };
    StackTraceTicket(EUNSAFE e) { };

private:
    // Tickets can't be copied around. Hide these definitions so to enforce that.
    // We still need the Copy ctor so that it can be passed in as a parameter.
    void operator=(StackTraceTicket & other);
};
113
/* ------------------------------------------------------------------------- *
 * ControllerStackInfo utility
 * ------------------------------------------------------------------------- *
 * class ControllerStackInfo is a class designed
 * to simply obtain a two-frame stack trace: it will obtain the bottommost
 * framepointer (m_bottomFP), a given target frame (m_activeFrame), and the
 * frame above the target frame (m_returnFrame). Note that the target frame
 * may be the bottommost, 'active' frame, or it may be a frame higher up in
 * the stack. ControllerStackInfo accomplishes this by starting at the
 * bottommost frame and walking upwards until it reaches the target frame,
 * whereupon it records the m_activeFrame info, gets called once more to
 * fill in the m_returnFrame info, and thereafter stops the stack walk.
 *
 * public:
 * void * m_bottomFP:   Frame pointer for the
 *      bottommost (most active)
 *      frame.  We can add more later, if we need it.  Currently just used in
 *      TrapStep.  NULL indicates an uninitialized value.
 *
 * void * m_targetFP:   The frame pointer to the frame
 *      that we actually want the info of.
 *
 * bool m_targetFrameFound:  Set to true if
 *      WalkStack finds the frame indicated by targetFP handed to GetStackInfo
 *      false otherwise.
 *
 * FrameInfo m_activeFrame:   A FrameInfo
 *      describing the target frame.  This should always be valid after a
 *      call to GetStackInfo.
 *
 * FrameInfo m_returnFrame:   A FrameInfo
 *      describing the frame above the target frame, if target's
 *      return frame were found (call HasReturnFrame() to see if this is
 *      valid). Otherwise, this will be the same as m_activeFrame, above
 *
 * private:
 * bool m_activeFound:  Set to true if we found the target frame.
 * bool m_returnFound:  Set to true if we found the target's return frame.
 */
class ControllerStackInfo
{
public:
    friend class StackTraceTicket;

    // NOTE: no other initialization happens here; GetStackInfo() must be
    // called before the FrameInfo members below are meaningful.
    ControllerStackInfo()
    {
        INDEBUG(m_dbgExecuted = false);
    }

    FramePointer m_bottomFP;
    FramePointer m_targetFP;
    bool m_targetFrameFound;

    FrameInfo m_activeFrame;
    FrameInfo m_returnFrame;

    CorDebugChainReason m_specialChainReason;

    // static StackWalkAction ControllerStackInfo::WalkStack()   The
    //      callback that will be invoked by the DebuggerWalkStackProc.
    //      Note that the data argument is the "this" pointer to the
    //      ControllerStackInfo.
    static StackWalkAction WalkStack(FrameInfo *pInfo, void *data);


    //void ControllerStackInfo::GetStackInfo():   GetStackInfo
    //      is invoked by the user to trigger the stack walk.  This will
    //      cause the stack walk detailed in the class description to happen.
    // Thread* thread:  The thread to do the stack walk on.
    // void* targetFP:  Can be either NULL (meaning that the bottommost
    //      frame is the target), or a frame pointer, meaning that the
    //      caller wants information about a specific frame.
    // CONTEXT* pContext:  A pointer to a CONTEXT structure.  Can be null,
    //      we use our temp context.
    // bool suppressUMChainFromComPlusMethodFrameGeneric - A ridiculous flag that is trying to narrowly
    //      target a fix for issue 650903.
    // StackTraceTicket - ticket ensuring that we have permission to call this.
    void GetStackInfo(
        StackTraceTicket ticket,
        Thread *thread,
        FramePointer targetFP,
        CONTEXT *pContext,
        bool suppressUMChainFromComPlusMethodFrameGeneric = false
        );

    //bool ControllerStackInfo::HasReturnFrame()  Returns
    //      true if m_returnFrame is valid.  Returns false
    //      if m_returnFrame is set to m_activeFrame
    bool HasReturnFrame() {LIMITED_METHOD_CONTRACT; return m_returnFound; }

    // This function "undoes" an unwind, i.e. it takes the active frame (the current frame)
    // and sets it to be the return frame (the caller frame).  Currently it is only used by
    // the stepper to step out of an LCG method.  See DebuggerStepper::DetectHandleLCGMethods()
    // for more information.
    void SetReturnFrameWithActiveFrame();

private:
    // If we don't have a valid context, then use this temp cache.
    CONTEXT m_tempContext;

    bool m_activeFound;
    bool m_returnFound;

    // A ridiculous flag that is targeting a very narrow fix at issue 650903
    // (4.5.1/Blue).  This is set for the duration of a stackwalk designed to
    // help us "Step Out" to a managed frame (i.e., managed-only debugging).
    bool m_suppressUMChainFromComPlusMethodFrameGeneric;

    // Track if this stackwalk actually happened.
    // This is used by the StackTraceTicket(ControllerStackInfo * info) ticket.
    INDEBUG(bool m_dbgExecuted);
};
226
227#endif // !DACCESS_COMPILE
228
229
230/* ------------------------------------------------------------------------- *
231 * DebuggerController routines
232 * ------------------------------------------------------------------------- */
233
234// simple ref-counted buffer that's shared among DebuggerPatchSkippers for a
235// given DebuggerControllerPatch. upon creation the refcount will be 1. when
236// the last skipper and controller are cleaned up the buffer will be released.
237// note that there isn't a clear owner of this buffer since a controller can be
238// cleaned up while the final skipper is still in flight.
239class SharedPatchBypassBuffer
240{
241public:
242 SharedPatchBypassBuffer() : m_refCount(1)
243 {
244#ifdef _DEBUG
245 DWORD cbToProtect = MAX_INSTRUCTION_LENGTH;
246 _ASSERTE(DbgIsExecutable((BYTE*)PatchBypass, cbToProtect));
247#endif // _DEBUG
248
249 // sentinel value indicating uninitialized data
250 *(reinterpret_cast<DWORD*>(PatchBypass)) = SentinelValue;
251#ifdef _TARGET_AMD64_
252 *(reinterpret_cast<DWORD*>(BypassBuffer)) = SentinelValue;
253 RipTargetFixup = 0;
254 RipTargetFixupSize = 0;
255#elif _TARGET_ARM64_
256 RipTargetFixup = 0;
257
258#endif
259 }
260
261 ~SharedPatchBypassBuffer()
262 {
263 // trap deletes that don't go through Release()
264 _ASSERTE(m_refCount == 0);
265 }
266
267 LONG AddRef()
268 {
269 LONG newRefCount = InterlockedIncrement(&m_refCount);
270 _ASSERTE(newRefCount > 0);
271 return newRefCount;
272 }
273
274 LONG Release()
275 {
276 LONG newRefCount = InterlockedDecrement(&m_refCount);
277 _ASSERTE(newRefCount >= 0);
278
279 if (newRefCount == 0)
280 {
281 TRACE_FREE(this);
282 DeleteInteropSafeExecutable(this);
283 }
284
285 return newRefCount;
286 }
287
288 // "PatchBypass" must be the first field of this class for alignment to be correct.
289 BYTE PatchBypass[MAX_INSTRUCTION_LENGTH];
290#if defined(_TARGET_AMD64_)
291 const static int cbBufferBypass = 0x10;
292 BYTE BypassBuffer[cbBufferBypass];
293
294 UINT_PTR RipTargetFixup;
295 BYTE RipTargetFixupSize;
296#elif defined(_TARGET_ARM64_)
297 UINT_PTR RipTargetFixup;
298#endif
299
300private:
301 const static DWORD SentinelValue = 0xffffffff;
302 LONG m_refCount;
303};
304
// struct DebuggerFunctionKey1:  Provides a means of hashing unactivated
// breakpoints, it's used mainly for the case where the function to put
// the breakpoint in hasn't been JITted yet.
// Module* module:  Module that the method belongs to.
// mdMethodDef md:  meta data token for the method.
struct DebuggerFunctionKey1
{
    PTR_Module          module;
    mdMethodDef         md;
};

// Unaligned alias: keys may be embedded in packed/hashed structures.
typedef DebuggerFunctionKey1 UNALIGNED DebuggerFunctionKey;
317
// IL Master: Breakpoints on IL code may need to be applied to multiple
// copies of code. Historically generics was the only way IL code was JITTed
// multiple times but more recently the CodeVersionManager and tiered compilation
// provide more open-ended mechanisms to have multiple native code bodies derived
// from a single IL method body.
// The "master" is a patch we keep to record the IL offset or native offset, and
// is used to create new "slave" patches. For native offsets only offset 0 is allowed
// because that is the only one that we think would have a consistent semantic
// meaning across different code bodies.
// There can also be multiple IL bodies for the same method given EnC or ReJIT.
// A given master breakpoint is tightly bound to one particular IL body determined
// by encVersion. ReJIT + breakpoints isn't currently supported.
//
//
// IL Slave: The slaves created from Master patches. If the master used an IL offset
// then the slave also initially has an IL offset that will later become a native offset.
// If the master uses a native offset (0) then the slave will also have a native offset (0).
// These patches always resolve to addresses in jitted code.
//
//
// NativeManaged: A patch we apply to managed code, usually for walkers etc. If this code
// is jitted then these patches are always bound to one exact jitted code body.
// If you need to be 100% sure I suggest you do more code review but I believe we also
// use this for managed code from other code generators such as a stub or statically compiled
// code that executes in cooperative mode.
//
//
// NativeUnmanaged: A patch applied to any kind of native code.

enum DebuggerPatchKind { PATCH_KIND_IL_MASTER, PATCH_KIND_IL_SLAVE, PATCH_KIND_NATIVE_MANAGED, PATCH_KIND_NATIVE_UNMANAGED };
348
// struct DebuggerControllerPatch:  An entry in the patch (hash) table,
// this should contain all the info that's needed over the course of a
// patch's lifetime.
//
// FREEHASHENTRY entry:  Three ULONGs, this is required
//      by the underlying hashtable implementation
//  DWORD opcode:  A nonzero opcode && address field means that
//      the patch has been applied to something.
//      A patch with a zero'd opcode field means that the patch isn't
//      actually tracking a valid break opcode.  See DebuggerPatchTable
//      for more details.
// DebuggerController *controller:  The controller that put this
//      patch here.
// BOOL fSaveOpcode:  If true, then unapply patch will save
//      a copy of the opcode in opcodeSaved, and apply patch will
//      copy opcodeSaved to opcode rather than grabbing the opcode
//      from the instruction.  This is useful mainly when the JIT
//      has moved code, and we don't want to erroneously pick up the
//      user break instruction.
//      Full story:
//      FJIT moves the code.  Once that's done, it calls Debugger->MoveCode(MethodDesc
//      *) to let us know the code moved.  At that point, unbind all the breakpoints
//      in the method.  Then we whip over all the patches, and re-bind all the
//      patches in the method.  However, we can't guarantee that the code will exist
//      in both the old & new locations exclusively of each other (the method could
//      be 0xFF bytes big, and get moved 0x10 bytes in one direction), so instead of
//      simply re-using the unbind/rebind logic as it is, we need a special case
//      wherein the old method isn't valid.  Instead, we'll copy opcode into
//      opcodeSaved, and then zero out opcode (we need to zero out opcode since that
//      tells us that the patch is invalid, if the right side sees it).  Thus the run-
//      around.
// DebuggerPatchKind: see above
// DWORD opcodeSaved:  Contains an opcode if fSaveOpcode == true
// SIZE_T nVersion:  If the patch is stored by IL offset, then we
//      must also store the version ID so that we know which version
//      this is supposed to be applied to.  Note that this will only
//      be set for DebuggerBreakpoints & DebuggerEnCBreakpoints.  For
//      others, it should be set to DMI_VERSION_INVALID.  For constants,
//      see DebuggerJitInfo
// DebuggerJitInfo dji:  A pointer to the debuggerJitInfo that describes
//      the method (and version) that this patch is applied to.  This field may
//      also have the value DebuggerJitInfo::DMI_VERSION_INVALID

// SIZE_T pid:  Within a given patch table, all patches have a
//      semi-unique ID.  There should be one and only one patch for a given
//      {pid,nVersion} tuple, thus ensuring that we don't duplicate
//      patches from multiple, previous versions.
// AppDomain * pAppDomain:  Either NULL (patch applies to all appdomains
//      that the debugger is attached to)
//      or contains a pointer to an AppDomain object (patch applies only to
//      that A.D.)

// NOTE: due to unkind abuse of type system you cannot add ctor/dtor to this
// type and expect them to be automatically invoked!
struct DebuggerControllerPatch
{
    friend class DebuggerPatchTable;
    friend class DebuggerController;

    FREEHASHENTRY          entry;      // must be first; required by the hashtable implementation
    DebuggerController     *controller;
    DebuggerFunctionKey    key;
    SIZE_T                 offset;     // IL or native offset, per offsetIsIL
    PTR_CORDB_ADDRESS_TYPE address;    // NULL until the patch is bound to code
    FramePointer           fp;
    PRD_TYPE               opcode;     //this name will probably change because it is a misnomer
    BOOL                   fSaveOpcode;
    PRD_TYPE               opcodeSaved;//also a misnomer
    BOOL                   offsetIsIL;
    TraceDestination       trace;
    MethodDesc*            pMethodDescFilter; // used for IL Master patches that should only bind to jitted
                                              // code versions for a single generic instantiation
private:
    int refCount;
    union
    {
        SIZE_T     encVersion; // used for Master patches, to record which EnC version this Master applies to
        DebuggerJitInfo        *dji; // used for Slave and native patches, though only when tracking JIT Info
    };

#ifndef _TARGET_ARM_
    // this is shared among all the skippers for this controller. see the comments
    // right before the definition of SharedPatchBypassBuffer for lifetime info.
    SharedPatchBypassBuffer* m_pSharedPatchBypassBuffer;
#endif // _TARGET_ARM_

public:
    SIZE_T                 pid;
    AppDomain             *pAppDomain;

    BOOL IsNativePatch();
    BOOL IsManagedPatch();
    BOOL IsILMasterPatch();
    BOOL IsILSlavePatch();
    DebuggerPatchKind  GetKind();

    // A patch has DJI if it was created with it or if it has been mapped to a
    // function that has been jitted while JIT tracking was on.  It does not
    // necessarily mean the patch is bound.  ILMaster patches never have DJIs.
    // Patches will never have DJIs if we are not tracking JIT information.
    //
    // Patches can also be unbound, e.g. in UnbindFunctionPatches.  Any DJI gets cleared
    // when the patch is unbound.  This appears to be used as an indicator
    // to Debugger::MapAndBindFunctionPatches to make sure that
    // we don't skip the patch when we get new code.
    BOOL HasDJI()
    {
        // dji shares a union with encVersion, so it is only meaningful for
        // non-master patches.
        return (!IsILMasterPatch() && dji != NULL);
    }

    DebuggerJitInfo *GetDJI()
    {
        _ASSERTE(!IsILMasterPatch());
        return dji;
    }

    // These tell us which EnC version a patch relates to.  They are used
    // to determine if we are mapping a patch to a new version.
    //
    BOOL HasEnCVersion()
    {
        return (IsILMasterPatch() || HasDJI());
    }

    SIZE_T GetEnCVersion()
    {
        _ASSERTE(HasEnCVersion());
        // Master patches record the version directly; others get it from
        // their DJI, falling back to the default version.
        return (IsILMasterPatch() ? encVersion : (HasDJI() ? GetDJI()->m_encVersion : CorDB_DEFAULT_ENC_FUNCTION_VERSION));
    }

    // We set the DJI explicitly after mapping a patch
    // to freshly jitted code or to a new version.  The Unbind/Bind/MovedCode mess
    // for the FJIT will also set the DJI to NULL as an indicator that Debugger::MapAndBindFunctionPatches
    // should not skip the patch.
    void SetDJI(DebuggerJitInfo *newDJI)
    {
        _ASSERTE(!IsILMasterPatch());
        dji = newDJI;
    }

    // A patch is bound if we've mapped it to a real honest-to-goodness
    // native address.
    // Note that we currently activate all patches immediately after binding them, and
    // delete all patches after unactivating them.  This means that the window where
    // a patch is bound but not active is very small (and should always be protected by
    // a lock).  We rely on this correlation in a few places, and ASSERT it explicitly there.
    BOOL IsBound()
    {
        if( address == NULL ) {
            // patch is unbound, cannot be active
            _ASSERTE( PRDIsEmpty(opcode) );
            return FALSE;
        }

        // IL Master patches are never bound.
        _ASSERTE( !IsILMasterPatch() );

        return TRUE;
    }

    // It would be nice if we never needed IsBreakpointPatch or IsStepperPatch,
    // but a few bits of the existing code look at which controller type is involved.
    BOOL IsBreakpointPatch();
    BOOL IsStepperPatch();

    bool IsActivated()
    {
        // Patch is activated if we've stored a non-zero opcode
        // Note: this might be a problem as opcode 0 may be a valid opcode (see issue 366221).
        if( PRDIsEmpty(opcode) ) {
            return FALSE;
        }

        // Patch is active, so it must also be bound
        _ASSERTE( address != NULL );
        return TRUE;
    }

    // refCount == 0 means the table slot is free; > 1 means the patch is
    // currently being triggered by a controller.
    bool IsFree()     {return (refCount == 0);}
    bool IsTriggering() {return (refCount > 1);}

    // Is this patch at a position at which it's safe to take a stack?
    bool IsSafeForStackTrace();

#ifndef _TARGET_ARM_
    // gets a pointer to the shared buffer
    SharedPatchBypassBuffer* GetOrCreateSharedPatchBypassBuffer();

    // entry point for general initialization when the controller is being created
    // (remember: ctors are never run on this type, see NOTE above)
    void Initialize()
    {
        m_pSharedPatchBypassBuffer = NULL;
    }

    // entry point for general cleanup when the controller is being removed from the patch table
    void DoCleanup()
    {
        if (m_pSharedPatchBypassBuffer != NULL)
            m_pSharedPatchBypassBuffer->Release();
    }
#endif // _TARGET_ARM_

private:
    DebuggerPatchKind kind;
};
554
555typedef DPTR(DebuggerControllerPatch) PTR_DebuggerControllerPatch;
556
/* class DebuggerPatchTable:  This is the table that contains
 *  information about the patches (breakpoints) maintained by the
 *  debugger for a variety of purposes.
 *  The only tricky part is that
 *  patches can be hashed either by the address that they're applied to,
 *  or by DebuggerFunctionKey.  If address is equal to zero, then the
 *  patch is hashed by DebuggerFunctionKey.
 *
 *  Patch table inspection scheme:
 *
 *  We have to be able to inspect memory (read/write) from the right
 *  side w/o the help of the left side.  When we do unmanaged debugging,
 *  we need to be able to R/W memory out of a debuggee s.t. the debugger
 *  won't see our patches.  So we have to be able to read our patch table
 *  from the left side, which is problematic since we know that the left
 *  side will be arbitrarily frozen, but we don't know where.
 *
 *  So our scheme is this:
 *  we'll send a pointer to the g_patches table over in startup,
 *  and when we want to inspect it at runtime, we'll freeze the left side,
 *  then read-memory the "data" (m_pcEntries) array over to the right.  We'll
 *  iterate through the array & assume that anything with a non-zero opcode
 *  and address field is valid.  To ensure that the assumption is ok, we
 *  use the zeroing allocator which zeros out newly created space, and
 *  we'll be very careful about zeroing out the opcode field during the
 *  Unapply operation
 *
 *  NOTE: Don't mess with the memory protections on this while the
 *  left side is frozen (i.e., no threads are executing).
 *  WriteMemory depends on being able to write the patchtable back
 *  if it was read successfully.
 */
// Sentinel slot index returned when a patch is not found in the table.
#define DPT_INVALID_SLOT (UINT32_MAX)
// Default trace type for patches added by address.
#define DPT_DEFAULT_TRACE_TYPE TRACE_OTHER

/* Although CHashTableAndData can grow, we always use a fixed number of buckets.
 * This is problematic for tables like the patch table which are usually small, but
 * can become huge.  When the number of entries far exceeds the number of buckets,
 * lookup and addition basically degrade into linear searches.  There is a trade-off
 * here between wasting memory for unused buckets, and performance of large tables.
 * Also note that the number of buckets should be a prime number.
*/
#define DPT_HASH_BUCKETS 1103
600
class DebuggerPatchTable : private CHashTableAndData<CNewZeroData>
{
    VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerPatchTable);

public:
    virtual ~DebuggerPatchTable() = default;

    friend class DebuggerRCThread;
private:
    //incremented so that we can get DPT-wide unique PIDs.
    // pid = Patch ID.
    SIZE_T m_pid;
    // Given a patch, retrieves the correct key.  The return value of this function is passed to Cmp(), Find(), etc.
    SIZE_T Key(DebuggerControllerPatch *patch)
    {
        LIMITED_METHOD_DAC_CONTRACT;

        // Most clients of CHashTable pass a host pointer as the key.   However, the key really could be
        // anything.  In our case, the key can either be a host pointer of type DebuggerFunctionKey or
        // the address of the patch.
        if (patch->address == NULL)
        {
            return (SIZE_T)(&patch->key);
        }
        else
        {
            return (SIZE_T)(dac_cast<TADDR>(patch->address));
        }
    }

    // Given two DebuggerControllerPatches, tells
    // whether they are equal or not.  Does this by comparing the correct
    // key.
    // BYTE* pc1:  If pc2 is hashed by address,
    //      pc1 is an address.  If
    //      pc2 is hashed by DebuggerFunctionKey,
    //      pc1 is a DebuggerFunctionKey
    // Returns FALSE (0) if the two patches have equal keys, nonzero
    // otherwise.  (This follows the CHashTable::Cmp convention, where
    // zero means "match" -- note this is the opposite of what the name
    // might suggest.)
    BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2)
    {
        LIMITED_METHOD_DAC_CONTRACT;

        DebuggerControllerPatch * pPatch2 = dac_cast<PTR_DebuggerControllerPatch>(const_cast<HASHENTRY *>(pc2));

        if (pPatch2->address == NULL)
        {
            // k1 is a host pointer of type DebuggerFunctionKey.
            DebuggerFunctionKey * pKey1 = reinterpret_cast<DebuggerFunctionKey *>(k1);

            return ((pKey1->module != pPatch2->key.module) || (pKey1->md != pPatch2->key.md));
        }
        else
        {
            return ((SIZE_T)(dac_cast<TADDR>(pPatch2->address)) != k1);
        }
    }

    //Computes a hash value based on an address
    ULONG HashAddress(PTR_CORDB_ADDRESS_TYPE address)
    {
        LIMITED_METHOD_DAC_CONTRACT;
        return (ULONG)(SIZE_T)(dac_cast<TADDR>(address));
    }

    //Computes a hash value based on a DebuggerFunctionKey
    ULONG HashKey(DebuggerFunctionKey * pKey)
    {
        SUPPORTS_DAC;
        return HashPtr(pKey->md, pKey->module);
    }

    //Computes a hash value from a patch, using the address field
    // if the patch is hashed by address, using the DebuggerFunctionKey
    // otherwise
    ULONG Hash(DebuggerControllerPatch * pPatch)
    {
        SUPPORTS_DAC;

        if (pPatch->address == NULL)
            return HashKey(&(pPatch->key));
        else
            return HashAddress(pPatch->address);
    }
    //Public Members
public:
    enum {
        DCP_PID_INVALID,        // pid value that no real patch ever has
        DCP_PID_FIRST_VALID,    // first pid handed out by the table
    };

#ifndef DACCESS_COMPILE

    DebuggerPatchTable() : CHashTableAndData<CNewZeroData>(DPT_HASH_BUCKETS) { }

    HRESULT Init()
    {
        WRAPPER_NO_CONTRACT;

        m_pid = DCP_PID_FIRST_VALID;

        SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE;
        return NewInit(17, sizeof(DebuggerControllerPatch), 101);
    }

    // Assuming that the chain of patches (as defined by all the
    // GetNextPatch from this patch) are either sorted or NULL, take the given
    // patch (which should be the first patch in the chain).  This
    // is called by AddPatch to make sure that the order of the
    // patches is what we want for things like E&C, DePatchSkips,etc.
    void SortPatchIntoPatchList(DebuggerControllerPatch **ppPatch);

    void SpliceOutOfList(DebuggerControllerPatch *patch);

    void SpliceInBackOf(DebuggerControllerPatch *patchAppend,
                        DebuggerControllerPatch *patchEnd);

    //
    // Note that patches may be reallocated - do not keep a pointer to a patch.
    //
    DebuggerControllerPatch *AddPatchForMethodDef(DebuggerController *controller,
                                      Module *module,
                                      mdMethodDef md,
                                      MethodDesc *pMethodDescFilter,
                                      size_t offset,
                                      BOOL offsetIsIL,
                                      DebuggerPatchKind kind,
                                      FramePointer fp,
                                      AppDomain *pAppDomain,
                                      SIZE_T masterEnCVersion,
                                      DebuggerJitInfo *dji);

    DebuggerControllerPatch *AddPatchForAddress(DebuggerController *controller,
                                      MethodDesc *fd,
                                      size_t offset,
                                      DebuggerPatchKind kind,
                                      CORDB_ADDRESS_TYPE *address,
                                      FramePointer fp,
                                      AppDomain *pAppDomain,
                                      DebuggerJitInfo *dji = NULL,
                                      SIZE_T pid = DCP_PID_INVALID,
                                      TraceType traceType = DPT_DEFAULT_TRACE_TYPE);

    // Set the native address for this patch.
    void BindPatch(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address);
    void UnbindPatch(DebuggerControllerPatch *patch);
    void RemovePatch(DebuggerControllerPatch *patch);

    // This is a sad legacy workaround. The patch table (implemented as this
    // class) is shared across process. We publish the runtime offsets of
    // some key fields. Since those fields are private, we have to provide
    // accessors here. So if you're not using these functions, don't start.
    // We can hopefully remove them.
    static SIZE_T GetOffsetOfEntries()
    {
        // assert that the base-class subobject lives at offset 0, so that
        // the offset of the entries field in the base class is the same as
        // its offset within this class.
        _ASSERTE((void*)(DebuggerPatchTable*)NULL == (void*)(CHashTableAndData<CNewZeroData>*)NULL);
        return helper_GetOffsetOfEntries();
    }

    static SIZE_T GetOffsetOfCount()
    {
        _ASSERTE((void*)(DebuggerPatchTable*)NULL == (void*)(CHashTableAndData<CNewZeroData>*)NULL);
        return helper_GetOffsetOfCount();
    }

    // GetPatch find the first patch in the hash table
    // that is hashed by matching the {Module,mdMethodDef} to the
    // patch's DebuggerFunctionKey.  This will NOT find anything
    // hashed by address, even if that address is within the
    // method specified.
    // You can use GetNextPatch to iterate through all the patches keyed by
    // this Module,mdMethodDef pair
    DebuggerControllerPatch *GetPatch(Module *module, mdToken md)
    {
        DebuggerFunctionKey key;

        key.module = module;
        key.md = md;

        return reinterpret_cast<DebuggerControllerPatch *>(Find(HashKey(&key), (SIZE_T)&key));
    }
#endif // #ifndef DACCESS_COMPILE

    // GetPatch will translate find the first patch in the hash
    // table that is hashed by address.  It will NOT find anything hashed
    // by {Module,mdMethodDef}, or by MethodDesc.
    DebuggerControllerPatch * GetPatch(PTR_CORDB_ADDRESS_TYPE address)
    {
        SUPPORTS_DAC;
        // On ARM, code addresses must carry the Thumb bit.
        ARM_ONLY(_ASSERTE(dac_cast<DWORD>(address) & THUMB_CODE));

        DebuggerControllerPatch * pPatch =
            dac_cast<PTR_DebuggerControllerPatch>(Find(HashAddress(address), (SIZE_T)(dac_cast<TADDR>(address))));

        return pPatch;
    }

    DebuggerControllerPatch *GetNextPatch(DebuggerControllerPatch *prev);

    // Find the first patch in the patch table, and store
    //      index info in info.  Along with GetNextPatch, this can
    //      iterate through the whole patch table.  Note that since the
    //      hashtable operates via iterating through all the contents
    //      of all the buckets, if you add an entry while iterating
    //      through the table, you may or may not iterate across
    //      the new entries.  You will iterate through all the entries
    //      that were present at the beginning of the run.  You
    //      safely delete anything you've already iterated by, anything
    //      else is kinda risky.
    DebuggerControllerPatch * GetFirstPatch(HASHFIND * pInfo)
    {
        SUPPORTS_DAC;

        return dac_cast<PTR_DebuggerControllerPatch>(FindFirstEntry(pInfo));
    }

    // Along with GetFirstPatch, this can iterate through
    //      the whole patch table.  See GetFirstPatch for more info
    //      on the rules of iterating through the table.
    DebuggerControllerPatch * GetNextPatch(HASHFIND * pInfo)
    {
        SUPPORTS_DAC;

        return dac_cast<PTR_DebuggerControllerPatch>(FindNextEntry(pInfo));
    }

    // Used by DebuggerController to translate an index
    //      of a patch into a direct pointer.
    inline HASHENTRY * GetEntryPtr(ULONG iEntry)
    {
        SUPPORTS_DAC;

        return EntryPtr(iEntry);
    }

    // Used by DebuggerController to grab indices of patches
    //      rather than holding direct pointers to them.
    inline ULONG GetItemIndex(HASHENTRY * p)
    {
        SUPPORTS_DAC;

        return ItemIndex(p);
    }

#ifdef _DEBUG_PATCH_TABLE
public:
    // DEBUG An internal debugging routine, it iterates
    //      through the hashtable, stopping at every
    //      single entry, no matter what it's state.  For this to
    //      compile, you're going to have to add friend status
    //      of this class to CHashTableAndData in
    //      to $\Com99\Src\inc\UtilCode.h
    void CheckPatchTable();
#endif // _DEBUG_PATCH_TABLE

    // Count how many patches are in the table.
    // Use for asserts
    int GetNumberOfPatches();

};
862
863typedef VPTR(class DebuggerPatchTable) PTR_DebuggerPatchTable;
864
865
866#if !defined(DACCESS_COMPILE)
867
// DebuggerControllerPage|Will eventually be used for
// 'break when modified' behaviour.
typedef struct DebuggerControllerPage
{
    DebuggerControllerPage *next;       // next node in a singly-linked list of pages
    const BYTE *start, *end;            // address range covered by this entry
    DebuggerController *controller;     // owning controller (see AddProtection/RemoveProtection)
    bool readable;                      // NOTE(review): presumably the desired read-protection
                                        // state of the range - confirm against AddProtection callers
} DebuggerControllerPage;
877
// DEBUGGER_CONTROLLER_TYPE: Identifies the type of the controller.
// It exists b/c we have RTTI turned off.
// Note that the order of these is important - SortPatchIntoPatchList
// relies on this ordering; do not reorder or add values without
// updating that function.
//
// DEBUGGER_CONTROLLER_STATIC|Base class response. Should never be
// seen, since we shouldn't be asking the base class about this.
// DEBUGGER_CONTROLLER_BREAKPOINT|DebuggerBreakpoint
// DEBUGGER_CONTROLLER_STEPPER|DebuggerStepper
// DEBUGGER_CONTROLLER_THREAD_STARTER|DebuggerThreadStarter
// DEBUGGER_CONTROLLER_ENC|DebuggerEnCBreakpoint
// DEBUGGER_CONTROLLER_PATCH_SKIP|DebuggerPatchSkip
// DEBUGGER_CONTROLLER_JMC_STEPPER|DebuggerJMCStepper - steps through Just-My-Code
// DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION|DebuggerContinuableExceptionBreakpoint
enum DEBUGGER_CONTROLLER_TYPE
{
    DEBUGGER_CONTROLLER_THREAD_STARTER,
    DEBUGGER_CONTROLLER_ENC,
    DEBUGGER_CONTROLLER_ENC_PATCH_TO_SKIP,  // At any one address,
                                            // There can be only one!
    DEBUGGER_CONTROLLER_PATCH_SKIP,
    DEBUGGER_CONTROLLER_BREAKPOINT,
    DEBUGGER_CONTROLLER_STEPPER,
    DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE,
    DEBUGGER_CONTROLLER_USER_BREAKPOINT,    // UserBreakpoints are used by Runtime threads to
                                            // send that they've hit a user breakpoint to the Right Side.
    DEBUGGER_CONTROLLER_JMC_STEPPER,        // Stepper that only stops in JMC-functions.
    DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION,
    DEBUGGER_CONTROLLER_DATA_BREAKPOINT,
    DEBUGGER_CONTROLLER_STATIC,
};
909
// TP_RESULT: result of a controller's TriggerPatch callback; tells the
// dispatch logic (see DispatchPatchOrSingleStep, aka DPOSS) how to proceed.
enum TP_RESULT
{
    TPR_TRIGGER,            // This controller wants to SendEvent
    TPR_IGNORE,             // This controller doesn't want to SendEvent
    TPR_TRIGGER_ONLY_THIS,  // This, and only this controller, should be triggered.
                            // Right now, only the DebuggerEnCRemap controller
                            // returns this, the remap patch should be the first
                            // patch in the list.
    TPR_TRIGGER_ONLY_THIS_AND_LOOP,
                            // This, and only this controller, should be triggered.
                            // Right now, only the DebuggerEnCRemap controller
                            // returns this, the remap patch should be the first
                            // patch in the list.
                            // After triggering this, DPOSS should skip the
                            // ActivatePatchSkip call, so we hit the other
                            // breakpoints at this location.
    TPR_IGNORE_AND_STOP,    // Don't SendEvent, and stop asking other
                            // controllers if they want to.
                            // Service any previous triggered controllers.
};
930
// SCAN_TRIGGER: which kinds of debug events the dispatch routines
// (ScanForTriggers / DispatchPatchOrSingleStep) should look for.
enum SCAN_TRIGGER
{
    ST_PATCH        = 0x1,  // Only look for patches
    ST_SINGLE_STEP  = 0x2,  // Look for patches, and single-steps.
};
936
// TRIGGER_WHY: passed to TriggerPatch to indicate why the patch is
// being triggered.
enum TRIGGER_WHY
{
    TY_NORMAL        = 0x0,
    TY_SHORT_CIRCUIT = 0x1, // EnC short circuit - see DispatchPatchOrSingleStep
};
942
// the return value for DebuggerController::DispatchPatchOrSingleStep
enum DPOSS_ACTION
{
    // the following enum has been carefully ordered to optimize the helper
    // functions below (e.g. IsInUsedAction relies on the two "used" states
    // comparing highest). Do not re-order them w/o changing the helper funcs.
    DPOSS_INVALID            = 0x0, // invalid action value
    DPOSS_DONT_CARE          = 0x1, // don't care about this exception
    DPOSS_USED_WITH_NO_EVENT = 0x2, // Care about this exception but won't send event to RS
    DPOSS_USED_WITH_EVENT    = 0x3, // Care about this exception and will send event to RS
};
953
954// helper function
955inline bool IsInUsedAction(DPOSS_ACTION action)
956{
957 _ASSERTE(action != DPOSS_INVALID);
958 return (action >= DPOSS_USED_WITH_NO_EVENT);
959}
960
// Debug-only sanity check (Win64, non-PAL builds): asserts that 'address'
// points into committed, executable memory. Compiles to a no-op in all
// other configurations.
inline void VerifyExecutableAddress(const BYTE* address)
{
// TODO: when can we apply this to x86?
#if defined(_WIN64)
#if defined(_DEBUG)
#ifndef FEATURE_PAL
    MEMORY_BASIC_INFORMATION mbi;

    // ClrVirtualQuery returns the number of bytes written into mbi;
    // a full-sized result means the query succeeded.
    if (sizeof(mbi) == ClrVirtualQuery(address, &mbi, sizeof(mbi)))
    {
        if (!(mbi.State & MEM_COMMIT))
        {
            STRESS_LOG1(LF_GCROOTS, LL_ERROR, "VerifyExecutableAddress: address is uncommited memory, address=0x%p", address);
            CONSISTENCY_CHECK_MSGF((mbi.State & MEM_COMMIT), ("VEA: address (0x%p) is uncommited memory.", address));
        }

        // Any of the four execute protections is acceptable.
        if (!(mbi.Protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY)))
        {
            STRESS_LOG1(LF_GCROOTS, LL_ERROR, "VerifyExecutableAddress: address is not executable, address=0x%p", address);
            CONSISTENCY_CHECK_MSGF((mbi.Protect & (PAGE_EXECUTE | PAGE_EXECUTE_READ | PAGE_EXECUTE_READWRITE | PAGE_EXECUTE_WRITECOPY)),
                ("VEA: address (0x%p) is not on an executable page.", address));
        }
    }
#endif // !FEATURE_PAL
#endif // _DEBUG
#endif // _WIN64
}
988
989#endif // !DACCESS_COMPILE
990
991
// DebuggerController: DebuggerController serves
// both as a static class that dispatches exceptions coming from the
// EE, and as an abstract base class for the classes that derive
// from it.
class DebuggerController
{
    VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerController);

#if !defined(DACCESS_COMPILE)

    // Needs friendship for lock because of EnC locking workarounds.
    friend class DebuggerEnCBreakpoint;

    friend class DebuggerPatchSkip;
    friend class DebuggerRCThread; // so we can get offsets of fields the
                                   // right side needs to read
    friend class Debugger; // So Debugger can lock, use, unlock the patch
                           // table in MapAndBindFunctionBreakpoints
    friend void Debugger::UnloadModule(Module* pRuntimeModule, AppDomain *pAppDomain);

    //
    // Static functionality
    //

public:

    // RAII holder for the controller lock (g_criticalSection).
    class ControllerLockHolder : public CrstHolder
    {
    public:
        ControllerLockHolder() : CrstHolder(&g_criticalSection) { WRAPPER_NO_CONTRACT; }
    };

    static HRESULT Initialize();

    // Remove and cleanup all DebuggerControllers for detach
    static void DeleteAllControllers();

    //
    // global event dispatching functionality
    //


    // Controllers are notified when they enter/exit func-evals (on their same thread,
    // or on any thread if the controller doesn't have a thread).
    // The original use for this was to allow steppers to skip through func-evals.
    // thread - the thread doing the funceval.
    static void DispatchFuncEvalEnter(Thread * thread);
    static void DispatchFuncEvalExit(Thread * thread);

    // Main entry point: route a native exception (breakpoint, single-step, ...)
    // to the controller infrastructure. Returns whether it was handled.
    static bool DispatchNativeException(EXCEPTION_RECORD *exception,
                                        CONTEXT *context,
                                        DWORD code,
                                        Thread *thread);

    static bool DispatchUnwind(Thread *thread,
                               MethodDesc *fd, DebuggerJitInfo * pDJI, SIZE_T offset,
                               FramePointer handlerFP,
                               CorDebugStepReason unwindReason);

    static bool DispatchTraceCall(Thread *thread,
                                  const BYTE *address);

    // Recover the original opcode hidden underneath a patch at 'address'.
    static PRD_TYPE GetPatchedOpcode(CORDB_ADDRESS_TYPE *address);

    static BOOL CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode);

    // pIP is the ip right after the prolog of the method we've entered.
    // fp is the frame pointer for that method.
    static void DispatchMethodEnter(void * pIP, FramePointer fp);


    // Delete any patches that exist for a specific module and optionally a specific AppDomain.
    // If pAppDomain is specified, then only patches tied to the specified AppDomain are
    // removed. If pAppDomain is null, then all patches for the module are removed.
    static void RemovePatchesFromModule( Module* pModule, AppDomain* pAppdomain );

    // Check whether there are any patches in the patch table for the specified module.
    static bool ModuleHasPatches( Module* pModule );

#if EnC_SUPPORTED
    static DebuggerControllerPatch *IsXXXPatched(const BYTE *eip,
                                                 DEBUGGER_CONTROLLER_TYPE dct);

    static DebuggerControllerPatch *GetEnCPatch(const BYTE *address);
#endif //EnC_SUPPORTED

    static DPOSS_ACTION ScanForTriggers(CORDB_ADDRESS_TYPE *address,
                                        Thread *thread,
                                        CONTEXT *context,
                                        DebuggerControllerQueue *pDcq,
                                        SCAN_TRIGGER stWhat,
                                        TP_RESULT *pTpr);


    static DebuggerPatchSkip *ActivatePatchSkip(Thread *thread,
                                               const BYTE *eip,
                                               BOOL fForEnC);


    static DPOSS_ACTION DispatchPatchOrSingleStep(Thread *thread,
                                                  CONTEXT *context,
                                                  CORDB_ADDRESS_TYPE *ip,
                                                  SCAN_TRIGGER which);


    // Number of patches in the global patch table; 0 if the table doesn't
    // exist yet. For asserts/diagnostics.
    static int GetNumberOfPatches()
    {
        if (g_patches == NULL)
            return 0;

        return g_patches->GetNumberOfPatches();
    }

    static int GetTotalMethodEnter() {LIMITED_METHOD_CONTRACT; return g_cTotalMethodEnter; }

#if defined(_DEBUG)
    // Debug check that we only have 1 thread-starter per thread.
    // Check this new one against all existing ones.
    static void EnsureUniqueThreadStarter(DebuggerThreadStarter * pNew);
#endif
    // If we have a thread-starter on the given EE thread, make sure it's canceled.
    // Thread-Starters normally delete themselves when they fire. But if the EE
    // destroys the thread before it fires, then we'd still have an active DTS.
    static void CancelOutstandingThreadStarter(Thread * pThread);

    static void AddRef(DebuggerControllerPatch *patch);
    static void Release(DebuggerControllerPatch *patch);

private:

    static bool MatchPatch(Thread *thread, CONTEXT *context,
                           DebuggerControllerPatch *patch);

    // Returns TRUE if we should continue to dispatch after this exception
    // hook.
    static BOOL DispatchExceptionHook(Thread *thread, CONTEXT *context,
                                      EXCEPTION_RECORD *exception);

protected:
#ifdef _DEBUG
    // Does the current thread hold the controller lock?
    static bool HasLock()
    {
        return g_criticalSection.OwnedByCurrentThread() != 0;
    }
#endif

#endif // !DACCESS_COMPILE

private:
    SPTR_DECL(DebuggerPatchTable, g_patches);
    SVAL_DECL(BOOL, g_patchTableValid);

#if !defined(DACCESS_COMPILE)

private:
    static DebuggerControllerPage *g_protections;
    static DebuggerController *g_controllers;

    // This is the "Controller" lock. It synchronizes the controller infrastructure.
    // It is smaller than the debugger lock, but larger than the debugger-data lock.
    // It needs to be taken in execution-control related callbacks; and will also call
    // back into the EE when held (most notably for the stub-managers; but also for various
    // query operations).
    static CrstStatic g_criticalSection;

    // Write is protected by both Debugger + Controller Lock
    static int g_cTotalMethodEnter;

    static bool BindPatch(DebuggerControllerPatch *patch,
                          MethodDesc *fd,
                          CORDB_ADDRESS_TYPE *startAddr);
    static bool ApplyPatch(DebuggerControllerPatch *patch);
    static bool UnapplyPatch(DebuggerControllerPatch *patch);
    static void UnapplyPatchAt(DebuggerControllerPatch *patch, CORDB_ADDRESS_TYPE *address);
    static bool IsPatched(CORDB_ADDRESS_TYPE *address, BOOL native);

    static void ActivatePatch(DebuggerControllerPatch *patch);
    static void DeactivatePatch(DebuggerControllerPatch *patch);

    static void ApplyTraceFlag(Thread *thread);
    static void UnapplyTraceFlag(Thread *thread);

    virtual void DebuggerDetachClean();

public:
    static const BYTE *g_pMSCorEEStart, *g_pMSCorEEEnd;

    static const BYTE *GetILPrestubDestination(const BYTE *prestub);
    static const BYTE *GetILFunctionCode(MethodDesc *fd);

    //
    // Non-static functionality
    //

public:

    DebuggerController(Thread * pThread, AppDomain * pAppDomain);
    virtual ~DebuggerController();
    void Delete();
    bool IsDeleted() { return m_deleted; }

#endif // !DACCESS_COMPILE


    // Return the pointer g_patches.
    // Access to patch table for the RC threads (EE,DI)
    // Why: The right side needs to know the address of the patch
    // table (which never changes after it gets created) so that ReadMemory,
    // WriteMemory can work from out-of-process. This should only be used
    // when the Runtime Controller is starting up, and not thereafter.
    // How: return g_patches;
public:
    static DebuggerPatchTable * GetPatchTable() {LIMITED_METHOD_DAC_CONTRACT; return g_patches; }
    static BOOL GetPatchTableValid() {LIMITED_METHOD_DAC_CONTRACT; return g_patchTableValid; }

#if !defined(DACCESS_COMPILE)
    static BOOL *GetPatchTableValidAddr() {LIMITED_METHOD_CONTRACT; return &g_patchTableValid; }

    // Is there a patch at addr?
    // We sometimes want to use this version of the method
    // (as opposed to IsPatched) because there is
    // a race condition wherein a patch can be added to the table, we can
    // ask about it, and then we can actually apply the patch.
    // How: If the patch table contains a patch at that address, there
    // is.
    static bool IsAddressPatched(CORDB_ADDRESS_TYPE *address)
    {
        return (g_patches->GetPatch(address) != NULL);
    }

    //
    // Event setup
    //

    Thread *GetThread() { return m_thread; }

    // This one should be made private
    BOOL AddBindAndActivateILSlavePatch(DebuggerControllerPatch *master,
                                        DebuggerJitInfo *dji);

    BOOL AddILPatch(AppDomain * pAppDomain, Module *module,
                    mdMethodDef md,
                    MethodDesc* pMethodFilter,
                    SIZE_T encVersion, // what encVersion does this apply to?
                    SIZE_T offset,
                    BOOL offsetIsIL);

    // The next two are very similar. Both work on offsets,
    // but one takes a "patch id". I don't think these are really needed: the
    // controller itself can act as the id of the patch.
    BOOL AddBindAndActivateNativeManagedPatch(
                  MethodDesc * fd,
                  DebuggerJitInfo *dji,
                  SIZE_T offset,
                  FramePointer fp,
                  AppDomain *pAppDomain);

    // Add a patch at the start of a not-yet-jitted method.
    void AddPatchToStartOfLatestMethod(MethodDesc * fd);


    // This version is particularly useful b/c it doesn't assume that the
    // patch is inside a managed method.
    DebuggerControllerPatch *AddAndActivateNativePatchForAddress(CORDB_ADDRESS_TYPE *address,
                                                                 FramePointer fp,
                                                                 bool managed,
                                                                 TraceType traceType);



    bool PatchTrace(TraceDestination *trace, FramePointer fp, bool fStopInUnmanaged);

    void AddProtection(const BYTE *start, const BYTE *end, bool readable);
    void RemoveProtection(const BYTE *start, const BYTE *end, bool readable);

    static BOOL IsSingleStepEnabled(Thread *pThread);
    bool IsSingleStepEnabled();
    void EnableSingleStep();
    static void EnableSingleStep(Thread *pThread);

    void DisableSingleStep();

    void EnableExceptionHook();
    void DisableExceptionHook();

    void EnableUnwind(FramePointer frame);
    void DisableUnwind();
    FramePointer GetUnwind();

    void EnableTraceCall(FramePointer fp);
    void DisableTraceCall();

    bool IsMethodEnterEnabled();
    void EnableMethodEnter();
    void DisableMethodEnter();

    // Turn off every trigger this controller has enabled.
    void DisableAll();

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
    { return DEBUGGER_CONTROLLER_STATIC; }

    // Return true iff this is one of the stepper types.
    // if true, we can safely cast this controller to a DebuggerStepper*.
    inline bool IsStepperDCType()
    {
        DEBUGGER_CONTROLLER_TYPE e = this->GetDCType();
        return (e == DEBUGGER_CONTROLLER_STEPPER) || (e == DEBUGGER_CONTROLLER_JMC_STEPPER);
    }

    void Enqueue();
    void Dequeue();

private:
    // Helper function that is called on each virtual trace call target to set a trace patch
    static void PatchTargetVisitor(TADDR pVirtualTraceCallTarget, VOID* pUserData);

    DebuggerControllerPatch *AddILMasterPatch(Module *module,
                                              mdMethodDef md,
                                              MethodDesc *pMethodDescFilter,
                                              SIZE_T offset,
                                              BOOL offsetIsIL,
                                              SIZE_T encVersion);

    BOOL AddBindAndActivatePatchForMethodDesc(MethodDesc *fd,
                                              DebuggerJitInfo *dji,
                                              SIZE_T nativeOffset,
                                              DebuggerPatchKind kind,
                                              FramePointer fp,
                                              AppDomain *pAppDomain);


protected:

    //
    // Target event handlers
    //


    // Notify a controller that a func-eval is starting/ending on the given thread.
    // If a controller's m_thread!=NULL, then it is only notified of func-evals on
    // its thread.
    // Controllers don't need to Enable anything to get this, and most controllers
    // can ignore it.
    virtual void TriggerFuncEvalEnter(Thread * thread);
    virtual void TriggerFuncEvalExit(Thread * thread);

    virtual TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                                   Thread *thread,
                                   TRIGGER_WHY tyWhy);

    // Dispatched when we get a SingleStep exception on this thread.
    // Return true if we want SendEvent to get called.

    virtual bool TriggerSingleStep(Thread *thread, const BYTE *ip);


    // Dispatched to notify the controller when we are going to a filter/handler
    // that's in the stepper's current frame or above (a caller frame).
    // 'desc' & 'offset' are the location of the filter/handler (ie, this is where
    // execution will continue)
    // 'frame' points into the stack at the return address for the function w/ the handler.
    // If (frame > m_unwindFP) then the filter/handler is in a caller, else
    // it's in the same function as the current stepper (It's not in a child because
    // we don't dispatch in that case).
    virtual void TriggerUnwind(Thread *thread, MethodDesc *fd, DebuggerJitInfo * pDJI,
                               SIZE_T offset, FramePointer fp,
                               CorDebugStepReason unwindReason);

    virtual void TriggerTraceCall(Thread *thread, const BYTE *ip);
    virtual TP_RESULT TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
                                           EXCEPTION_RECORD *exception);

    // Trigger when we've entered a method
    // thread - current thread
    // desc - the method that we've entered
    // ip - the address after the prolog. A controller can patch this address.
    //      To stop in this method.
    // Returns true if the trigger will disable itself from further method entry
    // triggers else returns false (passing through a cctor can cause this).
    // A controller can't block in this trigger! It can only update state / set patches
    // and then return.
    virtual void TriggerMethodEnter(Thread * thread,
                                    DebuggerJitInfo *dji,
                                    const BYTE * ip,
                                    FramePointer fp);


    // Send the managed debug event.
    // This is called after TriggerPatch/TriggerSingleStep actually trigger.
    // Note this can have a strange interaction with SetIp. Specifically this thread:
    // 1) may call TriggerXYZ which queues the controller for send event.
    // 2) blocks on the debugger lock (in which case SetIp may get invoked on it)
    // 3) then sends the event
    // If SetIp gets invoked at step 2, the thread's IP may have changed such that it should no
    // longer trigger. Eg, perhaps we were about to send a breakpoint, and then SetIp moved us off
    // the bp. So we pass in an extra flag, fInteruptedBySetIp, to let the controller decide how to handle this.
    // Since SetIP only works within a single function, this can only be an issue if a thread's current stopping
    // location and the patch it set are in the same function. (So this could happen for step-over, but never
    // step-out).
    // This flag will almost always be false.
    //
    // Once we actually send the event, we're under the debugger lock, and so the world is stable underneath us.
    // But the world may change underneath a thread between when SendEvent gets queued and by the time it's actually called.
    // So SendIPCEvent may need to do some last-minute sanity checking (like the SetIP case) to ensure it should
    // still send.
    //
    // Returns true if we sent an event, false otherwise.
    virtual bool SendEvent(Thread *thread, bool fInteruptedBySetIp);

    AppDomain *m_pAppDomain;

private:

    Thread *m_thread;
    DebuggerController *m_next;
    bool m_singleStep;
    bool m_exceptionHook;
    bool m_traceCall;
protected:
    FramePointer m_traceCallFP;
private:
    FramePointer m_unwindFP;
    int m_eventQueuedCount;
    bool m_deleted;      // set by Delete(); queried via IsDeleted()
    bool m_fEnableMethodEnter;

#endif // !DACCESS_COMPILE
};
1419
1420
1421#if !defined(DACCESS_COMPILE)
1422
1423/* ------------------------------------------------------------------------- *
1424 * DebuggerPatchSkip routines
1425 * ------------------------------------------------------------------------- */
1426
// DebuggerPatchSkip: controller used to get execution past an address
// that currently has a patch on it. The original instruction is copied
// out of line (see CopyInstructionBlock / GetBypassAddress) so the patch
// itself can stay in place.
class DebuggerPatchSkip : public DebuggerController
{
    friend class DebuggerController;

    DebuggerPatchSkip(Thread *thread,
                      DebuggerControllerPatch *patch,
                      AppDomain *pAppDomain);

    ~DebuggerPatchSkip();

    bool TriggerSingleStep(Thread *thread,
                           const BYTE *ip);

    TP_RESULT TriggerExceptionHook(Thread *thread, CONTEXT * pContext,
                                   EXCEPTION_RECORD *exception);

    TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                           Thread *thread,
                           TRIGGER_WHY tyWhy);

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType(void)
    { return DEBUGGER_CONTROLLER_PATCH_SKIP; }

    // Copy the instruction being skipped into the bypass buffer.
    void CopyInstructionBlock(BYTE *to, const BYTE* from);

    // Analyze the instruction being skipped and fill in m_instrAttrib.
    void DecodeInstruction(CORDB_ADDRESS_TYPE *code);

    void DebuggerDetachClean();

    CORDB_ADDRESS_TYPE *m_address;
    int m_iOrigDisp;                    // the original displacement of a relative call or jump
    InstructionAttribute m_instrAttrib; // info about the instruction being skipped over
#ifndef _TARGET_ARM_
    // this is shared among all the skippers and the controller. see the comments
    // right before the definition of SharedPatchBypassBuffer for lifetime info.
    SharedPatchBypassBuffer *m_pSharedPatchBypassBuffer;

public:
    // Address of the out-of-line copy of the instruction being skipped.
    CORDB_ADDRESS_TYPE *GetBypassAddress()
    {
        _ASSERTE(m_pSharedPatchBypassBuffer);
        BYTE* patchBypass = m_pSharedPatchBypassBuffer->PatchBypass;
        return (CORDB_ADDRESS_TYPE *)patchBypass;
    }
#endif // _TARGET_ARM_
};
1473
1474/* ------------------------------------------------------------------------- *
1475 * DebuggerBreakpoint routines
1476 * ------------------------------------------------------------------------- */
1477
// DebuggerBreakpoint:
// DBp represents a user-placed breakpoint, and when Triggered, will
// always want to be activated, whereupon it will inform the right side of
// being hit.
class DebuggerBreakpoint : public DebuggerController
{
public:
    // Note: the m_offset/m_native parameter names are historical; they are
    // plain constructor parameters, not members.
    DebuggerBreakpoint(Module *module,
                       mdMethodDef md,
                       AppDomain *pAppDomain,
                       SIZE_T m_offset,     // offset of the bp (IL or native - see m_native)
                       bool m_native,
                       SIZE_T ilEnCVersion, // must give the EnC version for non-native bps
                       MethodDesc *nativeMethodDesc, // must be non-null when m_native, null otherwise
                       DebuggerJitInfo *nativeJITInfo, // optional when m_native, null otherwise
                       bool nativeCodeBindAllVersions,
                       BOOL *pSucceed       // [out] whether the bp was set up successfully
                       );

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
    { return DEBUGGER_CONTROLLER_BREAKPOINT; }

private:

    TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                           Thread *thread,
                           TRIGGER_WHY tyWhy);
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
};
1507
1508// * ------------------------------------------------------------------------ *
1509// * DebuggerStepper routines
1510// * ------------------------------------------------------------------------ *
1511//
1512
// DebuggerStepper: This subclass of DebuggerController will
// be instantiated to create a "Step" operation, meaning that execution
// should continue until a range of IL code is exited.
class DebuggerStepper : public DebuggerController
{
public:
    DebuggerStepper(Thread *thread,
                    CorDebugUnmappedStop rgfMappingStop,
                    CorDebugIntercept interceptStop,
                    AppDomain *appDomain);
    ~DebuggerStepper();

    bool Step(FramePointer fp, bool in,
              COR_DEBUG_STEP_RANGE *range, SIZE_T cRange, bool rangeIL);
    void StepOut(FramePointer fp, StackTraceTicket ticket);

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
    { return DEBUGGER_CONTROLLER_STEPPER; }

    // MoveToCurrentVersion makes sure that the stepper is prepared to
    // operate within the version of the code specified by djiNew.
    // Currently, this means to map the ranges into the ranges of the djiNew.
    // Idempotent.
    void MoveToCurrentVersion( DebuggerJitInfo *djiNew);

    // Public & Polymorphic on flavor (traditional vs. JMC).

    // Regular steppers want to EnableTraceCall; and JMC-steppers want to EnableMethodEnter.
    // (They're very related - they both stop at the next "interesting" managed code run).
    // So we just gloss over the difference w/ some polymorphism.
    virtual void EnablePolyTraceCall();

protected:
    // Steppers override these so that they can skip func-evals.
    void TriggerFuncEvalEnter(Thread * thread);
    void TriggerFuncEvalExit(Thread * thread);

    bool TrapStepInto(ControllerStackInfo *info,
                      const BYTE *ip,
                      TraceDestination *pTD);

    bool TrapStep(ControllerStackInfo *info, bool in);

    // @todo - must remove that fForceTraditional flag. Need a way for a JMC stepper
    // to do a Trad step out.
    void TrapStepOut(ControllerStackInfo *info, bool fForceTraditional = false);

    // Polymorphic on flavor (Traditional vs. Just-My-Code)
    virtual void TrapStepNext(ControllerStackInfo *info);
    virtual bool TrapStepInHelper(ControllerStackInfo * pInfo,
                                  const BYTE * ipCallTarget,
                                  const BYTE * ipNext,
                                  bool fCallingIntoFunclet);
    virtual bool IsInterestingFrame(FrameInfo * pFrame);
    virtual bool DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo);


    // DetectHandleInterceptors will figure out if the current
    // frame is inside an interceptor, and if we're not interested in that
    // interceptor, it will set a breakpoint outside it so that we can
    // run to after the interceptor.
    virtual bool DetectHandleInterceptors(ControllerStackInfo *info);

    // This function checks whether the given IP is in an LCG method. If so, it enables
    // JMC and does a step out. This effectively makes sure that we never stop in an LCG method.
    BOOL DetectHandleLCGMethods(const PCODE ip, MethodDesc * pMD, ControllerStackInfo * pInfo);

    bool IsAddrWithinFrame(DebuggerJitInfo *dji,
                           MethodDesc* pMD,
                           const BYTE* currentAddr,
                           const BYTE* targetAddr);

    // x86 shouldn't need to call this method directly.
    // We should call IsAddrWithinFrame() on x86 instead.
    // That's why I use a name with the word "funclet" in it to scare people off.
    bool IsAddrWithinMethodIncludingFunclet(DebuggerJitInfo *dji,
                                            MethodDesc* pMD,
                                            const BYTE* targetAddr);

    // ShouldContinueStep returns false if the DebuggerStepper should stop
    // execution and inform the right side. Returns true if the next
    // breakpoint should be set, and execution allowed to continue.
    bool ShouldContinueStep( ControllerStackInfo *info, SIZE_T nativeOffset );

    // IsInRange returns true if the given IL offset is inside of
    // any of the COR_DEBUG_STEP_RANGE structures given by range.
    bool IsInRange(SIZE_T offset, COR_DEBUG_STEP_RANGE *range, SIZE_T rangeCount,
                   ControllerStackInfo *pInfo = NULL);
    bool IsRangeAppropriate(ControllerStackInfo *info);



    TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                           Thread *thread,
                           TRIGGER_WHY tyWhy);
    bool TriggerSingleStep(Thread *thread, const BYTE *ip);
    void TriggerUnwind(Thread *thread, MethodDesc *fd, DebuggerJitInfo * pDJI,
                       SIZE_T offset, FramePointer fp,
                       CorDebugStepReason unwindReason);
    void TriggerTraceCall(Thread *thread, const BYTE *ip);
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp);


    virtual void TriggerMethodEnter(Thread * thread, DebuggerJitInfo * dji, const BYTE * ip, FramePointer fp);


    void ResetRange();

    // Given a set of IL ranges, convert them to native and cache them.
    bool SetRangesFromIL(DebuggerJitInfo * dji, COR_DEBUG_STEP_RANGE *ranges, SIZE_T rangeCount);

    // Return true if this stepper is alive, but frozen. (we freeze when the stepper
    // enters a nested func-eval).
    bool IsFrozen();

    // Returns true if this stepper is 'dead' - which happens if a non-frozen stepper
    // gets a func-eval exit.
    bool IsDead();

    // Prepare for sending an event.
    void PrepareForSendEvent(StackTraceTicket ticket);

protected:
    bool m_stepIn;
    CorDebugStepReason m_reason; // Why did we stop?
    FramePointer m_fpStepInto;   // if we get a trace call
                                 // callback, we may end up completing
                                 // a step into. If fp is less than this
                                 // when we stop,
                                 // then we're actually in a STEP_CALL

    CorDebugIntercept m_rgfInterceptStop; // If we hit a
    // frame that's an interceptor (internal or otherwise), should we stop?

    CorDebugUnmappedStop m_rgfMappingStop; // If we hit a frame
    // that's at an interesting mapping point (prolog, epilog, etc), should
    // we stop?

    COR_DEBUG_STEP_RANGE * m_range; // Ranges for active steppers are always in native offsets.

    SIZE_T m_rangeCount;
    SIZE_T m_realRangeCount; // @todo - delete b/c only used for CodePitching & Old-Enc

    // The original step intention.
    // As the stepper moves through code, it may change its other members.
    // ranges may get deleted, m_stepIn may get toggled, etc.
    // So we can't recover the original step direction from our other fields.
    // We need to know the original direction (as well as m_fp) so we know
    // if the frame we want to stop in is valid.
    //
    // Note that we can't really tell this by looking at our other state variables.
    // For example, a single-instruction step looks like a step-over.
    enum EStepMode
    {
        cStepOver,  // Stop in level above or at m_fp.
        cStepIn,    // Stop in level above, below, or at m_fp.
        cStepOut    // Only stop in level above m_fp
    } m_eMode;

    // The frame that the stepper was originally created in.
    // This is the only frame that the ranges are valid in.
    FramePointer m_fp;

#if defined(WIN64EXCEPTIONS)
    // This frame pointer is used for funclet stepping.
    // See IsRangeAppropriate() for more information.
    FramePointer m_fpParentMethod;
#endif // WIN64EXCEPTIONS

    // m_fpException is 0 if we haven't stepped into an exception,
    // and is ignored. If we get a TriggerUnwind while mid-step, we note
    // the value of frame here, and use that to figure out if we should stop.
    FramePointer m_fpException;
    MethodDesc * m_fdException;

    // Counter of FuncEvalEnter/Exits - used to determine if we're entering / exiting
    // a func-eval.
    int m_cFuncEvalNesting;

    // To freeze a stepper, we disable all triggers. We have to remember that so that
    // we can reenable them on Thaw.
    DWORD m_bvFrozenTriggers;

    // Values to use in m_bvFrozenTriggers.
    enum ETriggers
    {
        kSingleStep  = 0x1,
        kMethodEnter = 0x2,
    };


    void EnableJMCBackStop(MethodDesc * pStartMethod);

#ifdef _DEBUG
    // MethodDesc that the Stepin started in.
    // This is used for the JMC-backstop.
    MethodDesc * m_StepInStartMethod;

    // This flag is to ensure that PrepareForSendEvent is called before SendEvent.
    bool m_fReadyToSend;
#endif
};
1715
1716
1717
1718/* ------------------------------------------------------------------------- *
1719 * DebuggerJMCStepper routines
1720 * ------------------------------------------------------------------------- */
// Just-My-Code (JMC) variant of DebuggerStepper. Overrides the stepping
// hooks below so that the stepper only considers frames for which
// IsInterestingFrame() returns true (i.e. frames classified as user code).
class DebuggerJMCStepper : public DebuggerStepper
{
public:
    DebuggerJMCStepper(Thread *thread,
                       CorDebugUnmappedStop rgfMappingStop,
                       CorDebugIntercept interceptStop,
                       AppDomain *appDomain);
    ~DebuggerJMCStepper();

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
        { return DEBUGGER_CONTROLLER_JMC_STEPPER; }

    virtual void EnablePolyTraceCall();
protected:
    // JMC-specific overrides of the DebuggerStepper stepping primitives.
    virtual void TrapStepNext(ControllerStackInfo *info);
    virtual bool TrapStepInHelper(ControllerStackInfo * pInfo,
                                  const BYTE * ipCallTarget,
                                  const BYTE * ipNext,
                                  bool fCallingIntoFunclet);
    // Returns true iff pFrame is one the JMC stepper should stop in.
    virtual bool IsInterestingFrame(FrameInfo * pFrame);
    // Fired on entry into a method; used to detect arrival in user code.
    virtual void TriggerMethodEnter(Thread * thread, DebuggerJitInfo * dji, const BYTE * ip, FramePointer fp);
    virtual bool DetectHandleNonUserCode(ControllerStackInfo *info, DebuggerMethodInfo * pInfo);
    virtual bool DetectHandleInterceptors(ControllerStackInfo *info);


private:

};
1749
1750
1751/* ------------------------------------------------------------------------- *
1752 * DebuggerThreadStarter routines
1753 * ------------------------------------------------------------------------- */
1754// DebuggerThreadStarter: Once triggered, it sends the thread attach
1755// message to the right side (where the CreateThread managed callback
// gets called). It then promptly disappears, as its only purpose is to
1757// alert the right side that a new thread has begun execution.
class DebuggerThreadStarter : public DebuggerController
{
public:
    DebuggerThreadStarter(Thread *thread);

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
        { return DEBUGGER_CONTROLLER_THREAD_STARTER; }

private:
    // Called when a patch planted by this controller is hit on the new thread.
    TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                           Thread *thread,
                           TRIGGER_WHY tyWhy);
    // Called when the new thread runs into a traced call target.
    void TriggerTraceCall(Thread *thread, const BYTE *ip);
    // Sends the thread-attach (CreateThread) event to the right side.
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
};
1773
1774#ifdef FEATURE_DATABREAKPOINT
1775
// Controller used to report a hardware data breakpoint hit to the right side.
class DebuggerDataBreakpoint : public DebuggerController
{
private:
    // Snapshot of the thread's filter context, captured at construction time
    // so the right side sees the state at the moment the breakpoint fired.
    CONTEXT m_context;
public:
    DebuggerDataBreakpoint(Thread* pThread) : DebuggerController(pThread, NULL)
    {
        LOG((LF_CORDB, LL_INFO10000, "D:DDBP: Data Breakpoint event created\n"));
        memcpy(&m_context, g_pEEInterface->GetThreadFilterContext(pThread), sizeof(CONTEXT));
    }

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType(void)
    {
        return DEBUGGER_CONTROLLER_DATA_BREAKPOINT;
    }

    virtual TP_RESULT TriggerPatch(DebuggerControllerPatch *patch, Thread *thread, TRIGGER_WHY tyWhy);

    virtual bool TriggerSingleStep(Thread *thread, const BYTE *ip);

    // Sends the data-breakpoint event to the right side, then deletes this
    // controller — it is single-shot.
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp)
    {
        CONTRACTL
        {
            SO_NOT_MAINLINE;
            NOTHROW;
            SENDEVENT_CONTRACT_ITEMS;
        }
        CONTRACTL_END;

        LOG((LF_CORDB, LL_INFO10000, "DDBP::SE: in DebuggerDataBreakpoint's SendEvent\n"));

        g_pDebugger->SendDataBreakpoint(thread, &m_context, this);

        // Single-shot: this controller's job is done once the event is sent.
        Delete();

        return true;
    }

    static bool TriggerDataBreakpoint(Thread *thread, CONTEXT * pContext);
};
1817
1818#endif // FEATURE_DATABREAKPOINT
1819
1820
1821/* ------------------------------------------------------------------------- *
 * DebuggerUserBreakpoint routines. UserBreakpoints are used
 * by Runtime threads to notify the Right Side that they have
 * hit a user breakpoint.
1825 * ------------------------------------------------------------------------- */
class DebuggerUserBreakpoint : public DebuggerStepper
{
public:
    // Entry point: handle a user-initiated debug break on pThread.
    static void HandleDebugBreak(Thread * pThread);

    // Returns true iff pFrame belongs to the debugger's own namespace.
    static bool IsFrameInDebuggerNamespace(FrameInfo * pFrame);

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
        { return DEBUGGER_CONTROLLER_USER_BREAKPOINT; }
private:
    // Don't construct these directly. Use HandleDebugBreak().
    DebuggerUserBreakpoint(Thread *thread);


    virtual bool IsInterestingFrame(FrameInfo * pFrame);

    // Sends the user-breakpoint event to the right side.
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
};
1844
1845/* ------------------------------------------------------------------------- *
1846 * DebuggerFuncEvalComplete routines
1847 * ------------------------------------------------------------------------- */
class DebuggerFuncEvalComplete : public DebuggerController
{
public:
    // dest is the address at which completion will be detected
    // (semantics defined in the implementation file).
    DebuggerFuncEvalComplete(Thread *thread,
                             void *dest);

    virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
        { return DEBUGGER_CONTROLLER_FUNC_EVAL_COMPLETE; }

private:
    TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
                           Thread *thread,
                           TRIGGER_WHY tyWhy);
    bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
    // NOTE(review): presumably the func-eval this controller completes —
    // confirm where m_pDE is assigned in the implementation file.
    DebuggerEval* m_pDE;
};
1864
1865// continuable-exceptions
1866/* ------------------------------------------------------------------------- *
1867 * DebuggerContinuableExceptionBreakpoint routines
1868 * ------------------------------------------------------------------------- *
1869 *
1870 * DebuggerContinuableExceptionBreakpoint: Implementation of Continuable Exception support uses this.
1871 */
1872class DebuggerContinuableExceptionBreakpoint : public DebuggerController
1873{
1874public:
1875 DebuggerContinuableExceptionBreakpoint(Thread *pThread,
1876 SIZE_T m_offset,
1877 DebuggerJitInfo *jitInfo,
1878 AppDomain *pAppDomain);
1879
1880 virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
1881 { return DEBUGGER_CONTROLLER_CONTINUABLE_EXCEPTION; }
1882
1883private:
1884 TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
1885 Thread *thread,
1886 TRIGGER_WHY tyWhy);
1887
1888 bool SendEvent(Thread *thread, bool fInteruptedBySetIp);
1889};
1890
1891
1892#ifdef EnC_SUPPORTED
1893//---------------------------------------------------------------------------------------
1894//
1895// DebuggerEnCBreakpoint - used by edit and continue to support remapping
1896//
1897// When a method is updated, we make no immediate attempt to remap any existing execution
1898// of the old method. Instead we mine the old method with EnC breakpoints, and prompt the
1899// debugger whenever one is hit, giving it the opportunity to request a remap to the
1900// latest version of the method.
1901//
1902// Over long debugging sessions which make many edits to large methods, we can create
1903// a large number of these breakpoints. We currently make no attempt to reclaim the
1904// code or patch overhead for old methods. Ideally we'd be able to detect when there are
1905// no outstanding references to the old method version and clean up after it. At the
1906// very least, we could remove all but the first patch when there are no outstanding
1907// frames for a specific version of an edited method.
1908//
1909class DebuggerEnCBreakpoint : public DebuggerController
1910{
1911public:
1912 // We have two types of EnC breakpoints. The first is the one we
1913 // sprinkle through old code to let us know when execution is occuring
1914 // in a function that now has a new version. The second is when we've
1915 // actually resumed excecution into a remapped function and we need
1916 // to then notify the debugger.
1917 enum TriggerType {REMAP_PENDING, REMAP_COMPLETE};
1918
1919 // Create and activate an EnC breakpoint at the specified native offset
1920 DebuggerEnCBreakpoint(SIZE_T m_offset,
1921 DebuggerJitInfo *jitInfo,
1922 TriggerType fTriggerType,
1923 AppDomain *pAppDomain);
1924
1925 virtual DEBUGGER_CONTROLLER_TYPE GetDCType( void )
1926 { return DEBUGGER_CONTROLLER_ENC; }
1927
1928private:
1929 TP_RESULT TriggerPatch(DebuggerControllerPatch *patch,
1930 Thread *thread,
1931 TRIGGER_WHY tyWhy);
1932
1933 TP_RESULT HandleRemapComplete(DebuggerControllerPatch *patch,
1934 Thread *thread,
1935 TRIGGER_WHY tyWhy);
1936
1937 DebuggerJitInfo *m_jitInfo;
1938 TriggerType m_fTriggerType;
1939};
1940#endif //EnC_SUPPORTED
1941
1942/* ========================================================================= */
1943
// Initial capacity of DebuggerControllerQueue's event array (see dcqEnqueue).
enum
{
    EVENTS_INIT_ALLOC = 5
};
1948
1949class DebuggerControllerQueue
1950{
1951 DebuggerController **m_events;
1952 DWORD m_dwEventsCount;
1953 DWORD m_dwEventsAlloc;
1954 DWORD m_dwNewEventsAlloc;
1955
1956public:
1957 DebuggerControllerQueue()
1958 : m_events(NULL),
1959 m_dwEventsCount(0),
1960 m_dwEventsAlloc(0),
1961 m_dwNewEventsAlloc(0)
1962 {
1963 }
1964
1965
1966 ~DebuggerControllerQueue()
1967 {
1968 if (m_events != NULL)
1969 delete [] m_events;
1970 }
1971
1972 BOOL dcqEnqueue(DebuggerController *dc, BOOL fSort)
1973 {
1974 LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqE\n"));
1975
1976 _ASSERTE( dc != NULL );
1977
1978 if (m_dwEventsCount == m_dwEventsAlloc)
1979 {
1980 if (m_events == NULL)
1981 m_dwNewEventsAlloc = EVENTS_INIT_ALLOC;
1982 else
1983 m_dwNewEventsAlloc = m_dwEventsAlloc<<1;
1984
1985 DebuggerController **newEvents = new (nothrow) DebuggerController * [m_dwNewEventsAlloc];
1986
1987 if (newEvents == NULL)
1988 return FALSE;
1989
1990 if (m_events != NULL)
1991 // The final argument to CopyMemory cannot over/underflow.
1992 // The amount of memory copied has a strict upper bound of the size of the array,
1993 // which cannot exceed the pointer size for the platform.
1994 CopyMemory(newEvents, m_events, (SIZE_T)sizeof(*m_events) * (SIZE_T)m_dwEventsAlloc);
1995
1996 m_events = newEvents;
1997 m_dwEventsAlloc = m_dwNewEventsAlloc;
1998 }
1999
2000 dc->Enqueue();
2001
2002 // Make sure to place high priority patches into
2003 // the event list first. This ensures, for
2004 // example, that thread starts fire before
2005 // breakpoints.
2006 if (fSort && (m_dwEventsCount > 0))
2007 {
2008 DWORD i;
2009 for (i = 0; i < m_dwEventsCount; i++)
2010 {
2011 _ASSERTE(m_events[i] != NULL);
2012
2013 if (m_events[i]->GetDCType() > dc->GetDCType())
2014 {
2015 // The final argument to CopyMemory cannot over/underflow.
2016 // The amount of memory copied has a strict upper bound of the size of the array,
2017 // which cannot exceed the pointer size for the platform.
2018 MoveMemory(&m_events[i+1], &m_events[i], (SIZE_T)sizeof(DebuggerController*) * (SIZE_T)(m_dwEventsCount - i));
2019 m_events[i] = dc;
2020 break;
2021 }
2022 }
2023
2024 if (i == m_dwEventsCount)
2025 m_events[m_dwEventsCount] = dc;
2026
2027 m_dwEventsCount++;
2028 }
2029 else
2030 m_events[m_dwEventsCount++] = dc;
2031
2032 return TRUE;
2033 }
2034
2035 DWORD dcqGetCount(void)
2036 {
2037 return m_dwEventsCount;
2038 }
2039
2040 DebuggerController *dcqGetElement(DWORD dwElement)
2041 {
2042 LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqGE\n"));
2043
2044 DebuggerController *dcp = NULL;
2045
2046 _ASSERTE(dwElement < m_dwEventsCount);
2047 if (dwElement < m_dwEventsCount)
2048 {
2049 dcp = m_events[dwElement];
2050 }
2051
2052 _ASSERTE(dcp != NULL);
2053 return dcp;
2054 }
2055
2056 // Kinda wacked, but this actually releases stuff in FILO order, not
2057 // FIFO order. If we do this in an extra loop, then the perf
2058 // is better than sliding everything down one each time.
2059 void dcqDequeue(DWORD dw = 0xFFffFFff)
2060 {
2061 if (dw == 0xFFffFFff)
2062 {
2063 dw = (m_dwEventsCount - 1);
2064 }
2065
2066 LOG((LF_CORDB, LL_INFO100000,"DCQ::dcqD element index "
2067 "0x%x of 0x%x\n", dw, m_dwEventsCount));
2068
2069 _ASSERTE(dw < m_dwEventsCount);
2070
2071 m_events[dw]->Dequeue();
2072
2073 // Note that if we're taking the element off the end (m_dwEventsCount-1),
2074 // the following will no-op.
2075 // The final argument to MoveMemory cannot over/underflow.
2076 // The amount of memory copied has a strict upper bound of the size of the array,
2077 // which cannot exceed the pointer size for the platform.
2078 MoveMemory(&(m_events[dw]),
2079 &(m_events[dw + 1]),
2080 (SIZE_T)sizeof(DebuggerController *) * (SIZE_T)(m_dwEventsCount - dw - 1));
2081 m_dwEventsCount--;
2082 }
2083};
2084
2085// Include all of the inline stuff now.
2086#include "controller.inl"
2087
2088#endif // !DACCESS_COMPILE
2089
2090#endif /* CONTROLLER_H_ */
2091