1 | // Licensed to the .NET Foundation under one or more agreements. |
2 | // The .NET Foundation licenses this file to you under the MIT license. |
3 | // See the LICENSE file in the project root for more information. |
4 | //***************************************************************************** |
5 | // File: debugger.h |
6 | // |
7 | |
8 | // |
9 | // Header file for Runtime Controller classes of the COM+ Debugging Services. |
10 | // |
11 | //***************************************************************************** |
12 | |
13 | #ifndef DEBUGGER_H_ |
14 | #define DEBUGGER_H_ |
15 | |
16 | #include <windows.h> |
17 | |
18 | #include <utilcode.h> |
19 | |
20 | #include <metahost.h> |
21 | |
22 | #if defined(_DEBUG) && !defined(DACCESS_COMPILE) |
23 | #define LOGGING |
24 | #endif |
25 | |
26 | #include <log.h> |
27 | |
28 | #include "cor.h" |
29 | #include "corpriv.h" |
30 | #include "daccess.h" |
31 | |
32 | #include "common.h" |
33 | #include "winwrap.h" |
34 | #include "threads.h" |
35 | #include "threadsuspend.h" |
36 | #include "frames.h" |
37 | |
38 | #include "appdomain.hpp" |
39 | #include "eedbginterface.h" |
40 | #include "dbginterface.h" |
41 | #include "corhost.h" |
42 | |
43 | |
44 | #include "corjit.h" |
45 | #include <dbgmeta.h> // <TODO>need to rip this out of here...</TODO> |
46 | |
47 | #include "frameinfo.h" |
48 | |
49 | #include "dllimportcallback.h" |
50 | |
51 | #include "canary.h" |
52 | |
53 | #undef ASSERT |
54 | #define CRASH(x) _ASSERTE(!x) |
55 | #define ASSERT(x) _ASSERTE(x) |
56 | |
57 | |
58 | #ifndef TRACE_MEMORY |
59 | #define TRACE_MEMORY 0 |
60 | #endif |
61 | |
62 | #if TRACE_MEMORY |
63 | #define TRACE_ALLOC(p) LOG((LF_CORDB, LL_INFO10000, \ |
64 | "--- Allocated %x at %s:%d\n", p, __FILE__, __LINE__)); |
65 | #define TRACE_FREE(p) LOG((LF_CORDB, LL_INFO10000, \ |
66 | "--- Freed %x at %s:%d\n", p, __FILE__, __LINE__)); |
67 | #else |
68 | #define TRACE_ALLOC(p) |
69 | #define TRACE_FREE(p) |
70 | #endif |
71 | |
72 | typedef CUnorderedArray<void*,11> UnorderedPtrArray; |
73 | |
74 | /* ------------------------------------------------------------------------ * |
75 | * Forward class declarations |
76 | * ------------------------------------------------------------------------ */ |
77 | |
78 | class DebuggerFrame; |
79 | class DebuggerModule; |
80 | class DebuggerModuleTable; |
81 | class Debugger; |
82 | class DebuggerBreakpoint; |
83 | class DebuggerPendingFuncEvalTable; |
84 | class DebuggerRCThread; |
85 | class DebuggerStepper; |
86 | class DebuggerMethodInfo; |
87 | class DebuggerJitInfo; |
88 | class DebuggerMethodInfoTable; |
89 | struct DebuggerControllerPatch; |
90 | class DebuggerEval; |
91 | class DebuggerControllerQueue; |
92 | class DebuggerController; |
93 | class Crst; |
94 | |
95 | typedef CUnorderedArray<DebuggerControllerPatch *, 17> PATCH_UNORDERED_ARRAY; |
96 | template<class T> void DeleteInteropSafe(T *p); |
97 | template<class T> void DeleteInteropSafeExecutable(T *p); |
98 | |
99 | typedef VPTR(class Debugger) PTR_Debugger; |
100 | typedef DPTR(struct DebuggerILToNativeMap) PTR_DebuggerILToNativeMap; |
101 | typedef DPTR(class DebuggerMethodInfo) PTR_DebuggerMethodInfo; |
102 | typedef VPTR(class DebuggerMethodInfoTable) PTR_DebuggerMethodInfoTable; |
103 | typedef DPTR(class DebuggerJitInfo) PTR_DebuggerJitInfo; |
104 | typedef DPTR(class DebuggerEval) PTR_DebuggerEval; |
105 | typedef DPTR(struct DebuggerIPCControlBlock) PTR_DebuggerIPCControlBlock; |
106 | |
107 | |
108 | /* ------------------------------------------------------------------------ * |
109 | * Global variables |
110 | * ------------------------------------------------------------------------ */ |
111 | |
112 | GPTR_DECL(Debugger, g_pDebugger); |
113 | GPTR_DECL(EEDebugInterface, g_pEEInterface); |
114 | GVAL_DECL(ULONG, CLRJitAttachState); |
115 | #ifndef FEATURE_PAL |
116 | GVAL_DECL(HANDLE, g_hContinueStartupEvent); |
117 | #endif |
118 | extern DebuggerRCThread *g_pRCThread; |
119 | |
120 | //--------------------------------------------------------------------------------------- |
// Holder to ensure our calls to IncThreadsAtUnsafePlaces and DecThreadsAtUnsafePlaces stay balanced.
122 | class AtSafePlaceHolder |
123 | { |
124 | public: |
125 | AtSafePlaceHolder(Thread * pThread); |
126 | |
127 | // Clear the holder. |
128 | ~AtSafePlaceHolder(); |
129 | |
130 | // True if the holder is acquired. |
131 | bool IsAtUnsafePlace(); |
132 | |
133 | // Clear the holder (call DecThreadsAtUnsafePlaces if needed) |
134 | void Clear(); |
135 | |
136 | private: |
137 | // If this is non-null, then the holder incremented the unsafe counter and it needs |
138 | // to decrement it. |
139 | Thread * m_pThreadAtUnsafePlace; |
140 | }; |
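// A minimal usage sketch (hypothetical caller, not taken from this file): the ctor increments
// the thread's unsafe-place count, and Clear() / the dtor decrement it at most once, so the
// counter stays balanced even on early returns.
//
//      AtSafePlaceHolder unsafePlaceHolder(pThread);
//      if (!DoSomeRiskyWork())             // DoSomeRiskyWork is hypothetical
//          return;                         // dtor decrements the count for us
//      unsafePlaceHolder.Clear();          // back at a safe place; decrement now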
141 | |
142 | |
143 | template<BOOL COOPERATIVE, BOOL TOGGLE, BOOL IFTHREAD> |
144 | class GCHolderEEInterface |
145 | { |
146 | public: |
147 | DEBUG_NOINLINE GCHolderEEInterface(); |
148 | DEBUG_NOINLINE ~GCHolderEEInterface(); |
149 | }; |
150 | |
151 | #ifndef DACCESS_COMPILE |
152 | template<BOOL TOGGLE, BOOL IFTHREAD> |
153 | class GCHolderEEInterface<TRUE, TOGGLE, IFTHREAD> |
154 | { |
155 | private: |
156 | bool startInCoop; |
157 | |
158 | public: |
159 | DEBUG_NOINLINE GCHolderEEInterface() |
160 | { |
161 | SCAN_SCOPE_BEGIN; |
162 | STATIC_CONTRACT_MODE_COOPERATIVE; |
163 | |
164 | if (IFTHREAD && g_pEEInterface->GetThread() == NULL) |
165 | { |
166 | return; |
167 | } |
168 | |
169 | startInCoop = false; |
170 | |
171 | if (g_pEEInterface->IsPreemptiveGCDisabled()) |
172 | { |
173 | // we're starting in COOP, no need to switch |
174 | startInCoop = true; |
175 | } |
176 | else |
177 | { |
178 | // we're starting in PREEMP, need to switch to COOP |
179 | startInCoop = false; |
180 | g_pEEInterface->DisablePreemptiveGC(); |
181 | } |
182 | }; |
183 | |
184 | DEBUG_NOINLINE ~GCHolderEEInterface() |
185 | { |
186 | SCAN_SCOPE_END; |
187 | |
188 | if (IFTHREAD && g_pEEInterface->GetThread() == NULL) |
189 | { |
190 | return; |
191 | } |
192 | |
193 | _ASSERT(g_pEEInterface->IsPreemptiveGCDisabled()); |
194 | |
195 | if (TOGGLE) |
196 | { |
197 | // We're in COOP, toggle to PREEMPTIVE and back to COOP |
198 | // for synch purposes. |
199 | g_pEEInterface->EnablePreemptiveGC(); |
200 | g_pEEInterface->DisablePreemptiveGC(); |
201 | |
202 | // If we started in PREEMPTIVE switch back |
203 | if (!startInCoop) |
204 | { |
205 | g_pEEInterface->EnablePreemptiveGC(); |
206 | } |
207 | } |
208 | else |
209 | { |
210 | // If we started in PREEMPTIVE switch back |
211 | if (!startInCoop) |
212 | { |
213 | g_pEEInterface->EnablePreemptiveGC(); |
214 | } |
215 | } |
216 | }; |
217 | }; |
218 | |
219 | template<BOOL TOGGLE, BOOL IFTHREAD> |
220 | class GCHolderEEInterface<FALSE, TOGGLE, IFTHREAD> |
221 | { |
222 | private: |
223 | bool startInCoop; |
224 | bool conditional; |
225 | |
226 | void EnterInternal(bool bStartInCoop, bool bConditional) |
227 | { |
228 | startInCoop = bStartInCoop; |
229 | conditional = bConditional; |
230 | |
231 | if (!conditional || (IFTHREAD && g_pEEInterface->GetThread() == NULL)) |
232 | { |
233 | return; |
234 | } |
235 | |
236 | if (g_pEEInterface->IsPreemptiveGCDisabled()) |
237 | { |
238 | // we're starting in COOP, we need to switch to PREEMP |
239 | startInCoop = true; |
240 | g_pEEInterface->EnablePreemptiveGC(); |
241 | } |
242 | else |
243 | { |
244 | // We're starting in PREEMP, no need to switch |
245 | startInCoop = false; |
246 | } |
247 | } |
248 | |
249 | void LeaveInternal() |
250 | { |
251 | if (!conditional || (IFTHREAD && g_pEEInterface->GetThread() == NULL)) |
252 | { |
253 | return; |
254 | } |
255 | |
256 | _ASSERTE(!g_pEEInterface->IsPreemptiveGCDisabled()); |
257 | |
258 | if (TOGGLE) |
259 | { |
            // Explicitly toggle to COOP for eventing.
261 | g_pEEInterface->DisablePreemptiveGC(); |
262 | |
263 | // If we started in PREEMPTIVE switch back to PREEMPTIVE |
264 | if (!startInCoop) |
265 | { |
266 | g_pEEInterface->EnablePreemptiveGC(); |
267 | } |
268 | } |
269 | else |
270 | { |
271 | // If we started in COOP, flip back to COOP at the end of the |
272 | // scope, if we started in preemptive we should be fine. |
273 | if (startInCoop) |
274 | { |
275 | g_pEEInterface->DisablePreemptiveGC(); |
276 | } |
277 | } |
278 | } |
279 | |
280 | public: |
281 | DEBUG_NOINLINE GCHolderEEInterface() |
282 | { |
283 | SCAN_SCOPE_BEGIN; |
284 | STATIC_CONTRACT_MODE_PREEMPTIVE; |
285 | |
286 | this->EnterInternal(false, true); |
287 | } |
288 | |
289 | DEBUG_NOINLINE GCHolderEEInterface(bool bConditional) |
290 | { |
291 | SCAN_SCOPE_BEGIN; |
292 | if (bConditional) |
293 | { |
294 | STATIC_CONTRACT_MODE_PREEMPTIVE; |
295 | } |
296 | |
297 | this->EnterInternal(false, bConditional); |
298 | } |
299 | |
300 | DEBUG_NOINLINE ~GCHolderEEInterface() |
301 | { |
302 | SCAN_SCOPE_END; |
303 | |
304 | this->LeaveInternal(); |
305 | }; |
306 | }; |
307 | #endif //DACCESS_COMPILE |
308 | |
309 | #define GCX_COOP_EEINTERFACE() \ |
310 | GCHolderEEInterface<TRUE, FALSE, FALSE> __gcCoop_onlyOneAllowedPerScope |
311 | |
312 | #define GCX_PREEMP_EEINTERFACE() \ |
313 | GCHolderEEInterface<FALSE, FALSE, FALSE> __gcCoop_onlyOneAllowedPerScope |
314 | |
315 | #define GCX_COOP_EEINTERFACE_TOGGLE() \ |
316 | GCHolderEEInterface<TRUE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope |
317 | |
318 | #define GCX_PREEMP_EEINTERFACE_TOGGLE() \ |
319 | GCHolderEEInterface<FALSE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope |
320 | |
321 | #define GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD() \ |
322 | GCHolderEEInterface<FALSE, TRUE, TRUE> __gcCoop_onlyOneAllowedPerScope |
323 | |
324 | #define GCX_PREEMP_EEINTERFACE_TOGGLE_COND(cond) \ |
325 | GCHolderEEInterface<FALSE, TRUE, FALSE> __gcCoop_onlyOneAllowedPerScope((cond)) |
326 | |
327 | #define GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD_COND(cond) \ |
328 | GCHolderEEInterface<FALSE, TRUE, TRUE> __gcCoop_onlyOneAllowedPerScope((cond)) |
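// A minimal usage sketch (hypothetical call site): each macro declares a scoped holder, so the
// GC mode is switched for the enclosing block and restored when the holder is destructed. Only
// one may be used per scope, since they all declare the same local name.
//
//      {
//          GCX_COOP_EEINTERFACE();      // ensure cooperative mode for this scope
//          InspectManagedObjects();     // hypothetical work that must run in COOP
//      }                                // holder dtor restores the original GC mode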
329 | |
330 | |
331 | |
332 | // There are still some APIs that call new that we call from the helper thread. |
333 | // These are unsafe operations, so we wrap them here. Each of these is a potential hang. |
334 | inline DWORD UnsafeGetConfigDWORD_DontUse_(LPCWSTR name, DWORD defValue) |
335 | { |
336 | SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; |
337 | return REGUTIL::GetConfigDWORD_DontUse_(name, defValue); |
338 | } |
339 | |
340 | inline DWORD UnsafeGetConfigDWORD(const CLRConfig::ConfigDWORDInfo & info) |
341 | { |
342 | SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; |
343 | return CLRConfig::GetConfigValue(info); |
344 | } |
345 | |
346 | #define FILE_DEBUG INDEBUG(__FILE__) NOT_DEBUG(NULL) |
347 | #define LINE_DEBUG INDEBUG(__LINE__) NOT_DEBUG(0) |
348 | |
349 | #define CORDBDebuggerSetUnrecoverableWin32Error(__d, __code, __w) \ |
350 | ((__d)->UnrecoverableError(HRESULT_FROM_WIN32(GetLastError()), \ |
351 | (__code), FILE_DEBUG, LINE_DEBUG, (__w)), \ |
352 | HRESULT_FROM_GetLastError()) |
353 | |
354 | #define CORDBDebuggerSetUnrecoverableError(__d, __hr, __w) \ |
355 | (__d)->UnrecoverableError((__hr), \ |
356 | (__hr), FILE_DEBUG, LINE_DEBUG, (__w)) |
357 | |
358 | #define CORDBUnrecoverableError(__d) ((__d)->m_unrecoverableError == TRUE) |
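// A hypothetical call-site sketch (the failing call and arguments are illustrative only): record
// the unrecoverable error, capturing file/line in debug builds, and propagate the failure HRESULT
// that the Win32 variant evaluates to.
//
//      if (!SetEvent(hSomeEvent))
//      {
//          return CORDBDebuggerSetUnrecoverableWin32Error(g_pDebugger, 0, FALSE);
//      }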
359 | |
360 | /* ------------------------------------------------------------------------ * |
361 | * Helpers used for contract preconditions. |
362 | * ------------------------------------------------------------------------ */ |
363 | |
364 | |
365 | bool ThisIsHelperThreadWorker(void); |
366 | bool ThisIsTempHelperThread(); |
367 | bool ThisIsTempHelperThread(DWORD tid); |
368 | |
369 | #ifdef _DEBUG |
370 | |
371 | // Functions can be split up into 3 categories: |
372 | // 1.) Functions that must run on the helper thread. |
373 | // Returns true if this is the helper thread (or the thread |
//     doing helper-thread duty).
375 | |
376 | // 2.) Functions that can't run on the helper thread. |
377 | // This is just !ThisIsHelperThread(); |
378 | |
379 | // 3.) Functions that may or may not run on the helper thread. |
//     Note this is trivially true, but its presence means that
//     we're not case #1 or #2, so it's still valuable.
382 | inline bool ThisMaybeHelperThread() { return true; } |
383 | |
384 | #endif |
385 | |
386 | |
387 | // These are methods for transferring information between a REGDISPLAY and |
388 | // a DebuggerREGDISPLAY. |
389 | extern void CopyREGDISPLAY(REGDISPLAY* pDst, REGDISPLAY* pSrc); |
390 | extern void SetDebuggerREGDISPLAYFromREGDISPLAY(DebuggerREGDISPLAY* pDRD, REGDISPLAY* pRD); |
391 | |
392 | // |
393 | // PUSHED_REG_ADDR gives us NULL if the register still lives in the thread's context, or it gives us the address |
394 | // of where the register was pushed for this frame. |
395 | // |
// This macro is used in CopyREGDISPLAY() and SetDebuggerREGDISPLAYFromREGDISPLAY(). We really should make
// DebuggerREGDISPLAY a class with these two methods, but unfortunately, the RS has no notion of REGDISPLAY.
398 | inline LPVOID PushedRegAddr(REGDISPLAY* pRD, LPVOID pAddr) |
399 | { |
400 | LIMITED_METHOD_CONTRACT; |
401 | |
402 | #ifdef WIN64EXCEPTIONS |
403 | if ( ((UINT_PTR)(pAddr) >= (UINT_PTR)pRD->pCurrentContextPointers) && |
404 | ((UINT_PTR)(pAddr) <= ((UINT_PTR)pRD->pCurrentContextPointers + sizeof(T_KNONVOLATILE_CONTEXT_POINTERS))) ) |
405 | #else |
406 | if ( ((UINT_PTR)(pAddr) >= (UINT_PTR)pRD->pContext) && |
407 | ((UINT_PTR)(pAddr) <= ((UINT_PTR)pRD->pContext + sizeof(T_CONTEXT))) ) |
408 | #endif |
409 | return NULL; |
410 | |
411 | // (Microsoft 2/9/07 - putting this in an else clause confuses gcc for some reason, so I've moved |
412 | // it to here) |
413 | return pAddr; |
414 | } |
415 | |
416 | bool HandleIPCEventWrapper(Debugger* pDebugger, DebuggerIPCEvent *e); |
417 | |
418 | HRESULT ValidateObject(Object *objPtr); |
419 | |
420 | //----------------------------------------------------------------------------- |
421 | // Execution control needs several ways to get at the context of a thread |
// stopped in managed code (stepping, setip, func-eval).
423 | // We want to abstract away a few things: |
424 | // - active: this thread is stopped at a patch |
//    - inactive: this thread was managed-suspended somewhere in jitted code
426 | // because of some other active thread. |
427 | // |
428 | // In general, execution control operations administered from the helper thread |
429 | // can occur on any managed thread (active or inactive). |
430 | // Intermediate triggers (eg, TriggerPatch) only occur on an active thread. |
431 | // |
432 | // Viewing the context in terms of Active vs. Inactive lets us abstract away |
433 | // filter context, redirected context, and interop hijacks. |
434 | //----------------------------------------------------------------------------- |
435 | |
436 | // Get the context for a thread stopped (perhaps temporarily) in managed code. |
437 | // The process may be live or stopped. |
438 | // This thread could be 'active' (stopped at patch) or inactive. |
439 | // This context should always be in managed code and this context can be manipulated |
440 | // for execution control (setip, single-step, func-eval, etc) |
441 | // Returns NULL if not available. |
442 | CONTEXT * GetManagedStoppedCtx(Thread * pThread); |
443 | |
444 | // Get the context for a thread live in or around managed code. |
445 | // Caller guarantees this is active. |
// This ctx is just for a 'live' thread. This means that the ctx may come
// from an M2U hijack or from a native patch.
448 | // Never NULL. |
449 | CONTEXT * GetManagedLiveCtx(Thread * pThread); |
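// A minimal sketch of how a caller might pick between the two (hypothetical; assumes the caller
// knows the thread is at least live in managed code): prefer the stopped context when it is
// available, since that one is safe to manipulate for execution control.
//
//      CONTEXT * pCtx = GetManagedStoppedCtx(pThread);
//      if (pCtx == NULL)
//      {
//          pCtx = GetManagedLiveCtx(pThread);   // never NULL for an active thread
//      }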
450 | |
451 | |
452 | #undef UtilMessageBoxCatastrophic |
453 | #undef UtilMessageBoxCatastrophicNonLocalized |
454 | #undef UtilMessageBoxCatastrophicVA |
455 | #undef UtilMessageBoxCatastrophicNonLocalizedVA |
456 | #undef UtilMessageBox |
457 | #undef UtilMessageBoxNonLocalized |
458 | #undef UtilMessageBoxVA |
459 | #undef UtilMessageBoxNonLocalizedVA |
460 | #undef WszMessageBox |
461 | #define UtilMessageBoxCatastrophic __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
462 | #define UtilMessageBoxCatastrophicNonLocalized __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
463 | #define UtilMessageBoxCatastrophicVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
464 | #define UtilMessageBoxCatastrophicNonLocalizedVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
465 | #define UtilMessageBox __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
466 | #define UtilMessageBoxNonLocalized __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
467 | #define UtilMessageBoxVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
468 | #define UtilMessageBoxNonLocalizedVA __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
469 | #define WszMessageBox __error("Use g_pDebugger->MessageBox from inside the left side of the debugger") |
470 | |
471 | |
472 | /* ------------------------------------------------------------------------ * |
473 | * Module classes |
474 | * ------------------------------------------------------------------------ */ |
475 | |
476 | // Once a module / appdomain is unloaded, all Right-side objects (such as breakpoints) |
477 | // in that appdomain will get neutered and will thus be prevented from accessing |
478 | // the unloaded appdomain. |
479 | // |
480 | // @dbgtodo jmc - This is now purely relegated to the LS. Eventually completely get rid of this |
481 | // by moving fields off to Module or getting rid of the fields completely. |
482 | typedef DPTR(class DebuggerModule) PTR_DebuggerModule; |
483 | class DebuggerModule |
484 | { |
485 | public: |
486 | DebuggerModule(Module * pRuntimeModule, DomainFile * pDomainFile, AppDomain * pAppDomain); |
487 | |
    // Do we have any optimized code in the module?
    // JMC probes aren't emitted in optimized code.
490 | bool HasAnyOptimizedCode(); |
491 | |
492 | // If the debugger updates things to allow/disallow optimized code, then we have to track that. |
493 | void MarkAllowedOptimizedCode(); |
494 | void UnmarkAllowedOptimizedCode(); |
495 | |
496 | |
497 | BOOL ClassLoadCallbacksEnabled(void); |
498 | void EnableClassLoadCallbacks(BOOL f); |
499 | |
500 | AppDomain* GetAppDomain(); |
501 | |
502 | Module * GetRuntimeModule(); |
503 | |
504 | |
505 | // <TODO> (8/12/2002) |
    // Currently we create a new DebuggerModule for each appdomain a shared
507 | // module lives in. We then pretend there aren't any shared modules. |
508 | // This is bad. We need to move away from this. |
    // Once we stop lying, then every module will be its own PrimaryModule. :)
510 | // |
511 | // Currently, Module* is 1:n w/ DebuggerModule. |
512 | // We add a notion of PrimaryModule so that: |
513 | // Module* is 1:1 w/ DebuggerModule::GetPrimaryModule(); |
514 | // This should help transition towards exposing shared modules. |
515 | // If the Runtime module is shared, then this gives a common DM. |
516 | // If the runtime module is not shared, then this is an identity function. |
517 | // |
518 | // The runtime has the notion of "DomainFile", which is 1:1 with DebuggerModule |
519 | // and thus 1:1 with CordbModule. The CordbModule hash table on the RS now uses |
520 | // the DomainFile as the key instead of DebuggerModule. This is a temporary |
521 | // workaround to facilitate the removal of DebuggerModule. |
522 | // </TODO> |
523 | DebuggerModule * GetPrimaryModule(); |
524 | DomainFile * GetDomainFile() |
525 | { |
526 | LIMITED_METHOD_DAC_CONTRACT; |
527 | return m_pRuntimeDomainFile; |
528 | } |
529 | |
530 | // Called by DebuggerModuleTable to set our primary module |
531 | void SetPrimaryModule(DebuggerModule * pPrimary); |
532 | |
533 | void SetCanChangeJitFlags(bool fCanChangeJitFlags); |
534 | |
535 | private: |
536 | BOOL m_enableClassLoadCallbacks; |
537 | |
538 | // First step in moving away from hiding shared modules. |
539 | DebuggerModule* m_pPrimaryModule; |
540 | |
541 | PTR_Module m_pRuntimeModule; |
542 | PTR_DomainFile m_pRuntimeDomainFile; |
543 | |
544 | AppDomain* m_pAppDomain; |
545 | |
546 | bool m_fHasOptimizedCode; |
547 | |
548 | void PickPrimaryModule(); |
549 | |
550 | // Can we change jit flags on the module? |
    // This is true during module creation
552 | bool m_fCanChangeJitFlags; |
553 | |
554 | |
555 | }; |
556 | |
557 | /* ------------------------------------------------------------------------ * |
558 | * Hash to hold pending func evals by thread id |
559 | * ------------------------------------------------------------------------ */ |
560 | |
561 | struct DebuggerPendingFuncEval |
562 | { |
563 | FREEHASHENTRY entry; |
564 | PTR_Thread pThread; |
565 | PTR_DebuggerEval pDE; |
566 | }; |
567 | |
568 | typedef DPTR(struct DebuggerPendingFuncEval) PTR_DebuggerPendingFuncEval; |
569 | |
570 | /* ------------------------------------------------------------------------ * |
571 | * DebuggerRCThread class -- the Runtime Controller thread. |
572 | * ------------------------------------------------------------------------ */ |
573 | |
574 | #define DRCT_CONTROL_EVENT 0 |
575 | #define DRCT_RSEA 1 |
576 | #define DRCT_FAVORAVAIL 2 |
577 | #define DRCT_COUNT_INITIAL 3 |
578 | |
579 | #define DRCT_DEBUGGER_EVENT 3 |
580 | #define DRCT_COUNT_FINAL 4 |
581 | |
582 | |
583 | |
584 | |
585 | |
586 | |
// Canary is used as a way to have a runtime failure for the SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE
588 | // contract violation. |
589 | // Have a macro which checks the canary and then uses the Suppress macro. |
590 | // We need this check to be a macro in order to chain to the Suppress_allocation macro. |
591 | #define CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(pHR, pCanary) \ |
592 | { \ |
593 | HelperCanary * __pCanary = (pCanary); \ |
594 | if (!__pCanary->AreLocksAvailable()) { \ |
595 | (*pHR) = CORDBG_E_HELPER_MAY_DEADLOCK; \ |
596 | } else { \ |
597 | (*pHR) = S_OK; \ |
598 | } \ |
599 | } \ |
600 | SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE \ |
601 | ; \ |
602 | |
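// A minimal usage sketch (hypothetical caller): the macro writes either S_OK or
// CORDBG_E_HELPER_MAY_DEADLOCK through pHR and then suppresses allocation asserts for the
// remainder of the scope, so callers typically bail out when it reports failure.
//
//      HRESULT hr = S_OK;
//      CHECK_IF_CAN_TAKE_HELPER_LOCKS_IN_THIS_SCOPE(&hr, pCanary);
//      if (FAILED(hr))
//      {
//          return hr;   // taking helper locks here might deadlock
//      }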
603 | |
604 | // Mechanics for cross-thread call to helper thread (called "Favor"). |
605 | class HelperThreadFavor |
606 | { |
607 | // Only let RCThread access these fields. |
608 | friend class DebuggerRCThread; |
609 | |
610 | HelperThreadFavor(); |
    // No dtor because we intentionally leak it all at shutdown.
612 | void Init(); |
613 | |
614 | protected: |
615 | // Stuff for having the helper thread do function calls for a thread |
616 | // that blew its stack |
617 | FAVORCALLBACK m_fpFavor; |
618 | void *m_pFavorData; |
619 | HANDLE m_FavorReadEvent; |
620 | Crst m_FavorLock; |
621 | |
622 | HANDLE m_FavorAvailableEvent; |
623 | }; |
624 | |
625 | |
// The *LazyInit classes represent storage that the debugger doesn't need until after it has started up.
627 | // This is effectively an extension to the debugger class; but for perf reasons, we only |
628 | // want to instantiate it if we're actually debugging. |
629 | |
630 | // Fields that are a logical extension of RCThread |
631 | class RCThreadLazyInit |
632 | { |
633 | // Only let RCThread access these fields. |
634 | friend class DebuggerRCThread; |
635 | |
636 | public: |
637 | RCThreadLazyInit() { } |
638 | ~RCThreadLazyInit() { } |
639 | |
640 | void Init() { } |
641 | protected: |
642 | |
643 | |
644 | |
645 | HelperCanary m_Canary; |
646 | }; |
647 | |
648 | // Fields that are a logical extension of Debugger |
649 | class DebuggerLazyInit |
650 | { |
651 | friend class Debugger; |
652 | public: |
653 | DebuggerLazyInit(); |
654 | ~DebuggerLazyInit(); |
655 | |
656 | protected: |
657 | void Init(); |
658 | |
659 | DebuggerPendingFuncEvalTable *m_pPendingEvals; |
660 | |
661 | // The "debugger data lock" is a very small leaf lock used to protect debugger internal data structures (such |
662 | // as DJIs, DMIs, module table). It is a GC-unsafe-anymode lock and so it can't trigger a GC while being held. |
    // It also can't issue any callbacks into the EE or any code that it does not directly control.
    // This is a separate lock from the larger Debugger-lock / Controller lock, which allows regions under those
    // locks to access debugger data structures w/o blocking each other.
666 | Crst m_DebuggerDataLock; |
667 | HANDLE m_CtrlCMutex; |
668 | HANDLE m_exAttachEvent; |
669 | HANDLE m_exUnmanagedAttachEvent; |
670 | HANDLE m_garbageCollectionBlockerEvent; |
671 | |
672 | BOOL m_DebuggerHandlingCtrlC; |
673 | |
674 | // Used by MapAndBindFunctionBreakpoints. Note that this is thread-safe |
675 | // only b/c we access it from within the DebuggerController::Lock |
676 | SIZE_T_UNORDERED_ARRAY m_BPMappingDuplicates; |
677 | |
678 | UnorderedPtrArray m_pMemBlobs; |
679 | |
680 | // Hang RCThread fields off DebuggerLazyInit to avoid an extra pointer. |
681 | RCThreadLazyInit m_RCThread; |
682 | }; |
683 | typedef DPTR(DebuggerLazyInit) PTR_DebuggerLazyInit; |
684 | |
685 | class DebuggerRCThread |
686 | { |
687 | public: |
688 | DebuggerRCThread(Debugger * pDebugger); |
689 | virtual ~DebuggerRCThread(); |
690 | void CloseIPCHandles(); |
691 | |
692 | // |
693 | // You create a new instance of this class, call Init() to set it up, |
    // then call Start() to start processing events. Stop() terminates the
    // thread, and deleting the instance cleans up all the handles and such.
697 | // |
698 | HRESULT Init(void); |
699 | HRESULT Start(void); |
700 | HRESULT AsyncStop(void); |
701 | |
702 | // |
703 | // These are used by this thread to send IPC events to the Debugger |
704 | // Interface side. |
705 | // |
706 | DebuggerIPCEvent* GetIPCEventSendBuffer() |
707 | { |
708 | CONTRACTL |
709 | { |
710 | NOTHROW; |
711 | GC_NOTRIGGER; |
712 | } |
713 | CONTRACTL_END; |
714 | |
715 | #ifdef LOGGING |
716 | if(IsRCThreadReady()) { |
717 | LOG((LF_CORDB, LL_EVERYTHING, "RCThread is ready\n" )); |
718 | } |
719 | #endif |
720 | |
721 | _ASSERTE(m_pDCB != NULL); |
722 | // In case this turns into a continuation event |
723 | GetRCThreadSendBuffer()->next = NULL; |
724 | LOG((LF_CORDB,LL_EVERYTHING, "GIPCESBuffer: got event 0x%x\n" , GetRCThreadSendBuffer())); |
725 | |
726 | return GetRCThreadSendBuffer(); |
727 | } |
728 | |
729 | DebuggerIPCEvent *GetIPCEventSendBufferContinuation( |
730 | DebuggerIPCEvent *eventCur) |
731 | { |
732 | CONTRACTL |
733 | { |
734 | NOTHROW; |
735 | GC_NOTRIGGER; |
736 | PRECONDITION(eventCur != NULL); |
737 | PRECONDITION(eventCur->next == NULL); |
738 | } |
739 | CONTRACTL_END; |
740 | |
        DebuggerIPCEvent *dipce = (DebuggerIPCEvent *) new (nothrow) BYTE [CorDBIPC_BUFFER_SIZE];

        LOG((LF_CORDB,LL_INFO1000000, "About to GIPCESBC 0x%x\n", dipce));

        if (dipce != NULL)
        {
            // Don't touch the buffer (or link it into the chain) unless the allocation succeeded.
            dipce->next = NULL;
            eventCur->next = dipce;
        }
750 | #ifdef _DEBUG |
751 | else |
752 | { |
753 | _ASSERTE( !"GetIPCEventSendBufferContinuation failed to allocate mem!" ); |
754 | } |
755 | #endif //_DEBUG |
756 | |
757 | return dipce; |
758 | } |
759 | |
    // Send an IPCEvent once we're ready for sending. This should be done between
    // SENDIPCEVENT_BEGIN & SENDIPCEVENT_END. See the definition of SENDIPCEVENT_BEGIN
    // for the usage pattern.
763 | HRESULT SendIPCEvent(); |
764 | |
765 | HRESULT EnsureRuntimeOffsetsInit(IpcTarget i); // helper function for SendIPCEvent |
766 | void NeedRuntimeOffsetsReInit(IpcTarget i); |
767 | |
768 | DebuggerIPCEvent* GetIPCEventReceiveBuffer() |
769 | { |
770 | CONTRACTL |
771 | { |
772 | NOTHROW; |
773 | GC_NOTRIGGER; |
774 | } |
775 | CONTRACTL_END; |
776 | _ASSERTE(m_pDCB != NULL); |
777 | |
778 | return GetRCThreadReceiveBuffer(); |
779 | } |
780 | |
781 | HRESULT SendIPCReply(); |
782 | |
783 | // |
784 | // Handle Favors - get the Helper thread to do a function call for us |
785 | // because our thread can't (eg, we don't have the stack space) |
786 | // DoFavor will call (*fp)(pData) and block until fp returns. |
787 | // pData can store parameters, return value, and a this ptr (if we |
788 | // need to call a member function) |
789 | // |
790 | void DoFavor(FAVORCALLBACK fp, void * pData); |
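    // A minimal sketch of the favor pattern (hypothetical callback; assumes FAVORCALLBACK is a
    // void(*)(void*) callback as described above): pack arguments and results into a struct and
    // let the helper thread run the callback on our behalf. DoFavor blocks until it returns.
    //
    //      struct FavorArgs { int input; HRESULT hr; };
    //
    //      static void FavorWorker(void * pData)
    //      {
    //          FavorArgs * pArgs = (FavorArgs *) pData;
    //          pArgs->hr = DoTheRealWork(pArgs->input);   // DoTheRealWork is hypothetical
    //      }
    //
    //      FavorArgs args = { 42, E_FAIL };
    //      g_pRCThread->DoFavor(FavorWorker, &args);
    //      // args.hr now holds the result computed on the helper thread.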
791 | |
792 | // |
    // Convenience routines
794 | // |
795 | PTR_DebuggerIPCControlBlock GetDCB() |
796 | { |
797 | LIMITED_METHOD_DAC_CONTRACT; |
798 | // This may be called before we init or after we shutdown. |
799 | |
800 | return m_pDCB; |
801 | } |
802 | |
803 | void WatchForStragglers(void); |
804 | |
805 | HRESULT SetupRuntimeOffsets(DebuggerIPCControlBlock *pDCB); |
806 | |
807 | bool HandleRSEA(); |
808 | void MainLoop(); |
809 | void TemporaryHelperThreadMainLoop(); |
810 | |
811 | HANDLE GetHelperThreadCanGoEvent(void) {LIMITED_METHOD_CONTRACT; return m_helperThreadCanGoEvent; } |
812 | |
813 | void EarlyHelperThreadDeath(void); |
814 | |
815 | void RightSideDetach(void); |
816 | |
817 | // |
818 | // |
819 | // |
820 | void ThreadProc(void); |
821 | static DWORD WINAPI ThreadProcStatic(LPVOID parameter); |
822 | static DWORD WINAPI ThreadProcRemote(LPVOID parameter); |
823 | |
824 | DWORD GetRCThreadId() |
825 | { |
826 | LIMITED_METHOD_CONTRACT; |
827 | |
828 | return m_pDCB->m_helperThreadId; |
829 | } |
830 | |
    // Return true if the Helper Thread is up & initialized.
832 | bool IsRCThreadReady(); |
833 | |
834 | HRESULT ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor); |
835 | private: |
836 | |
837 | // The transport based communication protocol keeps the send and receive buffers outside of the DCB |
838 | // to keep the DCB size down (since we send it over the wire). |
839 | DebuggerIPCEvent * GetRCThreadReceiveBuffer() |
840 | { |
841 | #if defined(FEATURE_DBGIPC_TRANSPORT_VM) |
842 | return reinterpret_cast<DebuggerIPCEvent *>(&m_receiveBuffer[0]); |
843 | #else |
844 | return reinterpret_cast<DebuggerIPCEvent *>(&m_pDCB->m_receiveBuffer[0]); |
845 | #endif |
846 | } |
847 | |
848 | // The transport based communication protocol keeps the send and receive buffers outside of the DCB |
849 | // to keep the DCB size down (since we send it over the wire). |
850 | DebuggerIPCEvent * GetRCThreadSendBuffer() |
851 | { |
852 | #if defined(FEATURE_DBGIPC_TRANSPORT_VM) |
853 | return reinterpret_cast<DebuggerIPCEvent *>(&m_sendBuffer[0]); |
854 | #else // FEATURE_DBGIPC_TRANSPORT_VM |
855 | return reinterpret_cast<DebuggerIPCEvent *>(&m_pDCB->m_sendBuffer[0]); |
856 | #endif // FEATURE_DBGIPC_TRANSPORT_VM |
857 | } |
858 | |
859 | FAVORCALLBACK GetFavorFnPtr() { return m_favorData.m_fpFavor; } |
860 | void * GetFavorData() { return m_favorData.m_pFavorData; } |
861 | |
862 | void SetFavorFnPtr(FAVORCALLBACK fp, void * pData) |
863 | { |
864 | m_favorData.m_fpFavor = fp; |
865 | m_favorData.m_pFavorData = pData; |
866 | } |
867 | Crst * GetFavorLock() { return &m_favorData.m_FavorLock; } |
868 | |
869 | HANDLE GetFavorReadEvent() { return m_favorData.m_FavorReadEvent; } |
870 | HANDLE GetFavorAvailableEvent() { return m_favorData.m_FavorAvailableEvent; } |
871 | |
872 | HelperThreadFavor m_favorData; |
873 | |
874 | |
875 | HelperCanary * GetCanary() { return &GetLazyData()->m_Canary; } |
876 | |
877 | |
878 | friend class Debugger; |
879 | HRESULT VerifySecurityOnRSCreatedEvents(HANDLE sse, HANDLE lsea, HANDLE lser); |
880 | Debugger* m_debugger; |
881 | |
882 | // IPC_TARGET_* define default targets - if we ever want to do |
    // multiple right sides, we'll have to switch to an OUTOFPROC + iTargetProcess scheme
884 | PTR_DebuggerIPCControlBlock m_pDCB; |
885 | |
886 | #ifdef FEATURE_DBGIPC_TRANSPORT_VM |
887 | // These buffers move here out of the DebuggerIPCControlBlock since the block is not shared memory when |
888 | // using the transport, but we do send its contents over the wire (and these buffers would greatly impact |
889 | // the number of bytes sent without being useful in any way). |
890 | BYTE m_receiveBuffer[CorDBIPC_BUFFER_SIZE]; |
891 | BYTE m_sendBuffer[CorDBIPC_BUFFER_SIZE]; |
892 | #endif // FEATURE_DBGIPC_TRANSPORT_VM |
893 | |
894 | HANDLE m_thread; |
895 | bool m_run; |
896 | |
897 | HANDLE m_threadControlEvent; |
898 | HANDLE m_helperThreadCanGoEvent; |
899 | bool m_rgfInitRuntimeOffsets[IPC_TARGET_COUNT]; |
900 | bool m_fDetachRightSide; |
901 | |
902 | RCThreadLazyInit * GetLazyData(); |
903 | #ifdef _DEBUG |
904 | // Tracking to ensure that the helper thread only calls New() on the interop-safe heap. |
    // We need a very light-weight way to track the helper b/c we need to check every time somebody
906 | // calls operator new, which may occur during shutdown paths. |
907 | static EEThreadId s_DbgHelperThreadId; |
908 | |
909 | friend void AssertAllocationAllowed(); |
910 | |
911 | public: |
912 | // The OS ThreadId of the helper as determined from the CreateThread call. |
913 | DWORD m_DbgHelperThreadOSTid; |
914 | private: |
915 | #endif |
916 | |
917 | }; |
918 | |
919 | typedef DPTR(DebuggerRCThread) PTR_DebuggerRCThread; |
920 | |
921 | /* ------------------------------------------------------------------------ * |
922 | * Debugger Method Info struct and hash table |
923 | * ------------------------------------------------------------------------ */ |
924 | |
925 | // class DebuggerMethodInfo: Struct to hold all the information |
926 | // necessary for a given function. |
927 | // |
928 | // m_module, m_token: Method that this DMI applies to |
929 | // |
930 | const bool bOriginalToInstrumented = true; |
931 | const bool bInstrumentedToOriginal = false; |
932 | |
933 | class DebuggerMethodInfo |
934 | { |
935 | // This is the most recent version of the function based on the latest update and is |
936 | // set in UpdateFunction. When a function is jitted, the version is copied from here |
    // and stored in the corresponding DebuggerJitInfo structure so we can always know the
938 | // version of a particular jitted function. |
939 | SIZE_T m_currentEnCVersion; |
940 | |
941 | public: |
942 | PTR_Module m_module; |
943 | mdMethodDef m_token; |
944 | |
945 | PTR_DebuggerMethodInfo m_prevMethodInfo; |
946 | PTR_DebuggerMethodInfo m_nextMethodInfo; |
947 | |
948 | |
949 | // Enumerate DJIs |
950 | // Expected usage: |
951 | // DMI.InitDJIIterator(&it); |
952 | // while(!it.IsAtEnd()) { |
953 | // f(it.Current()); it.Next(); |
954 | // } |
955 | class DJIIterator |
956 | { |
957 | friend class DebuggerMethodInfo; |
958 | |
959 | DebuggerJitInfo* m_pCurrent; |
960 | Module* m_pLoaderModuleFilter; |
961 | MethodDesc* m_pMethodDescFilter; |
962 | public: |
963 | DJIIterator(); |
964 | |
965 | bool IsAtEnd(); |
966 | DebuggerJitInfo * Current(); |
967 | void Next(BOOL fFirst = FALSE); |
968 | |
969 | }; |
970 | |
971 | // Ensure the DJI cache is completely up to date. (This can be an expensive call, but |
972 | // much less so if pMethodDescFilter is used). |
973 | void CreateDJIsForNativeBlobs(AppDomain * pAppDomain, Module * pModuleFilter, MethodDesc * pMethodDescFilter); |
974 | |
975 | // Ensure the DJI cache is up to date for a particular closed method desc |
976 | void CreateDJIsForMethodDesc(MethodDesc * pMethodDesc); |
977 | |
    // Get an iterator for all native blobs (accounts for Generics, EnC, + Prejitting).
    // Must be stopped when we do this. This could be heavyweight.
980 | // This will call CreateDJIsForNativeBlobs() to ensure we have all DJIs available. |
981 | // You may optionally pass pLoaderModuleFilter to restrict the DJIs iterated to |
982 | // exist only on MethodDescs whose loader module matches the filter (pass NULL not |
983 | // to filter by loader module). |
984 | // You may optionally pass pMethodDescFilter to restrict the DJIs iterated to only |
985 | // a single generic instantiation. |
986 | void IterateAllDJIs(AppDomain * pAppDomain, Module * pLoaderModuleFilter, MethodDesc * pMethodDescFilter, DJIIterator * pEnum); |
987 | |
988 | private: |
989 | // The linked list of JIT's of this version of the method. This will ALWAYS |
990 | // contain one element except for code in generic classes or generic methods, |
991 | // which may get JITted more than once under different type instantiations. |
992 | // |
993 | // We find the appropriate JitInfo by searching the list (nearly always this |
994 | // will return the first element of course). |
995 | // |
996 | // The JitInfos contain back pointers to this MethodInfo. They should never be associated |
997 | // with any other MethodInfo. |
998 | // |
999 | // USE ACCESSOR FUNCTION GetLatestJitInfo(), as it does lazy init of this field. |
1000 | // |
1001 | |
1002 | PTR_DebuggerJitInfo m_latestJitInfo; |
1003 | |
1004 | public: |
1005 | |
1006 | PTR_DebuggerJitInfo GetLatestJitInfo(MethodDesc *fd); |
1007 | |
1008 | DebuggerJitInfo * GetLatestJitInfo_NoCreate(); |
1009 | |
1010 | |
1011 | // Find the DJI corresponding to the specified MD and native start address. |
1012 | DebuggerJitInfo * FindJitInfo(MethodDesc * pMD, TADDR addrNativeStartAddr); |
1013 | |
1014 | // Creating the Jit-infos. |
1015 | DebuggerJitInfo *FindOrCreateInitAndAddJitInfo(MethodDesc* fd, PCODE startAddr); |
1016 | DebuggerJitInfo *CreateInitAndAddJitInfo(MethodDesc* fd, TADDR startAddr, BOOL* jitInfoWasCreated); |
1017 | |
1018 | |
1019 | void DeleteJitInfo(DebuggerJitInfo *dji); |
1020 | void DeleteJitInfoList(void); |
1021 | |
1022 | // Return true iff this has been jitted. |
1023 | // Since we can create DMIs freely, a DMI's existence doesn't mean that the method was jitted. |
1024 | bool HasJitInfos(); |
1025 | |
1026 | // Return true iff this has been EnCed since the last time the function was jitted. |
1027 | bool HasMoreRecentEnCVersion(); |
1028 | |
1029 | |
    // Return true iff this is a JMC function, else false.
1031 | bool IsJMCFunction(); |
1032 | void SetJMCStatus(bool fStatus); |
1033 | |
1034 | |
1035 | DebuggerMethodInfo(Module *module, mdMethodDef token); |
1036 | ~DebuggerMethodInfo(); |
1037 | |
1038 | // A profiler can remap the IL. We track the "instrumented" IL map here. |
1039 | void SetInstrumentedILMap(COR_IL_MAP * pMap, SIZE_T cEntries); |
1040 | bool HasInstrumentedILMap() {return m_fHasInstrumentedILMap; } |
1041 | |
1042 | // TranslateToInstIL will take offOrig, and translate it to the |
1043 | // correct IL offset if this code happens to be instrumented |
1044 | ULONG32 TranslateToInstIL(const InstrumentedILOffsetMapping * pMapping, ULONG32 offOrig, bool fOrigToInst); |
1045 | |
1046 | |
1047 | // We don't always have a debugger module. (Ex: we're tracking debug info, |
    // but no debugger's attached). So this may return NULL a lot.
    // If we can, we should use the RuntimeModule whenever possible.
1050 | DebuggerModule* GetPrimaryModule(); |
1051 | |
1052 | // We always have a runtime module. |
1053 | Module * GetRuntimeModule(); |
1054 | |
1055 | // Set the latest EnC version number for this method |
1056 | // This doesn't mean we have a DJI for this version yet. |
1057 | void SetCurrentEnCVersion(SIZE_T currentEnCVersion) |
1058 | { |
1059 | LIMITED_METHOD_CONTRACT; |
1060 | |
1061 | _ASSERTE(currentEnCVersion >= CorDB_DEFAULT_ENC_FUNCTION_VERSION); |
1062 | m_currentEnCVersion = currentEnCVersion; |
1063 | } |
1064 | |
1065 | SIZE_T GetCurrentEnCVersion() |
1066 | { |
1067 | LIMITED_METHOD_CONTRACT; |
1068 | SUPPORTS_DAC; |
1069 | |
1070 | return m_currentEnCVersion; |
1071 | } |
1072 | |
1073 | #ifdef DACCESS_COMPILE |
1074 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
1075 | #endif |
1076 | |
1077 | protected: |
1078 | // JMC info. Each method can have its own JMC setting. |
1079 | bool m_fJMCStatus; |
1080 | |
1081 | // "Instrumented" IL map set by the profiler. |
1082 | // @dbgtodo execution control - remove this when we do execution control from out-of-proc |
1083 | bool m_fHasInstrumentedILMap; |
1084 | }; |
1085 | |
1086 | // ------------------------------------------------------------------------ * |
1087 | // Executable code memory management for the debugger heap. |
1088 | // |
1089 | // Rather than allocating memory that needs to be executable on the process heap (which |
1090 | // is forbidden on some flavors of SELinux and is generally a bad idea), we use the |
1091 | // allocator below. It will handle allocating and managing the executable memory in a |
1092 | // different part of the address space (not on the heap). |
1093 | // ------------------------------------------------------------------------ */ |
1094 | |
1095 | #define DBG_MAX_EXECUTABLE_ALLOC_SIZE 48 |
1096 | |
1097 | // Forward declaration |
1098 | struct DebuggerHeapExecutableMemoryPage; |
1099 | |
1100 | // ------------------------------------------------------------------------ */ |
1101 | // DebuggerHeapExecutableMemoryChunk |
1102 | // |
1103 | // Each DebuggerHeapExecutableMemoryPage is divided into 64 of these chunks. |
1104 | // The first chunk is a BookkeepingChunk used for bookkeeping information |
1105 | // for the page, and the remaining ones are DataChunks and are handed out |
1106 | // by the allocator when it allocates memory. |
1107 | // ------------------------------------------------------------------------ */ |
1108 | union DECLSPEC_ALIGN(64) DebuggerHeapExecutableMemoryChunk { |
1109 | |
1110 | struct DataChunk |
1111 | { |
1112 | char data[DBG_MAX_EXECUTABLE_ALLOC_SIZE]; |
1113 | |
1114 | DebuggerHeapExecutableMemoryPage *startOfPage; |
1115 | |
1116 | // The chunk number within the page. |
1117 | uint8_t chunkNumber; |
1118 | |
1119 | } data; |
1120 | |
1121 | struct BookkeepingChunk |
1122 | { |
1123 | DebuggerHeapExecutableMemoryPage *nextPage; |
1124 | |
1125 | uint64_t pageOccupancy; |
1126 | |
1127 | } bookkeeping; |
1128 | |
1129 | char _alignpad[64]; |
1130 | }; |
1131 | |
static_assert(sizeof(DebuggerHeapExecutableMemoryChunk) == 64, "DebuggerHeapExecutableMemoryChunk is expected to be 64 bytes.");
1133 | |
1134 | // ------------------------------------------------------------------------ */ |
1135 | // DebuggerHeapExecutableMemoryPage |
1136 | // |
1137 | // We allocate the size of DebuggerHeapExecutableMemoryPage each time we need |
1138 | // more memory and divide each page into DebuggerHeapExecutableMemoryChunks for |
1139 | // use. The pages are self describing; the first chunk contains information |
1140 | // about which of the other chunks are used/free as well as a pointer to |
1141 | // the next page. |
1142 | // ------------------------------------------------------------------------ */ |
1143 | struct DECLSPEC_ALIGN(4096) DebuggerHeapExecutableMemoryPage |
1144 | { |
1145 | inline DebuggerHeapExecutableMemoryPage* GetNextPage() |
1146 | { |
1147 | return chunks[0].bookkeeping.nextPage; |
1148 | } |
1149 | |
1150 | inline void SetNextPage(DebuggerHeapExecutableMemoryPage* nextPage) |
1151 | { |
1152 | chunks[0].bookkeeping.nextPage = nextPage; |
1153 | } |
1154 | |
1155 | inline uint64_t GetPageOccupancy() const |
1156 | { |
1157 | return chunks[0].bookkeeping.pageOccupancy; |
1158 | } |
1159 | |
1160 | inline void SetPageOccupancy(uint64_t newOccupancy) |
1161 | { |
1162 | // Can't unset first bit of occupancy! |
1163 | ASSERT((newOccupancy & 0x8000000000000000) != 0); |
1164 | |
1165 | chunks[0].bookkeeping.pageOccupancy = newOccupancy; |
1166 | } |
1167 | |
1168 | inline void* GetPointerToChunk(int chunkNum) const |
1169 | { |
1170 | return (char*)this + chunkNum * sizeof(DebuggerHeapExecutableMemoryChunk); |
1171 | } |
1172 | |
1173 | DebuggerHeapExecutableMemoryPage() |
1174 | { |
1175 | SetPageOccupancy(0x8000000000000000); // only the first bit is set. |
1176 | for (uint8_t i = 1; i < sizeof(chunks)/sizeof(chunks[0]); i++) |
1177 | { |
1178 | ASSERT(i != 0); |
1179 | chunks[i].data.startOfPage = this; |
1180 | chunks[i].data.chunkNumber = i; |
1181 | } |
1182 | } |
1183 | |
1184 | private: |
1185 | DebuggerHeapExecutableMemoryChunk chunks[64]; |
1186 | }; |
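// A minimal sketch (an assumption about the intended bit layout, not the allocator's actual
// code): bit 63 of the occupancy mask marks the bookkeeping chunk and is always set, while each
// lower bit tracks whether the corresponding data chunk is handed out.
//
//      uint64_t occupancy = page->GetPageOccupancy();
//      for (int chunk = 1; chunk < 64; chunk++)
//      {
//          bool inUse = (occupancy & (1ull << (63 - chunk))) != 0;
//          // a free chunk can be claimed by setting its bit via SetPageOccupancy()
//      }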
1187 | |
1188 | // ------------------------------------------------------------------------ */ |
1189 | // DebuggerHeapExecutableMemoryAllocator class |
1190 | // Handles allocation and freeing (and all necessary bookkeeping) for |
1191 | // executable memory that the DebuggerHeap class needs. This is especially |
1192 | // useful on systems (like SELinux) where having executable code on the |
// heap is explicitly disallowed for security reasons.
1194 | // ------------------------------------------------------------------------ */ |
1195 | |
1196 | class DebuggerHeapExecutableMemoryAllocator |
1197 | { |
1198 | public: |
1199 | DebuggerHeapExecutableMemoryAllocator() |
1200 | : m_pages(NULL) |
1201 | , m_execMemAllocMutex(CrstDebuggerHeapExecMemLock, (CrstFlags)(CRST_UNSAFE_ANYMODE | CRST_REENTRANCY | CRST_DEBUGGER_THREAD)) |
1202 | { } |
1203 | |
1204 | ~DebuggerHeapExecutableMemoryAllocator(); |
1205 | |
1206 | void* Allocate(DWORD numberOfBytes); |
1207 | int Free(void* addr); |
1208 | |
1209 | private: |
1210 | enum class ChangePageUsageAction {ALLOCATE, FREE}; |
1211 | |
1212 | DebuggerHeapExecutableMemoryPage* AddNewPage(); |
1213 | bool CheckPageForAvailability(DebuggerHeapExecutableMemoryPage* page, /* _Out_ */ int* chunkToUse); |
1214 | void* ChangePageUsage(DebuggerHeapExecutableMemoryPage* page, int chunkNumber, ChangePageUsageAction action); |
1215 | |
1216 | private: |
1217 | // Linked list of pages that have been allocated |
1218 | DebuggerHeapExecutableMemoryPage* m_pages; |
1219 | Crst m_execMemAllocMutex; |
1220 | }; |
1221 | |
1222 | // ------------------------------------------------------------------------ * |
1223 | // DebuggerHeap class |
1224 | // For interop debugging, we need a heap that: |
//     - does not take any outside locks
//     - returns memory which could be executed.
1227 | // ------------------------------------------------------------------------ */ |
1228 | |
1229 | #ifdef FEATURE_INTEROP_DEBUGGING |
1230 | #define USE_INTEROPSAFE_HEAP |
1231 | #endif |
1232 | |
1233 | class DebuggerHeap |
1234 | { |
1235 | public: |
1236 | DebuggerHeap(); |
1237 | ~DebuggerHeap(); |
1238 | |
1239 | bool IsInit(); |
1240 | void Destroy(); |
1241 | HRESULT Init(BOOL fExecutable); |
1242 | |
1243 | void *Alloc(DWORD size); |
1244 | void *Realloc(void *pMem, DWORD newSize, DWORD oldSize); |
1245 | void Free(void *pMem); |
1246 | |
1247 | |
1248 | protected: |
1249 | #ifdef USE_INTEROPSAFE_HEAP |
1250 | HANDLE m_hHeap; |
1251 | #endif |
1252 | BOOL m_fExecutable; |
1253 | |
1254 | private: |
1255 | DebuggerHeapExecutableMemoryAllocator *m_execMemAllocator; |
1256 | }; |
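// A minimal usage sketch (hypothetical caller): Init must succeed before Alloc/Free are used;
// passing fExecutable = TRUE requests memory suitable for holding code, which is what the
// executable-memory allocator above exists to provide.
//
//      DebuggerHeap heap;
//      if (SUCCEEDED(heap.Init(TRUE /* fExecutable */)))
//      {
//          void * pStub = heap.Alloc(32);
//          // ... fill in the stub ...
//          heap.Free(pStub);
//      }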
1257 | |
1258 | class DebuggerJitInfo; |
1259 | |
1260 | #if defined(WIN64EXCEPTIONS) |
1261 | const int PARENT_METHOD_INDEX = -1; |
1262 | #endif // WIN64EXCEPTIONS |
1263 | |
1264 | class CodeRegionInfo |
1265 | { |
1266 | public: |
1267 | CodeRegionInfo() : |
1268 | m_addrOfHotCode(NULL), |
1269 | m_addrOfColdCode(NULL), |
1270 | m_sizeOfHotCode(0), |
1271 | m_sizeOfColdCode(0) |
1272 | { |
1273 | WRAPPER_NO_CONTRACT; |
1274 | SUPPORTS_DAC; |
1275 | } |
1276 | |
1277 | static CodeRegionInfo GetCodeRegionInfo(DebuggerJitInfo * dji, |
1278 | MethodDesc * md = NULL, |
1279 | PTR_CORDB_ADDRESS_TYPE addr = PTR_NULL); |
1280 | |
    // Fills in the CodeRegionInfo fields from the start address.
1282 | void InitializeFromStartAddress(PCODE addr) |
1283 | { |
1284 | CONTRACTL |
1285 | { |
1286 | NOTHROW; |
1287 | GC_NOTRIGGER; |
1288 | SUPPORTS_DAC; |
1289 | } |
1290 | CONTRACTL_END; |
1291 | |
1292 | m_addrOfHotCode = addr; |
1293 | g_pEEInterface->GetMethodRegionInfo(addr, |
1294 | &m_addrOfColdCode, |
1295 | (size_t *) &m_sizeOfHotCode, |
1296 | (size_t *) &m_sizeOfColdCode); |
1297 | } |
1298 | |
1299 | // Converts an offset within a method to a code address |
1300 | PCODE OffsetToAddress(SIZE_T offset) |
1301 | { |
1302 | LIMITED_METHOD_CONTRACT; |
1303 | |
1304 | if (m_addrOfHotCode != NULL) |
1305 | { |
1306 | if (offset < m_sizeOfHotCode) |
1307 | { |
1308 | return m_addrOfHotCode + offset; |
1309 | } |
1310 | else |
1311 | { |
1312 | _ASSERTE(m_addrOfColdCode); |
1313 | _ASSERTE(offset <= m_sizeOfHotCode + m_sizeOfColdCode); |
1314 | |
1315 | return m_addrOfColdCode + (offset - m_sizeOfHotCode); |
1316 | } |
1317 | } |
1318 | else |
1319 | { |
1320 | return NULL; |
1321 | } |
1322 | } |
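    // A small worked example of the mapping above (addresses and sizes are illustrative only):
    // with m_addrOfHotCode = 0x1000, m_sizeOfHotCode = 0x40, and m_addrOfColdCode = 0x8000,
    // offset 0x10 maps to 0x1010 (hot), while offset 0x50 maps to 0x8000 + (0x50 - 0x40) = 0x8010 (cold).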
1323 | |
1324 | // Converts a code address to an offset within the method |
1325 | SIZE_T AddressToOffset(const BYTE *addr) |
1326 | { |
1327 | LIMITED_METHOD_CONTRACT; |
1328 | |
1329 | PCODE address = (PCODE)addr; |
1330 | |
1331 | if ((address >= m_addrOfHotCode) && |
1332 | (address < m_addrOfHotCode + m_sizeOfHotCode)) |
1333 | { |
1334 | return address - m_addrOfHotCode; |
1335 | } |
1336 | else if ((address >= m_addrOfColdCode) && |
1337 | (address < m_addrOfColdCode + m_sizeOfColdCode)) |
1338 | { |
1339 | return address - m_addrOfColdCode + m_sizeOfHotCode; |
1340 | } |
1341 | |
1342 | _ASSERTE(!"addressToOffset called with invalid address" ); |
1343 | return NULL; |
1344 | } |
1345 | |
1346 | // Determines whether the address lies within the method |
1347 | bool IsMethodAddress(const BYTE *addr) |
1348 | { |
1349 | LIMITED_METHOD_CONTRACT; |
1350 | |
1351 | PCODE address = PINSTRToPCODE((TADDR)addr); |
1352 | return (((address >= m_addrOfHotCode) && |
1353 | (address < m_addrOfHotCode + m_sizeOfHotCode)) || |
1354 | ((address >= m_addrOfColdCode) && |
1355 | (address < m_addrOfColdCode + m_sizeOfColdCode))); |
1356 | } |
1357 | |
1358 | // Determines whether the offset is in the hot section |
1359 | bool IsOffsetHot(SIZE_T offset) |
1360 | { |
1361 | LIMITED_METHOD_CONTRACT; |
1362 | |
1363 | return (offset < m_sizeOfHotCode); |
1364 | } |
1365 | |
1366 | PCODE getAddrOfHotCode() {LIMITED_METHOD_DAC_CONTRACT; return m_addrOfHotCode;} |
1367 | PCODE getAddrOfColdCode() {LIMITED_METHOD_DAC_CONTRACT; return m_addrOfColdCode;} |
1368 | SIZE_T getSizeOfHotCode() {LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfHotCode;} |
1369 | SIZE_T getSizeOfColdCode() {LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfColdCode;} |
1370 | SIZE_T getSizeOfTotalCode(){LIMITED_METHOD_DAC_CONTRACT; return m_sizeOfHotCode + m_sizeOfColdCode; } |
1371 | |
1372 | private: |
1373 | |
1374 | PCODE m_addrOfHotCode; |
1375 | PCODE m_addrOfColdCode; |
1376 | SIZE_T m_sizeOfHotCode; |
1377 | SIZE_T m_sizeOfColdCode; |
1378 | }; |
1379 | |
1380 | /* ------------------------------------------------------------------------ * |
1381 | * Debugger JIT Info struct |
1382 | * ------------------------------------------------------------------------ */ |
1383 | |
1384 | // class DebuggerJitInfo: Struct to hold all the JIT information |
1385 | // necessary for a given function. |
//    - DJIs are 1:1 w/ native codeblobs. They're almost 1:1 w/ Native Method Descs,
//      except that a MethodDesc only refers to the most recent EnC version of a method.
1388 | // - If 2 DJIs are different, they refer to different code-blobs. |
1389 | // - DJIs are lazily created, and so you can't safely enumerate them b/c |
1390 | // you can't rely on whether they're created or not. |
1391 | |
1392 | |
1393 | // |
1394 | // MethodDesc* m_fd: MethodDesc of the method that this DJI applies to |
1395 | // |
1396 | // CORDB_ADDRESS m_addrOfCode: Address of the code. This will be read by |
1397 | // the right side (via ReadProcessMemory) to grab the actual native start |
1398 | // address of the jitted method. |
1399 | // |
// SIZE_T m_sizeOfCode: Pseudo-private variable: use the GetSizeOfCode
1401 | // method to get this value. |
1402 | // |
1403 | // bool m_jitComplete: Set to true once JITComplete has been called. |
1404 | // |
1405 | // DebuggerILToNativeMap* m_sequenceMap: This is the sequence map, which |
1406 | // is actually a collection of IL-Native pairs, where each IL corresponds |
// to a line of source code. Each pair is referred to as a sequence map point.
1408 | // |
1409 | // SIZE_T m_lastIL: last nonEPILOG instruction |
1410 | // |
1411 | // unsigned int m_sequenceMapCount: Count of the DebuggerILToNativeMaps |
1412 | // in m_sequenceMap. |
1413 | // |
// bool m_sequenceMapSorted: Set to true once m_sequenceMap is sorted
// into ascending IL order (Debugger::setBoundaries, SortMap).
1416 | // |
1417 | |
1418 | class DebuggerJitInfo |
1419 | { |
1420 | public: |
1421 | PTR_MethodDesc m_fd; |
1422 | |
    // Loader module is used to control the lifetime of DebuggerJitInfo. Ideally, we would refactor the code to use LoaderAllocator here
    // instead because it is what the VM actually uses to track the lifetime. It would make the debugger interface less chatty.
1425 | PTR_Module m_pLoaderModule; |
1426 | |
1427 | bool m_jitComplete; |
1428 | |
1429 | #ifdef EnC_SUPPORTED |
1430 | // If this is true, then we've plastered the method with DebuggerEncBreakpoints |
1431 | // and the method has been EnC'd |
1432 | bool m_encBreakpointsApplied; |
1433 | #endif //EnC_SUPPORTED |
1434 | |
1435 | PTR_DebuggerMethodInfo m_methodInfo; |
1436 | |
1437 | CORDB_ADDRESS m_addrOfCode; |
1438 | SIZE_T m_sizeOfCode; |
1439 | |
1440 | CodeRegionInfo m_codeRegionInfo; |
1441 | |
1442 | PTR_DebuggerJitInfo m_prevJitInfo; |
1443 | PTR_DebuggerJitInfo m_nextJitInfo; |
1444 | |
1445 | protected: |
1446 | // The jit maps are lazy-initialized. |
1447 | // They are always sorted. |
1448 | ULONG m_lastIL; |
1449 | PTR_DebuggerILToNativeMap m_sequenceMap; |
1450 | unsigned int m_sequenceMapCount; |
1451 | PTR_DebuggerILToNativeMap m_callsiteMap; |
1452 | unsigned int m_callsiteMapCount; |
1453 | bool m_sequenceMapSorted; |
1454 | |
1455 | PTR_NativeVarInfo m_varNativeInfo; |
1456 | unsigned int m_varNativeInfoCount; |
1457 | |
1458 | bool m_fAttemptInit; |
1459 | |
1460 | #ifndef DACCESS_COMPILE |
1461 | void LazyInitBounds(); |
1462 | #else |
1463 | void LazyInitBounds() { LIMITED_METHOD_DAC_CONTRACT; } |
1464 | #endif |
1465 | |
1466 | public: |
1467 | unsigned int GetSequenceMapCount() |
1468 | { |
1469 | SUPPORTS_DAC; |
1470 | |
1471 | LazyInitBounds(); |
1472 | return m_sequenceMapCount; |
1473 | } |
1474 | |
1475 | //@todo: this method could return NULL, but some callers are not handling the case |
1476 | PTR_DebuggerILToNativeMap GetSequenceMap() |
1477 | { |
1478 | SUPPORTS_DAC; |
1479 | |
1480 | LazyInitBounds(); |
1481 | return m_sequenceMap; |
1482 | } |
1483 | |
1484 | unsigned int GetCallsiteMapCount() |
1485 | { |
1486 | SUPPORTS_DAC; |
1487 | |
1488 | LazyInitBounds(); |
1489 | return m_callsiteMapCount; |
1490 | } |
1491 | |
1492 | PTR_DebuggerILToNativeMap GetCallSiteMap() |
1493 | { |
1494 | SUPPORTS_DAC; |
1495 | |
1496 | LazyInitBounds(); |
1497 | return m_callsiteMap; |
1498 | } |
1499 | |
1500 | PTR_NativeVarInfo GetVarNativeInfo() |
1501 | { |
1502 | SUPPORTS_DAC; |
1503 | |
1504 | LazyInitBounds(); |
1505 | return m_varNativeInfo; |
1506 | } |
1507 | |
1508 | unsigned int GetVarNativeInfoCount() |
1509 | { |
1510 | SUPPORTS_DAC; |
1511 | |
1512 | LazyInitBounds(); |
1513 | return m_varNativeInfoCount; |
1514 | } |
1515 | |
1516 | |
1517 | // The version number of this jitted code |
1518 | SIZE_T m_encVersion; |
1519 | |
1520 | #if defined(WIN64EXCEPTIONS) |
1521 | DWORD *m_rgFunclet; |
1522 | int m_funcletCount; |
1523 | #endif // WIN64EXCEPTIONS |
1524 | |
1525 | #ifndef DACCESS_COMPILE |
1526 | |
1527 | DebuggerJitInfo(DebuggerMethodInfo *minfo, MethodDesc *fd); |
1528 | ~DebuggerJitInfo(); |
1529 | |
#endif // !DACCESS_COMPILE
1531 | |
1532 | class ILToNativeOffsetIterator; |
1533 | |
1534 | // Usage of ILToNativeOffsetIterator: |
1535 | // |
1536 | // ILToNativeOffsetIterator it; |
1537 | // dji->InitILToNativeOffsetIterator(&it, ilOffset); |
1538 | // while (!it.IsAtEnd()) |
1539 | // { |
1540 | // nativeOffset = it.Current(&fExact); |
1541 | // it.Next(); |
1542 | // } |
1543 | struct ILOffset |
1544 | { |
1545 | friend class DebuggerJitInfo; |
1546 | friend class DebuggerJitInfo::ILToNativeOffsetIterator; |
1547 | |
1548 | private: |
1549 | SIZE_T m_ilOffset; |
1550 | #ifdef WIN64EXCEPTIONS |
1551 | int m_funcletIndex; |
1552 | #endif |
1553 | }; |
1554 | |
1555 | struct NativeOffset |
1556 | { |
1557 | friend class DebuggerJitInfo; |
1558 | friend class DebuggerJitInfo::ILToNativeOffsetIterator; |
1559 | |
1560 | private: |
1561 | SIZE_T m_nativeOffset; |
1562 | BOOL m_fExact; |
1563 | }; |
1564 | |
1565 | class ILToNativeOffsetIterator |
1566 | { |
1567 | friend class DebuggerJitInfo; |
1568 | |
1569 | public: |
1570 | ILToNativeOffsetIterator(); |
1571 | |
1572 | bool IsAtEnd(); |
1573 | SIZE_T Current(BOOL* pfExact); |
1574 | SIZE_T CurrentAssertOnlyOne(BOOL* pfExact); |
1575 | void Next(); |
1576 | |
1577 | private: |
1578 | void Init(DebuggerJitInfo* dji, SIZE_T ilOffset); |
1579 | |
1580 | DebuggerJitInfo* m_dji; |
1581 | ILOffset m_currentILOffset; |
1582 | NativeOffset m_currentNativeOffset; |
1583 | }; |
1584 | |
1585 | void InitILToNativeOffsetIterator(ILToNativeOffsetIterator &it, SIZE_T ilOffset); |
1586 | |
1587 | DebuggerILToNativeMap *MapILOffsetToMapEntry(SIZE_T ilOffset, BOOL *exact=NULL, BOOL fWantFirst = TRUE); |
1588 | void MapILRangeToMapEntryRange(SIZE_T ilStartOffset, SIZE_T ilEndOffset, |
1589 | DebuggerILToNativeMap **start, |
1590 | DebuggerILToNativeMap **end); |
1591 | NativeOffset MapILOffsetToNative(ILOffset ilOffset); |
1592 | |
    // MapSpecialToNative maps a CorDebugMappingResult to a native
    // offset so that we can get the address of the prolog & epilog. The
    // 'which' parameter selects which prolog or epilog, if there's more than one.
1596 | SIZE_T MapSpecialToNative(CorDebugMappingResult mapping, |
1597 | SIZE_T which, |
1598 | BOOL *pfAccurate); |
1599 | #if defined(WIN64EXCEPTIONS) |
1600 | void MapSpecialToNative(int funcletIndex, DWORD* pPrologEndOffset, DWORD* pEpilogStartOffset); |
1601 | SIZE_T MapILOffsetToNativeForSetIP(SIZE_T offsetILTo, int funcletIndexFrom, EHRangeTree* pEHRT, BOOL* pExact); |
#endif // WIN64EXCEPTIONS
1603 | |
    // MapNativeOffsetToIL takes a given nativeOffset and maps it back
    // to the corresponding IL offset, which it returns. If the mapping indicates
    // that the native offset corresponds to a special region of code (for
    // example, the epilog), then the return value is as specified by
    // ICorDebugILFrame::GetIP (see cordebug.idl).
1609 | DWORD MapNativeOffsetToIL(SIZE_T nativeOffsetToMap, |
1610 | CorDebugMappingResult *mapping, |
1611 | DWORD *which, |
1612 | BOOL skipPrologs=FALSE); |
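
    // Illustrative use of MapNativeOffsetToIL (a sketch; 'dji' and 'nativeOffset'
    // are hypothetical locals):
    //
    //     CorDebugMappingResult mapping;
    //     DWORD which;
    //     DWORD ilOffset = dji->MapNativeOffsetToIL(nativeOffset, &mapping, &which);
    //     // If 'mapping' reports a special region (e.g. MAPPING_PROLOG or MAPPING_EPILOG),
    //     // the IL offset is not a normal offset; see ICorDebugILFrame::GetIP in cordebug.idl.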
1613 | |
1614 | // If a method has multiple copies of code (because of EnC or code-pitching), |
1615 | // this returns the DJI corresponding to 'pbAddr' |
1616 | DebuggerJitInfo *GetJitInfoByAddress(const BYTE *pbAddr ); |
1617 | |
1618 | void Init(TADDR newAddress); |
1619 | |
1620 | #if defined(WIN64EXCEPTIONS) |
1621 | enum GetFuncletIndexMode |
1622 | { |
1623 | GFIM_BYOFFSET, |
1624 | GFIM_BYADDRESS, |
1625 | }; |
1626 | |
1627 | void InitFuncletAddress(); |
1628 | DWORD GetFuncletOffsetByIndex(int index); |
1629 | int GetFuncletIndex(CORDB_ADDRESS offset, GetFuncletIndexMode mode); |
1630 | int GetFuncletCount() {return m_funcletCount;} |
1631 | #endif // WIN64EXCEPTIONS |
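
    // Illustrative funclet lookup (a sketch; 'dji' and 'addr' are hypothetical).
    // Based on the GetFuncletIndexMode values above, GetFuncletIndex can key either
    // off a native offset into the method (GFIM_BYOFFSET) or off a code address
    // (GFIM_BYADDRESS):
    //
    //     int idx = dji->GetFuncletIndex(addr, DebuggerJitInfo::GFIM_BYADDRESS);
    //     if ((idx >= 0) && (idx < dji->GetFuncletCount()))
    //     {
    //         DWORD funcletStartOffset = dji->GetFuncletOffsetByIndex(idx);
    //         // ...
    //     }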
1632 | |
1633 | void SetVars(ULONG32 cVars, ICorDebugInfo::NativeVarInfo *pVars); |
1634 | void SetBoundaries(ULONG32 cMap, ICorDebugInfo::OffsetMapping *pMap); |
1635 | |
1636 | ICorDebugInfo::SourceTypes GetSrcTypeFromILOffset(SIZE_T ilOffset); |
1637 | |
1638 | #ifdef DACCESS_COMPILE |
1639 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
1640 | #endif |
1641 | |
1642 | // Debug support |
1643 | CHECK Check() const; |
1644 | CHECK Invariant() const; |
1645 | }; |
1646 | |
1647 | #if !defined(DACCESS_COMPILE) |
1648 | // @dbgtodo Microsoft inspection: get rid of this class when IPC events are eliminated. It's been copied to |
1649 | // dacdbistructures |
1650 | /* |
 * class MapSortIL: A template class that will sort an array of DebuggerILToNativeMap by IL offset,
 * with special entries (PROLOG, EPILOG, NO_MAPPING, CALL_INSTRUCTION) given special placement; see Compare below.
1652 | * This class is intended to be instantiated on the stack / in temporary storage, and used to reorder the sequence map. |
1653 | */ |
1654 | class MapSortIL : public CQuickSort<DebuggerILToNativeMap> |
1655 | { |
1656 | public: |
1657 | //Constructor |
1658 | MapSortIL(DebuggerILToNativeMap *map, |
1659 | int count) |
1660 | : CQuickSort<DebuggerILToNativeMap>(map, count) {} |
1661 | |
1662 | inline int CompareInternal(DebuggerILToNativeMap *first, |
1663 | DebuggerILToNativeMap *second) |
1664 | { |
1665 | LIMITED_METHOD_CONTRACT; |
1666 | |
1667 | if (first->nativeStartOffset == second->nativeStartOffset) |
1668 | return 0; |
1669 | else if (first->nativeStartOffset < second->nativeStartOffset) |
1670 | return -1; |
1671 | else |
1672 | return 1; |
1673 | } |
1674 | |
1675 | //Comparison operator |
1676 | int Compare(DebuggerILToNativeMap *first, |
1677 | DebuggerILToNativeMap *second) |
1678 | { |
1679 | LIMITED_METHOD_CONTRACT; |
1680 | |
1681 | const DWORD call_inst = (DWORD)ICorDebugInfo::CALL_INSTRUCTION; |
1682 | |
1683 | //PROLOGs go first |
1684 | if (first->ilOffset == (ULONG) ICorDebugInfo::PROLOG |
1685 | && second->ilOffset == (ULONG) ICorDebugInfo::PROLOG) |
1686 | { |
1687 | return CompareInternal(first, second); |
1688 | } else if (first->ilOffset == (ULONG) ICorDebugInfo::PROLOG) |
1689 | { |
1690 | return -1; |
1691 | } else if (second->ilOffset == (ULONG) ICorDebugInfo::PROLOG) |
1692 | { |
1693 | return 1; |
1694 | } |
1695 | // call_instruction goes at the very very end of the table. |
1696 | else if ((first->source & call_inst) == call_inst |
1697 | && (second->source & call_inst) == call_inst) |
1698 | { |
1699 | return CompareInternal(first, second); |
1700 | } else if ((first->source & call_inst) == call_inst) |
1701 | { |
1702 | return 1; |
1703 | } else if ((second->source & call_inst) == call_inst) |
1704 | { |
1705 | return -1; |
1706 | } |
1707 | //NO_MAPPING go last |
1708 | else if (first->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING |
1709 | && second->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING) |
1710 | { |
1711 | return CompareInternal(first, second); |
1712 | } else if (first->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING) |
1713 | { |
1714 | return 1; |
1715 | } else if (second->ilOffset == (ULONG) ICorDebugInfo::NO_MAPPING) |
1716 | { |
1717 | return -1; |
1718 | } |
1719 | //EPILOGs go next-to-last |
1720 | else if (first->ilOffset == (ULONG) ICorDebugInfo::EPILOG |
1721 | && second->ilOffset == (ULONG) ICorDebugInfo::EPILOG) |
1722 | { |
1723 | return CompareInternal(first, second); |
1724 | } else if (first->ilOffset == (ULONG) ICorDebugInfo::EPILOG) |
1725 | { |
1726 | return 1; |
1727 | } else if (second->ilOffset == (ULONG) ICorDebugInfo::EPILOG) |
1728 | { |
1729 | return -1; |
1730 | } |
1731 | //normal offsets compared otherwise |
1732 | else if (first->ilOffset < second->ilOffset) |
1733 | return -1; |
1734 | else if (first->ilOffset == second->ilOffset) |
1735 | return CompareInternal(first, second); |
1736 | else |
1737 | return 1; |
1738 | } |
1739 | }; |
1740 | |
1741 | /* |
1742 | * class MapSortNative: A template class that will sort an array of DebuggerILToNativeMap by the nativeStartOffset field. |
1743 | * This class is intended to be instantiated on the stack / in temporary storage, and used to reorder the sequence map. |
1744 | */ |
1745 | class MapSortNative : public CQuickSort<DebuggerILToNativeMap> |
1746 | { |
1747 | public: |
1748 | //Constructor |
1749 | MapSortNative(DebuggerILToNativeMap *map, |
1750 | int count) |
1751 | : CQuickSort<DebuggerILToNativeMap>(map, count) |
1752 | { |
1753 | WRAPPER_NO_CONTRACT; |
1754 | } |
1755 | |
1756 | |
1757 | //Returns -1,0,or 1 if first's nativeStartOffset is less than, equal to, or greater than second's |
1758 | int Compare(DebuggerILToNativeMap *first, |
1759 | DebuggerILToNativeMap *second) |
1760 | { |
1761 | LIMITED_METHOD_CONTRACT; |
1762 | |
1763 | if (first->nativeStartOffset < second->nativeStartOffset) |
1764 | return -1; |
1765 | else if (first->nativeStartOffset == second->nativeStartOffset) |
1766 | return 0; |
1767 | else |
1768 | return 1; |
1769 | } |
1770 | }; |
1771 | #endif //!DACCESS_COMPILE |
1772 | |
1773 | /* ------------------------------------------------------------------------ * |
1774 | * Import flares from assembly file |
 * We rely on flares having unique addresses, and so we need to keep them
 * from getting folded by the linker (since they are identical code).
1777 | * ------------------------------------------------------------------------ */ |
1778 | |
1779 | extern "C" void __stdcall SignalHijackStartedFlare(void); |
1780 | extern "C" void __stdcall ExceptionForRuntimeHandoffStartFlare(void); |
1781 | extern "C" void __stdcall ExceptionForRuntimeHandoffCompleteFlare(void); |
1782 | extern "C" void __stdcall SignalHijackCompleteFlare(void); |
1783 | extern "C" void __stdcall ExceptionNotForRuntimeFlare(void); |
1784 | extern "C" void __stdcall NotifyRightSideOfSyncCompleteFlare(void); |
1785 | extern "C" void __stdcall NotifySecondChanceReadyForDataFlare(void); |
1786 | |
1787 | /* ------------------------------------------------------------------------ * |
1788 | * Debugger class |
1789 | * ------------------------------------------------------------------------ */ |
1790 | |
1791 | |
1792 | // Forward declare some parameter marshalling structs |
1793 | struct ShouldAttachDebuggerParams; |
1794 | struct EnsureDebuggerAttachedParams; |
1795 | struct SendMDANotificationParams; |
1796 | |
1797 | // class Debugger: This class implements DebugInterface to provide |
1798 | // the hooks to the Runtime directly. |
1799 | // |
1800 | |
1801 | class Debugger : public DebugInterface |
1802 | { |
1803 | VPTR_VTABLE_CLASS(Debugger, DebugInterface); |
1804 | public: |
1805 | |
1806 | #ifndef DACCESS_COMPILE |
1807 | Debugger(); |
1808 | virtual ~Debugger(); |
1809 | #else |
1810 | virtual ~Debugger() {} |
1811 | #endif |
1812 | |
1813 | // If 0, then not yet initialized. If non-zero, then LS is initialized. |
1814 | LONG m_fLeftSideInitialized; |
1815 | |
1816 | // This flag controls the window where SetDesiredNGENCompilerFlags is allowed, |
1817 | // which is until Debugger::StartupPhase2 is complete. Typically it would be |
    // set during the CreateProcess debug event, but it could be set at other times,
    // such as during module load for clr.dll.
1820 | SVAL_DECL(BOOL, s_fCanChangeNgenFlags); |
1821 | |
1822 | friend class DebuggerLazyInit; |
1823 | #ifdef TEST_DATA_CONSISTENCY |
1824 | friend class DataTest; |
1825 | #endif |
1826 | |
    // Checks whether the corresponding lazily-allocated table (method infos, modules,
    // pending func evals) has been allocated, and if not, allocates it.
1828 | HRESULT inline CheckInitMethodInfoTable(); |
1829 | HRESULT inline CheckInitModuleTable(); |
1830 | HRESULT CheckInitPendingFuncEvalTable(); |
1831 | |
1832 | #ifndef DACCESS_COMPILE |
1833 | DWORD GetRCThreadId() |
1834 | { |
1835 | CONTRACTL |
1836 | { |
1837 | NOTHROW; |
1838 | GC_NOTRIGGER; |
1839 | } |
1840 | CONTRACTL_END; |
1841 | |
1842 | if (m_pRCThread) |
1843 | return m_pRCThread->GetRCThreadId(); |
1844 | else |
1845 | return 0; |
1846 | } |
1847 | #endif |
1848 | |
1849 | // |
1850 | // Methods exported from the Runtime Controller to the Runtime. |
1851 | // (These are the methods specified by DebugInterface.) |
1852 | // |
1853 | HRESULT Startup(void); |
1854 | |
1855 | HRESULT StartupPhase2(Thread * pThread); |
1856 | |
1857 | void InitializeLazyDataIfNecessary(); |
1858 | |
1859 | void LazyInit(); // will throw |
1860 | HRESULT LazyInitWrapper(); // calls LazyInit and converts to HR. |
1861 | |
1862 | // Helper on startup to notify debugger |
1863 | void RaiseStartupNotification(); |
1864 | |
1865 | // Send a raw managed debug event over the managed pipeline. |
1866 | void SendRawEvent(const DebuggerIPCEvent * pManagedEvent); |
1867 | |
1868 | // Message box API for the left side of the debugger. This API handles calls from the |
1869 | // debugger helper thread as well as from normal EE threads. It is the only one that |
1870 | // should be used from inside the debugger left side. |
1871 | int MessageBox( |
1872 | UINT uText, // Resource Identifier for Text message |
1873 | UINT uCaption, // Resource Identifier for Caption |
1874 | UINT uType, // Style of MessageBox |
1875 | BOOL displayForNonInteractive, // Display even if the process is running non interactive |
1876 | BOOL showFileNameInTitle, // Flag to show FileName in Caption |
1877 | ...); // Additional Arguments |
1878 | |
1879 | void SetEEInterface(EEDebugInterface* i); |
1880 | void StopDebugger(void); |
1881 | BOOL IsStopped(void) |
1882 | { |
1883 | LIMITED_METHOD_CONTRACT; |
1884 | // implements DebugInterface but also is called internally |
1885 | return m_stopped; |
1886 | } |
1887 | |
1888 | |
1889 | |
1890 | void ThreadCreated(Thread* pRuntimeThread); |
1891 | void ThreadStarted(Thread* pRuntimeThread); |
1892 | void DetachThread(Thread *pRuntimeThread); |
1893 | |
1894 | BOOL SuspendComplete(bool isEESuspendedForGC = false); |
1895 | |
1896 | void LoadModule(Module* pRuntimeModule, |
1897 | LPCWSTR pszModuleName, |
1898 | DWORD dwModuleName, |
1899 | Assembly *pAssembly, |
1900 | AppDomain *pAppDomain, |
1901 | DomainFile * pDomainFile, |
1902 | BOOL fAttaching); |
1903 | void LoadModuleFinished(Module* pRuntimeModule, AppDomain * pAppDomain); |
1904 | DebuggerModule * AddDebuggerModule(DomainFile * pDomainFile); |
1905 | |
1906 | |
1907 | void UnloadModule(Module* pRuntimeModule, |
1908 | AppDomain *pAppDomain); |
1909 | void DestructModule(Module *pModule); |
1910 | |
1911 | void RemoveModuleReferences(Module * pModule); |
1912 | |
1913 | |
1914 | void SendUpdateModuleSymsEventAndBlock(Module * pRuntimeModule, AppDomain * pAppDomain); |
1915 | void SendRawUpdateModuleSymsEvent(Module * pRuntimeModule, AppDomain * pAppDomain); |
1916 | |
1917 | BOOL LoadClass(TypeHandle th, |
1918 | mdTypeDef classMetadataToken, |
1919 | Module* classModule, |
1920 | AppDomain *pAppDomain); |
1921 | void UnloadClass(mdTypeDef classMetadataToken, |
1922 | Module* classModule, |
1923 | AppDomain *pAppDomain); |
1924 | |
1925 | void SendClassLoadUnloadEvent (mdTypeDef classMetadataToken, |
1926 | DebuggerModule *classModule, |
1927 | Assembly *pAssembly, |
1928 | AppDomain *pAppDomain, |
1929 | BOOL fIsLoadEvent); |
1930 | BOOL SendSystemClassLoadUnloadEvent (mdTypeDef classMetadataToken, |
1931 | Module *classModule, |
1932 | BOOL fIsLoadEvent); |
1933 | |
1934 | void SendCatchHandlerFound(Thread *pThread, |
1935 | FramePointer fp, |
1936 | SIZE_T nOffset, |
1937 | DWORD dwFlags); |
1938 | |
1939 | LONG NotifyOfCHFFilter(EXCEPTION_POINTERS* pExceptionPointers, PVOID pCatchStackAddr); |
1940 | |
1941 | |
1942 | bool FirstChanceNativeException(EXCEPTION_RECORD *exception, |
1943 | T_CONTEXT *context, |
1944 | DWORD code, |
1945 | Thread *thread); |
1946 | |
1947 | bool IsJMCMethod(Module* pModule, mdMethodDef tkMethod); |
1948 | |
1949 | int GetMethodEncNumber(MethodDesc * pMethod); |
1950 | |
1951 | |
1952 | bool FirstChanceManagedException(Thread *pThread, SIZE_T currentIP, SIZE_T currentSP); |
1953 | |
1954 | void FirstChanceManagedExceptionCatcherFound(Thread *pThread, |
1955 | MethodDesc *pMD, TADDR pMethodAddr, |
1956 | BYTE *currentSP, |
1957 | EE_ILEXCEPTION_CLAUSE *pEHClause); |
1958 | |
1959 | LONG LastChanceManagedException(EXCEPTION_POINTERS * pExceptionInfo, |
1960 | Thread *pThread, |
1961 | BOOL jitAttachRequested); |
1962 | |
1963 | void ManagedExceptionUnwindBegin(Thread *pThread); |
1964 | |
1965 | void DeleteInterceptContext(void *pContext); |
1966 | |
1967 | void ExceptionFilter(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack); |
1968 | void ExceptionHandle(MethodDesc *fd, TADDR pMethodAddr, SIZE_T offset, BYTE *pStack); |
1969 | |
1970 | int NotifyUserOfFault(bool userBreakpoint, DebuggerLaunchSetting dls); |
1971 | |
1972 | SIZE_T GetArgCount(MethodDesc* md, BOOL *fVarArg = NULL); |
1973 | |
1974 | void FuncEvalComplete(Thread *pThread, DebuggerEval *pDE); |
1975 | |
1976 | DebuggerMethodInfo *CreateMethodInfo(Module *module, mdMethodDef md); |
1977 | void JITComplete(MethodDesc* fd, TADDR newAddress); |
1978 | |
1979 | HRESULT RequestFavor(FAVORCALLBACK fp, void * pData); |
1980 | |
1981 | #ifdef EnC_SUPPORTED |
1982 | HRESULT UpdateFunction(MethodDesc* pFD, SIZE_T encVersion); |
1983 | HRESULT AddFunction(MethodDesc* md, SIZE_T enCVersion); |
1984 | HRESULT UpdateNotYetLoadedFunction(mdMethodDef token, Module * pModule, SIZE_T enCVersion); |
1985 | |
1986 | HRESULT AddField(FieldDesc* fd, SIZE_T enCVersion); |
1987 | HRESULT RemapComplete(MethodDesc *pMd, TADDR addr, SIZE_T nativeOffset); |
1988 | |
1989 | HRESULT MapILInfoToCurrentNative(MethodDesc *pMD, |
1990 | SIZE_T ilOffset, |
1991 | TADDR nativeFnxStart, |
1992 | SIZE_T *nativeOffset); |
1993 | #endif // EnC_SUPPORTED |
1994 | |
1995 | void GetVarInfo(MethodDesc * fd, // [IN] method of interest |
1996 | void *DebuggerVersionToken, // [IN] which edit version |
1997 | SIZE_T * cVars, // [OUT] size of 'vars' |
1998 | const ICorDebugInfo::NativeVarInfo **vars // [OUT] map telling where local vars are stored |
1999 | ); |
2000 | |
2001 | void getBoundariesHelper(MethodDesc * ftn, |
2002 | unsigned int *cILOffsets, DWORD **pILOffsets); |
2003 | void getBoundaries(MethodDesc * ftn, |
2004 | unsigned int *cILOffsets, DWORD **pILOffsets, |
2005 | ICorDebugInfo::BoundaryTypes* implictBoundaries); |
2006 | |
2007 | void getVars(MethodDesc * ftn, |
2008 | ULONG32 *cVars, ICorDebugInfo::ILVarInfo **vars, |
2009 | bool *extendOthers); |
2010 | |
2011 | DebuggerMethodInfo *GetOrCreateMethodInfo(Module *pModule, mdMethodDef token); |
2012 | |
2013 | PTR_DebuggerMethodInfoTable GetMethodInfoTable() { return m_pMethodInfos; } |
2014 | |
2015 | // Gets the DJI for 'fd' |
2016 | // If 'pbAddr' is non-NULL and if the method has multiple copies of code |
2017 | // (because of EnC or code-pitching), this returns the DJI corresponding |
2018 | // to 'pbAddr' |
2019 | DebuggerJitInfo *GetJitInfo(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo = NULL); |
2020 | |
2021 | // Several ways of getting a DJI. DJIs are 1:1 w/ Native Code blobs. |
2022 | // Caller must guarantee good parameters. |
2023 | // DJIs can be lazily created; so the only way these will fail is in an OOM case. |
2024 | DebuggerJitInfo *GetJitInfoFromAddr(TADDR addr); |
2025 | |
2026 | // EnC trashes the methoddesc to point to the latest version. Thus given a method-desc, |
2027 | // we can get the most recent DJI. |
2028 | DebuggerJitInfo *GetLatestJitInfoFromMethodDesc(MethodDesc * pMethodDesc); |
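
    // Illustrative DJI lookup (a sketch; 'pMD' and 'nativeIP' are hypothetical):
    //
    //     // Most recent version of the code for a method:
    //     DebuggerJitInfo * pLatest = g_pDebugger->GetLatestJitInfoFromMethodDesc(pMD);
    //
    //     // The specific version containing a given native address (EnC / code-pitching):
    //     DebuggerJitInfo * pAtAddr = g_pDebugger->GetJitInfo(pMD, (const BYTE *)nativeIP);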
2029 | |
2030 | |
2031 | HRESULT GetILToNativeMapping(PCODE pNativeCodeStartAddress, ULONG32 cMap, ULONG32 *pcMap, |
2032 | COR_DEBUG_IL_TO_NATIVE_MAP map[]); |
2033 | |
2034 | HRESULT GetILToNativeMappingIntoArrays( |
2035 | MethodDesc * pMethodDesc, |
2036 | PCODE pCode, |
2037 | USHORT cMapMax, |
2038 | USHORT * pcMap, |
2039 | UINT ** prguiILOffset, |
2040 | UINT ** prguiNativeOffset); |
2041 | |
2042 | PRD_TYPE GetPatchedOpcode(CORDB_ADDRESS_TYPE *ip); |
2043 | BOOL CheckGetPatchedOpcode(CORDB_ADDRESS_TYPE *address, /*OUT*/ PRD_TYPE *pOpcode); |
2044 | |
2045 | void TraceCall(const BYTE *address); |
2046 | |
2047 | bool ThreadsAtUnsafePlaces(void); |
2048 | |
2049 | |
2050 | void PollWaitingForHelper(); |
2051 | |
2052 | void IncThreadsAtUnsafePlaces(void) |
2053 | { |
2054 | LIMITED_METHOD_CONTRACT; |
2055 | InterlockedIncrement(&m_threadsAtUnsafePlaces); |
2056 | } |
2057 | |
2058 | void DecThreadsAtUnsafePlaces(void) |
2059 | { |
2060 | LIMITED_METHOD_CONTRACT; |
2061 | InterlockedDecrement(&m_threadsAtUnsafePlaces); |
2062 | } |
2063 | |
2064 | static StackWalkAction AtSafePlaceStackWalkCallback(CrawlFrame *pCF, |
2065 | VOID* data); |
2066 | bool IsThreadAtSafePlaceWorker(Thread *thread); |
2067 | bool IsThreadAtSafePlace(Thread *thread); |
2068 | |
2069 | CorDebugUserState GetFullUserState(Thread *pThread); |
2070 | |
2071 | |
2072 | void Terminate(); |
2073 | void Continue(); |
2074 | |
2075 | bool HandleIPCEvent(DebuggerIPCEvent* event); |
2076 | |
2077 | DebuggerModule * LookupOrCreateModule(VMPTR_DomainFile vmDomainFile); |
2078 | DebuggerModule * LookupOrCreateModule(DomainFile * pDomainFile); |
2079 | DebuggerModule * LookupOrCreateModule(Module * pModule, AppDomain * pAppDomain); |
2080 | |
2081 | HRESULT GetAndSendInterceptCommand(DebuggerIPCEvent *event); |
2082 | |
2083 | //HRESULT GetAndSendJITFunctionData(DebuggerRCThread* rcThread, |
2084 | // mdMethodDef methodToken, |
2085 | // void* functionModuleToken); |
2086 | HRESULT GetFuncData(mdMethodDef funcMetadataToken, |
2087 | DebuggerModule* pDebuggerModule, |
2088 | SIZE_T nVersion, |
2089 | DebuggerIPCE_FuncData *data); |
2090 | |
2091 | |
2092 | // The following four functions convert between type handles and the data that is |
2093 | // shipped for types to and from the right-side. |
2094 | // |
2095 | // I'm heading toward getting rid of the first two - they are almost never used. |
2096 | static HRESULT ExpandedTypeInfoToTypeHandle(DebuggerIPCE_ExpandedTypeData *data, |
2097 | unsigned int genericArgsCount, |
2098 | DebuggerIPCE_BasicTypeData *genericArgs, |
2099 | TypeHandle *pRes); |
2100 | static HRESULT BasicTypeInfoToTypeHandle(DebuggerIPCE_BasicTypeData *data, |
2101 | TypeHandle *pRes); |
2102 | void TypeHandleToBasicTypeInfo(AppDomain *pAppDomain, |
2103 | TypeHandle th, |
2104 | DebuggerIPCE_BasicTypeData *res); |
2105 | |
2106 | // TypeHandleToExpandedTypeInfo returns different DebuggerIPCE_ExpandedTypeData objects |
2107 | // depending on whether the object value that the TypeData corresponds to is |
2108 | // boxed or not. Different parts of the API transfer objects in slightly different ways. |
    // AllBoxed:
    //    For GetAndSendObjectData all values are boxed.
    //
    // OnlyPrimitivesUnboxed:
    //    When returning results from FuncEval only "true" structs
    //    get boxed, i.e. primitives are unboxed.
    //
    // NoValueTypeBoxing:
    //    TypeHandleToExpandedTypeInfo is also used to report type parameters,
    //    and in this case none of the types are considered boxed.
2119 | enum AreValueTypesBoxed { NoValueTypeBoxing, OnlyPrimitivesUnboxed, AllBoxed }; |
2120 | |
2121 | void TypeHandleToExpandedTypeInfo(AreValueTypesBoxed boxed, |
2122 | AppDomain *pAppDomain, |
2123 | TypeHandle th, |
2124 | DebuggerIPCE_ExpandedTypeData *res); |
2125 | |
2126 | class TypeDataWalk |
2127 | { |
2128 | DebuggerIPCE_TypeArgData *m_curdata; |
2129 | unsigned int m_remaining; |
2130 | |
2131 | public: |
2132 | TypeDataWalk(DebuggerIPCE_TypeArgData *pData, unsigned int nData) |
2133 | { |
2134 | m_curdata = pData; |
2135 | m_remaining = nData; |
2136 | } |
2137 | |
2138 | |
2139 | // These are for type arguments in the funceval case. |
2140 | // They throw COMPLUS exceptions if they fail, so can only be used during funceval. |
2141 | void ReadTypeHandles(unsigned int nTypeArgs, TypeHandle *pRes); |
2142 | TypeHandle ReadInstantiation(Module *pModule, mdTypeDef tok, unsigned int nTypeArgs); |
2143 | TypeHandle ReadTypeHandle(); |
2144 | |
2145 | BOOL Finished() { LIMITED_METHOD_CONTRACT; return m_remaining == 0; } |
2146 | DebuggerIPCE_TypeArgData *ReadOne() { LIMITED_METHOD_CONTRACT; if (m_remaining) { m_remaining--; return m_curdata++; } else return NULL; } |
2147 | |
2148 | }; |
2149 | |
2150 | |
2151 | |
2152 | HRESULT GetMethodDescData(MethodDesc *pFD, |
2153 | DebuggerJitInfo *pJITInfo, |
2154 | DebuggerIPCE_JITFuncData *data); |
2155 | |
2156 | void GetAndSendTransitionStubInfo(CORDB_ADDRESS_TYPE *stubAddress); |
2157 | |
2158 | void SendBreakpoint(Thread *thread, T_CONTEXT *context, |
2159 | DebuggerBreakpoint *breakpoint); |
2160 | #ifdef FEATURE_DATABREAKPOINT |
2161 | void SendDataBreakpoint(Thread* thread, T_CONTEXT *context, DebuggerDataBreakpoint *breakpoint); |
2162 | #endif // FEATURE_DATABREAKPOINT |
2163 | void SendStep(Thread *thread, T_CONTEXT *context, |
2164 | DebuggerStepper *stepper, |
2165 | CorDebugStepReason reason); |
2166 | |
2167 | void LockAndSendEnCRemapEvent(DebuggerJitInfo * dji, SIZE_T currentIP, SIZE_T *resumeIP); |
2168 | void LockAndSendEnCRemapCompleteEvent(MethodDesc *pFD); |
2169 | void SendEnCUpdateEvent(DebuggerIPCEventType eventType, |
2170 | Module * pModule, |
2171 | mdToken memberToken, |
2172 | mdTypeDef classToken, |
2173 | SIZE_T enCVersion); |
2174 | void LockAndSendBreakpointSetError(PATCH_UNORDERED_ARRAY * listUnbindablePatches); |
2175 | |
2176 | // helper for SendException |
2177 | void SendExceptionEventsWorker( |
2178 | Thread * pThread, |
2179 | bool firstChance, |
2180 | bool fIsInterceptable, |
2181 | bool continuable, |
2182 | SIZE_T currentIP, |
2183 | FramePointer framePointer, |
2184 | bool atSafePlace); |
2185 | |
2186 | // Main function to send an exception event, handle jit-attach if needed, etc |
2187 | HRESULT SendException(Thread *pThread, |
2188 | bool fFirstChance, |
2189 | SIZE_T currentIP, |
2190 | SIZE_T currentSP, |
2191 | bool fContinuable, |
2192 | bool fAttaching, |
2193 | bool fForceNonInterceptable, |
2194 | EXCEPTION_POINTERS * pExceptionInfo); |
2195 | |
2196 | // Top-level function to handle sending a user-breakpoint, jit-attach, sync, etc. |
2197 | void SendUserBreakpoint(Thread * thread); |
2198 | |
2199 | // Send the user breakpoint and block waiting for a continue. |
2200 | void SendUserBreakpointAndSynchronize(Thread * pThread); |
2201 | |
2202 | // Just send the actual event. |
2203 | void SendRawUserBreakpoint(Thread *thread); |
2204 | |
2205 | |
2206 | |
2207 | void SendInterceptExceptionComplete(Thread *thread); |
2208 | |
2209 | HRESULT AttachDebuggerForBreakpoint(Thread *thread, |
2210 | __in_opt WCHAR *wszLaunchReason); |
2211 | |
2212 | |
2213 | void ThreadIsSafe(Thread *thread); |
2214 | |
2215 | void UnrecoverableError(HRESULT errorHR, |
2216 | unsigned int errorCode, |
2217 | const char *errorFile, |
2218 | unsigned int errorLine, |
2219 | bool exitThread); |
2220 | |
2221 | virtual BOOL IsSynchronizing(void) |
2222 | { |
2223 | LIMITED_METHOD_CONTRACT; |
2224 | |
2225 | return m_trappingRuntimeThreads; |
2226 | } |
2227 | |
2228 | // |
2229 | // The debugger mutex is used to protect any "global" Left Side |
2230 | // data structures. The RCThread takes it when handling a Right |
2231 | // Side event, and Runtime threads take it when processing |
2232 | // debugger events. |
2233 | // |
2234 | #ifdef _DEBUG |
2235 | int m_mutexCount; |
2236 | #endif |
2237 | |
2238 | // Helper function |
2239 | HRESULT AttachDebuggerForBreakpointOnHelperThread(Thread *pThread); |
2240 | |
2241 | // helper function to send Exception IPC event and Exception_CallBack2 event |
2242 | HRESULT SendExceptionHelperAndBlock( |
2243 | Thread *pThread, |
2244 | OBJECTHANDLE exceptionHandle, |
2245 | bool continuable, |
2246 | FramePointer framePointer, |
2247 | SIZE_T nOffset, |
2248 | CorDebugExceptionCallbackType eventType, |
2249 | DWORD dwFlags); |
2250 | |
2251 | |
    // Helper function to send out a LogMessage only. Can be called either on the helper thread or on a managed thread.
2253 | void SendRawLogMessage( |
2254 | Thread *pThread, |
2255 | AppDomain *pAppDomain, |
2256 | int iLevel, |
2257 | SString * pCategory, |
2258 | SString * pMessage); |
2259 | |
2260 | |
2261 | // Helper function to send MDA notification |
2262 | void SendRawMDANotification(SendMDANotificationParams * params); |
2263 | static void SendMDANotificationOnHelperThreadProxy(SendMDANotificationParams * params); |
2264 | |
2265 | // Returns a bitfield reflecting the managed debugging state at the time of |
2266 | // the jit attach. |
2267 | CLR_DEBUGGING_PROCESS_FLAGS GetAttachStateFlags(); |
2268 | |
2269 | // Records that this thread is about to trigger jit attach and |
2270 | // resolves the race for which thread gets to trigger it |
2271 | BOOL PreJitAttach(BOOL willSendManagedEvent, BOOL willLaunchDebugger, BOOL explicitUserRequest); |
2272 | |
2273 | // Blocks until the debugger completes jit attach |
2274 | void WaitForDebuggerAttach(); |
2275 | |
2276 | // Cleans up after jit attach is complete |
2277 | void PostJitAttach(); |
2278 | |
2279 | // Main worker function to initiate, handle, and wait for a Jit-attach. |
2280 | void JitAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest); |
2281 | |
2282 | private: |
2283 | void DoNotCallDirectlyPrivateLock(void); |
2284 | void DoNotCallDirectlyPrivateUnlock(void); |
2285 | |
2286 | // This function gets the jit debugger launched and waits for the native attach to complete |
2287 | // Make sure you called PreJitAttach and it returned TRUE before you call this |
2288 | HRESULT LaunchJitDebuggerAndNativeAttach(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo); |
2289 | |
2290 | // Helper to serialize metadata that has been updated by the profiler into |
2291 | // a buffer so that it can be read out-of-proc |
2292 | BYTE* SerializeModuleMetaData(Module * pModule, DWORD * countBytes); |
2293 | |
    // Wraps fusion's FusionCopyPDBs for a module.
2295 | HRESULT CopyModulePdb(Module* pRuntimeModule); |
2296 | |
    // When attaching to a process, this is called to enumerate all of the
    // AppDomains currently in the process and allow module PDBs to be copied over to the
    // shadow directory, maintaining our V2 in-proc behaviour.
2299 | HRESULT IterateAppDomainsForPdbs(); |
2300 | |
2301 | #ifndef DACCESS_COMPILE |
2302 | public: |
2303 | // Helper function to initialize JDI structure |
2304 | void InitDebuggerLaunchJitInfo(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo); |
2305 | |
2306 | // Helper function to retrieve JDI structure |
2307 | JIT_DEBUG_INFO * GetDebuggerLaunchJitInfo(void); |
2308 | |
2309 | private: |
2310 | static JIT_DEBUG_INFO s_DebuggerLaunchJitInfo; |
2311 | static EXCEPTION_RECORD s_DebuggerLaunchJitInfoExceptionRecord; |
2312 | static CONTEXT s_DebuggerLaunchJitInfoContext; |
2313 | |
2314 | static void AcquireDebuggerLock(Debugger *c) |
2315 | { |
2316 | WRAPPER_NO_CONTRACT; |
2317 | c->DoNotCallDirectlyPrivateLock(); |
2318 | } |
2319 | |
2320 | static void ReleaseDebuggerLock(Debugger *c) |
2321 | { |
2322 | WRAPPER_NO_CONTRACT; |
2323 | c->DoNotCallDirectlyPrivateUnlock(); |
2324 | } |
2325 | #else // DACCESS_COMPILE |
2326 | static void AcquireDebuggerLock(Debugger *c); |
2327 | static void ReleaseDebuggerLock(Debugger *c); |
2328 | #endif // DACCESS_COMPILE |
2329 | |
2330 | |
2331 | public: |
2332 | // define type for DebuggerLockHolder |
2333 | typedef DacHolder<Debugger *, Debugger::AcquireDebuggerLock, Debugger::ReleaseDebuggerLock> DebuggerLockHolder; |
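
    // Illustrative event-sending pattern (a conceptual sketch of what the
    // SENDIPCEVENT_BEGIN / SENDIPCEVENT_END macros expand to; see their definitions for
    // the authoritative sequence, including the exact holder construction):
    //
    //     DebuggerLockHolder dbgLockHolder(this);          // assumed holder construction
    //     LockForEventSending(&dbgLockHolder);
    //     // ... initialize and send the IPC event ...
    //     SyncAllThreads(&dbgLockHolder);                  // when a sync is needed
    //     UnlockFromEventSending(&dbgLockHolder);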
2334 | |
2335 | void LockForEventSending(DebuggerLockHolder *dbgLockHolder); |
2336 | void UnlockFromEventSending(DebuggerLockHolder *dbgLockHolder); |
2337 | void SyncAllThreads(DebuggerLockHolder *dbgLockHolder); |
2338 | void SendSyncCompleteIPCEvent(bool isEESuspendedForGC = false); |
2339 | |
2340 | // Helper for sending a single pre-baked IPC event and blocking on the continue. |
2341 | // See definition of SENDIPCEVENT_BEGIN for usage pattern. |
2342 | void SendSimpleIPCEventAndBlock(); |
2343 | |
2344 | void SendCreateProcess(DebuggerLockHolder * pDbgLockHolder); |
2345 | |
2346 | void IncrementClassLoadCallbackCount(void) |
2347 | { |
2348 | LIMITED_METHOD_CONTRACT; |
2349 | InterlockedIncrement(&m_dClassLoadCallbackCount); |
2350 | } |
2351 | |
2352 | void DecrementClassLoadCallbackCount(void) |
2353 | { |
2354 | LIMITED_METHOD_CONTRACT; |
2355 | _ASSERTE(m_dClassLoadCallbackCount > 0); |
2356 | InterlockedDecrement(&m_dClassLoadCallbackCount); |
2357 | } |
2358 | |
2359 | |
2360 | #ifdef _DEBUG_IMPL |
2361 | bool ThreadHoldsLock(void) |
2362 | { |
2363 | CONTRACTL |
2364 | { |
2365 | NOTHROW; |
2366 | GC_NOTRIGGER; |
2367 | } |
2368 | CONTRACTL_END; |
2369 | |
2370 | if (g_fProcessDetach) |
2371 | return true; |
2372 | |
2373 | BEGIN_GETTHREAD_ALLOWED; |
2374 | if (g_pEEInterface->GetThread()) |
2375 | { |
2376 | return (GetThreadIdHelper(g_pEEInterface->GetThread()) == m_mutexOwner); |
2377 | } |
2378 | else |
2379 | { |
2380 | return (GetCurrentThreadId() == m_mutexOwner); |
2381 | } |
2382 | END_GETTHREAD_ALLOWED; |
2383 | } |
2384 | #endif // _DEBUG_IMPL |
2385 | |
2386 | #ifdef FEATURE_INTEROP_DEBUGGING |
2387 | static VOID M2UHandoffHijackWorker( |
2388 | T_CONTEXT *pContext, |
2389 | EXCEPTION_RECORD *pExceptionRecord); |
2390 | |
2391 | LONG FirstChanceSuspendHijackWorker( |
2392 | T_CONTEXT *pContext, |
2393 | EXCEPTION_RECORD *pExceptionRecord); |
2394 | static void GenericHijackFunc(void); |
2395 | static void SecondChanceHijackFunc(void); |
2396 | static void SecondChanceHijackFuncWorker(void); |
2397 | static void SignalHijackStarted(void); |
2398 | static void ExceptionForRuntimeHandoffStart(void); |
2399 | static void ExceptionForRuntimeHandoffComplete(void); |
2400 | static void SignalHijackComplete(void); |
2401 | static void ExceptionNotForRuntime(void); |
2402 | static void NotifyRightSideOfSyncComplete(void); |
2403 | static void NotifySecondChanceReadyForData(void); |
2404 | #endif // FEATURE_INTEROP_DEBUGGING |
2405 | |
2406 | void UnhandledHijackWorker(T_CONTEXT * pContext, EXCEPTION_RECORD * pRecord); |
2407 | |
2408 | // |
2409 | // InsertToMethodInfoList puts the given DMI onto the DMI list. |
2410 | // |
2411 | HRESULT InsertToMethodInfoList(DebuggerMethodInfo *dmi); |
2412 | |
2413 | |
2414 | // MapBreakpoints will map any and all breakpoints (except EnC |
2415 | // patches) from previous versions of the method into the current version. |
2416 | HRESULT MapAndBindFunctionPatches( DebuggerJitInfo *pJiNew, |
2417 | MethodDesc * fd, |
2418 | CORDB_ADDRESS_TYPE * addrOfCode); |
2419 | |
    // MapPatchToDJI (MPTDJI) takes the given patch and maps its IL offset
    // forward into djiTo. Returns
    // CORDBG_E_CODE_NOT_AVAILABLE if there isn't a mapping, which means that
    // no patch was placed.
2424 | HRESULT MapPatchToDJI(DebuggerControllerPatch *dcp, DebuggerJitInfo *djiTo); |
2425 | |
2426 | HRESULT LaunchDebuggerForUser(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, |
2427 | BOOL useManagedBPForManagedAttach, BOOL explicitUserRequest); |
2428 | |
2429 | void SendLogMessage (int iLevel, |
2430 | SString * pSwitchName, |
2431 | SString * pMessage); |
2432 | |
2433 | void SendLogSwitchSetting (int iLevel, |
2434 | int iReason, |
2435 | __in_z LPCWSTR pLogSwitchName, |
2436 | __in_z LPCWSTR pParentSwitchName); |
2437 | |
2438 | bool IsLoggingEnabled (void) |
2439 | { |
2440 | LIMITED_METHOD_CONTRACT; |
2441 | |
2442 | if (m_LoggingEnabled) |
2443 | return true; |
2444 | return false; |
2445 | } |
2446 | |
2447 | // send a custom debugger notification to the RS |
2448 | void SendCustomDebuggerNotification(Thread * pThread, DomainFile * pDomain, mdTypeDef classToken); |
2449 | |
2450 | // Send an MDA notification. This ultimately translates to an ICorDebugMDA object on the Right-Side. |
2451 | void SendMDANotification( |
2452 | Thread * pThread, // may be NULL. Lets us send on behalf of other threads. |
2453 | SString * szName, |
2454 | SString * szDescription, |
2455 | SString * szXML, |
2456 | CorDebugMDAFlags flags, |
2457 | BOOL bAttach |
2458 | ); |
2459 | |
2460 | |
2461 | void EnableLogMessages (bool fOnOff) {LIMITED_METHOD_CONTRACT; m_LoggingEnabled = fOnOff;} |
2462 | bool GetILOffsetFromNative (MethodDesc *PFD, const BYTE *pbAddr, |
2463 | DWORD nativeOffset, DWORD *ilOffset); |
2464 | |
2465 | DWORD GetHelperThreadID(void ); |
2466 | |
2467 | |
2468 | HRESULT SetIP( bool fCanSetIPOnly, |
2469 | Thread *thread, |
2470 | Module *module, |
2471 | mdMethodDef mdMeth, |
2472 | DebuggerJitInfo* dji, |
2473 | SIZE_T offsetILTo, |
2474 | BOOL fIsIL); |
2475 | |
2476 | // Helper routines used by Debugger::SetIP |
2477 | |
2478 | // If we have a varargs function, we can't set the IP (we don't know how to pack/unpack the arguments), so if we |
2479 | // call SetIP with fCanSetIPOnly = true, we need to check for that. |
2480 | BOOL IsVarArgsFunction(unsigned int nEntries, PTR_NativeVarInfo varNativeInfo); |
2481 | |
2482 | HRESULT ShuffleVariablesGet(DebuggerJitInfo *dji, |
2483 | SIZE_T offsetFrom, |
2484 | T_CONTEXT *pCtx, |
2485 | SIZE_T **prgVal1, |
2486 | SIZE_T **prgVal2, |
2487 | BYTE ***prgpVCs); |
2488 | |
2489 | HRESULT ShuffleVariablesSet(DebuggerJitInfo *dji, |
2490 | SIZE_T offsetTo, |
2491 | T_CONTEXT *pCtx, |
2492 | SIZE_T **prgVal1, |
2493 | SIZE_T **prgVal2, |
2494 | BYTE **rgpVCs); |
2495 | |
2496 | HRESULT GetVariablesFromOffset(MethodDesc *pMD, |
2497 | UINT varNativeInfoCount, |
2498 | ICorDebugInfo::NativeVarInfo *varNativeInfo, |
2499 | SIZE_T offsetFrom, |
2500 | T_CONTEXT *pCtx, |
2501 | SIZE_T *rgVal1, |
2502 | SIZE_T *rgVal2, |
                                   UINT uRgValSize, // number of elements in the preallocated rgVal1 and rgVal2
2504 | BYTE ***rgpVCs); |
2505 | |
2506 | HRESULT SetVariablesAtOffset(MethodDesc *pMD, |
2507 | UINT varNativeInfoCount, |
2508 | ICorDebugInfo::NativeVarInfo *varNativeInfo, |
2509 | SIZE_T offsetTo, |
2510 | T_CONTEXT *pCtx, |
2511 | SIZE_T *rgVal1, |
2512 | SIZE_T *rgVal2, |
2513 | BYTE **rgpVCs); |
2514 | |
2515 | BOOL IsThreadContextInvalid(Thread *pThread); |
2516 | |
2517 | // notification for SQL fiber debugging support |
2518 | void CreateConnection(CONNID dwConnectionId, __in_z WCHAR *wzName); |
2519 | void DestroyConnection(CONNID dwConnectionId); |
2520 | void ChangeConnection(CONNID dwConnectionId); |
2521 | |
2522 | // |
2523 | // This function is used to identify the helper thread. |
2524 | // |
2525 | bool ThisIsHelperThread(void); |
2526 | |
2527 | HRESULT ReDaclEvents(PSECURITY_DESCRIPTOR securityDescriptor); |
2528 | |
2529 | #ifdef DACCESS_COMPILE |
2530 | virtual void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
2531 | virtual void EnumMemoryRegionsIfFuncEvalFrame(CLRDataEnumMemoryFlags flags, Frame * pFrame); |
2532 | #endif |
2533 | |
2534 | BOOL ShouldAutoAttach(); |
2535 | BOOL FallbackJITAttachPrompt(); |
2536 | HRESULT SetFiberMode(bool isFiberMode); |
2537 | |
2538 | HRESULT AddAppDomainToIPC (AppDomain *pAppDomain); |
2539 | HRESULT RemoveAppDomainFromIPC (AppDomain *pAppDomain); |
2540 | HRESULT UpdateAppDomainEntryInIPC (AppDomain *pAppDomain); |
2541 | |
2542 | void SendCreateAppDomainEvent(AppDomain * pAppDomain); |
2543 | void SendExitAppDomainEvent (AppDomain *pAppDomain); |
2544 | |
2545 | // Notify the debugger that an assembly has been loaded |
2546 | void LoadAssembly(DomainAssembly * pDomainAssembly); |
2547 | |
2548 | // Notify the debugger that an assembly has been unloaded |
2549 | void UnloadAssembly(DomainAssembly * pDomainAssembly); |
2550 | |
2551 | HRESULT FuncEvalSetup(DebuggerIPCE_FuncEvalInfo *pEvalInfo, BYTE **argDataArea, DebuggerEval **debuggerEvalKey); |
2552 | HRESULT FuncEvalSetupReAbort(Thread *pThread, Thread::ThreadAbortRequester requester); |
2553 | HRESULT FuncEvalAbort(DebuggerEval *debuggerEvalKey); |
2554 | HRESULT FuncEvalRudeAbort(DebuggerEval *debuggerEvalKey); |
2555 | HRESULT FuncEvalCleanup(DebuggerEval *debuggerEvalKey); |
2556 | |
2557 | HRESULT SetReference(void *objectRefAddress, VMPTR_OBJECTHANDLE vmObjectHandle, void *newReference); |
2558 | HRESULT SetValueClass(void *oldData, void *newData, DebuggerIPCE_BasicTypeData *type); |
2559 | |
2560 | HRESULT SetILInstrumentedCodeMap(MethodDesc *fd, |
2561 | BOOL fStartJit, |
2562 | ULONG32 cILMapEntries, |
2563 | COR_IL_MAP rgILMapEntries[]); |
2564 | |
2565 | void EarlyHelperThreadDeath(void); |
2566 | |
2567 | void ShutdownBegun(void); |
2568 | |
2569 | void LockDebuggerForShutdown(void); |
2570 | |
2571 | void DisableDebugger(void); |
2572 | |
2573 | // Pid of the left side process that this Debugger instance is in. |
2574 | DWORD GetPid(void) { return m_processId; } |
2575 | |
2576 | HRESULT NameChangeEvent(AppDomain *pAppDomain, Thread *pThread); |
2577 | |
2578 | // send an event to the RS indicating that there's a Ctrl-C or Ctrl-Break |
2579 | BOOL SendCtrlCToDebugger(DWORD dwCtrlType); |
2580 | |
2581 | // Allows the debugger to keep an up to date list of special threads |
2582 | HRESULT UpdateSpecialThreadList(DWORD cThreadArrayLength, DWORD *rgdwThreadIDArray); |
2583 | |
2584 | // Updates the pointer for the debugger services |
2585 | void SetIDbgThreadControl(IDebuggerThreadControl *pIDbgThreadControl); |
2586 | |
2587 | #ifndef DACCESS_COMPILE |
2588 | static void AcquireDebuggerDataLock(Debugger *pDebugger); |
2589 | |
2590 | static void ReleaseDebuggerDataLock(Debugger *pDebugger); |
2591 | |
2592 | #else // DACCESS_COMPILE |
2593 | // determine whether the LS holds the data lock. If it does, we will assume the locked data is in an |
2594 | // inconsistent state and will throw an exception. The DAC will execute this if we are executing code |
2595 | // that takes the lock. |
2596 | static void AcquireDebuggerDataLock(Debugger *pDebugger); |
2597 | |
2598 | // unimplemented--nothing to do here |
2599 | static void ReleaseDebuggerDataLock(Debugger *pDebugger); |
2600 | |
2601 | #endif // DACCESS_COMPILE |
2602 | |
2603 | // define type for DebuggerDataLockHolder |
2604 | typedef DacHolder<Debugger *, Debugger::AcquireDebuggerDataLock, Debugger::ReleaseDebuggerDataLock> DebuggerDataLockHolder; |
2605 | |
2606 | #ifdef _DEBUG |
2607 | // Use for asserts |
2608 | bool HasDebuggerDataLock() |
2609 | { |
2610 | // If no lazy data yet, then can't possibly have the debugger-data lock. |
2611 | if (!g_pDebugger->HasLazyData()) |
2612 | { |
2613 | return false; |
2614 | } |
2615 | return (g_pDebugger->GetDebuggerDataLock()->OwnedByCurrentThread()) != 0; |
2616 | } |
2617 | #endif |
2618 | |
2619 | |
2620 | // For Just-My-Code (aka Just-User-Code). |
2621 | // The jit injects probes in debuggable managed methods that look like: |
2622 | // if (*pFlag != 0) call JIT_DbgIsJustMyCode. |
    // pFlag is a per-module flag whose address is determined by GetJMCFlagAddr.
2624 | // JIT_DbgIsJustMyCode will get the ip & fp and call OnMethodEnter. |
2625 | |
2626 | // pIP is an ip within the method, right after the prolog. |
2627 | #ifndef DACCESS_COMPILE |
2628 | virtual void OnMethodEnter(void * pIP); |
2629 | virtual DWORD* GetJMCFlagAddr(Module * pModule); |
2630 | #endif |
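
    // A sketch of the injected probe, in pseudo-code (the exact jitted sequence is
    // target-specific; 'pFlag' here is the per-module DWORD returned by GetJMCFlagAddr):
    //
    //     // at the start of a debuggable, user-code method:
    //     if (*pFlag != 0)
    //     {
    //         JIT_DbgIsJustMyCode();   // ends up calling Debugger::OnMethodEnter with the ip
    //     }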
2631 | |
    // GetJMCFlagAddr provides a unique flag for each module. UpdateAllModuleJMCFlag
    // will go through all modules with user code and set their flags to fStatus;
    // UpdateModuleJMCFlag updates the flag for a single module.
2634 | void UpdateAllModuleJMCFlag(bool fStatus); |
2635 | void UpdateModuleJMCFlag(Module * pRuntime, bool fStatus); |
2636 | |
2637 | // Set the default JMC status of the specified module. This function |
    // also finds all the DMIs in the specified module and updates their
    // JMC status as well.
2640 | void SetModuleDefaultJMCStatus(Module * pRuntimeModule, bool fStatus); |
2641 | |
2642 | #ifndef DACCESS_COMPILE |
2643 | static DWORD GetThreadIdHelper(Thread *pThread); |
2644 | #endif // DACCESS_COMPILE |
2645 | |
2646 | private: |
2647 | DebuggerJitInfo *GetJitInfoWorker(MethodDesc *fd, const BYTE *pbAddr, DebuggerMethodInfo **pMethInfo); |
2648 | |
2649 | // Save the necessary information for the debugger to recognize an IP in one of the thread redirection |
2650 | // functions. |
2651 | void InitializeHijackFunctionAddress(); |
2652 | |
2653 | void InitDebugEventCounting(); |
2654 | void DoHelperThreadDuty(); |
2655 | |
2656 | typedef enum |
2657 | { |
2658 | ATTACH_YES, |
2659 | ATTACH_NO, |
2660 | ATTACH_TERMINATE |
2661 | } ATTACH_ACTION; |
2662 | |
    // Returns ATTACH_YES if the debugger is not attached and DbgJITDebugLaunchSetting
    // is set to ATTACH_DEBUGGER, or is set to ASK_USER and the user requests attaching.
2665 | ATTACH_ACTION ShouldAttachDebugger(bool fIsUserBreakpoint); |
2666 | ATTACH_ACTION ShouldAttachDebuggerProxy(bool fIsUserBreakpoint); |
2667 | friend void ShouldAttachDebuggerStub(ShouldAttachDebuggerParams * p); |
2668 | friend struct ShouldAttachDebuggerParams; |
2669 | |
2670 | void TrapAllRuntimeThreads(); |
2671 | void ReleaseAllRuntimeThreads(AppDomain *pAppDomain); |
2672 | |
2673 | #ifndef DACCESS_COMPILE |
2674 | // @dbgtodo inspection - eventually, all replies should be removed because requests will be DAC-ized. |
2675 | // Do not call this function unless you are getting ThreadId from RS |
2676 | void InitIPCReply(DebuggerIPCEvent *ipce, |
2677 | DebuggerIPCEventType type) |
2678 | { |
2679 | LIMITED_METHOD_CONTRACT; |
2680 | |
2681 | _ASSERTE(ipce != NULL); |
2682 | ipce->type = type; |
2683 | ipce->hr = S_OK; |
2684 | |
2685 | ipce->processId = m_processId; |
2686 | ipce->threadId = 0; |
2687 | // AppDomain, Thread, are already initialized |
2688 | } |
2689 | |
2690 | void InitIPCEvent(DebuggerIPCEvent *ipce, |
2691 | DebuggerIPCEventType type, |
2692 | Thread *pThread, |
2693 | AppDomain* pAppDomain) |
2694 | { |
2695 | WRAPPER_NO_CONTRACT; |
2696 | |
2697 | InitIPCEvent(ipce, type, pThread, VMPTR_AppDomain::MakePtr(pAppDomain)); |
2698 | } |
2699 | |
    // Let this function figure out the unique id that we will use for the Thread.
2701 | void InitIPCEvent(DebuggerIPCEvent *ipce, |
2702 | DebuggerIPCEventType type, |
2703 | Thread *pThread, |
2704 | VMPTR_AppDomain vmAppDomain) |
2705 | { |
2706 | CONTRACTL |
2707 | { |
2708 | NOTHROW; |
2709 | GC_NOTRIGGER; |
2710 | } |
2711 | CONTRACTL_END; |
2712 | |
2713 | _ASSERTE(ipce != NULL); |
2714 | ipce->type = type; |
2715 | ipce->hr = S_OK; |
2716 | ipce->processId = m_processId; |
2717 | ipce->threadId = pThread ? pThread->GetOSThreadId() : 0; |
2718 | ipce->vmAppDomain = vmAppDomain; |
2719 | ipce->vmThread.SetRawPtr(pThread); |
2720 | } |
2721 | |
2722 | void InitIPCEvent(DebuggerIPCEvent *ipce, |
2723 | DebuggerIPCEventType type) |
2724 | { |
2725 | WRAPPER_NO_CONTRACT; |
2726 | |
2727 | _ASSERTE((type == DB_IPCE_SYNC_COMPLETE) || |
2728 | (type == DB_IPCE_TEST_CRST) || |
2729 | (type == DB_IPCE_TEST_RWLOCK)); |
2730 | |
2731 | Thread *pThread = g_pEEInterface->GetThread(); |
2732 | AppDomain *pAppDomain = NULL; |
2733 | |
2734 | if (pThread) |
2735 | { |
2736 | pAppDomain = pThread->GetDomain(); |
2737 | } |
2738 | |
2739 | InitIPCEvent(ipce, |
2740 | type, |
2741 | pThread, |
2742 | VMPTR_AppDomain::MakePtr(pAppDomain)); |
2743 | } |
2744 | #endif // DACCESS_COMPILE |
2745 | |
2746 | HRESULT GetFunctionInfo(Module *pModule, |
2747 | mdToken functionToken, |
2748 | BYTE **pCodeStart, |
2749 | unsigned int *pCodeSize, |
2750 | mdToken *pLocalSigToken); |
2751 | |
2752 | // Allocate a buffer and send it to the right side |
2753 | HRESULT GetAndSendBuffer(DebuggerRCThread* rcThread, ULONG bufSize); |
2754 | |
2755 | // Allocate a buffer in the left-side for use by the right-side |
2756 | HRESULT AllocateRemoteBuffer( ULONG bufSize, void **ppBuffer ); |
2757 | |
    // Releases a previously requested remote buffer and sends the reply
2759 | HRESULT SendReleaseBuffer(DebuggerRCThread* rcThread, void *pBuffer); |
2760 | |
2761 | public: |
    // Release a previously requested remote buffer
2763 | HRESULT ReleaseRemoteBuffer(void *pBuffer, bool removeFromBlobList); |
2764 | |
2765 | private: |
2766 | #ifdef EnC_SUPPORTED |
2767 | // Apply an EnC edit and send the result event to the RS |
2768 | HRESULT ApplyChangesAndSendResult(DebuggerModule * pDebuggerModule, |
2769 | DWORD cbMetadata, |
2770 | BYTE *pMetadata, |
2771 | DWORD cbIL, |
2772 | BYTE *pIL); |
2773 | #endif // EnC_SUPPORTED |
2774 | |
2775 | bool GetCompleteDebuggerLaunchString(SString * pStrArgsBuf); |
2776 | |
2777 | // Launch a debugger for jit-attach |
2778 | void EnsureDebuggerAttached(Thread * pThread, EXCEPTION_POINTERS * pExceptionInfo, BOOL willSendManagedEvent, BOOL explicitUserRequest); |
2779 | HRESULT EDAHelper(PROCESS_INFORMATION * pProcessInfo); |
2780 | HRESULT EDAHelperProxy(PROCESS_INFORMATION * pProcessInfo); |
2781 | friend void EDAHelperStub(EnsureDebuggerAttachedParams * p); |
2782 | DebuggerLaunchSetting GetDbgJITDebugLaunchSetting(); |
2783 | |
2784 | public: |
2785 | HRESULT InitAppDomainIPC(void); |
2786 | HRESULT TerminateAppDomainIPC(void); |
2787 | |
2788 | bool ResumeThreads(AppDomain* pAppDomain); |
2789 | |
2790 | void ProcessAnyPendingEvals(Thread *pThread); |
2791 | |
2792 | bool HasLazyData(); |
2793 | RCThreadLazyInit * GetRCThreadLazyData(); |
2794 | |
2795 | // The module table is lazy init, and may be NULL. Callers must check. |
2796 | DebuggerModuleTable * GetModuleTable(); |
2797 | |
2798 | DebuggerHeap *GetInteropSafeHeap(); |
2799 | DebuggerHeap *GetInteropSafeHeap_NoThrow(); |
2800 | DebuggerHeap *GetInteropSafeExecutableHeap(); |
2801 | DebuggerHeap *GetInteropSafeExecutableHeap_NoThrow(); |
2802 | DebuggerLazyInit *GetLazyData(); |
2803 | HelperCanary * GetCanary(); |
2804 | void MarkDebuggerAttachedInternal(); |
2805 | void MarkDebuggerUnattachedInternal(); |
2806 | |
2807 | HANDLE GetAttachEvent() { return GetLazyData()->m_exAttachEvent; } |
2808 | |
2809 | private: |
2810 | #ifndef DACCESS_COMPILE |
2811 | void StartCanaryThread(); |
2812 | #endif |
2813 | DebuggerPendingFuncEvalTable *GetPendingEvals() { return GetLazyData()->m_pPendingEvals; } |
2814 | SIZE_T_UNORDERED_ARRAY * GetBPMappingDuplicates() { return &GetLazyData()->m_BPMappingDuplicates; } |
2815 | HANDLE GetUnmanagedAttachEvent() { return GetLazyData()->m_exUnmanagedAttachEvent; } |
2816 | BOOL GetDebuggerHandlingCtrlC() { return GetLazyData()->m_DebuggerHandlingCtrlC; } |
2817 | void SetDebuggerHandlingCtrlC(BOOL f) { GetLazyData()->m_DebuggerHandlingCtrlC = f; } |
2818 | HANDLE GetCtrlCMutex() { return GetLazyData()->m_CtrlCMutex; } |
2819 | UnorderedPtrArray* GetMemBlobs() { return &GetLazyData()->m_pMemBlobs; } |
2820 | |
2821 | |
2822 | PTR_DebuggerRCThread m_pRCThread; |
2823 | DWORD m_processId; // our pid |
2824 | BOOL m_trappingRuntimeThreads; |
2825 | BOOL m_stopped; |
2826 | BOOL m_unrecoverableError; |
2827 | BOOL m_ignoreThreadDetach; |
2828 | PTR_DebuggerMethodInfoTable m_pMethodInfos; |
2829 | |
2830 | |
2831 | // This is the main debugger lock. It is a large lock and used to synchronize complex operations |
    // such as sending IPC events, debugger synchronization, and attach / detach.
2833 | // The debugger effectively can't make any radical state changes without holding this lock. |
2834 | // |
2835 | // |
2836 | Crst m_mutex; // The main debugger lock. |
2837 | |
2838 | // Flag to track if the debugger Crst needs to go into "Shutdown for Finalizer" mode. |
2839 | // This means that only special shutdown threads (helper / finalizer / shutdown) can |
2840 | // take the lock, and all others will just block forever if they take it. |
2841 | bool m_fShutdownMode; |
2842 | |
2843 | // |
2844 | // Flag to track if the VM has told the debugger that it should block all threads |
    // as soon as possible as they go through the debugger. As of this writing, this is
    // done via the debugger Crst: anyone attempting to take the lock will block forever.
2847 | // |
2848 | bool m_fDisabled; |
2849 | |
2850 | #ifdef _DEBUG |
2851 | // Ownership tracking for debugging. |
2852 | DWORD m_mutexOwner; |
2853 | |
2854 | // Tid that last called LockForEventSending. |
2855 | DWORD m_tidLockedForEventSending; |
2856 | #endif |
2857 | LONG m_threadsAtUnsafePlaces; |
2858 | Volatile<BOOL> m_jitAttachInProgress; |
2859 | BOOL m_launchingDebugger; |
2860 | BOOL m_LoggingEnabled; |
2861 | AppDomainEnumerationIPCBlock *m_pAppDomainCB; |
2862 | |
2863 | LONG m_dClassLoadCallbackCount; |
2864 | |
2865 | // Lazily initialized array of debugger modules |
2866 | // @dbgtodo module - eventually, DebuggerModule should go away, |
2867 | // and all such information should be stored in either the VM's module class or in the RS. |
2868 | DebuggerModuleTable *m_pModules; |
2869 | |
2870 | // DacDbiInterfaceImpl needs to be able to write to private fields in the debugger class. |
2871 | friend class DacDbiInterfaceImpl; |
2872 | |
2873 | // Set OOP by RS to request a sync after a debug event. |
2874 | // Clear by LS when we sync. |
2875 | Volatile<BOOL> m_RSRequestedSync; |
2876 | |
2877 | // send first chance/handler found callbacks for exceptions outside of JMC to the LS |
2878 | Volatile<BOOL> m_sendExceptionsOutsideOfJMC; |
2879 | |
    // represents the different thread redirection functions recognized by the debugger
2881 | enum HijackFunction |
2882 | { |
2883 | kUnhandledException = 0, |
2884 | kRedirectedForGCThreadControl, |
2885 | kRedirectedForDbgThreadControl, |
2886 | kRedirectedForUserSuspend, |
2887 | kRedirectedForYieldTask, |
2888 | #if defined(HAVE_GCCOVER) && defined(_TARGET_AMD64_) |
2889 | kRedirectedForGCStress, |
2890 | #endif // HAVE_GCCOVER && _TARGET_AMD64_ |
2891 | kMaxHijackFunctions, |
2892 | }; |
2893 | |
2894 | // static array storing the range of the thread redirection functions |
2895 | static MemoryRange s_hijackFunction[kMaxHijackFunctions]; |
2896 | |
2897 | // Currently DAC doesn't support static array members. This field is used to work around this limitation. |
2898 | ARRAY_PTR_MemoryRange m_rgHijackFunction; |
2899 | |
2900 | public: |
2901 | |
2902 | |
2903 | IDebuggerThreadControl *m_pIDbgThreadControl; |
2904 | |
2905 | |
2906 | // Sometimes we force all exceptions to be non-interceptable. |
2907 | // There are currently three cases where we set this field to true: |
2908 | // |
2909 | // 1) NotifyOfCHFFilter() |
2910 | // - If the CHF filter is the first handler we encounter in the first pass, then there is no |
2911 | // managed stack frame at which we can intercept the exception anyway. |
2912 | // |
2913 | // 2) LastChanceManagedException() |
2914 | // - If Watson is launched for an unhandled exception, then the exception cannot be intercepted. |
2915 | // |
2916 | // 3) SecondChanceHijackFuncWorker() |
    //      - The RS hijacks the thread to this function to prevent the OS from killing the process at
2918 | // the end of the first pass. (When a debugger is attached, the OS does not run a second pass.) |
2919 | // This function ensures that the debugger gets a second chance notification. |
2920 | BOOL m_forceNonInterceptable; |
2921 | |
2922 | // When we are doing an early attach, the RS shim should not queue all the fake attach events for |
2923 | // the process, the appdomain, and the thread. Otherwise we'll get duplicate events when these |
2924 | // entities are actually created. This flag is used to mark whether we are doing an early attach. |
2925 | // There are still time windows where we can get duplicate events, but this flag closes down the |
2926 | // most common scenario. |
2927 | SVAL_DECL(BOOL, s_fEarlyAttach); |
2928 | |
2929 | private: |
2930 | Crst * GetDebuggerDataLock() { SUPPORTS_DAC; return &GetLazyData()-> m_DebuggerDataLock; } |
2931 | |
    // This is lazily initialized. It's just a wrapper around a handle, so we embed it here.
2933 | DebuggerHeap m_heap; |
2934 | DebuggerHeap m_executableHeap; |
2935 | |
2936 | PTR_DebuggerLazyInit m_pLazyData; |
2937 | |
2938 | |
2939 | // A list of all defines that affect layout of MD types |
2940 | typedef enum _Target_Defines |
2941 | { |
2942 | DEFINE__DEBUG = 1, |
2943 | } _Target_Defines; |
2944 | |
2945 | // A bitfield that has bits set at build time corresponding |
2946 | // to which defines are active |
2947 | static const int _defines = 0 |
2948 | #ifdef _DEBUG |
2949 | | DEFINE__DEBUG |
2950 | #endif |
2951 | ; |
2952 | |
2953 | public: |
2954 | DWORD m_defines; |
2955 | DWORD m_mdDataStructureVersion; |
2956 | #ifndef DACCESS_COMPILE |
2957 | virtual void SuspendForGarbageCollectionStarted(); |
2958 | virtual void SuspendForGarbageCollectionCompleted(); |
2959 | virtual void ResumeForGarbageCollectionStarted(); |
2960 | #endif |
2961 | BOOL m_isBlockedOnGarbageCollectionEvent; |
2962 | BOOL m_willBlockOnGarbageCollectionEvent; |
2963 | BOOL m_isGarbageCollectionEventsEnabled; |
2964 | // this latches m_isGarbageCollectionEventsEnabled in BeforeGarbageCollection so we can |
2965 | // guarantee the corresponding AfterGC event is sent even if the events are disabled during GC. |
2966 | BOOL m_isGarbageCollectionEventsEnabledLatch; |
2967 | private: |
2968 | HANDLE GetGarbageCollectionBlockerEvent() { return GetLazyData()->m_garbageCollectionBlockerEvent; } |
2969 | |
2970 | }; |
2971 | |
2972 | |
2973 | |
2974 | extern "C" { |
2975 | void STDCALL FuncEvalHijack(void); |
2976 | void * STDCALL FuncEvalHijackWorker(DebuggerEval *pDE); |
2977 | |
2978 | void STDCALL ExceptionHijack(void); |
2979 | void STDCALL ExceptionHijackEnd(void); |
2980 | void STDCALL ExceptionHijackWorker(T_CONTEXT * pContext, EXCEPTION_RECORD * pRecord, EHijackReason::EHijackReason reason, void * pData); |
2981 | |
2982 | void RedirectedHandledJITCaseForGCThreadControl_Stub(); |
2983 | void RedirectedHandledJITCaseForGCThreadControl_StubEnd(); |
2984 | |
2985 | void RedirectedHandledJITCaseForDbgThreadControl_Stub(); |
2986 | void RedirectedHandledJITCaseForDbgThreadControl_StubEnd(); |
2987 | |
2988 | void RedirectedHandledJITCaseForUserSuspend_Stub(); |
2989 | void RedirectedHandledJITCaseForUserSuspend_StubEnd(); |
2990 | |
2991 | #if defined(HAVE_GCCOVER) && defined(_TARGET_AMD64_) |
2992 | void RedirectedHandledJITCaseForGCStress_Stub(); |
2993 | void RedirectedHandledJITCaseForGCStress_StubEnd(); |
2994 | #endif // HAVE_GCCOVER && _TARGET_AMD64_ |
2995 | }; |
2996 | |
2997 | |
// CNewZeroData is the allocator used by all the hash tables that the helper thread could possibly alter. It uses
2999 | // the interop safe allocator. |
3000 | class CNewZeroData |
3001 | { |
3002 | public: |
3003 | #ifndef DACCESS_COMPILE |
3004 | static BYTE *Alloc(int iSize, int iMaxSize) |
3005 | { |
3006 | CONTRACTL |
3007 | { |
3008 | NOTHROW; |
3009 | GC_NOTRIGGER; |
3010 | PRECONDITION(g_pDebugger != NULL); |
3011 | } |
3012 | CONTRACTL_END; |
3013 | |
3014 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3015 | if (pHeap == NULL) |
3016 | { |
3017 | return NULL; |
3018 | } |
3019 | |
3020 | BYTE *pb = (BYTE *) pHeap->Alloc(iSize); |
3021 | if (pb == NULL) |
3022 | { |
3023 | return NULL; |
3024 | } |
3025 | |
3026 | memset(pb, 0, iSize); |
3027 | return pb; |
3028 | } |
3029 | static void Free(BYTE *pPtr, int iSize) |
3030 | { |
3031 | CONTRACTL |
3032 | { |
3033 | NOTHROW; |
3034 | GC_NOTRIGGER; |
3035 | PRECONDITION(g_pDebugger != NULL); |
3036 | } |
3037 | CONTRACTL_END; |
3038 | |
3039 | |
3040 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3041 | _ASSERTE(pHeap != NULL); // should already exist |
3042 | |
3043 | pHeap->Free(pPtr); |
3044 | } |
3045 | static BYTE *Grow(BYTE *&pPtr, int iCurSize) |
3046 | { |
3047 | CONTRACTL |
3048 | { |
3049 | NOTHROW; |
3050 | GC_NOTRIGGER; |
3051 | PRECONDITION(g_pDebugger != NULL); |
3052 | } |
3053 | CONTRACTL_END; |
3054 | |
3055 | void *p; |
3056 | |
3057 | DebuggerHeap* pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3058 | _ASSERTE(pHeap != NULL); // should already exist |
3059 | |
3060 | PREFIX_ASSUME( iCurSize >= 0 ); |
3061 | S_UINT32 iNewSize = S_UINT32( iCurSize ) + S_UINT32( GrowSize(iCurSize) ); |
3062 | if( iNewSize.IsOverflow() ) |
3063 | { |
3064 | return NULL; |
3065 | } |
3066 | p = pHeap->Realloc(pPtr, iNewSize.Value(), iCurSize); |
3067 | if (p == NULL) |
3068 | { |
3069 | return NULL; |
3070 | } |
3071 | |
3072 | memset((BYTE*)p+iCurSize, 0, GrowSize(iCurSize)); |
3073 | return (pPtr = (BYTE *)p); |
3074 | } |
3075 | |
3076 | // A hashtable may recycle memory. We need to zero it out again. |
3077 | static void Clean(BYTE * pData, int iSize) |
3078 | { |
3079 | LIMITED_METHOD_CONTRACT; |
3080 | |
3081 | memset(pData, 0, iSize); |
3082 | } |
3083 | #endif // DACCESS_COMPILE |
3084 | |
3085 | static int RoundSize(int iSize) |
3086 | { |
3087 | LIMITED_METHOD_CONTRACT; |
3088 | |
3089 | return (iSize); |
3090 | } |
3091 | static int GrowSize(int iCurSize) |
3092 | { |
3093 | LIMITED_METHOD_CONTRACT; |
3094 | int newSize = (3 * iCurSize) / 2; |
3095 | return (newSize < 256) ? 256 : newSize; |
3096 | } |
3097 | }; |
3098 | |
3099 | class DebuggerPendingFuncEvalTable : private CHashTableAndData<CNewZeroData> |
3100 | { |
3101 | public: |
3102 | virtual ~DebuggerPendingFuncEvalTable() = default; |
3103 | |
3104 | private: |
3105 | |
3106 | BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2) |
3107 | { |
3108 | LIMITED_METHOD_DAC_CONTRACT; |
3109 | |
3110 | #if defined(DACCESS_COMPILE) |
3111 | // This function hasn't been tested yet in the DAC build. Make sure the DACization is correct. |
3112 | DacNotImpl(); |
3113 | #endif // DACCESS_COMPILE |
3114 | |
3115 | Thread * pThread1 = reinterpret_cast<Thread *>(k1); |
3116 | Thread * pThread2 = dac_cast<PTR_DebuggerPendingFuncEval>(const_cast<HASHENTRY *>(pc2))->pThread; |
3117 | |
3118 | return (pThread1 != pThread2); |
3119 | } |
3120 | |
3121 | ULONG HASH(Thread* pThread) |
3122 | { |
3123 | LIMITED_METHOD_CONTRACT; |
3124 | return (ULONG)((SIZE_T)pThread); // only use low 32-bits if 64-bit |
3125 | } |
3126 | |
3127 | |
3128 | SIZE_T KEY(Thread * pThread) |
3129 | { |
3130 | LIMITED_METHOD_CONTRACT; |
3131 | return (SIZE_T)pThread; |
3132 | } |
3133 | |
3134 | public: |
3135 | |
3136 | #ifndef DACCESS_COMPILE |
3137 | DebuggerPendingFuncEvalTable() : CHashTableAndData<CNewZeroData>(11) |
3138 | { |
3139 | WRAPPER_NO_CONTRACT; |
3140 | |
3141 | SUPPRESS_ALLOCATION_ASSERTS_IN_THIS_SCOPE; |
3142 | NewInit(11, sizeof(DebuggerPendingFuncEval), 11); |
3143 | } |
3144 | |
3145 | void AddPendingEval(Thread *pThread, DebuggerEval *pDE) |
3146 | { |
3147 | WRAPPER_NO_CONTRACT; |
3148 | |
3149 | _ASSERTE((pThread != NULL) && (pDE != NULL)); |
3150 | |
3151 | DebuggerPendingFuncEval *pfe = (DebuggerPendingFuncEval*)Add(HASH(pThread)); |
3152 | pfe->pThread = pThread; |
3153 | pfe->pDE = pDE; |
3154 | } |
3155 | |
3156 | void RemovePendingEval(Thread* pThread) |
3157 | { |
3158 | WRAPPER_NO_CONTRACT; |
3159 | |
3160 | _ASSERTE(pThread != NULL); |
3161 | |
3162 | DebuggerPendingFuncEval *entry = (DebuggerPendingFuncEval*)Find(HASH(pThread), KEY(pThread)); |
3163 | Delete(HASH(pThread), (HASHENTRY*)entry); |
3164 | } |
3165 | |
3166 | #endif // #ifndef DACCESS_COMPILE |
3167 | |
3168 | DebuggerPendingFuncEval *GetPendingEval(Thread* pThread) |
3169 | { |
3170 | WRAPPER_NO_CONTRACT; |
3171 | |
3172 | DebuggerPendingFuncEval *entry = (DebuggerPendingFuncEval*)Find(HASH(pThread), KEY(pThread)); |
3173 | return entry; |
3174 | } |
3175 | }; |
3176 | |
3177 | struct DebuggerModuleEntry |
3178 | { |
3179 | FREEHASHENTRY entry; |
3180 | PTR_DebuggerModule module; |
3181 | }; |
3182 | |
3183 | typedef DPTR(struct DebuggerModuleEntry) PTR_DebuggerModuleEntry; |
3184 | |
3185 | class DebuggerModuleTable : private CHashTableAndData<CNewZeroData> |
3186 | { |
3187 | #ifdef DACCESS_COMPILE |
3188 | public: |
3189 | virtual ~DebuggerModuleTable() = default; |
3190 | #endif |
3191 | |
3192 | private: |
3193 | |
3194 | BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2) |
3195 | { |
3196 | LIMITED_METHOD_DAC_CONTRACT; |
3197 | |
3198 | #if defined(DACCESS_COMPILE) |
3199 | // This function hasn't been tested yet in the DAC build. Make sure the DACization is correct. |
3200 | DacNotImpl(); |
3201 | #endif // DACCESS_COMPILE |
3202 | |
3203 | Module * pModule1 = reinterpret_cast<Module *>(k1); |
3204 | Module * pModule2 = |
3205 | dac_cast<PTR_DebuggerModuleEntry>(const_cast<HASHENTRY *>(pc2))->module->GetRuntimeModule(); |
3206 | |
3207 | return (pModule1 != pModule2); |
3208 | } |
3209 | |
3210 | ULONG HASH(Module* module) |
3211 | { |
3212 | LIMITED_METHOD_CONTRACT; |
3213 | return (ULONG)((SIZE_T)module); // only use low 32-bits if 64-bit |
3214 | } |
3215 | |
3216 | SIZE_T KEY(Module * pModule) |
3217 | { |
3218 | LIMITED_METHOD_CONTRACT; |
3219 | return (SIZE_T)pModule; |
3220 | } |
3221 | |
3222 | #ifdef _DEBUG |
3223 | bool ThreadHoldsLock(); |
3224 | #endif |
3225 | |
3226 | public: |
3227 | |
3228 | #ifndef DACCESS_COMPILE |
3229 | |
3230 | DebuggerModuleTable(); |
3231 | virtual ~DebuggerModuleTable(); |
3232 | |
3233 | void AddModule(DebuggerModule *module); |
3234 | |
3235 | void RemoveModule(Module* module, AppDomain *pAppDomain); |
3236 | |
3237 | |
3238 | void Clear(); |
3239 | |
3240 | // |
3241 | // RemoveModules removes any module loaded into the given appdomain from the hash. This is used when we send an |
3242 | // ExitAppdomain event to ensure that there are no leftover modules in the hash. This can happen when we have shared |
3243 | // modules that aren't properly accounted for in the CLR. We miss sending UnloadModule events for those modules, so |
3244 | // we clean them up with this method. |
3245 | // |
3246 | void RemoveModules(AppDomain *pAppDomain); |
3247 | #endif // #ifndef DACCESS_COMPILE |
3248 | |
3249 | DebuggerModule *GetModule(Module* module); |
3250 | |
3251 | // We should never look for a NULL Module * |
3252 | DebuggerModule *GetModule(Module* module, AppDomain* pAppDomain); |
3253 | DebuggerModule *GetFirstModule(HASHFIND *info); |
3254 | DebuggerModule *GetNextModule(HASHFIND *info); |
3255 | }; |
3256 | |
3257 | // struct DebuggerMethodInfoKey: Key for each of the method info hash table entries. |
3258 | // Module * m_pModule: This and m_token make up the key |
3259 | // mdMethodDef m_token: This and m_pModule make up the key |
3260 | // |
3261 | // Note: This is used for hashing, so the structure must be totally blittable. |
3262 | typedef DPTR(struct DebuggerMethodInfoKey) PTR_DebuggerMethodInfoKey; |
3263 | struct DebuggerMethodInfoKey |
3264 | { |
3265 | PTR_Module pModule; |
3266 | mdMethodDef token; |
3267 | } ; |
3268 | |
3269 | // struct DebuggerMethodInfoEntry: Entry for the JIT info hash table. |
3270 | // FREEHASHENTRY entry: Needed for use by the hash table |
3271 | // DebuggerMethodInfo * ji: The actual DebuggerMethodInfo to |
3272 | // hash. Note that DMI's will be hashed by MethodDesc. |
3273 | typedef DPTR(struct DebuggerMethodInfoEntry) PTR_DebuggerMethodInfoEntry; |
3274 | struct DebuggerMethodInfoEntry |
3275 | { |
3276 | FREEHASHENTRY entry; |
3277 | DebuggerMethodInfoKey key; |
3278 | SIZE_T nVersion; |
3279 | SIZE_T nVersionLastRemapped; |
3280 | PTR_DebuggerMethodInfo mi; |
3281 | |
3282 | #ifdef DACCESS_COMPILE |
3283 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
3284 | #endif |
3285 | }; |
3286 | |
3287 | // class DebuggerMethodInfoTable: Hash table to hold all the non-JIT related |
//    info for each method we see. The JIT infos live in a separate table
3289 | // keyed by MethodDescs - there may be multiple |
3290 | // JITted realizations of each MethodDef, e.g. under different generic |
3291 | // assumptions. Hangs off of the Debugger object. |
3292 | // INVARIANT: There is only one DebuggerMethodInfo per method |
3293 | // in the table. Note that DMI's will be hashed by MethodDesc. |
3294 | // |
3295 | class DebuggerMethodInfoTable : private CHashTableAndData<CNewZeroData> |
3296 | { |
3297 | VPTR_BASE_CONCRETE_VTABLE_CLASS(DebuggerMethodInfoTable); |
3298 | |
3299 | public: |
3300 | virtual ~DebuggerMethodInfoTable() = default; |
3301 | |
3302 | private: |
3303 | BOOL Cmp(SIZE_T k1, const HASHENTRY * pc2) |
3304 | { |
3305 | LIMITED_METHOD_DAC_CONTRACT; |
3306 | |
3307 | // This is the inverse of the KEY() function. |
3308 | DebuggerMethodInfoKey * pDjik = reinterpret_cast<DebuggerMethodInfoKey *>(k1); |
3309 | |
3310 | DebuggerMethodInfoEntry * pDjie = dac_cast<PTR_DebuggerMethodInfoEntry>(const_cast<HASHENTRY *>(pc2)); |
3311 | |
3312 | return (pDjik->pModule != pDjie->key.pModule) || |
3313 | (pDjik->token != pDjie->key.token); |
3314 | } |
3315 | |
3316 | ULONG HASH(DebuggerMethodInfoKey* pDjik) |
3317 | { |
3318 | LIMITED_METHOD_DAC_CONTRACT; |
3319 | return HashPtr( pDjik->token, pDjik->pModule ); |
3320 | } |
3321 | |
3322 | SIZE_T KEY(DebuggerMethodInfoKey * pDjik) |
3323 | { |
        // This casts a host pointer to a SIZE_T, so the key is restricted to the host address space.
3325 | // This key is just passed to Cmp(), which will cast it back to a DebuggerMethodInfoKey*. |
3326 | LIMITED_METHOD_DAC_CONTRACT; |
3327 | return (SIZE_T)pDjik; |
3328 | } |
3329 | |
3330 | //#define _DEBUG_DMI_TABLE |
3331 | |
3332 | #ifdef _DEBUG_DMI_TABLE |
3333 | public: |
3334 | ULONG CheckDmiTable(); |
3335 | |
3336 | #define CHECK_DMI_TABLE (CheckDmiTable()) |
3337 | #define CHECK_DMI_TABLE_DEBUGGER (m_pMethodInfos->CheckDmiTable()) |
3338 | |
3339 | #else |
3340 | |
3341 | #define CHECK_DMI_TABLE |
3342 | #define CHECK_DMI_TABLE_DEBUGGER |
3343 | |
3344 | #endif // _DEBUG_DMI_TABLE |
3345 | |
3346 | public: |
3347 | |
3348 | #ifndef DACCESS_COMPILE |
3349 | |
3350 | DebuggerMethodInfoTable(); |
3351 | |
3352 | HRESULT AddMethodInfo(Module *pModule, |
3353 | mdMethodDef token, |
3354 | DebuggerMethodInfo *mi); |
3355 | |
3356 | HRESULT OverwriteMethodInfo(Module *pModule, |
3357 | mdMethodDef token, |
3358 | DebuggerMethodInfo *mi, |
3359 | BOOL fOnlyIfNull); |
3360 | |
3361 | // pModule is being unloaded - remove any entries that belong to it. Why? |
3362 | // (a) Correctness: the module can be reloaded at the same address, |
3363 | // which will cause accidental matches with our hashtable (indexed by |
    // {Module*, mdMethodDef}).
3365 | // (b) Perf: don't waste the memory! |
3366 | void ClearMethodsOfModule(Module *pModule); |
3367 | void DeleteEntryDMI(DebuggerMethodInfoEntry *entry); |
3368 | |
3369 | #endif // #ifndef DACCESS_COMPILE |
3370 | |
3371 | DebuggerMethodInfo *GetMethodInfo(Module *pModule, mdMethodDef token); |
3372 | DebuggerMethodInfo *GetFirstMethodInfo(HASHFIND *info); |
3373 | DebuggerMethodInfo *GetNextMethodInfo(HASHFIND *info); |
3374 | |
3375 | #ifdef DACCESS_COMPILE |
3376 | void EnumMemoryRegions(CLRDataEnumMemoryFlags flags); |
3377 | #endif |
3378 | }; |
3379 | |
3380 | class DebuggerEvalBreakpointInfoSegment |
3381 | { |
3382 | public: |
3383 | // DebuggerEvalBreakpointInfoSegment contains just the breakpoint |
3384 | // instruction and a pointer to the associated DebuggerEval. It makes |
3385 | // it easy to go from the instruction to the corresponding DebuggerEval |
3386 | // object. It has been separated from the rest of the DebuggerEval |
3387 | // because it needs to be in a section of memory that's executable, |
3388 | // while the rest of DebuggerEval does not. By having it separate, we |
3389 | // don't need to have the DebuggerEval contents in executable memory. |
3390 | BYTE m_breakpointInstruction[CORDbg_BREAK_INSTRUCTION_SIZE]; |
3391 | DebuggerEval *m_associatedDebuggerEval; |
3392 | |
3393 | DebuggerEvalBreakpointInfoSegment(DebuggerEval* dbgEval) |
3394 | : m_associatedDebuggerEval(dbgEval) |
3395 | { |
3396 | ASSERT(dbgEval != NULL); |
3397 | } |
3398 | }; |
3399 | |
3400 | /* ------------------------------------------------------------------------ * |
3401 | * DebuggerEval class |
3402 | * |
 * Note that arguments get passed in a block allocated when
3404 | * the func-eval is set up. The setup phase passes the total count of arguments. |
3405 | * |
3406 | * In some situations type arguments must also be passed, e.g. |
3407 | * when performing a "newarr" operation or calling a generic function with a |
3408 | * "funceval". In the setup phase we pass a count of the number of |
3409 | * nodes in the "flattened" type expressions for the type arguments, if any. |
3410 | * e.g. for calls to non-generic code this is 0. |
3411 | * - for "newobj List<int>" this is 1: there is one type argument "int". |
3412 | * - for "newobj Dict<string,int>" this is 2: there are two |
3413 | * type arguments "string" and "int". |
3414 | * - for "newobj Dict<string,List<int>>" this is 3: there are two |
 *   type arguments but the second contains two nodes (one for List and one for int).
3416 | * The type argument will get placed in the allocated argument block, |
3417 | * the order being determined by the order they occur in the tree, i.e. |
3418 | * left-to-right, top-to-bottom in the type expressions tree, e.g. for |
3419 | * type arguments <string,List<int>> you get string followed by List followed by int. |
3420 | * ------------------------------------------------------------------------ */ |
3421 | |
3422 | class DebuggerEval |
3423 | { |
3424 | public: |
3425 | |
3426 | // |
3427 | // Used as a bit field. |
3428 | // |
3429 | enum FUNC_EVAL_ABORT_TYPE |
3430 | { |
3431 | FE_ABORT_NONE = 0, |
3432 | FE_ABORT_NORMAL = 1, |
3433 | FE_ABORT_RUDE = 2 |
3434 | }; |
3435 | |
3436 | T_CONTEXT m_context; |
3437 | Thread *m_thread; |
3438 | DebuggerIPCE_FuncEvalType m_evalType; |
3439 | mdMethodDef m_methodToken; |
3440 | mdTypeDef m_classToken; |
3441 | ADID m_appDomainId; // Safe even if AD unloaded |
3442 | PTR_DebuggerModule m_debuggerModule; // Only valid if AD is still around |
3443 | RSPTR_CORDBEVAL m_funcEvalKey; |
3444 | bool m_successful; // Did the eval complete successfully |
3445 | Debugger::AreValueTypesBoxed m_retValueBoxing; // Is the return value boxed? |
3446 | unsigned int m_argCount; |
3447 | unsigned int m_genericArgsCount; |
3448 | unsigned int m_genericArgsNodeCount; |
3449 | SIZE_T m_stringSize; |
3450 | BYTE *m_argData; |
3451 | MethodDesc *m_md; |
3452 | PCODE m_targetCodeAddr; |
3453 | ARG_SLOT m_result[NUMBER_RETURNVALUE_SLOTS]; |
3454 | TypeHandle m_resultType; |
3455 | SIZE_T m_arrayRank; |
3456 | FUNC_EVAL_ABORT_TYPE m_aborting; // Has an abort been requested, and what type. |
3457 | bool m_aborted; // Was this eval aborted |
3458 | bool m_completed; // Is the eval complete - successfully or by aborting |
3459 | bool m_evalDuringException; |
3460 | bool m_rethrowAbortException; |
3461 | Thread::ThreadAbortRequester m_requester; // For aborts, what kind? |
3462 | VMPTR_OBJECTHANDLE m_vmObjectHandle; |
3463 | TypeHandle m_ownerTypeHandle; |
3464 | DebuggerEvalBreakpointInfoSegment* m_bpInfoSegment; |
3465 | |
3466 | DebuggerEval(T_CONTEXT * pContext, DebuggerIPCE_FuncEvalInfo * pEvalInfo, bool fInException); |
3467 | |
3468 | // This constructor is only used when setting up an eval to re-abort a thread. |
3469 | DebuggerEval(T_CONTEXT * pContext, Thread * pThread, Thread::ThreadAbortRequester requester); |
3470 | |
3471 | bool Init() |
3472 | { |
3473 | _ASSERTE(DbgIsExecutable(&m_bpInfoSegment->m_breakpointInstruction, sizeof(m_bpInfoSegment->m_breakpointInstruction))); |
3474 | return true; |
3475 | } |
3476 | |
3477 | // The m_argData buffer holds both the type arg data (for generics) and the main argument data. |
3478 | // |
3479 | // For DB_IPCE_FET_NEW_STRING it holds the data specifying the string to create. |
3480 | DebuggerIPCE_TypeArgData *GetTypeArgData() |
3481 | { |
3482 | LIMITED_METHOD_CONTRACT; |
3483 | return (DebuggerIPCE_TypeArgData *) (m_argData); |
3484 | } |
3485 | |
3486 | DebuggerIPCE_FuncEvalArgData *GetArgData() |
3487 | { |
3488 | LIMITED_METHOD_CONTRACT; |
3489 | return (DebuggerIPCE_FuncEvalArgData*) (m_argData + m_genericArgsNodeCount * sizeof(DebuggerIPCE_TypeArgData)); |
3490 | } |
3491 | |
3492 | WCHAR *GetNewStringArgData() |
3493 | { |
3494 | LIMITED_METHOD_CONTRACT; |
3495 | _ASSERTE(m_evalType == DB_IPCE_FET_NEW_STRING); |
3496 | return (WCHAR*)m_argData; |
3497 | } |
3498 | |
3499 | ~DebuggerEval() |
3500 | { |
3501 | WRAPPER_NO_CONTRACT; |
3502 | |
3503 | // Clean up any temporary buffers used to send the argument type information. These were allocated |
        // in response to a GET_BUFFER message.
3505 | DebuggerIPCE_FuncEvalArgData *argData = GetArgData(); |
3506 | for (unsigned int i = 0; i < m_argCount; i++) |
3507 | { |
3508 | if (argData[i].fullArgType != NULL) |
3509 | { |
3510 | _ASSERTE(g_pDebugger != NULL); |
3511 | g_pDebugger->ReleaseRemoteBuffer((BYTE*)argData[i].fullArgType, true); |
3512 | } |
3513 | } |
3514 | |
3515 | // Clean up the array of argument information. This was allocated as part of Func Eval setup. |
3516 | if (m_argData) |
3517 | { |
3518 | DeleteInteropSafe(m_argData); |
3519 | } |
3520 | |
3521 | #ifdef _DEBUG |
3522 | // Set flags to strategic values in case we access deleted memory. |
3523 | m_completed = false; |
3524 | m_rethrowAbortException = true; |
3525 | #endif |
3526 | } |
3527 | }; |
3528 | |
3529 | /* ------------------------------------------------------------------------ * |
3530 | * New/delete overrides to use the debugger's private heap |
3531 | * ------------------------------------------------------------------------ */ |
3532 | |
3533 | class InteropSafe {}; |
3534 | extern InteropSafe interopsafe; |
3535 | |
3536 | class InteropSafeExecutable {}; |
3537 | extern InteropSafeExecutable interopsafeEXEC; |
3538 | |
3539 | #ifndef DACCESS_COMPILE |
3540 | inline void * __cdecl operator new(size_t n, const InteropSafe&) |
3541 | { |
3542 | CONTRACTL |
3543 | { |
3544 | THROWS; // throw on OOM |
3545 | GC_NOTRIGGER; |
3546 | } |
3547 | CONTRACTL_END; |
3548 | |
3549 | _ASSERTE(g_pDebugger != NULL); |
3550 | void *result = g_pDebugger->GetInteropSafeHeap()->Alloc((DWORD)n); |
3551 | if (result == NULL) { |
3552 | ThrowOutOfMemory(); |
3553 | } |
3554 | return result; |
3555 | } |
3556 | |
3557 | inline void * __cdecl operator new[](size_t n, const InteropSafe&) |
3558 | { |
3559 | CONTRACTL |
3560 | { |
3561 | THROWS; // throw on OOM |
3562 | GC_NOTRIGGER; |
3563 | } |
3564 | CONTRACTL_END; |
3565 | _ASSERTE(g_pDebugger != NULL); |
3566 | void *result = g_pDebugger->GetInteropSafeHeap()->Alloc((DWORD)n); |
3567 | if (result == NULL) { |
3568 | ThrowOutOfMemory(); |
3569 | } |
3570 | return result; |
3571 | } |
3572 | |
3573 | inline void * __cdecl operator new(size_t n, const InteropSafe&, const NoThrow&) throw() |
3574 | { |
3575 | CONTRACTL |
3576 | { |
3577 | NOTHROW; |
3578 | GC_NOTRIGGER; |
3579 | } |
3580 | CONTRACTL_END; |
3581 | |
3582 | _ASSERTE(g_pDebugger != NULL); |
3583 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3584 | if (pHeap == NULL) |
3585 | { |
3586 | return NULL; |
3587 | } |
3588 | void *result = pHeap->Alloc((DWORD)n); |
3589 | return result; |
3590 | } |
3591 | |
3592 | inline void * __cdecl operator new[](size_t n, const InteropSafe&, const NoThrow&) throw() |
3593 | { |
3594 | CONTRACTL |
3595 | { |
3596 | NOTHROW; |
3597 | GC_NOTRIGGER; |
3598 | } |
3599 | CONTRACTL_END; |
3600 | |
3601 | _ASSERTE(g_pDebugger != NULL); |
3602 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3603 | if (pHeap == NULL) |
3604 | { |
3605 | return NULL; |
3606 | } |
3607 | void *result = pHeap->Alloc((DWORD)n); |
3608 | return result; |
3609 | } |
3610 | |
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception during a
// placement new-expression, this delete operator is invoked automatically to free the allocation.
3613 | inline void __cdecl operator delete(void *p, const InteropSafe&) |
3614 | { |
3615 | CONTRACTL |
3616 | { |
3617 | NOTHROW; |
3618 | GC_NOTRIGGER; |
3619 | } |
3620 | CONTRACTL_END; |
3621 | |
3622 | if (p != NULL) |
3623 | { |
3624 | _ASSERTE(g_pDebugger != NULL); |
3625 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3626 | _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting |
3627 | pHeap->Free(p); |
3628 | } |
3629 | } |
3630 | |
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception during a
// placement new-expression, this delete operator is invoked automatically to free the allocation.
3633 | inline void __cdecl operator delete[](void *p, const InteropSafe&) |
3634 | { |
3635 | CONTRACTL |
3636 | { |
3637 | NOTHROW; |
3638 | GC_NOTRIGGER; |
3639 | } |
3640 | CONTRACTL_END; |
3641 | |
3642 | if (p != NULL) |
3643 | { |
3644 | _ASSERTE(g_pDebugger != NULL); |
3645 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3646 | _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting |
3647 | |
3648 | pHeap->Free(p); |
3649 | } |
3650 | } |
3651 | |
3652 | // |
// Interop-safe delete to match the interop-safe operator new overloads above. There is no C++ syntax for explicitly
// invoking those placement delete operators, so we use this function template to accomplish the same thing.
3655 | // |
3656 | template<class T> void DeleteInteropSafe(T *p) |
3657 | { |
3658 | CONTRACTL |
3659 | { |
3660 | NOTHROW; |
3661 | GC_NOTRIGGER; |
3662 | } |
3663 | CONTRACTL_END; |
3664 | |
3665 | // Don't stop a thread that may hold the Interop-safe heap lock. |
    // It may be in preemptive mode, but it's still "inside" the CLR and so inside the "Can't-Stop-Region".
3667 | CantStopHolder hHolder; |
3668 | |
3669 | if (p != NULL) |
3670 | { |
3671 | p->~T(); |
3672 | |
3673 | _ASSERTE(g_pDebugger != NULL); |
3674 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeHeap_NoThrow(); |
3675 | _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting |
3676 | |
3677 | pHeap->Free(p); |
3678 | } |
3679 | } |
3680 | |
3681 | inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&) |
3682 | { |
3683 | CONTRACTL |
3684 | { |
3685 | THROWS; // throw on OOM |
3686 | GC_NOTRIGGER; |
3687 | } |
3688 | CONTRACTL_END; |
3689 | |
3690 | _ASSERTE(g_pDebugger != NULL); |
3691 | void *result = g_pDebugger->GetInteropSafeExecutableHeap()->Alloc((DWORD)n); |
3692 | if (result == NULL) { |
3693 | ThrowOutOfMemory(); |
3694 | } |
3695 | return result; |
3696 | } |
3697 | |
3698 | inline void * __cdecl operator new(size_t n, const InteropSafeExecutable&, const NoThrow&) throw() |
3699 | { |
3700 | CONTRACTL |
3701 | { |
3702 | NOTHROW; |
3703 | GC_NOTRIGGER; |
3704 | } |
3705 | CONTRACTL_END; |
3706 | |
3707 | _ASSERTE(g_pDebugger != NULL); |
3708 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow(); |
3709 | if (pHeap == NULL) |
3710 | { |
3711 | return NULL; |
3712 | } |
3713 | void *result = pHeap->Alloc((DWORD)n); |
3714 | return result; |
3715 | } |
3716 | |
// Note: there is no C++ syntax for manually invoking this, but if a constructor throws an exception during a
// placement new-expression, this delete operator is invoked automatically to free the allocation.
3719 | inline void __cdecl operator delete(void *p, const InteropSafeExecutable&) |
3720 | { |
3721 | CONTRACTL |
3722 | { |
3723 | NOTHROW; |
3724 | GC_NOTRIGGER; |
3725 | } |
3726 | CONTRACTL_END; |
3727 | |
3728 | if (p != NULL) |
3729 | { |
3730 | _ASSERTE(g_pDebugger != NULL); |
3731 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow(); |
3732 | _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting |
3733 | pHeap->Free(p); |
3734 | } |
3735 | } |
3736 | |
3737 | // |
// Interop-safe delete to match the interop-safe operator new overloads above. There is no C++ syntax for explicitly
// invoking those placement delete operators, so we use this function template to accomplish the same thing.
3740 | // |
3741 | template<class T> void DeleteInteropSafeExecutable(T *p) |
3742 | { |
3743 | CONTRACTL |
3744 | { |
3745 | NOTHROW; |
3746 | GC_NOTRIGGER; |
3747 | } |
3748 | CONTRACTL_END; |
3749 | |
3750 | // Don't stop a thread that may hold the Interop-safe heap lock. |
    // It may be in preemptive mode, but it's still "inside" the CLR and so inside the "Can't-Stop-Region".
3752 | CantStopHolder hHolder; |
3753 | |
3754 | if (p != NULL) |
3755 | { |
3756 | p->~T(); |
3757 | |
3758 | _ASSERTE(g_pDebugger != NULL); |
3759 | DebuggerHeap * pHeap = g_pDebugger->GetInteropSafeExecutableHeap_NoThrow(); |
3760 | _ASSERTE(pHeap != NULL); // should have had heap around if we're deleting |
3761 | |
3762 | pHeap->Free(p); |
3763 | } |
3764 | } |
3765 | #endif // DACCESS_COMPILE |
3766 | |
3767 | |
3768 | #if _DEBUG |
3769 | #define DBG_RUNTIME_MAX ((DB_IPCE_RUNTIME_LAST&0xff)+1) |
3770 | #define DBG_DEBUGGER_MAX ((DB_IPCE_DEBUGGER_LAST&0xff)+1) |
3771 | |
3772 | #define DbgLog(event) DbgLogHelper(event) |
3773 | void DbgLogHelper(DebuggerIPCEventType event); |
3774 | #else |
3775 | #define DbgLog(event) |
3776 | #endif // _DEBUG |
3777 | |
3778 | //----------------------------------------------------------------------------- |
3779 | // Helpers for cleanup |
3780 | // These are various utility functions, mainly where we factor out code. |
3781 | //----------------------------------------------------------------------------- |
3782 | void GetPidDecoratedName(__out_ecount(cBufSizeInChars) WCHAR * pBuf, |
3783 | int cBufSizeInChars, |
3784 | const WCHAR * pPrefix); |
3785 | |
3786 | // Specify type of Win32 event |
3787 | enum EEventResetType { |
3788 | kManualResetEvent = TRUE, |
3789 | kAutoResetEvent = FALSE |
3790 | }; |
3791 | |
3792 | HANDLE CreateWin32EventOrThrow( |
3793 | LPSECURITY_ATTRIBUTES lpEventAttributes, |
3794 | EEventResetType eType, |
3795 | BOOL bInitialState |
3796 | ); |
3797 | |
3798 | HANDLE OpenWin32EventOrThrow( |
3799 | DWORD dwDesiredAccess, |
3800 | BOOL bInheritHandle, |
3801 | LPCWSTR lpName |
3802 | ); |
3803 | |
3804 | #define SENDIPCEVENT_RAW_BEGIN_EX(pDbgLockHolder, gcxStmt) \ |
3805 | { \ |
3806 | ThreadStoreLockHolderWithSuspendReason tsld(ThreadSuspend::SUSPEND_FOR_DEBUGGER); \ |
3807 | Debugger::DebuggerLockHolder *__pDbgLockHolder = pDbgLockHolder; \ |
3808 | gcxStmt; \ |
3809 | g_pDebugger->LockForEventSending(__pDbgLockHolder); |
3810 | |
3811 | #define SENDIPCEVENT_RAW_END_EX \ |
3812 | g_pDebugger->UnlockFromEventSending(__pDbgLockHolder); \ |
3813 | } |
3814 | |
3815 | #define SENDIPCEVENT_RAW_BEGIN(pDbgLockHolder) \ |
3816 | SENDIPCEVENT_RAW_BEGIN_EX(pDbgLockHolder, GCX_PREEMP_EEINTERFACE_TOGGLE_COND(CORDebuggerAttached())) |
3817 | |
3818 | #define SENDIPCEVENT_RAW_END SENDIPCEVENT_RAW_END_EX |
3819 | |
3820 | // Suspend-aware SENDIPCEVENT macros: |
3821 | // Check whether __thread has been suspended by the debugger via SetDebugState(). |
3822 | // If this thread has been suspended, it shouldn't send any event to the RS because the |
3823 | // debugger may not be expecting it. Instead, just leave the lock and retry. |
3824 | // When we leave, we'll enter coop mode first and get suspended if a suspension is in progress. |
3825 | // Afterwards, we'll transition back into preemptive mode, and we'll block because this thread |
3826 | // has been suspended by the debugger (see code:Thread::RareEnablePreemptiveGC). |
3827 | #define SENDIPCEVENT_BEGIN_EX(pDebugger, thread, gcxStmt) \ |
3828 | { \ |
3829 | FireEtwDebugIPCEventStart(); \ |
3830 | bool __fRetry = true; \ |
3831 | do \ |
3832 | { \ |
3833 | { \ |
3834 | Debugger::DebuggerLockHolder __dbgLockHolder(pDebugger, FALSE); \ |
3835 | Debugger::DebuggerLockHolder *__pDbgLockHolder = &__dbgLockHolder; \ |
3836 | gcxStmt; \ |
3837 | ThreadStoreLockHolderWithSuspendReason tsld(ThreadSuspend::SUSPEND_FOR_DEBUGGER); \ |
3838 | g_pDebugger->LockForEventSending(__pDbgLockHolder); \ |
3839 | /* Check if the thread has been suspended by the debugger via SetDebugState(). */ \ |
3840 | if (thread != NULL && thread->HasThreadStateNC(Thread::TSNC_DebuggerUserSuspend)) \ |
3841 | { \ |
            /* Just leave the lock and retry (see the comment above for an explanation) */ \
3843 | } \ |
3844 | else \ |
3845 | { \ |
3846 | __fRetry = false; \ |
3847 | |
3848 | #define SENDIPCEVENT_END_EX \ |
3849 | ; \ |
3850 | } \ |
3851 | g_pDebugger->UnlockFromEventSending(__pDbgLockHolder); \ |
3852 | } /* ~gcxStmt & ~DebuggerLockHolder & ~tsld */ \ |
3853 | } while (__fRetry); \ |
3854 | FireEtwDebugIPCEventEnd(); \ |
3855 | } |
3856 | |
3857 | |
3858 | // The typical SENDIPCEVENT - toggles the GC mode... |
3859 | #define SENDIPCEVENT_BEGIN(pDebugger, thread) \ |
3860 | SENDIPCEVENT_BEGIN_EX(pDebugger, thread, GCX_PREEMP_EEINTERFACE_TOGGLE_IFTHREAD_COND(CORDebuggerAttached())) |
3861 | |
3862 | // Convenience macro to match SENDIPCEVENT_BEGIN |
3863 | #define SENDIPCEVENT_END SENDIPCEVENT_END_EX |
3864 | |
3865 | |
3866 | // Use this if you need to access the DebuggerLockHolder set up by SENDIPCEVENT_BEGIN. |
3867 | // This is valid only between the SENDIPCEVENT_BEGIN / SENDIPCEVENT_END macros |
3868 | #define SENDIPCEVENT_PtrDbgLockHolder __pDbgLockHolder |
3869 | |
3870 | |
3871 | // Common contract for sending events. |
// Used in between SENDIPCEVENT_BEGIN & _END.
3873 | // |
// Can't GC trigger b/c if we're syncing we'll deadlock:
3875 | // - We'll block at the GC toggle (b/c we're syncing). |
3876 | // - But we're holding the LockForEventSending "lock", so we'll block the helper trying to send a |
3877 | // SuspendComplete |
3878 | // |
3879 | // @todo- we could also assert that: |
3880 | // - m_tidLockedForEventSending = GetCurrentThreadId(); |
3881 | #define SENDEVENT_CONTRACT_ITEMS \ |
3882 | GC_NOTRIGGER; \ |
3883 | MODE_PREEMPTIVE; \ |
3884 | PRECONDITION(g_pDebugger->ThreadHoldsLock()); \ |
3885 | PRECONDITION(!g_pDebugger->IsStopped()); \ |
3886 | |
3887 | |
3888 | //----------------------------------------------------------------------------- |
3889 | // Sample usage for sending IPC _Notification_ events. |
// This is different from SendIPCReply (which is used to reply to events
3891 | // initiated by the RS). |
3892 | //----------------------------------------------------------------------------- |
3893 | |
3894 | // Thread *pThread = g_pEEInterface->GetThread(); |
3895 | // SENDIPCEVENT_BEGIN(g_pDebugger, pThread); // or use "this" if inside a Debugger method |
3896 | // _ASSERTE(ThreadHoldsLock()); // we now hold the debugger lock. |
3897 | // // debugger may have detached while we were blocked above. |
3898 | // |
3899 | // if (CORDebuggerAttached()) { |
3900 | // // Send as many IPC events as we wish. |
3901 | // SendIPCEvent(....); |
3902 | // SendIPCEvent(....); |
3903 | // SendIPCEvent(....); |
3904 | // |
3905 | // if (we sent an event) { |
3906 | // TrapAllRuntimeThreads(); |
3907 | // } |
3908 | // } |
3909 | // |
3910 | // // We block here while the debugger responds to the event. |
3911 | // SENDIPCEVENT_END; |
3912 | |
3913 | // Or if we just want to send a single IPC event and block, we can do this: |
3914 | // |
3915 | // < ... Init IPC Event ...> |
3916 | // SendSimpleIPCEventAndBlock(); <-- this will block |
3917 | // |
3918 | // Note we don't have to call SENDIPCEVENT_BEGIN / END in this case. |
3919 | |
3920 | // @todo - further potential cleanup to the IPC sending: |
3921 | // - Make SendIPCEvent + TrapAllRuntimeThreads check for CORDebuggerAttached() so that we |
3922 | // can always call them after SENDIPCEVENT_BEGIN |
// - Assert that SendIPCEvent is only called in between a Begin/End pair
// - Count whether we actually send any IPC events in between a Begin/End pair, and then have
//   SENDIPCEVENT_END call TrapAllRuntimeThreads automatically for us.
3926 | |
3927 | |
3928 | // Include all of the inline stuff now. |
3929 | #include "debugger.inl" |
3930 | |
3931 | |
3932 | // |
3933 | // |
3934 | // |
// The contract defines below should only be used (A) if they apply, and (B) if they are the LEAST
// definitive option for the function you are contracting. They represent the baseline contract
// for each case.
3938 | // |
3939 | // e.g. If a function FOO() throws, always, you should use THROWS, not any of the below. |
3940 | // |
3941 | // |
3942 | // |
3943 | #if _DEBUG |
3944 | |
3945 | #define MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT \ |
3946 | if ((m_pRCThread == NULL) || !m_pRCThread->IsRCThreadReady()) { THROWS; } else { NOTHROW; } |
3947 | |
3948 | #define MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT \ |
3949 | if ((m_pRCThread == NULL) || !m_pRCThread->IsRCThreadReady() || (GetThread() != NULL)) { GC_TRIGGERS; } else { GC_NOTRIGGER; } |
3950 | |
3951 | #define GC_TRIGGERS_FROM_GETJITINFO if (GetThreadNULLOk() != NULL) { GC_TRIGGERS; } else { GC_NOTRIGGER; } |
3952 | |
3953 | // |
3954 | // The DebuggerDataLock lock is UNSAFE_ANYMODE, which means that we cannot |
3955 | // take a GC while someone is holding it. Unfortunately this means that |
3956 | // we cannot contract for a "possible" GC trigger statically, and must |
3957 | // rely on runtime coverage to find any code path that may cause a GC. |
3958 | // |
3959 | #define CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT WRAPPER(GC_TRIGGERS) |
3960 | |
3961 | #else |
3962 | |
3963 | #define MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT |
3964 | #define MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT |
3965 | #define CALLED_IN_DEBUGGERDATALOCK_HOLDER_SCOPE_MAY_GC_TRIGGERS_CONTRACT |
3966 | |
3967 | #define GC_TRIGGERS_FROM_GETJITINFO |
3968 | |
3969 | #endif |
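
// Illustrative sketch of how the baseline items above might be combined in a contract for a
// hypothetical helper-thread-sensitive Debugger method (the function itself is not part of
// this header):
//
//   void Debugger::SomeHelperSensitiveWork()
//   {
//       CONTRACTL
//       {
//           MAY_DO_HELPER_THREAD_DUTY_THROWS_CONTRACT;
//           MAY_DO_HELPER_THREAD_DUTY_GC_TRIGGERS_CONTRACT;
//       }
//       CONTRACTL_END;
//       // ...
//   }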
3970 | |
3971 | // Returns true if the specified IL offset has a special meaning (eg. prolog, etc.) |
3972 | bool DbgIsSpecialILOffset(DWORD offset); |
3973 | |
3974 | #if !defined(_TARGET_X86_) |
3975 | void FixupDispatcherContext(T_DISPATCHER_CONTEXT* pDispatcherContext, T_CONTEXT* pContext, T_CONTEXT* pOriginalContext, PEXCEPTION_ROUTINE pUnwindPersonalityRoutine = NULL); |
3976 | #endif |
3977 | |
3978 | #endif /* DEBUGGER_H_ */ |
3979 | |