1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4//
5// FILE: ProfToEEInterfaceImpl.cpp
6//
7// This module implements the ICorProfilerInfo* interfaces, which allow the
8// Profiler to communicate with the EE. This allows the Profiler DLL to get
9// access to private EE data structures and other things that should never be
10// exported outside of the EE.
11//
12
13//
14// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
15// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
16//
17// PLEASE READ!
18//
19// There are strict rules for how to implement ICorProfilerInfo* methods. Please read
20// https://github.com/dotnet/coreclr/blob/master/Documentation/botr/profilability.md
21// to understand the rules and why they exist.
22//
23// As a reminder, here is a short summary of your responsibilities. Every PUBLIC
24// ENTRYPOINT (from profiler to EE) must have:
25//
26// - An entrypoint macro at the top (see code:#P2CLRRestrictionsOverview). Your choices are:
27// PROFILER_TO_CLR_ENTRYPOINT_SYNC (typical choice):
28// Indicates the method may only be called by the profiler from within
29// a callback (from EE to profiler).
30// PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY
31// Even more restrictive, this indicates the method may only be called
32// from within the Initialize() callback
33// PROFILER_TO_CLR_ENTRYPOINT_ASYNC
34// Indicates this method may be called anytime.
35// THIS IS DANGEROUS. PLEASE READ ABOVE DOC FOR GUIDANCE ON HOW TO SAFELY
36// CODE AN ASYNCHRONOUS METHOD.
37// You may use variants of these macros ending in _EX that accept bit flags (see
38// code:ProfToClrEntrypointFlags) if you need to specify additional parameters to how
39// the entrypoint should behave, though typically you can omit the flags and the
40// default (kP2EENone) will be used.
41//
42// - A complete contract block with comments over every contract choice. Wherever
43// possible, use the preferred contracts (if not possible, you must comment why):
44// NOTHROW
45// GC_NOTRIGGER
46// MODE_ANY
47// CANNOT_TAKE_LOCK
48// SO_NOT_MAINLINE
49// (EE_THREAD_(NOT)_REQUIRED are unenforced and are thus optional. If you wish
50// to specify these, EE_THREAD_NOT_REQUIRED is preferred.)
51// Note that the preferred contracts in this file are DIFFERENT than the preferred
52// contracts for eetoprofinterfaceimpl.cpp.
53//
54// Private helper functions in this file do not have the same preferred contracts as
55// public entrypoints, and they should be contracted following the same guidelines
56// as per the rest of the EE.
57//
58// NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE! NOTE!
59// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
60//
61//
62// #P2CLRRestrictionsOverview
63//
64// The public ICorProfilerInfo(N) functions below have different restrictions on when
65// they're allowed to be called. Listed roughly in order from most to least restrictive:
66// * PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY: Functions that are only
67// allowed to be called while the profiler is initializing on startup, from
68// inside the profiler's ICorProfilerCallback::Initialize method
69// * PROFILER_TO_CLR_ENTRYPOINT_SYNC: Functions that may be called from within any of
70// the profiler's callbacks, or anytime from a thread created by the profiler.
71// These functions may only be called by profilers loaded on startup
72// * PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach): Same as above,
73// except these may be called by startup AND attaching profilers.
74// * PROFILER_TO_CLR_ENTRYPOINT_ASYNC: Functions that may be called at any time and
75// from any thread by a profiler loaded on startup
76// * PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach): Same as above,
77// except these may be called by startup AND attaching profilers.
78//
79// The above restrictions are lifted for certain tests that run with these environment
80// variables set. (These are only available on DEBUG builds--including chk--not retail
81// builds.)
82// * COMPlus_TestOnlyEnableSlowELTHooks:
83// * If nonzero, then on startup the runtime will act as if a profiler was loaded
84// on startup and requested ELT slow-path (even if no profiler is loaded on
85// startup). This will also allow the SetEnterLeaveFunctionHooks(2) info
86// functions to be called outside of Initialize(). If a profiler later
87// attaches and calls these functions, then the slow-path wrapper will call
88// into the profiler's ELT hooks.
89// * COMPlus_TestOnlyEnableObjectAllocatedHook:
90// * If nonzero, then on startup the runtime will act as if a profiler was loaded
91// on startup and requested ObjectAllocated callback (even if no profiler is loaded
92// on startup). If a profiler later attaches and calls these functions, then the
93// ObjectAllocated notifications will call into the profiler's ObjectAllocated callback.
94// * COMPlus_TestOnlyEnableICorProfilerInfo:
95// * If nonzero, then attaching profilers allows to call ICorProfilerInfo inteface,
96// which would otherwise be disallowed for attaching profilers
97// * COMPlus_TestOnlyAllowedEventMask
98// * If a profiler needs to work around the restrictions of either
99// COR_PRF_ALLOWABLE_AFTER_ATTACH or COR_PRF_MONITOR_IMMUTABLE it may set
100// this environment variable. Its value should be a bitmask containing all
101// the flags that are:
102// * normally immutable or disallowed after attach, AND
103// * that the test plans to set after startup and / or by an attaching
104// profiler.
105//
106//
107
108//
109// ======================================================================================
110
111#include "common.h"
112#include <posterror.h>
113#include "proftoeeinterfaceimpl.h"
114#include "proftoeeinterfaceimpl.inl"
115#include "dllimport.h"
116#include "threads.h"
117#include "method.hpp"
118#include "vars.hpp"
119#include "dbginterface.h"
120#include "corprof.h"
121#include "class.h"
122#include "object.h"
123#include "ceegen.h"
124#include "eeconfig.h"
125#include "generics.h"
126#include "gcinfo.h"
127#include "safemath.h"
128#include "threadsuspend.h"
129#include "inlinetracking.h"
130
131#ifdef PROFILING_SUPPORTED
132#include "profilinghelper.h"
133#include "profilinghelper.inl"
134#include "eetoprofinterfaceimpl.inl"
135#include "profilingenumerators.h"
136#endif
137
138#include "profdetach.h"
139
140#include "metadataexports.h"
141
142//---------------------------------------------------------------------------------------
143// Helpers
144
145// An OR'd combination of these flags may be specified in the _EX entrypoint macros to
146// customize the behavior.
147enum ProfToClrEntrypointFlags
148{
149 // Just use the default behavior (this one is used if the non-_EX entrypoint macro is
150 // specified without any flags).
151 kP2EENone = 0x00000000,
152
153 // By default, Info functions are not allowed to be used by an attaching profiler.
154 // Specify this flag to override the default.
155 kP2EEAllowableAfterAttach = 0x00000001,
156
157 // This info method has a GC_TRIGGERS contract. Whereas contracts are debug-only,
158 // this flag is used in retail builds as well.
159 kP2EETriggers = 0x00000002,
160};
161
162// Default versions of the entrypoint macros use kP2EENone if no
163// ProfToClrEntrypointFlags are specified
164
165#define PROFILER_TO_CLR_ENTRYPOINT_ASYNC(logParams) \
166 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EENone, logParams)
167
168#define PROFILER_TO_CLR_ENTRYPOINT_SYNC(logParams) \
169 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EENone, logParams)
170
171// ASYNC entrypoints log and ensure an attaching profiler isn't making a call that's
172// only supported by startup profilers.
173
174#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags) \
175 do \
176 { \
177 if ((((p2eeFlags) & kP2EEAllowableAfterAttach) == 0) && \
178 (g_profControlBlock.pProfInterface->IsLoadedViaAttach())) \
179 { \
180 LOG((LF_CORPROF, \
181 LL_ERROR, \
182 "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_FOR_ATTACHING_PROFILER " \
183 "due to a call illegally made by an attaching profiler \n")); \
184 return CORPROF_E_UNSUPPORTED_FOR_ATTACHING_PROFILER; \
185 } \
186 } while(0)
187
188#ifdef _DEBUG
189
190#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags) \
191 do \
192 { \
193 if (!((&g_profControlBlock)->fTestOnlyEnableICorProfilerInfo)) \
194 { \
195 CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags); \
196 } \
197 } while(0)
198
199
200
201#else //_DEBUG
202
203#define CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags) \
204 do \
205 { \
206 CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED_HELPER(p2eeFlags); \
207 } while(0)
208
209#endif //_DEBUG
210
211#define PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(p2eeFlags, logParams) \
212 do \
213 { \
214 INCONTRACT(AssertTriggersContract(((p2eeFlags) & kP2EETriggers))); \
215 _ASSERTE(g_profControlBlock.curProfStatus.Get() != kProfStatusNone); \
216 LOG(logParams); \
217 /* If profiler was neutered, disallow call */ \
218 if (g_profControlBlock.curProfStatus.Get() == kProfStatusDetaching) \
219 { \
220 LOG((LF_CORPROF, \
221 LL_ERROR, \
222 "**PROF: ERROR: Returning CORPROF_E_PROFILER_DETACHING " \
223 "due to a post-neutered profiler call\n")); \
224 return CORPROF_E_PROFILER_DETACHING; \
225 } \
226 CHECK_IF_ATTACHING_PROFILER_IS_ALLOWED(p2eeFlags); \
227 } while(0)
228
229// SYNC entrypoints must ensure the current EE Thread shows evidence that we're
230// inside a callback. If there's no EE Thread, then we automatically "pass"
231// the check, and the SYNC call is allowed.
232#define PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(p2eeFlags, logParams) \
233 do \
234 { \
235 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(p2eeFlags, logParams); \
236 DWORD __dwExpectedCallbackState = COR_PRF_CALLBACKSTATE_INCALLBACK; \
237 if (((p2eeFlags) & kP2EETriggers) != 0) \
238 { \
239 __dwExpectedCallbackState |= COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE; \
240 } \
241 if (!AreCallbackStateFlagsSet(__dwExpectedCallbackState)) \
242 { \
243 LOG((LF_CORPROF, \
244 LL_ERROR, \
245 "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_CALL_SEQUENCE " \
246 "due to illegal asynchronous profiler call\n")); \
247 return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE; \
248 } \
249 } while(0)
250
251// INIT_ONLY entrypoints must ensure we're executing inside the profiler's
252// Initialize() implementation on startup (attach init doesn't count!).
253#define PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY(logParams) \
254 do \
255 { \
256 PROFILER_TO_CLR_ENTRYPOINT_ASYNC(logParams); \
257 if (g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForStartupLoad && \
258 g_profControlBlock.curProfStatus.Get() != kProfStatusInitializingForAttachLoad) \
259 { \
260 return CORPROF_E_CALL_ONLY_FROM_INIT; \
261 } \
262 } while(0)
263
264// This macro is used to ensure that the current thread is not in a forbid
265// suspend region. Some methods are allowed to be called asynchronously,
266// but some of them call JIT functions that take a reader lock. So we need to ensure
267// the current thread hasn't been hijacked by a profiler while it was holding the writer lock.
268// Checking the ForbidSuspendThread region is a sufficient test for this
269#define FAIL_IF_IN_FORBID_SUSPEND_REGION() \
270 do \
271 { \
272 Thread * __pThread = GetThreadNULLOk(); \
273 if ((__pThread != NULL) && (__pThread->IsInForbidSuspendRegion())) \
274 { \
275 return CORPROF_E_ASYNCHRONOUS_UNSAFE; \
276 } \
277 } while(0)
278
279//
280// This type is an overlay onto the exported type COR_PRF_FRAME_INFO.
281// The first four fields *must* line up with the same fields in the
282// exported type. After that, we can add to the end as we wish.
283//
284typedef struct _COR_PRF_FRAME_INFO_INTERNAL {
285 USHORT size;
286 USHORT version;
287 FunctionID funcID;
288 UINT_PTR IP;
289 void *extraArg;
290 LPVOID thisArg;
291} COR_PRF_FRAME_INFO_INTERNAL, *PCOR_PRF_FRAME_INFO_INTERNAL;
292
293//
294// After we ship a product with a certain struct type for COR_PRF_FRAME_INFO_INTERNAL
295// we have that as a version. If we change that in a later product, we can increment
296// the counter below and then we can properly do versioning.
297//
298#define COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION 1
299
300
301//---------------------------------------------------------------------------------------
302//
303// Converts TypeHandle to a ClassID
304//
305// Arguments:
306// th - TypeHandle to convert
307//
308// Return Value:
309// Requested ClassID.
310//
311
312ClassID TypeHandleToClassID(TypeHandle th)
313{
314 WRAPPER_NO_CONTRACT;
315 return reinterpret_cast<ClassID> (th.AsPtr());
316}
317
318//---------------------------------------------------------------------------------------
319//
320// Converts TypeHandle for a non-generic type to a ClassID
321//
322// Arguments:
323// th - TypeHandle to convert
324//
325// Return Value:
326// Requested ClassID. NULL if th represents a generic type
327//
328#ifdef PROFILING_SUPPORTED
329
330static ClassID NonGenericTypeHandleToClassID(TypeHandle th)
331{
332 CONTRACTL
333 {
334 SO_NOT_MAINLINE;
335 NOTHROW;
336 GC_NOTRIGGER;
337 MODE_ANY;
338 } CONTRACTL_END;
339
340 if ((!th.IsNull()) && (th.HasInstantiation()))
341{
342 return NULL;
343}
344
345 return TypeHandleToClassID(th);
346}
347
348//---------------------------------------------------------------------------------------
349//
350// Converts MethodDesc * to FunctionID
351//
352// Arguments:
353// pMD - MethodDesc * to convert
354//
355// Return Value:
356// Requested FunctionID
357//
358
359static FunctionID MethodDescToFunctionID(MethodDesc * pMD)
360{
361 LIMITED_METHOD_CONTRACT;
362 return reinterpret_cast< FunctionID > (pMD);
363}
364
365#endif
366
367//---------------------------------------------------------------------------------------
368//
369// Converts FunctionID to MethodDesc *
370//
371// Arguments:
372// functionID - FunctionID to convert
373//
374// Return Value:
375// MethodDesc * requested
376//
377
378MethodDesc *FunctionIdToMethodDesc(FunctionID functionID)
379{
380 LIMITED_METHOD_CONTRACT;
381
382 MethodDesc *pMethodDesc;
383
384 pMethodDesc = reinterpret_cast< MethodDesc* >(functionID);
385
386 _ASSERTE(pMethodDesc != NULL);
387 return pMethodDesc;
388}
389
390// (See comments for ArrayKindFromTypeHandle below.)
391typedef enum
392{
393 ARRAY_KIND_TYPEDESC, // Normal, garden-variety typedesc array
394 ARRAY_KIND_METHODTABLE, // Weirdo array with its own unshared methodtable (e.g., System.Object[])
395 ARRAY_KIND_NOTARRAY, // Not an array
396} ARRAY_KIND;
397
398//---------------------------------------------------------------------------------------
399//
400// A couple Info calls need to understand what constitutes an "array", and what
401// kinds of arrays there are. ArrayKindFromTypeHandle tries to put some of this
402// knowledge in a single place
403//
404// Arguments:
405// th - TypeHandle to inspect
406//
407// Return Value:
408// ARRAY_KIND describing th
409//
410
411inline ARRAY_KIND ArrayKindFromTypeHandle(TypeHandle th)
412{
413 LIMITED_METHOD_CONTRACT;
414
415 if (th.IsArray())
416 {
417 return ARRAY_KIND_TYPEDESC;
418 }
419
420 if (!th.IsTypeDesc() && th.GetMethodTable()->IsArray())
421 {
422 return ARRAY_KIND_METHODTABLE;
423 }
424
425 return ARRAY_KIND_NOTARRAY;
426}
427
428#ifdef PROFILING_SUPPORTED
429
430//---------------------------------------------------------------------------------------
431// ModuleILHeap IUnknown implementation
432//
433// Function headers unnecessary, as MSDN adequately documents IUnknown
434//
435
436ULONG ModuleILHeap::AddRef()
437{
438 // Lifetime of this object is controlled entirely by the CLR. This
439 // is created on first request, and is automatically destroyed when
440 // the profiler is detached.
441 return 1;
442}
443
444
445ULONG ModuleILHeap::Release()
446{
447 // Lifetime of this object is controlled entirely by the CLR. This
448 // is created on first request, and is automatically destroyed when
449 // the profiler is detached.
450 return 1;
451}
452
453
454HRESULT ModuleILHeap::QueryInterface(REFIID riid, void ** pp)
455{
456 HRESULT hr = S_OK;
457
458 if (pp == NULL)
459 {
460 return E_POINTER;
461 }
462
463 *pp = 0;
464 if (riid == IID_IUnknown)
465 {
466 *pp = static_cast<IUnknown *>(this);
467 }
468 else if (riid == IID_IMethodMalloc)
469 {
470 *pp = static_cast<IMethodMalloc *>(this);
471 }
472 else
473 {
474 hr = E_NOINTERFACE;
475 }
476
477 if (hr == S_OK)
478 {
479 // CLR manages lifetime of this object, but in case that changes (or
480 // this code gets copied/pasted elsewhere), we'll still AddRef here so
481 // QI remains a good citizen either way.
482 AddRef();
483 }
484 return hr;
485}
486
487//---------------------------------------------------------------------------------------
488// Profiler entrypoint to allocate space from this module's heap.
489//
490// Arguments
491// cb - size in bytes of allocation request
492//
493// Return value
494// pointer to allocated memory, or NULL if there was an error
495
496void * STDMETHODCALLTYPE ModuleILHeap::Alloc(ULONG cb)
497{
498 CONTRACTL
499 {
500 // Yay!
501 NOTHROW;
502
503 // (see GC_TRIGGERS comment below)
504 CAN_TAKE_LOCK;
505
506 // Allocations using loader heaps below enter a critsec, which switches
507 // to preemptive, which is effectively a GC trigger
508 GC_TRIGGERS;
509
510 // Yay!
511 MODE_ANY;
512
513 SO_NOT_MAINLINE;
514 }
515 CONTRACTL_END;
516
517 LOG((LF_CORPROF, LL_INFO1000, "**PROF: ModuleILHeap::Alloc 0x%08xp.\n", cb));
518
519 if (cb == 0)
520 {
521 return NULL;
522 }
523
524 return new (nothrow) BYTE[cb];
525}
526
527//---------------------------------------------------------------------------------------
528// The one and only instance of the IL heap
529
530ModuleILHeap ModuleILHeap::s_Heap;
531
532//---------------------------------------------------------------------------------------
533// Implementation of ProfToEEInterfaceImpl's IUnknown
534
535//
536// The VM controls the lifetime of ProfToEEInterfaceImpl, not the
537// profiler. We'll automatically take care of cleanup when profilers
538// unload and detach.
539//
540
541ULONG STDMETHODCALLTYPE ProfToEEInterfaceImpl::AddRef()
542 {
543 LIMITED_METHOD_CONTRACT;
544 return 1;
545}
546
547ULONG STDMETHODCALLTYPE ProfToEEInterfaceImpl::Release()
548{
549 LIMITED_METHOD_CONTRACT;
550 return 1;
551}
552
553COM_METHOD ProfToEEInterfaceImpl::QueryInterface(REFIID id, void ** pInterface)
554{
555 if (pInterface == NULL)
556 {
557 return E_POINTER;
558 }
559
560 if (id == IID_ICorProfilerInfo)
561 {
562 *pInterface = static_cast<ICorProfilerInfo *>(this);
563 }
564 else if (id == IID_ICorProfilerInfo2)
565 {
566 *pInterface = static_cast<ICorProfilerInfo2 *>(this);
567 }
568 else if (id == IID_ICorProfilerInfo3)
569 {
570 *pInterface = static_cast<ICorProfilerInfo3 *>(this);
571 }
572 else if (id == IID_ICorProfilerInfo4)
573 {
574 *pInterface = static_cast<ICorProfilerInfo4 *>(this);
575 }
576 else if (id == IID_ICorProfilerInfo5)
577 {
578 *pInterface = static_cast<ICorProfilerInfo5 *>(this);
579 }
580 else if (id == IID_ICorProfilerInfo6)
581 {
582 *pInterface = static_cast<ICorProfilerInfo6 *>(this);
583 }
584 else if (id == IID_ICorProfilerInfo7)
585 {
586 *pInterface = static_cast<ICorProfilerInfo7 *>(this);
587 }
588 else if (id == IID_ICorProfilerInfo8)
589 {
590 *pInterface = static_cast<ICorProfilerInfo8 *>(this);
591 }
592 else if (id == IID_ICorProfilerInfo9)
593 {
594 *pInterface = static_cast<ICorProfilerInfo9 *>(this);
595 }
596 else if (id == IID_IUnknown)
597 {
598 *pInterface = static_cast<IUnknown *>(static_cast<ICorProfilerInfo *>(this));
599 }
600 else
601 {
602 *pInterface = NULL;
603 return E_NOINTERFACE;
604 }
605
606 // CLR manages lifetime of this object, but in case that changes (or
607 // this code gets copied/pasted elsewhere), we'll still AddRef here so
608 // QI remains a good citizen either way.
609 AddRef();
610
611 return S_OK;
612}
613#endif // PROFILING_SUPPORTED
614
615//---------------------------------------------------------------------------------------
616//
617// GC-related helpers. These are called from elsewhere in the EE to determine profiler
618// state, and to update the profiling API with info from the GC.
619//
620
621//---------------------------------------------------------------------------------------
622//
623// ProfilerObjectAllocatedCallback is called if a profiler is attached, requesting
624// ObjectAllocated callbacks.
625//
626// Arguments:
627// objref - Reference to newly-allocated object
628// classId - ClassID of newly-allocated object
629//
630
631void __stdcall ProfilerObjectAllocatedCallback(OBJECTREF objref, ClassID classId)
632{
633 CONTRACTL
634{
635 THROWS;
636 GC_TRIGGERS;
637 MODE_COOPERATIVE;
638 }
639 CONTRACTL_END;
640
641 TypeHandle th = OBJECTREFToObject(objref)->GetTypeHandle();
642
643 // WARNING: objref can move as a result of the ObjectAllocated() call below if
644 // the profiler causes a GC, so any operations on the objref should occur above
645 // this comment (unless you're prepared to add a GCPROTECT around the objref).
646
647#ifdef PROFILING_SUPPORTED
648 // Notify the profiler of the allocation
649
650 {
651 BEGIN_PIN_PROFILER(CORProfilerTrackAllocations());
652 // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs.
653 // Thus we strip any instantiations of the ClassID (which is really a type handle) here.
654 g_profControlBlock.pProfInterface->ObjectAllocated(
655 (ObjectID) OBJECTREFToObject(objref),
656 classId);
657 END_PIN_PROFILER();
658 }
659#endif // PROFILING_SUPPORTED
660}
661
662//---------------------------------------------------------------------------------------
663//
664// Wrapper around the GC Started callback
665//
666// Arguments:
667// generation - Generation being collected
668// induced - Was this GC induced by GC.Collect?
669//
670
671void __stdcall GarbageCollectionStartedCallback(int generation, BOOL induced)
672{
673 CONTRACTL
674 {
675 NOTHROW;
676 GC_NOTRIGGER;
677 MODE_ANY; // can be called even on GC threads
678 }
679 CONTRACTL_END;
680
681#ifdef PROFILING_SUPPORTED
682 //
683 // Mark that we are starting a GC. This will allow profilers to do limited object inspection
684 // during callbacks that occur while a GC is happening.
685 //
686 g_profControlBlock.fGCInProgress = TRUE;
687
688 // Notify the profiler of start of the collection
689 {
690 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
691 BOOL generationCollected[COR_PRF_GC_LARGE_OBJECT_HEAP+1];
692 if (generation == COR_PRF_GC_GEN_2)
693 generation = COR_PRF_GC_LARGE_OBJECT_HEAP;
694 for (int gen = 0; gen <= COR_PRF_GC_LARGE_OBJECT_HEAP; gen++)
695 generationCollected[gen] = gen <= generation;
696
697 g_profControlBlock.pProfInterface->GarbageCollectionStarted(
698 COR_PRF_GC_LARGE_OBJECT_HEAP+1,
699 generationCollected,
700 induced ? COR_PRF_GC_INDUCED : COR_PRF_GC_OTHER);
701 END_PIN_PROFILER();
702 }
703#endif // PROFILING_SUPPORTED
704}
705
706//---------------------------------------------------------------------------------------
707//
708// Wrapper around the GC Finished callback
709//
710
711void __stdcall GarbageCollectionFinishedCallback()
712{
713 CONTRACTL
714 {
715 NOTHROW;
716 GC_NOTRIGGER;
717 MODE_ANY; // can be called even on GC threads
718 }
719 CONTRACTL_END;
720
721#ifdef PROFILING_SUPPORTED
722 // Notify the profiler of end of the collection
723 {
724 BEGIN_PIN_PROFILER(CORProfilerTrackGC());
725 g_profControlBlock.pProfInterface->GarbageCollectionFinished();
726 END_PIN_PROFILER();
727 }
728
729 // Mark that GC is finished.
730 g_profControlBlock.fGCInProgress = FALSE;
731#endif // PROFILING_SUPPORTED
732}
733
734#ifdef PROFILING_SUPPORTED
735//---------------------------------------------------------------------------------------
736//
737// Describes a GC generation by number and address range
738//
739
740struct GenerationDesc
741{
742 int generation;
743 BYTE *rangeStart;
744 BYTE *rangeEnd;
745 BYTE *rangeEndReserved;
746};
747
748struct GenerationTable
749{
750 ULONG count;
751 ULONG capacity;
752 static const ULONG defaultCapacity = 4; // that's the minimum for 3 generation plus the large object heap
753 GenerationTable *prev;
754 GenerationDesc *genDescTable;
755#ifdef _DEBUG
756 ULONG magic;
757#define GENERATION_TABLE_MAGIC 0x34781256
758#define GENERATION_TABLE_BAD_MAGIC 0x55aa55aa
759#endif
760};
761
762
763//---------------------------------------------------------------------------------------
764//
765// This is a callback used by the GC when we call GCHeapUtilities::DiagDescrGenerations
766// (from UpdateGenerationBounds() below). The GC gives us generation information through
767// this callback, which we use to update the GenerationDesc in the corresponding
768// GenerationTable
769//
770// Arguments:
771// context - The containing GenerationTable
772// generation - Generation number
773// rangeStart - Address where generation starts
774// rangeEnd - Address where generation ends
775// rangeEndReserved - Address where generation reserved space ends
776//
777
778// static
779static void GenWalkFunc(void * context,
780 int generation,
781 BYTE * rangeStart,
782 BYTE * rangeEnd,
783 BYTE * rangeEndReserved)
784{
785 CONTRACT_VOID
786 {
787 NOTHROW;
788 GC_NOTRIGGER;
789 MODE_ANY; // can be called even on GC threads
790 PRECONDITION(CheckPointer(context));
791 PRECONDITION(0 <= generation && generation <= 3);
792 PRECONDITION(CheckPointer(rangeStart));
793 PRECONDITION(CheckPointer(rangeEnd));
794 PRECONDITION(CheckPointer(rangeEndReserved));
795 } CONTRACT_END;
796
797 GenerationTable *generationTable = (GenerationTable *)context;
798
799 _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
800
801 ULONG count = generationTable->count;
802 if (count >= generationTable->capacity)
803 {
804 ULONG newCapacity = generationTable->capacity == 0 ? GenerationTable::defaultCapacity : generationTable->capacity * 2;
805 GenerationDesc *newGenDescTable = new (nothrow) GenerationDesc[newCapacity];
806 if (newGenDescTable == NULL)
807 {
808 // if we can't allocate a bigger table, we'll have to ignore this call
809 RETURN;
810 }
811 memcpy(newGenDescTable, generationTable->genDescTable, sizeof(generationTable->genDescTable[0]) * generationTable->count);
812 delete[] generationTable->genDescTable;
813 generationTable->genDescTable = newGenDescTable;
814 generationTable->capacity = newCapacity;
815 }
816 _ASSERTE(count < generationTable->capacity);
817
818 GenerationDesc *genDescTable = generationTable->genDescTable;
819
820 genDescTable[count].generation = generation;
821 genDescTable[count].rangeStart = rangeStart;
822 genDescTable[count].rangeEnd = rangeEnd;
823 genDescTable[count].rangeEndReserved = rangeEndReserved;
824
825 generationTable->count = count + 1;
826}
827
828// This is the table of generation bounds updated by the gc
829// and read by the profiler. So this is a single writer,
830// multiple readers scenario.
831static GenerationTable *s_currentGenerationTable;
832
833// The generation table is updated atomically by replacing the
834// pointer to it. The only tricky part is knowing when
835// the old table can be deleted.
836static Volatile<LONG> s_generationTableLock;
837
838// This is just so we can assert there's a single writer
839#ifdef ENABLE_CONTRACTS
840static Volatile<LONG> s_generationTableWriterCount;
841#endif
842#endif // PROFILING_SUPPORTED
843
844//---------------------------------------------------------------------------------------
845//
846// This is called from the gc to push a new set of generation bounds
847//
848
849void __stdcall UpdateGenerationBounds()
850{
851 CONTRACT_VOID
852 {
853 NOTHROW;
854 GC_NOTRIGGER;
855 MODE_ANY; // can be called even on GC threads
856#ifdef PROFILING_SUPPORTED
857 PRECONDITION(FastInterlockIncrement(&s_generationTableWriterCount) == 1);
858 POSTCONDITION(FastInterlockDecrement(&s_generationTableWriterCount) == 0);
859#endif // PROFILING_SUPPORTED
860 } CONTRACT_END;
861
862#ifdef PROFILING_SUPPORTED
863 // Notify the profiler of start of the collection
864 if (CORProfilerTrackGC())
865 {
866 // generate a new generation table
867 GenerationTable *newGenerationTable = new (nothrow) GenerationTable();
868 if (newGenerationTable == NULL)
869 RETURN;
870 newGenerationTable->count = 0;
871 newGenerationTable->capacity = GenerationTable::defaultCapacity;
872 // if there is already a current table, use its count as a guess for the capacity
873 if (s_currentGenerationTable != NULL)
874 newGenerationTable->capacity = s_currentGenerationTable->count;
875 newGenerationTable->prev = NULL;
876 newGenerationTable->genDescTable = new (nothrow) GenerationDesc[newGenerationTable->capacity];
877 if (newGenerationTable->genDescTable == NULL)
878 newGenerationTable->capacity = 0;
879
880#ifdef _DEBUG
881 newGenerationTable->magic = GENERATION_TABLE_MAGIC;
882#endif
883 // fill in the values by calling back into the gc, which will report
884 // the ranges by calling GenWalkFunc for each one
885 IGCHeap *hp = GCHeapUtilities::GetGCHeap();
886 hp->DiagDescrGenerations(GenWalkFunc, newGenerationTable);
887
888 // remember the old table and plug in the new one
889 GenerationTable *oldGenerationTable = s_currentGenerationTable;
890 s_currentGenerationTable = newGenerationTable;
891
892 // WARNING: tricky code!
893 //
894 // We sample the generation table lock *after* plugging in the new table
895 // We do so using an interlocked operation so the cpu can't reorder
896 // the write to the s_currentGenerationTable with the increment.
897 // If the interlocked increment returns 1, we know nobody can be using
898 // the old table (readers increment the lock before using the table,
899 // and decrement it afterwards). Any new readers coming in
900 // will use the new table. So it's safe to delete the old
901 // table.
902 // On the other hand, if the interlocked increment returns
903 // something other than one, we put the old table on a list
904 // dangling off of the new one. Next time around, we'll try again
905 // deleting any old tables.
906 if (FastInterlockIncrement(&s_generationTableLock) == 1)
907 {
908 // We know nobody can be using any of the old tables
909 while (oldGenerationTable != NULL)
910 {
911 _ASSERTE(oldGenerationTable->magic == GENERATION_TABLE_MAGIC);
912#ifdef _DEBUG
913 oldGenerationTable->magic = GENERATION_TABLE_BAD_MAGIC;
914#endif
915 GenerationTable *temp = oldGenerationTable;
916 oldGenerationTable = oldGenerationTable->prev;
917 delete[] temp->genDescTable;
918 delete temp;
919 }
920 }
921 else
922 {
923 // put the old table on a list
924 newGenerationTable->prev = oldGenerationTable;
925 }
926 FastInterlockDecrement(&s_generationTableLock);
927 }
928#endif // PROFILING_SUPPORTED
929 RETURN;
930}
931
932#ifdef PROFILING_SUPPORTED
933
934//---------------------------------------------------------------------------------------
935//
936// Determines whether we are in a window to allow object inspection.
937//
938// Return Value:
939// Returns S_OK if we can determine that we are in a window to allow object
940// inspection. Otherwise a failure HRESULT is returned
941//
942
943HRESULT AllowObjectInspection()
944{
945 CONTRACTL
946 {
947 NOTHROW;
948 GC_NOTRIGGER;
949 MODE_ANY; // tests for preemptive mode dynamically as its main function so contract enforcement is not appropriate
950 }
951 CONTRACTL_END;
952
953 //
954 // Check first to see if we are in the process of doing a GC and presume that the profiler
955 // is making this object inspection from the same thread that notified of a valid ObjectID.
956 //
957 if (g_profControlBlock.fGCInProgress)
958 {
959 return S_OK;
960 }
961
962 //
963 // Thus we must have a managed thread, and it must be in coop mode.
964 // (That will also guarantee we're in a callback).
965 //
966 Thread * pThread = GetThreadNULLOk();
967
968 if (pThread == NULL)
969 {
970 return CORPROF_E_NOT_MANAGED_THREAD;
971 }
972
973 // Note this is why we don't enforce the contract of being in cooperative mode the whole point
974 // is that clients of this fellow want to return a robust error if not cooperative
975 // so technically they are mode_any although the only true preemptive support they offer
976 // is graceful failure in that case
977 if (!pThread->PreemptiveGCDisabled())
978 {
979 return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
980 }
981
982 return S_OK;
983}
984
985//---------------------------------------------------------------------------------------
986//
987// helper functions for the GC events
988//
989
990
991#endif // PROFILING_SUPPORTED
992
993#if defined(PROFILING_SUPPORTED) || defined(FEATURE_EVENT_TRACE)
994
995//---------------------------------------------------------------------------------------
996//
997// It's generally unsafe for profiling API code to call Get(GCSafe)TypeHandle() on
998// objects, since we can encounter objects on the heap whose types belong to unloading
999// AppDomains. In such cases, getting the type handle of the object could AV. Use this
1000// function instead, which will return NULL for potentially unloaded types.
1001//
1002// Arguments:
1003// pObj - Object * whose ClassID is desired
1004//
1005// Return Value:
1006// ClassID of the object, if it's safe to look it up. Else NULL.
1007//
1008
1009ClassID SafeGetClassIDFromObject(Object * pObj)
1010{
1011 CONTRACTL
1012 {
1013 NOTHROW;
1014 GC_NOTRIGGER;
1015 }
1016 CONTRACTL_END;
1017
1018 TypeHandle th = pObj->GetGCSafeTypeHandleIfPossible();
1019 if(th == NULL)
1020 {
1021 return NULL;
1022 }
1023
1024 return TypeHandleToClassID(th);
1025}
1026
1027//---------------------------------------------------------------------------------------
1028//
1029// Callback of type walk_fn used by IGCHeap::DiagWalkObject. Keeps a count of each
1030// object reference found.
1031//
1032// Arguments:
1033// pBO - Object reference encountered in walk
1034// context - running count of object references encountered
1035//
1036// Return Value:
1037// Always returns TRUE to object walker so it walks the entire object
1038//
1039
1040bool CountContainedObjectRef(Object * pBO, void * context)
1041{
1042 LIMITED_METHOD_CONTRACT;
1043 // Increase the count
1044 (*((size_t *)context))++;
1045
1046 return TRUE;
1047}
1048
1049//---------------------------------------------------------------------------------------
1050//
1051// Callback of type walk_fn used by IGCHeap::DiagWalkObject. Stores each object reference
1052// encountered into an array.
1053//
1054// Arguments:
1055// pBO - Object reference encountered in walk
1056// context - Array of locations within the walked object that point to other
1057// objects. On entry, (*context) points to the next unfilled array
1058// entry. On exit, that location is filled, and (*context) is incremented
1059// to point to the next entry.
1060//
1061// Return Value:
1062// Always returns TRUE to object walker so it walks the entire object
1063//
1064
1065bool SaveContainedObjectRef(Object * pBO, void * context)
1066{
1067 LIMITED_METHOD_CONTRACT;
1068 // Assign the value
1069 **((Object ***)context) = pBO;
1070
1071 // Now increment the array pointer
1072 //
1073 // Note that HeapWalkHelper has already walked the references once to count them up,
1074 // and then allocated an array big enough to hold those references. First time this
1075 // callback is called for a given object, (*context) points to the first entry in the
1076 // array. So "blindly" incrementing (*context) here and using it next time around
1077 // for the next reference, over and over again, should be safe.
1078 (*((Object ***)context))++;
1079
1080 return TRUE;
1081}
1082
1083//---------------------------------------------------------------------------------------
1084//
1085// Callback of type walk_fn used by the GC when walking the heap, to help profapi and ETW
1086// track objects. This guy orchestrates the use of the above callbacks which dig
1087// into object references contained each object encountered by this callback.
1088// This method is defined when either GC_PROFILING is defined or FEATURE_EVENT_TRACING
1089// is defined and can operate fully when only one of the two is defined.
1090//
1091// Arguments:
1092// pBO - Object reference encountered on the heap
1093// pvContext - Pointer to ProfilerWalkHeapContext, containing ETW context built up
1094// during this GC, and which remembers if profapi-profiler is supposed to be called.
1095//
1096// Return Value:
1097// BOOL indicating whether the heap walk should continue.
1098// TRUE=continue
1099// FALSE=stop
1100//
1101extern bool s_forcedGCInProgress;
1102
1103bool HeapWalkHelper(Object * pBO, void * pvContext)
1104{
1105 CONTRACTL
1106 {
1107 NOTHROW;
1108 GC_NOTRIGGER;
1109 SO_INTOLERANT;
1110 MODE_ANY;
1111 }
1112 CONTRACTL_END;
1113
1114 OBJECTREF * arrObjRef = NULL;
1115 size_t cNumRefs = 0;
1116 bool bOnStack = false;
1117 MethodTable * pMT = pBO->GetMethodTable();
1118
1119 ProfilerWalkHeapContext * pProfilerWalkHeapContext = (ProfilerWalkHeapContext *) pvContext;
1120
1121 if (pMT->ContainsPointersOrCollectible())
1122 {
1123 // First round through calculates the number of object refs for this class
1124 GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &CountContainedObjectRef, (void *)&cNumRefs);
1125
1126 if (cNumRefs > 0)
1127 {
1128 // Create an array to contain all of the refs for this object
1129 bOnStack = cNumRefs <= 32 ? true : false;
1130
1131 if (bOnStack)
1132 {
1133 // It's small enough, so just allocate on the stack
1134 arrObjRef = (OBJECTREF *)_alloca(cNumRefs * sizeof(OBJECTREF));
1135 }
1136 else
1137 {
1138 // Otherwise, allocate from the heap
1139 arrObjRef = new (nothrow) OBJECTREF[cNumRefs];
1140
1141 if (!arrObjRef)
1142 {
1143 return FALSE;
1144 }
1145 }
1146
1147 // Second round saves off all of the ref values
1148 OBJECTREF * pCurObjRef = arrObjRef;
1149 GCHeapUtilities::GetGCHeap()->DiagWalkObject(pBO, &SaveContainedObjectRef, (void *)&pCurObjRef);
1150 }
1151 }
1152
1153 HRESULT hr = E_FAIL;
1154
1155#if defined(GC_PROFILING)
1156 if (pProfilerWalkHeapContext->fProfilerPinned)
1157 {
1158 // It is not safe and could be overflowed to downcast size_t to ULONG on WIN64.
1159 // However, we have to do this dangerous downcast here to comply with the existing Profiling COM interface.
1160 // We are currently evaluating ways to fix this potential overflow issue.
1161 hr = g_profControlBlock.pProfInterface->ObjectReference(
1162 (ObjectID) pBO,
1163 SafeGetClassIDFromObject(pBO),
1164 (ULONG) cNumRefs,
1165 (ObjectID *) arrObjRef);
1166 }
1167#endif
1168
1169#ifdef FEATURE_EVENT_TRACE
1170 if (s_forcedGCInProgress &&
1171 ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
1172 TRACE_LEVEL_INFORMATION,
1173 CLR_GCHEAPDUMP_KEYWORD))
1174 {
1175 ETW::GCLog::ObjectReference(
1176 pProfilerWalkHeapContext,
1177 pBO,
1178 (ULONGLONG) SafeGetClassIDFromObject(pBO),
1179 cNumRefs,
1180 (Object **) arrObjRef);
1181
1182 }
1183#endif // FEATURE_EVENT_TRACE
1184
1185 // If the data was not allocated on the stack, need to clean it up.
1186 if ((arrObjRef != NULL) && !bOnStack)
1187 {
1188 delete [] arrObjRef;
1189 }
1190
1191 // Return TRUE iff we want to the heap walk to continue. The only way we'd abort the
1192 // heap walk is if we're issuing profapi callbacks, and the profapi profiler
1193 // intentionally returned a failed HR (as its request that we stop the walk). There's
1194 // a potential conflict here. If a profapi profiler and an ETW profiler are both
1195 // monitoring the heap dump, and the profapi profiler requests to abort the walk (but
1196 // the ETW profiler may not want to abort the walk), then what do we do? The profapi
1197 // profiler gets precedence. We don't want to accidentally send more callbacks to a
1198 // profapi profiler that explicitly requested an abort. The ETW profiler will just
1199 // have to deal. In theory, I could make the code more complex by remembering that a
1200 // profapi profiler requested to abort the dump but an ETW profiler is still
1201 // attached, and then intentionally inhibit the remainder of the profapi callbacks
1202 // for this GC. But that's unnecessary complexity. In practice, it should be
1203 // extremely rare that a profapi profiler is monitoring heap dumps AND an ETW
1204 // profiler is also monitoring heap dumps.
1205 return (pProfilerWalkHeapContext->fProfilerPinned) ? SUCCEEDED(hr) : TRUE;
1206}
1207
1208#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACING)
1209
1210#ifdef PROFILING_SUPPORTED
1211//---------------------------------------------------------------------------------------
1212//
1213// Callback of type walk_fn used by the GC when walking the heap, to help profapi
1214// track objects. This is really just a wrapper around
1215// EEToProfInterfaceImpl::AllocByClass, which does the real work
1216//
1217// Arguments:
1218// pBO - Object reference encountered on the heap
1219// pv - Structure used by EEToProfInterfaceImpl::AllocByClass to do its work.
1220//
1221// Return Value:
1222// BOOL indicating whether the heap walk should continue.
1223// TRUE=continue
1224// FALSE=stop
1225// Currently always returns TRUE
1226//
1227
1228bool AllocByClassHelper(Object * pBO, void * pv)
1229{
1230 CONTRACTL
1231 {
1232 NOTHROW;
1233 GC_NOTRIGGER;
1234 MODE_ANY;
1235 }
1236 CONTRACTL_END;
1237 _ASSERTE(pv != NULL);
1238
1239 {
1240 BEGIN_PIN_PROFILER(CORProfilerPresent());
1241 // Pass along the call
1242 g_profControlBlock.pProfInterface->AllocByClass(
1243 (ObjectID) pBO,
1244 SafeGetClassIDFromObject(pBO),
1245 pv);
1246 END_PIN_PROFILER();
1247 }
1248
1249 return TRUE;
1250}
1251
1252#endif // PROFILING_SUPPORTED
1253#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1254
1255//---------------------------------------------------------------------------------------
1256//
1257// Callback of type promote_func called by GC while scanning roots (in GCProfileWalkHeap,
1258// called after the collection). Wrapper around EEToProfInterfaceImpl::RootReference2,
1259// which does the real work.
1260//
1261// Arguments:
1262// pObj - Object reference encountered
1263/// ppRoot - Address that references ppObject (can be interior pointer)
1264// pSC - ProfilingScanContext * containing the root kind and GCReferencesData used
1265// by RootReference2
1266// dwFlags - Properties of the root as GC_CALL* constants (this function converts
1267// to COR_PRF_GC_ROOT_FLAGS.
1268//
1269
1270void ScanRootsHelper(Object* pObj, Object ** ppRoot, ScanContext *pSC, uint32_t dwFlags)
1271{
1272 CONTRACTL
1273 {
1274 NOTHROW;
1275 GC_NOTRIGGER;
1276 SO_INTOLERANT;
1277 MODE_ANY;
1278 }
1279 CONTRACTL_END;
1280
1281 // RootReference2 can return E_OUTOFMEMORY, and we're swallowing that.
1282 // Furthermore, we can't really handle it because we're callable during GC promotion.
1283 // On the other hand, this only means profiling information will be incomplete,
1284 // so it's ok to swallow E_OUTOFMEMORY.
1285 //
1286 FAULT_NOT_FATAL();
1287
1288 ProfilingScanContext *pPSC = (ProfilingScanContext *)pSC;
1289
1290 DWORD dwEtwRootFlags = 0;
1291 if (dwFlags & GC_CALL_INTERIOR)
1292 dwEtwRootFlags |= kEtwGCRootFlagsInterior;
1293 if (dwFlags & GC_CALL_PINNED)
1294 dwEtwRootFlags |= kEtwGCRootFlagsPinning;
1295
1296#if defined(GC_PROFILING)
1297 void *rootID = NULL;
1298 switch (pPSC->dwEtwRootKind)
1299 {
1300 case kEtwGCRootKindStack:
1301 rootID = pPSC->pMD;
1302 break;
1303
1304 case kEtwGCRootKindHandle:
1305 _ASSERT(!"Shouldn't see handle here");
1306
1307 case kEtwGCRootKindFinalizer:
1308 default:
1309 break;
1310 }
1311
1312 // Notify profiling API of the root
1313 if (pPSC->fProfilerPinned)
1314 {
1315 // Let the profiling code know about this root reference
1316 g_profControlBlock.pProfInterface->
1317 RootReference2((BYTE *)pObj, pPSC->dwEtwRootKind, (EtwGCRootFlags)dwEtwRootFlags, (BYTE *)rootID, &((pPSC)->pHeapId));
1318 }
1319#endif
1320
1321#ifdef FEATURE_EVENT_TRACE
1322 // Notify ETW of the root
1323 if (s_forcedGCInProgress &&
1324 ETW_TRACING_CATEGORY_ENABLED(MICROSOFT_WINDOWS_DOTNETRUNTIME_PROVIDER_Context,
1325 TRACE_LEVEL_INFORMATION,
1326 CLR_GCHEAPDUMP_KEYWORD))
1327 {
1328 ETW::GCLog::RootReference(
1329 NULL, // handle is NULL, cuz this is a non-HANDLE root
1330 pObj, // object being rooted
1331 NULL, // pSecondaryNodeForDependentHandle is NULL, cuz this isn't a dependent handle
1332 FALSE, // is dependent handle
1333 pPSC,
1334 dwFlags, // dwGCFlags
1335 dwEtwRootFlags);
1336 }
1337#endif // FEATURE_EVENT_TRACE
1338}
1339
1340#endif // defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
1341#ifdef PROFILING_SUPPORTED
1342
1343//---------------------------------------------------------------------------------------
1344//
1345// Private ProfToEEInterfaceImpl maintenance functions
1346//
1347
1348
1349//---------------------------------------------------------------------------------------
1350//
1351// Initialize ProfToEEInterfaceImpl (including ModuleILHeap statics)
1352//
1353// Return Value:
1354// HRESULT indicating success
1355//
1356
1357HRESULT ProfToEEInterfaceImpl::Init()
1358{
1359 CONTRACTL
1360 {
1361 NOTHROW;
1362 CANNOT_TAKE_LOCK;
1363 GC_NOTRIGGER;
1364 MODE_ANY;
1365 }
1366 CONTRACTL_END;
1367
1368 LOG((LF_CORPROF, LL_INFO1000, "**PROF: Init.\n"));
1369
1370#ifdef _DEBUG
1371 if (ProfilingAPIUtility::ShouldInjectProfAPIFault(kProfAPIFault_StartupInternal))
1372 {
1373 return E_OUTOFMEMORY;
1374 }
1375#endif //_DEBUG
1376
1377 return S_OK;
1378}
1379
1380
1381//---------------------------------------------------------------------------------------
1382//
1383// Destroy ProfToEEInterfaceImpl (including ModuleILHeap statics)
1384//
1385
1386ProfToEEInterfaceImpl::~ProfToEEInterfaceImpl()
1387{
1388 CONTRACTL
1389 {
1390 NOTHROW;
1391 GC_NOTRIGGER;
1392 MODE_ANY;
1393 }
1394 CONTRACTL_END;
1395
1396 LOG((LF_CORPROF, LL_INFO1000, "**PROF: Terminate.\n"));
1397}
1398
1399//---------------------------------------------------------------------------------------
1400//
1401// Obsolete info functions
1402//
1403
1404HRESULT ProfToEEInterfaceImpl::GetInprocInspectionInterface(IUnknown **)
1405{
1406 LIMITED_METHOD_CONTRACT;
1407 return E_NOTIMPL;
1408}
1409
1410HRESULT ProfToEEInterfaceImpl::GetInprocInspectionIThisThread(IUnknown **)
1411{
1412 LIMITED_METHOD_CONTRACT;
1413 return E_NOTIMPL;
1414}
1415
1416HRESULT ProfToEEInterfaceImpl::BeginInprocDebugging(BOOL, DWORD *)
1417{
1418 LIMITED_METHOD_CONTRACT;
1419 return E_NOTIMPL;
1420}
1421
1422HRESULT ProfToEEInterfaceImpl::EndInprocDebugging(DWORD)
1423{
1424 LIMITED_METHOD_CONTRACT;
1425 return E_NOTIMPL;
1426}
1427
1428HRESULT ProfToEEInterfaceImpl::SetFunctionReJIT(FunctionID)
1429{
1430 LIMITED_METHOD_CONTRACT;
1431 return E_NOTIMPL;
1432}
1433
1434
1435
1436
1437//---------------------------------------------------------------------------------------
1438//
1439// *******************************
1440// Public Profiler->EE entrypoints
1441// *******************************
1442//
1443// ProfToEEInterfaceImpl implementation of public ICorProfilerInfo* methods
1444//
1445// NOTE: All ICorProfilerInfo* method implementations must follow the rules stated
1446// at the top of this file!
1447//
1448
1449// See corprof.idl / MSDN for detailed comments about each of these public
1450// functions, their parameters, return values, etc.
1451
1452HRESULT ProfToEEInterfaceImpl::SetEventMask(DWORD dwEventMask)
1453{
1454 CONTRACTL
1455 {
1456 // Yay!
1457 NOTHROW;
1458
1459 // Yay!
1460 GC_NOTRIGGER;
1461
1462 // Yay!
1463 MODE_ANY;
1464
1465 // Yay!
1466 EE_THREAD_NOT_REQUIRED;
1467
1468 CANNOT_TAKE_LOCK;
1469
1470 SO_NOT_MAINLINE;
1471 }
1472 CONTRACTL_END;
1473
1474 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
1475 (LF_CORPROF,
1476 LL_INFO1000,
1477 "**PROF: SetEventMask 0x%08x.\n",
1478 dwEventMask));
1479
1480 _ASSERTE(CORProfilerPresentOrInitializing());
1481
1482 return g_profControlBlock.pProfInterface->SetEventMask(dwEventMask, 0 /* No high bits */);
1483}
1484
1485HRESULT ProfToEEInterfaceImpl::SetEventMask2(DWORD dwEventsLow, DWORD dwEventsHigh)
1486{
1487 CONTRACTL
1488 {
1489 // Yay!
1490 NOTHROW;
1491
1492 // Yay!
1493 GC_NOTRIGGER;
1494
1495 // Yay!
1496 MODE_ANY;
1497
1498 // Yay!
1499 EE_THREAD_NOT_REQUIRED;
1500
1501 CANNOT_TAKE_LOCK;
1502
1503 SO_NOT_MAINLINE;
1504 }
1505 CONTRACTL_END;
1506
1507 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
1508 (LF_CORPROF,
1509 LL_INFO1000,
1510 "**PROF: SetEventMask2 0x%08x, 0x%08x.\n",
1511 dwEventsLow, dwEventsHigh));
1512
1513 _ASSERTE(CORProfilerPresentOrInitializing());
1514
1515 return g_profControlBlock.pProfInterface->SetEventMask(dwEventsLow, dwEventsHigh);
1516}
1517
1518
1519HRESULT ProfToEEInterfaceImpl::GetHandleFromThread(ThreadID threadId, HANDLE *phThread)
1520{
1521 CONTRACTL
1522{
1523 // Yay!
1524 NOTHROW;
1525
1526 // Yay!
1527 GC_NOTRIGGER;
1528
1529 // Yay!
1530 MODE_ANY;
1531
1532 // Yay!
1533 EE_THREAD_NOT_REQUIRED;
1534
1535 // Yay!
1536 CANNOT_TAKE_LOCK;
1537
1538 SO_NOT_MAINLINE;
1539 }
1540 CONTRACTL_END;
1541
1542 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
1543 (LF_CORPROF,
1544 LL_INFO1000,
1545 "**PROF: GetHandleFromThread 0x%p.\n",
1546 threadId));
1547
1548 if (!IsManagedThread(threadId))
1549 {
1550 return E_INVALIDARG;
1551 }
1552
1553 HRESULT hr = S_OK;
1554
1555 HANDLE hThread = ((Thread *)threadId)->GetThreadHandle();
1556
1557 if (hThread == INVALID_HANDLE_VALUE)
1558 hr = E_INVALIDARG;
1559
1560 else if (phThread)
1561 *phThread = hThread;
1562
1563 return (hr);
1564}
1565
1566HRESULT ProfToEEInterfaceImpl::GetObjectSize(ObjectID objectId, ULONG *pcSize)
1567{
1568 CONTRACTL
1569 {
1570 // Yay!
1571 NOTHROW;
1572
1573 // Yay!
1574 GC_NOTRIGGER;
1575
1576 // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
1577 MODE_ANY;
1578
1579 // Yay!
1580 EE_THREAD_NOT_REQUIRED;
1581
1582 // Yay!
1583 CANNOT_TAKE_LOCK;
1584
1585 SO_NOT_MAINLINE;
1586 }
1587 CONTRACTL_END;
1588
1589 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
1590 (LF_CORPROF,
1591 LL_INFO1000,
1592 "**PROF: GetObjectSize 0x%p.\n",
1593 objectId));
1594
1595 if (objectId == NULL)
1596 {
1597 return E_INVALIDARG;
1598 }
1599
1600 HRESULT hr = AllowObjectInspection();
1601 if (FAILED(hr))
1602 {
1603 return hr;
1604 }
1605
1606 // Get the object pointer
1607 Object *pObj = reinterpret_cast<Object *>(objectId);
1608
1609 // Get the size
1610 if (pcSize)
1611 {
1612 SIZE_T size = pObj->GetSize();
1613
1614 if(size < MIN_OBJECT_SIZE)
1615 {
1616 size = PtrAlign(size);
1617 }
1618
1619 if (size > ULONG_MAX)
1620 {
1621 *pcSize = ULONG_MAX;
1622 return COR_E_OVERFLOW;
1623 }
1624 *pcSize = (ULONG)size;
1625 }
1626
1627 // Indicate success
1628 return (S_OK);
1629}
1630
1631HRESULT ProfToEEInterfaceImpl::GetObjectSize2(ObjectID objectId, SIZE_T *pcSize)
1632{
1633 CONTRACTL
1634 {
1635 // Yay!
1636 NOTHROW;
1637
1638 // Yay!
1639 GC_NOTRIGGER;
1640
1641 // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
1642 MODE_ANY;
1643
1644 // Yay!
1645 EE_THREAD_NOT_REQUIRED;
1646
1647 // Yay!
1648 CANNOT_TAKE_LOCK;
1649
1650 SO_NOT_MAINLINE;
1651 }
1652 CONTRACTL_END;
1653
1654 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
1655 (LF_CORPROF,
1656 LL_INFO1000,
1657 "**PROF: GetObjectSize2 0x%p.\n",
1658 objectId));
1659
1660 if (objectId == NULL)
1661 {
1662 return E_INVALIDARG;
1663 }
1664
1665 HRESULT hr = AllowObjectInspection();
1666 if (FAILED(hr))
1667 {
1668 return hr;
1669 }
1670
1671 // Get the object pointer
1672 Object *pObj = reinterpret_cast<Object *>(objectId);
1673
1674 // Get the size
1675 if (pcSize)
1676 {
1677 SIZE_T size = pObj->GetSize();
1678
1679 if(size < MIN_OBJECT_SIZE)
1680 {
1681 size = PtrAlign(size);
1682 }
1683
1684 *pcSize = size;
1685 }
1686
1687 // Indicate success
1688 return (S_OK);
1689}
1690
1691
1692HRESULT ProfToEEInterfaceImpl::IsArrayClass(
1693 /* [in] */ ClassID classId,
1694 /* [out] */ CorElementType *pBaseElemType,
1695 /* [out] */ ClassID *pBaseClassId,
1696 /* [out] */ ULONG *pcRank)
1697{
1698 CONTRACTL
1699 {
1700 NOTHROW;
1701
1702 GC_NOTRIGGER;
1703
1704 // Yay!
1705 MODE_ANY;
1706
1707 // Yay!
1708 EE_THREAD_NOT_REQUIRED;
1709
1710 // Yay!
1711 CANNOT_TAKE_LOCK;
1712
1713 SO_NOT_MAINLINE;
1714 }
1715 CONTRACTL_END;
1716
1717 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
1718 (LF_CORPROF,
1719 LL_INFO1000,
1720 "**PROF: IsArrayClass 0x%p.\n",
1721 classId));
1722
1723 HRESULT hr;
1724
1725 if (classId == NULL)
1726 {
1727 return E_INVALIDARG;
1728 }
1729
1730 TypeHandle th = TypeHandle::FromPtr((void *)classId);
1731
1732 ARRAY_KIND arrayKind = ArrayKindFromTypeHandle(th);
1733
1734 // If this is indeed an array class, get some info about it
1735 switch (arrayKind)
1736 {
1737 default:
1738 {
1739 _ASSERTE(!"Unexpected return from ArrayKindFromTypeHandle()");
1740 hr = E_UNEXPECTED;
1741 break;
1742 }
1743
1744 case ARRAY_KIND_TYPEDESC:
1745 {
1746 // This is actually an array, so cast it up
1747 ArrayTypeDesc *pArr = th.AsArray();
1748
1749 // Fill in the type if they want it
1750 if (pBaseElemType != NULL)
1751 {
1752 *pBaseElemType = pArr->GetArrayElementTypeHandle().GetVerifierCorElementType();
1753 }
1754
1755 // If this is an array of classes and they wish to have the base type
1756 // If there is no associated class with this type, then there's no problem
1757 // because GetClass returns NULL which is the default we want to return in
1758 // this case.
1759 // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs
1760 if (pBaseClassId != NULL)
1761 {
1762 *pBaseClassId = TypeHandleToClassID(pArr->GetTypeParam());
1763 }
1764
1765 // If they want the number of dimensions of the array
1766 if (pcRank != NULL)
1767 {
1768 *pcRank = (ULONG) pArr->GetRank();
1769 }
1770
1771 // S_OK indicates that this was indeed an array
1772 hr = S_OK;
1773 break;
1774 }
1775 case ARRAY_KIND_METHODTABLE:
1776 {
1777 MethodTable *pArrMT = th.GetMethodTable();
1778
1779 // Fill in the type if they want it
1780 if (pBaseElemType != NULL)
1781 {
1782 *pBaseElemType = pArrMT->GetArrayElementType();
1783 }
1784
1785 // If this is an array of classes and they wish to have the base type.
1786 if (pBaseClassId != NULL)
1787 {
1788 *pBaseClassId = TypeHandleToClassID(pArrMT->GetApproxArrayElementTypeHandle());
1789 }
1790
1791 // If they want the number of dimensions of the array
1792 if (pcRank != NULL)
1793 {
1794 *pcRank = (ULONG) pArrMT->GetRank();
1795 }
1796
1797 // S_OK indicates that this was indeed an array
1798 hr = S_OK;
1799 break;
1800 }
1801 case ARRAY_KIND_NOTARRAY:
1802 {
1803 if (pBaseClassId != NULL)
1804 {
1805 *pBaseClassId = NULL;
1806 }
1807
1808 // This is not an array, S_FALSE indicates so.
1809 hr = S_FALSE;
1810 break;
1811 }
1812 }
1813
1814 return hr;
1815}
1816
1817HRESULT ProfToEEInterfaceImpl::GetThreadInfo(ThreadID threadId, DWORD *pdwWin32ThreadId)
1818{
1819 CONTRACTL
1820 {
1821 // Yay!
1822 NOTHROW;
1823
1824 // Yay!
1825 GC_NOTRIGGER;
1826
1827 // Yay!
1828 MODE_ANY;
1829
1830 // Yay!
1831 EE_THREAD_NOT_REQUIRED;
1832
1833 // Yay!
1834 CANNOT_TAKE_LOCK;
1835
1836 SO_NOT_MAINLINE;
1837 }
1838 CONTRACTL_END;
1839
1840 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
1841 (LF_CORPROF,
1842 LL_INFO1000,
1843 "**PROF: GetThreadInfo 0x%p.\n",
1844 threadId));
1845
1846 if (!IsManagedThread(threadId))
1847 {
1848 return E_INVALIDARG;
1849 }
1850
1851 if (pdwWin32ThreadId)
1852 {
1853 *pdwWin32ThreadId = ((Thread *)threadId)->GetOSThreadId();
1854 }
1855
1856 return S_OK;
1857}
1858
1859HRESULT ProfToEEInterfaceImpl::GetCurrentThreadID(ThreadID *pThreadId)
1860{
1861 CONTRACTL
1862 {
1863 // Yay!
1864 NOTHROW;
1865
1866 // Yay!
1867 GC_NOTRIGGER;
1868
1869 // Yay!
1870 MODE_ANY;
1871
1872 // Yay!
1873 EE_THREAD_NOT_REQUIRED;
1874
1875 // Yay!
1876 CANNOT_TAKE_LOCK;
1877
1878 SO_NOT_MAINLINE;
1879 }
1880 CONTRACTL_END;
1881
1882 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
1883 (LF_CORPROF,
1884 LL_INFO1000,
1885 "**PROF: GetCurrentThreadID.\n"));
1886
1887 HRESULT hr = S_OK;
1888
1889 // No longer assert that GetThread doesn't return NULL, since callbacks
1890 // can now occur on non-managed threads (such as the GC helper threads)
1891 Thread * pThread = GetThreadNULLOk();
1892
1893 // If pThread is null, then the thread has never run managed code and
1894 // so has no ThreadID
1895 if (!IsManagedThread(pThread))
1896 hr = CORPROF_E_NOT_MANAGED_THREAD;
1897
1898 // Only provide value if they want it
1899 else if (pThreadId)
1900 *pThreadId = (ThreadID) pThread;
1901
1902 return (hr);
1903}
1904
1905//---------------------------------------------------------------------------------------
1906//
1907// Internal helper function to wrap a call into the JIT manager to get information about
1908// a managed function based on IP
1909//
1910// Arguments:
1911// ip - IP address inside managed function of interest
1912// ppCodeInfo - [out] information about the managed function based on IP
1913//
1914// Return Value:
1915// HRESULT indicating success or failure.
1916//
1917//
1918
1919HRESULT GetFunctionInfoInternal(LPCBYTE ip, EECodeInfo * pCodeInfo)
1920{
1921 CONTRACTL
1922 {
1923 NOTHROW;
1924
1925 GC_NOTRIGGER;
1926 EE_THREAD_NOT_REQUIRED;
1927 CAN_TAKE_LOCK;
1928 CANNOT_RETAKE_LOCK;
1929
1930 SO_NOT_MAINLINE;
1931
1932 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
1933 // host (SQL). Corners will be cut to ensure this is the case
1934 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
1935 }
1936 CONTRACTL_END;
1937
1938 // Before calling into the code manager, ensure the GC heap has been
1939 // initialized--else the code manager will assert trying to get info from the heap.
1940 if (!IsGarbageCollectorFullyInitialized())
1941 {
1942 return CORPROF_E_NOT_YET_AVAILABLE;
1943 }
1944
1945 if (ShouldAvoidHostCalls())
1946 {
1947 ExecutionManager::ReaderLockHolder rlh(NoHostCalls);
1948 if (!rlh.Acquired())
1949 {
1950 // Couldn't get the info. Try again later
1951 return CORPROF_E_ASYNCHRONOUS_UNSAFE;
1952 }
1953
1954 pCodeInfo->Init((PCODE)ip, ExecutionManager::ScanNoReaderLock);
1955 }
1956 else
1957 {
1958 pCodeInfo->Init((PCODE)ip);
1959 }
1960
1961 if (!pCodeInfo->IsValid())
1962 {
1963 return E_FAIL;
1964 }
1965
1966 return S_OK;
1967}
1968
1969
1970HRESULT GetFunctionFromIPInternal(LPCBYTE ip, EECodeInfo * pCodeInfo, BOOL failOnNoMetadata)
1971{
1972 CONTRACTL
1973 {
1974 NOTHROW;
1975 GC_NOTRIGGER;
1976 MODE_ANY;
1977 EE_THREAD_NOT_REQUIRED;
1978 CAN_TAKE_LOCK;
1979 SO_NOT_MAINLINE;
1980 }
1981 CONTRACTL_END;
1982
1983 _ASSERTE (pCodeInfo != NULL);
1984
1985 HRESULT hr = GetFunctionInfoInternal(ip, pCodeInfo);
1986 if (FAILED(hr))
1987 {
1988 return hr;
1989 }
1990
1991 if (failOnNoMetadata)
1992 {
1993 // never return a method that the user of the profiler API cannot use
1994 if (pCodeInfo->GetMethodDesc()->IsNoMetadata())
1995 {
1996 return E_FAIL;
1997 }
1998 }
1999
2000 return S_OK;
2001}
2002
2003
2004HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP(LPCBYTE ip, FunctionID * pFunctionId)
2005{
2006 CONTRACTL
2007 {
2008 // Yay!
2009 NOTHROW;
2010
2011 // Yay!
2012 GC_NOTRIGGER;
2013
2014 // Yay!
2015 MODE_ANY;
2016
2017 // Yay!
2018 EE_THREAD_NOT_REQUIRED;
2019
2020 // Querying the code manager requires a reader lock. However, see
2021 // code:#DisableLockOnAsyncCalls
2022 DISABLED(CAN_TAKE_LOCK);
2023
2024 // Asynchronous functions can be called at arbitrary times when the runtime
2025 // is holding locks that cannot be reentered without causing deadlock.
2026 // This contract detects any attempts to reenter locks held at the time
2027 // this function was called.
2028 CANNOT_RETAKE_LOCK;
2029
2030 SO_NOT_MAINLINE;
2031
2032 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
2033 // host (SQL). Corners will be cut to ensure this is the case
2034 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
2035 }
2036 CONTRACTL_END;
2037
2038 // See code:#DisableLockOnAsyncCalls
2039 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
2040
2041 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
2042 (LF_CORPROF,
2043 LL_INFO1000,
2044 "**PROF: GetFunctionFromIP 0x%p.\n",
2045 ip));
2046
2047 // This call is allowed asynchronously, but the JIT functions take a reader lock.
2048 // So we need to ensure the current thread hasn't been hijacked by a profiler while
2049 // it was holding the writer lock. Checking the ForbidSuspendThread region is a
2050 // sufficient test for this
2051 FAIL_IF_IN_FORBID_SUSPEND_REGION();
2052
2053 HRESULT hr = S_OK;
2054
2055 EECodeInfo codeInfo;
2056
2057 hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ TRUE);
2058 if (FAILED(hr))
2059 {
2060 return hr;
2061 }
2062
2063 if (pFunctionId)
2064 {
2065 *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
2066 }
2067
2068 return S_OK;
2069}
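
// Illustrative sketch only (not part of this implementation): a sampling profiler that
// has captured an instruction pointer can map it back to a FunctionID with this
// entrypoint. "pInfo" (an ICorProfilerInfo* the profiler already holds) and "sampledIP"
// are assumed to come from the profiler's own state:
//
//     FunctionID functionId = NULL;
//     if (SUCCEEDED(pInfo->GetFunctionFromIP((LPCBYTE)sampledIP, &functionId)))
//     {
//         // sampledIP falls inside the jitted body of functionId
//     }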
2070
2071
2072HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP2(LPCBYTE ip, FunctionID * pFunctionId, ReJITID * pReJitId)
2073{
2074 CONTRACTL
2075 {
2076 // Yay!
2077 NOTHROW;
2078
2079 // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
2080 // which can switch us to preemptive mode and trigger GCs
2081 GC_TRIGGERS;
2082
2083 // Yay!
2084 MODE_ANY;
2085
2086 // Yay!
2087 EE_THREAD_NOT_REQUIRED;
2088
2089 // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
2090 CAN_TAKE_LOCK;
2091
2092 SO_NOT_MAINLINE;
2093 }
2094 CONTRACTL_END;
2095
2096 // See code:#DisableLockOnAsyncCalls
2097 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
2098
2099 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
2100 kP2EEAllowableAfterAttach | kP2EETriggers,
2101 (LF_CORPROF,
2102 LL_INFO1000,
2103 "**PROF: GetFunctionFromIP2 0x%p.\n",
2104 ip));
2105
2106 HRESULT hr = S_OK;
2107
2108 EECodeInfo codeInfo;
2109
2110 hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ TRUE);
2111 if (FAILED(hr))
2112 {
2113 return hr;
2114 }
2115
2116 if (pFunctionId)
2117 {
2118 *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
2119 }
2120
2121 if (pReJitId != NULL)
2122 {
2123 MethodDesc * pMD = codeInfo.GetMethodDesc();
2124 *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
2125 }
2126
2127 return S_OK;
2128}
2129
2130//*****************************************************************************
2131// Given a function id, retrieve the metadata token and a reader api that
2132// can be used against the token.
2133//*****************************************************************************
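// A minimal, hypothetical profiler-side sketch of how this entrypoint is typically
// consumed. "pInfo" (an ICorProfilerInfo* held by the profiler) and "functionId" are
// assumed to come from the profiler's own state or callback arguments:
//
//     mdToken methodToken = mdTokenNil;
//     IMetaDataImport * pImport = NULL;
//     if (SUCCEEDED(pInfo->GetTokenAndMetaDataFromFunction(
//             functionId, IID_IMetaDataImport, (IUnknown **)&pImport, &methodToken)))
//     {
//         WCHAR     wszMethod[512];
//         ULONG     cchMethod = 0;
//         mdTypeDef typeDef = mdTypeDefNil;
//         pImport->GetMethodProps(methodToken, &typeDef, wszMethod, _countof(wszMethod),
//                                 &cchMethod, NULL, NULL, NULL, NULL, NULL);
//         pImport->Release();
//     }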
2134HRESULT ProfToEEInterfaceImpl::GetTokenAndMetaDataFromFunction(
2135 FunctionID functionId,
2136 REFIID riid,
2137 IUnknown **ppOut,
2138 mdToken *pToken)
2139{
2140 CONTRACTL
2141 {
2142 // Yay!
2143 NOTHROW;
2144
2145 // Yay!
2146 GC_NOTRIGGER;
2147
2148 // Yay!
2149 MODE_ANY;
2150
2151 // Yay!
2152 EE_THREAD_NOT_REQUIRED;
2153
2154 // PEFile::GetRWImporter and GetReadablePublicMetaDataInterface take locks
2155 CAN_TAKE_LOCK;
2156
2157 SO_NOT_MAINLINE;
2158 }
2159 CONTRACTL_END;
2160
2161 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
2162 (LF_CORPROF,
2163 LL_INFO1000,
2164 "**PROF: GetTokenAndMetaDataFromFunction 0x%p.\n",
2165 functionId));
2166
2167 if (functionId == NULL)
2168 {
2169 return E_INVALIDARG;
2170 }
2171
2172 HRESULT hr = S_OK;
2173
2174 MethodDesc *pMD = FunctionIdToMethodDesc(functionId);
2175
2176 // it's not safe to examine a methoddesc that has not been restored so do not do so
2177 if (!pMD->IsRestored())
2178 return CORPROF_E_DATAINCOMPLETE;
2179
2180 if (pToken)
2181 {
2182 *pToken = pMD->GetMemberDef();
2183 }
2184
2185 // don't bother with any of this module fetching if the metadata access isn't requested
2186 if (ppOut)
2187 {
2188 Module * pMod = pMD->GetModule();
2189 hr = pMod->GetReadablePublicMetaDataInterface(ofRead, riid, (LPVOID *) ppOut);
2190 }
2191
2192 return hr;
2193}
2194
2195//---------------------------------------------------------------------------------------
2196// What follows are the GetCodeInfo* APIs and their helpers. The two helpers factor out
2197// some of the common code to validate parameters and then determine the code info from
2198// the start of the code. Each individual GetCodeInfo* API differs in how it uses these
2199 // helpers, particularly in how it determines the start of the code (GetCodeInfo3 needs
2200// to use the rejit manager to determine the code start, whereas the others do not).
2201// Factoring out like this allows us to have statically determined contracts that differ
2202// based on whether we need to use the rejit manager, which requires locking and
2203// may trigger GCs.
2204//---------------------------------------------------------------------------------------
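
// As a usage illustration only (not a statement about this implementation's internals):
// a profiler typically calls GetCodeInfo2 in two passes -- once to learn how many code
// regions exist for the function, then again to fetch them. "pInfo" and "functionId"
// below are assumed to be supplied by the profiler:
//
//     ULONG32 cNeeded = 0;
//     pInfo->GetCodeInfo2(functionId, 0, &cNeeded, NULL);          // count only (1 or 2)
//
//     COR_PRF_CODE_INFO regions[2];
//     ULONG32 cFilled = 0;
//     pInfo->GetCodeInfo2(functionId, _countof(regions), &cFilled, regions);
//     // regions[] comes back in the order the code would appear if straight-line
//     // compiled: the hot region first, then the cold region (if the method was split).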
2205
2206
2207HRESULT ValidateParametersForGetCodeInfo(
2208 MethodDesc * pMethodDesc,
2209 ULONG32 cCodeInfos,
2210 COR_PRF_CODE_INFO codeInfos[])
2211{
2212 LIMITED_METHOD_CONTRACT;
2213
2214 if (pMethodDesc == NULL)
2215 {
2216 return E_INVALIDARG;
2217 }
2218
2219 if ((cCodeInfos != 0) && (codeInfos == NULL))
2220 {
2221 return E_INVALIDARG;
2222 }
2223
2224 // it's not safe to examine a methoddesc that has not been restored so do not do so
2225 if (!pMethodDesc->IsRestored())
2226 return CORPROF_E_DATAINCOMPLETE;
2227
2228 if (pMethodDesc->HasClassOrMethodInstantiation() && pMethodDesc->IsTypicalMethodDefinition())
2229 {
2230 // In this case, we used to replace pMethodDesc with its canonical instantiation
2231 // (FindOrCreateTypicalSharedInstantiation). However, a profiler should never be able
2232 // to get to this point anyway, since any MethodDesc a profiler gets from us
2233 // cannot be typical (i.e., cannot be a generic with types still left uninstantiated).
2234 // We assert here just in case a test proves me wrong, but generally we will
2235 // disallow this code path.
2236 _ASSERTE(!"Profiler passed a typical method desc (a generic with types still left uninstantiated) to GetCodeInfo2");
2237 return E_INVALIDARG;
2238 }
2239
2240 return S_OK;
2241}
2242
2243HRESULT GetCodeInfoFromCodeStart(
2244 PCODE start,
2245 ULONG32 cCodeInfos,
2246 ULONG32 * pcCodeInfos,
2247 COR_PRF_CODE_INFO codeInfos[])
2248{
2249 CONTRACTL
2250 {
2251 NOTHROW;
2252 GC_NOTRIGGER;
2253 MODE_ANY;
2254
2255 // We need to take the ExecutionManager reader lock to find the
2256 // appropriate jit manager.
2257 CAN_TAKE_LOCK;
2258
2259 SO_NOT_MAINLINE;
2260
2261 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
2262 // host (SQL). Corners will be cut to ensure this is the case
2263 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
2264 }
2265 CONTRACTL_END;
2266
2267 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
2268
2269 ///////////////////////////////////
2270 // Get the code region info for this function. This is a multi step process.
2271 //
2272 // MethodDesc ==> Code Address ==> JitManager ==>
2273 // MethodToken ==> MethodRegionInfo
2274 //
2275 // (Our caller handled the first step: MethodDesc ==> Code Address.)
2276 //
2277 // <WIN64-ONLY>
2278 //
2279 // On WIN64 we have a choice of where to go to find out the function address range size:
2280 // GC info (which is what we're doing below on all architectures) or the OS unwind
2281 // info, stored in the RUNTIME_FUNCTION structure. The latter produces
2282 // a SMALLER size than the former, because the latter excludes some data from
2283 // the set we report to the OS for unwind info. For example, switch tables can be
2284 // separated out from the regular code and not be reported as OS unwind info, and thus
2285 // those addresses will not appear in the range reported by the RUNTIME_FUNCTION gotten via:
2286 //
2287 // IJitManager* pJitMan = ExecutionManager::FindJitMan((PBYTE)codeInfos[0].startAddress);
2288 // PRUNTIME_FUNCTION pfe = pJitMan->GetUnwindInfo((PBYTE)codeInfos[0].startAddress);
2289 // *pcCodeInfos = (ULONG) (pfe->EndAddress - pfe->BeginAddress);
2290 //
2291 // (Note that GCInfo & OS unwind info report the same start address--it's the size that's
2292 // different.)
2293 //
2294 // The advantage of using the GC info is that it's available on all architectures,
2295 // and it gives you a more complete picture of the addresses belonging to the function.
2296 //
2297 // A disadvantage of using GC info is we'll report those extra addresses (like switch
2298 // tables) that a profiler might turn back around and use in a call to
2299 // GetFunctionFromIP. A profiler may expect we'd be able to map back any address
2300 // in the function's GetCodeInfo ranges back to that function's FunctionID (methoddesc). But
2301 // querying these extra addresses will cause GetFunctionFromIP to fail, as they're not
2302 // actually valid instruction addresses that the IP register can be set to.
2303 //
2304 // The advantage wins out, so we're going with GC info everywhere.
2305 //
2306 // </WIN64-ONLY>
2307
2308 HRESULT hr;
2309
2310 if (start == NULL)
2311 {
2312 return CORPROF_E_FUNCTION_NOT_COMPILED;
2313 }
2314
2315 EECodeInfo codeInfo;
2316 hr = GetFunctionInfoInternal(
2317 (LPCBYTE) start,
2318 &codeInfo);
2319 if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE)
2320 {
2321 _ASSERTE(ShouldAvoidHostCalls());
2322 return hr;
2323 }
2324 if (FAILED(hr))
2325 {
2326 return CORPROF_E_FUNCTION_NOT_COMPILED;
2327 }
2328
2329 IJitManager::MethodRegionInfo methodRegionInfo;
2330 codeInfo.GetMethodRegionInfo(&methodRegionInfo);
2331
2332 //
2333 // Fill out the codeInfo structures with values from the
2334 // methodRegionInfo
2335 //
2336 // Note that we're assuming that a method will never be split into
2337 // more than two regions ... this is unlikely to change any time in
2338 // the near future.
2339 //
2340 if (NULL != codeInfos)
2341 {
2342 if (cCodeInfos > 0)
2343 {
2344 //
2345 // We have to return the two regions in the order that they would appear
2346 // if straight-line compiled
2347 //
2348 if (PCODEToPINSTR(start) == methodRegionInfo.hotStartAddress)
2349 {
2350 codeInfos[0].startAddress =
2351 (UINT_PTR)methodRegionInfo.hotStartAddress;
2352 codeInfos[0].size = methodRegionInfo.hotSize;
2353 }
2354 else
2355 {
2356 _ASSERTE(methodRegionInfo.coldStartAddress != NULL);
2357 codeInfos[0].startAddress =
2358 (UINT_PTR)methodRegionInfo.coldStartAddress;
2359 codeInfos[0].size = methodRegionInfo.coldSize;
2360 }
2361
2362 if (NULL != methodRegionInfo.coldStartAddress)
2363 {
2364 if (cCodeInfos > 1)
2365 {
2366 if (PCODEToPINSTR(start) == methodRegionInfo.hotStartAddress)
2367 {
2368 codeInfos[1].startAddress =
2369 (UINT_PTR)methodRegionInfo.coldStartAddress;
2370 codeInfos[1].size = methodRegionInfo.coldSize;
2371 }
2372 else
2373 {
2374 codeInfos[1].startAddress =
2375 (UINT_PTR)methodRegionInfo.hotStartAddress;
2376 codeInfos[1].size = methodRegionInfo.hotSize;
2377 }
2378 }
2379 }
2380 }
2381 }
2382
2383 if (NULL != pcCodeInfos)
2384 {
2385 *pcCodeInfos = (NULL != methodRegionInfo.coldStartAddress) ? 2 : 1;
2386 }
2387
2388
2389 return S_OK;
2390}
2391
2392//*****************************************************************************
2393// Gets the location and size of a jitted function
2394//*****************************************************************************
2395
2396HRESULT ProfToEEInterfaceImpl::GetCodeInfo(FunctionID functionId, LPCBYTE * pStart, ULONG * pcSize)
2397{
2398 CONTRACTL
2399 {
2400 // Yay!
2401 NOTHROW;
2402
2403 // Yay!
2404 GC_NOTRIGGER;
2405
2406 // Yay!
2407 MODE_ANY;
2408
2409 // Yay!
2410 EE_THREAD_NOT_REQUIRED;
2411
2412 // (See locking contract comment in GetCodeInfoFromCodeStart.)
2413 DISABLED(CAN_TAKE_LOCK);
2414
2415 // (See locking contract comment in GetCodeInfoFromCodeStart.)
2416 CANNOT_RETAKE_LOCK;
2417
2418 SO_NOT_MAINLINE;
2419
2420 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
2421 // host (SQL). Corners will be cut to ensure this is the case
2422 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
2423 }
2424 CONTRACTL_END;
2425
2426 // See code:#DisableLockOnAsyncCalls
2427 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
2428
2429 // This is called asynchronously, but GetCodeInfoFromCodeStart() will
2430 // ensure we're not called at a dangerous time
2431 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
2432 (LF_CORPROF,
2433 LL_INFO1000,
2434 "**PROF: GetCodeInfo 0x%p.\n",
2435 functionId));
2436
2437 // GetCodeInfo may be called asynchronously, and the JIT functions take a reader
2438 // lock. So we need to ensure the current thread hasn't been hijacked by a profiler while
2439 // it was holding the writer lock. Checking the ForbidSuspendThread region is a sufficient test for this
2440 FAIL_IF_IN_FORBID_SUSPEND_REGION();
2441
2442 if (functionId == 0)
2443 {
2444 return E_INVALIDARG;
2445 }
2446
2447 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
2448
2449 COR_PRF_CODE_INFO codeInfos[2];
2450 ULONG32 cCodeInfos;
2451
2452 HRESULT hr = GetCodeInfoFromCodeStart(
2453 pMethodDesc->GetNativeCode(),
2454 _countof(codeInfos),
2455 &cCodeInfos,
2456 codeInfos);
2457
2458 if ((FAILED(hr)) || (0 == cCodeInfos))
2459 {
2460 return hr;
2461 }
2462
2463 if (NULL != pStart)
2464 {
2465 *pStart = reinterpret_cast< LPCBYTE >(codeInfos[0].startAddress);
2466 }
2467
2468 if (NULL != pcSize)
2469 {
2470 if (!FitsIn<ULONG>(codeInfos[0].size))
2471 {
2472 return E_UNEXPECTED;
2473 }
2474 *pcSize = static_cast<ULONG>(codeInfos[0].size);
2475 }
2476
2477 return hr;
2478}
2479
2480HRESULT ProfToEEInterfaceImpl::GetCodeInfo2(FunctionID functionId,
2481 ULONG32 cCodeInfos,
2482 ULONG32 * pcCodeInfos,
2483 COR_PRF_CODE_INFO codeInfos[])
2484{
2485 CONTRACTL
2486 {
2487 // Yay!
2488 NOTHROW;
2489
2490 // Yay!
2491 GC_NOTRIGGER;
2492
2493 // Yay!
2494 MODE_ANY;
2495
2496 // Yay!
2497 EE_THREAD_NOT_REQUIRED;
2498
2499 // (See locking contract comment in GetCodeInfoFromCodeStart.)
2500 DISABLED(CAN_TAKE_LOCK);
2501
2502 // (See locking contract comment in GetCodeInfoFromCodeStart.)
2503 CANNOT_RETAKE_LOCK;
2504
2505 SO_NOT_MAINLINE;
2506
2507 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
2508 // host (SQL). Corners will be cut to ensure this is the case
2509 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
2510
2511 PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
2512 PRECONDITION(CheckPointer(codeInfos, NULL_OK));
2513 }
2514 CONTRACTL_END;
2515
2516 // See code:#DisableLockOnAsyncCalls
2517 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
2518
2519 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
2520 (LF_CORPROF,
2521 LL_INFO1000,
2522 "**PROF: GetCodeInfo2 0x%p.\n",
2523 functionId));
2524
2525 HRESULT hr = S_OK;
2526
2527 EX_TRY
2528 {
2529 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
2530
2531 hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
2532 if (SUCCEEDED(hr))
2533 {
2534 hr = GetCodeInfoFromCodeStart(
2535 pMethodDesc->GetNativeCode(),
2536 cCodeInfos,
2537 pcCodeInfos,
2538 codeInfos);
2539 }
2540 }
2541 EX_CATCH_HRESULT(hr);
2542
2543 return hr;
2544}
2545
2546
2547HRESULT ProfToEEInterfaceImpl::GetCodeInfo3(FunctionID functionId,
2548 ReJITID reJitId,
2549 ULONG32 cCodeInfos,
2550 ULONG32* pcCodeInfos,
2551 COR_PRF_CODE_INFO codeInfos[])
2552
2553
2554{
2555 CONTRACTL
2556 {
2557 // Yay!
2558 NOTHROW;
2559
2560 // We need to access the rejitmanager, which means taking locks, which means we
2561 // may trigger a GC
2562 GC_TRIGGERS;
2563
2564 // Yay!
2565 MODE_ANY;
2566
2567 // Yay!
2568 EE_THREAD_NOT_REQUIRED;
2569
2570 // We need to access the rejitmanager, which means taking locks
2571 CAN_TAKE_LOCK;
2572
2573 SO_NOT_MAINLINE;
2574
2575 PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
2576 PRECONDITION(CheckPointer(codeInfos, NULL_OK));
2577 }
2578 CONTRACTL_END;
2579
2580 // See code:#DisableLockOnAsyncCalls
2581 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
2582
2583 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
2584 kP2EEAllowableAfterAttach | kP2EETriggers,
2585 (LF_CORPROF,
2586 LL_INFO1000,
2587 "**PROF: GetCodeInfo3 0x%p 0x%p.\n",
2588 functionId, reJitId));
2589
2590 HRESULT hr = S_OK;
2591
2592 EX_TRY
2593 {
2594 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
2595
2596 hr = ValidateParametersForGetCodeInfo(pMethodDesc, cCodeInfos, codeInfos);
2597 if (SUCCEEDED(hr))
2598 {
2599 PCODE pCodeStart = NULL;
2600 CodeVersionManager* pCodeVersionManager = pMethodDesc->GetCodeVersionManager();
2601 {
2602 CodeVersionManager::TableLockHolder lockHolder(pCodeVersionManager);
2603
2604 ILCodeVersion ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMethodDesc, reJitId);
2605
2606 NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMethodDesc);
2607 for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
2608 {
2609 // Now that tiered compilation can create more than one jitted code version for the same rejit id
2610 // we are arbitrarily choosing the first one to return. To address a specific version of native code
2611 // use GetCodeInfo4.
2612 pCodeStart = iter->GetNativeCode();
2613 break;
2614 }
2615 }
2616
2617 hr = GetCodeInfoFromCodeStart(pCodeStart,
2618 cCodeInfos,
2619 pcCodeInfos,
2620 codeInfos);
2621 }
2622 }
2623 EX_CATCH_HRESULT(hr);
2624
2625 return hr;
2626}
2627
2628
2629HRESULT ProfToEEInterfaceImpl::GetEventMask(DWORD * pdwEvents)
2630{
2631 CONTRACTL
2632 {
2633 // Yay!
2634 NOTHROW;
2635
2636 // Yay!
2637 GC_NOTRIGGER;
2638
2639 // Yay!
2640 MODE_ANY;
2641
2642 // Yay!
2643 EE_THREAD_NOT_REQUIRED;
2644
2645 // Yay!
2646 CANNOT_TAKE_LOCK;
2647
2648 SO_NOT_MAINLINE;
2649 }
2650 CONTRACTL_END;
2651
2652
2653 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
2654 (LF_CORPROF,
2655 LL_INFO10,
2656 "**PROF: GetEventMask.\n"));
2657
2658 if (pdwEvents == NULL)
2659 {
2660 return E_INVALIDARG;
2661 }
2662
2663 *pdwEvents = g_profControlBlock.dwEventMask;
2664 return S_OK;
2665}
2666
2667HRESULT ProfToEEInterfaceImpl::GetEventMask2(DWORD *pdwEventsLow, DWORD *pdwEventsHigh)
2668{
2669 CONTRACTL
2670 {
2671 // Yay!
2672 NOTHROW;
2673
2674 // Yay!
2675 GC_NOTRIGGER;
2676
2677 // Yay!
2678 MODE_ANY;
2679
2680 // Yay!
2681 EE_THREAD_NOT_REQUIRED;
2682
2683 // Yay!
2684 CANNOT_TAKE_LOCK;
2685
2686 SO_NOT_MAINLINE;
2687 }
2688 CONTRACTL_END;
2689
2690
2691 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
2692 (LF_CORPROF,
2693 LL_INFO10,
2694 "**PROF: GetEventMask2.\n"));
2695
2696 if ((pdwEventsLow == NULL) || (pdwEventsHigh == NULL))
2697 {
2698 return E_INVALIDARG;
2699 }
2700
2701 *pdwEventsLow = g_profControlBlock.dwEventMask;
2702 *pdwEventsHigh = g_profControlBlock.dwEventMaskHigh;
2703 return S_OK;
2704}
2705
2706// static
2707void ProfToEEInterfaceImpl::MethodTableCallback(void* context, void* objectUNSAFE)
2708{
2709 CONTRACTL
2710 {
2711 NOTHROW;
2712 GC_NOTRIGGER;
2713 SO_NOT_MAINLINE;
2714 MODE_ANY;
2715 }
2716 CONTRACTL_END;
2717
2718 // each callback identifies the address of a method table within the frozen object segment
2719 // that pointer is an object ID by definition -- object references point to the method table
2720 CDynArray< ObjectID >* objects = reinterpret_cast< CDynArray< ObjectID >* >(context);
2721
2722 *objects->Append() = reinterpret_cast< ObjectID >(objectUNSAFE);
2723}
2724
2725// static
2726void ProfToEEInterfaceImpl::ObjectRefCallback(void* context, void* objectUNSAFE)
2727{
2728 // we don't care about embedded object references, ignore them
2729}
2730
2731
2732HRESULT ProfToEEInterfaceImpl::EnumModuleFrozenObjects(ModuleID moduleID,
2733 ICorProfilerObjectEnum** ppEnum)
2734{
2735 CONTRACTL
2736 {
2737 // Yay!
2738 NOTHROW;
2739
2740 // Yay!
2741 GC_NOTRIGGER;
2742
2743 // Yay!
2744 MODE_ANY;
2745
2746 // Yay!
2747 EE_THREAD_NOT_REQUIRED;
2748
2749 // Yay!
2750 CANNOT_TAKE_LOCK;
2751
2752 SO_NOT_MAINLINE;
2753 }
2754 CONTRACTL_END;
2755
2756 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
2757 (LF_CORPROF,
2758 LL_INFO1000,
2759 "**PROF: EnumModuleFrozenObjects 0x%p.\n",
2760 moduleID));
2761
2762 if (NULL == ppEnum)
2763 {
2764 return E_INVALIDARG;
2765 }
2766
2767 Module* pModule = reinterpret_cast< Module* >(moduleID);
2768 if (pModule == NULL || pModule->IsBeingUnloaded())
2769 {
2770 return CORPROF_E_DATAINCOMPLETE;
2771 }
2772
2773 HRESULT hr = S_OK;
2774
2775 EX_TRY
2776 {
2777 // If we don't support frozen objects at all, then just return an empty
2778 // enumerator.
2779 *ppEnum = new ProfilerObjectEnum();
2780 }
2781 EX_CATCH_HRESULT(hr);
2782
2783 return hr;
2784}
2785
2786
2787
2788/*
2789 * GetArrayObjectInfo
2790 *
2791 * This function returns information about array objects. In particular, the dimensions
2792 * and where the data buffer is stored.
2793 *
2794 */
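// Illustrative, hypothetical profiler-side sketch (e.g. from an ObjectAllocated
// callback where "objectId" is already known to refer to an array, and "pInfo" is the
// profiler's ICorProfilerInfo2 pointer):
//
//     ULONG32 dims[8];            // must hold at least the array's rank
//     int     lowerBounds[8];
//     BYTE *  pData = NULL;
//     if (SUCCEEDED(pInfo->GetArrayObjectInfo(objectId, _countof(dims),
//                                             dims, lowerBounds, &pData)))
//     {
//         // dims[i] / lowerBounds[i] describe dimension i, and pData points at the
//         // first element of the array's data buffer.
//     }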
2795HRESULT ProfToEEInterfaceImpl::GetArrayObjectInfo(ObjectID objectId,
2796 ULONG32 cDimensionSizes,
2797 ULONG32 pDimensionSizes[],
2798 int pDimensionLowerBounds[],
2799 BYTE **ppData)
2800{
2801 CONTRACTL
2802 {
2803 // Yay!
2804 NOTHROW;
2805
2806 // Yay!
2807 GC_NOTRIGGER;
2808
2809 // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
2810 MODE_ANY;
2811
2812 // Yay!
2813 CANNOT_TAKE_LOCK;
2814
2815 SO_NOT_MAINLINE;
2816 }
2817 CONTRACTL_END;
2818
2819 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
2820 (LF_CORPROF,
2821 LL_INFO1000,
2822 "**PROF: GetArrayObjectInfo 0x%p.\n",
2823 objectId));
2824
2825 if (objectId == NULL)
2826 {
2827 return E_INVALIDARG;
2828 }
2829
2830 if ((pDimensionSizes == NULL) ||
2831 (pDimensionLowerBounds == NULL) ||
2832 (ppData == NULL))
2833 {
2834 return E_INVALIDARG;
2835 }
2836
2837 HRESULT hr = AllowObjectInspection();
2838 if (FAILED(hr))
2839 {
2840 return hr;
2841 }
2842
2843 Object * pObj = reinterpret_cast<Object *>(objectId);
2844
2845 // GC callbacks may come from a non-EE thread, which is considered permanently preemptive.
2846 // We are about to call some object inspection functions, which require us to be in co-op mode.
2847 // Given that no managed objects can be changed by managed code until the GC resumes the
2848 // runtime, it is safe to violate the mode contract and to inspect managed objects from a
2849 // non-EE thread when GetArrayObjectInfo is called within GC callbacks.
2850 if (NativeThreadInGC())
2851 {
2852 CONTRACT_VIOLATION(ModeViolation);
2853 return GetArrayObjectInfoHelper(pObj, cDimensionSizes, pDimensionSizes, pDimensionLowerBounds, ppData);
2854 }
2855
2856 return GetArrayObjectInfoHelper(pObj, cDimensionSizes, pDimensionSizes, pDimensionLowerBounds, ppData);
2857}
2858
2859HRESULT ProfToEEInterfaceImpl::GetArrayObjectInfoHelper(Object * pObj,
2860 ULONG32 cDimensionSizes,
2861 __out_ecount(cDimensionSizes) ULONG32 pDimensionSizes[],
2862 __out_ecount(cDimensionSizes) int pDimensionLowerBounds[],
2863 BYTE **ppData)
2864{
2865 CONTRACTL
2866 {
2867 // Yay!
2868 NOTHROW;
2869
2870 // Yay!
2871 GC_NOTRIGGER;
2872
2873 // Because of the object pointer parameter, we must be either in CO-OP mode,
2874 // or on a non-EE thread in the process of doing a GC.
2875 if (!NativeThreadInGC()) { MODE_COOPERATIVE; }
2876
2877 // Yay!
2878 CANNOT_TAKE_LOCK;
2879
2880 SO_NOT_MAINLINE;
2881 }
2882 CONTRACTL_END;
2883
2884 // Must have an array.
2885 MethodTable * pMT = pObj->GetMethodTable();
2886 if (!pMT->IsArray())
2887 {
2888 return E_INVALIDARG;
2889 }
2890
2891 ArrayBase * pArray = static_cast<ArrayBase*> (pObj);
2892
2893 unsigned rank = pArray->GetRank();
2894
2895 if (cDimensionSizes < rank)
2896 {
2897 return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
2898 }
2899
2900 // Copy range for each dimension (rank)
2901 int * pBounds = pArray->GetBoundsPtr();
2902 int * pLowerBounds = pArray->GetLowerBoundsPtr();
2903
2904 unsigned i;
2905 for(i = 0; i < rank; i++)
2906 {
2907 pDimensionSizes[i] = pBounds[i];
2908 pDimensionLowerBounds[i] = pLowerBounds[i];
2909 }
2910
2911 // Pointer to data.
2912 *ppData = pArray->GetDataPtr();
2913
2914 return S_OK;
2915}
2916
2917/*
2918 * GetBoxClassLayout
2919 *
2920 * Returns information about how a particular value type is laid out.
2921 *
2922 */
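// Illustrative, hypothetical sketch of what the returned offset is for: given an
// ObjectID known to be a boxed instance of the value type identified by classId, the
// raw value bytes begin at that offset inside the box ("pInfo" and "objectId" are
// assumed profiler-side state):
//
//     ULONG32 offset = 0;
//     if (SUCCEEDED(pInfo->GetBoxClassLayout(classId, &offset)))
//     {
//         BYTE * pValueStart = (BYTE *)objectId + offset;   // start of the unboxed value
//     }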
2923HRESULT ProfToEEInterfaceImpl::GetBoxClassLayout(ClassID classId,
2924 ULONG32 *pBufferOffset)
2925{
2926 CONTRACTL
2927 {
2928 // Yay!
2929 NOTHROW;
2930
2931 // Yay!
2932 GC_NOTRIGGER;
2933
2934 // Yay!
2935 MODE_ANY;
2936
2937 // Yay!
2938 EE_THREAD_NOT_REQUIRED;
2939
2940 // Yay!
2941 CANNOT_TAKE_LOCK;
2942
2943 SO_NOT_MAINLINE;
2944 }
2945 CONTRACTL_END;
2946
2947 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
2948 (LF_CORPROF,
2949 LL_INFO1000,
2950 "**PROF: GetBoxClassLayout 0x%p.\n",
2951 classId));
2952
2953 if (pBufferOffset == NULL)
2954 {
2955 return E_INVALIDARG;
2956 }
2957
2958 if (classId == NULL)
2959 {
2960 return E_INVALIDARG;
2961 }
2962
2963 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
2964
2965 //
2966 // This is the incorrect API for arrays. Use GetArrayInfo and GetArrayLayout.
2967 //
2968 if (!typeHandle.IsValueType())
2969 {
2970 return E_INVALIDARG;
2971 }
2972
2973 *pBufferOffset = Object::GetOffsetOfFirstField();
2974
2975 return S_OK;
2976}
2977
2978/*
2979 * GetThreadAppDomain
2980 *
2981 * Returns the app domain currently associated with the given thread.
2982 *
2983 */
2984HRESULT ProfToEEInterfaceImpl::GetThreadAppDomain(ThreadID threadId,
2985 AppDomainID *pAppDomainId)
2986
2987{
2988 CONTRACTL
2989 {
2990 // Yay!
2991 NOTHROW;
2992
2993 // Yay!
2994 GC_NOTRIGGER;
2995
2996 // Yay!
2997 MODE_ANY;
2998
2999 // Yay!
3000 EE_THREAD_NOT_REQUIRED;
3001
3002 // Yay!
3003 CANNOT_TAKE_LOCK;
3004
3005 SO_NOT_MAINLINE;
3006 }
3007 CONTRACTL_END;
3008
3009 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
3010 (LF_CORPROF,
3011 LL_INFO1000,
3012 "**PROF: GetThreadAppDomain 0x%p.\n",
3013 threadId));
3014
3015 if (pAppDomainId == NULL)
3016 {
3017 return E_INVALIDARG;
3018 }
3019
3020 Thread *pThread;
3021
3022 if (threadId == NULL)
3023 {
3024 pThread = GetThreadNULLOk();
3025 }
3026 else
3027 {
3028 pThread = (Thread *)threadId;
3029 }
3030
3031 //
3032 // If pThread is null, then the thread has never run managed code and
3033 // so has no ThreadID.
3034 //
3035 if (!IsManagedThread(pThread))
3036 {
3037 return CORPROF_E_NOT_MANAGED_THREAD;
3038 }
3039
3040 *pAppDomainId = (AppDomainID)pThread->GetDomain();
3041
3042 return S_OK;
3043}
3044
3045
3046/*
3047 * GetRVAStaticAddress
3048 *
3049 * This function returns the absolute address of the given field in the given
3050 * class. The field must be an RVA Static token.
3051 *
3052 * Parameters:
3053 * classId - the containing class.
3054 * fieldToken - the field we are querying.
3055 * pAddress - location for storing the resulting address location.
3056 *
3057 * Returns:
3058 * S_OK on success,
3059 * E_INVALIDARG if not an RVA static,
3060 * CORPROF_E_DATAINCOMPLETE if not yet initialized.
3061 *
3062 */
3063HRESULT ProfToEEInterfaceImpl::GetRVAStaticAddress(ClassID classId,
3064 mdFieldDef fieldToken,
3065 void **ppAddress)
3066{
3067 CONTRACTL
3068 {
3069 // Yay!
3070 NOTHROW;
3071
3072 // Yay!
3073 GC_NOTRIGGER;
3074
3075 // Yay!
3076 MODE_ANY;
3077
3078 // FieldDesc::GetStaticAddress takes a lock
3079 CAN_TAKE_LOCK;
3080
3081 SO_NOT_MAINLINE;
3082 }
3083 CONTRACTL_END;
3084
3085 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3086 (LF_CORPROF,
3087 LL_INFO1000,
3088 "**PROF: GetRVAStaticAddress 0x%p, 0x%08x.\n",
3089 classId,
3090 fieldToken));
3091
3092 //
3093 // Check for NULL parameters
3094 //
3095 if ((classId == NULL) || (ppAddress == NULL))
3096 {
3097 return E_INVALIDARG;
3098 }
3099
3100 if (GetThread() == NULL)
3101 {
3102 return CORPROF_E_NOT_MANAGED_THREAD;
3103 }
3104
3105 if (GetAppDomain() == NULL)
3106 {
3107 return E_FAIL;
3108 }
3109
3110 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
3111
3112 //
3113 // If this class is not fully restored, that is all the information we can get at this time.
3114 //
3115 if (!typeHandle.IsRestored())
3116 {
3117 return CORPROF_E_DATAINCOMPLETE;
3118 }
3119
3120 //
3121 // Get the field descriptor object
3122 //
3123 FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
3124
3125 if (pFieldDesc == NULL)
3126 {
3127 return E_INVALIDARG;
3128 }
3129
3130 //
3131 // Verify this field is of the right type
3132 //
3133 if(!pFieldDesc->IsStatic() ||
3134 !pFieldDesc->IsRVA() ||
3135 pFieldDesc->IsThreadStatic())
3136 {
3137 return E_INVALIDARG;
3138 }
3139
3140 // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
3141 // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
3142 MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
3143
3144 //
3145 // Check that the data is available
3146 //
3147 if (!IsClassOfMethodTableInited(pMethodTable, GetAppDomain()))
3148 {
3149 return CORPROF_E_DATAINCOMPLETE;
3150 }
3151
3152 //
3153 // Store the result and return
3154 //
3155 PTR_VOID pAddress = pFieldDesc->GetStaticAddress(NULL);
3156 if (pAddress == NULL)
3157 {
3158 return CORPROF_E_DATAINCOMPLETE;
3159 }
3160
3161 *ppAddress = pAddress;
3162
3163 return S_OK;
3164}
3165
3166
3167/*
3168 * GetAppDomainStaticAddress
3169 *
3170 * This function returns the absolute address of the given field in the given
3171 * class in the given app domain. The field must be an App Domain Static token.
3172 *
3173 * Parameters:
3174 * classId - the containing class.
3175 * fieldToken - the field we are querying.
3176 * appDomainId - the app domain container.
3177 * pAddress - location for storing the resulting address location.
3178 *
3179 * Returns:
3180 * S_OK on success,
3181 * E_INVALIDARG if not an app domain static,
3182 * CORPROF_E_DATAINCOMPLETE if not yet initialized.
3183 *
3184 */
3185HRESULT ProfToEEInterfaceImpl::GetAppDomainStaticAddress(ClassID classId,
3186 mdFieldDef fieldToken,
3187 AppDomainID appDomainId,
3188 void **ppAddress)
3189{
3190 CONTRACTL
3191 {
3192 // Yay!
3193 NOTHROW;
3194
3195 // Yay!
3196 GC_NOTRIGGER;
3197
3198 // Yay!
3199 MODE_ANY;
3200
3201 // Yay!
3202 EE_THREAD_NOT_REQUIRED;
3203
3204 // FieldDesc::GetStaticAddress & FieldDesc::GetBaseInDomain take locks
3205 CAN_TAKE_LOCK;
3206
3207 SO_NOT_MAINLINE;
3208 }
3209 CONTRACTL_END;
3210
3211 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3212 (LF_CORPROF,
3213 LL_INFO1000,
3214 "**PROF: GetAppDomainStaticAddress 0x%p, 0x%08x, 0x%p.\n",
3215 classId,
3216 fieldToken,
3217 appDomainId));
3218
3219 //
3220 // Check for NULL parameters
3221 //
3222 if ((classId == NULL) || (appDomainId == NULL) || (ppAddress == NULL))
3223 {
3224 return E_INVALIDARG;
3225 }
3226
3227 // Some domains, like the system domain, aren't APP domains, and thus don't contain any
3228 // statics. See if the profiler is trying to be naughty.
3229 if (!((BaseDomain*) appDomainId)->IsAppDomain())
3230 {
3231 return E_INVALIDARG;
3232 }
3233
3234 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
3235
3236 //
3237 // If this class is not fully restored, that is all the information we can get at this time.
3238 //
3239 if (!typeHandle.IsRestored())
3240 {
3241 return CORPROF_E_DATAINCOMPLETE;
3242 }
3243
3244 //
3245 // Get the field descriptor object
3246 //
3247 FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
3248
3249 if (pFieldDesc == NULL)
3250 {
3251 //
3252 // Give specific error code for literals.
3253 //
3254 DWORD dwFieldAttrs;
3255 if (FAILED(typeHandle.GetModule()->GetMDImport()->GetFieldDefProps(fieldToken, &dwFieldAttrs)))
3256 {
3257 return E_INVALIDARG;
3258 }
3259
3260 if (IsFdLiteral(dwFieldAttrs))
3261 {
3262 return CORPROF_E_LITERALS_HAVE_NO_ADDRESS;
3263 }
3264
3265 return E_INVALIDARG;
3266 }
3267
3268 //
3269 // Verify this field is of the right type
3270 //
3271 if(!pFieldDesc->IsStatic() ||
3272 pFieldDesc->IsRVA() ||
3273 pFieldDesc->IsThreadStatic())
3274 {
3275 return E_INVALIDARG;
3276 }
3277
3278 // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
3279 // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
3280 MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
3281 AppDomain * pAppDomain = (AppDomain *)appDomainId;
3282
3283 //
3284 // Check that the data is available
3285 //
3286 if (!IsClassOfMethodTableInited(pMethodTable, pAppDomain))
3287 {
3288 return CORPROF_E_DATAINCOMPLETE;
3289 }
3290
3291 //
3292 // Get the address
3293 //
3294 void *base = (void*)pFieldDesc->GetBaseInDomain(pAppDomain);
3295
3296 if (base == NULL)
3297 {
3298 return CORPROF_E_DATAINCOMPLETE;
3299 }
3300
3301 //
3302 // Store the result and return
3303 //
3304 PTR_VOID pAddress = pFieldDesc->GetStaticAddress(base);
3305 if (pAddress == NULL)
3306 {
3307 return E_INVALIDARG;
3308 }
3309
3310 *ppAddress = pAddress;
3311
3312 return S_OK;
3313}
3314
3315/*
3316 * GetThreadStaticAddress
3317 *
3318 * This function returns the absolute address of the given field in the given
3319 * class on the given thread. The field must be a Thread Static token. threadId
3320 * must be the current thread ID or NULL, which means to use the current thread ID.
3321 *
3322 * Parameters:
3323 * classId - the containing class.
3324 * fieldToken - the field we are querying.
3325 * threadId - the thread container, which has to be the current managed thread or
3326 * NULL, which means to use the current managed thread.
3327 * pAddress - location for storing the resulting address location.
3328 *
3329 * Returns:
3330 * S_OK on success,
3331 * E_INVALIDARG if not a thread static,
3332 * CORPROF_E_DATAINCOMPLETE if not yet initialized.
3333 *
3334 */
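// Illustrative, hypothetical call shape: a profiler running on a managed thread can
// pass NULL for threadId to mean "the current thread" ("pInfo", "classId" and
// "fieldToken" are assumed profiler-side state):
//
//     void * pFieldAddress = NULL;
//     HRESULT hr = pInfo->GetThreadStaticAddress(classId, fieldToken, NULL, &pFieldAddress);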
3335HRESULT ProfToEEInterfaceImpl::GetThreadStaticAddress(ClassID classId,
3336 mdFieldDef fieldToken,
3337 ThreadID threadId,
3338 void **ppAddress)
3339{
3340 CONTRACTL
3341 {
3342 // Yay!
3343 NOTHROW;
3344
3345 // Yay!
3346 GC_NOTRIGGER;
3347
3348 // Yay!
3349 MODE_ANY;
3350
3351 // Yay!
3352 CANNOT_TAKE_LOCK;
3353
3354 SO_NOT_MAINLINE;
3355 }
3356 CONTRACTL_END;
3357
3358 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3359 (LF_CORPROF,
3360 LL_INFO1000,
3361 "**PROF: GetThreadStaticAddress 0x%p, 0x%08x, 0x%p.\n",
3362 classId,
3363 fieldToken,
3364 threadId));
3365
3366 //
3367 // Verify the value of threadId, which must be the current thread ID or NULL, which means to use the current thread ID.
3368 //
3369 if ((threadId != NULL) && (threadId != ((ThreadID)GetThread())))
3370 {
3371 return E_INVALIDARG;
3372 }
3373
3374 threadId = reinterpret_cast<ThreadID>(GetThread());
3375 AppDomainID appDomainId = reinterpret_cast<AppDomainID>(GetAppDomain());
3376
3377 //
3378 // Check for NULL parameters
3379 //
3380 if ((classId == NULL) || (ppAddress == NULL) || !IsManagedThread(threadId) || (appDomainId == NULL))
3381 {
3382 return E_INVALIDARG;
3383 }
3384
3385 return GetThreadStaticAddress2(classId,
3386 fieldToken,
3387 appDomainId,
3388 threadId,
3389 ppAddress);
3390}
3391
3392/*
3393 * GetThreadStaticAddress2
3394 *
3395 * This function returns the absolute address of the given field in the given
3396 * class on the given thread. The field must be a Thread Static token.
3397 *
3398 * Parameters:
3399 * classId - the containing class.
3400 * fieldToken - the field we are querying.
3401 * appDomainId - the AppDomain container.
3402 * threadId - the thread container.
3403 * pAddress - location for storing the resulting address location.
3404 *
3405 * Returns:
3406 * S_OK on success,
3407 * E_INVALIDARG if not a thread static,
3408 * CORPROF_E_DATAINCOMPLETE if not yet initialized.
3409 *
3410 */
3411HRESULT ProfToEEInterfaceImpl::GetThreadStaticAddress2(ClassID classId,
3412 mdFieldDef fieldToken,
3413 AppDomainID appDomainId,
3414 ThreadID threadId,
3415 void **ppAddress)
3416{
3417 CONTRACTL
3418 {
3419 // Yay!
3420 NOTHROW;
3421
3422 // Yay!
3423 GC_NOTRIGGER;
3424
3425 // Yay!
3426 MODE_ANY;
3427
3428 // Yay!
3429 CANNOT_TAKE_LOCK;
3430
3431 SO_NOT_MAINLINE;
3432 }
3433 CONTRACTL_END;
3434
3435 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3436 (LF_CORPROF,
3437 LL_INFO1000,
3438 "**PROF: GetThreadStaticAddress2 0x%p, 0x%08x, 0x%p, 0x%p.\n",
3439 classId,
3440 fieldToken,
3441 appDomainId,
3442 threadId));
3443
3444
3445 if (threadId == NULL)
3446 {
3447 if (GetThread() == NULL)
3448 {
3449 return CORPROF_E_NOT_MANAGED_THREAD;
3450 }
3451
3452 threadId = reinterpret_cast<ThreadID>(GetThread());
3453 }
3454
3455 //
3456 // Check for NULL parameters
3457 //
3458 if ((classId == NULL) || (ppAddress == NULL) || !IsManagedThread(threadId) || (appDomainId == NULL))
3459 {
3460 return E_INVALIDARG;
3461 }
3462
3463 // Some domains, like the system domain, aren't APP domains, and thus don't contain any
3464 // statics. See if the profiler is trying to be naughty.
3465 if (!((BaseDomain*) appDomainId)->IsAppDomain())
3466 {
3467 return E_INVALIDARG;
3468 }
3469
3470 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
3471
3472 //
3473 // If this class is not fully restored, that is all the information we can get at this time.
3474 //
3475 if (!typeHandle.IsRestored())
3476 {
3477 return CORPROF_E_DATAINCOMPLETE;
3478 }
3479
3480 //
3481 // Get the field descriptor object
3482 //
3483 FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
3484
3485 if (pFieldDesc == NULL)
3486 {
3487 return E_INVALIDARG;
3488 }
3489
3490 //
3491 // Verify this field is of the right type
3492 //
3493 if(!pFieldDesc->IsStatic() ||
3494 !pFieldDesc->IsThreadStatic() ||
3495 pFieldDesc->IsRVA())
3496 {
3497 return E_INVALIDARG;
3498 }
3499
3500 // It may seem redundant to try to retrieve the same method table from GetEnclosingMethodTable, but classId
3501 // leads to the instantiated method table while GetEnclosingMethodTable returns the uninstantiated one.
3502 MethodTable *pMethodTable = pFieldDesc->GetEnclosingMethodTable();
3503 AppDomain * pAppDomain = (AppDomain *)appDomainId;
3504
3505 //
3506 // Check that the data is available
3507 //
3508 if (!IsClassOfMethodTableInited(pMethodTable, pAppDomain))
3509 {
3510 return CORPROF_E_DATAINCOMPLETE;
3511 }
3512
3513 //
3514 // Store the result and return
3515 //
3516 PTR_VOID pAddress = (void *)(((Thread *)threadId)->GetStaticFieldAddrNoCreate(pFieldDesc));
3517 if (pAddress == NULL)
3518 {
3519 return E_INVALIDARG;
3520 }
3521
3522 *ppAddress = pAddress;
3523
3524 return S_OK;
3525}
3526
3527/*
3528 * GetContextStaticAddress
3529 *
3530 * This function returns the absolute address of the given field in the given
3531 * class in the given context. The field must be a Context Static token.
3532 *
3533 * Parameters:
3534 * classId - the containing class.
3535 * fieldToken - the field we are querying.
3536 * contextId - the context container.
3537 * pAddress - location for storing the resulting address location.
3538 *
3539 * Returns:
3540 * E_NOTIMPL
3541 *
3542 */
3543HRESULT ProfToEEInterfaceImpl::GetContextStaticAddress(ClassID classId,
3544 mdFieldDef fieldToken,
3545 ContextID contextId,
3546 void **ppAddress)
3547{
3548 CONTRACTL
3549 {
3550 // Yay!
3551 NOTHROW;
3552
3553 // Yay!
3554 GC_NOTRIGGER;
3555
3556 // Yay!
3557 MODE_ANY;
3558
3559 // Yay!
3560 CANNOT_TAKE_LOCK;
3561
3562 SO_NOT_MAINLINE;
3563 }
3564 CONTRACTL_END;
3565
3566 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3567 (LF_CORPROF,
3568 LL_INFO1000,
3569 "**PROF: GetContextStaticAddress 0x%p, 0x%08x, 0x%p.\n",
3570 classId,
3571 fieldToken,
3572 contextId));
3573
3574 return E_NOTIMPL;
3575}
3576
3577/*
3578 * GetAppDomainsContainingModule
3579 *
3580 * This function returns the AppDomains in which the given module has been loaded
3581 *
3582 * Parameters:
3583 * moduleId - the module with static variables.
3584 * cAppDomainIds - the input size of appDomainIds array.
3585 * pcAppDomainIds - the output size of appDomainIds array.
3586 * appDomainIds - the array to be filled up with AppDomainIDs containing initialized
3587 * static variables from the moduleId's module.
3588 *
3589 * Returns:
3590 * S_OK on success,
3591 * E_INVALIDARG for invalid parameters,
3592 * CORPROF_E_DATAINCOMPLETE if moduleId's module is not yet initialized.
3593 *
3594 */
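// Illustrative, hypothetical two-pass call pattern, mirroring the other count/buffer
// APIs on this interface ("pInfo" and "moduleId" are assumed profiler-side state):
//
//     ULONG32 cNeeded = 0;
//     pInfo->GetAppDomainsContainingModule(moduleId, 0, &cNeeded, NULL);
//
//     // ... allocate an array of cNeeded AppDomainIDs ("appDomainIds"), then:
//     ULONG32 cReturned = 0;
//     pInfo->GetAppDomainsContainingModule(moduleId, cNeeded, &cReturned, appDomainIds);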
3595HRESULT ProfToEEInterfaceImpl::GetAppDomainsContainingModule(ModuleID moduleId,
3596 ULONG32 cAppDomainIds,
3597 ULONG32 * pcAppDomainIds,
3598 AppDomainID appDomainIds[])
3599{
3600 CONTRACTL
3601 {
3602 // Yay!
3603 NOTHROW;
3604
3605 // This method iterates over AppDomains, which adds, then releases, a reference on
3606 // each AppDomain iterated. This causes locking, and can cause triggering if the
3607 // AppDomain gets destroyed as a result of the release. (See code:AppDomainIterator::Next
3608 // and its call to code:AppDomain::Release.)
3609 GC_TRIGGERS;
3610
3611 // Yay!
3612 MODE_ANY;
3613
3614 // (See comment above GC_TRIGGERS.)
3615 CAN_TAKE_LOCK;
3616
3617 SO_NOT_MAINLINE;
3618 }
3619 CONTRACTL_END;
3620
3621 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
3622 kP2EEAllowableAfterAttach | kP2EETriggers,
3623 (LF_CORPROF,
3624 LL_INFO1000,
3625 "**PROF: GetAppDomainsContainingModule 0x%p, 0x%08x, 0x%p, 0x%p.\n",
3626 moduleId,
3627 cAppDomainIds,
3628 pcAppDomainIds,
3629 appDomainIds));
3630
3631
3632 //
3633 // Check for NULL parameters
3634 //
3635 if ((moduleId == NULL) || ((appDomainIds == NULL) && (cAppDomainIds != 0)) || (pcAppDomainIds == NULL))
3636 {
3637 return E_INVALIDARG;
3638 }
3639
3640 Module* pModule = reinterpret_cast< Module* >(moduleId);
3641 if (pModule->IsBeingUnloaded())
3642 {
3643 return CORPROF_E_DATAINCOMPLETE;
3644 }
3645
3646 // IterateAppDomainContainingModule uses AppDomainIterator, which cannot be called while the current thread
3647 // is holding the ThreadStore lock.
3648 if (ThreadStore::HoldingThreadStore())
3649 {
3650 return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
3651 }
3652
3653 IterateAppDomainContainingModule iterateAppDomainContainingModule(pModule, cAppDomainIds, pcAppDomainIds, appDomainIds);
3654
3655 return iterateAppDomainContainingModule.PopulateArray();
3656}
3657
3658
3659
3660/*
3661 * GetStaticFieldInfo
3662 *
3663 * This function returns a bit mask describing what kind of static the
3664 * given field is.
3665 *
3666 * Parameters:
3667 * classId - the containing class.
3668 * fieldToken - the field we are querying.
3669 * pFieldInfo - location for storing the resulting bit mask.
3670 *
3671 * Returns:
3672 * S_OK on success,
3673 * E_INVALIDARG if pFieldInfo is NULL
3674 *
3675 */
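// Illustrative, hypothetical sketch: the returned bit mask is typically used to pick
// which Get*StaticAddress entrypoint to call for the field ("pInfo", "classId",
// "fieldToken" and "appDomainId" are assumed profiler-side state):
//
//     COR_PRF_STATIC_TYPE fieldInfo = COR_PRF_FIELD_NOT_A_STATIC;
//     void * pAddress = NULL;
//     if (SUCCEEDED(pInfo->GetStaticFieldInfo(classId, fieldToken, &fieldInfo)))
//     {
//         if (fieldInfo & COR_PRF_FIELD_RVA_STATIC)
//             pInfo->GetRVAStaticAddress(classId, fieldToken, &pAddress);
//         else if (fieldInfo & COR_PRF_FIELD_THREAD_STATIC)
//             pInfo->GetThreadStaticAddress(classId, fieldToken, NULL, &pAddress);
//         else if (fieldInfo & COR_PRF_FIELD_APP_DOMAIN_STATIC)
//             pInfo->GetAppDomainStaticAddress(classId, fieldToken, appDomainId, &pAddress);
//     }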
3676HRESULT ProfToEEInterfaceImpl::GetStaticFieldInfo(ClassID classId,
3677 mdFieldDef fieldToken,
3678 COR_PRF_STATIC_TYPE *pFieldInfo)
3679{
3680 CONTRACTL
3681 {
3682 // Yay!
3683 NOTHROW;
3684
3685 // Yay!
3686 GC_NOTRIGGER;
3687
3688 // Yay!
3689 MODE_ANY;
3690
3691 // Yay!
3692 EE_THREAD_NOT_REQUIRED;
3693
3694 // Yay!
3695 CANNOT_TAKE_LOCK;
3696
3697 SO_NOT_MAINLINE;
3698 }
3699 CONTRACTL_END;
3700
3701 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
3702 (LF_CORPROF,
3703 LL_INFO1000,
3704 "**PROF: GetStaticFieldInfo 0x%p, 0x%08x.\n",
3705 classId,
3706 fieldToken));
3707
3708 //
3709 // Check for NULL parameters
3710 //
3711 if ((classId == NULL) || (pFieldInfo == NULL))
3712 {
3713 return E_INVALIDARG;
3714 }
3715
3716 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
3717
3718 //
3719 // If this class is not fully restored, that is all the information we can get at this time.
3720 //
3721 if (!typeHandle.IsRestored())
3722 {
3723 return CORPROF_E_DATAINCOMPLETE;
3724 }
3725
3726 //
3727 // Get the field descriptor object
3728 //
3729 FieldDesc *pFieldDesc = typeHandle.GetModule()->LookupFieldDef(fieldToken);
3730
3731 if (pFieldDesc == NULL)
3732 {
3733 return E_INVALIDARG;
3734 }
3735
3736 *pFieldInfo = COR_PRF_FIELD_NOT_A_STATIC;
3737
3738 if (pFieldDesc->IsRVA())
3739 {
3740 *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_RVA_STATIC);
3741 }
3742
3743 if (pFieldDesc->IsThreadStatic())
3744 {
3745 *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_THREAD_STATIC);
3746 }
3747
3748 if ((*pFieldInfo == COR_PRF_FIELD_NOT_A_STATIC) && pFieldDesc->IsStatic())
3749 {
3750 *pFieldInfo = (COR_PRF_STATIC_TYPE)(*pFieldInfo | COR_PRF_FIELD_APP_DOMAIN_STATIC);
3751 }
3752
3753 return S_OK;
3754}
3755
3756
3757
3758/*
3759 * GetClassIDInfo2
3760 *
3761 * This function generalizes GetClassIDInfo for all types, both generic and non-generic. It returns
3762 * the module, type token, and an array of instantiation classIDs that were used to instantiate the
3763 * given classId.
3764 *
3765 * Parameters:
3766 * classId - The classId (TypeHandle) to query information about.
3767 * pParentClassId - The ClassID (TypeHandle) of the parent class.
3768 * pModuleId - An optional parameter for returning the module of the class.
3769 * pTypeDefToken - An optional parameter for returning the metadata token of the class.
3770 * cNumTypeArgs - The count of the size of the array typeArgs
3771 * pcNumTypeArgs - Returns the number of elements of typeArgs filled in, or if typeArgs is NULL
3772 * the number that would be needed.
3773 * typeArgs - An array to store generic type parameters for the class.
3774 *
3775 * Returns:
3776 * S_OK if successful.
3777 */
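// Illustrative, hypothetical two-pass sketch for retrieving a generic type's
// instantiation ("pInfo" and "classId" are assumed profiler-side state):
//
//     ULONG32 cTypeArgs = 0;
//     pInfo->GetClassIDInfo2(classId, NULL, NULL, NULL, 0, &cTypeArgs, NULL);
//
//     // ... allocate an array of cTypeArgs ClassIDs ("typeArgs"), then:
//     ModuleID  moduleId = NULL;
//     mdTypeDef typeDef = mdTypeDefNil;
//     ClassID   parentClassId = NULL;
//     pInfo->GetClassIDInfo2(classId, &moduleId, &typeDef, &parentClassId,
//                            cTypeArgs, &cTypeArgs, typeArgs);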
3778HRESULT ProfToEEInterfaceImpl::GetClassIDInfo2(ClassID classId,
3779 ModuleID *pModuleId,
3780 mdTypeDef *pTypeDefToken,
3781 ClassID *pParentClassId,
3782 ULONG32 cNumTypeArgs,
3783 ULONG32 *pcNumTypeArgs,
3784 ClassID typeArgs[])
3785{
3786
3787 CONTRACTL
3788 {
3789 // Yay!
3790 NOTHROW;
3791
3792 // Yay!
3793 GC_NOTRIGGER;
3794
3795 // Yay!
3796 MODE_ANY;
3797
3798 // Yay!
3799 EE_THREAD_NOT_REQUIRED;
3800
3801 // Yay!
3802 CANNOT_TAKE_LOCK;
3803
3804 SO_NOT_MAINLINE;
3805
3806 PRECONDITION(CheckPointer(pParentClassId, NULL_OK));
3807 PRECONDITION(CheckPointer(pModuleId, NULL_OK));
3808 PRECONDITION(CheckPointer(pTypeDefToken, NULL_OK));
3809 PRECONDITION(CheckPointer(pcNumTypeArgs, NULL_OK));
3810 PRECONDITION(CheckPointer(typeArgs, NULL_OK));
3811 }
3812 CONTRACTL_END;
3813
3814 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
3815 (LF_CORPROF,
3816 LL_INFO1000,
3817 "**PROF: GetClassIDInfo2 0x%p.\n",
3818 classId));
3819
3820 //
3821 // Verify parameters.
3822 //
3823 if (classId == NULL)
3824 {
3825 return E_INVALIDARG;
3826 }
3827
3828 if ((cNumTypeArgs != 0) && (typeArgs == NULL))
3829 {
3830 return E_INVALIDARG;
3831 }
3832
3833 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
3834
3835 //
3836 // If this class is not fully restored, that is all the information we can get at this time.
3837 //
3838 if (!typeHandle.IsRestored())
3839 {
3840 return CORPROF_E_DATAINCOMPLETE;
3841 }
3842
3843 //
3844 // Handle globals, which don't have instances.
3845 //
3846 if (classId == PROFILER_GLOBAL_CLASS)
3847 {
3848 if (pParentClassId != NULL)
3849 {
3850 *pParentClassId = NULL;
3851 }
3852
3853 if (pModuleId != NULL)
3854 {
3855 *pModuleId = PROFILER_GLOBAL_MODULE;
3856 }
3857
3858 if (pTypeDefToken != NULL)
3859 {
3860 *pTypeDefToken = mdTokenNil;
3861 }
3862
3863 return S_OK;
3864 }
3865
3866 //
3867 // Do not do arrays via this API
3868 //
3869 ARRAY_KIND arrayKind = ArrayKindFromTypeHandle(typeHandle);
3870 if (arrayKind == ARRAY_KIND_TYPEDESC || arrayKind == ARRAY_KIND_METHODTABLE)
3871 {
3872 return CORPROF_E_CLASSID_IS_ARRAY;
3873 }
3874
3875 _ASSERTE (arrayKind == ARRAY_KIND_NOTARRAY);
3876
3877 if (typeHandle.IsTypeDesc())
3878 {
3879 // Not an array, but still a typedesc? We don't know how to
3880 // deal with those.
3881 return CORPROF_E_CLASSID_IS_COMPOSITE;
3882 }
3883
3884 //
3885 // Fill in the basic information
3886 //
3887 if (pParentClassId != NULL)
3888 {
3889 TypeHandle parentTypeHandle = typeHandle.GetParent();
3890 if (!parentTypeHandle.IsNull())
3891 {
3892 *pParentClassId = TypeHandleToClassID(parentTypeHandle);
3893 }
3894 else
3895 {
3896 *pParentClassId = NULL;
3897 }
3898 }
3899
3900 if (pModuleId != NULL)
3901 {
3902 *pModuleId = (ModuleID) typeHandle.GetModule();
3903 _ASSERTE(*pModuleId != NULL);
3904 }
3905
3906 if (pTypeDefToken != NULL)
3907 {
3908 *pTypeDefToken = typeHandle.GetCl();
3909 _ASSERTE(*pTypeDefToken != NULL);
3910 }
3911
3912 //
3913 // See if they are just looking to get the buffer size.
3914 //
3915 if (cNumTypeArgs == 0)
3916 {
3917 if (pcNumTypeArgs != NULL)
3918 {
3919 *pcNumTypeArgs = typeHandle.GetMethodTable()->GetNumGenericArgs();
3920 }
3921 return S_OK;
3922 }
3923
3924 //
3925 // Adjust the count for the size of the given array.
3926 //
3927 if (cNumTypeArgs > typeHandle.GetMethodTable()->GetNumGenericArgs())
3928 {
3929 cNumTypeArgs = typeHandle.GetMethodTable()->GetNumGenericArgs();
3930 }
3931
3932 if (pcNumTypeArgs != NULL)
3933 {
3934 *pcNumTypeArgs = cNumTypeArgs;
3935 }
3936
3937 //
3938 // Copy over the instantiating types.
3939 //
3940 ULONG32 count;
3941 Instantiation inst = typeHandle.GetMethodTable()->GetInstantiation();
3942
3943 for (count = 0; count < cNumTypeArgs; count ++)
3944 {
3945 typeArgs[count] = TypeHandleToClassID(inst[count]);
3946 }
3947
3948 return S_OK;
3949}
3950
3951HRESULT ProfToEEInterfaceImpl::GetModuleInfo(ModuleID moduleId,
3952 LPCBYTE * ppBaseLoadAddress,
3953 ULONG cchName,
3954 ULONG * pcchName,
3955 __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[],
3956 AssemblyID * pAssemblyId)
3957{
3958 CONTRACTL
3959 {
3960 // Yay!
3961 NOTHROW;
3962
3963 // Yay!
3964 GC_NOTRIGGER;
3965
3966 // See comment in code:ProfToEEInterfaceImpl::GetModuleInfo2
3967 CAN_TAKE_LOCK;
3968
3969 // Yay!
3970 MODE_ANY;
3971
3972 // Yay!
3973 EE_THREAD_NOT_REQUIRED;
3974
3975 SO_NOT_MAINLINE;
3976
3977 PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
3978 PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
3979 PRECONDITION(CheckPointer(pcchName, NULL_OK));
3980 PRECONDITION(CheckPointer(wszName, NULL_OK));
3981 PRECONDITION(CheckPointer(pAssemblyId, NULL_OK));
3982 }
3983 CONTRACTL_END;
3984
3985 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
3986 (LF_CORPROF,
3987 LL_INFO1000,
3988 "**PROF: GetModuleInfo 0x%p.\n",
3989 moduleId));
3990
3991 // Parameter validation is taken care of in GetModuleInfo2.
3992
3993 return GetModuleInfo2(
3994 moduleId,
3995 ppBaseLoadAddress,
3996 cchName,
3997 pcchName,
3998 wszName,
3999 pAssemblyId,
4000 NULL); // Don't need module type
4001}
4002
4003//---------------------------------------------------------------------------------------
4004//
4005// Helper used by GetModuleInfo2 to determine the bitmask of COR_PRF_MODULE_FLAGS for
4006// the specified module.
4007//
4008// Arguments:
4009// pModule - Module to get the flags for
4010//
4011// Return Value:
4012// Bitmask of COR_PRF_MODULE_FLAGS corresponding to pModule
4013//
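// As a usage illustration only: a profiler typically obtains these flags through the
// pdwModuleFlags out-parameter of GetModuleInfo2 and tests individual bits ("pInfo" and
// "moduleId" are assumed profiler-side state):
//
//     DWORD moduleFlags = 0;
//     if (SUCCEEDED(pInfo->GetModuleInfo2(moduleId, NULL, 0, NULL, NULL, NULL, &moduleFlags)))
//     {
//         BOOL isDynamic          = (moduleFlags & COR_PRF_MODULE_DYNAMIC) != 0;
//         BOOL isNgenOrReadyToRun = (moduleFlags & COR_PRF_MODULE_NGEN)    != 0;
//     }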
4014
4015DWORD ProfToEEInterfaceImpl::GetModuleFlags(Module * pModule)
4016{
4017 CONTRACTL
4018 {
4019 NOTHROW;
4020 GC_NOTRIGGER;
4021 CAN_TAKE_LOCK; // IsWindowsRuntimeModule accesses metadata directly, which takes locks
4022 MODE_ANY;
4023 }
4024 CONTRACTL_END;
4025
4026 PEFile * pPEFile = pModule->GetFile();
4027 if (pPEFile == NULL)
4028 {
4029 // Hopefully this should never happen; but just in case, don't try to determine the
4030 // flags without a PEFile.
4031 return 0;
4032 }
4033
4034 DWORD dwRet = 0;
4035
4036 // First, set the flags that are dependent on which PEImage / layout we look at
4037 // inside the Module (disk/ngen/flat)
4038
4039 if (pModule->HasNativeImage())
4040 {
4041 // NGEN
4042 dwRet |= (COR_PRF_MODULE_DISK | COR_PRF_MODULE_NGEN);
4043
4044 // Intentionally not checking for flat, since NGEN PEImages never have flat
4045 // layouts.
4046 }
4047 else
4048 {
4049#ifdef FEATURE_READYTORUN
4050 // pModule->HasNativeImage() returns false for ReadyToRun images
4051 if (pModule->IsReadyToRun())
4052 {
4053 // Ready To Run
4054 dwRet |= (COR_PRF_MODULE_DISK | COR_PRF_MODULE_NGEN);
4055 }
4056#endif
4057 // Not NGEN or ReadyToRun.
4058 if (pPEFile->HasOpenedILimage())
4059 {
4060 PEImage * pILImage = pPEFile->GetOpenedILimage();
4061 if (pILImage->IsFile())
4062 {
4063 dwRet |= COR_PRF_MODULE_DISK;
4064 }
4065 if (pPEFile->GetLoadedIL()->IsFlat())
4066 {
4067 dwRet |= COR_PRF_MODULE_FLAT_LAYOUT;
4068 }
4069 }
4070 }
4071
4072 if (pModule->IsReflection())
4073 {
4074 dwRet |= COR_PRF_MODULE_DYNAMIC;
4075 }
4076
4077 if (pModule->IsCollectible())
4078 {
4079 dwRet |= COR_PRF_MODULE_COLLECTIBLE;
4080 }
4081
4082 if (pModule->IsResource())
4083 {
4084 dwRet |= COR_PRF_MODULE_RESOURCE;
4085 }
4086
4087 if (pModule->IsWindowsRuntimeModule())
4088 {
4089 dwRet |= COR_PRF_MODULE_WINDOWS_RUNTIME;
4090 }
4091
4092 return dwRet;
4093}
4094
4095HRESULT ProfToEEInterfaceImpl::GetModuleInfo2(ModuleID moduleId,
4096 LPCBYTE * ppBaseLoadAddress,
4097 ULONG cchName,
4098 ULONG * pcchName,
4099 __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[],
4100 AssemblyID * pAssemblyId,
4101 DWORD * pdwModuleFlags)
4102{
4103 CONTRACTL
4104 {
4105 // Yay!
4106 NOTHROW;
4107
4108 // Yay!
4109 GC_NOTRIGGER;
4110
4111 // The pModule->GetScopeName() call below can result in locks getting taken to
4112 // access the metadata implementation. However, these locks do not do a mode
4113 // change.
4114 CAN_TAKE_LOCK;
4115
4116 // Yay!
4117 MODE_ANY;
4118
4119 // Yay!
4120 EE_THREAD_NOT_REQUIRED;
4121
4122 SO_NOT_MAINLINE;
4123
4124 PRECONDITION(CheckPointer((Module *)moduleId, NULL_OK));
4125 PRECONDITION(CheckPointer(ppBaseLoadAddress, NULL_OK));
4126 PRECONDITION(CheckPointer(pcchName, NULL_OK));
4127 PRECONDITION(CheckPointer(wszName, NULL_OK));
4128 PRECONDITION(CheckPointer(pAssemblyId, NULL_OK));
4129 }
4130 CONTRACTL_END;
4131
4132 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
4133 (LF_CORPROF,
4134 LL_INFO1000,
4135 "**PROF: GetModuleInfo2 0x%p.\n",
4136 moduleId));
4137
4138 if (moduleId == NULL)
4139 {
4140 return E_INVALIDARG;
4141 }
4142
4143 Module * pModule = (Module *) moduleId;
4144 if (pModule->IsBeingUnloaded())
4145 {
4146 return CORPROF_E_DATAINCOMPLETE;
4147 }
4148
4149 HRESULT hr = S_OK;
4150
4151 EX_TRY
4152 {
4153
4154 PEFile * pFile = pModule->GetFile();
4155
4156 // Pick some safe defaults to begin with.
4157 if (ppBaseLoadAddress != NULL)
4158 *ppBaseLoadAddress = 0;
4159 if (wszName != NULL)
4160 *wszName = 0;
4161 if (pcchName != NULL)
4162 *pcchName = 0;
4163 if (pAssemblyId != NULL)
4164 *pAssemblyId = PROFILER_PARENT_UNKNOWN;
4165
4166 // Module flags can be determined first without fear of error
4167 if (pdwModuleFlags != NULL)
4168 *pdwModuleFlags = GetModuleFlags(pModule);
4169
4170 // Get the module file name
4171 LPCWSTR wszFileName = pFile->GetPath();
4172 _ASSERTE(wszFileName != NULL);
4173 PREFIX_ASSUME(wszFileName != NULL);
4174
4175 // If there is no filename, which is the case for RefEmit modules and for SQL
4176 // modules, then rather than returning an empty string for the name, just use the
4177 // module name from metadata (a.k.a. Module.ScopeName). This is required to
4178 // support SQL F1 sampling profiling.
4179 StackSString strScopeName;
4180 LPCUTF8 szScopeName = NULL;
4181 if ((*wszFileName == W('\0')) && SUCCEEDED(pModule->GetScopeName(&szScopeName)))
4182 {
4183 strScopeName.SetUTF8(szScopeName);
4184 strScopeName.Normalize();
4185 wszFileName = strScopeName.GetUnicode();
4186 }
4187
4188 ULONG trueLen = (ULONG)(wcslen(wszFileName) + 1);
4189
4190 // Return name of module as required.
4191 if (wszName && cchName > 0)
4192 {
4193 if (cchName < trueLen)
4194 {
4195 hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
4196 }
4197 else
4198 {
4199 wcsncpy_s(wszName, cchName, wszFileName, trueLen);
4200 }
4201 }
4202
4203 // If they request the actual length of the name
4204 if (pcchName)
4205 *pcchName = trueLen;
4206
4207 if (ppBaseLoadAddress != NULL && !pFile->IsDynamic())
4208 {
4209 if (pModule->IsProfilerNotified())
4210 {
4211 // Set the base load address -- this could be null in certain error conditions
4212 *ppBaseLoadAddress = pModule->GetProfilerBase();
4213 }
4214 else
4215 {
4216 *ppBaseLoadAddress = NULL;
4217 }
4218
4219 if (*ppBaseLoadAddress == NULL)
4220 {
4221 hr = CORPROF_E_DATAINCOMPLETE;
4222 }
4223 }
4224
4225 // Return the parent assembly for this module if desired.
4226 if (pAssemblyId != NULL)
4227 {
4228            // Lie and say the assembly isn't available until we are loaded (even though it is).
4229            // This is for backward compatibility - we may want to change it
4230 if (pModule->IsProfilerNotified())
4231 {
4232 Assembly *pAssembly = pModule->GetAssembly();
4233 _ASSERTE(pAssembly);
4234
4235 *pAssemblyId = (AssemblyID) pAssembly;
4236 }
4237 else
4238 {
4239 hr = CORPROF_E_DATAINCOMPLETE;
4240 }
4241 }
4242 }
4243 EX_CATCH_HRESULT(hr);
4244
4245 return (hr);
4246}
4247
4248
4249/*
4250 * Get a metadata interface instance which maps to the given module.
4251 * One may ask for the metadata to be opened in read+write mode, but
4252 * this will result in slower metadata execution of the program, because
4253 * changes made to the metadata cannot be optimized as they were by
4254 * the compiler.
4255 */
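// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): a
// read-only metadata import is the common request.
//
//     IMetaDataImport * pImport = NULL;
//     HRESULT hr = pInfo->GetModuleMetaData(moduleId,
//                                           ofRead,
//                                           IID_IMetaDataImport,
//                                           (IUnknown **) &pImport);
//     // ... use pImport, then pImport->Release();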
4256HRESULT ProfToEEInterfaceImpl::GetModuleMetaData(ModuleID moduleId,
4257 DWORD dwOpenFlags,
4258 REFIID riid,
4259 IUnknown **ppOut)
4260{
4261 CONTRACTL
4262 {
4263 // Yay!
4264 NOTHROW;
4265
4266 // Yay!
4267 GC_NOTRIGGER;
4268
4269 // Yay!
4270 MODE_ANY;
4271
4272 // Currently, this function is technically EE_THREAD_REQUIRED because
4273 // some functions in synch.cpp assert that there is a Thread object,
4274 // but we might be able to lift that restriction and make this be
4275 // EE_THREAD_NOT_REQUIRED.
4276
4277 // PEFile::GetRWImporter & PEFile::GetEmitter &
4278 // GetReadablePublicMetaDataInterface take locks
4279 CAN_TAKE_LOCK;
4280
4281 SO_NOT_MAINLINE;
4282 }
4283 CONTRACTL_END;
4284
4285 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
4286 (LF_CORPROF,
4287 LL_INFO1000,
4288 "**PROF: GetModuleMetaData 0x%p, 0x%08x.\n",
4289 moduleId,
4290 dwOpenFlags));
4291
4292 if (moduleId == NULL)
4293 {
4294 return E_INVALIDARG;
4295 }
4296
4297 // Check for unsupported bits, and return E_INVALIDARG if present
4298 if ((dwOpenFlags & ~(ofNoTransform | ofRead | ofWrite)) != 0)
4299 {
4300 return E_INVALIDARG;
4301 }
4302
4303 Module * pModule;
4304 HRESULT hr = S_OK;
4305
4306 pModule = (Module *) moduleId;
4307 _ASSERTE(pModule != NULL);
4308 if (pModule->IsBeingUnloaded())
4309 {
4310 return CORPROF_E_DATAINCOMPLETE;
4311 }
4312
4313 // Make sure we can get the importer first
4314 if (pModule->IsResource())
4315 {
4316 if (ppOut)
4317 *ppOut = NULL;
4318 return S_FALSE;
4319 }
4320
4321    // Decide which type of open mode was requested to see which interface is required.
4322 if ((dwOpenFlags & ofWrite) == 0)
4323 {
4324 // Readable interface
4325 return pModule->GetReadablePublicMetaDataInterface(dwOpenFlags, riid, (LPVOID *) ppOut);
4326 }
4327
4328 // Writeable interface
4329 IUnknown *pObj = NULL;
4330 EX_TRY
4331 {
4332 pObj = pModule->GetValidatedEmitter();
4333 }
4334 EX_CATCH_HRESULT_NO_ERRORINFO(hr);
4335
4336 // Ask for the interface the caller wanted, only if they provide a out param
4337 if (SUCCEEDED(hr) && ppOut)
4338 hr = pObj->QueryInterface(riid, (void **) ppOut);
4339
4340 return (hr);
4341}
4342
4343
4344/*
4345 * Retrieve a pointer to the body of a method starting at its header.
4346 * A method is scoped by the module it lives in. Because this function
4347 * is designed to give a tool access to IL before it has been loaded
4348 * by the Runtime, it uses the metadata token of the method to find
4349 * the instance desired. Note that this function has no effect on
4350 * already compiled code.
4351 */
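// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): fetch
// the original IL for a methodDef, e.g. from a ModuleLoadFinished callback.
//
//     LPCBYTE pMethodHeader = NULL;
//     ULONG cbMethodSize = 0;
//     HRESULT hr = pInfo->GetILFunctionBody(moduleId, methodDef,
//                                           &pMethodHeader, &cbMethodSize);
//     // On success, pMethodHeader points at the IL method header (tiny or fat) followed
//     // by the IL itself, and cbMethodSize is the total size in bytes.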
4352HRESULT ProfToEEInterfaceImpl::GetILFunctionBody(ModuleID moduleId,
4353 mdMethodDef methodId,
4354 LPCBYTE *ppMethodHeader,
4355 ULONG *pcbMethodSize)
4356{
4357 CONTRACTL
4358 {
4359 // Yay!
4360 NOTHROW;
4361
4362 // Yay!
4363 GC_NOTRIGGER;
4364
4365 // Yay!
4366 MODE_ANY;
4367
4368 // PEFile::CheckLoaded & Module::GetDynamicIL both take a lock
4369 CAN_TAKE_LOCK;
4370
4371 SO_NOT_MAINLINE;
4372 }
4373 CONTRACTL_END;
4374
4375
4376 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(
4377 kP2EEAllowableAfterAttach,
4378 (LF_CORPROF,
4379 LL_INFO1000,
4380 "**PROF: GetILFunctionBody 0x%p, 0x%08x.\n",
4381 moduleId,
4382 methodId));
4383
4384 Module * pModule; // Working pointer for real class.
4385 ULONG RVA; // Return RVA of the method body.
4386 DWORD dwImplFlags; // Flags for the item.
4387
4388 if ((moduleId == NULL) ||
4389 (methodId == mdMethodDefNil) ||
4390 (methodId == 0) ||
4391 (TypeFromToken(methodId) != mdtMethodDef))
4392 {
4393 return E_INVALIDARG;
4394 }
4395
4396 pModule = (Module *) moduleId;
4397 _ASSERTE(pModule != NULL && methodId != mdMethodDefNil);
4398 if (pModule->IsBeingUnloaded())
4399 {
4400 return CORPROF_E_DATAINCOMPLETE;
4401 }
4402
4403 // Find the method body based on metadata.
4404 IMDInternalImport *pImport = pModule->GetMDImport();
4405 _ASSERTE(pImport);
4406
4407 PEFile *pFile = pModule->GetFile();
4408
4409 if (!pFile->CheckLoaded())
4410 return (CORPROF_E_DATAINCOMPLETE);
4411
4412 LPCBYTE pbMethod = NULL;
4413
4414 // Don't return rewritten IL, use the new API to get that.
4415 pbMethod = (LPCBYTE) pModule->GetDynamicIL(methodId, FALSE);
4416
4417    // Method not overridden - get the original copy of the IL by going to metadata
4418 if (pbMethod == NULL)
4419 {
4420 HRESULT hr = S_OK;
4421 IfFailRet(pImport->GetMethodImplProps(methodId, &RVA, &dwImplFlags));
4422
4423 // Check to see if the method has associated IL
4424 if ((RVA == 0 && !pFile->IsDynamic()) || !(IsMiIL(dwImplFlags) || IsMiOPTIL(dwImplFlags) || IsMiInternalCall(dwImplFlags)))
4425 {
4426 return (CORPROF_E_FUNCTION_NOT_IL);
4427 }
4428
4429 EX_TRY
4430 {
4431 // Get the location of the IL
4432 pbMethod = (LPCBYTE) (pModule->GetIL(RVA));
4433 }
4434 EX_CATCH_HRESULT(hr);
4435
4436 if (FAILED(hr))
4437 {
4438 return hr;
4439 }
4440 }
4441
4442 // Fill out param if provided
4443 if (ppMethodHeader)
4444 *ppMethodHeader = pbMethod;
4445
4446 // Calculate the size of the method itself.
4447 if (pcbMethodSize)
4448 {
4449 if (!FitsIn<ULONG>(PEDecoder::ComputeILMethodSize((TADDR)pbMethod)))
4450 {
4451 return E_UNEXPECTED;
4452 }
4453 *pcbMethodSize = static_cast<ULONG>(PEDecoder::ComputeILMethodSize((TADDR)pbMethod));
4454 }
4455 return (S_OK);
4456}
4457
4458//---------------------------------------------------------------------------------------
4459// Retrieves an IMethodMalloc pointer around a ModuleILHeap instance that will own
4460// allocating heap space for this module (for IL rewriting).
4461//
4462// Arguments:
4463// moduleId - ModuleID this allocator shall allocate for
4464// ppMalloc - [out] IMethodMalloc pointer the profiler will use for allocation requests
4465// against this module
4466//
4467// Return value
4468// HRESULT indicating success / failure
4469//
4470// Notes
4471//    IL method bodies used to have the requirement that they must be referenced as
4472//    RVA's to the loaded module, which means they come after the module within
4473//    METHOD_MAX_RVA.  In order to make it easier for a tool to swap out the body of
4474//    a method, this allocator ensures the memory it hands out comes after that point.
4475//
4476//    That requirement is now completely gone, so there's nothing terribly special
4477//    about this allocator; we just keep it around for legacy purposes.
4478
4479HRESULT ProfToEEInterfaceImpl::GetILFunctionBodyAllocator(ModuleID moduleId,
4480 IMethodMalloc ** ppMalloc)
4481{
4482 CONTRACTL
4483 {
4484 // Yay!
4485 NOTHROW;
4486
4487 // ModuleILHeap::FindOrCreateHeap may take a Crst if it
4488 // needs to create a new allocator and add it to the list. Taking a crst
4489 // switches to preemptive, which is effectively a GC trigger
4490 GC_TRIGGERS;
4491
4492 // Yay!
4493 MODE_ANY;
4494
4495 // Yay!
4496 EE_THREAD_NOT_REQUIRED;
4497
4498 // (see GC_TRIGGERS comment)
4499 CAN_TAKE_LOCK;
4500
4501 SO_NOT_MAINLINE;
4502 }
4503 CONTRACTL_END;
4504
4505 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
4506 kP2EEAllowableAfterAttach | kP2EETriggers,
4507 (LF_CORPROF,
4508 LL_INFO1000,
4509 "**PROF: GetILFunctionBodyAllocator 0x%p.\n",
4510 moduleId));
4511
4512 if ((moduleId == NULL) || (ppMalloc == NULL))
4513 {
4514 return E_INVALIDARG;
4515 }
4516
4517 Module * pModule = (Module *) moduleId;
4518
4519 if (pModule->IsBeingUnloaded() ||
4520 !pModule->GetFile()->CheckLoaded())
4521 {
4522 return (CORPROF_E_DATAINCOMPLETE);
4523 }
4524
4525 *ppMalloc = &ModuleILHeap::s_Heap;
4526 return S_OK;
4527}
4528
4529/*
4530 * Replaces the method body for a function in a module. This will replace
4531 * the RVA of the method in the metadata to point to this new method body,
4532 * and adjust any internal data structures as required. This function can
4533 * only be called on those methods which have never been compiled by a JITTER.
4534 * Please use the GetILFunctionBodyAllocator to allocate space for the new method to
4535 * ensure the buffer is compatible.
4536 */
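// Illustrative profiler-side sketch (not part of the EE; names are hypothetical) of the
// typical IL-rewriting sequence: allocate from the module's allocator, build the new
// body there, then swap it in before the method is JITted. Error handling is omitted.
//
//     IMethodMalloc * pMalloc = NULL;
//     pInfo->GetILFunctionBodyAllocator(moduleId, &pMalloc);
//
//     LPBYTE pNewBody = (LPBYTE) pMalloc->Alloc(cbNewBody);
//     memcpy(pNewBody, pRewrittenHeaderAndIL, cbNewBody);
//     pMalloc->Release();
//
//     pInfo->SetILFunctionBody(moduleId, methodDef, pNewBody);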
4537HRESULT ProfToEEInterfaceImpl::SetILFunctionBody(ModuleID moduleId,
4538 mdMethodDef methodId,
4539 LPCBYTE pbNewILMethodHeader)
4540{
4541 CONTRACTL
4542 {
4543 // PEFile::GetEmitter, Module::SetDynamicIL all throw
4544 THROWS;
4545
4546        // Locks are taken (see CAN_TAKE_LOCK below), which may cause a mode switch to
4547        // preemptive, which is effectively a GC trigger.
4548 GC_TRIGGERS;
4549
4550 // Yay!
4551 MODE_ANY;
4552
4553 // Module::SetDynamicIL & PEFile::CheckLoaded & PEFile::GetEmitter take locks
4554 CAN_TAKE_LOCK;
4555
4556 SO_NOT_MAINLINE;
4557 }
4558 CONTRACTL_END;
4559
4560 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
4561 kP2EEAllowableAfterAttach | kP2EETriggers,
4562 (LF_CORPROF,
4563 LL_INFO1000,
4564 "**PROF: SetILFunctionBody 0x%p, 0x%08x.\n",
4565 moduleId,
4566 methodId));
4567
4568 if ((moduleId == NULL) ||
4569 (methodId == mdMethodDefNil) ||
4570 (TypeFromToken(methodId) != mdtMethodDef) ||
4571 (pbNewILMethodHeader == NULL))
4572 {
4573 return E_INVALIDARG;
4574 }
4575
4576 Module *pModule; // Working pointer for real class.
4577 HRESULT hr = S_OK;
4578
4579 // Cannot set the body for anything other than a method def
4580 if (TypeFromToken(methodId) != mdtMethodDef)
4581 return (E_INVALIDARG);
4582
4583 // Cast module to appropriate type
4584 pModule = (Module *) moduleId;
4585 _ASSERTE (pModule != NULL); // Enforced in CorProfInfo::SetILFunctionBody
4586 if (pModule->IsBeingUnloaded())
4587 {
4588 return CORPROF_E_DATAINCOMPLETE;
4589 }
4590
4591 // Remember the profiler is doing this, as that means we must never detach it!
4592 g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
4593
4594 // This action is not temporary!
4595    // If the profiler wants to be able to revert, it needs to use
4596 // the new ReJIT APIs.
4597 pModule->SetDynamicIL(methodId, (TADDR)pbNewILMethodHeader, FALSE);
4598
4599 return (hr);
4600}
4601
4602/*
4603 * Sets the codemap for the replaced IL function body
4604 */
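// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): report
// how offsets in the instrumented body map back to the original IL so the debugger can
// keep sequence points sensible.
//
//     COR_IL_MAP map[2];
//     map[0].oldOffset = 0;     // offset in the original IL
//     map[0].newOffset = 12;    // offset in the instrumented IL (e.g., after an injected probe)
//     map[0].fAccurate = TRUE;
//     map[1].oldOffset = 5;
//     map[1].newOffset = 17;
//     map[1].fAccurate = TRUE;
//     pInfo->SetILInstrumentedCodeMap(functionId, TRUE /* fStartJit */, 2, map);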
4605HRESULT ProfToEEInterfaceImpl::SetILInstrumentedCodeMap(FunctionID functionId,
4606 BOOL fStartJit,
4607 ULONG cILMapEntries,
4608 COR_IL_MAP rgILMapEntries[])
4609{
4610 CONTRACTL
4611 {
4612 // Debugger::SetILInstrumentedCodeMap throws
4613 THROWS;
4614
4615 // Debugger::SetILInstrumentedCodeMap triggers
4616 GC_TRIGGERS;
4617
4618 // Yay!
4619 MODE_ANY;
4620
4621 // Yay!
4622 EE_THREAD_NOT_REQUIRED;
4623
4624 // Debugger::SetILInstrumentedCodeMap takes a lock when it calls Debugger::GetOrCreateMethodInfo
4625 CAN_TAKE_LOCK;
4626
4627 SO_NOT_MAINLINE;
4628 }
4629 CONTRACTL_END;
4630
4631 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
4632 kP2EEAllowableAfterAttach | kP2EETriggers,
4633 (LF_CORPROF,
4634 LL_INFO1000,
4635 "**PROF: SetILInstrumentedCodeMap 0x%p, %d.\n",
4636 functionId,
4637 fStartJit));
4638
4639 if (functionId == NULL)
4640 {
4641 return E_INVALIDARG;
4642 }
4643
4644 if (cILMapEntries >= (MAXULONG / sizeof(COR_IL_MAP)))
4645 {
4646 // Too big! The allocation below would overflow when calculating the size.
4647 return E_INVALIDARG;
4648 }
4649
4650
4651#ifdef DEBUGGING_SUPPORTED
4652
4653 MethodDesc *pMethodDesc = FunctionIdToMethodDesc(functionId);
4654
4655 // it's not safe to examine a methoddesc that has not been restored so do not do so
4656    if (!pMethodDesc->IsRestored())
4657 return CORPROF_E_DATAINCOMPLETE;
4658
4659 if (g_pDebugInterface == NULL)
4660 {
4661 return CORPROF_E_DEBUGGING_DISABLED;
4662 }
4663
4664 COR_IL_MAP * rgNewILMapEntries = new (nothrow) COR_IL_MAP[cILMapEntries];
4665
4666 if (rgNewILMapEntries == NULL)
4667 return E_OUTOFMEMORY;
4668
4669 memcpy_s(rgNewILMapEntries, sizeof(COR_IL_MAP) * cILMapEntries, rgILMapEntries, sizeof(COR_IL_MAP) * cILMapEntries);
4670
4671 return g_pDebugInterface->SetILInstrumentedCodeMap(pMethodDesc,
4672 fStartJit,
4673 cILMapEntries,
4674 rgNewILMapEntries);
4675
4676#else //DEBUGGING_SUPPORTED
4677 return E_NOTIMPL;
4678#endif //DEBUGGING_SUPPORTED
4679}
4680
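// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): ForceGC
// must not be called from a callback or from a hijacked CLR thread (see the checks below),
// so profilers typically run it on a dedicated native thread they own.
//
//     static DWORD WINAPI GCThreadProc(LPVOID pv)
//     {
//         ICorProfilerInfo * pInfo = (ICorProfilerInfo *) pv;
//         return (DWORD) pInfo->ForceGC();
//     }
//
//     // ... from elsewhere in the profiler:
//     HANDLE hThread = CreateThread(NULL, 0, GCThreadProc, pInfo, 0, NULL);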
4681HRESULT ProfToEEInterfaceImpl::ForceGC()
4682{
4683 CONTRACTL
4684 {
4685 // GC calls "new" which throws
4686 THROWS;
4687
4688 // Uh duh, look at the name of the function, dude
4689 GC_TRIGGERS;
4690
4691 // Yay!
4692 MODE_ANY;
4693
4694 // Yay!
4695 EE_THREAD_NOT_REQUIRED;
4696
4697 // Initiating a GC causes a runtime suspension which requires the
4698 // mother of all locks: the thread store lock.
4699 CAN_TAKE_LOCK;
4700
4701 SO_NOT_MAINLINE;
4702 }
4703 CONTRACTL_END;
4704
4705 ASSERT_NO_EE_LOCKS_HELD();
4706
4707 // We need to use IsGarbageCollectorFullyInitialized() instead of IsGCHeapInitialized() because
4708 // there are other GC initialization being done after IsGCHeapInitialized() becomes TRUE,
4709 // and before IsGarbageCollectorFullyInitialized() becomes TRUE.
4710 if (!IsGarbageCollectorFullyInitialized())
4711 {
4712 return CORPROF_E_NOT_YET_AVAILABLE;
4713 }
4714
4715 // Disallow the cases where a profiler calls this off a hijacked CLR thread
4716 // or inside a profiler callback. (Allow cases where this is a native thread, or a
4717 // thread which previously successfully called ForceGC.)
4718 Thread * pThread = GetThreadNULLOk();
4719 if ((pThread != NULL) &&
4720 (!AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_FORCEGC_WAS_CALLED)) &&
4721 (pThread->GetFrame() != FRAME_TOP
4722 || AreCallbackStateFlagsSet(COR_PRF_CALLBACKSTATE_INCALLBACK)))
4723 {
4724 LOG((LF_CORPROF,
4725 LL_ERROR,
4726 "**PROF: ERROR: Returning CORPROF_E_UNSUPPORTED_CALL_SEQUENCE "
4727 "due to illegal hijacked profiler call or call from inside another callback\n"));
4728 return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
4729 }
4730
4731 // NOTE: We cannot use the standard macro PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX
4732 // here because the macro ensures that either the current thread is not an
4733 // EE thread, or, if it is, that the CALLBACK flag is set. In classic apps
4734 // a profiler-owned native thread will not get an EE thread associated with
4735 // it, however, in AppX apps, during the first call into the GC on a
4736 // profiler-owned thread, the EE will associate an EE-thread with the profiler
4737 // thread. As a consequence the second call to ForceGC on the same thread
4738 // would fail, since this is now an EE thread and this API is not called from
4739 // a callback.
4740
4741 // First part of the PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX macro:
4742 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(
4743 kP2EEAllowableAfterAttach | kP2EETriggers,
4744 (LF_CORPROF,
4745 LL_INFO1000,
4746 "**PROF: ForceGC.\n"));
4747
4748#ifdef FEATURE_EVENT_TRACE
4749    // This helper, used by ETW and profAPI, ensures a managed thread gets created for
4750 // this thread before forcing the GC (to work around Jupiter issues where it's
4751 // expected this thread is already managed before starting the GC).
4752 HRESULT hr = ETW::GCLog::ForceGCForDiagnostics();
4753#else // !FEATURE_EVENT_TRACE
4754 HRESULT hr = E_FAIL;
4755#endif // FEATURE_EVENT_TRACE
4756
4757 // If a Thread object was just created for this thread, remember the fact that it
4758 // was a ForceGC() thread, so we can be more lenient when doing
4759 // COR_PRF_CALLBACKSTATE_INCALLBACK later on from other APIs
4760 pThread = GetThreadNULLOk();
4761 if (pThread != NULL)
4762 {
4763 pThread->SetProfilerCallbackStateFlags(COR_PRF_CALLBACKSTATE_FORCEGC_WAS_CALLED);
4764 }
4765
4766 return hr;
4767}
4768
4769
4770/*
4771 * Returns the ContextID for the current thread.
4772 */
4773HRESULT ProfToEEInterfaceImpl::GetThreadContext(ThreadID threadId,
4774 ContextID *pContextId)
4775{
4776 CONTRACTL
4777 {
4778 // Yay!
4779 NOTHROW;
4780
4781 // Yay!
4782 GC_NOTRIGGER;
4783
4784 // Yay!
4785 MODE_ANY;
4786
4787 // Yay!
4788 EE_THREAD_NOT_REQUIRED;
4789
4790 // Yay!
4791 CANNOT_TAKE_LOCK;
4792
4793 SO_NOT_MAINLINE;
4794 }
4795 CONTRACTL_END;
4796
4797 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
4798 (LF_CORPROF,
4799 LL_INFO1000,
4800 "**PROF: GetThreadContext 0x%p.\n",
4801 threadId));
4802
4803 if (!IsManagedThread(threadId))
4804 {
4805 return E_INVALIDARG;
4806 }
4807
4808 // Cast to right type
4809 Thread *pThread = reinterpret_cast<Thread *>(threadId);
4810
4811 // Get the context for the Thread* provided
4812 AppDomain *pContext = pThread->GetDomain(); // Context is same as AppDomain in CoreCLR
4813 _ASSERTE(pContext);
4814
4815 // If there's no current context, return incomplete info
4816 if (!pContext)
4817 return (CORPROF_E_DATAINCOMPLETE);
4818
4819 // Set the result and return
4820 if (pContextId)
4821 *pContextId = reinterpret_cast<ContextID>(pContext);
4822
4823 return (S_OK);
4824}
4825
4826HRESULT ProfToEEInterfaceImpl::GetClassIDInfo(ClassID classId,
4827 ModuleID *pModuleId,
4828 mdTypeDef *pTypeDefToken)
4829{
4830 CONTRACTL
4831 {
4832 // Yay!
4833 NOTHROW;
4834
4835 // Yay!
4836 GC_NOTRIGGER;
4837
4838 // Yay!
4839 MODE_ANY;
4840
4841 // Yay!
4842 EE_THREAD_NOT_REQUIRED;
4843
4844 // Yay!
4845 CANNOT_TAKE_LOCK;
4846
4847 SO_NOT_MAINLINE;
4848 }
4849 CONTRACTL_END;
4850
4851 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
4852 (LF_CORPROF,
4853 LL_INFO1000,
4854 "**PROF: GetClassIDInfo 0x%p.\n",
4855 classId));
4856
4857 if (classId == NULL)
4858 {
4859 return E_INVALIDARG;
4860 }
4861
4862 if (pModuleId != NULL)
4863 {
4864 *pModuleId = NULL;
4865 }
4866
4867 if (pTypeDefToken != NULL)
4868 {
4869 *pTypeDefToken = NULL;
4870 }
4871
4872    // Handle globals, which don't have instances.
4873 if (classId == PROFILER_GLOBAL_CLASS)
4874 {
4875 if (pModuleId != NULL)
4876 {
4877 *pModuleId = PROFILER_GLOBAL_MODULE;
4878 }
4879
4880 if (pTypeDefToken != NULL)
4881 {
4882 *pTypeDefToken = mdTokenNil;
4883 }
4884 }
4885 else if (classId == NULL)
4886 {
4887 return E_INVALIDARG;
4888 }
4889 // Get specific data.
4890 else
4891 {
4892 TypeHandle th = TypeHandle::FromPtr((void *)classId);
4893
4894 if (!th.IsTypeDesc())
4895 {
4896 if (!th.IsArray())
4897 {
4898 //
4899 // If this class is not fully restored, that is all the information we can get at this time.
4900 //
4901 if (!th.IsRestored())
4902 {
4903 return CORPROF_E_DATAINCOMPLETE;
4904 }
4905
4906 if (pModuleId != NULL)
4907 {
4908 *pModuleId = (ModuleID) th.GetModule();
4909 _ASSERTE(*pModuleId != NULL);
4910 }
4911
4912 if (pTypeDefToken != NULL)
4913 {
4914 *pTypeDefToken = th.GetCl();
4915 _ASSERTE(*pTypeDefToken != NULL);
4916 }
4917 }
4918 }
4919 }
4920
4921 return (S_OK);
4922}
4923
4924
4925HRESULT ProfToEEInterfaceImpl::GetFunctionInfo(FunctionID functionId,
4926 ClassID *pClassId,
4927 ModuleID *pModuleId,
4928 mdToken *pToken)
4929{
4930 CONTRACTL
4931 {
4932 // Yay!
4933 NOTHROW;
4934
4935 // Yay!
4936 GC_NOTRIGGER;
4937
4938 // Yay!
4939 MODE_ANY;
4940
4941 // Yay!
4942 EE_THREAD_NOT_REQUIRED;
4943
4944 // Yay!
4945 CANNOT_TAKE_LOCK;
4946
4947 SO_NOT_MAINLINE;
4948 }
4949 CONTRACTL_END;
4950
4951 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
4952 (LF_CORPROF,
4953 LL_INFO1000,
4954 "**PROF: GetFunctionInfo 0x%p.\n",
4955 functionId));
4956
4957 if (functionId == NULL)
4958 {
4959 return E_INVALIDARG;
4960 }
4961
4962 MethodDesc *pMDesc = (MethodDesc *) functionId;
4963 if (!pMDesc->IsRestored())
4964 {
4965 return CORPROF_E_DATAINCOMPLETE;
4966 }
4967
4968 MethodTable *pMT = pMDesc->GetMethodTable();
4969 if (!pMT->IsRestored())
4970 {
4971 return CORPROF_E_DATAINCOMPLETE;
4972 }
4973
4974 ClassID classId = PROFILER_GLOBAL_CLASS;
4975
4976 if (pMT != NULL)
4977 {
4978 classId = NonGenericTypeHandleToClassID(TypeHandle(pMT));
4979 }
4980
4981 if (pClassId != NULL)
4982 {
4983 *pClassId = classId;
4984 }
4985
4986 if (pModuleId != NULL)
4987 {
4988 *pModuleId = (ModuleID) pMDesc->GetModule();
4989 }
4990
4991 if (pToken != NULL)
4992 {
4993 *pToken = pMDesc->GetMemberDef();
4994 }
4995
4996 return (S_OK);
4997}
4998
4999/*
5000 * GetILToNativeMapping returns a map from IL offsets to native
5001 * offsets for this code. An array of COR_DEBUG_IL_TO_NATIVE_MAP
5002 * structs will be returned, and some of the ilOffsets in this array
5003 * may be the values specified in CorDebugIlToNativeMappingTypes.
5004 */
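// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): the usual
// two-call pattern. ilOffset may also hold the sentinel values from
// CorDebugIlToNativeMappingTypes (NO_MAPPING, PROLOG, EPILOG).
//
//     ULONG32 cMap = 0;
//     pInfo->GetILToNativeMapping(functionId, 0, &cMap, NULL);
//
//     COR_DEBUG_IL_TO_NATIVE_MAP * pMap = new COR_DEBUG_IL_TO_NATIVE_MAP[cMap];
//     pInfo->GetILToNativeMapping(functionId, cMap, &cMap, pMap);
//     for (ULONG32 i = 0; i < cMap; i++)
//     {
//         if (pMap[i].ilOffset == (ULONG32) NO_MAPPING)
//             continue;   // native range with no corresponding IL
//         // pMap[i].ilOffset maps to [nativeStartOffset, nativeEndOffset)
//     }
//     delete [] pMap;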
5005HRESULT ProfToEEInterfaceImpl::GetILToNativeMapping(FunctionID functionId,
5006 ULONG32 cMap,
5007 ULONG32 * pcMap, // [out]
5008 COR_DEBUG_IL_TO_NATIVE_MAP map[]) // [out]
5009{
5010 CONTRACTL
5011 {
5012 // MethodDesc::FindOrCreateTypicalSharedInstantiation throws
5013 THROWS;
5014
5015 // MethodDesc::FindOrCreateTypicalSharedInstantiation triggers, but shouldn't trigger when
5016 // called from here. Since the profiler has a valid functionId, the methoddesc for
5017 // this code will already have been created. We should be able to enforce this by
5018 // passing allowCreate=FALSE to FindOrCreateTypicalSharedInstantiation.
5019 DISABLED(GC_NOTRIGGER);
5020
5021 // Yay!
5022 MODE_ANY;
5023
5024 // The call to g_pDebugInterface->GetILToNativeMapping() below may call
5025 // Debugger::AcquireDebuggerLock
5026 CAN_TAKE_LOCK;
5027
5028 SO_NOT_MAINLINE;
5029 }
5030 CONTRACTL_END;
5031
5032 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
5033 (LF_CORPROF,
5034 LL_INFO1000,
5035 "**PROF: GetILToNativeMapping 0x%p.\n",
5036 functionId));
5037
5038 return GetILToNativeMapping2(functionId, 0, cMap, pcMap, map);
5039}
5040
5041HRESULT ProfToEEInterfaceImpl::GetILToNativeMapping2(FunctionID functionId,
5042 ReJITID reJitId,
5043 ULONG32 cMap,
5044 ULONG32 * pcMap, // [out]
5045 COR_DEBUG_IL_TO_NATIVE_MAP map[]) // [out]
5046{
5047 CONTRACTL
5048 {
5049 // MethodDesc::FindOrCreateTypicalSharedInstantiation throws
5050 THROWS;
5051
5052 // MethodDesc::FindOrCreateTypicalSharedInstantiation triggers, but shouldn't trigger when
5053 // called from here. Since the profiler has a valid functionId, the methoddesc for
5054 // this code will already have been created. We should be able to enforce this by
5055 // passing allowCreate=FALSE to FindOrCreateTypicalSharedInstantiation.
5056 DISABLED(GC_NOTRIGGER);
5057
5058 // Yay!
5059 MODE_ANY;
5060
5061 // The call to g_pDebugInterface->GetILToNativeMapping() below may call
5062 // Debugger::AcquireDebuggerLock
5063 CAN_TAKE_LOCK;
5064
5065 SO_NOT_MAINLINE;
5066 }
5067 CONTRACTL_END;
5068
5069 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
5070 (LF_CORPROF,
5071 LL_INFO1000,
5072 "**PROF: GetILToNativeMapping2 0x%p 0x%p.\n",
5073 functionId, reJitId));
5074
5075 if (functionId == NULL)
5076 {
5077 return E_INVALIDARG;
5078 }
5079
5080 if ((cMap > 0) &&
5081 ((pcMap == NULL) || (map == NULL)))
5082 {
5083 return E_INVALIDARG;
5084 }
5085
5086 HRESULT hr = S_OK;
5087
5088 EX_TRY
5089 {
5090 // Cast to proper type
5091 MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
5092
5093 if (pMD->HasClassOrMethodInstantiation() && pMD->IsTypicalMethodDefinition())
5094 {
5095 // In this case, we used to replace pMD with its canonical instantiation
5096 // (FindOrCreateTypicalSharedInstantiation). However, a profiler should never be able
5097 // to get to this point anyway, since any MethodDesc a profiler gets from us
5098 // cannot be typical (i.e., cannot be a generic with types still left uninstantiated).
5099 // We assert here just in case a test proves me wrong, but generally we will
5100 // disallow this code path.
5101 _ASSERTE(!"Profiler passed a typical method desc (a generic with types still left uninstantiated) to GetILToNativeMapping2");
5102 hr = E_INVALIDARG;
5103 }
5104 else
5105 {
5106 PCODE pCodeStart = NULL;
5107 CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager();
5108 ILCodeVersion ilCodeVersion = NULL;
5109 {
5110 CodeVersionManager::TableLockHolder lockHolder(pCodeVersionManager);
5111
5112                ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, reJitId);
5113
5114 NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD);
5115 for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
5116 {
5117 // Now that tiered compilation can create more than one jitted code version for the same rejit id
5118 // we are arbitrarily choosing the first one to return. To address a specific version of native code
5119 // use GetILToNativeMapping3.
5120 pCodeStart = iter->GetNativeCode();
5121 break;
5122 }
5123 }
5124
5125 hr = GetILToNativeMapping3(pCodeStart, cMap, pcMap, map);
5126 }
5127 }
5128 EX_CATCH_HRESULT(hr);
5129
5130 return hr;
5131}
5132
5133
5134
5135//*****************************************************************************
5136// Given an ObjectID, go get the EE ClassID for it.
5137//*****************************************************************************
5138HRESULT ProfToEEInterfaceImpl::GetClassFromObject(ObjectID objectId,
5139 ClassID * pClassId)
5140{
5141 CONTRACTL
5142 {
5143 // Yay!
5144 NOTHROW;
5145
5146 // Yay!
5147 GC_NOTRIGGER;
5148
5149 // Yay! Fail at runtime if in preemptive mode via AllowObjectInspection()
5150 MODE_ANY;
5151
5152 // Object::GetTypeHandle takes a lock
5153 CAN_TAKE_LOCK;
5154
5155 SO_NOT_MAINLINE;
5156 }
5157 CONTRACTL_END;
5158
5159 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
5160 (LF_CORPROF,
5161 LL_INFO1000,
5162 "**PROF: GetClassFromObject 0x%p.\n",
5163 objectId));
5164
5165 if (objectId == NULL)
5166 {
5167 return E_INVALIDARG;
5168 }
5169
5170 HRESULT hr = AllowObjectInspection();
5171 if (FAILED(hr))
5172 {
5173 return hr;
5174 }
5175
5176 // Cast the ObjectID as a Object
5177 Object *pObj = reinterpret_cast<Object *>(objectId);
5178
5179 // Set the out param and indicate success
5180 // Note that for generic code we always return uninstantiated ClassIDs and FunctionIDs
5181 if (pClassId)
5182 {
5183 *pClassId = SafeGetClassIDFromObject(pObj);
5184 }
5185
5186 return S_OK;
5187}
5188
5189//*****************************************************************************
5190// Given a module and a token for a class, go get the EE data structure for it.
5191//*****************************************************************************
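// Illustrative profiler-side sketch (not part of the EE; names are hypothetical). Note that
// for generic types this returns CORPROF_E_TYPE_IS_PARAMETERIZED; supply the type arguments
// via GetClassFromTokenAndTypeArgs instead.
//
//     ClassID classId = NULL;
//     HRESULT hr = pInfo->GetClassFromToken(moduleId, typeDefToken, &classId);
//     if (hr == CORPROF_E_TYPE_IS_PARAMETERIZED)
//     {
//         // fall back to GetClassFromTokenAndTypeArgs with explicit type arguments
//     }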
5192HRESULT ProfToEEInterfaceImpl::GetClassFromToken(ModuleID moduleId,
5193 mdTypeDef typeDef,
5194 ClassID *pClassId)
5195{
5196 CONTRACTL
5197 {
5198 // Yay!
5199 NOTHROW;
5200
5201 // ClassLoader::LoadTypeDefOrRefThrowing triggers
5202 GC_TRIGGERS;
5203
5204 // Yay!
5205 MODE_ANY;
5206
5207 // ClassLoader::LoadTypeDefOrRefThrowing takes a lock
5208 CAN_TAKE_LOCK;
5209
5210 SO_NOT_MAINLINE;
5211 }
5212 CONTRACTL_END;
5213
5214 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
5215 kP2EEAllowableAfterAttach | kP2EETriggers,
5216 (LF_CORPROF,
5217 LL_INFO1000,
5218 "**PROF: GetClassFromToken 0x%p, 0x%08x.\n",
5219 moduleId,
5220 typeDef));
5221
5222 if ((moduleId == NULL) || (typeDef == mdTypeDefNil) || (typeDef == NULL))
5223 {
5224 return E_INVALIDARG;
5225 }
5226
5227 if (!g_profControlBlock.fBaseSystemClassesLoaded)
5228 {
5229 return CORPROF_E_RUNTIME_UNINITIALIZED;
5230 }
5231
5232 // Get the module
5233 Module *pModule = (Module *) moduleId;
5234
5235 // No module, or it's disassociated from metadata
5236 if ((pModule == NULL) || (pModule->IsBeingUnloaded()))
5237 {
5238 return CORPROF_E_DATAINCOMPLETE;
5239 }
5240
5241 // First, check the RID map. This is important since it
5242 // works during teardown (and the below doesn't)
5243 TypeHandle th;
5244 th = pModule->LookupTypeDef(typeDef);
5245 if (th.IsNull())
5246 {
5247 HRESULT hr = S_OK;
5248
5249 EX_TRY {
5250 th = ClassLoader::LoadTypeDefOrRefThrowing(pModule, typeDef,
5251 ClassLoader::ThrowIfNotFound,
5252 ClassLoader::PermitUninstDefOrRef);
5253 }
5254 EX_CATCH_HRESULT(hr);
5255
5256 if (FAILED(hr))
5257 {
5258 return hr;
5259 }
5260 }
5261
5262 if (!th.GetMethodTable())
5263 {
5264 return CORPROF_E_DATAINCOMPLETE;
5265 }
5266
5267 //
5268 // Check if it is generic
5269 //
5270 ClassID classId = NonGenericTypeHandleToClassID(th);
5271
5272 if (classId == NULL)
5273 {
5274 return CORPROF_E_TYPE_IS_PARAMETERIZED;
5275 }
5276
5277 // Return value if necessary
5278 if (pClassId)
5279 {
5280 *pClassId = classId;
5281 }
5282
5283 return S_OK;
5284}
5285
5286
5287HRESULT ProfToEEInterfaceImpl::GetClassFromTokenAndTypeArgs(ModuleID moduleID,
5288 mdTypeDef typeDef,
5289 ULONG32 cTypeArgs,
5290 ClassID typeArgs[],
5291 ClassID* pClassID)
5292{
5293 CONTRACTL
5294 {
5295 // Yay!
5296 NOTHROW;
5297
5298 // LoadGenericInstantiationThrowing may load
5299 GC_TRIGGERS;
5300
5301 // Yay!
5302 MODE_ANY;
5303
5304 // ClassLoader::LoadGenericInstantiationThrowing takes a lock
5305 CAN_TAKE_LOCK;
5306
5307 SO_NOT_MAINLINE;
5308 }
5309 CONTRACTL_END;
5310
5311 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
5312 kP2EEAllowableAfterAttach | kP2EETriggers,
5313 (LF_CORPROF,
5314 LL_INFO1000,
5315 "**PROF: GetClassFromTokenAndTypeArgs 0x%p, 0x%08x.\n",
5316 moduleID,
5317 typeDef));
5318
5319 if (!g_profControlBlock.fBaseSystemClassesLoaded)
5320 {
5321 return CORPROF_E_RUNTIME_UNINITIALIZED;
5322 }
5323
5324 Module* pModule = reinterpret_cast< Module* >(moduleID);
5325
5326 if (pModule == NULL || pModule->IsBeingUnloaded())
5327 {
5328 return CORPROF_E_DATAINCOMPLETE;
5329 }
5330
5331 // This array needs to be accessible at least until the call to
5332 // ClassLoader::LoadGenericInstantiationThrowing
5333 TypeHandle* genericParameters = new (nothrow) TypeHandle[cTypeArgs];
5334 NewArrayHolder< TypeHandle > holder(genericParameters);
5335
5336 if (NULL == genericParameters)
5337 {
5338 return E_OUTOFMEMORY;
5339 }
5340
5341 for (ULONG32 i = 0; i < cTypeArgs; ++i)
5342 {
5343 genericParameters[i] = TypeHandle(reinterpret_cast< MethodTable* >(typeArgs[i]));
5344 }
5345
5346 //
5347 // nickbe 11/24/2003 10:12:56
5348 //
5349 // In RTM/Everett we decided to load the class if it hadn't been loaded yet
5350 // (see ProfToEEInterfaceImpl::GetClassFromToken). For compatibility we're
5351 // going to make the same decision here. It's potentially confusing to tell
5352 // someone a type doesn't exist at one point in time, but does exist later,
5353    // and there is no good way for us to determine whether a class may eventually
5354    // be loaded without going ahead and loading it.
5355 //
5356 TypeHandle th;
5357 HRESULT hr = S_OK;
5358
5359 EX_TRY
5360 {
5361 // Not sure if this is a valid override or not - making this a VIOLATION
5362 // until we're sure.
5363 CONTRACT_VIOLATION(LoadsTypeViolation);
5364
5365 if (GetThreadNULLOk() == NULL)
5366 {
5367 // Type system will try to validate as part of its contract if the current
5368 // AppDomain returned by GetAppDomain can load types in specified module's
5369 // assembly. On a non-EE thread it results in an AV in a check build
5370 // since the type system tries to dereference NULL returned by GetAppDomain.
5371 // More importantly, loading a type on a non-EE thread is not allowed.
5372 //
5373 // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE() states that callers will not
5374 // try to load a type, so that type system will not try to test type
5375 // loadability in the current AppDomain. However,
5376 // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE does not prevent callers from
5377 // loading a type. It is profiler's responsibility not to attempt to load
5378 // a type in unsupported ways (e.g. from a non-EE thread). It doesn't
5379 // impact retail builds, in which contracts are not available.
5380 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
5381
5382 // ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE also defines FAULT_FORBID, which
5383 // causes Scanruntime to flag a fault violation in AssemblySpec::InitializeSpec,
5384 // which is defined as FAULTS. It only happens in a type-loading path, which
5385 // is not supported on a non-EE thread. Suppressing a contract violation in an
5386 // unsupported execution path is more preferable than causing AV when calling
5387 // GetClassFromTokenAndTypeArgs on a non-EE thread in a check build. See Dev10
5388 // 682526 for more details.
5389 FAULT_NOT_FATAL();
5390
5391 th = ClassLoader::LoadGenericInstantiationThrowing(pModule,
5392 typeDef,
5393 Instantiation(genericParameters, cTypeArgs),
5394 ClassLoader::LoadTypes);
5395 }
5396 else
5397 {
5398 th = ClassLoader::LoadGenericInstantiationThrowing(pModule,
5399 typeDef,
5400 Instantiation(genericParameters, cTypeArgs),
5401 ClassLoader::LoadTypes);
5402 }
5403 }
5404 EX_CATCH_HRESULT(hr);
5405
5406 if (FAILED(hr))
5407 {
5408 return hr;
5409 }
5410
5411 if (th.IsNull())
5412 {
5413 // Hmm, the type isn't loaded yet.
5414 return CORPROF_E_DATAINCOMPLETE;
5415 }
5416
5417 *pClassID = TypeHandleToClassID(th);
5418
5419 return S_OK;
5420}
5421
5422//*****************************************************************************
5423// Given the token for a method, return the function ID.
5424//*****************************************************************************
5425HRESULT ProfToEEInterfaceImpl::GetFunctionFromToken(ModuleID moduleId,
5426 mdToken typeDef,
5427 FunctionID *pFunctionId)
5428{
5429 CONTRACTL
5430 {
5431 // Yay!
5432 NOTHROW;
5433
5434 // Yay!
5435 GC_NOTRIGGER;
5436
5437 // Yay!
5438 MODE_ANY;
5439
5440 // Yay!
5441 EE_THREAD_NOT_REQUIRED;
5442
5443 // Yay!
5444 CANNOT_TAKE_LOCK;
5445
5446 SO_NOT_MAINLINE;
5447 }
5448 CONTRACTL_END;
5449
5450 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
5451 (LF_CORPROF,
5452 LL_INFO1000,
5453 "**PROF: GetFunctionFromToken 0x%p, 0x%08x.\n",
5454 moduleId,
5455 typeDef));
5456
5457 if ((moduleId == NULL) || (typeDef == mdTokenNil))
5458 {
5459 return E_INVALIDARG;
5460 }
5461
5462 if (!g_profControlBlock.fBaseSystemClassesLoaded)
5463 {
5464 return CORPROF_E_RUNTIME_UNINITIALIZED;
5465 }
5466
5467 // Default HRESULT
5468 HRESULT hr = S_OK;
5469
5470 // Get the module
5471 Module *pModule = (Module *) moduleId;
5472
5473 // No module, or disassociated from metadata
5474 if (pModule == NULL || pModule->IsBeingUnloaded())
5475 {
5476 return CORPROF_E_DATAINCOMPLETE;
5477 }
5478
5479 // Default return value of NULL
5480 MethodDesc *pDesc = NULL;
5481
5482 // Different lookup depending on whether it's a Def or Ref
5483 if (TypeFromToken(typeDef) == mdtMethodDef)
5484 {
5485 pDesc = pModule->LookupMethodDef(typeDef);
5486 }
5487 else if (TypeFromToken(typeDef) == mdtMemberRef)
5488 {
5489 pDesc = pModule->LookupMemberRefAsMethod(typeDef);
5490 }
5491 else
5492 {
5493 return E_INVALIDARG;
5494 }
5495
5496 if (NULL == pDesc)
5497 {
5498 return E_INVALIDARG;
5499 }
5500
5501 //
5502 // Check that this is a non-generic method
5503 //
5504 if (pDesc->HasClassOrMethodInstantiation())
5505 {
5506 return CORPROF_E_FUNCTION_IS_PARAMETERIZED;
5507 }
5508
5509 if (pFunctionId && SUCCEEDED(hr))
5510 {
5511 *pFunctionId = MethodDescToFunctionID(pDesc);
5512 }
5513
5514 return (hr);
5515}
5516
5517HRESULT ProfToEEInterfaceImpl::GetFunctionFromTokenAndTypeArgs(ModuleID moduleID,
5518 mdMethodDef funcDef,
5519 ClassID classId,
5520 ULONG32 cTypeArgs,
5521 ClassID typeArgs[],
5522 FunctionID* pFunctionID)
5523{
5524 CONTRACTL
5525 {
5526 // Yay!
5527 NOTHROW;
5528
5529 // It can trigger type loads
5530 GC_TRIGGERS;
5531
5532 // Yay!
5533 MODE_ANY;
5534
5535 // MethodDesc::FindOrCreateAssociatedMethodDesc enters a Crst
5536 CAN_TAKE_LOCK;
5537
5538 SO_NOT_MAINLINE;
5539 }
5540 CONTRACTL_END;
5541
5542 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
5543 kP2EEAllowableAfterAttach | kP2EETriggers,
5544 (LF_CORPROF,
5545 LL_INFO1000,
5546 "**PROF: GetFunctionFromTokenAndTypeArgs 0x%p, 0x%08x, 0x%p.\n",
5547 moduleID,
5548 funcDef,
5549 classId));
5550
5551 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classId);
5552 Module* pModule = reinterpret_cast< Module* >(moduleID);
5553
5554 if ((pModule == NULL) || typeHandle.IsNull())
5555 {
5556 return E_INVALIDARG;
5557 }
5558
5559 if (!g_profControlBlock.fBaseSystemClassesLoaded)
5560 {
5561 return CORPROF_E_RUNTIME_UNINITIALIZED;
5562 }
5563
5564 if (pModule->IsBeingUnloaded())
5565 {
5566 return CORPROF_E_DATAINCOMPLETE;
5567 }
5568
5569 MethodDesc* pMethodDesc = NULL;
5570
5571 if (mdtMethodDef == TypeFromToken(funcDef))
5572 {
5573 pMethodDesc = pModule->LookupMethodDef(funcDef);
5574 }
5575 else if (mdtMemberRef == TypeFromToken(funcDef))
5576 {
5577 pMethodDesc = pModule->LookupMemberRefAsMethod(funcDef);
5578 }
5579 else
5580 {
5581 return E_INVALIDARG;
5582 }
5583
5584 MethodTable* pMethodTable = typeHandle.GetMethodTable();
5585
5586 if (pMethodTable == NULL || !pMethodTable->IsRestored() ||
5587 pMethodDesc == NULL || !pMethodDesc->IsRestored())
5588 {
5589 return CORPROF_E_DATAINCOMPLETE;
5590 }
5591
5592 // This array needs to be accessible at least until the call to
5593 // MethodDesc::FindOrCreateAssociatedMethodDesc
5594 TypeHandle* genericParameters = new (nothrow) TypeHandle[cTypeArgs];
5595 NewArrayHolder< TypeHandle > holder(genericParameters);
5596
5597 if (NULL == genericParameters)
5598 {
5599 return E_OUTOFMEMORY;
5600 }
5601
5602 for (ULONG32 i = 0; i < cTypeArgs; ++i)
5603 {
5604 genericParameters[i] = TypeHandle(reinterpret_cast< MethodTable* >(typeArgs[i]));
5605 }
5606
5607 MethodDesc* result = NULL;
5608 HRESULT hr = S_OK;
5609
5610 EX_TRY
5611 {
5612 result = MethodDesc::FindOrCreateAssociatedMethodDesc(pMethodDesc,
5613 pMethodTable,
5614 FALSE,
5615 Instantiation(genericParameters, cTypeArgs),
5616 TRUE);
5617 }
5618 EX_CATCH_HRESULT(hr);
5619
5620 if (NULL != result)
5621 {
5622 *pFunctionID = MethodDescToFunctionID(result);
5623 }
5624
5625 return hr;
5626}
5627
5628//*****************************************************************************
5629// Retrieve information about a given application domain, which is like a
5630// sub-process.
5631//*****************************************************************************
5632HRESULT ProfToEEInterfaceImpl::GetAppDomainInfo(AppDomainID appDomainId,
5633 ULONG cchName,
5634 ULONG *pcchName,
5635 __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
5636 ProcessID *pProcessId)
5637{
5638 CONTRACTL
5639 {
5640 // Yay!
5641 NOTHROW;
5642
5643 // AppDomain::GetFriendlyNameForDebugger triggers
5644 GC_TRIGGERS;
5645
5646 // Yay!
5647 MODE_ANY;
5648
5649 // AppDomain::GetFriendlyNameForDebugger takes a lock
5650 CAN_TAKE_LOCK;
5651
5652 SO_NOT_MAINLINE;
5653 }
5654 CONTRACTL_END;
5655
5656 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
5657 kP2EEAllowableAfterAttach | kP2EETriggers,
5658 (LF_CORPROF,
5659 LL_INFO1000,
5660 "**PROF: GetAppDomainInfo 0x%p.\n",
5661 appDomainId));
5662
5663 if (appDomainId == NULL)
5664 {
5665 return E_INVALIDARG;
5666 }
5667
5668 BaseDomain *pDomain; // Internal data structure.
5669 HRESULT hr = S_OK;
5670
5671 // <TODO>@todo:
5672 // Right now, this ID is not a true AppDomain, since we use the old
5673 // AppDomain/SystemDomain model in the profiling API. This means that
5674 // the profiler exposes the SharedDomain and the SystemDomain to the
5675 // outside world. It's not clear whether this is actually the right thing
5676 // to do or not. - seantrow
5677 //
5678 // Postponed to V2.
5679 // </TODO>
5680
5681 pDomain = (BaseDomain *) appDomainId;
5682
5683 // Make sure they've passed in a valid appDomainId
5684 if (pDomain == NULL)
5685 return (E_INVALIDARG);
5686
5687 // Pick sensible defaults.
5688 if (pcchName)
5689 *pcchName = 0;
5690 if (szName)
5691 *szName = 0;
5692 if (pProcessId)
5693 *pProcessId = 0;
5694
5695 LPCWSTR szFriendlyName;
5696 if (pDomain == SystemDomain::System())
5697 szFriendlyName = g_pwBaseLibrary;
5698 else
5699 szFriendlyName = ((AppDomain*)pDomain)->GetFriendlyNameForDebugger();
5700
5701 if (szFriendlyName != NULL)
5702 {
5703 // Get the module file name
5704 ULONG trueLen = (ULONG)(wcslen(szFriendlyName) + 1);
5705
5706 // Return name of module as required.
5707 if (szName && cchName > 0)
5708 {
5709 ULONG copyLen = trueLen;
5710
5711 if (copyLen >= cchName)
5712 {
5713 copyLen = cchName - 1;
5714 }
5715
5716 wcsncpy_s(szName, cchName, szFriendlyName, copyLen);
5717 }
5718
5719 // If they request the actual length of the name
5720 if (pcchName)
5721 *pcchName = trueLen;
5722 }
5723
5724 // If we don't have a friendly name but the call was requesting it, then return incomplete data HR
5725 else
5726 {
5727 if ((szName != NULL && cchName > 0) || pcchName)
5728 hr = CORPROF_E_DATAINCOMPLETE;
5729 }
5730
5731 if (pProcessId)
5732 *pProcessId = (ProcessID) GetCurrentProcessId();
5733
5734 return (hr);
5735}
5736
5737
5738//*****************************************************************************
5739// Retrieve information about an assembly, which is a collection of dll's.
5740//*****************************************************************************
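// Illustrative profiler-side sketch (not part of the EE; names are hypothetical): walking
// the IDs upward from a module to its assembly and app domain. Error handling and the
// name buffers are omitted.
//
//     AssemblyID assemblyId = NULL;
//     pInfo->GetModuleInfo2(moduleId, NULL, 0, NULL, NULL, &assemblyId, NULL);
//
//     AppDomainID appDomainId = NULL;
//     ModuleID manifestModuleId = NULL;
//     pInfo->GetAssemblyInfo(assemblyId, 0, NULL, NULL, &appDomainId, &manifestModuleId);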
5741HRESULT ProfToEEInterfaceImpl::GetAssemblyInfo(AssemblyID assemblyId,
5742 ULONG cchName,
5743 ULONG *pcchName,
5744 __out_ecount_part_opt(cchName, *pcchName) WCHAR szName[],
5745 AppDomainID *pAppDomainId,
5746 ModuleID *pModuleId)
5747{
5748 CONTRACTL
5749 {
5750 // SString::SString throws
5751 THROWS;
5752
5753 // Yay!
5754 GC_NOTRIGGER;
5755
5756 // Yay!
5757 MODE_ANY;
5758
5759 // Yay!
5760 EE_THREAD_NOT_REQUIRED;
5761
5762 // PEAssembly::GetSimpleName() enters a lock via use of the metadata interface
5763 CAN_TAKE_LOCK;
5764
5765 SO_NOT_MAINLINE;
5766 }
5767 CONTRACTL_END;
5768
5769 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
5770 (LF_CORPROF,
5771 LL_INFO1000,
5772 "**PROF: GetAssemblyInfo 0x%p.\n",
5773 assemblyId));
5774
5775 if (assemblyId == NULL)
5776 {
5777 return E_INVALIDARG;
5778 }
5779
5780 HRESULT hr = S_OK;
5781
5782 Assembly *pAssembly; // Internal data structure for assembly.
5783
5784 pAssembly = (Assembly *) assemblyId;
5785 _ASSERTE(pAssembly != NULL);
5786
5787 if (pcchName || szName)
5788 {
5789 // Get the friendly name of the assembly
5790 SString name(SString::Utf8, pAssembly->GetSimpleName());
5791
5792 const COUNT_T nameLength = name.GetCount() + 1;
5793
5794 if ((NULL != szName) && (cchName > 0))
5795 {
5796 wcsncpy_s(szName, cchName, name.GetUnicode(), min(nameLength, cchName - 1));
5797 }
5798
5799 if (NULL != pcchName)
5800 {
5801 *pcchName = nameLength;
5802 }
5803 }
5804
5805 // Get the parent application domain.
5806 if (pAppDomainId)
5807 {
5808 *pAppDomainId = (AppDomainID) pAssembly->GetDomain();
5809 _ASSERTE(*pAppDomainId != NULL);
5810 }
5811
5812 // Find the module the manifest lives in.
5813 if (pModuleId)
5814 {
5815 *pModuleId = (ModuleID) pAssembly->GetManifestModule();
5816
5817 // This is the case where the profiler has called GetAssemblyInfo
5818        // on an assembly that hasn't been completely created yet.
5819 if (!*pModuleId)
5820 hr = CORPROF_E_DATAINCOMPLETE;
5821 }
5822
5823 return (hr);
5824}
5825
5826// Setting ELT hooks is only allowed from within Initialize(). However, test-only
5827// profilers may need to set those hooks from an attaching profiler.  See
5828// code:ProfControlBlock#TestOnlyELT
5829#ifdef PROF_TEST_ONLY_FORCE_ELT
5830#define PROFILER_TO_CLR_ENTRYPOINT_SET_ELT(logParams) \
5831 do \
5832 { \
5833 if (g_profControlBlock.fTestOnlyForceEnterLeave) \
5834 { \
5835 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach, logParams); \
5836 } \
5837 else \
5838 { \
5839 PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY(logParams); \
5840 } \
5841 } while(0)
5842#else // PROF_TEST_ONLY_FORCE_ELT
5843#define PROFILER_TO_CLR_ENTRYPOINT_SET_ELT \
5844 PROFILER_TO_CLR_ENTRYPOINT_CALLABLE_ON_INIT_ONLY
5845#endif // PROF_TEST_ONLY_FORCE_ELT
5846
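// Illustrative profiler-side sketch (not part of the EE; hook names are hypothetical):
// ELT hooks are registered from ICorProfilerCallback::Initialize; the ...3 variants on
// ICorProfilerInfo3 are what new profilers typically use.
//
//     EXTERN_C void STDMETHODCALLTYPE MyEnter3(FunctionIDOrClientID functionIDOrClientID);
//     EXTERN_C void STDMETHODCALLTYPE MyLeave3(FunctionIDOrClientID functionIDOrClientID);
//     EXTERN_C void STDMETHODCALLTYPE MyTailcall3(FunctionIDOrClientID functionIDOrClientID);
//
//     // In Initialize, after requesting COR_PRF_MONITOR_ENTERLEAVE in the event mask:
//     pInfo3->SetEnterLeaveFunctionHooks3(MyEnter3, MyLeave3, MyTailcall3);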
5847
5848HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks(FunctionEnter * pFuncEnter,
5849 FunctionLeave * pFuncLeave,
5850 FunctionTailcall * pFuncTailcall)
5851{
5852 CONTRACTL
5853 {
5854 // Yay!
5855 NOTHROW;
5856
5857 // Yay!
5858 GC_NOTRIGGER;
5859
5860 // Yay!
5861 MODE_ANY;
5862
5863 // Yay!
5864 EE_THREAD_NOT_REQUIRED;
5865
5866 CANNOT_TAKE_LOCK;
5867
5868 SO_NOT_MAINLINE;
5869 }
5870 CONTRACTL_END;
5871
5872 // The profiler must call SetEnterLeaveFunctionHooks during initialization, since
5873 // the enter/leave events are immutable and must also be set during initialization.
5874 PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
5875 LL_INFO10,
5876 "**PROF: SetEnterLeaveFunctionHooks 0x%p, 0x%p, 0x%p.\n",
5877 pFuncEnter,
5878 pFuncLeave,
5879 pFuncTailcall));
5880
5881 return g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks(pFuncEnter, pFuncLeave, pFuncTailcall);
5882}
5883
5884
5885HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks2(FunctionEnter2 * pFuncEnter,
5886 FunctionLeave2 * pFuncLeave,
5887 FunctionTailcall2 * pFuncTailcall)
5888{
5889 CONTRACTL
5890 {
5891 // Yay!
5892 NOTHROW;
5893
5894 // Yay!
5895 GC_NOTRIGGER;
5896
5897 // Yay!
5898 MODE_ANY;
5899
5900 // Yay!
5901 EE_THREAD_NOT_REQUIRED;
5902
5903 CANNOT_TAKE_LOCK;
5904
5905 SO_NOT_MAINLINE;
5906 }
5907 CONTRACTL_END;
5908
5909 // The profiler must call SetEnterLeaveFunctionHooks2 during initialization, since
5910 // the enter/leave events are immutable and must also be set during initialization.
5911 PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
5912 LL_INFO10,
5913 "**PROF: SetEnterLeaveFunctionHooks2 0x%p, 0x%p, 0x%p.\n",
5914 pFuncEnter,
5915 pFuncLeave,
5916 pFuncTailcall));
5917
5918 return
5919 g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks2(pFuncEnter, pFuncLeave, pFuncTailcall);
5920}
5921
5922
5923HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks3(FunctionEnter3 * pFuncEnter3,
5924 FunctionLeave3 * pFuncLeave3,
5925 FunctionTailcall3 * pFuncTailcall3)
5926{
5927 CONTRACTL
5928 {
5929 // Yay!
5930 NOTHROW;
5931
5932 // Yay!
5933 GC_NOTRIGGER;
5934
5935 // Yay!
5936 MODE_ANY;
5937
5938 // Yay!
5939 EE_THREAD_NOT_REQUIRED;
5940
5941 CANNOT_TAKE_LOCK;
5942
5943 SO_NOT_MAINLINE;
5944 }
5945 CONTRACTL_END;
5946
5947 // The profiler must call SetEnterLeaveFunctionHooks3 during initialization, since
5948 // the enter/leave events are immutable and must also be set during initialization.
5949 PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
5950 LL_INFO10,
5951 "**PROF: SetEnterLeaveFunctionHooks3 0x%p, 0x%p, 0x%p.\n",
5952 pFuncEnter3,
5953 pFuncLeave3,
5954 pFuncTailcall3));
5955
5956 return
5957 g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks3(pFuncEnter3,
5958 pFuncLeave3,
5959 pFuncTailcall3);
5960}
5961
5962
5963
5964HRESULT ProfToEEInterfaceImpl::SetEnterLeaveFunctionHooks3WithInfo(FunctionEnter3WithInfo * pFuncEnter3WithInfo,
5965 FunctionLeave3WithInfo * pFuncLeave3WithInfo,
5966 FunctionTailcall3WithInfo * pFuncTailcall3WithInfo)
5967{
5968 CONTRACTL
5969 {
5970 // Yay!
5971 NOTHROW;
5972
5973 // Yay!
5974 GC_NOTRIGGER;
5975
5976 // Yay!
5977 MODE_ANY;
5978
5979 // Yay!
5980 EE_THREAD_NOT_REQUIRED;
5981
5982 CANNOT_TAKE_LOCK;
5983
5984 SO_NOT_MAINLINE;
5985 }
5986 CONTRACTL_END;
5987
5988 // The profiler must call SetEnterLeaveFunctionHooks3WithInfo during initialization, since
5989 // the enter/leave events are immutable and must also be set during initialization.
5990 PROFILER_TO_CLR_ENTRYPOINT_SET_ELT((LF_CORPROF,
5991 LL_INFO10,
5992 "**PROF: SetEnterLeaveFunctionHooks3WithInfo 0x%p, 0x%p, 0x%p.\n",
5993 pFuncEnter3WithInfo,
5994 pFuncLeave3WithInfo,
5995 pFuncTailcall3WithInfo));
5996
5997 return
5998 g_profControlBlock.pProfInterface->SetEnterLeaveFunctionHooks3WithInfo(pFuncEnter3WithInfo,
5999 pFuncLeave3WithInfo,
6000 pFuncTailcall3WithInfo);
6001}
6002
6003
6004HRESULT ProfToEEInterfaceImpl::SetFunctionIDMapper(FunctionIDMapper *pFunc)
6005{
6006 CONTRACTL
6007 {
6008 // Yay!
6009 NOTHROW;
6010
6011 // Yay!
6012 GC_NOTRIGGER;
6013
6014 // Yay!
6015 MODE_ANY;
6016
6017 // Yay!
6018 EE_THREAD_NOT_REQUIRED;
6019
6020 // Yay!
6021 CANNOT_TAKE_LOCK;
6022
6023 SO_NOT_MAINLINE;
6024 }
6025 CONTRACTL_END;
6026
6027 PROFILER_TO_CLR_ENTRYPOINT_ASYNC((LF_CORPROF,
6028 LL_INFO10,
6029 "**PROF: SetFunctionIDMapper 0x%p.\n",
6030 pFunc));
6031
6032 g_profControlBlock.pProfInterface->SetFunctionIDMapper(pFunc);
6033
6034 return (S_OK);
6035}
6036
6037HRESULT ProfToEEInterfaceImpl::SetFunctionIDMapper2(FunctionIDMapper2 *pFunc, void * clientData)
6038{
6039 CONTRACTL
6040 {
6041 // Yay!
6042 NOTHROW;
6043
6044 // Yay!
6045 GC_NOTRIGGER;
6046
6047 // Yay!
6048 MODE_ANY;
6049
6050 // Yay!
6051 EE_THREAD_NOT_REQUIRED;
6052
6053 // Yay!
6054 CANNOT_TAKE_LOCK;
6055
6056 SO_NOT_MAINLINE;
6057 }
6058 CONTRACTL_END;
6059
6060 PROFILER_TO_CLR_ENTRYPOINT_ASYNC((LF_CORPROF,
6061 LL_INFO10,
6062 "**PROF: SetFunctionIDMapper2. pFunc: 0x%p. clientData: 0x%p.\n",
6063 pFunc,
6064 clientData));
6065
6066 g_profControlBlock.pProfInterface->SetFunctionIDMapper2(pFunc, clientData);
6067
6068 return (S_OK);
6069}
6070
6071/*
6072 * GetFunctionInfo2
6073 *
6074 * This function takes the frameInfo returned from a profiler callback and splays it
6075 * out into as much information as possible.
6076 *
6077 * Parameters:
6078 * funcId - The function that is being requested.
6079 * frameInfo - Frame specific information from a callback (for resolving generics).
6080 * pClassId - An optional parameter for returning the class id of the function.
6081 * pModuleId - An optional parameter for returning the module of the function.
6082 * pToken - An optional parameter for returning the metadata token of the function.
6083 * cTypeArgs - The count of the size of the array typeArgs
6084 * pcTypeArgs - Returns the number of elements of typeArgs filled in, or if typeArgs is NULL
6085 * the number that would be needed.
6086 * typeArgs - An array to store generic type parameters for the function.
6087 *
6088 * Returns:
6089 * S_OK if successful.
6090 */
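// Illustrative profiler-side sketch (not part of the EE; names are hypothetical), e.g. from
// a stack-walk or ELT callback that supplied a COR_PRF_FRAME_INFO. The first call sizes the
// type-argument array; the second fills everything in. Error handling is omitted.
//
//     ULONG32 cTypeArgs = 0;
//     pInfo2->GetFunctionInfo2(funcId, frameInfo, NULL, NULL, NULL, 0, &cTypeArgs, NULL);
//
//     ClassID * typeArgs = new ClassID[cTypeArgs];
//     ClassID classId = NULL;
//     ModuleID moduleId = NULL;
//     mdToken token = mdTokenNil;
//     pInfo2->GetFunctionInfo2(funcId, frameInfo, &classId, &moduleId, &token,
//                              cTypeArgs, &cTypeArgs, typeArgs);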
6091HRESULT ProfToEEInterfaceImpl::GetFunctionInfo2(FunctionID funcId,
6092 COR_PRF_FRAME_INFO frameInfo,
6093 ClassID *pClassId,
6094 ModuleID *pModuleId,
6095 mdToken *pToken,
6096 ULONG32 cTypeArgs,
6097 ULONG32 *pcTypeArgs,
6098 ClassID typeArgs[])
6099{
6100 CONTRACTL
6101 {
6102 // Yay!
6103 NOTHROW;
6104
6105 // Yay!
6106 GC_NOTRIGGER;
6107
6108 // Yay!
6109 MODE_ANY;
6110
6111 // Yay!
6112 EE_THREAD_NOT_REQUIRED;
6113
6114 // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
6115 // reads metadata which causes us to take a reader lock. However, see
6116 // code:#DisableLockOnAsyncCalls
6117 DISABLED(CAN_TAKE_LOCK);
6118
6119 // Asynchronous functions can be called at arbitrary times when runtime
6120 // is holding locks that cannot be reentered without causing deadlock.
6121 // This contract detects any attempts to reenter locks held at the time
6122 // this function was called.
6123 CANNOT_RETAKE_LOCK;
6124
6125 SO_NOT_MAINLINE;
6126
6127 PRECONDITION(CheckPointer(pClassId, NULL_OK));
6128 PRECONDITION(CheckPointer(pModuleId, NULL_OK));
6129 PRECONDITION(CheckPointer(pToken, NULL_OK));
6130 PRECONDITION(CheckPointer(pcTypeArgs, NULL_OK));
6131 PRECONDITION(CheckPointer(typeArgs, NULL_OK));
6132 }
6133 CONTRACTL_END;
6134
6135 // See code:#DisableLockOnAsyncCalls
6136 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
6137
6138 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
6139 (LF_CORPROF,
6140 LL_INFO1000,
6141 "**PROF: GetFunctionInfo2 0x%p.\n",
6142 funcId));
6143
6144 //
6145 // Verify parameters.
6146 //
6147 COR_PRF_FRAME_INFO_INTERNAL *pFrameInfo = (COR_PRF_FRAME_INFO_INTERNAL *)frameInfo;
6148
6149 if ((funcId == NULL) ||
6150 ((pFrameInfo != NULL) && (pFrameInfo->funcID != funcId)))
6151 {
6152 return E_INVALIDARG;
6153 }
6154
6155 MethodDesc *pMethDesc = FunctionIdToMethodDesc(funcId);
6156
6157 if (pMethDesc == NULL)
6158 {
6159 return E_INVALIDARG;
6160 }
6161
6162 if ((cTypeArgs != 0) && (typeArgs == NULL))
6163 {
6164 return E_INVALIDARG;
6165 }
6166
6167 // it's not safe to examine a methoddesc that has not been restored so do not do so
6168    if (!pMethDesc->IsRestored())
6169 return CORPROF_E_DATAINCOMPLETE;
6170
6171 //
6172 // Find the exact instantiation of this function.
6173 //
6174 TypeHandle specificClass;
6175 MethodDesc* pActualMethod;
6176
6177 ClassID classId = NULL;
6178
6179 if (pMethDesc->IsSharedByGenericInstantiations())
6180 {
6181 BOOL exactMatch;
6182 OBJECTREF pThis = NULL;
6183
6184 if (pFrameInfo != NULL)
6185 {
6186 // If FunctionID represents a generic methoddesc on a struct, then pFrameInfo->thisArg
6187 // isn't an Object*. It's a pointer directly into the struct's members (i.e., it's not pointing at the
6188            // method table). That means pFrameInfo->thisArg cannot be cast to an OBJECTREF for
6189 // use by Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation. However,
6190 // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation won't even need a this pointer
6191 // for the methoddesc it's processing if the methoddesc is on a value type. So we
6192 // can safely pass NULL for the methoddesc's this in such a case.
6193 if (pMethDesc->GetMethodTable()->IsValueType())
6194 {
6195 _ASSERTE(!pMethDesc->AcquiresInstMethodTableFromThis());
6196 _ASSERTE(pThis == NULL);
6197 }
6198 else
6199 {
6200 pThis = ObjectToOBJECTREF((PTR_Object)(pFrameInfo->thisArg));
6201 }
6202 }
6203
6204 exactMatch = Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation(
6205 pMethDesc,
6206 pThis,
6207 PTR_VOID((pFrameInfo != NULL) ? pFrameInfo->extraArg : NULL),
6208 &specificClass,
6209 &pActualMethod);
6210
6211 if (exactMatch)
6212 {
6213 classId = TypeHandleToClassID(specificClass);
6214 }
6215 else if (!specificClass.HasInstantiation() || !specificClass.IsSharedByGenericInstantiations())
6216 {
6217 //
6218 // In this case we could not get the type args for the method, but if the class
6219 // is not a generic class or is instantiated with value types, this value is correct.
6220 //
6221 classId = TypeHandleToClassID(specificClass);
6222 }
6223 else
6224 {
6225 //
6226 // We could not get any class information.
6227 //
6228 classId = NULL;
6229 }
6230 }
6231 else
6232 {
6233 TypeHandle typeHandle(pMethDesc->GetMethodTable());
6234 classId = TypeHandleToClassID(typeHandle);
6235 pActualMethod = pMethDesc;
6236 }
6237
6238
6239 //
6240 // Fill in the ClassId, if desired
6241 //
6242 if (pClassId != NULL)
6243 {
6244 *pClassId = classId;
6245 }
6246
6247 //
6248 // Fill in the ModuleId, if desired.
6249 //
6250 if (pModuleId != NULL)
6251 {
6252 *pModuleId = (ModuleID)pMethDesc->GetModule();
6253 }
6254
6255 //
6256 // Fill in the token, if desired.
6257 //
6258 if (pToken != NULL)
6259 {
6260 *pToken = (mdToken)pMethDesc->GetMemberDef();
6261 }
6262
    if ((cTypeArgs == 0) && (pcTypeArgs != NULL))
    {
        //
        // The caller is only asking how large the typeArgs array needs to be; return that
        // now and short-circuit all the work below. (pcTypeArgs is already known to be
        // non-NULL here.)
        //
        *pcTypeArgs = pActualMethod->GetNumGenericMethodArgs();
        return S_OK;
    }
6275
6276 //
6277 // If no place to store resulting count, quit now.
6278 //
6279 if (pcTypeArgs == NULL)
6280 {
6281 return S_OK;
6282 }
6283
6284 //
6285 // Fill in the type args
6286 //
6287 DWORD cArgsToFill = pActualMethod->GetNumGenericMethodArgs();
6288
6289 if (cArgsToFill > cTypeArgs)
6290 {
6291 cArgsToFill = cTypeArgs;
6292 }
6293
6294 *pcTypeArgs = cArgsToFill;
6295
6296 if (cArgsToFill == 0)
6297 {
6298 return S_OK;
6299 }
6300
6301 Instantiation inst = pActualMethod->GetMethodInstantiation();
6302
6303 for (DWORD i = 0; i < cArgsToFill; i++)
6304 {
6305 typeArgs[i] = TypeHandleToClassID(inst[i]);
6306 }
6307
6308 return S_OK;
6309}
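
//
// Usage sketch for the method above (hypothetical profiler-side code, not part of this
// file). The usual pattern is to ask for the required typeArgs count first, then call
// again with a buffer of that size. pInfo (an ICorProfilerInfo2*), funcId, and frameInfo
// are assumed to come from the profiler's own callback state.
//
//   ClassID classId;
//   ModuleID moduleId;
//   mdToken token;
//   ULONG32 cTypeArgs = 0;
//
//   // First call: just get the number of generic type arguments.
//   HRESULT hr = pInfo->GetFunctionInfo2(funcId, frameInfo, &classId, &moduleId,
//                                        &token, 0, &cTypeArgs, NULL);
//   if (SUCCEEDED(hr) && (cTypeArgs > 0))
//   {
//       // Second call: fill a buffer of exactly that size.
//       ClassID *typeArgs = new ClassID[cTypeArgs];
//       hr = pInfo->GetFunctionInfo2(funcId, frameInfo, &classId, &moduleId,
//                                    &token, cTypeArgs, &cTypeArgs, typeArgs);
//       // ... use typeArgs ...
//       delete[] typeArgs;
//   }
//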
6310
6311/*
6312* IsFunctionDynamic
6313*
* This function takes a functionId that may be that of a metadata-less method, such as
* an IL stub or LCG method, and returns through isDynamic whether it is indeed a
* metadata-less method.
*
* Parameters:
*   functionId - The function that is being requested.
*   isDynamic - An optional parameter for returning whether the function is metadata-less.
6321*
6322* Returns:
6323* S_OK if successful.
6324*/
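//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo is assumed
// to be an ICorProfilerInfo* interface recent enough to expose this method, and functionId
// comes from one of the profiler's callbacks:
//
//   BOOL isDynamic = FALSE;
//   HRESULT hr = pInfo->IsFunctionDynamic(functionId, &isDynamic);
//   if (SUCCEEDED(hr) && isDynamic)
//   {
//       // Metadata-less method (IL stub / LCG); query it via GetDynamicFunctionInfo
//       // rather than GetFunctionInfo.
//   }
//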
6325HRESULT ProfToEEInterfaceImpl::IsFunctionDynamic(FunctionID functionId, BOOL *isDynamic)
6326{
6327 CONTRACTL
6328 {
6329 NOTHROW;
6330 GC_NOTRIGGER;
6331 MODE_ANY;
6332 EE_THREAD_NOT_REQUIRED;
6333
6334 // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
6335 // reads metadata which causes us to take a reader lock. However, see
6336 // code:#DisableLockOnAsyncCalls
6337 DISABLED(CAN_TAKE_LOCK);
6338
6339 // Asynchronous functions can be called at arbitrary times when runtime
6340 // is holding locks that cannot be reentered without causing deadlock.
6341 // This contract detects any attempts to reenter locks held at the time
6342 // this function was called.
6343 CANNOT_RETAKE_LOCK;
6344
6345 SO_NOT_MAINLINE;
6346
6347 PRECONDITION(CheckPointer(isDynamic, NULL_OK));
6348 }
6349 CONTRACTL_END;
6350
6351 // See code:#DisableLockOnAsyncCalls
6352 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
6353
6354 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
6355 (LF_CORPROF,
6356 LL_INFO1000,
6357 "**PROF: IsFunctionDynamic 0x%p.\n",
6358 functionId));
6359
6360 //
6361 // Verify parameters.
6362 //
6363
6364 if (functionId == NULL)
6365 {
6366 return E_INVALIDARG;
6367 }
6368
6369 MethodDesc *pMethDesc = FunctionIdToMethodDesc(functionId);
6370
6371 if (pMethDesc == NULL)
6372 {
6373 return E_INVALIDARG;
6374 }
6375
    // It's not safe to examine a MethodDesc that has not been restored, so don't.
6377 if (!pMethDesc->IsRestored())
6378 return CORPROF_E_DATAINCOMPLETE;
6379
6380 //
    // Fill in isDynamic, if desired.
6382 //
6383 if (isDynamic != NULL)
6384 {
6385 *isDynamic = pMethDesc->IsNoMetadata();
6386 }
6387
6388 return S_OK;
6389}
6390
6391/*
6392* GetFunctionFromIP3
6393*
* This function takes an IP, determines whether it falls within a managed function, and
* if so returns that function's FunctionID. It differs from GetFunctionFromIP in that it
* will return FunctionIDs even for methods with no associated metadata.
*
* Parameters:
*   ip - The instruction pointer.
*   pFunctionId - An optional parameter for returning the FunctionID.
*   pReJitId - An optional parameter for returning the ReJIT id of the function.
6402*
6403* Returns:
6404* S_OK if successful.
6405*/
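//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo and ip are
// assumed to come from the profiler (e.g., an instruction pointer captured while sampling):
//
//   FunctionID funcId = NULL;
//   ReJITID reJitId = 0;
//   HRESULT hr = pInfo->GetFunctionFromIP3(ip, &funcId, &reJitId);
//   if (SUCCEEDED(hr))
//   {
//       // funcId may identify a metadata-less (dynamic) method; see IsFunctionDynamic.
//   }
//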
6406HRESULT ProfToEEInterfaceImpl::GetFunctionFromIP3(LPCBYTE ip, FunctionID * pFunctionId, ReJITID * pReJitId)
6407{
6408 CONTRACTL
6409 {
6410 NOTHROW;
6411
6412 // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
6413 // which can switch us to preemptive mode and trigger GCs
6414 GC_TRIGGERS;
6415 MODE_ANY;
6416 EE_THREAD_NOT_REQUIRED;
6417
6418 // Grabbing the rejitid requires entering the rejit manager's hash table & lock,
6419 CAN_TAKE_LOCK;
6420
6421 SO_NOT_MAINLINE;
6422 }
6423 CONTRACTL_END;
6424
6425 // See code:#DisableLockOnAsyncCalls
6426 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
6427
6428 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
6429 kP2EEAllowableAfterAttach | kP2EETriggers,
6430 (LF_CORPROF,
6431 LL_INFO1000,
6432 "**PROF: GetFunctionFromIP3 0x%p.\n",
6433 ip));
6434
6435 HRESULT hr = S_OK;
6436
6437 EECodeInfo codeInfo;
6438
6439 hr = GetFunctionFromIPInternal(ip, &codeInfo, /* failOnNoMetadata */ FALSE);
6440 if (FAILED(hr))
6441 {
6442 return hr;
6443 }
6444
6445 if (pFunctionId)
6446 {
6447 *pFunctionId = MethodDescToFunctionID(codeInfo.GetMethodDesc());
6448 }
6449
6450 if (pReJitId != NULL)
6451 {
6452 MethodDesc * pMD = codeInfo.GetMethodDesc();
6453 *pReJitId = ReJitManager::GetReJitId(pMD, codeInfo.GetStartAddress());
6454 }
6455
6456 return S_OK;
6457}
6458
6459/*
6460* GetDynamicFunctionInfo
6461*
* This function takes a functionId that may be that of a metadata-less method, such as
* an IL stub or LCG method, and returns information about it without failing the way
* GetFunctionInfo does.
*
* Parameters:
*   functionId - The function that is being requested.
*   pModuleId - An optional parameter for returning the module of the function.
*   ppvSig - An optional parameter for returning the signature of the function.
*   pbSig - An optional parameter for returning the size of the signature of the function.
*   cchName - A parameter indicating the size of the buffer passed as wszName.
*   pcchName - An optional parameter for returning the true length of the function's name.
*   wszName - A caller-allocated buffer of size cchName to receive the function's name.
6473*
6474* Returns:
6475* S_OK if successful.
6476*/
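//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo and
// functionId are assumed to come from the profiler, and the buffer size of 256 is an
// arbitrary choice:
//
//   ModuleID moduleId;
//   PCCOR_SIGNATURE pSig;
//   ULONG cbSig;
//   WCHAR name[256];
//   ULONG cchName = 0;
//   HRESULT hr = pInfo->GetDynamicFunctionInfo(functionId, &moduleId, &pSig, &cbSig,
//                                              256, &cchName, name);
//   // On success, name holds the metadata-less method's name (e.g., an IL stub name).
//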
6477HRESULT ProfToEEInterfaceImpl::GetDynamicFunctionInfo(FunctionID functionId,
6478 ModuleID *pModuleId,
6479 PCCOR_SIGNATURE* ppvSig,
6480 ULONG* pbSig,
6481 ULONG cchName,
6482 ULONG *pcchName,
6483 __out_ecount_part_opt(cchName, *pcchName) WCHAR wszName[])
6484{
6485 CONTRACTL
6486 {
6487 NOTHROW;
6488 GC_NOTRIGGER;
6489 MODE_ANY;
6490 EE_THREAD_NOT_REQUIRED;
6491
6492 // Generics::GetExactInstantiationsOfMethodAndItsClassFromCallInformation eventually
6493 // reads metadata which causes us to take a reader lock. However, see
6494 // code:#DisableLockOnAsyncCalls
6495 DISABLED(CAN_TAKE_LOCK);
6496
6497 // Asynchronous functions can be called at arbitrary times when runtime
6498 // is holding locks that cannot be reentered without causing deadlock.
6499 // This contract detects any attempts to reenter locks held at the time
6500 // this function was called.
6501 CANNOT_RETAKE_LOCK;
6502
6503 SO_NOT_MAINLINE;
6504
6505 PRECONDITION(CheckPointer(pModuleId, NULL_OK));
6506 PRECONDITION(CheckPointer(ppvSig, NULL_OK));
6507 PRECONDITION(CheckPointer(pbSig, NULL_OK));
6508 PRECONDITION(CheckPointer(pcchName, NULL_OK));
6509 }
6510 CONTRACTL_END;
6511
6512 // See code:#DisableLockOnAsyncCalls
6513 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
6514
6515 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
6516 (LF_CORPROF,
6517 LL_INFO1000,
6518 "**PROF: GetDynamicFunctionInfo 0x%p.\n",
6519 functionId));
6520
6521 //
6522 // Verify parameters.
6523 //
6524
6525 if (functionId == NULL)
6526 {
6527 return E_INVALIDARG;
6528 }
6529
6530 MethodDesc *pMethDesc = FunctionIdToMethodDesc(functionId);
6531
6532 if (pMethDesc == NULL)
6533 {
6534 return E_INVALIDARG;
6535 }
6536
    // It's not safe to examine a MethodDesc that has not been restored, so don't.
6538 if (!pMethDesc->IsRestored())
6539 return CORPROF_E_DATAINCOMPLETE;
6540
6541
6542 if (!pMethDesc->IsNoMetadata())
6543 return E_INVALIDARG;
6544
6545 //
6546 // Fill in the ModuleId, if desired.
6547 //
6548 if (pModuleId != NULL)
6549 {
6550 *pModuleId = (ModuleID)pMethDesc->GetModule();
6551 }
6552
6553 //
6554 // Fill in the ppvSig and pbSig, if desired
6555 //
6556 if (ppvSig != NULL && pbSig != NULL)
6557 {
6558 pMethDesc->GetSig(ppvSig, pbSig);
6559 }
6560
6561 HRESULT hr = S_OK;
6562
6563 EX_TRY
6564 {
6565 if (wszName != NULL)
6566 *wszName = 0;
6567 if (pcchName != NULL)
6568 *pcchName = 0;
6569
6570 StackSString ss;
6571 ss.SetUTF8(pMethDesc->GetName());
6572 ss.Normalize();
6573 LPCWSTR methodName = ss.GetUnicode();
6574
6575 ULONG trueLen = (ULONG)(wcslen(methodName) + 1);
6576
6577 // Return name of method as required.
6578 if (wszName && cchName > 0)
6579 {
6580 if (cchName < trueLen)
6581 {
6582 hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
6583 }
6584 else
6585 {
6586 wcsncpy_s(wszName, cchName, methodName, trueLen);
6587 }
6588 }
6589
6590 // If they request the actual length of the name
6591 if (pcchName)
6592 *pcchName = trueLen;
6593 }
6594 EX_CATCH_HRESULT(hr);
6595
6596 return (hr);
6597}
6598
6599/*
6600 * GetNativeCodeStartAddresses
6601 *
 * Gets all of the native code addresses associated with a particular function. Tiered compilation
 * potentially creates different native code versions for a method, and this function allows profilers
 * to view all native versions of a method.
6605 *
6606 * Parameters:
6607 * functionID - The function that is being requested.
6608 * reJitId - The ReJIT id.
6609 * cCodeStartAddresses - A parameter for indicating the size of buffer for the codeStartAddresses parameter.
6610 * pcCodeStartAddresses - An optional parameter for returning the true size of the codeStartAddresses parameter.
6611 * codeStartAddresses - The array to be filled up with native code addresses.
6612 *
6613 * Returns:
6614 * S_OK if successful
6615 *
6616 */
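//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo, functionID,
// and reJitId are assumed to come from the profiler (reJitId may be 0 for the original IL body):
//
//   ULONG32 cAddrs = 0;
//   HRESULT hr = pInfo->GetNativeCodeStartAddresses(functionID, reJitId, 0, &cAddrs, NULL);
//   if (SUCCEEDED(hr) && (cAddrs > 0))
//   {
//       UINT_PTR *addrs = new UINT_PTR[cAddrs];
//       hr = pInfo->GetNativeCodeStartAddresses(functionID, reJitId, cAddrs, &cAddrs, addrs);
//       // Each entry can be passed to GetCodeInfo4 or GetILToNativeMapping3.
//       delete[] addrs;
//   }
//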
6617HRESULT ProfToEEInterfaceImpl::GetNativeCodeStartAddresses(FunctionID functionID,
6618 ReJITID reJitId,
6619 ULONG32 cCodeStartAddresses,
6620 ULONG32 *pcCodeStartAddresses,
6621 UINT_PTR codeStartAddresses[])
6622{
6623 CONTRACTL
6624 {
6625 NOTHROW;
6626 GC_NOTRIGGER;
6627 MODE_ANY;
6628 EE_THREAD_NOT_REQUIRED;
6629 CAN_TAKE_LOCK;
6630
6631 SO_NOT_MAINLINE;
6632
6633 PRECONDITION(CheckPointer(pcCodeStartAddresses, NULL_OK));
6634 PRECONDITION(CheckPointer(codeStartAddresses, NULL_OK));
6635 }
6636 CONTRACTL_END;
6637
6638 if (functionID == NULL)
6639 {
6640 return E_INVALIDARG;
6641 }
6642
6643 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
6644 (LF_CORPROF,
6645 LL_INFO1000,
6646 "**PROF: GetNativeCodeStartAddresses 0x%p 0x%p.\n",
6647 functionID, reJitId));
6648
6649 HRESULT hr = S_OK;
6650
6651 EX_TRY
6652 {
6653 if (pcCodeStartAddresses != NULL)
6654 {
6655 *pcCodeStartAddresses = 0;
6656 }
6657
6658 MethodDesc * methodDesc = FunctionIdToMethodDesc(functionID);
6659 PTR_MethodDesc pMD = PTR_MethodDesc(methodDesc);
6660 ULONG32 trueLen = 0;
6661 StackSArray<UINT_PTR> addresses;
6662
6663 CodeVersionManager *pCodeVersionManager = pMD->GetCodeVersionManager();
6664
6665 ILCodeVersion ilCodeVersion = NULL;
6666 {
6667 CodeVersionManager::TableLockHolder lockHolder(pCodeVersionManager);
6668
6669 ilCodeVersion = pCodeVersionManager->GetILCodeVersion(pMD, reJitId);
6670
6671 NativeCodeVersionCollection nativeCodeVersions = ilCodeVersion.GetNativeCodeVersions(pMD);
6672 for (NativeCodeVersionIterator iter = nativeCodeVersions.Begin(); iter != nativeCodeVersions.End(); iter++)
6673 {
6674 addresses.Append((*iter).GetNativeCode());
6675
6676 ++trueLen;
6677 }
6678 }
6679
6680 if (pcCodeStartAddresses != NULL)
6681 {
6682 *pcCodeStartAddresses = trueLen;
6683 }
6684
6685 if (codeStartAddresses != NULL)
6686 {
6687 if (cCodeStartAddresses < trueLen)
6688 {
6689 hr = HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
6690 }
6691 else
6692 {
6693 for(ULONG32 i = 0; i < trueLen; ++i)
6694 {
6695 codeStartAddresses[i] = addresses[i];
6696 }
6697 }
6698 }
6699 }
6700 EX_CATCH_HRESULT(hr);
6701
6702 return hr;
6703}
6704
6705/*
6706 * GetILToNativeMapping3
6707 *
6708 * This overload behaves the same as GetILToNativeMapping2, except it allows the profiler
6709 * to address specific native code versions instead of defaulting to the first one.
6710 *
6711 * Parameters:
6712 * pNativeCodeStartAddress - start address of the native code version, returned by GetNativeCodeStartAddresses
6713 * cMap - size of the map array
6714 * pcMap - how many items are returned in the map array
6715 * map - an array to store the il to native mappings in
6716 *
6717 * Returns:
6718 * S_OK if successful
6719 *
6720 */
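//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo is assumed,
// and nativeCodeStart is a start address previously obtained from GetNativeCodeStartAddresses:
//
//   ULONG32 cMap = 0;
//   HRESULT hr = pInfo->GetILToNativeMapping3(nativeCodeStart, 0, &cMap, NULL);
//   if (SUCCEEDED(hr) && (cMap > 0))
//   {
//       COR_DEBUG_IL_TO_NATIVE_MAP *map = new COR_DEBUG_IL_TO_NATIVE_MAP[cMap];
//       hr = pInfo->GetILToNativeMapping3(nativeCodeStart, cMap, &cMap, map);
//       // map[i].ilOffset / nativeStartOffset / nativeEndOffset describe each mapped range.
//       delete[] map;
//   }
//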
6721HRESULT ProfToEEInterfaceImpl::GetILToNativeMapping3(UINT_PTR pNativeCodeStartAddress,
6722 ULONG32 cMap,
6723 ULONG32 *pcMap,
6724 COR_DEBUG_IL_TO_NATIVE_MAP map[])
6725{
6726 CONTRACTL
6727 {
6728 THROWS;
6729 DISABLED(GC_NOTRIGGER);
6730 MODE_ANY;
6731 CAN_TAKE_LOCK;
6732
6733 SO_NOT_MAINLINE;
6734
6735 PRECONDITION(CheckPointer(pcMap, NULL_OK));
6736 PRECONDITION(CheckPointer(map, NULL_OK));
6737 }
6738 CONTRACTL_END;
6739
6740 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
6741 (LF_CORPROF,
6742 LL_INFO1000,
6743 "**PROF: GetILToNativeMapping3 0x%p.\n",
6744 pNativeCodeStartAddress));
6745
6746 if (pNativeCodeStartAddress == NULL)
6747 {
6748 return E_INVALIDARG;
6749 }
6750
6751 if ((cMap > 0) &&
6752 ((pcMap == NULL) || (map == NULL)))
6753 {
6754 return E_INVALIDARG;
6755 }
6756
6757#ifdef DEBUGGING_SUPPORTED
6758 if (g_pDebugInterface == NULL)
6759 {
6760 return CORPROF_E_DEBUGGING_DISABLED;
6761 }
6762
6763 return (g_pDebugInterface->GetILToNativeMapping(pNativeCodeStartAddress, cMap, pcMap, map));
6764#else
6765 return E_NOTIMPL;
6766#endif
6767}
6768
6769/*
6770 * GetCodeInfo4
6771 *
 * Gets the location and size of a jitted function. Tiered compilation potentially creates different native code
 * versions for a method, and this overload allows a profiler to specify which native code version it wants the
 * code info for.
6775 *
6776 * Parameters:
6777 * pNativeCodeStartAddress - start address of the native code version, returned by GetNativeCodeStartAddresses
6778 * cCodeInfos - size of the codeInfos array
6779 * pcCodeInfos - how many items are returned in the codeInfos array
6780 * codeInfos - an array to store the code infos in
6781 *
6782 * Returns:
6783 * S_OK if successful
6784 *
6785 */
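//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo is assumed,
// and nativeCodeStart is a start address previously obtained from GetNativeCodeStartAddresses:
//
//   ULONG32 cCodeInfos = 0;
//   HRESULT hr = pInfo->GetCodeInfo4(nativeCodeStart, 0, &cCodeInfos, NULL);
//   if (SUCCEEDED(hr) && (cCodeInfos > 0))
//   {
//       COR_PRF_CODE_INFO *infos = new COR_PRF_CODE_INFO[cCodeInfos];
//       hr = pInfo->GetCodeInfo4(nativeCodeStart, cCodeInfos, &cCodeInfos, infos);
//       // infos[i].startAddress / infos[i].size describe each contiguous native code region.
//       delete[] infos;
//   }
//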
6786HRESULT ProfToEEInterfaceImpl::GetCodeInfo4(UINT_PTR pNativeCodeStartAddress,
6787 ULONG32 cCodeInfos,
6788 ULONG32* pcCodeInfos,
6789 COR_PRF_CODE_INFO codeInfos[])
6790{
6791 CONTRACTL
6792 {
6793 NOTHROW;
6794 GC_TRIGGERS;
6795 MODE_ANY;
6796 EE_THREAD_NOT_REQUIRED;
6797 CAN_TAKE_LOCK;
6798
6799 SO_NOT_MAINLINE;
6800
6801 PRECONDITION(CheckPointer(pcCodeInfos, NULL_OK));
6802 PRECONDITION(CheckPointer(codeInfos, NULL_OK));
6803 }
6804 CONTRACTL_END;
6805
6806 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
6807 kP2EEAllowableAfterAttach | kP2EETriggers,
6808 (LF_CORPROF,
6809 LL_INFO1000,
6810 "**PROF: GetCodeInfo4 0x%p.\n",
6811 pNativeCodeStartAddress));
6812
6813 if ((cCodeInfos != 0) && (codeInfos == NULL))
6814 {
6815 return E_INVALIDARG;
6816 }
6817
6818 return GetCodeInfoFromCodeStart(pNativeCodeStartAddress,
6819 cCodeInfos,
6820 pcCodeInfos,
6821 codeInfos);
6822}
6823
6824/*
6825 * GetStringLayout
6826 *
6827 * This function describes to a profiler the internal layout of a string.
6828 *
6829 * Parameters:
6830 * pBufferLengthOffset - Offset within an OBJECTREF of a string of the ArrayLength field.
6831 * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
6832 * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
6833 *
6834 * Returns:
6835 * S_OK if successful.
6836 */
6837HRESULT ProfToEEInterfaceImpl::GetStringLayout(ULONG *pBufferLengthOffset,
6838 ULONG *pStringLengthOffset,
6839 ULONG *pBufferOffset)
6840{
6841 CONTRACTL
6842 {
6843 // Yay!
6844 NOTHROW;
6845
6846 // Yay!
6847 GC_NOTRIGGER;
6848
6849 // Yay!
6850 MODE_ANY;
6851
6852 // Yay!
6853 EE_THREAD_NOT_REQUIRED;
6854
6855 // Yay!
6856 CANNOT_TAKE_LOCK;
6857
6858 SO_NOT_MAINLINE;
6859
6860 PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
6861 PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
6862 PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
6863 }
6864 CONTRACTL_END;
6865
6866 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
6867 (LF_CORPROF,
6868 LL_INFO1000,
6869 "**PROF: GetStringLayout.\n"));
6870
6871 return this->GetStringLayoutHelper(pBufferLengthOffset, pStringLengthOffset, pBufferOffset);
6872}
6873
6874/*
6875 * GetStringLayout2
6876 *
6877 * This function describes to a profiler the internal layout of a string.
6878 *
6879 * Parameters:
6880 * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
6881 * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
6882 *
6883 * Returns:
6884 * S_OK if successful.
6885 */
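//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo is assumed
// to be at least ICorProfilerInfo3, and stringObjectId is an ObjectID known to refer to a
// System.String. The profiler is responsible for reading the object only at a point where
// the GC cannot move it (e.g., from within an appropriate callback):
//
//   ULONG stringLengthOffset = 0;
//   ULONG bufferOffset = 0;
//   HRESULT hr = pInfo->GetStringLayout2(&stringLengthOffset, &bufferOffset);
//   if (SUCCEEDED(hr))
//   {
//       DWORD length = *(DWORD *)((BYTE *)stringObjectId + stringLengthOffset);
//       WCHAR *chars = (WCHAR *)((BYTE *)stringObjectId + bufferOffset);
//       // chars points at 'length' characters of the string's contents.
//   }
//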
6886HRESULT ProfToEEInterfaceImpl::GetStringLayout2(ULONG *pStringLengthOffset,
6887 ULONG *pBufferOffset)
6888{
6889 CONTRACTL
6890 {
6891 // Yay!
6892 NOTHROW;
6893
6894 // Yay!
6895 GC_NOTRIGGER;
6896
6897 // Yay!
6898 MODE_ANY;
6899
6900 // Yay!
6901 EE_THREAD_NOT_REQUIRED;
6902
6903 // Yay!
6904 CANNOT_TAKE_LOCK;
6905
6906 SO_NOT_MAINLINE;
6907
6908 PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
6909 PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
6910 }
6911 CONTRACTL_END;
6912
6913 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
6914 (LF_CORPROF,
6915 LL_INFO1000,
6916 "**PROF: GetStringLayout2.\n"));
6917
6918 ULONG dummyBufferLengthOffset;
6919 return this->GetStringLayoutHelper(&dummyBufferLengthOffset, pStringLengthOffset, pBufferOffset);
6920}
6921
6922/*
6923 * GetStringLayoutHelper
6924 *
6925 * This function describes to a profiler the internal layout of a string.
6926 *
6927 * Parameters:
6928 * pBufferLengthOffset - Offset within an OBJECTREF of a string of the ArrayLength field.
6929 * pStringLengthOffset - Offset within an OBJECTREF of a string of the StringLength field.
6930 * pBufferOffset - Offset within an OBJECTREF of a string of the Buffer field.
6931 *
6932 * Returns:
6933 * S_OK if successful.
6934 */
6935HRESULT ProfToEEInterfaceImpl::GetStringLayoutHelper(ULONG *pBufferLengthOffset,
6936 ULONG *pStringLengthOffset,
6937 ULONG *pBufferOffset)
6938{
6939 CONTRACTL
6940 {
6941 // Yay!
6942 NOTHROW;
6943
6944 // Yay!
6945 GC_NOTRIGGER;
6946
6947 // Yay!
6948 MODE_ANY;
6949
6950 // Yay!
6951 EE_THREAD_NOT_REQUIRED;
6952
6953 // Yay!
6954 CANNOT_TAKE_LOCK;
6955
6956 SO_NOT_MAINLINE;
6957
6958 PRECONDITION(CheckPointer(pBufferLengthOffset, NULL_OK));
6959 PRECONDITION(CheckPointer(pStringLengthOffset, NULL_OK));
6960 PRECONDITION(CheckPointer(pBufferOffset, NULL_OK));
6961 }
6962 CONTRACTL_END;
6963
    // The String class no longer has a bufferLength field in it, so we return the offset
    // of the stringLength field as the closest available approximation. This is technically
    // a breaking change, which is why a new method, ICorProfilerInfo3::GetStringLayout2,
    // was added (it omits the bufferLength offset entirely).
6968 if (pBufferLengthOffset != NULL)
6969 {
6970 *pBufferLengthOffset = StringObject::GetStringLengthOffset();
6971 }
6972
6973 if (pStringLengthOffset != NULL)
6974 {
6975 *pStringLengthOffset = StringObject::GetStringLengthOffset();
6976 }
6977
6978 if (pBufferOffset != NULL)
6979 {
6980 *pBufferOffset = StringObject::GetBufferOffset();
6981 }
6982
6983 return S_OK;
6984}
6985
6986/*
6987 * GetClassLayout
6988 *
6989 * This function describes to a profiler the internal layout of a class.
6990 *
6991 * Parameters:
6992 * classID - The class that is being queried. It is really a TypeHandle.
6993 * rFieldOffset - An array to store information about each field in the class.
6994 * cFieldOffset - Count of the number of elements in rFieldOffset.
6995 * pcFieldOffset - Upon return contains the number of elements filled in, or if
6996 * cFieldOffset is zero, the number of elements needed.
6997 * pulClassSize - Optional parameter for containing the size in bytes of the underlying
6998 * internal class structure.
6999 *
7000 * Returns:
7001 * S_OK if successful.
7002 */
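//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo and classID
// are assumed to come from the profiler (e.g., from a ClassLoadFinished callback). As with
// the other buffer-returning APIs, query the count first and then fill a sized buffer:
//
//   ULONG cFields = 0;
//   ULONG classSize = 0;
//   HRESULT hr = pInfo->GetClassLayout(classID, NULL, 0, &cFields, &classSize);
//   if (SUCCEEDED(hr) && (cFields > 0))
//   {
//       COR_FIELD_OFFSET *offsets = new COR_FIELD_OFFSET[cFields];
//       hr = pInfo->GetClassLayout(classID, offsets, cFields, &cFields, &classSize);
//       // offsets[i].ridOfField is the field's metadata token (mdFieldDef);
//       // offsets[i].ulOffset is its offset within an instance.
//       delete[] offsets;
//   }
//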
7003HRESULT ProfToEEInterfaceImpl::GetClassLayout(ClassID classID,
7004 COR_FIELD_OFFSET rFieldOffset[],
7005 ULONG cFieldOffset,
7006 ULONG *pcFieldOffset,
7007 ULONG *pulClassSize)
7008{
7009 CONTRACTL
7010 {
7011 // Yay!
7012 NOTHROW;
7013
7014 // Yay!
7015 GC_NOTRIGGER;
7016
7017 // Yay!
7018 MODE_ANY;
7019
7020 // Yay!
7021 EE_THREAD_NOT_REQUIRED;
7022
7023 // Yay!
7024 CANNOT_TAKE_LOCK;
7025
7026 SO_NOT_MAINLINE;
7027
7028 PRECONDITION(CheckPointer(rFieldOffset, NULL_OK));
7029 PRECONDITION(CheckPointer(pcFieldOffset));
7030 PRECONDITION(CheckPointer(pulClassSize, NULL_OK));
7031 }
7032 CONTRACTL_END;
7033
7034 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
7035 (LF_CORPROF,
7036 LL_INFO1000,
7037 "**PROF: GetClassLayout 0x%p.\n",
7038 classID));
7039
7040 //
7041 // Verify parameters
7042 //
7043 if ((pcFieldOffset == NULL) || (classID == NULL))
7044 {
7045 return E_INVALIDARG;
7046 }
7047
7048 if ((cFieldOffset != 0) && (rFieldOffset == NULL))
7049 {
7050 return E_INVALIDARG;
7051 }
7052
7053 TypeHandle typeHandle = TypeHandle::FromPtr((void *)classID);
7054
7055 //
    // This is the wrong API for arrays or strings. Use GetArrayObjectInfo or GetStringLayout instead.
7057 //
7058 if (typeHandle.IsTypeDesc() || typeHandle.AsMethodTable()->IsArray())
7059 {
7060 return E_INVALIDARG;
7061 }
7062
7063 //
    // We used to have a bug where this API incorrectly succeeded for strings during startup. Profilers
    // took a dependency on this bug, so let the API succeed for strings during startup for backward compatibility.
7066 //
7067 if (typeHandle.AsMethodTable()->IsString() && g_profControlBlock.fBaseSystemClassesLoaded)
7068 {
7069 return E_INVALIDARG;
7070 }
7071
7072 //
7073 // If this class is not fully restored, that is all the information we can get at this time.
7074 //
7075 if (!typeHandle.IsRestored())
7076 {
7077 return CORPROF_E_DATAINCOMPLETE;
7078 }
7079
7080 // Types can be pre-restored, but they still aren't expected to handle queries before
    // eager fixups have run. This is a targeted band-aid for a bug IntelliTrace was
    // running into - attempting to get the class layout for all types at module load time.
    // If we don't detect this, the runtime will AV during the field iteration below. Feel
7084 // free to eliminate this check when a more complete solution is available.
7085 if (MethodTable::IsParentMethodTableTagged(typeHandle.AsMethodTable()))
7086 {
7087 return CORPROF_E_DATAINCOMPLETE;
7088 }
7089
    // !IsValueType implies IsArray || IsReferenceType. Since IsArray has been ruled out above,
    // the type must be a reference type if !IsValueType.
7092 BOOL fReferenceType = !typeHandle.IsValueType();
7093
7094 //
7095 // Fill in class size now
7096 //
7097 // Move after the check for typeHandle.GetMethodTable()->IsRestored()
7098 // because an unrestored MethodTable may have a bad EE class pointer
7099 // which will be used by MethodTable::GetNumInstanceFieldBytes
7100 //
7101 if (pulClassSize != NULL)
7102 {
7103 if (fReferenceType)
7104 {
7105 // aligned size including the object header for reference types
7106 *pulClassSize = typeHandle.GetMethodTable()->GetBaseSize();
7107 }
7108 else
7109 {
7110 // unboxed and unaligned size for value types
7111 *pulClassSize = typeHandle.GetMethodTable()->GetNumInstanceFieldBytes();
7112 }
7113 }
7114
7115 ApproxFieldDescIterator fieldDescIterator(typeHandle.GetMethodTable(), ApproxFieldDescIterator::INSTANCE_FIELDS);
7116
7117 ULONG cFields = fieldDescIterator.Count();
7118
7119 //
7120 // If they are looking to just get the count, return that.
7121 //
7122 if ((cFieldOffset == 0) || (rFieldOffset == NULL))
7123 {
7124 *pcFieldOffset = cFields;
7125 return S_OK;
7126 }
7127
7128 //
    // Don't put more in the array than the caller's buffer can hold.
7130 //
7131 if (cFields > cFieldOffset)
7132 {
7133 cFields = cFieldOffset;
7134 }
7135
7136 *pcFieldOffset = cFields;
7137
7138 //
7139 // Now fill in the array
7140 //
7141 ULONG i;
7142 FieldDesc *pField;
7143
7144 for (i = 0; i < cFields; i++)
7145 {
7146 pField = fieldDescIterator.Next();
7147 rFieldOffset[i].ridOfField = (ULONG)pField->GetMemberDef();
7148 rFieldOffset[i].ulOffset = (ULONG)pField->GetOffset() + (fReferenceType ? Object::GetOffsetOfFirstField() : 0);
7149 }
7150
7151 return S_OK;
7152}
7153
7154
7155typedef struct _PROFILER_STACK_WALK_DATA
7156{
7157 StackSnapshotCallback *callback;
7158 ULONG32 infoFlags;
7159 ULONG32 contextFlags;
7160 void *clientData;
7161
7162#ifdef WIN64EXCEPTIONS
7163 StackFrame sfParent;
7164#endif
7165} PROFILER_STACK_WALK_DATA;
7166
7167
7168/*
7169 * ProfilerStackWalkCallback
7170 *
7171 * This routine is used as the callback from the general stack walker for
7172 * doing snapshot stack walks
7173 *
7174 */
7175StackWalkAction ProfilerStackWalkCallback(CrawlFrame *pCf, PROFILER_STACK_WALK_DATA *pData)
7176{
7177 CONTRACTL
7178 {
7179 SO_NOT_MAINLINE;
7180 NOTHROW; // throw is RIGHT out... the throw at minimum allocates the thrown object which we *must* not do
        GC_NOTRIGGER;  // the stack is not necessarily crawlable in this state, so we must not induce a GC
7182 }
7183 CONTRACTL_END;
7184
7185 MethodDesc *pFunc = pCf->GetFunction();
7186
7187 COR_PRF_FRAME_INFO_INTERNAL frameInfo;
7188 ULONG32 contextSize = 0;
7189 BYTE *context = NULL;
7190
7191 UINT_PTR currentIP = 0;
7192 REGDISPLAY *pRegDisplay = pCf->GetRegisterSet();
7193#if defined(_TARGET_X86_)
7194 CONTEXT builtContext;
7195#endif
7196
7197 //
7198 // For Unmanaged-to-managed transitions we get a NativeMarker back, which we want
7199 // to return to the profiler as the context seed if it wants to walk the unmanaged
7200 // stack frame, so we report the functionId as NULL to indicate this.
7201 //
7202 if (pCf->IsNativeMarker())
7203 {
7204 pFunc = NULL;
7205 }
7206
7207 //
7208 // Skip all Lightweight reflection/emit functions
7209 //
7210 if ((pFunc != NULL) && pFunc->IsNoMetadata())
7211 {
7212 return SWA_CONTINUE;
7213 }
7214
7215 //
7216 // If this is not a transition of any sort and not a managed
7217 // method, ignore it.
7218 //
7219 if (!pCf->IsNativeMarker() && !pCf->IsFrameless())
7220 {
7221 return SWA_CONTINUE;
7222 }
7223
7224 currentIP = (UINT_PTR)pRegDisplay->ControlPC;
7225
7226 frameInfo.size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
7227 frameInfo.version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
7228
7229 if (pFunc != NULL)
7230 {
7231 frameInfo.funcID = MethodDescToFunctionID(pFunc);
7232 frameInfo.extraArg = NULL;
7233 }
7234 else
7235 {
7236 frameInfo.funcID = NULL;
7237 frameInfo.extraArg = NULL;
7238 }
7239
7240 frameInfo.IP = currentIP;
7241 frameInfo.thisArg = NULL;
7242
7243 if (pData->infoFlags & COR_PRF_SNAPSHOT_REGISTER_CONTEXT)
7244 {
7245#if defined(_TARGET_X86_)
7246 //
7247 // X86 stack walking does not keep the context up-to-date during the
7248 // walk. Instead it keeps the REGDISPLAY up-to-date. Thus, we need to
7249 // build a CONTEXT from the REGDISPLAY.
7250 //
7251
7252 memset(&builtContext, 0, sizeof(builtContext));
7253 builtContext.ContextFlags = CONTEXT_INTEGER | CONTEXT_CONTROL;
7254 CopyRegDisplay(pRegDisplay, NULL, &builtContext);
7255 context = (BYTE *)(&builtContext);
7256#else
7257 context = (BYTE *)pRegDisplay->pCurrentContext;
7258#endif
7259 contextSize = sizeof(CONTEXT);
7260 }
7261
7262 // NOTE: We are intentionally not setting any callback state flags here (i.e., not using
7263 // SetCallbackStateFlagsHolder), as we want the DSS callback to "inherit" the
7264 // same callback state that DSS has: if DSS was called asynchronously, then consider
7265 // the DSS callback to be called asynchronously.
7266 if (pData->callback(frameInfo.funcID,
7267 frameInfo.IP,
7268 (COR_PRF_FRAME_INFO)&frameInfo,
7269 contextSize,
7270 context,
7271 pData->clientData) == S_OK)
7272 {
7273 return SWA_CONTINUE;
7274 }
7275
7276 return SWA_ABORT;
7277}
7278
7279#ifdef _TARGET_X86_
7280
7281//---------------------------------------------------------------------------------------
7282// Normally, calling GetFunction() on the frame is sufficient to ensure
// HelperMethodFrames are initialized. However, sometimes we need to be able to specify
7284// that we should not enter the host while initializing, so we need to initialize such
7285// frames more directly. This small helper function directly forces the initialization,
7286// and ensures we don't enter the host as a result if we're executing in an asynchronous
7287// call (i.e., hijacked thread)
7288//
7289// Arguments:
7290// pFrame - Frame to initialize.
7291//
7292// Return Value:
7293// TRUE iff pFrame was successfully initialized (or was already initialized). If
7294// pFrame is not a HelperMethodFrame (or derived type), this returns TRUE
7295// immediately. FALSE indicates we tried to initialize w/out entering the host, and
7296// had to abort as a result when a reader lock was needed but unavailable.
7297//
7298
7299static BOOL EnsureFrameInitialized(Frame * pFrame)
7300{
7301 CONTRACTL
7302 {
7303 NOTHROW;
7304 GC_NOTRIGGER;
7305
7306 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
7307 // host (SQL). Corners will be cut to ensure this is the case
7308 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
7309
7310 SUPPORTS_DAC;
7311 }
7312 CONTRACTL_END;
7313
7314 if (pFrame->GetFrameType() != Frame::TYPE_HELPER_METHOD_FRAME)
7315 {
7316 // This frame is not a HelperMethodFrame or a frame derived from
7317 // HelperMethodFrame, so HMF-specific lazy initialization is not an issue.
7318 return TRUE;
7319 }
7320
7321 HelperMethodFrame * pHMF = (HelperMethodFrame *) pFrame;
7322
7323 if (pHMF->InsureInit(
7324 false, // initialInit
7325 NULL, // unwindState
7326 (ShouldAvoidHostCalls() ?
7327 NoHostCalls :
7328 AllowHostCalls)
7329 ) != NULL)
7330 {
7331 // InsureInit() succeeded and found the return address
7332 return TRUE;
7333 }
7334
7335 // No return address was found. It must be because we asked InsureInit() to bail if
7336 // it would have entered the host
7337 _ASSERTE(ShouldAvoidHostCalls());
7338 return FALSE;
7339}
7340
7341//---------------------------------------------------------------------------------------
7342//
7343// Implements the COR_PRF_SNAPSHOT_X86_OPTIMIZED algorithm called by DoStackSnapshot.
7344// Does a simple EBP walk, rather than invoking all of StackWalkFramesEx.
7345//
7346// Arguments:
7347// pThreadToSnapshot - Thread whose stack should be walked
7348// pctxSeed - Register context with which to seed the walk
7349// callback - Function to call at each frame found during the walk
7350// clientData - Parameter to pass through to callback
7351//
7352// Return Value:
7353// HRESULT indicating success or failure.
7354//
7355
7356HRESULT ProfToEEInterfaceImpl::ProfilerEbpWalker(
7357 Thread * pThreadToSnapshot,
7358 LPCONTEXT pctxSeed,
7359 StackSnapshotCallback * callback,
7360 void * clientData)
7361{
7362 CONTRACTL
7363 {
7364 GC_NOTRIGGER;
7365 NOTHROW;
7366 MODE_ANY;
7367 EE_THREAD_NOT_REQUIRED;
7368
7369 // If this is called asynchronously (from a hijacked thread, as with F1), it must not re-enter the
7370 // host (SQL). Corners will be cut to ensure this is the case
7371 if (ShouldAvoidHostCalls()) { HOST_NOCALLS; } else { HOST_CALLS; }
7372 }
7373 CONTRACTL_END;
7374
7375 HRESULT hr;
7376
7377 // We haven't set the stackwalker thread type flag yet (see next line), so it shouldn't be set. Only
7378 // exception to this is if the current call is made by a hijacking profiler which
7379 // redirected this thread while it was previously in the middle of another stack walk
7380 _ASSERTE(IsCalledAsynchronously() || !IsStackWalkerThread());
7381
7382 // Remember that we're walking the stack. This holder will reinstate the original
7383 // value of the stackwalker flag (from the thread type mask) in its destructor.
7384 ClrFlsValueSwitch _threadStackWalking(TlsIdx_StackWalkerWalkingThread, pThreadToSnapshot);
7385
7386 // This flag remembers if we reported a managed frame since the last unmanaged block
7387 // we reported. It's used to avoid reporting two unmanaged blocks in a row.
7388 BOOL fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = FALSE;
7389
7390 Frame * pFrameCur = pThreadToSnapshot->GetFrame();
7391
7392 CONTEXT ctxCur;
7393 ZeroMemory(&ctxCur, sizeof(ctxCur));
7394
7395 // Use seed if we got one. Otherwise, EE explicit Frame chain will seed the walk.
7396 if (pctxSeed != NULL)
7397 {
7398 ctxCur.Ebp = pctxSeed->Ebp;
7399 ctxCur.Eip = pctxSeed->Eip;
7400 ctxCur.Esp = pctxSeed->Esp;
7401 }
7402
7403 while (TRUE)
7404 {
7405 // At each iteration of the loop:
7406 // * Analyze current frame (get managed data if it's a managed frame)
7407 // * Report current frame via callback()
7408 // * Walk down to next frame
7409
7410 // **** Managed or unmanaged frame? ****
7411
7412 EECodeInfo codeInfo;
7413 MethodDesc * pMethodDescCur = NULL;
7414
7415 if (ctxCur.Eip != 0)
7416 {
7417 hr = GetFunctionInfoInternal(
7418 (LPCBYTE) ctxCur.Eip,
7419 &codeInfo);
7420 if (hr == CORPROF_E_ASYNCHRONOUS_UNSAFE)
7421 {
7422 _ASSERTE(ShouldAvoidHostCalls());
7423 return hr;
7424 }
7425 if (SUCCEEDED(hr))
7426 {
7427 pMethodDescCur = codeInfo.GetMethodDesc();
7428 }
7429 }
7430
7431 // **** Report frame to profiler ****
7432
7433 if (
7434 // Make sure the frame gave us an IP
7435 (ctxCur.Eip != 0) &&
7436
7437 // Make sure any managed frame isn't for an IL stub or LCG
7438 ((pMethodDescCur == NULL) || !pMethodDescCur->IsNoMetadata()) &&
7439
7440 // Only report unmanaged frames if the last frame we reported was managed
7441 // (avoid reporting two unmanaged blocks in a row)
7442 ((pMethodDescCur != NULL) || fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock))
7443 {
7444 // Around the call to the profiler, temporarily clear the
7445 // ThreadType_StackWalker type flag, as we have no control over what the
7446 // profiler may do inside its callback (it could theoretically attempt to
7447 // load other types, though I don't personally know of profilers that
7448 // currently do this).
7449
7450 CLEAR_THREAD_TYPE_STACKWALKER();
7451 hr = callback(
7452 (FunctionID) pMethodDescCur,
7453 ctxCur.Eip,
7454 NULL, // COR_PRF_FRAME_INFO
7455 sizeof(ctxCur), // contextSize,
7456 (LPBYTE) &ctxCur, // context,
7457 clientData);
7458 SET_THREAD_TYPE_STACKWALKER(pThreadToSnapshot);
7459
7460 if (hr != S_OK)
7461 {
7462 return hr;
7463 }
7464 if (pMethodDescCur == NULL)
7465 {
7466 // Just reported an unmanaged block, so reset the flag
7467 fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = FALSE;
7468 }
7469 else
7470 {
7471 // Just reported a managed block, so remember it
7472 fReportedAtLeastOneManagedFrameSinceLastUnmanagedBlock = TRUE;
7473 }
7474 }
7475
7476 // **** Walk down to next frame ****
7477
7478 // Is current frame managed or unmanaged?
7479 if (pMethodDescCur == NULL)
7480 {
7481 // Unmanaged frame. Use explicit EE Frame chain to help
7482
7483 REGDISPLAY frameRD;
7484 ZeroMemory(&frameRD, sizeof(frameRD));
7485
7486 while (pFrameCur != FRAME_TOP)
7487 {
7488 // Frame is only useful if it will contain register context info
7489 if (!pFrameCur->NeedsUpdateRegDisplay())
7490 {
7491 goto Loop;
7492 }
7493
7494
7495 // This should be the first call we make to the Frame, as it will
7496 // ensure we force lazy initialize of HelperMethodFrames
7497 if (!EnsureFrameInitialized(pFrameCur))
7498 {
7499 return CORPROF_E_ASYNCHRONOUS_UNSAFE;
7500 }
7501
7502 // This frame is only useful if it gives us an actual return address,
7503 // and is situated on the stack at or below our current ESP (stack
7504 // grows up)
7505 if ((pFrameCur->GetReturnAddress() != NULL) &&
7506 (dac_cast<TADDR>(pFrameCur) >= dac_cast<TADDR>(ctxCur.Esp)))
7507 {
7508 pFrameCur->UpdateRegDisplay(&frameRD);
7509 break;
7510 }
7511
7512Loop:
7513 pFrameCur = pFrameCur->PtrNextFrame();
7514 }
7515
7516 if (pFrameCur == FRAME_TOP)
7517 {
7518 // No more frames. Stackwalk is over
7519 return S_OK;
7520 }
7521
7522 // Update ctxCur based on frame
7523 ctxCur.Eip = pFrameCur->GetReturnAddress();
7524 ctxCur.Ebp = GetRegdisplayFP(&frameRD);
7525 ctxCur.Esp = GetRegdisplaySP(&frameRD);
7526 }
7527 else
7528 {
7529 // Managed frame.
7530
7531 // GC info will assist us in determining whether this is a non-EBP frame and
7532 // info about pushed arguments.
7533 GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
7534 PTR_VOID gcInfo = gcInfoToken.Info;
7535 InfoHdr header;
7536 unsigned uiMethodSizeDummy;
7537 PTR_CBYTE table = PTR_CBYTE(gcInfo);
7538 table += decodeUnsigned(table, &uiMethodSizeDummy);
7539 table = decodeHeader(table, gcInfoToken.Version, &header);
7540
7541 // Ok, GCInfo, can we do a simple EBP walk or what?
7542
7543 if ((codeInfo.GetRelOffset() < header.prologSize) ||
7544 (!header.ebpFrame && !header.doubleAlign))
7545 {
7546 // We're either in the prolog or we're not in an EBP frame, in which case
7547 // we'll just defer to the code manager to unwind for us. This condition
7548 // is relatively rare, but can occur if:
7549 //
7550 // * The profiler did a DSS from its Enter hook, in which case we're
7551 // still inside the prolog, OR
7552 // * The seed context or explicit EE Frame chain seeded us with a
7553 // non-EBP frame function. In this case, using a naive EBP
7554 // unwinding algorithm would actually skip over the next EBP
7555 // frame, and would get SP all wrong as we try skipping over
7556 // the pushed parameters. So let's just ask the code manager for
7557 // help.
7558 //
7559 // Note that there are yet more conditions (much more rare) where the EBP
7560 // walk could get lost (e.g., we're inside an epilog). But we only care
7561 // about the most likely cases, and it's ok if the unlikely cases result
7562 // in truncated stacks, as unlikely cases will be statistically
7563 // irrelevant to CPU performance sampling profilers
7564 CodeManState codeManState;
7565 codeManState.dwIsSet = 0;
7566 REGDISPLAY rd;
7567 ZeroMemory(&rd, sizeof(rd));
7568
7569 rd.SetEbpLocation(&ctxCur.Ebp);
7570 rd.SP = ctxCur.Esp;
7571 rd.ControlPC = ctxCur.Eip;
7572
7573 codeInfo.GetCodeManager()->UnwindStackFrame(
7574 &rd,
7575 &codeInfo,
7576 SpeculativeStackwalk,
7577 &codeManState,
7578 NULL);
7579
7580 ctxCur.Ebp = *rd.GetEbpLocation();
7581 ctxCur.Esp = rd.SP;
7582 ctxCur.Eip = rd.ControlPC;
7583 }
7584 else
7585 {
7586 // We're in an actual EBP frame, so we can simplistically walk down to
7587 // the next frame using EBP.
7588
7589 // Return address is stored just below saved EBP (stack grows up)
7590 ctxCur.Eip = *(DWORD *) (ctxCur.Ebp + sizeof(DWORD));
7591
7592 ctxCur.Esp =
7593 // Stack location where current function pushed its EBP
7594 ctxCur.Ebp +
7595
7596 // Skip past that EBP
7597 sizeof(DWORD) +
7598
7599 // Skip past return address pushed by caller
7600 sizeof(DWORD) +
7601
7602 // Skip past arguments to current function that were pushed by caller.
7603 // (Caller will pop varargs, so don't count those.)
7604 (header.varargs ? 0 : (header.argCount * sizeof(DWORD)));
7605
7606 // EBP for frame below us (stack grows up) has been saved onto our own
7607 // frame. Dereference it now.
7608 ctxCur.Ebp = *(DWORD *) ctxCur.Ebp;
7609 }
7610 }
7611 }
7612}
7613#endif // _TARGET_X86_
7614
7615//*****************************************************************************
7616// The profiler stackwalk Wrapper
7617//*****************************************************************************
7618HRESULT ProfToEEInterfaceImpl::ProfilerStackWalkFramesWrapper(Thread * pThreadToSnapshot, PROFILER_STACK_WALK_DATA * pData, unsigned flags)
7619{
7620 STATIC_CONTRACT_WRAPPER;
7621
7622 StackWalkAction swaRet = pThreadToSnapshot->StackWalkFrames(
7623 (PSTACKWALKFRAMESCALLBACK)ProfilerStackWalkCallback,
7624 pData,
7625 flags,
7626 NULL);
7627
7628 switch (swaRet)
7629 {
7630 default:
7631 _ASSERTE(!"Unexpected StackWalkAction returned from Thread::StackWalkFrames");
7632 return E_FAIL;
7633
7634 case SWA_FAILED:
7635 return E_FAIL;
7636
7637 case SWA_ABORT:
7638 return CORPROF_E_STACKSNAPSHOT_ABORTED;
7639
7640 case SWA_DONE:
7641 return S_OK;
7642 }
7643}
7644
7645//---------------------------------------------------------------------------------------
7646//
7647// DoStackSnapshot helper to call FindJitMan to determine if the specified
7648// context is in managed code.
7649//
7650// Arguments:
7651// pCtx - Context to look at
7652// hostCallPreference - Describes how to acquire the reader lock--either AllowHostCalls
7653// or NoHostCalls (see code:HostCallPreference).
7654//
7655// Return Value:
7656// S_OK: The context is in managed code
7657// S_FALSE: The context is not in managed code.
7658// Error: Unable to determine (typically because hostCallPreference was NoHostCalls
7659// and the reader lock was unattainable without yielding)
7660//
7661
7662HRESULT IsContextInManagedCode(const CONTEXT * pCtx, HostCallPreference hostCallPreference)
7663{
7664 WRAPPER_NO_CONTRACT;
7665 BOOL fFailedReaderLock = FALSE;
7666
7667 // if there's no Jit Manager for the IP, it's not managed code.
7668 BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(pCtx), hostCallPreference, &fFailedReaderLock);
7669 if (fFailedReaderLock)
7670 {
7671 return CORPROF_E_ASYNCHRONOUS_UNSAFE;
7672 }
7673
7674 return fIsManagedCode ? S_OK : S_FALSE;
7675}
7676
7677//*****************************************************************************
7678// Perform a stack walk, calling back to callback at each managed frame.
7679//*****************************************************************************
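//
// Usage sketch (hypothetical profiler-side code, not part of this file); pInfo is assumed
// to be at least ICorProfilerInfo2. This shows the simple synchronous case of walking the
// current thread; the asynchronous / cross-thread cases carry many more restrictions (see
// the comments and checks in the implementation below).
//
//   HRESULT STDMETHODCALLTYPE MyFrameCallback(FunctionID funcId,
//                                             UINT_PTR ip,
//                                             COR_PRF_FRAME_INFO frameInfo,
//                                             ULONG32 contextSize,
//                                             BYTE context[],
//                                             void *clientData)
//   {
//       // funcId == NULL marks a block of unmanaged frames.
//       return S_OK;      // keep walking; return any other HRESULT to abort
//   }
//
//   // Walk the current thread (thread == NULL) with no seed context.
//   HRESULT hr = pInfo->DoStackSnapshot(NULL, &MyFrameCallback, COR_PRF_SNAPSHOT_DEFAULT,
//                                       NULL, NULL, 0);
//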
7680HRESULT ProfToEEInterfaceImpl::DoStackSnapshot(ThreadID thread,
7681 StackSnapshotCallback *callback,
7682 ULONG32 infoFlags,
7683 void *clientData,
7684 BYTE * pbContext,
7685 ULONG32 contextSize)
7686{
7687
7688#if !defined(FEATURE_HIJACK)
7689
7690 // DoStackSnapshot needs Thread::Suspend/ResumeThread functionality.
7691 // On platforms w/o support for these APIs return E_NOTIMPL.
7692 return E_NOTIMPL;
7693
7694#else // !defined(FEATURE_HIJACK)
7695
7696 CONTRACTL
7697 {
7698 // Yay! (Note: NOTHROW is vital. The throw at minimum allocates
7699 // the thrown object which we *must* not do.)
7700 NOTHROW;
7701
7702 // Yay! (Note: this is called asynchronously to view the stack at arbitrary times,
7703 // so the stack is not necessarily crawlable for GC at this state!)
7704 GC_NOTRIGGER;
7705
7706 // Yay!
7707 MODE_ANY;
7708
7709 // Yay!
7710 EE_THREAD_NOT_REQUIRED;
7711
7712 // #DisableLockOnAsyncCalls
7713 // This call is allowed asynchronously, however it does take locks. Therefore,
7714 // we will hit contract asserts if we happen to be in a CANNOT_TAKE_LOCK zone when
7715 // a hijacking profiler hijacks this thread to run DoStackSnapshot. CANNOT_RETAKE_LOCK
7716 // is a more granular locking contract that says "I promise that if I take locks, I
7717 // won't reenter any locks that were taken before this function was called".
7718 DISABLED(CAN_TAKE_LOCK);
7719
7720 // Asynchronous functions can be called at arbitrary times when runtime
7721 // is holding locks that cannot be reentered without causing deadlock.
7722 // This contract detects any attempts to reenter locks held at the time
7723 // this function was called.
7724 CANNOT_RETAKE_LOCK;
7725
7726 SO_NOT_MAINLINE;
7727 }
7728 CONTRACTL_END;
7729
7730 // This CONTRACT_VIOLATION is still needed because DISABLED(CAN_TAKE_LOCK) does not
7731 // turn off contract violations.
7732 PERMANENT_CONTRACT_VIOLATION(TakesLockViolation, ReasonProfilerAsyncCannotRetakeLock);
7733
7734 LPCONTEXT pctxSeed = reinterpret_cast<LPCONTEXT> (pbContext);
7735
7736 PROFILER_TO_CLR_ENTRYPOINT_ASYNC_EX(kP2EEAllowableAfterAttach,
7737 (LF_CORPROF,
7738 LL_INFO1000,
7739 "**PROF: DoStackSnapshot 0x%p, 0x%p, 0x%08x, 0x%p, 0x%p, 0x%08x.\n",
7740 thread,
7741 callback,
7742 infoFlags,
7743 clientData,
7744 pctxSeed,
7745 contextSize));
7746
7747 HRESULT hr = E_UNEXPECTED;
7748 // (hr assignment is to appease the compiler; we won't actually return without explicitly setting hr again)
7749
7750 Thread *pThreadToSnapshot = NULL;
7751 Thread * pCurrentThread = GetThreadNULLOk();
7752 BOOL fResumeThread = FALSE;
7753 INDEBUG(ULONG ulForbidTypeLoad = 0;)
7754 BOOL fResetSnapshotThreadExternalCount = FALSE;
7755 int cRefsSnapshotThread = 0;
7756
7757 // Remember whether we've already determined the current context of the target thread
7758 // is in managed (S_OK), not in managed (S_FALSE), or unknown (error).
7759 HRESULT hrCurrentContextIsManaged = E_FAIL;
7760
7761 CONTEXT ctxCurrent;
7762 memset(&ctxCurrent, 0, sizeof(ctxCurrent));
7763
7764 REGDISPLAY rd;
7765
7766 PROFILER_STACK_WALK_DATA data;
7767
    if (!g_fEEStarted)
    {
        // No managed code has run yet and the runtime is likely only partially loaded;
        // this is a bad time to try to walk the stack.
7772
7773 // Returning directly as there is nothing to cleanup yet
7774 return CORPROF_E_STACKSNAPSHOT_UNSAFE;
7775 }
7776
7777 if (!CORProfilerStackSnapshotEnabled())
7778 {
7779 // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
7780 return CORPROF_E_INCONSISTENT_WITH_FLAGS;
7781 }
7782
7783 if (thread == NULL)
7784 {
7785 pThreadToSnapshot = pCurrentThread;
7786 }
7787 else
7788 {
7789 pThreadToSnapshot = (Thread *)thread;
7790 }
7791
7792#ifdef _TARGET_X86_
7793 if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT | COR_PRF_SNAPSHOT_X86_OPTIMIZED)) != 0)
7794#else
7795 if ((infoFlags & ~(COR_PRF_SNAPSHOT_REGISTER_CONTEXT)) != 0)
7796#endif
7797 {
7798 // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
7799 return E_INVALIDARG;
7800 }
7801
7802 if (!IsManagedThread(pThreadToSnapshot) || !IsGarbageCollectorFullyInitialized())
7803 {
7804 //
7805 // No managed frames, return now.
7806 //
7807 // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
7808 return S_OK;
7809 }
7810
7811 // We must make sure no other thread tries to hijack the thread we're about to walk
7812 // Hijacking means Thread::HijackThread, i.e. bashing return addresses which would break the stack walk
7813 Thread::HijackLockHolder hijackLockHolder(pThreadToSnapshot);
7814 if (!hijackLockHolder.Acquired())
7815 {
7816 // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
7817 return CORPROF_E_STACKSNAPSHOT_UNSAFE;
7818 }
7819
7820 if (pThreadToSnapshot != pCurrentThread // Walking separate thread
7821 && pCurrentThread != NULL // Walker (current) thread is a managed / VM thread
        && ThreadSuspend::SysIsSuspendInProgress()) // EE is trying to suspend itself
7823 {
7824 // Since we're walking a separate thread, we'd have to suspend it first (see below).
7825 // And since the current thread is a VM thread, that means the current thread's
7826 // m_dwForbidSuspendThread count will go up while it's trying to suspend the
7827 // target thread (see Thread::SuspendThread). THAT means no one will be able
7828 // to suspend the current thread until its m_dwForbidSuspendThread is decremented
7829 // (which happens as soon as the target thread of DoStackSnapshot has been suspended).
7830 // Since we're in the process of suspending the entire runtime, now would be a bad time to
7831 // make the walker thread un-suspendable (see VsWhidbey bug 454936). So let's just abort
        // now. Note that there is no synchronization around calling ThreadSuspend::SysIsSuspendInProgress().
7833 // So we will get occasional false positives or false negatives. But that's benign, as the worst
7834 // that might happen is we might occasionally delay the EE suspension a little bit, or we might
7835 // too eagerly fail from ProfToEEInterfaceImpl::DoStackSnapshot sometimes. But there won't
7836 // be any corruption or AV.
7837 //
7838 // Returning directly as there is nothing to cleanup yet, and can't skip gcholder ctor
7839 return CORPROF_E_STACKSNAPSHOT_UNSAFE;
7840 }
7841
7842 // We only allow stackwalking if:
7843 // 1) Target thread to walk == current thread OR Target thread is suspended, AND
7844 // 2) Target thread to walk is currently executing JITted / NGENd code, AND
7845 // 3) Target thread to walk is seeded OR currently NOT unwinding the stack, AND
7846 // 4) Target thread to walk != current thread OR current thread is NOT in a can't stop or forbid suspend region
7847
7848 // If the thread is in a forbid suspend region, it's dangerous to do anything:
7849 // - The code manager datastructures accessed during the stackwalk may be in inconsistent state.
7850 // - Thread::Suspend won't be able to suspend the thread.
7851 if (pThreadToSnapshot->IsInForbidSuspendRegion())
7852 {
7853 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
7854 goto Cleanup;
7855 }
7856
7857 HostCallPreference hostCallPreference;
7858
7859 // First, check "1) Target thread to walk == current thread OR Target thread is suspended"
7860 if (pThreadToSnapshot != pCurrentThread)
7861 {
7862#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
7863 hr = E_NOTIMPL;
7864 goto Cleanup;
7865#else
7866 // Walking separate thread, so it must be suspended. First, ensure that
7867 // target thread exists.
7868 //
7869 // NOTE: We're using the "dangerous" variant of this refcount function, because we
7870 // rely on the profiler to ensure it never tries to walk a thread being destroyed.
7871 // (Profiler must block in its ThreadDestroyed() callback until all uses of that thread,
7872 // such as walking its stack, are complete.)
7873 cRefsSnapshotThread = pThreadToSnapshot->IncExternalCountDANGEROUSProfilerOnly();
7874 fResetSnapshotThreadExternalCount = TRUE;
7875
7876 if (cRefsSnapshotThread == 1 || !pThreadToSnapshot->HasValidThreadHandle())
7877 {
7878 // At this point, we've modified the VM state based on bad input
7879 // (pThreadToSnapshot) from the profiler. This could cause
7880 // memory corruption and leave us vulnerable to security problems.
7881 // So destroy the process.
7882 _ASSERTE(!"Profiler trying to walk destroyed thread");
7883 EEPOLICY_HANDLE_FATAL_ERROR(CORPROF_E_STACKSNAPSHOT_INVALID_TGT_THREAD);
7884 }
7885
7886 // Thread::SuspendThread() ensures that no one else should try to suspend us
7887 // while we're suspending pThreadToSnapshot.
7888 //
7889 // TRUE: OneTryOnly. Don't loop waiting for others to get out of our way in
7890 // order to suspend the thread. If it's not safe, just return an error immediately.
7891 Thread::SuspendThreadResult str = pThreadToSnapshot->SuspendThread(TRUE);
7892 if (str == Thread::STR_Success)
7893 {
7894 fResumeThread = TRUE;
7895 }
7896 else
7897 {
7898 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
7899 goto Cleanup;
7900 }
7901#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
7902 }
7903
7904 hostCallPreference =
7905 ShouldAvoidHostCalls() ?
7906 NoHostCalls : // Async call: Ensure this thread won't yield & re-enter host
7907 AllowHostCalls; // Synchronous calls may re-enter host just fine
7908
7909 // If target thread is in pre-emptive mode, the profiler's seed context is unnecessary
7910 // because our frame chain is good enough: it will give us at least as accurate a
7911 // starting point as the profiler could. Also, since profiler contexts cannot be
7912 // trusted, we don't want to set the thread's profiler filter context to this, as a GC
7913 // that interrupts the profiler's stackwalk will end up using the profiler's (potentially
7914 // bogus) filter context.
7915 if (!pThreadToSnapshot->PreemptiveGCDisabledOther())
7916 {
7917 // Thread to be walked is in preemptive mode. Throw out seed.
7918 pctxSeed = NULL;
7919 }
7920 else if (pThreadToSnapshot != pCurrentThread)
7921 {
7922 // With cross-thread stack-walks, the target thread's context could be unreliable.
7923 // That would shed doubt on either a profiler-provided context, or a default
7924 // context we chose. So check if we're in a potentially unreliable case, and return
7925 // an error if so.
7926 //
        // These heuristics are based on an actual bug where GetThreadContext returned a
7928 // self-consistent, but stale, context for a thread suspended after being redirected by
7929 // the GC (TFS Dev 10 bug # 733263).
7930 //
7931 // (Note that this whole block is skipped if pThreadToSnapshot is in preemptive mode (the IF
7932 // above), as the context is unused in such a case--the EE Frame chain is used
7933 // to seed the walk instead.)
7934#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
7935 hr = E_NOTIMPL;
7936 goto Cleanup;
7937#else
7938 if (!pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
7939 {
7940 LOG((LF_CORPROF, LL_INFO100, "**PROF: GetSafelyRedirectableThreadContext failure leads to CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
7941 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
7942 goto Cleanup;
7943 }
7944
7945 hrCurrentContextIsManaged = IsContextInManagedCode(&ctxCurrent, hostCallPreference);
7946 if (FAILED(hrCurrentContextIsManaged))
7947 {
7948 // Couldn't get the info. Try again later
7949 _ASSERTE(ShouldAvoidHostCalls());
7950 hr = CORPROF_E_ASYNCHRONOUS_UNSAFE;
7951 goto Cleanup;
7952 }
7953
7954 if ((hrCurrentContextIsManaged == S_OK) &&
7955 (!pThreadToSnapshot->PreemptiveGCDisabledOther()))
7956 {
7957 // Thread is in preemptive mode while executing managed code?! This lie is
7958 // an early warning sign that the context is bogus. Bail.
7959 LOG((LF_CORPROF, LL_INFO100, "**PROF: Target thread context is likely bogus. Returning CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
7960 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
7961 goto Cleanup;
7962 }
7963
7964 Frame * pFrame = pThreadToSnapshot->GetFrame();
7965 if (pFrame != FRAME_TOP)
7966 {
7967 TADDR spTargetThread = GetSP(&ctxCurrent);
7968 if (dac_cast<TADDR>(pFrame) < spTargetThread)
7969 {
7970 // An Explicit EE Frame is more recent on the stack than the current
7971 // stack pointer itself? This lie is an early warning sign that the
7972 // context is bogus. Bail.
7973 LOG((LF_CORPROF, LL_INFO100, "**PROF: Target thread context is likely bogus. Returning CORPROF_E_STACKSNAPSHOT_UNSAFE.\n"));
7974 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
7975 goto Cleanup;
7976 }
7977 }
7978
7979 // If the profiler did not specify a seed context of its own, use the current one we
7980 // just produced.
7981 //
        // Failing to seed the walk can cause us to "miss" functions on the stack.  This is
7983 // because StackWalkFrames(), when doing an unseeded stackwalk, sets the
7984 // starting regdisplay's IP/SP to 0. This, in turn causes StackWalkFramesEx
7985 // to set cf.isFrameless = (pEEJM != NULL); (which is FALSE, since we have no
7986 // jit manager, since we have no IP). Once frameless is false, we look solely to
7987 // the Frame chain for our goodies, rather than looking at the code actually
7988 // being executed by the thread. The problem with the frame chain is that some
7989 // frames (e.g., GCFrame) don't point to any functions being executed. So
7990 // StackWalkFramesEx just skips such frames and moves to the next one. That
7991 // can cause a chunk of calls to be skipped. To prevent this from happening, we
7992 // "fake" a seed by just seeding the thread with its current context. This forces
7993 // StackWalkFramesEx() to look at the IP rather than just the frame chain.
7994 if (pctxSeed == NULL)
7995 {
7996 pctxSeed = &ctxCurrent;
7997 }
7998#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
7999 }
8000
8001 // Second, check "2) Target thread to walk is currently executing JITted / NGENd code"
8002 // To do this, we need to find the proper context to investigate. Start with
8003 // the seeded context, if available. If not, use the target thread's current context.
8004 if (pctxSeed != NULL)
8005 {
8006 BOOL fSeedIsManaged;
8007
8008 // Short cut: If we're just using the current context as the seed, we may
8009 // already have determined whether it's in managed code. If so, just use that
8010 // result rather than calculating it again
8011 if ((pctxSeed == &ctxCurrent) && SUCCEEDED(hrCurrentContextIsManaged))
8012 {
8013 fSeedIsManaged = (hrCurrentContextIsManaged == S_OK);
8014 }
8015 else
8016 {
8017 hr = IsContextInManagedCode(pctxSeed, hostCallPreference);
8018 if (FAILED(hr))
8019 {
8020 hr = CORPROF_E_ASYNCHRONOUS_UNSAFE;
8021 goto Cleanup;
8022 }
8023 fSeedIsManaged = (hr == S_OK);
8024 }
8025
8026 if (!fSeedIsManaged)
8027 {
8028 hr = CORPROF_E_STACKSNAPSHOT_UNMANAGED_CTX;
8029 goto Cleanup;
8030 }
8031 }
8032
8033#ifdef _DEBUG
8034 //
8035    // Sanity check: If we are doing a cross-thread walk and there is no seed context, then
8036    // we had better not be in managed code; otherwise we do not have a Frame on the stack from
8037    // which to start walking, and we may miss the leaf-most chain of managed calls due to the way
8038    // StackWalkFrames is implemented. However, there is an exception when the leaf-most EE frame
8039    // of pThreadToSnapshot is an InlinedCallFrame, which has an active call, implying
8040    // pThreadToSnapshot is inside an inlined P/Invoke. In this case, the InlinedCallFrame will be
8041    // used to help start off our stackwalk at the top of the stack.
8042 //
8043 if (pThreadToSnapshot != pCurrentThread)
8044 {
8045#ifndef PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
8046 hr = E_NOTIMPL;
8047 goto Cleanup;
8048#else
8049 if (pctxSeed == NULL)
8050 {
8051 if (pThreadToSnapshot->GetSafelyRedirectableThreadContext(Thread::kDefaultChecks, &ctxCurrent, &rd))
8052 {
8053 BOOL fFailedReaderLock = FALSE;
8054 BOOL fIsManagedCode = ExecutionManager::IsManagedCode(GetIP(&ctxCurrent), hostCallPreference, &fFailedReaderLock);
8055
8056 if (!fFailedReaderLock)
8057 {
8058                    // The thread is either not in JITted/NGENd code, or it is inside an inlined P/Invoke
8059                    // (the leaf-most EE Frame is an InlinedCallFrame with an active call)
8060 _ASSERTE(!fIsManagedCode ||
8061 (InlinedCallFrame::FrameHasActiveCall(pThreadToSnapshot->GetFrame())));
8062 }
8063 }
8064 }
8065#endif // !PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
8066 }
8067#endif //_DEBUG
8068    // Third, verify the target thread either is seeded or is not in the midst of an unwind.
8069 if (pctxSeed == NULL)
8070 {
8071 ThreadExceptionState* pExState = pThreadToSnapshot->GetExceptionState();
8072
8073 // this tests to see if there is an exception in flight
8074 if (pExState->IsExceptionInProgress() && pExState->GetFlags()->UnwindHasStarted())
8075 {
8076 EHClauseInfo *pCurrentEHClauseInfo = pThreadToSnapshot->GetExceptionState()->GetCurrentEHClauseInfo();
8077
8078 // if the exception code is telling us that we have entered a managed context then all is well
8079 if (!pCurrentEHClauseInfo->IsManagedCodeEntered())
8080 {
8081 hr = CORPROF_E_STACKSNAPSHOT_UNMANAGED_CTX;
8082 goto Cleanup;
8083 }
8084 }
8085 }
8086
8087 // Check if the exception state is consistent. See the comment for ThreadExceptionFlag for more information.
8088 if (pThreadToSnapshot->GetExceptionState()->HasThreadExceptionFlag(ThreadExceptionState::TEF_InconsistentExceptionState))
8089 {
8090 hr = CORPROF_E_STACKSNAPSHOT_UNSAFE;
8091 goto Cleanup;
8092 }
8093
8094 data.callback = callback;
8095 data.infoFlags = infoFlags;
8096 data.contextFlags = 0;
8097 data.clientData = clientData;
8098#ifdef WIN64EXCEPTIONS
8099 data.sfParent.Clear();
8100#endif
8101
8102    // workaround: The ForbidTypeLoad bookkeeping in the stackwalker is not robust against exceptions.
8103    // Unfortunately, it is hard to get it right in the stackwalker since it has to be exception
8104    // handling free (frame unwinding may never return).  We restore the ForbidTypeLoad counter here
8105    // in case it got messed up by an exception thrown during the stackwalk.
8106 INDEBUG(if (pCurrentThread) ulForbidTypeLoad = pCurrentThread->m_ulForbidTypeLoad;)
8107
8108 {
8109 // An AV during a profiler stackwalk is an isolated event and shouldn't bring
8110 // down the runtime. Need to place the holder here, outside of ProfilerStackWalkFramesWrapper
8111 // since ProfilerStackWalkFramesWrapper uses __try, which doesn't like objects
8112 // with destructors.
8113 AVInRuntimeImplOkayHolder AVOkay;
8114
8115 hr = DoStackSnapshotHelper(
8116 pThreadToSnapshot,
8117 &data,
8118 HANDLESKIPPEDFRAMES |
8119 FUNCTIONSONLY |
8120 NOTIFY_ON_U2M_TRANSITIONS |
8121 ((pThreadToSnapshot == pCurrentThread) ?
8122 0 :
8123 ALLOW_ASYNC_STACK_WALK | THREAD_IS_SUSPENDED) |
8124 THREAD_EXECUTING_MANAGED_CODE |
8125 PROFILER_DO_STACK_SNAPSHOT |
8126 ALLOW_INVALID_OBJECTS, // stack walk logic should not look at objects - we could be in the middle of a gc.
8127 pctxSeed);
8128 }
8129
8130 INDEBUG(if (pCurrentThread) pCurrentThread->m_ulForbidTypeLoad = ulForbidTypeLoad;)
8131
8132Cleanup:
8133#if defined(PLATFORM_SUPPORTS_SAFE_THREADSUSPEND)
8134 if (fResumeThread)
8135 {
8136 pThreadToSnapshot->ResumeThread();
8137 }
8138#endif // PLATFORM_SUPPORTS_SAFE_THREADSUSPEND
8139 if (fResetSnapshotThreadExternalCount)
8140 {
8141 pThreadToSnapshot->DecExternalCountDANGEROUSProfilerOnly();
8142 }
8143
8144 return hr;
8145
8146#endif // !defined(FEATURE_HIJACK)
8147}
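
//
// Illustrative profiler-side usage of DoStackSnapshot (a hedged sketch, not part of this
// file; pCorProfilerInfo and MyStackSnapshotCallback are hypothetical names assumed to
// have been set up by the profiler):
//
//     HRESULT __stdcall MyStackSnapshotCallback(FunctionID funcId, UINT_PTR ip,
//         COR_PRF_FRAME_INFO frameInfo, ULONG32 contextSize, BYTE context[],
//         void * clientData)
//     {
//         // Record funcId / ip somewhere; return S_OK to keep walking this stack.
//         return S_OK;
//     }
//
//     // Walk the current thread (a ThreadID of NULL), unseeded:
//     HRESULT hr = pCorProfilerInfo->DoStackSnapshot(NULL, &MyStackSnapshotCallback,
//                                                    COR_PRF_SNAPSHOT_DEFAULT, NULL, NULL, 0);
//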
8148
8149
8150//---------------------------------------------------------------------------------------
8151//
8152// Exception swallowing wrapper around the profiler stackwalk
8153//
8154// Arguments:
8155// pThreadToSnapshot - Thread whose stack should be walked
8156// pData - data for stack walker
8157// flags - flags parameter to pass to StackWalkFramesEx, and StackFrameIterator
8158// pctxSeed - Register context with which to seed the walk
8159//
8160// Return Value:
8161// HRESULT indicating success or failure.
8162//
8163HRESULT ProfToEEInterfaceImpl::DoStackSnapshotHelper(Thread * pThreadToSnapshot,
8164 PROFILER_STACK_WALK_DATA * pData,
8165 unsigned flags,
8166 LPCONTEXT pctxSeed)
8167{
8168 STATIC_CONTRACT_NOTHROW;
8169
8170 // We want to catch and swallow AVs here. For example, if the profiler gives
8171 // us a bogus seed context (this happens), we could AV when inspecting memory pointed to
8172 // by the (bogus) EBP register.
8173 //
8174    // EX_TRY/EX_CATCH does a lot of extra work that we do not need and that can go wrong for us.
8175    // E.g., it asserts in debug builds for AVs in mscorwks, or it synthesizes an object for the exception.
8176    // We use a plain PAL_TRY/PAL_EXCEPT since that is all we need.
8177 struct Param {
8178 HRESULT hr;
8179 Thread * pThreadToSnapshot;
8180 PROFILER_STACK_WALK_DATA * pData;
8181 unsigned flags;
8182 ProfToEEInterfaceImpl * pProfToEE;
8183 LPCONTEXT pctxSeed;
8184 BOOL fResetProfilerFilterContext;
8185 };
8186
8187 Param param;
8188 param.hr = E_UNEXPECTED;
8189 param.pThreadToSnapshot = pThreadToSnapshot;
8190 param.pData = pData;
8191 param.flags = flags;
8192 param.pProfToEE = this;
8193 param.pctxSeed = pctxSeed;
8194 param.fResetProfilerFilterContext = FALSE;
8195
8196 PAL_TRY(Param *, pParam, &param)
8197 {
8198 if ((pParam->pData->infoFlags & COR_PRF_SNAPSHOT_X86_OPTIMIZED) != 0)
8199 {
8200#ifndef _TARGET_X86_
8201            // The check at the beginning of DoStackSnapshot (which returns E_INVALIDARG) should
8202            // make this unreachable
8203 _ASSERTE(!"COR_PRF_SNAPSHOT_X86_OPTIMIZED on non-X86 should be unreachable!");
8204#else
8205 // New, simple EBP walker
8206 pParam->hr = pParam->pProfToEE->ProfilerEbpWalker(
8207 pParam->pThreadToSnapshot,
8208 pParam->pctxSeed,
8209 pParam->pData->callback,
8210 pParam->pData->clientData);
8211#endif // _TARGET_X86_
8212 }
8213 else
8214 {
8215 // We're now fairly confident the stackwalk should be ok, so set
8216 // the context seed, if one was provided or cooked up.
8217 if (pParam->pctxSeed != NULL)
8218 {
8219 pParam->pThreadToSnapshot->SetProfilerFilterContext(pParam->pctxSeed);
8220 pParam->fResetProfilerFilterContext = TRUE;
8221 }
8222
8223 // Whidbey-style walker, uses StackWalkFramesEx
8224 pParam->hr = pParam->pProfToEE->ProfilerStackWalkFramesWrapper(
8225 pParam->pThreadToSnapshot,
8226 pParam->pData,
8227 pParam->flags);
8228 }
8229 }
8230 PAL_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
8231 {
8232 param.hr = E_UNEXPECTED;
8233 }
8234 PAL_ENDTRY;
8235
8236 // Undo the context seeding & thread suspend we did (if any)
8237 // to ensure that the thread we walked stayed suspended
8238 if (param.fResetProfilerFilterContext)
8239 {
8240 pThreadToSnapshot->SetProfilerFilterContext(NULL);
8241 }
8242
8243 return param.hr;
8244}
8245
8246
8247HRESULT ProfToEEInterfaceImpl::GetGenerationBounds(ULONG cObjectRanges,
8248 ULONG *pcObjectRanges,
8249 COR_PRF_GC_GENERATION_RANGE ranges[])
8250{
8251 CONTRACTL
8252 {
8253 // Yay!
8254 NOTHROW;
8255
8256 // Yay!
8257 GC_NOTRIGGER;
8258
8259 // Yay!
8260 MODE_ANY;
8261
8262 // Yay!
8263 EE_THREAD_NOT_REQUIRED;
8264
8265 // Yay!
8266 CANNOT_TAKE_LOCK;
8267
8268 SO_NOT_MAINLINE;
8269
8270 PRECONDITION(CheckPointer(pcObjectRanges));
8271 PRECONDITION(cObjectRanges <= 0 || ranges != NULL);
8272 PRECONDITION(s_generationTableLock >= 0);
8273 }
8274 CONTRACTL_END;
8275
8276 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
8277 (LF_CORPROF,
8278 LL_INFO1000,
8279 "**PROF: GetGenerationBounds.\n"));
8280
8281 // Announce we are using the generation table now
8282 CounterHolder genTableLock(&s_generationTableLock);
8283
8284 GenerationTable *generationTable = s_currentGenerationTable;
8285
8286 if (generationTable == NULL)
8287 {
8288 return E_FAIL;
8289 }
8290
8291 _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
8292
8293 GenerationDesc *genDescTable = generationTable->genDescTable;
8294 ULONG count = min(generationTable->count, cObjectRanges);
8295 for (ULONG i = 0; i < count; i++)
8296 {
8297 ranges[i].generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
8298 ranges[i].rangeStart = (ObjectID)genDescTable[i].rangeStart;
8299 ranges[i].rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
8300 ranges[i].rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
8301 }
8302
8303 *pcObjectRanges = generationTable->count;
8304
8305 return S_OK;
8306}
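
//
// Illustrative profiler-side usage of GetGenerationBounds (a hedged sketch, not part of
// this file; pCorProfilerInfo and the buffer size are assumptions):
//
//     COR_PRF_GC_GENERATION_RANGE ranges[16];
//     ULONG cRanges = 0;
//     HRESULT hr = pCorProfilerInfo->GetGenerationBounds(_countof(ranges), &cRanges, ranges);
//     // On success, cRanges reports the total number of ranges available; only
//     // min(cRanges, _countof(ranges)) entries of 'ranges' were filled in.
//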
8307
8308
8309HRESULT ProfToEEInterfaceImpl::GetNotifiedExceptionClauseInfo(COR_PRF_EX_CLAUSE_INFO * pinfo)
8310{
8311 CONTRACTL
8312 {
8313 // Yay!
8314 NOTHROW;
8315
8316 // Yay!
8317 GC_NOTRIGGER;
8318
8319 // Yay!
8320 MODE_ANY;
8321
8322 // Yay!
8323 CANNOT_TAKE_LOCK;
8324
8325 SO_NOT_MAINLINE;
8326
8327 PRECONDITION(CheckPointer(pinfo));
8328 }
8329 CONTRACTL_END;
8330
8331 PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
8332 LL_INFO1000,
8333 "**PROF: GetNotifiedExceptionClauseInfo.\n"));
8334
8335 HRESULT hr = S_OK;
8336
8337 ThreadExceptionState* pExState = NULL;
8338 EHClauseInfo* pCurrentEHClauseInfo = NULL;
8339
8340 // notification requires that we are on a managed thread with an exception in flight
8341 Thread *pThread = GetThread();
8342
8343 // If pThread is null, then the thread has never run managed code
8344 if (pThread == NULL)
8345 {
8346 hr = CORPROF_E_NOT_MANAGED_THREAD;
8347 goto NullReturn;
8348 }
8349
8350 pExState = pThread->GetExceptionState();
8351 if (!pExState->IsExceptionInProgress())
8352 {
8353 // no exception is in flight -- successful failure
8354 hr = S_FALSE;
8355 goto NullReturn;
8356 }
8357
8358 pCurrentEHClauseInfo = pExState->GetCurrentEHClauseInfo();
8359 if (pCurrentEHClauseInfo->GetClauseType() == COR_PRF_CLAUSE_NONE)
8360 {
8361 // no exception is in flight -- successful failure
8362 hr = S_FALSE;
8363 goto NullReturn;
8364 }
8365
8366 pinfo->clauseType = pCurrentEHClauseInfo->GetClauseType();
8367 pinfo->programCounter = pCurrentEHClauseInfo->GetIPForEHClause();
8368 pinfo->framePointer = pCurrentEHClauseInfo->GetFramePointerForEHClause();
8369 pinfo->shadowStackPointer = 0;
8370
8371 return S_OK;
8372
8373NullReturn:
8374 memset(pinfo, 0, sizeof(*pinfo));
8375 return hr;
8376}
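
//
// Illustrative profiler-side usage of GetNotifiedExceptionClauseInfo, e.g. from inside an
// exception-related callback on a managed thread (a hedged sketch; pCorProfilerInfo is a
// hypothetical name):
//
//     COR_PRF_EX_CLAUSE_INFO clauseInfo;
//     HRESULT hr = pCorProfilerInfo->GetNotifiedExceptionClauseInfo(&clauseInfo);
//     if (hr == S_OK)
//     {
//         // clauseInfo.clauseType / programCounter / framePointer describe the EH clause
//         // most recently notified to the profiler.
//     }
//     // S_FALSE means no exception (or no EH clause) is currently in flight.
//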
8377
8378
8379HRESULT ProfToEEInterfaceImpl::GetObjectGeneration(ObjectID objectId,
8380 COR_PRF_GC_GENERATION_RANGE *range)
8381{
8382 CONTRACTL
8383 {
8384 // Yay!
8385 NOTHROW;
8386
8387 // Yay!
8388 GC_NOTRIGGER;
8389
8390 // Yay!
8391 MODE_ANY;
8392
8393 // Yay!
8394 EE_THREAD_NOT_REQUIRED;
8395
8396 // Yay!
8397 CANNOT_TAKE_LOCK;
8398
8399 SO_NOT_MAINLINE;
8400
8401 PRECONDITION(objectId != NULL);
8402 PRECONDITION(CheckPointer(range));
8403 PRECONDITION(s_generationTableLock >= 0);
8404 }
8405 CONTRACTL_END;
8406
8407 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
8408 (LF_CORPROF,
8409 LL_INFO1000,
8410 "**PROF: GetObjectGeneration 0x%p.\n",
8411 objectId));
8412
8413 BEGIN_GETTHREAD_ALLOWED;
8414 _ASSERTE((GetThread() == NULL) || (GetThread()->PreemptiveGCDisabled()));
8415 END_GETTHREAD_ALLOWED;
8416
8417 // Announce we are using the generation table now
8418 CounterHolder genTableLock(&s_generationTableLock);
8419
8420 GenerationTable *generationTable = s_currentGenerationTable;
8421
8422 if (generationTable == NULL)
8423 {
8424 return E_FAIL;
8425 }
8426
8427 _ASSERTE(generationTable->magic == GENERATION_TABLE_MAGIC);
8428
8429 GenerationDesc *genDescTable = generationTable->genDescTable;
8430 ULONG count = generationTable->count;
8431 for (ULONG i = 0; i < count; i++)
8432 {
8433 if (genDescTable[i].rangeStart <= (BYTE *)objectId && (BYTE *)objectId < genDescTable[i].rangeEndReserved)
8434 {
8435 range->generation = (COR_PRF_GC_GENERATION)genDescTable[i].generation;
8436 range->rangeStart = (ObjectID)genDescTable[i].rangeStart;
8437 range->rangeLength = genDescTable[i].rangeEnd - genDescTable[i].rangeStart;
8438 range->rangeLengthReserved = genDescTable[i].rangeEndReserved - genDescTable[i].rangeStart;
8439
8440 return S_OK;
8441 }
8442 }
8443
8444 return E_FAIL;
8445}
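
//
// Illustrative profiler-side usage of GetObjectGeneration, e.g. with an ObjectID received
// in the ObjectAllocated callback (a hedged sketch; pCorProfilerInfo is a hypothetical name):
//
//     COR_PRF_GC_GENERATION_RANGE range;
//     HRESULT hr = pCorProfilerInfo->GetObjectGeneration(objectId, &range);
//     if (SUCCEEDED(hr))
//     {
//         // range.generation indicates which GC generation currently contains objectId.
//     }
//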
8446
8447HRESULT ProfToEEInterfaceImpl::GetReJITIDs(
8448 FunctionID functionId, // in
8449 ULONG cReJitIds, // in
8450 ULONG * pcReJitIds, // out
8451 ReJITID reJitIds[]) // out
8452{
8453 CONTRACTL
8454 {
8455 // Yay!
8456 NOTHROW;
8457
8458 // taking a lock causes a GC
8459 GC_TRIGGERS;
8460
8461 // Yay!
8462 MODE_ANY;
8463
8464 // The rejit tables use a lock
8465 CAN_TAKE_LOCK;
8466
8467 SO_NOT_MAINLINE;
8468
8469 PRECONDITION(CheckPointer(pcReJitIds, NULL_OK));
8470 PRECONDITION(CheckPointer(reJitIds, NULL_OK));
8471
8472 }
8473 CONTRACTL_END;
8474
8475 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8476 kP2EEAllowableAfterAttach,
8477 (LF_CORPROF,
8478 LL_INFO1000,
8479 "**PROF: GetReJITIDs 0x%p.\n",
8480 functionId));
8481
8482 if (functionId == 0)
8483 {
8484 return E_INVALIDARG;
8485 }
8486
8487 if ((cReJitIds == 0) || (pcReJitIds == NULL) || (reJitIds == NULL))
8488 {
8489 return E_INVALIDARG;
8490 }
8491
8492 MethodDesc * pMD = FunctionIdToMethodDesc(functionId);
8493
8494 return ReJitManager::GetReJITIDs(pMD, cReJitIds, pcReJitIds, reJitIds);
8495}
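
//
// Illustrative profiler-side usage of GetReJITIDs (a hedged sketch; pCorProfilerInfo and
// the buffer size are assumptions, not part of this file):
//
//     ReJITID rejitIds[8];
//     ULONG cRejitIds = 0;
//     HRESULT hr = pCorProfilerInfo->GetReJITIDs(functionId, _countof(rejitIds),
//                                                &cRejitIds, rejitIds);
//     // On success, cRejitIds reports how many ReJIT IDs exist for functionId; at most
//     // _countof(rejitIds) of them were copied into the buffer.
//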
8496
8497HRESULT ProfToEEInterfaceImpl::RequestReJIT(ULONG cFunctions, // in
8498 ModuleID moduleIds[], // in
8499 mdMethodDef methodIds[]) // in
8500{
8501 CONTRACTL
8502 {
8503 // Yay!
8504 NOTHROW;
8505
8506        // When we suspend the runtime we drop into preemptive mode
8507 GC_TRIGGERS;
8508
8509 // Yay!
8510 MODE_ANY;
8511
8512 // We need to suspend the runtime, this takes a lot of locks!
8513 CAN_TAKE_LOCK;
8514
8515 SO_NOT_MAINLINE;
8516
8517 PRECONDITION(CheckPointer(moduleIds, NULL_OK));
8518 PRECONDITION(CheckPointer(methodIds, NULL_OK));
8519 }
8520 CONTRACTL_END;
8521
8522 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8523 kP2EETriggers | kP2EEAllowableAfterAttach,
8524 (LF_CORPROF,
8525 LL_INFO1000,
8526 "**PROF: RequestReJIT.\n"));
8527
8528 if (!g_profControlBlock.pProfInterface->IsCallback4Supported())
8529 {
8530 return CORPROF_E_CALLBACK4_REQUIRED;
8531 }
8532
8533 if (!CORProfilerEnableRejit())
8534 {
8535 return CORPROF_E_REJIT_NOT_ENABLED;
8536 }
8537
8538 // Request at least 1 method to reJIT!
8539 if ((cFunctions == 0) || (moduleIds == NULL) || (methodIds == NULL))
8540 {
8541 return E_INVALIDARG;
8542 }
8543
8544 // Remember the profiler is doing this, as that means we must never detach it!
8545 g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
8546
8547 GCX_PREEMP();
8548 return ReJitManager::RequestReJIT(cFunctions, moduleIds, methodIds);
8549}
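
//
// Illustrative profiler-side usage of RequestReJIT (a hedged sketch; the moduleId and
// methodDef values are assumed to have been captured earlier, e.g. from module / JIT
// callbacks, and pCorProfilerInfo is a hypothetical name):
//
//     ModuleID    moduleIds[1] = { moduleIdOfInterest };
//     mdMethodDef methodIds[1] = { methodDefOfInterest };
//     HRESULT hr = pCorProfilerInfo->RequestReJIT(1, moduleIds, methodIds);
//     // Requires COR_PRF_ENABLE_REJIT to have been set during Initialize(); the profiler
//     // supplies the replacement IL later, in the GetReJITParameters callback.
//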
8550
8551HRESULT ProfToEEInterfaceImpl::RequestRevert(ULONG cFunctions, // in
8552 ModuleID moduleIds[], // in
8553 mdMethodDef methodIds[], // in
8554 HRESULT rgHrStatuses[]) // out
8555{
8556 CONTRACTL
8557 {
8558 // Yay!
8559 NOTHROW;
8560
8561 // The rejit manager requires a lock to iterate through methods to revert, and
8562 // taking the lock can drop us into preemptive mode.
8563 GC_TRIGGERS;
8564
8565 // Yay!
8566 MODE_ANY;
8567
8568 // The rejit manager requires a lock to iterate through methods to revert
8569 CAN_TAKE_LOCK;
8570
8571 SO_NOT_MAINLINE;
8572
8573 PRECONDITION(CheckPointer(moduleIds, NULL_OK));
8574 PRECONDITION(CheckPointer(methodIds, NULL_OK));
8575 PRECONDITION(CheckPointer(rgHrStatuses, NULL_OK));
8576 }
8577 CONTRACTL_END;
8578
8579 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8580 kP2EEAllowableAfterAttach | kP2EETriggers,
8581 (LF_CORPROF,
8582 LL_INFO1000,
8583 "**PROF: RequestRevert.\n"));
8584
8585 if (!CORProfilerEnableRejit())
8586 {
8587 return CORPROF_E_REJIT_NOT_ENABLED;
8588 }
8589
8590 // Request at least 1 method to revert!
8591 if ((cFunctions == 0) || (moduleIds == NULL) || (methodIds == NULL))
8592 {
8593 return E_INVALIDARG;
8594 }
8595
8596 // Remember the profiler is doing this, as that means we must never detach it!
8597 g_profControlBlock.pProfInterface->SetUnrevertiblyModifiedILFlag();
8598
8599 // Initialize the status array
8600 if (rgHrStatuses != NULL)
8601 {
8602 memset(rgHrStatuses, 0, sizeof(HRESULT) * cFunctions);
8603 _ASSERTE(S_OK == rgHrStatuses[0]);
8604 }
8605
8606 GCX_PREEMP();
8607 return ReJitManager::RequestRevert(cFunctions, moduleIds, methodIds, rgHrStatuses);
8608}
8609
8610
8611HRESULT ProfToEEInterfaceImpl::EnumJITedFunctions(ICorProfilerFunctionEnum ** ppEnum)
8612{
8613 CONTRACTL
8614 {
8615 // Yay!
8616 NOTHROW;
8617
8618 // Yay!
8619 GC_NOTRIGGER;
8620
8621 // Yay!
8622 MODE_ANY;
8623
8624 // If we're in preemptive mode we need to take a read lock to safely walk
8625 // the JIT data structures.
8626 CAN_TAKE_LOCK;
8627
8628 SO_NOT_MAINLINE;
8629
8630 PRECONDITION(CheckPointer(ppEnum, NULL_OK));
8631
8632 }
8633 CONTRACTL_END;
8634
8635 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
8636 (LF_CORPROF,
8637 LL_INFO10,
8638 "**PROF: EnumJITedFunctions.\n"));
8639
8640 if (ppEnum == NULL)
8641 {
8642 return E_INVALIDARG;
8643 }
8644
8645 *ppEnum = NULL;
8646
8647 NewHolder<ProfilerFunctionEnum> pJitEnum(new (nothrow) ProfilerFunctionEnum());
8648 if (pJitEnum == NULL)
8649 {
8650 return E_OUTOFMEMORY;
8651 }
8652
8653 if (!pJitEnum->Init())
8654 {
8655 return E_OUTOFMEMORY;
8656 }
8657
8658 // Ownership transferred to [out] param. Caller must Release() when done with this.
8659 *ppEnum = (ICorProfilerFunctionEnum *)pJitEnum.Extract();
8660
8661 return S_OK;
8662}
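
//
// Illustrative profiler-side consumption of the enumerator returned above (a hedged
// sketch; pCorProfilerInfo is a hypothetical name):
//
//     ICorProfilerFunctionEnum * pEnum = NULL;
//     HRESULT hr = pCorProfilerInfo->EnumJITedFunctions(&pEnum);
//     if (SUCCEEDED(hr))
//     {
//         COR_PRF_FUNCTION func;
//         ULONG cFetched = 0;
//         while (pEnum->Next(1, &func, &cFetched) == S_OK && cFetched == 1)
//         {
//             // func.functionId identifies a JITted function (use EnumJITedFunctions2
//             // below to have func.reJitId filled in as well).
//         }
//         pEnum->Release();   // ownership was transferred to the caller
//     }
//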
8663
8664HRESULT ProfToEEInterfaceImpl::EnumJITedFunctions2(ICorProfilerFunctionEnum ** ppEnum)
8665{
8666 CONTRACTL
8667 {
8668 // Yay!
8669 NOTHROW;
8670
8671        // Gathering ReJIT IDs requires taking a lock, and that lock might switch to
8672        // preemptive mode...
8673 GC_TRIGGERS;
8674
8675 // Yay!
8676 MODE_ANY;
8677
8678 // If we're in preemptive mode we need to take a read lock to safely walk
8679 // the JIT data structures.
8680 // Gathering RejitIDs also takes a lock.
8681 CAN_TAKE_LOCK;
8682
8683 SO_NOT_MAINLINE;
8684
8685 PRECONDITION(CheckPointer(ppEnum, NULL_OK));
8686
8687 }
8688 CONTRACTL_END;
8689
8690 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8691 kP2EEAllowableAfterAttach | kP2EETriggers,
8692 (LF_CORPROF,
8693 LL_INFO10,
8694 "**PROF: EnumJITedFunctions.\n"));
8695
8696 if (ppEnum == NULL)
8697 {
8698 return E_INVALIDARG;
8699 }
8700
8701 *ppEnum = NULL;
8702
8703 NewHolder<ProfilerFunctionEnum> pJitEnum(new (nothrow) ProfilerFunctionEnum());
8704 if (pJitEnum == NULL)
8705 {
8706 return E_OUTOFMEMORY;
8707 }
8708
8709 if (!pJitEnum->Init(TRUE /* fWithReJITIDs */))
8710 {
8711 // If it fails, it's because of OOM.
8712 return E_OUTOFMEMORY;
8713 }
8714
8715 // Ownership transferred to [out] param. Caller must Release() when done with this.
8716 *ppEnum = (ICorProfilerFunctionEnum *)pJitEnum.Extract();
8717
8718 return S_OK;
8719}
8720
8721HRESULT ProfToEEInterfaceImpl::EnumModules(ICorProfilerModuleEnum ** ppEnum)
8722{
8723 CONTRACTL
8724 {
8725 // Yay!
8726 NOTHROW;
8727
8728 // This method populates the enumerator, which requires iterating over
8729 // AppDomains, which adds, then releases, a reference on each AppDomain iterated.
8730 // This causes locking, and can cause triggering if the AppDomain gets destroyed
8731 // as a result of the release. (See code:AppDomainIterator::Next and its call to
8732 // code:AppDomain::Release.)
8733 GC_TRIGGERS;
8734
8735 // Yay!
8736 MODE_ANY;
8737
8738 // (See comment above GC_TRIGGERS.)
8739 CAN_TAKE_LOCK;
8740
8741 SO_NOT_MAINLINE;
8742
8743 PRECONDITION(CheckPointer(ppEnum, NULL_OK));
8744
8745 }
8746 CONTRACTL_END;
8747
8748 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8749 kP2EEAllowableAfterAttach | kP2EETriggers,
8750 (LF_CORPROF,
8751 LL_INFO10,
8752 "**PROF: EnumModules.\n"));
8753
8754 HRESULT hr;
8755
8756 if (ppEnum == NULL)
8757 {
8758 return E_INVALIDARG;
8759 }
8760
8761 *ppEnum = NULL;
8762
8763    // ProfilerModuleEnum uses AppDomainIterator, which cannot be called while the current thread
8764    // is holding the ThreadStore lock.
8765 if (ThreadStore::HoldingThreadStore())
8766 {
8767 return CORPROF_E_UNSUPPORTED_CALL_SEQUENCE;
8768 }
8769
8770 NewHolder<ProfilerModuleEnum> pModuleEnum(new (nothrow) ProfilerModuleEnum);
8771 if (pModuleEnum == NULL)
8772 {
8773 return E_OUTOFMEMORY;
8774 }
8775
8776 hr = pModuleEnum->Init();
8777 if (FAILED(hr))
8778 {
8779 return hr;
8780 }
8781
8782 // Ownership transferred to [out] param. Caller must Release() when done with this.
8783 *ppEnum = (ICorProfilerModuleEnum *) pModuleEnum.Extract();
8784
8785 return S_OK;
8786}
8787
8788HRESULT ProfToEEInterfaceImpl::GetRuntimeInformation(USHORT * pClrInstanceId,
8789 COR_PRF_RUNTIME_TYPE * pRuntimeType,
8790 USHORT * pMajorVersion,
8791 USHORT * pMinorVersion,
8792 USHORT * pBuildNumber,
8793 USHORT * pQFEVersion,
8794 ULONG cchVersionString,
8795 ULONG * pcchVersionString,
8796 __out_ecount_part_opt(cchVersionString, *pcchVersionString) WCHAR szVersionString[])
8797{
8798 CONTRACTL
8799 {
8800 // Yay!
8801 NOTHROW;
8802
8803 // Yay!
8804 GC_NOTRIGGER;
8805
8806 // Yay!
8807 MODE_ANY;
8808
8809 // Yay!
8810 EE_THREAD_NOT_REQUIRED;
8811
8812 // Yay!
8813 CANNOT_TAKE_LOCK;
8814
8815 SO_NOT_MAINLINE;
8816 }
8817 CONTRACTL_END;
8818
8819 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach,
8820 (LF_CORPROF,
8821 LL_INFO1000,
8822 "**PROF: GetRuntimeInformation.\n"));
8823
8824 if ((szVersionString != NULL) && (pcchVersionString == NULL))
8825 {
8826 return E_INVALIDARG;
8827 }
8828
8829 if (pcchVersionString != NULL)
8830 {
8831 HRESULT hr = GetCORVersionInternal(szVersionString, (DWORD)cchVersionString, (DWORD *)pcchVersionString);
8832 if (FAILED(hr))
8833 return hr;
8834 }
8835
8836 if (pClrInstanceId != NULL)
8837 *pClrInstanceId = static_cast<USHORT>(GetClrInstanceId());
8838
8839 if (pRuntimeType != NULL)
8840 {
8841 *pRuntimeType = COR_PRF_CORE_CLR;
8842 }
8843
8844 if (pMajorVersion != NULL)
8845 *pMajorVersion = CLR_MAJOR_VERSION;
8846
8847 if (pMinorVersion != NULL)
8848 *pMinorVersion = CLR_MINOR_VERSION;
8849
8850 if (pBuildNumber != NULL)
8851 *pBuildNumber = CLR_BUILD_VERSION;
8852
8853 if (pQFEVersion != NULL)
8854 *pQFEVersion = CLR_BUILD_VERSION_QFE;
8855
8856 return S_OK;
8857}
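
//
// Illustrative profiler-side usage of GetRuntimeInformation (a hedged sketch;
// pCorProfilerInfo and the buffer size are assumptions, not part of this file):
//
//     USHORT clrInstanceId, major, minor, build, qfe;
//     COR_PRF_RUNTIME_TYPE runtimeType;
//     WCHAR  wszVersion[256];
//     ULONG  cchVersion = 0;
//     HRESULT hr = pCorProfilerInfo->GetRuntimeInformation(&clrInstanceId, &runtimeType,
//                      &major, &minor, &build, &qfe,
//                      _countof(wszVersion), &cchVersion, wszVersion);
//     // Any output parameter the profiler does not need may be passed as NULL (a version
//     // string buffer, if supplied, must be accompanied by a non-NULL pcchVersionString).
//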
8858
8859
8860HRESULT ProfToEEInterfaceImpl::RequestProfilerDetach(DWORD dwExpectedCompletionMilliseconds)
8861{
8862 CONTRACTL
8863 {
8864 // Yay!
8865 NOTHROW;
8866
8867 // Crst is used in ProfilingAPIDetach::RequestProfilerDetach so GC may be triggered
8868 GC_TRIGGERS;
8869
8870 // Yay!
8871 MODE_ANY;
8872
8873 // Yay!
8874 EE_THREAD_NOT_REQUIRED;
8875
8876 // Crst is used in ProfilingAPIDetach::RequestProfilerDetach
8877 CAN_TAKE_LOCK;
8878
8879 SO_NOT_MAINLINE;
8880 }
8881 CONTRACTL_END;
8882
8883 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
8884 kP2EEAllowableAfterAttach | kP2EETriggers,
8885 (LF_CORPROF,
8886 LL_INFO1000,
8887 "**PROF: RequestProfilerDetach.\n"));
8888
8889#ifdef FEATURE_PROFAPI_ATTACH_DETACH
8890 return ProfilingAPIDetach::RequestProfilerDetach(dwExpectedCompletionMilliseconds);
8891#else // FEATURE_PROFAPI_ATTACH_DETACH
8892 return E_NOTIMPL;
8893#endif // FEATURE_PROFAPI_ATTACH_DETACH
8894}
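
//
// Illustrative profiler-side usage of RequestProfilerDetach (a hedged sketch;
// pCorProfilerInfo is a hypothetical name):
//
//     // Typically called from a thread created by the profiler, once the profiler no
//     // longer needs callbacks or outstanding ICorProfilerInfo state:
//     HRESULT hr = pCorProfilerInfo->RequestProfilerDetach(5000 /* expected completion, ms */);
//     // On success the runtime later calls ICorProfilerCallback3::ProfilerDetachSucceeded,
//     // after which the profiler DLL can safely be unloaded.
//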
8895
8896typedef struct _COR_PRF_ELT_INFO_INTERNAL
8897{
8898    // Points to a platform-dependent structure that the ASM helper pushes on the stack
8899 void * platformSpecificHandle;
8900
8901    // startAddress of the COR_PRF_FUNCTION_ARGUMENT_RANGE structure needs to point
8902    // TO the argument value, not BE the argument value.  So, when the argument
8903    // is 'this', we need a location that points TO 'this'.  Because of the calling
8904    // sequence change in ELT3, we need to reserve the pointer here instead of using
8905    // one of our stack variables.
8906 void * pThis;
8907
8908 // Reserve space for output parameter COR_PRF_FRAME_INFO of
8909 // GetFunctionXXXX3Info functions
8910 COR_PRF_FRAME_INFO_INTERNAL frameInfo;
8911
8912} COR_PRF_ELT_INFO_INTERNAL;
8913
8914//---------------------------------------------------------------------------------------
8915//
8916// ProfilingGetFunctionEnter3Info provides frame information and argument information for
8917// the function the ELT callback is inspecting.  It is called either by the profiler or the
8918// C helper function.
8919//
8920// Arguments:
8921// * functionId - [in] FunctionId of the function being inspected by ELT3
8922//    * eltInfo - [in] The opaque pointer that the FunctionEnter3WithInfo callback passed to the profiler
8923// * pFrameInfo - [out] Pointer to COR_PRF_FRAME_INFO the profiler later can use to inspect
8924// generic types
8925// * pcbArgumentInfo - [in, out] Pointer to ULONG that specifies the size of structure
8926// pointed by pArgumentInfo
8927//    * pArgumentInfo - [out] Pointer to a COR_PRF_FUNCTION_ARGUMENT_INFO structure; the profiler
8928//        must reserve enough space in it for the function it is inspecting
8929//
8930// Return Value:
8931// HRESULT indicating success or failure.
8932//
8933
8934HRESULT ProfilingGetFunctionEnter3Info(FunctionID functionId, // in
8935 COR_PRF_ELT_INFO eltInfo, // in
8936 COR_PRF_FRAME_INFO * pFrameInfo, // out
8937 ULONG * pcbArgumentInfo, // in, out
8938 COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo) // out
8939{
8940 CONTRACTL
8941 {
8942 // Yay!
8943 NOTHROW;
8944
8945 // Yay!
8946 GC_NOTRIGGER;
8947
8948 // Yay!
8949 MODE_ANY;
8950
8951 // ProfileArgIterator::ProfileArgIterator may take locks
8952 CAN_TAKE_LOCK;
8953
8954 SO_NOT_MAINLINE;
8955
8956 }
8957 CONTRACTL_END;
8958
8959 if ((functionId == NULL) || (eltInfo == NULL))
8960 {
8961 return E_INVALIDARG;
8962 }
8963
8964 COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
8965 ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
8966
8967 // The loader won't trigger a GC or throw for already loaded argument types.
8968 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
8969
8970 //
8971 // Find the method this is referring to, so we can get the signature
8972 //
8973 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
8974 MetaSig metaSig(pMethodDesc);
8975
8976 NewHolder<ProfileArgIterator> pProfileArgIterator;
8977
8978 {
8979 // Can handle E_OUTOFMEMORY from ProfileArgIterator.
8980 FAULT_NOT_FATAL();
8981
8982 pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
8983
8984 if (pProfileArgIterator == NULL)
8985 {
8986 return E_UNEXPECTED;
8987 }
8988 }
8989
8990 if (CORProfilerFrameInfoEnabled())
8991 {
8992 if (pFrameInfo == NULL)
8993 {
8994 return E_INVALIDARG;
8995 }
8996
8997 //
8998 // Setup the COR_PRF_FRAME_INFO structure first.
8999 //
9000 COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
9001
9002 pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
9003 pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
9004 pCorPrfFrameInfo->funcID = functionId;
9005 pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
9006 pCorPrfFrameInfo->extraArg = pProfileArgIterator->GetHiddenArgValue();
9007 pCorPrfFrameInfo->thisArg = pProfileArgIterator->GetThis();
9008
9009 *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
9010 }
9011
9012 //
9013 // Do argument processing if desired.
9014 //
9015 if (CORProfilerFunctionArgsEnabled())
9016 {
9017 if (pcbArgumentInfo == NULL)
9018 {
9019 return E_INVALIDARG;
9020 }
9021
9022 if ((*pcbArgumentInfo != 0) && (pArgumentInfo == NULL))
9023 {
9024 return E_INVALIDARG;
9025 }
9026
9027 ULONG32 count = pProfileArgIterator->GetNumArgs();
9028
9029 if (metaSig.HasThis())
9030 {
9031 count++;
9032 }
9033
9034 ULONG ulArgInfoSize = sizeof(COR_PRF_FUNCTION_ARGUMENT_INFO) + (count * sizeof(COR_PRF_FUNCTION_ARGUMENT_RANGE));
9035
9036 if (*pcbArgumentInfo < ulArgInfoSize)
9037 {
9038 *pcbArgumentInfo = ulArgInfoSize;
9039 return HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER);
9040 }
9041
9042 _ASSERTE(pArgumentInfo != NULL);
9043
9044 pArgumentInfo->numRanges = count;
9045 pArgumentInfo->totalArgumentSize = 0;
9046
9047 count = 0;
9048
9049 if (metaSig.HasThis())
9050 {
9051 pELTInfo->pThis = pProfileArgIterator->GetThis();
9052 pArgumentInfo->ranges[count].startAddress = (UINT_PTR) (&(pELTInfo->pThis));
9053
9054 UINT length = sizeof(pELTInfo->pThis);
9055 pArgumentInfo->ranges[count].length = length;
9056 pArgumentInfo->totalArgumentSize += length;
9057 count++;
9058 }
9059
9060 while (count < pArgumentInfo->numRanges)
9061 {
9062 pArgumentInfo->ranges[count].startAddress = (UINT_PTR)(pProfileArgIterator->GetNextArgAddr());
9063
9064 UINT length = pProfileArgIterator->GetArgSize();
9065 pArgumentInfo->ranges[count].length = length;
9066 pArgumentInfo->totalArgumentSize += length;
9067 count++;
9068 }
9069 }
9070
9071 return S_OK;
9072}
9073
9074
9075
9076HRESULT ProfToEEInterfaceImpl::GetFunctionEnter3Info(FunctionID functionId, // in
9077 COR_PRF_ELT_INFO eltInfo, // in
9078 COR_PRF_FRAME_INFO * pFrameInfo, // out
9079 ULONG * pcbArgumentInfo, // in, out
9080 COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo) // out
9081{
9082 CONTRACTL
9083 {
9084 // Yay!
9085 NOTHROW;
9086
9087 // Yay!
9088 GC_NOTRIGGER;
9089
9090 // Yay!
9091 MODE_ANY;
9092
9093 // ProfilingGetFunctionEnter3Info may take locks
9094 CAN_TAKE_LOCK;
9095
9096 SO_NOT_MAINLINE;
9097
9098 }
9099 CONTRACTL_END;
9100
9101 PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
9102 LL_INFO1000,
9103 "**PROF: GetFunctionEnter3Info.\n"));
9104
9105 _ASSERTE(g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() != NULL);
9106
9107 if (!CORProfilerELT3SlowPathEnterEnabled())
9108 {
9109 return CORPROF_E_INCONSISTENT_WITH_FLAGS;
9110 }
9111
9112 return ProfilingGetFunctionEnter3Info(functionId, eltInfo, pFrameInfo, pcbArgumentInfo, pArgumentInfo);
9113}
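
//
// Illustrative profiler-side usage from inside a FunctionEnter3WithInfo hook (a hedged
// sketch, assuming no FunctionIDMapper is installed; pCorProfilerInfo and the fixed-size
// buffer are assumptions, not part of this file):
//
//     COR_PRF_FRAME_INFO frameInfo = NULL;
//     BYTE  argInfoBuffer[256];
//     ULONG cbArgInfo = sizeof(argInfoBuffer);
//     COR_PRF_FUNCTION_ARGUMENT_INFO * pArgInfo =
//         (COR_PRF_FUNCTION_ARGUMENT_INFO *)argInfoBuffer;
//     HRESULT hr = pCorProfilerInfo->GetFunctionEnter3Info(functionIDOrClientID.functionID,
//                      eltInfo, &frameInfo, &cbArgInfo, pArgInfo);
//     if (hr == HRESULT_FROM_WIN32(ERROR_INSUFFICIENT_BUFFER))
//     {
//         // cbArgInfo now holds the required size; retry with a larger buffer.
//     }
//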
9114
9115//---------------------------------------------------------------------------------------
9116//
9117// ProfilingGetFunctionLeave3Info provides frame information and return value information
9118// for the function the ELT callback is inspecting.  It is called either by the profiler or the
9119// C helper function.
9120//
9121// Arguments:
9122// * functionId - [in] FunctionId of the function being inspected by ELT3
9123//    * eltInfo - [in] The opaque pointer that the FunctionLeave3WithInfo callback passed to the profiler
9124// * pFrameInfo - [out] Pointer to COR_PRF_FRAME_INFO the profiler later can use to inspect
9125// generic types
9126// * pRetvalRange - [out] Pointer to COR_PRF_FUNCTION_ARGUMENT_RANGE to store return value
9127//
9128// Return Value:
9129// HRESULT indicating success or failure.
9130//
9131
9132HRESULT ProfilingGetFunctionLeave3Info(FunctionID functionId, // in
9133 COR_PRF_ELT_INFO eltInfo, // in
9134 COR_PRF_FRAME_INFO * pFrameInfo, // out
9135 COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange) // out
9136{
9137 CONTRACTL
9138 {
9139 // Yay!
9140 NOTHROW;
9141
9142 // Yay!
9143 GC_NOTRIGGER;
9144
9145 // Yay!
9146 MODE_ANY;
9147
9148 // ProfileArgIterator::ProfileArgIterator may take locks
9149 CAN_TAKE_LOCK;
9150
9151 SO_NOT_MAINLINE;
9152 }
9153 CONTRACTL_END;
9154
9155 if ((pFrameInfo == NULL) || (eltInfo == NULL))
9156 {
9157 return E_INVALIDARG;
9158 }
9159
9160 COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
9161 ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
9162
9163 // The loader won't trigger a GC or throw for already loaded argument types.
9164 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
9165
9166 //
9167 // Find the method this is referring to, so we can get the signature
9168 //
9169 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
9170 MetaSig metaSig(pMethodDesc);
9171
9172 NewHolder<ProfileArgIterator> pProfileArgIterator;
9173
9174 {
9175 // Can handle E_OUTOFMEMORY from ProfileArgIterator.
9176 FAULT_NOT_FATAL();
9177
9178 pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
9179
9180 if (pProfileArgIterator == NULL)
9181 {
9182 return E_UNEXPECTED;
9183 }
9184 }
9185
9186 if (CORProfilerFrameInfoEnabled())
9187 {
9188 if (pFrameInfo == NULL)
9189 {
9190 return E_INVALIDARG;
9191 }
9192
9193 COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
9194
9195 //
9196 // Setup the COR_PRF_FRAME_INFO structure first.
9197 //
9198 pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
9199 pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
9200 pCorPrfFrameInfo->funcID = functionId;
9201 pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
9202
9203        // By the time the Leave hook runs, the register used to pass the 'this' pointer on function
9204        // calls may already have been reused and likely no longer contains it.
9205 pCorPrfFrameInfo->extraArg = NULL;
9206 pCorPrfFrameInfo->thisArg = NULL;
9207
9208 *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
9209 }
9210
9211 //
9212 // Do argument processing if desired.
9213 //
9214 if (CORProfilerFunctionReturnValueEnabled())
9215 {
9216 if (pRetvalRange == NULL)
9217 {
9218 return E_INVALIDARG;
9219 }
9220
9221 if (!metaSig.IsReturnTypeVoid())
9222 {
9223 pRetvalRange->length = metaSig.GetReturnTypeSize();
9224 pRetvalRange->startAddress = (UINT_PTR)pProfileArgIterator->GetReturnBufferAddr();
9225 }
9226 else
9227 {
9228 pRetvalRange->length = 0;
9229 pRetvalRange->startAddress = 0;
9230 }
9231 }
9232
9233 return S_OK;
9234}
9235
9236
9237HRESULT ProfToEEInterfaceImpl::GetFunctionLeave3Info(FunctionID functionId, // in
9238 COR_PRF_ELT_INFO eltInfo, // in
9239 COR_PRF_FRAME_INFO * pFrameInfo, // out
9240 COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange) // out
9241{
9242 CONTRACTL
9243 {
9244 // Yay!
9245 NOTHROW;
9246
9247 // Yay!
9248 GC_NOTRIGGER;
9249
9250 // Yay!
9251 MODE_ANY;
9252
9253 // ProfilingGetFunctionLeave3Info may take locks
9254 CAN_TAKE_LOCK;
9255
9256 SO_NOT_MAINLINE;
9257
9258 }
9259 CONTRACTL_END;
9260
9261 PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
9262 LL_INFO1000,
9263 "**PROF: GetFunctionLeave3Info.\n"));
9264
9265 _ASSERTE(g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() != NULL);
9266
9267 if (!CORProfilerELT3SlowPathLeaveEnabled())
9268 {
9269 return CORPROF_E_INCONSISTENT_WITH_FLAGS;
9270 }
9271
9272 return ProfilingGetFunctionLeave3Info(functionId, eltInfo, pFrameInfo, pRetvalRange);
9273}
9274
9275//---------------------------------------------------------------------------------------
9276//
9277// ProfilingGetFunctionTailcall3Info provides frame information for the function the ELT callback
9278// is inspecting.  It is called either by the profiler or the C helper function.
9279//
9280// Arguments:
9281// * functionId - [in] FunctionId of the function being inspected by ELT3
9282//    * eltInfo - [in] The opaque pointer that the FunctionTailcall3WithInfo callback passed to
9283//        the profiler
9284// * pFrameInfo - [out] Pointer to COR_PRF_FRAME_INFO the profiler later can use to inspect
9285// generic types
9286//
9287// Return Value:
9288// HRESULT indicating success or failure.
9289//
9290
9291HRESULT ProfilingGetFunctionTailcall3Info(FunctionID functionId, // in
9292 COR_PRF_ELT_INFO eltInfo, // in
9293 COR_PRF_FRAME_INFO * pFrameInfo) // out
9294{
9295 CONTRACTL
9296 {
9297 // Yay!
9298 NOTHROW;
9299
9300 // Yay!
9301 GC_NOTRIGGER;
9302
9303 // Yay!
9304 MODE_ANY;
9305
9306 // ProfileArgIterator::ProfileArgIterator may take locks
9307 CAN_TAKE_LOCK;
9308
9309 SO_NOT_MAINLINE;
9310
9311 }
9312 CONTRACTL_END;
9313
9314 if ((functionId == NULL) || (eltInfo == NULL) || (pFrameInfo == NULL))
9315 {
9316 return E_INVALIDARG;
9317 }
9318
9319 COR_PRF_ELT_INFO_INTERNAL * pELTInfo = (COR_PRF_ELT_INFO_INTERNAL *)eltInfo;
9320 ProfileSetFunctionIDInPlatformSpecificHandle(pELTInfo->platformSpecificHandle, functionId);
9321
9322 // The loader won't trigger a GC or throw for already loaded argument types.
9323 ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();
9324
9325 //
9326 // Find the method this is referring to, so we can get the signature
9327 //
9328 MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
9329 MetaSig metaSig(pMethodDesc);
9330
9331 NewHolder<ProfileArgIterator> pProfileArgIterator;
9332
9333 {
9334 // Can handle E_OUTOFMEMORY from ProfileArgIterator.
9335 FAULT_NOT_FATAL();
9336
9337 pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, pELTInfo->platformSpecificHandle);
9338
9339 if (pProfileArgIterator == NULL)
9340 {
9341 return E_UNEXPECTED;
9342 }
9343 }
9344
9345 COR_PRF_FRAME_INFO_INTERNAL * pCorPrfFrameInfo = &(pELTInfo->frameInfo);
9346
9347 //
9348 // Setup the COR_PRF_FRAME_INFO structure first.
9349 //
9350 pCorPrfFrameInfo->size = sizeof(COR_PRF_FRAME_INFO_INTERNAL);
9351 pCorPrfFrameInfo->version = COR_PRF_FRAME_INFO_INTERNAL_CURRENT_VERSION;
9352 pCorPrfFrameInfo->funcID = functionId;
9353 pCorPrfFrameInfo->IP = ProfileGetIPFromPlatformSpecificHandle(pELTInfo->platformSpecificHandle);
9354
9355    // Tailcall is designed to report the caller, not the callee.  But the tailcall hook is invoked
9356    // with registers containing parameters passed to the callee, before calling into the callee.
9357    // The 'this' pointer we get here is the callee's.  Because of the constraints imposed on tailcall
9358    // optimization, the 'this' pointer passed to the callee happens to be the same 'this' pointer
9359    // passed to the caller.
9360    //
9361    // It is a fragile coincidence we should not depend on, because the JIT is free to change the
9362    // implementation details in the future.
9363 pCorPrfFrameInfo->extraArg = NULL;
9364 pCorPrfFrameInfo->thisArg = NULL;
9365
9366 *pFrameInfo = (COR_PRF_FRAME_INFO)pCorPrfFrameInfo;
9367
9368 return S_OK;
9369}
9370
9371
9372HRESULT ProfToEEInterfaceImpl::GetFunctionTailcall3Info(FunctionID functionId, // in
9373 COR_PRF_ELT_INFO eltInfo, // in
9374 COR_PRF_FRAME_INFO * pFrameInfo) // out
9375{
9376 CONTRACTL
9377 {
9378 // Yay!
9379 NOTHROW;
9380
9381 // Yay!
9382 GC_NOTRIGGER;
9383
9384 // Yay!
9385 MODE_ANY;
9386
9387 // ProfilingGetFunctionTailcall3Info may take locks
9388 CAN_TAKE_LOCK;
9389
9390 SO_NOT_MAINLINE;
9391
9392 }
9393 CONTRACTL_END;
9394
9395 PROFILER_TO_CLR_ENTRYPOINT_SYNC((LF_CORPROF,
9396 LL_INFO1000,
9397 "**PROF: GetFunctionTailcall3Info.\n"));
9398
9399 _ASSERTE(g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() != NULL);
9400
9401 if (!CORProfilerELT3SlowPathTailcallEnabled())
9402 {
9403 return CORPROF_E_INCONSISTENT_WITH_FLAGS;
9404 }
9405
9406 return ProfilingGetFunctionTailcall3Info(functionId, eltInfo, pFrameInfo);
9407}
9408
9409HRESULT ProfToEEInterfaceImpl::EnumThreads(
9410 /* out */ ICorProfilerThreadEnum ** ppEnum)
9411{
9412
9413 CONTRACTL
9414 {
9415 // Yay!
9416 NOTHROW;
9417
9418 // Yay!
9419 GC_NOTRIGGER;
9420
9421 // Yay!
9422 MODE_ANY;
9423
9424 // Need to acquire the thread store lock
9425 CAN_TAKE_LOCK;
9426
9427 SO_NOT_MAINLINE;
9428
9429 PRECONDITION(CheckPointer(ppEnum, NULL_OK));
9430
9431 }
9432 CONTRACTL_END;
9433
9434 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
9435 kP2EEAllowableAfterAttach,
9436 (LF_CORPROF,
9437 LL_INFO10,
9438 "**PROF: EnumThreads.\n"));
9439
9440 HRESULT hr;
9441
9442 if (ppEnum == NULL)
9443 {
9444 return E_INVALIDARG;
9445 }
9446
9447 *ppEnum = NULL;
9448
9449 NewHolder<ProfilerThreadEnum> pThreadEnum(new (nothrow) ProfilerThreadEnum);
9450 if (pThreadEnum == NULL)
9451 {
9452 return E_OUTOFMEMORY;
9453 }
9454
9455 hr = pThreadEnum->Init();
9456 if (FAILED(hr))
9457 {
9458 return hr;
9459 }
9460
9461 // Ownership transferred to [out] param. Caller must Release() when done with this.
9462 *ppEnum = (ICorProfilerThreadEnum *) pThreadEnum.Extract();
9463
9464 return S_OK;
9465}
9466
9467// This function needs to be called on any thread before making any ICorProfilerInfo* calls, and must be
9468// called before any thread is suspended by this profiler.
9469// As you might have already figured out, this is done to avoid deadlock situations in which
9470// the suspended thread holds the loader lock / heap lock while the current thread is trying to acquire
9471// the same lock.
9472HRESULT ProfToEEInterfaceImpl::InitializeCurrentThread()
9473{
9474
9475 CONTRACTL
9476 {
9477 // Yay!
9478 NOTHROW;
9479
9480 // Yay!
9481 GC_NOTRIGGER;
9482
9483 // Yay!
9484 MODE_ANY;
9485
9486 // May take thread store lock and OS APIs may also take locks
9487 CAN_TAKE_LOCK;
9488
9489 SO_NOT_MAINLINE;
9490 }
9491 CONTRACTL_END;
9492
9493
9494 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
9495 kP2EEAllowableAfterAttach,
9496 (LF_CORPROF,
9497 LL_INFO10,
9498 "**PROF: InitializeCurrentThread.\n"));
9499
9500 HRESULT hr = S_OK;
9501
9502 EX_TRY
9503 {
9504 CExecutionEngine::SetupTLSForThread(GetThread());
9505 }
9506 EX_CATCH_HRESULT(hr);
9507
9508 if (FAILED(hr))
9509 return hr;
9510
9511 return S_OK;
9512}
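
//
// Illustrative profiler-side usage of InitializeCurrentThread (a hedged sketch;
// pCorProfilerInfo is a hypothetical name):
//
//     // On a profiler-created sampling thread, before suspending any managed thread or
//     // making other ICorProfilerInfo* calls (e.g. DoStackSnapshot):
//     HRESULT hr = pCorProfilerInfo->InitializeCurrentThread();
//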
9513
9514struct InternalProfilerModuleEnum : public ProfilerModuleEnum
9515{
9516 CDynArray<ModuleID> *GetRawElementsArray()
9517 {
9518 return &m_elements;
9519 }
9520};
9521
9522HRESULT ProfToEEInterfaceImpl::EnumNgenModuleMethodsInliningThisMethod(
9523 ModuleID inlinersModuleId,
9524 ModuleID inlineeModuleId,
9525 mdMethodDef inlineeMethodId,
9526 BOOL *incompleteData,
9527 ICorProfilerMethodEnum** ppEnum)
9528{
9529 CONTRACTL
9530 {
9531 NOTHROW;
9532 GC_TRIGGERS;
9533 MODE_ANY;
9534 SO_NOT_MAINLINE;
9535 CAN_TAKE_LOCK;
9536 PRECONDITION(CheckPointer(ppEnum));
9537 }
9538 CONTRACTL_END;
9539
9540 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EETriggers, (LF_CORPROF, LL_INFO1000, "**PROF: EnumNgenModuleMethodsInliningThisMethod.\n"));
9541
9542 if (ppEnum == NULL)
9543 {
9544 return E_INVALIDARG;
9545 }
9546 *ppEnum = NULL;
9547 HRESULT hr = S_OK;
9548
9549 Module *inlineeOwnerModule = reinterpret_cast<Module *>(inlineeModuleId);
9550 if (inlineeOwnerModule == NULL)
9551 {
9552 return E_INVALIDARG;
9553 }
9554 if (inlineeOwnerModule->IsBeingUnloaded())
9555 {
9556 return CORPROF_E_DATAINCOMPLETE;
9557 }
9558
9559 Module *inlinersModule = reinterpret_cast<Module *>(inlinersModuleId);
9560 if (inlinersModule == NULL)
9561 {
9562 return E_INVALIDARG;
9563 }
9564 if(inlinersModule->IsBeingUnloaded())
9565 {
9566 return CORPROF_E_DATAINCOMPLETE;
9567 }
9568
9569 if (!inlinersModule->HasInlineTrackingMap())
9570 {
9571 return CORPROF_E_DATAINCOMPLETE;
9572 }
9573
9574 CDynArray<COR_PRF_METHOD> results;
9575 const COUNT_T staticBufferSize = 10;
9576 MethodInModule staticBuffer[staticBufferSize];
9577 NewArrayHolder<MethodInModule> dynamicBuffer;
9578 MethodInModule *methodsBuffer = staticBuffer;
9579 EX_TRY
9580 {
9581 // Trying to use static buffer
9582 COUNT_T methodsAvailable = inlinersModule->GetInliners(inlineeOwnerModule, inlineeMethodId, staticBufferSize, staticBuffer, incompleteData);
9583
9584 // If static buffer is not enough, allocate an array.
9585 if (methodsAvailable > staticBufferSize)
9586 {
9587 DWORD dynamicBufferSize = methodsAvailable;
9588 dynamicBuffer = methodsBuffer = new MethodInModule[dynamicBufferSize];
9589 methodsAvailable = inlinersModule->GetInliners(inlineeOwnerModule, inlineeMethodId, dynamicBufferSize, dynamicBuffer, incompleteData);
9590 if (methodsAvailable > dynamicBufferSize)
9591 {
9592 _ASSERTE(!"Ngen image inlining info changed, this shouldn't be possible.");
9593 methodsAvailable = dynamicBufferSize;
9594 }
9595 }
9596
9597        // Go through all inliners found in the inlinersModule and prepare them for export via results.
9598 results.AllocateBlockThrowing(methodsAvailable);
9599 for (COUNT_T j = 0; j < methodsAvailable; j++)
9600 {
9601 COR_PRF_METHOD *newPrfMethod = &results[j];
9602 newPrfMethod->moduleId = reinterpret_cast<ModuleID>(methodsBuffer[j].m_module);
9603 newPrfMethod->methodId = methodsBuffer[j].m_methodDef;
9604 }
9605 *ppEnum = new ProfilerMethodEnum(&results);
9606 }
9607 EX_CATCH_HRESULT(hr);
9608
9609 return hr;
9610}
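
//
// Illustrative profiler-side usage of EnumNgenModuleMethodsInliningThisMethod, e.g. to find
// inliners that should also be ReJITted when requesting a ReJIT of inlineeMethodId (a hedged
// sketch; pCorProfilerInfo is a hypothetical name):
//
//     BOOL fIncomplete = FALSE;
//     ICorProfilerMethodEnum * pMethodEnum = NULL;
//     HRESULT hr = pCorProfilerInfo->EnumNgenModuleMethodsInliningThisMethod(
//                      inlinersModuleId, inlineeModuleId, inlineeMethodId,
//                      &fIncomplete, &pMethodEnum);
//     if (SUCCEEDED(hr))
//     {
//         COR_PRF_METHOD method;
//         ULONG cFetched = 0;
//         while (pMethodEnum->Next(1, &method, &cFetched) == S_OK && cFetched == 1)
//         {
//             // method.moduleId / method.methodId identify an inliner baked into the
//             // NGen image of inlinersModuleId.
//         }
//         pMethodEnum->Release();
//     }
//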
9611
9612HRESULT ProfToEEInterfaceImpl::GetInMemorySymbolsLength(
9613 ModuleID moduleId,
9614 DWORD* pCountSymbolBytes)
9615{
9616
9617 CONTRACTL
9618 {
9619 NOTHROW;
9620 GC_NOTRIGGER;
9621 MODE_ANY;
9622 SO_NOT_MAINLINE;
9623 }
9624 CONTRACTL_END;
9625
9626
9627 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
9628 kP2EEAllowableAfterAttach,
9629 (LF_CORPROF,
9630 LL_INFO10,
9631 "**PROF: GetInMemorySymbolsLength.\n"));
9632
9633 HRESULT hr = S_OK;
9634 if (pCountSymbolBytes == NULL)
9635 {
9636 return E_INVALIDARG;
9637 }
9638 *pCountSymbolBytes = 0;
9639
9640 Module* pModule = reinterpret_cast< Module* >(moduleId);
9641 if (pModule == NULL)
9642 {
9643 return E_INVALIDARG;
9644 }
9645 if (pModule->IsBeingUnloaded())
9646 {
9647 return CORPROF_E_DATAINCOMPLETE;
9648 }
9649
9650    // This method would work fine on Reflection.Emit modules, but there would be no way to know
9651    // if some other thread was changing the size of the symbols before this method returned.
9652    // Adding events or locks to detect/prevent changes would make the scenario workable.
9653 if (pModule->IsReflection())
9654 {
9655 return COR_PRF_MODULE_DYNAMIC;
9656 }
9657
9658 CGrowableStream* pStream = pModule->GetInMemorySymbolStream();
9659 if (pStream == NULL)
9660 {
9661 return S_OK;
9662 }
9663
9664 STATSTG SizeData = { 0 };
9665 hr = pStream->Stat(&SizeData, STATFLAG_NONAME);
9666 if (FAILED(hr))
9667 {
9668 return hr;
9669 }
9670 if (SizeData.cbSize.u.HighPart > 0)
9671 {
9672 return COR_E_OVERFLOW;
9673 }
9674 *pCountSymbolBytes = SizeData.cbSize.u.LowPart;
9675
9676 return S_OK;
9677}
9678
9679HRESULT ProfToEEInterfaceImpl::ReadInMemorySymbols(
9680 ModuleID moduleId,
9681 DWORD symbolsReadOffset,
9682 BYTE* pSymbolBytes,
9683 DWORD countSymbolBytes,
9684 DWORD* pCountSymbolBytesRead)
9685{
9686 CONTRACTL
9687 {
9688 NOTHROW;
9689 GC_NOTRIGGER;
9690 MODE_ANY;
9691 SO_NOT_MAINLINE;
9692 }
9693 CONTRACTL_END;
9694
9695 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(
9696 kP2EEAllowableAfterAttach,
9697 (LF_CORPROF,
9698 LL_INFO10,
9699 "**PROF: ReadInMemorySymbols.\n"));
9700
9701 HRESULT hr = S_OK;
9702 if (pSymbolBytes == NULL)
9703 {
9704 return E_INVALIDARG;
9705 }
9706 if (pCountSymbolBytesRead == NULL)
9707 {
9708 return E_INVALIDARG;
9709 }
9710 *pCountSymbolBytesRead = 0;
9711
9712 Module* pModule = reinterpret_cast< Module* >(moduleId);
9713 if (pModule == NULL)
9714 {
9715 return E_INVALIDARG;
9716 }
9717 if (pModule->IsBeingUnloaded())
9718 {
9719 return CORPROF_E_DATAINCOMPLETE;
9720 }
9721
9722    // This method would work fine on Reflection.Emit modules, but there would be no way to know
9723    // if some other thread was changing the size of the symbols before this method returned.
9724    // Adding events or locks to detect/prevent changes would make the scenario workable.
9725 if (pModule->IsReflection())
9726 {
9727 return COR_PRF_MODULE_DYNAMIC;
9728 }
9729
9730 CGrowableStream* pStream = pModule->GetInMemorySymbolStream();
9731 if (pStream == NULL)
9732 {
9733 return E_INVALIDARG;
9734 }
9735
9736 STATSTG SizeData = { 0 };
9737 hr = pStream->Stat(&SizeData, STATFLAG_NONAME);
9738 if (FAILED(hr))
9739 {
9740 return hr;
9741 }
9742 if (SizeData.cbSize.u.HighPart > 0)
9743 {
9744 return COR_E_OVERFLOW;
9745 }
9746 DWORD streamSize = SizeData.cbSize.u.LowPart;
9747 if (symbolsReadOffset >= streamSize)
9748 {
9749 return E_INVALIDARG;
9750 }
9751
9752 *pCountSymbolBytesRead = min(streamSize - symbolsReadOffset, countSymbolBytes);
9753 memcpy_s(pSymbolBytes, countSymbolBytes, ((BYTE*)pStream->GetRawBuffer().StartAddress()) + symbolsReadOffset, *pCountSymbolBytesRead);
9754
9755 return S_OK;
9756}
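
//
// Illustrative profiler-side usage of the in-memory symbol APIs above (a hedged sketch;
// pCorProfilerInfo and the allocation strategy are assumptions, not part of this file):
//
//     DWORD cbSymbols = 0;
//     HRESULT hr = pCorProfilerInfo->GetInMemorySymbolsLength(moduleId, &cbSymbols);
//     if (SUCCEEDED(hr) && cbSymbols != 0)
//     {
//         BYTE * pSymbols = new BYTE[cbSymbols];
//         DWORD cbRead = 0;
//         hr = pCorProfilerInfo->ReadInMemorySymbols(moduleId, 0, pSymbols,
//                                                    cbSymbols, &cbRead);
//         // On success, pSymbols holds the first cbRead bytes of the module's
//         // in-memory symbol (PDB) stream.
//         delete[] pSymbols;
//     }
//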
9757
9758HRESULT ProfToEEInterfaceImpl::ApplyMetaData(
9759 ModuleID moduleId)
9760{
9761 CONTRACTL
9762 {
9763 NOTHROW;
9764 GC_NOTRIGGER;
9765 MODE_ANY;
9766 SO_NOT_MAINLINE;
9767 }
9768 CONTRACTL_END;
9769
9770 PROFILER_TO_CLR_ENTRYPOINT_SYNC_EX(kP2EEAllowableAfterAttach, (LF_CORPROF, LL_INFO1000, "**PROF: ApplyMetaData.\n"));
9771
9772 if (moduleId == NULL)
9773 {
9774 return E_INVALIDARG;
9775 }
9776
9777 HRESULT hr = S_OK;
9778 EX_TRY
9779 {
9780 Module *pModule = (Module *)moduleId;
9781 _ASSERTE(pModule != NULL);
9782 if (pModule->IsBeingUnloaded())
9783 {
9784 hr = CORPROF_E_DATAINCOMPLETE;
9785 }
9786 else
9787 {
9788 pModule->ApplyMetaData();
9789 }
9790 }
9791 EX_CATCH_HRESULT(hr);
9792 return hr;
9793}
9794
9795//---------------------------------------------------------------------------------------
9796//
9797// Simple wrapper around EEToProfInterfaceImpl::ManagedToUnmanagedTransition. This
9798// can be called by C++ code and directly by generated stubs.
9799//
9800// Arguments:
9801// pMD - MethodDesc for the managed function involved in the transition
9802// reason - Passed on to profiler to indicate why the transition is occurring
9803//
9804
9805void __stdcall ProfilerManagedToUnmanagedTransitionMD(MethodDesc *pMD,
9806 COR_PRF_TRANSITION_REASON reason)
9807{
9808 CONTRACTL
9809 {
9810 NOTHROW;
9811 GC_TRIGGERS;
9812 MODE_PREEMPTIVE;
9813 SO_TOLERANT;
9814 }
9815 CONTRACTL_END;
9816
9817 // This function is called within the runtime, not directly from managed code.
9818    // Also, the only case where pMD is NULL is the calli P/Invoke case, and we still
9819 // want to notify the profiler in that case.
9820
9821 // Do not notify the profiler about QCalls
9822 if (pMD == NULL || !pMD->IsQCall())
9823 {
9824 BEGIN_PIN_PROFILER(CORProfilerPresent());
9825 g_profControlBlock.pProfInterface->ManagedToUnmanagedTransition(MethodDescToFunctionID(pMD),
9826 reason);
9827 END_PIN_PROFILER();
9828 }
9829}
9830
9831//---------------------------------------------------------------------------------------
9832//
9833// Simple wrapper around EEToProfInterfaceImpl::UnmanagedToManagedTransition. This
9834// can be called by C++ code and directly by generated stubs.
9835//
9836// Arguments:
9837// pMD - MethodDesc for the managed function involved in the transition
9838// reason - Passed on to profiler to indicate why the transition is occurring
9839//
9840
9841void __stdcall ProfilerUnmanagedToManagedTransitionMD(MethodDesc *pMD,
9842 COR_PRF_TRANSITION_REASON reason)
9843{
9844 CONTRACTL
9845 {
9846 NOTHROW;
9847 GC_TRIGGERS;
9848 MODE_PREEMPTIVE;
9849 SO_TOLERANT;
9850 }
9851 CONTRACTL_END;
9852
9853 // This function is called within the runtime, not directly from managed code.
9854    // Also, the only case where pMD is NULL is the calli P/Invoke case, and we still
9855 // want to notify the profiler in that case.
9856
9857 // Do not notify the profiler about QCalls
9858 if (pMD == NULL || !pMD->IsQCall())
9859 {
9860 BEGIN_PIN_PROFILER(CORProfilerPresent());
9861 g_profControlBlock.pProfInterface->UnmanagedToManagedTransition(MethodDescToFunctionID(pMD),
9862 reason);
9863 END_PIN_PROFILER();
9864 }
9865}
9866
9867
9868
9869#endif // PROFILING_SUPPORTED
9870
9871
9872FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemoting)
9873{
9874 FCALL_CONTRACT;
9875
9876#ifdef PROFILING_SUPPORTED
9877 FC_RETURN_BOOL(CORProfilerTrackRemoting());
9878#else // !PROFILING_SUPPORTED
9879 FC_RETURN_BOOL(FALSE);
9880#endif // !PROFILING_SUPPORTED
9881}
9882FCIMPLEND
9883
9884FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemotingCookie)
9885{
9886 FCALL_CONTRACT;
9887
9888#ifdef PROFILING_SUPPORTED
9889 FC_RETURN_BOOL(CORProfilerTrackRemotingCookie());
9890#else // !PROFILING_SUPPORTED
9891 FC_RETURN_BOOL(FALSE);
9892#endif // !PROFILING_SUPPORTED
9893}
9894FCIMPLEND
9895
9896FCIMPL0(FC_BOOL_RET, ProfilingFCallHelper::FC_TrackRemotingAsync)
9897{
9898 FCALL_CONTRACT;
9899
9900#ifdef PROFILING_SUPPORTED
9901 FC_RETURN_BOOL(CORProfilerTrackRemotingAsync());
9902#else // !PROFILING_SUPPORTED
9903 FC_RETURN_BOOL(FALSE);
9904#endif // !PROFILING_SUPPORTED
9905}
9906FCIMPLEND
9907
9908FCIMPL2(void, ProfilingFCallHelper::FC_RemotingClientSendingMessage, GUID *pId, CLR_BOOL fIsAsync)
9909{
9910 FCALL_CONTRACT;
9911
9912#ifdef PROFILING_SUPPORTED
9913 // Need to erect a GC frame so that GCs can occur without a problem
9914 // within the profiler code.
9915
9916 // Note that we don't need to worry about pId moving around since
9917 // it is a value class declared on the stack and so GC doesn't
9918 // know about it.
9919
9920 _ASSERTE (!GCHeapUtilities::GetGCHeap()->IsHeapPointer(pId)); // should be on the stack, not in the heap
9921 HELPER_METHOD_FRAME_BEGIN_NOPOLL();
9922
9923 {
9924 BEGIN_PIN_PROFILER(CORProfilerPresent());
9925 GCX_PREEMP();
9926 if (CORProfilerTrackRemotingCookie())
9927 {
9928 g_profControlBlock.pProfInterface->GetGUID(pId);
9929 _ASSERTE(pId->Data1);
9930
9931 g_profControlBlock.pProfInterface->RemotingClientSendingMessage(pId, fIsAsync);
9932 }
9933 else
9934 {
9935 g_profControlBlock.pProfInterface->RemotingClientSendingMessage(NULL, fIsAsync);
9936 }
9937 END_PIN_PROFILER();
9938 }
9939 HELPER_METHOD_FRAME_END_POLL();
9940#endif // PROFILING_SUPPORTED
9941}
9942FCIMPLEND
9943
9944
9945FCIMPL2_VI(void, ProfilingFCallHelper::FC_RemotingClientReceivingReply, GUID id, CLR_BOOL fIsAsync)
9946{
9947 FCALL_CONTRACT;
9948
9949#ifdef PROFILING_SUPPORTED
9950 // Need to erect a GC frame so that GCs can occur without a problem
9951 // within the profiler code.
9952
9953    // Note that we don't need to worry about the GUID moving around since
9954    // it is a value class declared on the stack and so the GC doesn't
9955    // know about it.
9956
9957 HELPER_METHOD_FRAME_BEGIN_NOPOLL();
9958
9959
9960 {
9961 BEGIN_PIN_PROFILER(CORProfilerPresent());
9962 GCX_PREEMP();
9963 if (CORProfilerTrackRemotingCookie())
9964 {
9965 g_profControlBlock.pProfInterface->RemotingClientReceivingReply(&id, fIsAsync);
9966 }
9967 else
9968 {
9969 g_profControlBlock.pProfInterface->RemotingClientReceivingReply(NULL, fIsAsync);
9970 }
9971 END_PIN_PROFILER();
9972 }
9973
9974 HELPER_METHOD_FRAME_END_POLL();
9975#endif // PROFILING_SUPPORTED
9976}
9977FCIMPLEND
9978
9979
9980FCIMPL2_VI(void, ProfilingFCallHelper::FC_RemotingServerReceivingMessage, GUID id, CLR_BOOL fIsAsync)
9981{
9982 FCALL_CONTRACT;
9983
9984#ifdef PROFILING_SUPPORTED
9985 // Need to erect a GC frame so that GCs can occur without a problem
9986 // within the profiler code.
9987
9988    // Note that we don't need to worry about the GUID moving around since
9989    // it is a value class declared on the stack and so the GC doesn't
9990    // know about it.
9991
9992 HELPER_METHOD_FRAME_BEGIN_NOPOLL();
9993
9994 {
9995 BEGIN_PIN_PROFILER(CORProfilerPresent());
9996 GCX_PREEMP();
9997 if (CORProfilerTrackRemotingCookie())
9998 {
9999 g_profControlBlock.pProfInterface->RemotingServerReceivingMessage(&id, fIsAsync);
10000 }
10001 else
10002 {
10003 g_profControlBlock.pProfInterface->RemotingServerReceivingMessage(NULL, fIsAsync);
10004 }
10005 END_PIN_PROFILER();
10006 }
10007
10008 HELPER_METHOD_FRAME_END_POLL();
10009#endif // PROFILING_SUPPORTED
10010}
10011FCIMPLEND
10012
FCIMPL2(void, ProfilingFCallHelper::FC_RemotingServerSendingReply, GUID *pId, CLR_BOOL fIsAsync)
{
    FCALL_CONTRACT;

#ifdef PROFILING_SUPPORTED
    // Need to erect a GC frame so that GCs can occur without a problem
    // within the profiler code.

    // Note that we don't need to worry about pId moving around since
    // it is a value class declared on the stack and so GC doesn't
    // know about it.

    HELPER_METHOD_FRAME_BEGIN_NOPOLL();

    {
        BEGIN_PIN_PROFILER(CORProfilerPresent());
        GCX_PREEMP();
        if (CORProfilerTrackRemotingCookie())
        {
            g_profControlBlock.pProfInterface->GetGUID(pId);
            _ASSERTE(pId->Data1);

            g_profControlBlock.pProfInterface->RemotingServerSendingReply(pId, fIsAsync);
        }
        else
        {
            g_profControlBlock.pProfInterface->RemotingServerSendingReply(NULL, fIsAsync);
        }
        END_PIN_PROFILER();
    }

    HELPER_METHOD_FRAME_END_POLL();
#endif // PROFILING_SUPPORTED
}
FCIMPLEND
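
// Illustrative sketch (not part of the EE): how a profiler might consume the remoting
// cookie that the FCALL helpers above deliver through ICorProfilerCallback. The callback
// signatures (RemotingClientSendingMessage / RemotingClientReceivingReply) are the public
// profiling API; CMyProfiler, m_pendingCalls (a hypothetical GUID-keyed map), and
// LogRoundTrip are made-up profiler-side names used only for this sketch.
//
//     HRESULT STDMETHODCALLTYPE CMyProfiler::RemotingClientSendingMessage(GUID * pCookie, BOOL fIsAsync)
//     {
//         // Remember the cookie so the matching reply (which carries the same GUID)
//         // can be correlated with this send.
//         m_pendingCalls.Insert(*pCookie, GetTickCount64());
//         return S_OK;
//     }
//
//     HRESULT STDMETHODCALLTYPE CMyProfiler::RemotingClientReceivingReply(GUID * pCookie, BOOL fIsAsync)
//     {
//         // Pair the reply with the earlier send via the cookie.
//         ULONGLONG sendTime;
//         if (m_pendingCalls.Remove(*pCookie, &sendTime))
//         {
//             LogRoundTrip(*pCookie, GetTickCount64() - sendTime);
//         }
//         return S_OK;
//     }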

//*******************************************************************************************
// These do a lot of work for us, setting up Frames, gathering arg info and resolving generics.
//*******************************************************************************************

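// Illustrative sketch (not part of the EE): the path by which the ELT intermediaries below
// get invoked. During ICorProfilerCallback::Initialize, a profiler registers its slow-path
// "WithInfo" hooks via ICorProfilerInfo3::SetEnterLeaveFunctionHooks3WithInfo; the JIT then
// emits ELT probes that route through ProfileEnter / ProfileLeave / ProfileTailcall.
// MyEnter3WithInfo and g_pInfo are hypothetical profiler-side names used only for this sketch.
//
//     void STDMETHODCALLTYPE MyEnter3WithInfo(FunctionIDOrClientID functionIDOrClientID,
//                                             COR_PRF_ELT_INFO eltInfo)
//     {
//         // From inside this callback the profiler may ask the EE for frame/argument
//         // details for this exact probe (here just querying the required buffer size).
//         COR_PRF_FRAME_INFO frameInfo;
//         ULONG cbArgInfo = 0;
//         g_pInfo->GetFunctionEnter3Info(functionIDOrClientID.functionID, eltInfo,
//                                        &frameInfo, &cbArgInfo, NULL);
//     }
//
//     // Inside ICorProfilerCallback::Initialize, after QI'ing for ICorProfilerInfo3:
//     //     g_pInfo->SetEnterLeaveFunctionHooks3WithInfo(MyEnter3WithInfo,
//     //                                                  MyLeave3WithInfo,
//     //                                                  MyTailcall3WithInfo);
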
HCIMPL2(EXTERN_C void, ProfileEnter, UINT_PTR clientData, void * platformSpecificHandle)
{
    FCALL_CONTRACT;

#ifdef PROFILING_SUPPORTED

#ifdef PROF_TEST_ONLY_FORCE_ELT
    // If this test-only flag is set, it's possible we might not have a profiler
    // attached, or might not have any of the hooks set. See
    // code:ProfControlBlock#TestOnlyELT
    if (g_profControlBlock.fTestOnlyForceEnterLeave)
    {
        if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
            (
                (g_profControlBlock.pProfInterface->GetEnterHook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetEnter2Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetEnter3Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() == NULL)
            )
           )
        {
            return;
        }
    }
#endif // PROF_TEST_ONLY_FORCE_ELT

    // ELT3 Fast-Path hooks should be NULL when ELT intermediary is used.
    _ASSERTE(g_profControlBlock.pProfInterface->GetEnter3Hook() == NULL);
    _ASSERTE(GetThread()->PreemptiveGCDisabled());
    _ASSERTE(platformSpecificHandle != NULL);

    // Set up a frame
    HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);

    // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
    // frame, like we're about to do.
    SetCallbackStateFlagsHolder csf(
        COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);

    COR_PRF_ELT_INFO_INTERNAL eltInfo;
    eltInfo.platformSpecificHandle = platformSpecificHandle;

    //
    // CLR v4 Slow-Path ELT
    //
    if (g_profControlBlock.pProfInterface->GetEnter3WithInfoHook() != NULL)
    {
        FunctionIDOrClientID functionIDOrClientID;
        functionIDOrClientID.clientID = clientData;
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetEnter3WithInfoHook()(
            functionIDOrClientID,
            (COR_PRF_ELT_INFO)&eltInfo);
        goto LExit;
    }

    if (g_profControlBlock.pProfInterface->GetEnter2Hook() != NULL)
    {
        // If we have run out of heap memory, the content of the mapping table has become
        // stale, so all Whidbey ELT hooks must be turned off.
        if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
        {
            goto LExit;
        }

        // If ELT2 is in use, FunctionID will be returned to the JIT to be embedded into the ELT3 probes
        // instead of using clientID, because the profiler may map several functionIDs to a clientID to
        // do things like code coverage analysis. FunctionID to clientID is a one-to-one relationship,
        // while the reverse may not be. Therefore, FunctionID is used as the key to retrieve the
        // corresponding clientID from the internal FunctionID hash table (see the illustrative
        // sketch just below).
        FunctionID functionId = clientData;
        _ASSERTE(functionId != NULL);
        clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);

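        // Illustrative sketch (not part of the EE): where the cached clientID originates.
        // A profiler using the ELT2-era hooks may install a FunctionIDMapper during
        // Initialize; whatever the mapper returns is the clientID that
        // LookupClientIDFromCache hands back above. MyFunctionIDMapper,
        // g_interestingFunctions, and FunctionInfo are hypothetical names for this sketch.
        //
        //     UINT_PTR __stdcall MyFunctionIDMapper(FunctionID funcId, BOOL * pbHookFunction)
        //     {
        //         // Only ask for enter/leave probes on functions the profiler cares about,
        //         // and hand back an opaque cookie of the profiler's choosing as the clientID.
        //         *pbHookFunction = (g_interestingFunctions.count(funcId) != 0);
        //         return reinterpret_cast<UINT_PTR>(new FunctionInfo(funcId));
        //     }
        //
        //     // During ICorProfilerCallback::Initialize:
        //     //     pInfo->SetFunctionIDMapper(MyFunctionIDMapper);
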
        //
        // Whidbey Fast-Path ELT
        //
        if (CORProfilerELT2FastPathEnterEnabled())
        {
            REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
            g_profControlBlock.pProfInterface->GetEnter2Hook()(
                functionId,
                clientData,
                NULL,
                NULL);
            goto LExit;
        }

        //
        // Whidbey Slow-Path ELT
        //
        ProfileSetFunctionIDInPlatformSpecificHandle(platformSpecificHandle, functionId);

        COR_PRF_FRAME_INFO frameInfo = NULL;
        COR_PRF_FUNCTION_ARGUMENT_INFO * pArgumentInfo = NULL;
        ULONG ulArgInfoSize = 0;

        if (CORProfilerFunctionArgsEnabled())
        {
            // The loader won't trigger a GC or throw for already loaded argument types.
            ENABLE_FORBID_GC_LOADER_USE_IN_THIS_SCOPE();

            //
            // Find the method this is referring to, so we can get the signature
            //
            MethodDesc * pMethodDesc = FunctionIdToMethodDesc(functionId);
            MetaSig metaSig(pMethodDesc);

            NewHolder<ProfileArgIterator> pProfileArgIterator;

            {
                // Can handle E_OUTOFMEMORY from ProfileArgIterator.
                FAULT_NOT_FATAL();

                pProfileArgIterator = new (nothrow) ProfileArgIterator(&metaSig, platformSpecificHandle);

                if (pProfileArgIterator == NULL)
                {
                    goto LExit;
                }
            }

            ULONG32 count = pProfileArgIterator->GetNumArgs();

            if (metaSig.HasThis())
            {
                count++;
            }

            ulArgInfoSize = sizeof(COR_PRF_FUNCTION_ARGUMENT_INFO) + count * sizeof(COR_PRF_FUNCTION_ARGUMENT_RANGE);
            pArgumentInfo = (COR_PRF_FUNCTION_ARGUMENT_INFO *)_alloca(ulArgInfoSize);
        }

        HRESULT hr = ProfilingGetFunctionEnter3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &ulArgInfoSize, pArgumentInfo);

        _ASSERTE(hr == S_OK);
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetEnter2Hook()(functionId, clientData, frameInfo, pArgumentInfo);

        goto LExit;
    }
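
    // Illustrative sketch (not part of the EE): how the COR_PRF_FUNCTION_ARGUMENT_INFO block
    // built on the ELT2 slow path above looks from the profiler's FunctionEnter2 hook. Each
    // range describes a contiguous run of argument bytes (including the optional "this").
    // Register-preservation and other requirements of a real Enter2 hook are ignored here;
    // DumpArgs is a hypothetical helper name.
    //
    //     void DumpArgs(FunctionID funcId, UINT_PTR clientData,
    //                   COR_PRF_FRAME_INFO frameInfo,
    //                   COR_PRF_FUNCTION_ARGUMENT_INFO * pArgInfo)
    //     {
    //         if (pArgInfo == NULL)
    //             return;
    //         for (ULONG i = 0; i < pArgInfo->numRanges; i++)
    //         {
    //             const COR_PRF_FUNCTION_ARGUMENT_RANGE & range = pArgInfo->ranges[i];
    //             // range.startAddress points at range.length bytes of argument data
    //             // that are only valid for the duration of this callback; copy them
    //             // out here if the profiler needs them later.
    //         }
    //     }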

    // We will not be here unless the jit'd or ngen'd function we're about to enter
    // was backpatched with this wrapper around the profiler's hook, and that
    // wouldn't have happened unless the profiler supplied us with a hook
    // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
    // an error unless it's called in the profiler's Initialize(), so a profiler can't change
    // its mind about where the hooks are.)
    _ASSERTE(g_profControlBlock.pProfInterface->GetEnterHook() != NULL);

    // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
    // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
    // to enable the jitter to add enter/leave callouts independently of whether
    // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
    // the callouts quickly return and do nothing.)

    //
    // Everett ELT
    //
    {
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetEnterHook()((FunctionID)clientData);
    }

LExit:
    ;

    HELPER_METHOD_FRAME_END();      // Un-link the frame

#endif // PROFILING_SUPPORTED
}
HCIMPLEND

HCIMPL2(EXTERN_C void, ProfileLeave, UINT_PTR clientData, void * platformSpecificHandle)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();            // we pulse GC mode, so we are doing a poll

#ifdef PROFILING_SUPPORTED

#ifdef PROF_TEST_ONLY_FORCE_ELT
    // If this test-only flag is set, it's possible we might not have a profiler
    // attached, or might not have any of the hooks set. See
    // code:ProfControlBlock#TestOnlyELT
    if (g_profControlBlock.fTestOnlyForceEnterLeave)
    {
        if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
            (
                (g_profControlBlock.pProfInterface->GetLeaveHook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetLeave2Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetLeave3Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() == NULL)
            )
           )
        {
            return;
        }
    }
#endif // PROF_TEST_ONLY_FORCE_ELT

    // ELT3 Fast-Path hooks should be NULL when ELT intermediary is used.
    _ASSERTE(g_profControlBlock.pProfInterface->GetLeave3Hook() == NULL);
    _ASSERTE(GetThread()->PreemptiveGCDisabled());
    _ASSERTE(platformSpecificHandle != NULL);

    // Set up a frame
    HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);

    // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
    // frame, like we're about to do.
    SetCallbackStateFlagsHolder csf(
        COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);

    COR_PRF_ELT_INFO_INTERNAL eltInfo;
    eltInfo.platformSpecificHandle = platformSpecificHandle;

    //
    // CLR v4 Slow-Path ELT
    //
    if (g_profControlBlock.pProfInterface->GetLeave3WithInfoHook() != NULL)
    {
        FunctionIDOrClientID functionIDOrClientID;
        functionIDOrClientID.clientID = clientData;
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetLeave3WithInfoHook()(
            functionIDOrClientID,
            (COR_PRF_ELT_INFO)&eltInfo);
        goto LExit;
    }

    if (g_profControlBlock.pProfInterface->GetLeave2Hook() != NULL)
    {
        // If we have run out of heap memory, the content of the mapping table has become
        // stale, so all Whidbey ELT hooks must be turned off.
        if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
        {
            goto LExit;
        }

        // If ELT2 is in use, FunctionID will be returned to the JIT to be embedded into the ELT3 probes
        // instead of using clientID, because the profiler may map several functionIDs to a clientID to
        // do things like code coverage analysis. FunctionID to clientID is a one-to-one relationship,
        // while the reverse may not be. Therefore, FunctionID is used as the key to retrieve the
        // corresponding clientID from the internal FunctionID hash table.
        FunctionID functionId = clientData;
        _ASSERTE(functionId != NULL);
        clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);

        //
        // Whidbey Fast-Path ELT
        //
        if (CORProfilerELT2FastPathLeaveEnabled())
        {
            REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
            g_profControlBlock.pProfInterface->GetLeave2Hook()(
                functionId,
                clientData,
                NULL,
                NULL);
            goto LExit;
        }

        //
        // Whidbey Slow-Path ELT
        //
        COR_PRF_FRAME_INFO frameInfo = NULL;
        COR_PRF_FUNCTION_ARGUMENT_RANGE argumentRange;

        HRESULT hr = ProfilingGetFunctionLeave3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo, &argumentRange);
        _ASSERTE(hr == S_OK);

        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetLeave2Hook()(functionId, clientData, frameInfo, &argumentRange);
        goto LExit;
    }
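
    // Illustrative sketch (not part of the EE): the single COR_PRF_FUNCTION_ARGUMENT_RANGE
    // filled in on the slow path above describes where the return value lives at the instant
    // the profiler's FunctionLeave2 hook runs. Register-preservation requirements of a real
    // Leave2 hook are ignored here; ReadReturnValue is a hypothetical helper name.
    //
    //     void ReadReturnValue(FunctionID funcId, UINT_PTR clientData,
    //                          COR_PRF_FRAME_INFO frameInfo,
    //                          COR_PRF_FUNCTION_ARGUMENT_RANGE * pRetvalRange)
    //     {
    //         if ((pRetvalRange != NULL) && (pRetvalRange->length != 0))
    //         {
    //             // pRetvalRange->startAddress points at pRetvalRange->length bytes of
    //             // return-value data, valid only for the duration of this callback.
    //         }
    //     }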

    // We will not be here unless the jit'd or ngen'd function we're about to leave
    // was backpatched with this wrapper around the profiler's hook, and that
    // wouldn't have happened unless the profiler supplied us with a hook
    // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
    // an error unless it's called in the profiler's Initialize(), so a profiler can't change
    // its mind about where the hooks are.)
    _ASSERTE(g_profControlBlock.pProfInterface->GetLeaveHook() != NULL);

    // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
    // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
    // to enable the jitter to add enter/leave callouts independently of whether
    // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
    // the callouts quickly return and do nothing.)

    //
    // Everett ELT
    //
    {
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetLeaveHook()((FunctionID)clientData);
    }

LExit:
    ;

    HELPER_METHOD_FRAME_END();      // Un-link the frame

#endif // PROFILING_SUPPORTED
}
HCIMPLEND

HCIMPL2(EXTERN_C void, ProfileTailcall, UINT_PTR clientData, void * platformSpecificHandle)
{
    FCALL_CONTRACT;

    FC_GC_POLL_NOT_NEEDED();            // we pulse GC mode, so we are doing a poll

#ifdef PROFILING_SUPPORTED

#ifdef PROF_TEST_ONLY_FORCE_ELT
    // If this test-only flag is set, it's possible we might not have a profiler
    // attached, or might not have any of the hooks set. See
    // code:ProfControlBlock#TestOnlyELT
    if (g_profControlBlock.fTestOnlyForceEnterLeave)
    {
        if ((g_profControlBlock.pProfInterface.Load() == NULL) ||
            (
                (g_profControlBlock.pProfInterface->GetTailcallHook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetTailcall2Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetTailcall3Hook() == NULL) &&
                (g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() == NULL)
            )
           )
        {
            return;
        }
    }
#endif // PROF_TEST_ONLY_FORCE_ELT

    // ELT3 Fast-Path hooks should be NULL when ELT intermediary is used.
    _ASSERTE(g_profControlBlock.pProfInterface->GetTailcall3Hook() == NULL);
    _ASSERTE(GetThread()->PreemptiveGCDisabled());
    _ASSERTE(platformSpecificHandle != NULL);

    // Set up a frame
    HELPER_METHOD_FRAME_BEGIN_ATTRIB_NOPOLL(Frame::FRAME_ATTR_CAPTURE_DEPTH_2);

    // Our contract is FCALL_CONTRACT, which is considered triggers if you set up a
    // frame, like we're about to do.
    SetCallbackStateFlagsHolder csf(
        COR_PRF_CALLBACKSTATE_INCALLBACK | COR_PRF_CALLBACKSTATE_IN_TRIGGERS_SCOPE);

    COR_PRF_ELT_INFO_INTERNAL eltInfo;
    eltInfo.platformSpecificHandle = platformSpecificHandle;

    //
    // CLR v4 Slow-Path ELT
    //
    if (g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook() != NULL)
    {
        FunctionIDOrClientID functionIDOrClientID;
        functionIDOrClientID.clientID = clientData;
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetTailcall3WithInfoHook()(
            functionIDOrClientID,
            (COR_PRF_ELT_INFO)&eltInfo);
        goto LExit;
    }

    if (g_profControlBlock.pProfInterface->GetTailcall2Hook() != NULL)
    {
        // If we have run out of heap memory, the content of the mapping table has become
        // stale, so all Whidbey ELT hooks must be turned off.
        if (!g_profControlBlock.pProfInterface->IsClientIDToFunctionIDMappingEnabled())
        {
            goto LExit;
        }

        // If ELT2 is in use, FunctionID will be returned to the JIT to be embedded into the ELT3 probes
        // instead of using clientID, because the profiler may map several functionIDs to a clientID to
        // do things like code coverage analysis. FunctionID to clientID is a one-to-one relationship,
        // while the reverse may not be. Therefore, FunctionID is used as the key to retrieve the
        // corresponding clientID from the internal FunctionID hash table.
        FunctionID functionId = clientData;
        _ASSERTE(functionId != NULL);
        clientData = g_profControlBlock.pProfInterface->LookupClientIDFromCache(functionId);

        //
        // Whidbey Fast-Path ELT
        //
        if (CORProfilerELT2FastPathTailcallEnabled())
        {
            REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
            g_profControlBlock.pProfInterface->GetTailcall2Hook()(
                functionId,
                clientData,
                NULL);
            goto LExit;
        }

        //
        // Whidbey Slow-Path ELT
        //
        COR_PRF_FRAME_INFO frameInfo = NULL;

        HRESULT hr = ProfilingGetFunctionTailcall3Info(functionId, (COR_PRF_ELT_INFO)&eltInfo, &frameInfo);
        _ASSERTE(hr == S_OK);

        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetTailcall2Hook()(functionId, clientData, frameInfo);
        goto LExit;
    }

    // We will not be here unless the jit'd or ngen'd function we're about to tailcall
    // was backpatched with this wrapper around the profiler's hook, and that
    // wouldn't have happened unless the profiler supplied us with a hook
    // in the first place. (Note that SetEnterLeaveFunctionHooks* will return
    // an error unless it's called in the profiler's Initialize(), so a profiler can't change
    // its mind about where the hooks are.)
    _ASSERTE(g_profControlBlock.pProfInterface->GetTailcallHook() != NULL);

    // Note that we cannot assert CORProfilerTrackEnterLeave() (i.e., profiler flag
    // COR_PRF_MONITOR_ENTERLEAVE), because the profiler may decide whether
    // to enable the jitter to add enter/leave callouts independently of whether
    // the profiler actually has enter/leave hooks. (If the profiler has no such hooks,
    // the callouts quickly return and do nothing.)

    //
    // Everett ELT
    //
    {
        REMOVE_STACK_GUARD_FOR_PROFILER_CALL;
        g_profControlBlock.pProfInterface->GetTailcallHook()((FunctionID)clientData);
    }

LExit:
    ;

    HELPER_METHOD_FRAME_END();      // Un-link the frame

#endif // PROFILING_SUPPORTED
}
HCIMPLEND

