// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/**************************************************************/
/* gmscpu.h */
/**************************************************************/
/* HelperFrame defines the 'GET_STATE(machState)' macro, which
   figures out what the state of the machine will be when the
   current method returns. It then stores that state in the
   JIT_machState structure. */

/**************************************************************/

#ifndef __gmsAMD64_h__
#define __gmsAMD64_h__

#ifdef _DEBUG
class HelperMethodFrame;
struct MachState;
EXTERN_C MachState* __stdcall HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
#endif // _DEBUG

// A MachState indicates the register state of the processor at some point in time (usually
// just before or after a call is made). It can be created in one of two ways: either explicitly
// (when, for some reason, you already know the values of all the registers) or implicitly using
// the GET_STATE macros.
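//
// A hedged illustration (not actual VM code): once a MachState has been
// populated, a consumer typically checks validity before reading the return
// address, e.g.
//
//     MachState ms;
//     // ... filled in explicitly or via the GET_STATE / lazy-capture path ...
//     if (ms.isValid())
//     {
//         TADDR retAddr = ms.GetRetAddr();  // return address into the caller
//     }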

typedef DPTR(struct MachState) PTR_MachState;
struct MachState
{
    MachState()
    {
        LIMITED_METHOD_DAC_CONTRACT;
        INDEBUG(memset(this, 0xCC, sizeof(MachState));)
    }

    bool   isValid()    { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(dac_cast<TADDR>(_pRetAddr) != INVALID_POINTER_CC); return(_pRetAddr != 0); }
    TADDR* pRetAddr()   { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return(_pRetAddr); }
    TADDR  GetRetAddr() { LIMITED_METHOD_DAC_CONTRACT; _ASSERTE(isValid()); return *_pRetAddr; }
#ifndef DACCESS_COMPILE
    void SetRetAddr(TADDR* addr) { _ASSERTE(isValid()); _pRetAddr = addr; }
#endif

    friend class HelperMethodFrame;
    friend class CheckAsmOffsets;
    friend struct LazyMachState;
#ifdef _DEBUG
    friend MachState* __stdcall HelperMethodFrameConfirmState(HelperMethodFrame* frame, void* esiVal, void* ediVal, void* ebxVal, void* ebpVal);
#endif

protected:
    PCODE m_Rip;
    TADDR m_Rsp;

    //
    // These "capture" fields are READ ONLY once initialized by
    // LazyMachStateCaptureState. Because we race to update the
    // MachState when we do a stackwalk, we must not update any
    // state used to initialize the unwind from the captured
    // state to the managed caller.
    //
    // Note also that these fields need to be in the base struct
    // because the context pointers below may point up to these
    // fields.
    //
    CalleeSavedRegisters m_Capture;

    // context pointers for preserved registers
    CalleeSavedRegistersPointers m_Ptrs;

    PTR_TADDR _pRetAddr;

#ifdef FEATURE_PAL
    // On PAL, we don't always have the context pointers available due to
    // a limitation of the unwinding library. In that case, preserve
    // the unwound values.
    CalleeSavedRegisters m_Unwound;
#endif
};

/********************************************************************/
/* This allows you to defer the computation of the machine state
   until later. Note that we don't reuse slots, because we want
   this to be thread-safe without locks. */

EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);

typedef DPTR(struct LazyMachState) PTR_LazyMachState;
struct LazyMachState : public MachState
{
    // Compute the machine state of the processor as it will exist just
    // after the return, after at most 'funCallDepth' functions have
    // returned. If 'testFtn' is non-NULL, the return address is tested
    // at each return instruction encountered; if this test returns
    // non-NULL, stack walking stops (thus you can walk up to the point
    // where the return address matches some criteria).
    //
    // Normally this is called with funCallDepth=1 and testFtn = 0, so that
    // it returns the state of the processor after the function that called
    // 'captureState()'. See the illustrative sketch after this struct.
    void setLazyStateFromUnwind(MachState* copy);
    static void unwindLazyState(LazyMachState* baseState,
                                MachState* lazyState,
                                DWORD threadId,
                                int funCallDepth = 1,
                                HostCallPreference hostCallPreference = AllowHostCalls);

    friend class HelperMethodFrame;
    friend class CheckAsmOffsets;

    //
    // These "capture" fields are READ ONLY once initialized by
    // LazyMachStateCaptureState. Because we race to update the
    // MachState when we do a stackwalk, we must not update any
    // state used to initialize the unwind from the captured
    // state to the managed caller.
    //
    ULONG64 m_CaptureRip;
    ULONG64 m_CaptureRsp;
};
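
// A hedged sketch of the typical two-phase use (illustrative only; in the VM
// this is driven by the HelperMethodFrame machinery rather than written out
// by hand):
//
//     LazyMachState lazy;
//     LazyMachStateCaptureState(&lazy);        // cheap capture of Rip/Rsp
//     ...
//     // later, and only if the state is actually needed:
//     MachState unwound;
//     LazyMachState::unwindLazyState(&lazy, &unwound, 0 /* threadId */);
//     lazy.setLazyStateFromUnwind(&unwound);   // publish the unwound state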

inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
{
    LIMITED_METHOD_CONTRACT;

#if defined(DACCESS_COMPILE)
    // This function cannot be called in DAC because DAC cannot update target memory.
    DacError(E_FAIL);
    return;

#else // !DACCESS_COMPILE
    this->m_Rip = copy->m_Rip;
    this->m_Rsp = copy->m_Rsp;

#ifdef FEATURE_PAL
    this->m_Unwound = copy->m_Unwound;
#endif

    // Capture* has already been set, so there is no need to touch it

    // loop over the nonvolatile context pointers and make
    // sure to properly copy interior pointers into the
    // new struct

    PULONG64* pSrc = (PULONG64 *)&copy->m_Ptrs;
    PULONG64* pDst = (PULONG64 *)&this->m_Ptrs;

    const PULONG64 LowerBoundDst = (PULONG64) this;
    const PULONG64 LowerBoundSrc = (PULONG64) copy;

    const PULONG64 UpperBoundSrc = (PULONG64) ((BYTE*)LowerBoundSrc + sizeof(*copy));

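    // Pointers that fall outside [copy, copy + sizeof(*copy)) refer to stack
    // locations shared by both structs (e.g. caller frames the unwinder walked
    // through), so they can be copied over unchanged.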
    for (int i = 0; i < NUM_CALLEE_SAVED_REGISTERS; i++)
    {
        PULONG64 valueSrc = *pSrc++;

        if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc))
        {
            // make any pointer interior to 'src' interior to 'dst'
            valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst);
        }

        *pDst++ = valueSrc;
    }

    // this has to be last because we depend on write ordering to
    // synchronize the race implicit in updating this struct
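    // (note that _pRetAddr doubles as the 'valid' flag checked by isValid(),
    // so it must be published only after every other field is in place)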
    VolatileStore(&_pRetAddr, (PTR_TADDR)(TADDR)&m_Rip);

#endif // !DACCESS_COMPILE
}

// Do the initial capture of the machine state. This is meant to be
// as lightweight as possible, as we may never need the state that
// we capture. To complete the process, call 'getMachState()', which
// finishes the computation.
EXTERN_C void LazyMachStateCaptureState(struct LazyMachState *pState);

// CAPTURE_STATE captures just enough register state so that the state of the
// processor can be determined just after the routine that has CAPTURE_STATE in
// it returns.

#define CAPTURE_STATE(machState, ret) \
    LazyMachStateCaptureState(machState)
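
// For example (a hedged sketch; real callers are the helper-frame code paths,
// not hand-written call sites):
//
//     LazyMachState __ms;
//     CAPTURE_STATE(&__ms, return);   // expands to LazyMachStateCaptureState(&__ms);
//                                     // the 'ret' argument is unused on AMD64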

#endif // __gmsAMD64_h__