// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

#ifndef STUBLINKERX86_H_
#define STUBLINKERX86_H_

#include "stublink.h"

struct ArrayOpScript;
class MetaSig;

extern PCODE GetPreStubEntryPoint();

//=======================================================================

#define X86_INSTR_CALL_REL32            0xE8        // call rel32
#define X86_INSTR_CALL_IND              0x15FF      // call dword ptr[addr32]
#define X86_INSTR_CALL_IND_EAX          0x10FF      // call dword ptr[eax]
#define X86_INSTR_CALL_IND_EAX_OFFSET   0x50FF      // call dword ptr[eax + offset] ; where offset follows these 2 bytes
#define X86_INSTR_CALL_EAX              0xD0FF      // call eax
#define X86_INSTR_JMP_REL32             0xE9        // jmp rel32
#define X86_INSTR_JMP_IND               0x25FF      // jmp dword ptr[addr32]
#define X86_INSTR_JMP_EAX               0xE0FF      // jmp eax
#define X86_INSTR_MOV_EAX_IMM32         0xB8        // mov eax, imm32
#define X86_INSTR_MOV_EAX_ECX_IND       0x018b      // mov eax, [ecx]
#define X86_INSTR_CMP_IND_ECX_IMM32     0x3981      // cmp [ecx], imm32
#define X86_INSTR_MOV_RM_R              0x89        // mov r/m,reg

#define X86_INSTR_MOV_AL                0xB0        // mov al, imm8
#define X86_INSTR_JMP_REL8              0xEB        // jmp short rel8

#define X86_INSTR_NOP                   0x90        // nop
#define X86_INSTR_NOP3_1                0x9090      // 1st word of 3-byte nop
#define X86_INSTR_NOP3_3                0x90        // 3rd byte of 3-byte nop
#define X86_INSTR_INT3                  0xCC        // int 3
#define X86_INSTR_HLT                   0xF4        // hlt

#define X86_INSTR_MOVAPS_R_RM           0x280F      // movaps xmm1, xmm2/mem128
#define X86_INSTR_MOVAPS_RM_R           0x290F      // movaps xmm1/mem128, xmm2
#define X86_INSTR_MOVLPS_R_RM           0x120F      // movlps xmm1, mem64
#define X86_INSTR_MOVLPS_RM_R           0x130F      // movlps mem64, xmm1
#define X86_INSTR_MOVUPS_R_RM           0x100F      // movups xmm1, xmm2/mem128
#define X86_INSTR_MOVUPS_RM_R           0x110F      // movups xmm1/mem128, xmm2
#define X86_INSTR_XORPS                 0x570F      // xorps xmm1, xmm2/mem128

#ifdef _TARGET_AMD64_
#define X86_INSTR_MOV_R10_IMM64         0xBA49      // mov r10, imm64
#endif
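
// Note: the two-byte opcode values above are byte-swapped so that, written to memory as a
// little-endian 16-bit value, they lay down the bytes in instruction order; e.g. 0x15FF
// becomes the byte sequence FF 15 ("call dword ptr [addr32]").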

//----------------------------------------------------------------------
// Encodes X86 registers. The numbers are chosen to match Intel's opcode
// encoding.
//----------------------------------------------------------------------
enum X86Reg
{
    kEAX = 0,
    kECX = 1,
    kEDX = 2,
    kEBX = 3,
    // kESP intentionally omitted because of its irregular treatment in MOD/RM
    kEBP = 5,
    kESI = 6,
    kEDI = 7,

#ifdef _TARGET_X86_
    NumX86Regs = 8,
#endif // _TARGET_X86_

    kXMM0 = 0,
    kXMM1 = 1,
    kXMM2 = 2,
    kXMM3 = 3,
    kXMM4 = 4,
    kXMM5 = 5,
#if defined(_TARGET_AMD64_)
    kXMM6 = 6,
    kXMM7 = 7,
    kXMM8 = 8,
    kXMM9 = 9,
    kXMM10 = 10,
    kXMM11 = 11,
    kXMM12 = 12,
    kXMM13 = 13,
    kXMM14 = 14,
    kXMM15 = 15,
    // Integer registers commence here
    kRAX = 0,
    kRCX = 1,
    kRDX = 2,
    kRBX = 3,
    // kRSP intentionally omitted because of its irregular treatment in MOD/RM
    kRBP = 5,
    kRSI = 6,
    kRDI = 7,
    kR8  = 8,
    kR9  = 9,
    kR10 = 10,
    kR11 = 11,
    kR12 = 12,
    kR13 = 13,
    kR14 = 14,
    kR15 = 15,
    NumX86Regs = 16,

#endif // _TARGET_AMD64_

    // We use "push ecx" instead of "sub esp, sizeof(LPVOID)"
    kDummyPushReg = kECX
};


// Use this only if you are absolutely sure that the instruction format
// handles it. This is not declared as X86Reg so that users are forced
// to add a cast and think about what exactly they are doing.
const int kESP_Unsafe = 4;
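
// Illustrative use (hypothetical call site) -- emitters that document ESP support take it via
// an explicit cast, e.g.:
//
//     X86EmitOp(0x8b, kEAX, (X86Reg)kESP_Unsafe, 8);   // mov eax, [esp+8]
//
// (see the X86EmitOp comment below for which operands may legally be ESP)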

//----------------------------------------------------------------------
// Encodes X86 conditional jumps. The numbers are chosen to match
// Intel's opcode encoding.
//----------------------------------------------------------------------
class X86CondCode {
public:
    enum cc {
        kJA   = 0x7,
        kJAE  = 0x3,
        kJB   = 0x2,
        kJBE  = 0x6,
        kJC   = 0x2,
        kJE   = 0x4,
        kJZ   = 0x4,
        kJG   = 0xf,
        kJGE  = 0xd,
        kJL   = 0xc,
        kJLE  = 0xe,
        kJNA  = 0x6,
        kJNAE = 0x2,
        kJNB  = 0x3,
        kJNBE = 0x7,
        kJNC  = 0x3,
        kJNE  = 0x5,
        kJNG  = 0xe,
        kJNGE = 0xc,
        kJNL  = 0xd,
        kJNLE = 0xf,
        kJNO  = 0x1,
        kJNP  = 0xb,
        kJNS  = 0x9,
        kJNZ  = 0x5,
        kJO   = 0x0,
        kJP   = 0xa,
        kJPE  = 0xa,
        kJPO  = 0xb,
        kJS   = 0x8,
    };
};
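
// The cc value is the low nibble of the corresponding Jcc opcode: 0x70+cc for the rel8 form
// and 0F 80+cc for the rel32 form (e.g. kJE = 0x4 -> 74 rel8 / 0F 84 rel32).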

//----------------------------------------------------------------------
// StubLinker with extensions for generating X86 code.
//----------------------------------------------------------------------
class StubLinkerCPU : public StubLinker
{
public:

#ifdef _TARGET_AMD64_
    enum X86OperandSize
    {
        k32BitOp,
        k64BitOp,
    };
#endif
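    // On AMD64, k64BitOp selects the REX.W-prefixed (64-bit operand size) encoding of the
    // emitted instruction; k32BitOp leaves the default 32-bit operand size.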

    VOID X86EmitAddReg(X86Reg reg, INT32 imm32);
    VOID X86EmitAddRegReg(X86Reg destreg, X86Reg srcReg);
    VOID X86EmitSubReg(X86Reg reg, INT32 imm32);
    VOID X86EmitSubRegReg(X86Reg destreg, X86Reg srcReg);

    VOID X86EmitMovRegReg(X86Reg destReg, X86Reg srcReg);
    VOID X86EmitMovSPReg(X86Reg srcReg);
    VOID X86EmitMovRegSP(X86Reg destReg);

    VOID X86EmitPushReg(X86Reg reg);
    VOID X86EmitPopReg(X86Reg reg);
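    // Note: regSet in the next two helpers is presumably a bitmask of registers, with bit
    // (1 << reg) set for each X86Reg to push or pop.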
    VOID X86EmitPushRegs(unsigned regSet);
    VOID X86EmitPopRegs(unsigned regSet);
    VOID X86EmitPushImm32(UINT value);
    VOID X86EmitPushImm32(CodeLabel &pTarget);
    VOID X86EmitPushImm8(BYTE value);
    VOID X86EmitPushImmPtr(LPVOID value WIN64_ARG(X86Reg tmpReg = kR10));

    VOID X86EmitCmpRegImm32(X86Reg reg, INT32 imm32);                  // cmp reg, imm32
    VOID X86EmitCmpRegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp [reg+offs], imm32
#ifdef _TARGET_AMD64_
    VOID X64EmitCmp32RegIndexImm32(X86Reg reg, INT32 offs, INT32 imm32); // cmp dword ptr [reg+offs], imm32

    VOID X64EmitMovXmmXmm(X86Reg destXmmreg, X86Reg srcXmmReg);
    VOID X64EmitMovdqaFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
    VOID X64EmitMovdqaToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
    VOID X64EmitMovSDFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
    VOID X64EmitMovSDToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
    VOID X64EmitMovSSFromMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
    VOID X64EmitMovSSToMem(X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);

    VOID X64EmitMovXmmWorker(BYTE prefix, BYTE opcode, X86Reg Xmmreg, X86Reg baseReg, __int32 ofs = 0);
#endif

    VOID X86EmitZeroOutReg(X86Reg reg);
    VOID X86EmitJumpReg(X86Reg reg);

    VOID X86EmitOffsetModRM(BYTE opcode, X86Reg altreg, X86Reg indexreg, __int32 ofs);
    VOID X86EmitOffsetModRmSIB(BYTE opcode, X86Reg opcodeOrReg, X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs);

    VOID X86EmitTailcallWithESPAdjust(CodeLabel *pTarget, INT32 imm32);
    VOID X86EmitTailcallWithSinglePop(CodeLabel *pTarget, X86Reg reg);

    VOID X86EmitNearJump(CodeLabel *pTarget);
    VOID X86EmitCondJump(CodeLabel *pTarget, X86CondCode::cc condcode);
    VOID X86EmitCall(CodeLabel *target, int iArgBytes);
    VOID X86EmitReturn(WORD wArgBytes);
#ifdef _TARGET_AMD64_
    VOID X86EmitLeaRIP(CodeLabel *target, X86Reg reg);
#endif

    VOID X86EmitCurrentThreadFetch(X86Reg dstreg, unsigned preservedRegSet);

    VOID X86EmitIndexRegLoad(X86Reg dstreg, X86Reg srcreg, __int32 ofs = 0);
    VOID X86EmitIndexRegStore(X86Reg dstreg, __int32 ofs, X86Reg srcreg);
#if defined(_TARGET_AMD64_)
    VOID X86EmitIndexRegStoreRSP(__int32 ofs, X86Reg srcreg);
    VOID X86EmitIndexRegStoreR12(__int32 ofs, X86Reg srcreg);
#endif // defined(_TARGET_AMD64_)

    VOID X86EmitIndexPush(X86Reg srcreg, __int32 ofs);
    VOID X86EmitBaseIndexPush(X86Reg baseReg, X86Reg indexReg, __int32 scale, __int32 ofs);
    VOID X86EmitIndexPop(X86Reg srcreg, __int32 ofs);
    VOID X86EmitIndexLea(X86Reg dstreg, X86Reg srcreg, __int32 ofs);
#if defined(_TARGET_AMD64_)
    VOID X86EmitIndexLeaRSP(X86Reg dstreg, X86Reg srcreg, __int32 ofs);
#endif // defined(_TARGET_AMD64_)

    VOID X86EmitSPIndexPush(__int32 ofs);
    VOID X86EmitSubEsp(INT32 imm32);
    VOID X86EmitAddEsp(INT32 imm32);
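    // X86EmitEspOffset below emits "opcode" with an ESP-relative memory operand -- roughly
    // "opcode altreg, [esp+ofs]" or "opcode [esp+ofs], altreg", depending on the opcode
    // (see X86EmitRegSave further down for a typical use).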
    VOID X86EmitEspOffset(BYTE opcode,
                          X86Reg altreg,
                          __int32 ofs
                          AMD64_ARG(X86OperandSize OperandSize = k64BitOp)
                          );
    VOID X86EmitPushEBPframe();

#if defined(_TARGET_X86_)
#if defined(PROFILING_SUPPORTED) && !defined(FEATURE_STUBS_AS_IL)
    // These are used to emit calls to notify the profiler of transitions in and out of
    // managed code through COM->COM+ interop or N/Direct
    VOID EmitProfilerComCallProlog(TADDR pFrameVptr, X86Reg regFrame);
    VOID EmitProfilerComCallEpilog(TADDR pFrameVptr, X86Reg regFrame);
#endif // PROFILING_SUPPORTED && !FEATURE_STUBS_AS_IL
#endif // _TARGET_X86_


    // Emits the most efficient form of the operation:
    //
    //    opcode altreg, [basereg + scaledreg*scale + ofs]
    //
    // or
    //
    //    opcode [basereg + scaledreg*scale + ofs], altreg
    //
    // (the opcode determines which comes first.)
    //
    //
    // Limitations:
    //
    //    scale must be 0, 1, 2, 4 or 8.
    //    if scale == 0, scaledreg is ignored.
    //    basereg and altreg may be equal to 4 (ESP) but scaledreg cannot
    //    for some opcodes, "altreg" may actually select an operation
    //      rather than a second register argument.
    //
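    // Illustrative example (hypothetical call site):
    //
    //     X86EmitOp(0x8b, kEAX, kECX, 8, kESI, 4);    // mov eax, [ecx + esi*4 + 8]
    //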

    VOID X86EmitOp(WORD    opcode,
                   X86Reg  altreg,
                   X86Reg  basereg,
                   __int32 ofs = 0,
                   X86Reg  scaledreg = (X86Reg)0,
                   BYTE    scale = 0
                   AMD64_ARG(X86OperandSize OperandSize = k32BitOp)
                   );

#ifdef _TARGET_AMD64_
    FORCEINLINE
    VOID X86EmitOp(WORD    opcode,
                   X86Reg  altreg,
                   X86Reg  basereg,
                   __int32 ofs,
                   X86OperandSize OperandSize
                   )
    {
        X86EmitOp(opcode, altreg, basereg, ofs, (X86Reg)0, 0, OperandSize);
    }
#endif // _TARGET_AMD64_

    // Emits
    //
    //    opcode altreg, modrmreg
    //
    // or
    //
    //    opcode modrmreg, altreg
    //
    // (the opcode determines which one comes first)
    //
    // For single-operand opcodes, "altreg" actually selects
    // an operation rather than a register.
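    //
    // Illustrative example (hypothetical call site):
    //
    //     X86EmitR2ROp(0x8b, kEAX, kECX);    // mov eax, ecx (on AMD64, the default
    //                                        // k64BitOp operand size makes this mov rax, rcx)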

    VOID X86EmitR2ROp(WORD opcode,
                      X86Reg altreg,
                      X86Reg modrmreg
                      AMD64_ARG(X86OperandSize OperandSize = k64BitOp)
                      );

    VOID X86EmitRegLoad(X86Reg reg, UINT_PTR imm);
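
    // Stores "altreg" at [esp+ofs] ("mov [esp+ofs], reg") and records the save in the stub's
    // unwind info.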
    VOID X86EmitRegSave(X86Reg altreg, __int32 ofs)
    {
        LIMITED_METHOD_CONTRACT;
        X86EmitEspOffset(0x89, altreg, ofs);
        // X86Reg values always fit in a byte.
        UnwindSavedReg(static_cast<UCHAR>(altreg), ofs);
    }
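
    // Emits the REX.W prefix (0x48) on AMD64 so that the following instruction uses 64-bit
    // operands; expands to nothing on x86.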
    VOID X86_64BitOperands ()
    {
        WRAPPER_NO_CONTRACT;
#ifdef _TARGET_AMD64_
        Emit8(0x48);
#endif
    }

    VOID EmitEnable(CodeLabel *pForwardRef);
    VOID EmitRareEnable(CodeLabel *pRejoinPoint);

    VOID EmitDisable(CodeLabel *pForwardRef, BOOL fCallIn, X86Reg ThreadReg);
    VOID EmitRareDisable(CodeLabel *pRejoinPoint);
    VOID EmitRareDisableHRESULT(CodeLabel *pRejoinPoint, CodeLabel *pExitPoint);

    VOID EmitSetup(CodeLabel *pForwardRef);
    VOID EmitRareSetup(CodeLabel* pRejoinPoint, BOOL fThrow);

#ifndef FEATURE_STUBS_AS_IL
    VOID EmitMethodStubProlog(TADDR pFrameVptr, int transitionBlockOffset);
    VOID EmitMethodStubEpilog(WORD numArgBytes, int transitionBlockOffset);

    VOID EmitCheckGSCookie(X86Reg frameReg, int gsCookieOffset);

#ifdef _TARGET_X86_
    void EmitComMethodStubProlog(TADDR pFrameVptr, CodeLabel** rgRareLabels,
                                 CodeLabel** rgRejoinLabels, BOOL bShouldProfile);

    void EmitComMethodStubEpilog(TADDR pFrameVptr, CodeLabel** rgRareLabels,
                                 CodeLabel** rgRejoinLabels, BOOL bShouldProfile);
#endif // _TARGET_X86_
#endif // !FEATURE_STUBS_AS_IL

    VOID EmitUnboxMethodStub(MethodDesc* pRealMD);
#if defined(FEATURE_SHARE_GENERIC_CODE)
    VOID EmitInstantiatingMethodStub(MethodDesc* pSharedMD, void* );
#endif // FEATURE_SHARE_GENERIC_CODE

#if defined(FEATURE_COMINTEROP) && defined(_TARGET_X86_)
    //========================================================================
    // shared Epilog for stubs that enter managed code from COM
    // uses a return thunk within the method desc
    void EmitSharedComMethodStubEpilog(TADDR pFrameVptr,
                                       CodeLabel** rgRareLabels,
                                       CodeLabel** rgRejoinLabels,
                                       unsigned offsetReturnThunk,
                                       BOOL bShouldProfile);
#endif // FEATURE_COMINTEROP && _TARGET_X86_

#ifndef FEATURE_STUBS_AS_IL
    //===========================================================================
    // Computes hash code for MulticastDelegate.Invoke()
    static UINT_PTR HashMulticastInvoke(MetaSig* pSig);

#ifdef _TARGET_X86_
    //===========================================================================
    // Emits code for Delegate.Invoke() for any delegate type
    VOID EmitDelegateInvoke();
#endif // _TARGET_X86_

    //===========================================================================
    // Emits code for MulticastDelegate.Invoke() - sig specific
    VOID EmitMulticastInvoke(UINT_PTR hash);

    //===========================================================================
    // Emits code for Delegate.Invoke() on delegates that recorded creator assembly
    VOID EmitSecureDelegateInvoke(UINT_PTR hash);
#endif // !FEATURE_STUBS_AS_IL

    //===========================================================================
    // Emits code to adjust for a static delegate target.
    VOID EmitShuffleThunk(struct ShuffleEntry *pShuffleEntryArray);


#ifndef FEATURE_ARRAYSTUB_AS_IL
    //===========================================================================
    // Emits code to do an array operation.
    VOID EmitArrayOpStub(const ArrayOpScript*);

    // Worker function to emit throw helpers for array ops.
    VOID EmitArrayOpStubThrow(unsigned exConst, unsigned cbRetArg);
#endif

#ifndef FEATURE_STUBS_AS_IL
    //===========================================================================
    // Emits code to break into debugger
    VOID EmitDebugBreak();
#endif // !FEATURE_STUBS_AS_IL

#if defined(_DEBUG) && !defined(FEATURE_PAL)
    //===========================================================================
    // Emits code to log JITHelper access
    void EmitJITHelperLoggingThunk(PCODE pJitHelper, LPVOID helperFuncCount);
#endif

#ifdef _DEBUG
    VOID X86EmitDebugTrashReg(X86Reg reg);
#endif

#if defined(_DEBUG) && defined(STUBLINKER_GENERATES_UNWIND_INFO) && !defined(CROSSGEN_COMPILE)
    virtual VOID EmitUnwindInfoCheckWorker (CodeLabel *pCheckLabel);
    virtual VOID EmitUnwindInfoCheckSubfunction();
#endif

#ifdef _TARGET_AMD64_

    static Stub * CreateTailCallCopyArgsThunk(CORINFO_SIG_INFO * pSig,
                                              MethodDesc* pMD,
                                              CorInfoHelperTailCallSpecialHandling flags);

#endif // _TARGET_AMD64_

private:
    VOID X86EmitSubEspWorker(INT32 imm32);

public:
    static void Init();

};

inline TADDR rel32Decode(/*PTR_INT32*/ TADDR pRel32)
{
    LIMITED_METHOD_CONTRACT;
    SUPPORTS_DAC;
    return pRel32 + 4 + *PTR_INT32(pRel32);
}
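
// For example, for a "call rel32" instruction starting at address A, the rel32 field is at A+1
// and rel32Decode(A+1) returns (A+1) + 4 + rel32 -- the address of the following instruction
// plus the displacement, i.e. the call target.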

void rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, MethodDesc* pMD);
BOOL rel32SetInterlocked(/*PINT32*/ PVOID pRel32, TADDR target, TADDR expected, MethodDesc* pMD);

//------------------------------------------------------------------------
//
// Precode definitions
//
//------------------------------------------------------------------------

EXTERN_C VOID STDCALL PrecodeFixupThunk();

#ifdef _WIN64

#define OFFSETOF_PRECODE_TYPE              0
#define OFFSETOF_PRECODE_TYPE_CALL_OR_JMP  5
#define OFFSETOF_PRECODE_TYPE_MOV_R10      10

#define SIZEOF_PRECODE_BASE                16

#else

EXTERN_C VOID STDCALL PrecodeRemotingThunk();

#define OFFSETOF_PRECODE_TYPE              5
#define OFFSETOF_PRECODE_TYPE_CALL_OR_JMP  5
#define OFFSETOF_PRECODE_TYPE_MOV_RM_R     6

#define SIZEOF_PRECODE_BASE                8

#endif // _WIN64


#include <pshpack1.h>

// Invalid precode type
struct InvalidPrecode {
    // int3
    static const int Type = 0xCC;
};


// Regular precode
struct StubPrecode {

#ifdef _WIN64
    static const BYTE Type = 0x40;
    // mov r10,pMethodDesc
    // inc eax
    // jmp Stub
#else
    static const BYTE Type = 0xED;
    // mov eax,pMethodDesc
    // mov ebp,ebp
    // jmp Stub
#endif // _WIN64

    IN_WIN64(USHORT m_movR10;)
    IN_WIN32(BYTE   m_movEAX;)
    TADDR           m_pMethodDesc;
    IN_WIN32(BYTE   m_mov_rm_r;)
    BYTE            m_type;
    BYTE            m_jmp;
    INT32           m_rel32;

    void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator = NULL, BYTE type = StubPrecode::Type, TADDR target = NULL);

    TADDR GetMethodDesc()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        return m_pMethodDesc;
    }

    PCODE GetTarget()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        return rel32Decode(PTR_HOST_MEMBER_TADDR(StubPrecode, this, m_rel32));
    }

    void ResetTargetInterlocked()
    {
        CONTRACTL
        {
            THROWS;
            GC_TRIGGERS;
        }
        CONTRACTL_END;

        EnsureWritableExecutablePages(&m_rel32);
        rel32SetInterlocked(&m_rel32, GetPreStubEntryPoint(), (MethodDesc*)GetMethodDesc());
    }

    BOOL SetTargetInterlocked(TADDR target, TADDR expected)
    {
        CONTRACTL
        {
            THROWS;
            GC_TRIGGERS;
        }
        CONTRACTL_END;

        EnsureWritableExecutablePages(&m_rel32);
        return rel32SetInterlocked(&m_rel32, target, expected, (MethodDesc*)GetMethodDesc());
    }
};
IN_WIN64(static_assert_no_msg(offsetof(StubPrecode, m_movR10) == OFFSETOF_PRECODE_TYPE);)
IN_WIN64(static_assert_no_msg(offsetof(StubPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_R10);)
IN_WIN32(static_assert_no_msg(offsetof(StubPrecode, m_mov_rm_r) == OFFSETOF_PRECODE_TYPE);)
IN_WIN32(static_assert_no_msg(offsetof(StubPrecode, m_type) == OFFSETOF_PRECODE_TYPE_MOV_RM_R);)
typedef DPTR(StubPrecode) PTR_StubPrecode;


#ifdef HAS_NDIRECT_IMPORT_PRECODE

// NDirect import precode
// (This is a fake precode. The VTable slot does not point to it.)
struct NDirectImportPrecode : StubPrecode {

#ifdef _WIN64
    static const int Type = 0x48;
    // mov r10,pMethodDesc
    // dec eax
    // jmp NDirectImportThunk
#else
    static const int Type = 0xC0;
    // mov eax,pMethodDesc
    // mov eax,eax
    // jmp NDirectImportThunk
#endif // _WIN64

    void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);

    LPVOID GetEntrypoint()
    {
        LIMITED_METHOD_CONTRACT;
        return this;
    }
};
typedef DPTR(NDirectImportPrecode) PTR_NDirectImportPrecode;

#endif // HAS_NDIRECT_IMPORT_PRECODE


#ifdef HAS_FIXUP_PRECODE

// Fixup precode is used in NGen images when the prestub needs to perform only a one-time fixup.
// Once patched, the fixup precode is a simple jump; it does not have the two-instruction
// overhead of the regular precode.
struct FixupPrecode {

    static const int TypePrestub = 0x5E;
    // The entrypoint has to be 8-byte aligned so that the "call PrecodeFixupThunk" can be patched to "jmp NativeCode" atomically.
    // call PrecodeFixupThunk
    // db TypePrestub (pop esi)
    // db MethodDescChunkIndex
    // db PrecodeChunkIndex

    static const int Type = 0x5F;
    // After it has been patched to point to native code
    // jmp NativeCode
    // db Type (pop edi)

    BYTE  m_op;
    INT32 m_rel32;
    BYTE  m_type;
    BYTE  m_MethodDescChunkIndex;
    BYTE  m_PrecodeChunkIndex;
#ifdef HAS_FIXUP_PRECODE_CHUNKS
    // Fixup precode chunk is associated with MethodDescChunk. The layout of the fixup precode chunk is:
    //
    //  FixupPrecode     Entrypoint   PrecodeChunkIndex = 2
    //  FixupPrecode     Entrypoint   PrecodeChunkIndex = 1
    //  FixupPrecode     Entrypoint   PrecodeChunkIndex = 0
    //  TADDR            Base of MethodDescChunk
#else
    TADDR m_pMethodDesc;
#endif

    void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator, int iMethodDescChunkIndex = 0, int iPrecodeChunkIndex = 0);

#ifdef HAS_FIXUP_PRECODE_CHUNKS
    TADDR GetBase()
    {
        LIMITED_METHOD_CONTRACT;
        SUPPORTS_DAC;

        return dac_cast<TADDR>(this) + (m_PrecodeChunkIndex + 1) * sizeof(FixupPrecode);
    }
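
    // GetBase() above lands just past the last FixupPrecode of the chunk (PrecodeChunkIndex
    // counts down to 0 toward the base), i.e. on the TADDR holding the base of the
    // MethodDescChunk shown in the layout comment above.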

    TADDR GetMethodDesc();
#else // HAS_FIXUP_PRECODE_CHUNKS
    TADDR GetMethodDesc()
    {
        LIMITED_METHOD_CONTRACT;
        return m_pMethodDesc;
    }
#endif // HAS_FIXUP_PRECODE_CHUNKS

#ifdef FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS
    PCODE GetDynamicMethodPrecodeFixupJumpStub();
    PCODE GetDynamicMethodEntryJumpStub();
#endif // FIXUP_PRECODE_PREALLOCATE_DYNAMIC_METHOD_JUMP_STUBS

    PCODE GetTarget()
    {
        LIMITED_METHOD_DAC_CONTRACT;

        return rel32Decode(PTR_HOST_MEMBER_TADDR(FixupPrecode, this, m_rel32));
    }

    void ResetTargetInterlocked();
    BOOL SetTargetInterlocked(TADDR target, TADDR expected);

    static BOOL IsFixupPrecodeByASM(TADDR addr)
    {
        LIMITED_METHOD_CONTRACT;

        return *dac_cast<PTR_BYTE>(addr) == X86_INSTR_JMP_REL32;
    }

#ifdef FEATURE_PREJIT
    // Partial initialization. Used to save regrouped chunks.
    void InitForSave(int iPrecodeChunkIndex);

    void Fixup(DataImage *image, MethodDesc * pMD);
#endif

#ifdef DACCESS_COMPILE
    void EnumMemoryRegions(CLRDataEnumMemoryFlags flags);
#endif
};
IN_WIN32(static_assert_no_msg(offsetof(FixupPrecode, m_type) == OFFSETOF_PRECODE_TYPE));
IN_WIN64(static_assert_no_msg(offsetof(FixupPrecode, m_op) == OFFSETOF_PRECODE_TYPE);)
IN_WIN64(static_assert_no_msg(offsetof(FixupPrecode, m_type) == OFFSETOF_PRECODE_TYPE_CALL_OR_JMP);)

typedef DPTR(FixupPrecode) PTR_FixupPrecode;

#endif // HAS_FIXUP_PRECODE

#ifdef HAS_THISPTR_RETBUF_PRECODE

// Precode to shuffle this and retbuf for closed delegates over static methods with a return buffer
struct ThisPtrRetBufPrecode {

#ifdef _WIN64
    static const int Type = 0x90;
#else
    static const int Type = 0xC2;
#endif // _WIN64

    // mov regScratch,regArg0
    // mov regArg0,regArg1
    // mov regArg1,regScratch
    // nop
    // jmp EntryPoint
    // dw pMethodDesc

    IN_WIN64(BYTE   m_nop1;)
    IN_WIN64(BYTE   m_prefix1;)
    WORD            m_movScratchArg0;
    IN_WIN64(BYTE   m_prefix2;)
    WORD            m_movArg0Arg1;
    IN_WIN64(BYTE   m_prefix3;)
    WORD            m_movArg1Scratch;
    BYTE            m_nop2;
    BYTE            m_jmp;
    INT32           m_rel32;
    TADDR           m_pMethodDesc;

    void Init(MethodDesc* pMD, LoaderAllocator *pLoaderAllocator);

    TADDR GetMethodDesc()
    {
        LIMITED_METHOD_CONTRACT;
        SUPPORTS_DAC;

        return m_pMethodDesc;
    }

    PCODE GetTarget();

    BOOL SetTargetInterlocked(TADDR target, TADDR expected);
};
IN_WIN32(static_assert_no_msg(offsetof(ThisPtrRetBufPrecode, m_movArg1Scratch) + 1 == OFFSETOF_PRECODE_TYPE);)
typedef DPTR(ThisPtrRetBufPrecode) PTR_ThisPtrRetBufPrecode;

#endif // HAS_THISPTR_RETBUF_PRECODE

#include <poppack.h>

#endif // STUBLINKERX86_H_