// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
//
// File: DllImportCallback.cpp
//

//


#include "common.h"

#include "threads.h"
#include "excep.h"
#include "object.h"
#include "dllimportcallback.h"
#include "mlinfo.h"
#include "comdelegate.h"
#include "ceeload.h"
#include "eeconfig.h"
#include "dbginterface.h"
#include "stubgen.h"
#include "mdaassistants.h"
#include "appdomain.inl"

#ifndef CROSSGEN_COMPILE

struct UM2MThunk_Args
{
    UMEntryThunk *pEntryThunk;
    void *pAddr;
    void *pThunkArgs;
    int argLen;
};

class UMEntryThunkFreeList
{
public:
    UMEntryThunkFreeList(size_t threshold) :
        m_threshold(threshold),
        m_count(0),
        m_pHead(NULL),
        m_pTail(NULL)
    {
        WRAPPER_NO_CONTRACT;

        m_crst.Init(CrstLeafLock, CRST_UNSAFE_ANYMODE);
    }

    UMEntryThunk *GetUMEntryThunk()
    {
        WRAPPER_NO_CONTRACT;

        if (m_count < m_threshold)
            return NULL;

        CrstHolder ch(&m_crst);

        UMEntryThunk *pThunk = m_pHead;

        if (pThunk == NULL)
            return NULL;

        m_pHead = m_pHead->m_pNextFreeThunk;
        --m_count;

        return pThunk;
    }

    void AddToList(UMEntryThunk *pThunk)
    {
        CONTRACTL
        {
            NOTHROW;
        }
        CONTRACTL_END;

        CrstHolder ch(&m_crst);

        if (m_pHead == NULL)
        {
            m_pHead = pThunk;
            m_pTail = pThunk;
        }
        else
        {
            m_pTail->m_pNextFreeThunk = pThunk;
            m_pTail = pThunk;
        }

        pThunk->m_pNextFreeThunk = NULL;

        ++m_count;
    }

private:
    // Used to delay reusing freed thunks
    size_t m_threshold;
    size_t m_count;
    UMEntryThunk *m_pHead;
    UMEntryThunk *m_pTail;
    CrstStatic m_crst;
};
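
// Illustrative usage sketch (not part of the original code; AllocateFreshThunk
// is a hypothetical helper standing in for the executable-heap allocation done
// in UMEntryThunk::CreateUMEntryThunk below):
//
//   UMEntryThunk *p = s_thunkFreeList.GetUMEntryThunk(); // NULL until m_threshold frees
//   if (p == NULL)
//       p = AllocateFreshThunk();
//   ...
//   s_thunkFreeList.AddToList(p);                        // recycle, reuse is delayed
//
// Keeping at least m_threshold entries parked presumably makes it more likely
// that a stale native callback into a freed thunk hits its poisoned code
// (see UMEntryThunk::Terminate below) rather than a recycled, unrelated thunk.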

#define DEFAULT_THUNK_FREE_LIST_THRESHOLD 64

static UMEntryThunkFreeList s_thunkFreeList(DEFAULT_THUNK_FREE_LIST_THRESHOLD);

#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)

EXTERN_C VOID __cdecl UMThunkStubRareDisable();
EXTERN_C Thread* __stdcall CreateThreadBlockThrow();

// argument stack offsets are multiples of sizeof(SLOT) so we can tag them by OR'ing with 1
static_assert_no_msg((sizeof(SLOT) & 1) == 0);
#define MAKE_BYVAL_STACK_OFFSET(x) (x)
#define MAKE_BYREF_STACK_OFFSET(x) ((x) | 1)
#define IS_BYREF_STACK_OFFSET(x)   ((x) & 1)
#define GET_STACK_OFFSET(x)        ((x) & ~1)
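
// For illustration (assuming 4-byte stack slots): tagging offset 8 behaves as
//
//   MAKE_BYVAL_STACK_OFFSET(8) == 8   // slot is copied by value
//   MAKE_BYREF_STACK_OFFSET(8) == 9   // low bit set: pass a pointer to the slot
//   IS_BYREF_STACK_OFFSET(9)   != 0
//   GET_STACK_OFFSET(9)        == 8   // recover the real offset
//
// The encoding is only sound because offsets are multiples of sizeof(SLOT),
// whose low bit is guaranteed clear by the static assert above.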

// -1 means not used
#define UNUSED_STACK_OFFSET (UINT)-1

// static
VOID UMEntryThunk::CompileUMThunkWorker(UMThunkStubInfo *pInfo,
                                        CPUSTUBLINKER *pcpusl,
                                        UINT *psrcofsregs, // NUM_ARGUMENT_REGISTERS elements
                                        UINT *psrcofs,     // pInfo->m_cbDstStack/STACK_ELEM_SIZE elements
                                        UINT retbufofs)    // the large structure return buffer ptr arg offset (if any)
{
    STANDARD_VM_CONTRACT;

    CodeLabel* pSetupThreadLabel = pcpusl->NewCodeLabel();
    CodeLabel* pRejoinThreadLabel = pcpusl->NewCodeLabel();
    CodeLabel* pDisableGCLabel = pcpusl->NewCodeLabel();
    CodeLabel* pRejoinGCLabel = pcpusl->NewCodeLabel();

    // We come into this code with UMEntryThunk in EAX
    const X86Reg kEAXentryThunk = kEAX;

    // For ThisCall, we make it look like a normal stdcall so that
    // the rest of the code (like repushing the arguments) does not
    // have to worry about it.

    if (pInfo->m_wFlags & umtmlThisCall)
    {
        // pop off the return address into EDX
        pcpusl->X86EmitPopReg(kEDX);

        if (pInfo->m_wFlags & umtmlThisCallHiddenArg)
        {
            // exchange ecx (the "this" pointer) with the hidden structure return buffer
            //  xchg ecx, [esp]
            pcpusl->X86EmitOp(0x87, kECX, (X86Reg)4 /*ESP*/);
        }

        // jam ecx (the "this" param) onto the stack. Now it looks like a normal stdcall.
        pcpusl->X86EmitPushReg(kECX);

        // push edx - repush the return address
        pcpusl->X86EmitPushReg(kEDX);
    }
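
    // For illustration, the rewrite above turns the incoming thiscall frame
    // into a plain stdcall frame (sketch; "this" arrives in ECX):
    //
    //   before:                     after:
    //     ESP -> [ret addr]           ESP -> [ret addr]
    //            [arg0]                      [this]
    //            [arg1]                      [arg0]
    //                                        [arg1]
    //
    // With umtmlThisCallHiddenArg the xchg additionally swaps "this" with the
    // hidden return-buffer pointer, so the final order is roughly
    // [ret addr][retbuf]["this"][args...].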

    // Setup the EBP frame
    pcpusl->X86EmitPushEBPframe();

    // Save EBX
    pcpusl->X86EmitPushReg(kEBX);

    // Make space for return value - instead of repeatedly doing push eax edx <trash regs> pop edx eax
    // we will save the return value once and restore it just before returning.
    pcpusl->X86EmitSubEsp(sizeof(PCONTEXT(NULL)->Eax) + sizeof(PCONTEXT(NULL)->Edx));

    // Load thread descriptor into ECX
    const X86Reg kECXthread = kECX;

    // save UMEntryThunk
    pcpusl->X86EmitPushReg(kEAXentryThunk);

    pcpusl->EmitSetup(pSetupThreadLabel);

    pcpusl->X86EmitMovRegReg(kECX, kEBX);

    pcpusl->EmitLabel(pRejoinThreadLabel);

    // restore UMEntryThunk
    pcpusl->X86EmitPopReg(kEAXentryThunk);

#ifdef _DEBUG
    // Save incoming registers
    pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
    pcpusl->X86EmitPushReg(kECXthread);     // thread descriptor

    pcpusl->X86EmitPushReg(kEAXentryThunk);
    pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) LogUMTransition), 4);

    // Restore registers
    pcpusl->X86EmitPopReg(kECXthread);
    pcpusl->X86EmitPopReg(kEAXentryThunk);
#endif

#ifdef PROFILING_SUPPORTED
    // Notify profiler of transition into runtime, before we disable preemptive GC
    if (CORProfilerTrackTransitions())
    {
        // Load the methoddesc into EBX (UMEntryThunk->m_pMD)
        pcpusl->X86EmitIndexRegLoad(kEBX, kEAXentryThunk, UMEntryThunk::GetOffsetOfMethodDesc());

        // Save registers
        pcpusl->X86EmitPushReg(kEAXentryThunk); // UMEntryThunk
        pcpusl->X86EmitPushReg(kECXthread);     // pCurThread

        // Push arguments and notify profiler
        pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_CALL); // Reason
        pcpusl->X86EmitPushReg(kEBX);                      // MethodDesc*
        pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerUnmanagedToManagedTransitionMD), 8);

        // Restore registers
        pcpusl->X86EmitPopReg(kECXthread);
        pcpusl->X86EmitPopReg(kEAXentryThunk);

        // Push the MethodDesc* (in EBX) for use by the transition on the way out.
        pcpusl->X86EmitPushReg(kEBX);
    }
#endif // PROFILING_SUPPORTED

    pcpusl->EmitDisable(pDisableGCLabel, TRUE, kECXthread);

    pcpusl->EmitLabel(pRejoinGCLabel);

    // construct a FrameHandlerExRecord

    // push [ECX]Thread.m_pFrame - corresponding to FrameHandlerExRecord::m_pEntryFrame
    pcpusl->X86EmitIndexPush(kECXthread, offsetof(Thread, m_pFrame));

    // push offset FastNExportExceptHandler
    pcpusl->X86EmitPushImm32((INT32)(size_t)FastNExportExceptHandler);

    // push fs:[0]
    const static BYTE codeSEH1[] = { 0x64, 0xFF, 0x35, 0x0, 0x0, 0x0, 0x0 };
    pcpusl->EmitBytes(codeSEH1, sizeof(codeSEH1));

    // link in the exception frame
    // mov dword ptr fs:[0], esp
    const static BYTE codeSEH2[] = { 0x64, 0x89, 0x25, 0x0, 0x0, 0x0, 0x0 };
    pcpusl->EmitBytes(codeSEH2, sizeof(codeSEH2));
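
    // At this point the three pushes above have laid out a FrameHandlerExRecord
    // directly on the stack (sketch, assuming the record is an
    // EXCEPTION_REGISTRATION_RECORD followed by the entry-frame pointer):
    //
    //   ESP + 0 -> Next          (previous fs:[0])
    //   ESP + 4 -> Handler       (FastNExportExceptHandler)
    //   ESP + 8 -> m_pEntryFrame (Thread.m_pFrame captured at entry)
    //
    // and fs:[0] now points at it, so unmanaged SEH can unwind through this stub.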

    // EBX will hold address of start of arguments. Calculate here so the AD switch case can access
    // the arguments at their original location rather than re-copying them to the inner frame.
    // lea ebx, [ebp + 8]
    pcpusl->X86EmitIndexLea(kEBX, kEBP, 8);

    //
    // ----------------------------------------------------------------------------------------------
    //
    // From this point on (until noted) we might be executing as the result of calling into the
    // runtime in order to switch AppDomain. In order for the following code to function in both
    // scenarios it must be careful when making assumptions about the current stack layout (in the AD
    // switch case a new inner frame has been pushed which is not identical to the original outer
    // frame).
    //
    // Our guaranteed state at this point is as follows:
    //   EAX: Pointer to UMEntryThunk
    //   EBX: Pointer to start of caller's arguments
    //   ECX: Pointer to current Thread
    //   EBP: Equals EBX - 8 (no AD switch) or unspecified (AD switch)
    //
    // Stack:
    //
    //            +-------------------------+
    //    ESP + 0 |                         |
    //
    //            |         Varies          |
    //
    //            |                         |
    //            +-------------------------+
    //   EBX - 20 | Saved Result: EDX/ST(0) |
    //            +- - - - - - - - - - - - -+
    //   EBX - 16 | Saved Result: EAX/ST(0) |
    //            +-------------------------+
    //   EBX - 12 |      Caller's EBX       |
    //            +-------------------------+
    //   EBX -  8 |      Caller's EBP       |
    //            +-------------------------+
    //   EBX -  4 |     Return address      |
    //            +-------------------------+
    //   EBX + 0  |                         |
    //
    //            |   Caller's arguments    |
    //
    //            |                         |
    //            +-------------------------+
    //

    // save the thread pointer
    pcpusl->X86EmitPushReg(kECXthread);

    // reserve the space for call slot
    pcpusl->X86EmitSubEsp(4);

    // remember stack size for offset computations
    INT iStackSizeAtCallSlot = pcpusl->GetStackSize();

    if (!(pInfo->m_wFlags & umtmlSkipStub))
    {
        // save EDI (it's used by the IL stub invocation code)
        pcpusl->X86EmitPushReg(kEDI);
    }

    // repush any stack arguments
    int arg = pInfo->m_cbDstStack/STACK_ELEM_SIZE;

    while (arg--)
    {
        if (IS_BYREF_STACK_OFFSET(psrcofs[arg]))
        {
            // lea ecx, [ebx + ofs]
            pcpusl->X86EmitIndexLea(kECX, kEBX, GET_STACK_OFFSET(psrcofs[arg]));

            // push ecx
            pcpusl->X86EmitPushReg(kECX);
        }
        else
        {
            // push dword ptr [ebx + ofs]
            pcpusl->X86EmitIndexPush(kEBX, GET_STACK_OFFSET(psrcofs[arg]));
        }
    }
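
    // Worked example (illustrative): with two incoming stack slots and
    // psrcofs[] = { MAKE_BYVAL_STACK_OFFSET(0), MAKE_BYREF_STACK_OFFSET(4) },
    // the loop above walks backwards and emits
    //
    //   lea  ecx, [ebx + 4]       ; last slot is passed by reference
    //   push ecx
    //   push dword ptr [ebx + 0]  ; first slot is copied by value
    //
    // so the copy lands on the new stack in the original argument order.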

    // load register arguments
    int regidx = 0;

#define ARGUMENT_REGISTER(regname) \
    if (psrcofsregs[regidx] != UNUSED_STACK_OFFSET) \
    { \
        if (IS_BYREF_STACK_OFFSET(psrcofsregs[regidx])) \
        { \
            /* lea reg, [ebx + ofs] */ \
            pcpusl->X86EmitIndexLea(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
        } \
        else \
        { \
            /* mov reg, [ebx + ofs] */ \
            pcpusl->X86EmitIndexRegLoad(k##regname, kEBX, GET_STACK_OFFSET(psrcofsregs[regidx])); \
        } \
    } \
    regidx++;

    ENUM_ARGUMENT_REGISTERS_BACKWARD();

#undef ARGUMENT_REGISTER
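
    // ENUM_ARGUMENT_REGISTERS_BACKWARD expands ARGUMENT_REGISTER once per
    // managed argument register, last register first. On x86, where the
    // argument registers are ECX and EDX, the macro above therefore unrolls
    // to roughly:
    //
    //   if (psrcofsregs[0] != UNUSED_STACK_OFFSET) { /* lea or mov EDX */ } regidx++;
    //   if (psrcofsregs[1] != UNUSED_STACK_OFFSET) { /* lea or mov ECX */ } regidx++;
    //
    // matching the psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed]
    // indexing used by CompileNExportThunk below.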

    if (!(pInfo->m_wFlags & umtmlSkipStub))
    {
        //
        // Call the IL stub which will:
        // 1) marshal
        // 2) call the managed method
        // 3) unmarshal
        //

        // the delegate object is extracted by the stub from UMEntryThunk
        _ASSERTE(pInfo->m_wFlags & umtmlIsStatic);

        // mov EDI, [EAX + UMEntryThunk.m_pUMThunkMarshInfo]
        pcpusl->X86EmitIndexRegLoad(kEDI, kEAXentryThunk, offsetof(UMEntryThunk, m_pUMThunkMarshInfo));

        // mov EDI, [EDI + UMThunkMarshInfo.m_pILStub]
        pcpusl->X86EmitIndexRegLoad(kEDI, kEDI, UMThunkMarshInfo::GetOffsetOfStub());

        // EAX still contains the UMEntryThunk pointer, so we cannot really use SCRATCHREG
        // we can use EDI, though

        INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;

        // mov [ESP+iCallSlotOffset], EDI
        pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, kEDI);

        // call [ESP+iCallSlotOffset]
        pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);

        // Emit a NOP so we know that we can call managed code
        INDEBUG(pcpusl->Emit8(X86_INSTR_NOP));

        // restore EDI
        pcpusl->X86EmitPopReg(kEDI);
    }
    else if (!(pInfo->m_wFlags & umtmlIsStatic))
    {
        //
        // This is a call on a delegate
        //

        // mov THIS, [EAX + UMEntryThunk.m_pObjectHandle]
        pcpusl->X86EmitOp(0x8b, THIS_kREG, kEAXentryThunk, offsetof(UMEntryThunk, m_pObjectHandle));

        // mov THIS, [THIS]
        pcpusl->X86EmitOp(0x8b, THIS_kREG, THIS_kREG);

        //
        // Inline Delegate.Invoke for perf
        //

        // mov SCRATCHREG, [THISREG + Delegate.FP]  ; Save target stub in register
        pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, THIS_kREG, DelegateObject::GetOffsetOfMethodPtr());

        // mov THISREG, [THISREG + Delegate.OR]  ; replace "this" pointer
        pcpusl->X86EmitIndexRegLoad(THIS_kREG, THIS_kREG, DelegateObject::GetOffsetOfTarget());

        INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;

        // mov [ESP+iCallSlotOffset], SCRATCHREG
        pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, SCRATCH_REGISTER_X86REG);

        // call [ESP+iCallSlotOffset]
        pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);

        INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
    }
    else
    {
        //
        // Call the managed method
        //

        INT iCallSlotOffset = pcpusl->GetStackSize() - iStackSizeAtCallSlot;

        // mov SCRATCH, [SCRATCH + offsetof(UMEntryThunk.m_pManagedTarget)]
        pcpusl->X86EmitIndexRegLoad(SCRATCH_REGISTER_X86REG, SCRATCH_REGISTER_X86REG, offsetof(UMEntryThunk, m_pManagedTarget));

        // mov [ESP+iCallSlotOffset], SCRATCHREG
        pcpusl->X86EmitIndexRegStore((X86Reg)kESP_Unsafe, iCallSlotOffset, SCRATCH_REGISTER_X86REG);

        // call [ESP+iCallSlotOffset]
        pcpusl->X86EmitOp(0xff, (X86Reg)2, (X86Reg)kESP_Unsafe, iCallSlotOffset);

        INDEBUG(pcpusl->Emit8(X86_INSTR_NOP)); // Emit a NOP so we know that we can call managed code
    }

    // skip the call slot
    pcpusl->X86EmitAddEsp(4);

    // Save the return value to the outer frame
    if (pInfo->m_wFlags & umtmlFpu)
    {
        // save FP return value

        // fstp qword ptr [ebx - 0x8 - 0xc]
        pcpusl->X86EmitOffsetModRM(0xdd, (X86Reg)3, kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX */);
    }
    else
    {
        // save EDX:EAX
        if (retbufofs == UNUSED_STACK_OFFSET)
        {
            pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
            pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0xc /* skip saved EBP, EBX, EAX */, kEDX);
        }
        else
        {
            // pretend that the method returned the ret buf hidden argument
            // (the structure ptr); the C++ compiler seems to rely on this

            // mov dword ptr eax, [ebx + retbufofs]
            pcpusl->X86EmitIndexRegLoad(kEAX, kEBX, retbufofs);

            // save it as the return value
            pcpusl->X86EmitIndexRegStore(kEBX, -0x8 /* to outer EBP */ -0x8 /* skip saved EBP, EBX */, kEAX);
        }
    }

    // restore the thread pointer
    pcpusl->X86EmitPopReg(kECXthread);

    //
    // Once we reach this point in the code we're back to a single scenario: the outer frame of the
    // reverse p/invoke.
    //
    // ----------------------------------------------------------------------------------------------
    //

    // mov byte ptr [ecx + Thread.m_fPreemptiveGCDisabled], 0
    pcpusl->X86EmitOffsetModRM(0xc6, (X86Reg)0, kECXthread, Thread::GetOffsetOfGCFlag());
    pcpusl->Emit8(0);

    CodeLabel *pRareEnable, *pEnableRejoin;
    pRareEnable = pcpusl->NewCodeLabel();
    pEnableRejoin = pcpusl->NewCodeLabel();

    // test byte ptr [ecx + Thread.m_State], TS_CatchAtSafePoint
    pcpusl->X86EmitOffsetModRM(0xf6, (X86Reg)0, kECXthread, Thread::GetOffsetOfState());
    pcpusl->Emit8(Thread::TS_CatchAtSafePoint);

    pcpusl->X86EmitCondJump(pRareEnable, X86CondCode::kJNZ);

    pcpusl->EmitLabel(pEnableRejoin);

    // *** unhook SEH frame

    // mov edx, [esp]  ;; pointer to the next exception record
    pcpusl->X86EmitEspOffset(0x8B, kEDX, 0);

    // mov dword ptr fs:[0], edx
    static const BYTE codeSEH[] = { 0x64, 0x89, 0x15, 0x0, 0x0, 0x0, 0x0 };
    pcpusl->EmitBytes(codeSEH, sizeof(codeSEH));

    // deallocate SEH frame
    pcpusl->X86EmitAddEsp(sizeof(FrameHandlerExRecord));

#ifdef PROFILING_SUPPORTED
    if (CORProfilerTrackTransitions())
    {
        // Load the MethodDesc* we pushed on the entry transition into EBX.
        pcpusl->X86EmitPopReg(kEBX);

        // Save registers
        pcpusl->X86EmitPushReg(kECX);

        // Push arguments and notify profiler
        pcpusl->X86EmitPushImm32(COR_PRF_TRANSITION_RETURN); // Reason
        pcpusl->X86EmitPushReg(kEBX);                        // MethodDesc*
        pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID)ProfilerManagedToUnmanagedTransitionMD), 8);

        // Restore registers
        pcpusl->X86EmitPopReg(kECX);
    }
#endif // PROFILING_SUPPORTED

    // Load the saved return value
    if (pInfo->m_wFlags & umtmlFpu)
    {
        // fld qword ptr [esp]
        pcpusl->Emit8(0xdd);
        pcpusl->Emit16(0x2404);

        pcpusl->X86EmitAddEsp(8);
    }
    else
    {
        pcpusl->X86EmitPopReg(kEDX);
        pcpusl->X86EmitPopReg(kEAX);
    }

    // Restore EBX, which was saved in the prolog
    pcpusl->X86EmitPopReg(kEBX);

    pcpusl->X86EmitPopReg(kEBP);

    // retn n
    pcpusl->X86EmitReturn(pInfo->m_cbRetPop);

    //-------------------------------------------------------------
    // coming here if the thread is not set up yet
    //

    pcpusl->EmitLabel(pSetupThreadLabel);

    // call CreateThreadBlockThrow
    pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) CreateThreadBlockThrow), 0);

    // mov ecx, eax
    pcpusl->Emit16(0xc189);

    // jump back into the main code path
    pcpusl->X86EmitNearJump(pRejoinThreadLabel);

    //-------------------------------------------------------------
    // coming here if g_TrapReturningThreads was true
    //

    pcpusl->EmitLabel(pDisableGCLabel);

    // call UMThunkStubRareDisable. This may throw if we are not allowed
    // to enter. Note that we have not set up our SEH yet (deliberately).
    // This is important to handle the case where we cannot enter the CLR
    // during shutdown and cannot coordinate with the GC because of
    // deadlocks.
    pcpusl->X86EmitCall(pcpusl->NewExternalCodeLabel((LPVOID) UMThunkStubRareDisable), 0);

    // jump back into the main code path
    pcpusl->X86EmitNearJump(pRejoinGCLabel);

    //-------------------------------------------------------------
    // coming here for the rare case of enabling GC preemptive mode
    //

    pcpusl->EmitLabel(pRareEnable);

    // The Thread object is expected to be in EBX, so first save the caller's EBX
    pcpusl->X86EmitPushReg(kEBX);
    // mov ebx, ecx
    pcpusl->X86EmitMovRegReg(kEBX, kECXthread);

    pcpusl->EmitRareEnable(NULL);

    // restore ebx
    pcpusl->X86EmitPopReg(kEBX);

    // return to mainline of function
    pcpusl->X86EmitNearJump(pEnableRejoin);
}

// Compiles an unmanaged-to-managed thunk for the given signature.
Stub *UMThunkMarshInfo::CompileNExportThunk(LoaderHeap *pLoaderHeap, PInvokeStaticSigInfo* pSigInfo, MetaSig *pMetaSig, BOOL fNoStub)
{
    STANDARD_VM_CONTRACT;

    // stub is always static
    BOOL fIsStatic = (fNoStub ? pSigInfo->IsStatic() : TRUE);

    ArgIterator argit(pMetaSig);

    UINT nStackBytes = argit.SizeOfArgStack();
    _ASSERTE((nStackBytes % STACK_ELEM_SIZE) == 0);

    // size of stack passed to us from unmanaged; may be bigger than nStackBytes if there are
    // parameters with copy constructors where we perform value-to-reference transformation
    UINT nStackBytesIncoming = nStackBytes;

    UINT *psrcofs = (UINT *)_alloca((nStackBytes / STACK_ELEM_SIZE) * sizeof(UINT));
    UINT psrcofsregs[NUM_ARGUMENT_REGISTERS];
    UINT retbufofs = UNUSED_STACK_OFFSET;

    for (int i = 0; i < NUM_ARGUMENT_REGISTERS; i++)
        psrcofsregs[i] = UNUSED_STACK_OFFSET;

    UINT nNumArgs = pMetaSig->NumFixedArgs();

    UINT nOffset = 0;
    int numRegistersUsed = 0;
    int numStackSlotsIndex = nStackBytes / STACK_ELEM_SIZE;

    // process this
    if (!fIsStatic)
    {
        // just reserve ECX, the instance target is special-cased in the thunk compiler
        numRegistersUsed++;
    }

    // process the return buffer parameter
    if (argit.HasRetBuffArg())
    {
        numRegistersUsed++;
        _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
        psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] = nOffset;
        retbufofs = nOffset;

        nOffset += StackElemSize(sizeof(LPVOID));
    }

    // process ordinary parameters
    for (DWORD i = nNumArgs; i > 0; i--)
    {
        TypeHandle thValueType;
        CorElementType type = pMetaSig->NextArgNormalized(&thValueType);

        UINT cbSize = MetaSig::GetElemSize(type, thValueType);

        BOOL fPassPointer = FALSE;
        if (!fNoStub && type == ELEMENT_TYPE_PTR)
        {
            // this is a copy-constructed argument - get its size
            TypeHandle thPtr = pMetaSig->GetLastTypeHandleThrowing();

            _ASSERTE(thPtr.IsPointer());
            cbSize = thPtr.AsTypeDesc()->GetTypeParam().GetSize();

            // the incoming stack may be bigger than the outgoing (IL stub) stack
            nStackBytesIncoming += (StackElemSize(cbSize) - StackElemSize(sizeof(LPVOID)));
            fPassPointer = TRUE;
        }

        if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
        {
            _ASSERTE(numRegistersUsed - 1 < NUM_ARGUMENT_REGISTERS);
            psrcofsregs[NUM_ARGUMENT_REGISTERS - numRegistersUsed] =
                (fPassPointer ?
                MAKE_BYREF_STACK_OFFSET(nOffset) :  // the register will get a pointer to the incoming stack slot
                MAKE_BYVAL_STACK_OFFSET(nOffset));  // the register will get the incoming stack slot
        }
        else if (fPassPointer)
        {
            // the stack slot will get a pointer to the incoming stack slot
            psrcofs[--numStackSlotsIndex] = MAKE_BYREF_STACK_OFFSET(nOffset);
        }
        else
        {
            // stack slots will get the incoming stack slots (we may need more stack slots for larger parameters)
            for (UINT nSlotOfs = StackElemSize(cbSize); nSlotOfs > 0; nSlotOfs -= STACK_ELEM_SIZE)
            {
                // note the reverse order here which is necessary to maintain
                // the original layout of the structure (it'll be reversed once
                // more when repushing)
                psrcofs[--numStackSlotsIndex] = MAKE_BYVAL_STACK_OFFSET(nOffset + nSlotOfs - STACK_ELEM_SIZE);
            }
        }

        nOffset += StackElemSize(cbSize);
    }
    _ASSERTE(numStackSlotsIndex == 0);
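
    // Worked example (illustrative): a static stdcall target
    // "int f(int a, int b, int c)" with no return buffer. Managed code takes
    // a and b in ECX/EDX and leaves only c on the stack, so the loop above
    // yields
    //
    //   psrcofsregs[1] = 0   // ECX <- by-value slot at [ebx + 0] (a)
    //   psrcofsregs[0] = 4   // EDX <- by-value slot at [ebx + 4] (b)
    //   psrcofs[0]     = 8   // repushed from [ebx + 8]           (c)
    //
    // with nStackBytes == 4 even though the unmanaged caller pushed 12 bytes.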

    UINT cbActualArgSize = nStackBytesIncoming + (numRegistersUsed * STACK_ELEM_SIZE);

    if (!fIsStatic)
    {
        // do not count THIS
        cbActualArgSize -= StackElemSize(sizeof(LPVOID));
    }

    m_cbActualArgSize = cbActualArgSize;

    m_callConv = static_cast<UINT16>(pSigInfo->GetCallConv());

    UMThunkStubInfo stubInfo;
    memset(&stubInfo, 0, sizeof(stubInfo));

    if (!FitsInU2(m_cbActualArgSize))
        COMPlusThrow(kMarshalDirectiveException, IDS_EE_SIGTOOCOMPLEX);

    stubInfo.m_cbSrcStack = static_cast<UINT16>(m_cbActualArgSize);
    stubInfo.m_cbDstStack = nStackBytes;

    if (pSigInfo->GetCallConv() == pmCallConvCdecl)
    {
        // caller pop
        m_cbRetPop = 0;
    }
    else
    {
        // callee pop
        m_cbRetPop = static_cast<UINT16>(m_cbActualArgSize);

        if (pSigInfo->GetCallConv() == pmCallConvThiscall)
        {
            stubInfo.m_wFlags |= umtmlThisCall;
            if (argit.HasRetBuffArg())
            {
                stubInfo.m_wFlags |= umtmlThisCallHiddenArg;
            }
        }
    }
    stubInfo.m_cbRetPop = m_cbRetPop;
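
    // For illustration: with cdecl the unmanaged caller cleans up, so the
    // thunk returns with a plain "ret" (m_cbRetPop == 0); with stdcall or
    // thiscall the thunk pops the incoming arguments itself, e.g. the
    // three-int example above would end in "ret 12".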

    if (fIsStatic) stubInfo.m_wFlags |= umtmlIsStatic;
    if (fNoStub) stubInfo.m_wFlags |= umtmlSkipStub;

    if (pMetaSig->HasFPReturn()) stubInfo.m_wFlags |= umtmlFpu;

    CPUSTUBLINKER cpusl;
    CPUSTUBLINKER *pcpusl = &cpusl;

    // call the worker to emit the actual thunk
    UMEntryThunk::CompileUMThunkWorker(&stubInfo, pcpusl, psrcofsregs, psrcofs, retbufofs);

    return pcpusl->Link(pLoaderHeap);
}

#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL

PCODE UMThunkMarshInfo::GetExecStubEntryPoint()
{
    LIMITED_METHOD_CONTRACT;

    return GetEEFuncEntryPoint(UMThunkStub);
}

#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL

UMEntryThunkCache::UMEntryThunkCache(AppDomain *pDomain) :
    m_crst(CrstUMEntryThunkCache),
    m_pDomain(pDomain)
{
    WRAPPER_NO_CONTRACT;
    _ASSERTE(pDomain != NULL);
}

UMEntryThunkCache::~UMEntryThunkCache()
{
    WRAPPER_NO_CONTRACT;

    for (SHash<ThunkSHashTraits>::Iterator i = m_hash.Begin(); i != m_hash.End(); i++)
    {
        // UMEntryThunks in this cache own their UMThunkMarshInfo in a 1-1 fashion
        DestroyMarshInfo(i->m_pThunk->GetUMThunkMarshInfo());
        UMEntryThunk::FreeUMEntryThunk(i->m_pThunk);
    }
}

UMEntryThunk *UMEntryThunkCache::GetUMEntryThunk(MethodDesc *pMD)
{
    CONTRACT (UMEntryThunk *)
    {
        THROWS;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(pMD));
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;

    UMEntryThunk *pThunk;

    CrstHolder ch(&m_crst);

    const CacheElement *pElement = m_hash.LookupPtr(pMD);
    if (pElement != NULL)
    {
        pThunk = pElement->m_pThunk;
    }
    else
    {
        // cache miss -> create a new thunk
        pThunk = UMEntryThunk::CreateUMEntryThunk();
        Holder<UMEntryThunk *, DoNothing, UMEntryThunk::FreeUMEntryThunk> umHolder;
        umHolder.Assign(pThunk);

        UMThunkMarshInfo *pMarshInfo = (UMThunkMarshInfo *)(void *)(m_pDomain->GetStubHeap()->AllocMem(S_SIZE_T(sizeof(UMThunkMarshInfo))));
        Holder<UMThunkMarshInfo *, DoNothing, UMEntryThunkCache::DestroyMarshInfo> miHolder;
        miHolder.Assign(pMarshInfo);

        pMarshInfo->LoadTimeInit(pMD);
        pThunk->LoadTimeInit(NULL, NULL, pMarshInfo, pMD, m_pDomain->GetId());

        // add it to the cache
        CacheElement element;
        element.m_pMD = pMD;
        element.m_pThunk = pThunk;
        m_hash.Add(element);

        miHolder.SuppressRelease();
        umHolder.SuppressRelease();
    }

    RETURN pThunk;
}

// FailFast if a NativeCallable method is invoked directly from managed code.
// UMThunkStub.asm checks the mode and calls this function to fail fast.
extern "C" VOID STDCALL ReversePInvokeBadTransition()
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    // Fail
    EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(
        COR_E_EXECUTIONENGINE,
        W("Invalid Program: attempted to call a NativeCallable method from runtime-typesafe code.")
        );
}

// Disable from a place that is calling into managed code via a UMEntryThunk.
extern "C" VOID STDCALL UMThunkStubRareDisableWorker(Thread *pThread, UMEntryThunk *pUMEntryThunk)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;

    // Do not add a CONTRACT here. We haven't set up SEH. We rely
    // on HandleThreadAbort and COMPlusThrowBoot dealing with this situation properly.

    // WARNING!!!!
    // when we start executing here, we are actually in cooperative mode. But we
    // haven't synchronized with the barrier to reentry yet. So we are in a highly
    // dangerous mode. If we call managed code, we will potentially be active in
    // the GC heap, even as GCs are occurring!

    // Check for the ShutDown scenario. This happens only when we have initiated shutdown
    // and someone is trying to call in after the CLR is suspended. In that case, we
    // must either raise an unmanaged exception or return an HRESULT, depending on the
    // expectations of our caller.
    if (!CanRunManagedCode())
    {
        // DO NOT IMPROVE THIS EXCEPTION! It cannot be a managed exception. It
        // cannot be a real exception object because we cannot execute any managed
        // code here.
        pThread->m_fPreemptiveGCDisabled = 0;
        COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);
    }

    // We must do the following in this order, because otherwise we would be constructing
    // the exception for the abort without synchronizing with the GC. Also, we have no
    // CLR SEH set up, despite the fact that we may throw a ThreadAbortException.
    pThread->RareDisablePreemptiveGC();
    pThread->HandleThreadAbort();

#ifdef DEBUGGING_SUPPORTED
    // If the debugger is attached, we use this opportunity to see if
    // we're disabling preemptive GC on the way into the runtime from
    // unmanaged code. We end up here because
    // Increment/DecrementTraceCallCount() will bump
    // g_TrapReturningThreads for us.
    if (CORDebuggerTraceCall())
        g_pDebugInterface->TraceCall((const BYTE *)pUMEntryThunk->GetManagedTarget());
#endif // DEBUGGING_SUPPORTED
}

PCODE TheUMEntryPrestubWorker(UMEntryThunk * pUMEntryThunk)
{
    STATIC_CONTRACT_THROWS;
    STATIC_CONTRACT_GC_TRIGGERS;
    STATIC_CONTRACT_MODE_PREEMPTIVE;

    if (!CanRunManagedCode())
        COMPlusThrowBoot(E_PROCESS_SHUTDOWN_REENTRY);

    Thread * pThread = GetThreadNULLOk();
    if (pThread == NULL)
        pThread = CreateThreadBlockThrow();

    GCX_COOP_THREAD_EXISTS(pThread);

    if (pThread->IsAbortRequested())
        pThread->HandleThreadAbort();

    UMEntryThunk::DoRunTimeInit(pUMEntryThunk);

    return (PCODE)pUMEntryThunk->GetCode();
}

void RunTimeInit_Wrapper(LPVOID /* UMThunkMarshInfo * */ ptr)
{
    WRAPPER_NO_CONTRACT;

    UMEntryThunk::DoRunTimeInit((UMEntryThunk*)ptr);
}


// asm entrypoint
void STDCALL UMEntryThunk::DoRunTimeInit(UMEntryThunk* pUMEntryThunk)
{

    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        ENTRY_POINT;
        PRECONDITION(CheckPointer(pUMEntryThunk));
    }
    CONTRACTL_END;

    INSTALL_MANAGED_EXCEPTION_DISPATCHER;
    // this method is called by stubs which are called by managed code,
    // so we need an unwind and continue handler so that our internal
    // exceptions don't leak out into managed code.
    INSTALL_UNWIND_AND_CONTINUE_HANDLER;

    {
        GCX_PREEMP();
        pUMEntryThunk->RunTimeInit();
    }

    UNINSTALL_UNWIND_AND_CONTINUE_HANDLER;
    UNINSTALL_MANAGED_EXCEPTION_DISPATCHER;
}

UMEntryThunk* UMEntryThunk::CreateUMEntryThunk()
{
    CONTRACT (UMEntryThunk*)
    {
        THROWS;
        GC_NOTRIGGER;
        MODE_ANY;
        INJECT_FAULT(COMPlusThrowOM());
        POSTCONDITION(CheckPointer(RETVAL));
    }
    CONTRACT_END;

    UMEntryThunk * p;

    p = s_thunkFreeList.GetUMEntryThunk();

    if (p == NULL)
        p = (UMEntryThunk *)(void *)SystemDomain::GetGlobalLoaderAllocator()->GetExecutableHeap()->AllocMem(S_SIZE_T(sizeof(UMEntryThunk)));

    RETURN p;
}

void UMEntryThunk::Terminate()
{
    CONTRACTL
    {
        NOTHROW;
    }
    CONTRACTL_END;

    m_code.Poison();

    s_thunkFreeList.AddToList(this);
}

VOID UMEntryThunk::FreeUMEntryThunk(UMEntryThunk* p)
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
        PRECONDITION(CheckPointer(p));
    }
    CONTRACTL_END;

    p->Terminate();
}
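
// Lifetime summary (descriptive note): CreateUMEntryThunk above prefers
// recycling a thunk from s_thunkFreeList and only falls back to the global
// executable heap on a miss. FreeUMEntryThunk never returns memory to that
// heap; Terminate() poisons the thunk's code (so a stale native callback
// fails fast, see ReportViolation below) and parks it on the free list,
// where the threshold delays its reuse.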

#endif // CROSSGEN_COMPILE

//-------------------------------------------------------------------------
// This function is used to report an error when we call a collected delegate.
// However, the memory that was allocated for the thunk can be reused, so this
// function will not be reached in every case of a collected-delegate call,
// and it may even crash while trying to report the problem.
//-------------------------------------------------------------------------
VOID __fastcall UMEntryThunk::ReportViolation(UMEntryThunk* pEntryThunk)
{
    CONTRACTL
    {
        THROWS;
        GC_TRIGGERS;
        MODE_COOPERATIVE;
        PRECONDITION(CheckPointer(pEntryThunk));
    }
    CONTRACTL_END;

    MethodDesc* pMethodDesc = pEntryThunk->GetMethod();

    SString namespaceOrClassName;
    SString methodName;
    SString moduleName;

    pMethodDesc->GetMethodInfoNoSig(namespaceOrClassName, methodName);
    moduleName.SetUTF8(pMethodDesc->GetModule()->GetSimpleName());

    SString message;

    message.Printf(W("A callback was made on a garbage collected delegate of type '%s!%s::%s'."),
        moduleName.GetUnicode(),
        namespaceOrClassName.GetUnicode(),
        methodName.GetUnicode());

    EEPOLICY_HANDLE_FATAL_ERROR_WITH_MESSAGE(COR_E_FAILFAST, message.GetUnicode());
}

UMThunkMarshInfo::~UMThunkMarshInfo()
{
    CONTRACTL
    {
        NOTHROW;
        GC_TRIGGERS;
        MODE_ANY;
    }
    CONTRACTL_END;

#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
    if (m_pExecStub)
        m_pExecStub->DecRef();
#endif

#ifdef _DEBUG
    FillMemory(this, sizeof(*this), 0xcc);
#endif
}

MethodDesc* UMThunkMarshInfo::GetILStubMethodDesc(MethodDesc* pInvokeMD, PInvokeStaticSigInfo* pSigInfo, DWORD dwStubFlags)
{
    STANDARD_VM_CONTRACT;

    MethodDesc* pStubMD = NULL;
    dwStubFlags |= NDIRECTSTUB_FL_REVERSE_INTEROP; // could be either delegate interop or not--that info is passed in from the caller

#if defined(DEBUGGING_SUPPORTED)
    // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
    CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(pSigInfo->GetModule(), CORJIT_FLAGS());
    if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
    {
        dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
    }
#endif // DEBUGGING_SUPPORTED

    pStubMD = NDirect::CreateCLRToNativeILStub(
        pSigInfo,
        dwStubFlags,
        pInvokeMD // may be NULL
        );

    return pStubMD;
}

//----------------------------------------------------------
// This initializer is called during load time.
// It does not do any stub initialization or sig parsing.
// RunTimeInit() must be called subsequently to fully
// initialize the UMThunkMarshInfo.
//----------------------------------------------------------
VOID UMThunkMarshInfo::LoadTimeInit(MethodDesc* pMD)
{
    LIMITED_METHOD_CONTRACT;
    PRECONDITION(pMD != NULL);

    LoadTimeInit(pMD->GetSignature(), pMD->GetModule(), pMD);
}

VOID UMThunkMarshInfo::LoadTimeInit(Signature sig, Module * pModule, MethodDesc * pMD)
{
    LIMITED_METHOD_CONTRACT;

    FillMemory(this, sizeof(UMThunkMarshInfo), 0); // Prevent problems with partial deletes

    // This will be overwritten by the actual code pointer (or NULL) at the end of UMThunkMarshInfo::RunTimeInit()
    m_pILStub = (PCODE)1;

    m_pMD = pMD;
    m_pModule = pModule;
    m_sig = sig;

#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
    INDEBUG(m_cbRetPop = 0xcccc;)
#endif
}
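
// Illustrative two-phase setup, as performed by UMEntryThunkCache::GetUMEntryThunk
// above (sketch, error handling elided):
//
//   UMThunkMarshInfo *pInfo = /* zeroed memory from a loader heap */;
//   pInfo->LoadTimeInit(pMD);  // cheap, non-throwing: records sig/module, m_pILStub = 1
//   ...
//   pInfo->RunTimeInit();      // expensive: creates/JITs the stub, may throw
//
// Deferring all stub creation to RunTimeInit is what keeps LoadTimeInit safe
// to call under LIMITED_METHOD_CONTRACT.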

#ifndef CROSSGEN_COMPILE
//----------------------------------------------------------
// This initializer finishes the init started by LoadTimeInit.
// It does stub creation and can throw an exception.
//
// It can safely be called multiple times and by concurrent
// threads.
//----------------------------------------------------------
VOID UMThunkMarshInfo::RunTimeInit()
{
    STANDARD_VM_CONTRACT;

    // Nothing to do if already inited
    if (IsCompletelyInited())
        return;

    PCODE pFinalILStub = NULL;
    MethodDesc* pStubMD = NULL;

    MethodDesc * pMD = GetMethod();

    // Lookup the NGen'ed stub - currently we only support NGen of reverse delegate invoke interop stubs
    if (pMD != NULL && pMD->IsEEImpl())
    {
        DWORD dwStubFlags = NDIRECTSTUB_FL_NGENEDSTUB | NDIRECTSTUB_FL_REVERSE_INTEROP | NDIRECTSTUB_FL_DELEGATE;

#if defined(DEBUGGING_SUPPORTED)
        // Combining the next two lines, and eliminating jitDebuggerFlags, leads to bad codegen in x86 Release builds using Visual C++ 19.00.24215.1.
        CORJIT_FLAGS jitDebuggerFlags = GetDebuggerCompileFlags(GetModule(), CORJIT_FLAGS());
        if (jitDebuggerFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_CODE))
        {
            dwStubFlags |= NDIRECTSTUB_FL_GENERATEDEBUGGABLEIL;
        }
#endif // DEBUGGING_SUPPORTED

        pFinalILStub = GetStubForInteropMethod(pMD, dwStubFlags, &pStubMD);
    }

#if defined(_TARGET_X86_) && !defined(FEATURE_STUBS_AS_IL)
    PInvokeStaticSigInfo sigInfo;

    if (pMD != NULL)
        new (&sigInfo) PInvokeStaticSigInfo(pMD);
    else
        new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());

    Stub *pFinalExecStub = NULL;

    // we will always emit the argument-shuffling thunk; m_cbActualArgSize is set inside
    LoaderHeap *pHeap = (pMD == NULL ? NULL : pMD->GetLoaderAllocator()->GetStubHeap());

    if (pFinalILStub != NULL ||
#ifdef MDA_SUPPORTED
        // GC.Collect calls are emitted to IL stubs
        MDA_GET_ASSISTANT(GcManagedToUnmanaged) || MDA_GET_ASSISTANT(GcUnmanagedToManaged) ||
#endif // MDA_SUPPORTED
        NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
    {
        if (pFinalILStub == NULL)
        {
            DWORD dwStubFlags = 0;

            if (sigInfo.IsDelegateInterop())
                dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;

            pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
            pFinalILStub = JitILStub(pStubMD);
        }

        MetaSig msig(pStubMD);
        pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, FALSE);
    }
    else
    {
        MetaSig msig(GetSignature(), GetModule(), NULL);
        pFinalExecStub = CompileNExportThunk(pHeap, &sigInfo, &msig, TRUE);
    }

    if (FastInterlockCompareExchangePointer(&m_pExecStub,
                                            pFinalExecStub,
                                            NULL) != NULL)
    {

        // Some thread swooped in and set us. Our stub is now a
        // duplicate, so throw it away.
        if (pFinalExecStub)
            pFinalExecStub->DecRef();
    }

#else // _TARGET_X86_ && !FEATURE_STUBS_AS_IL

    if (pFinalILStub == NULL)
    {
        if (pMD != NULL && !pMD->IsEEImpl() &&
#ifdef MDA_SUPPORTED
            // GC.Collect calls are emitted to IL stubs
            !MDA_GET_ASSISTANT(GcManagedToUnmanaged) && !MDA_GET_ASSISTANT(GcUnmanagedToManaged) &&
#endif // MDA_SUPPORTED
            !NDirect::MarshalingRequired(pMD, GetSignature().GetRawSig(), GetModule()))
        {
            // Call the method directly in the no-delegate case if possible. This is important to avoid JITing
            // for stubs created via code:ICLRRuntimeHost2::CreateDelegate during coreclr startup.
            pFinalILStub = pMD->GetMultiCallableAddrOfCode();
        }
        else
        {
            // For perf, it is important to avoid the expensive initialization of
            // PInvokeStaticSigInfo if we have an NGen'ed stub.
            PInvokeStaticSigInfo sigInfo;

            if (pMD != NULL)
                new (&sigInfo) PInvokeStaticSigInfo(pMD);
            else
                new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());

            DWORD dwStubFlags = 0;

            if (sigInfo.IsDelegateInterop())
                dwStubFlags |= NDIRECTSTUB_FL_DELEGATE;

            pStubMD = GetILStubMethodDesc(pMD, &sigInfo, dwStubFlags);
            pFinalILStub = JitILStub(pStubMD);

        }
    }

#if defined(_TARGET_X86_)
    MetaSig sig(pMD);
    int numRegistersUsed = 0;
    UINT16 cbRetPop = 0;

    //
    // cbStackArgSize represents the number of arg bytes for the MANAGED signature
    //
    UINT32 cbStackArgSize = 0;

    int offs = 0;

#ifdef UNIX_X86_ABI
    if (HasRetBuffArgUnmanagedFixup(&sig))
    {
        // callee should pop the retbuf
        numRegistersUsed += 1;
        offs += STACK_ELEM_SIZE;
        cbRetPop += STACK_ELEM_SIZE;
    }
#endif // UNIX_X86_ABI

    for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
    {
        TypeHandle thValueType;
        CorElementType type = sig.NextArgNormalized(&thValueType);
        int cbSize = sig.GetElemSize(type, thValueType);
        if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
        {
            offs += STACK_ELEM_SIZE;
        }
        else
        {
            offs += StackElemSize(cbSize);
            cbStackArgSize += StackElemSize(cbSize);
        }
    }
    m_cbStackArgSize = cbStackArgSize;
    m_cbActualArgSize = (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : offs;

    PInvokeStaticSigInfo sigInfo;
    if (pMD != NULL)
        new (&sigInfo) PInvokeStaticSigInfo(pMD);
    else
        new (&sigInfo) PInvokeStaticSigInfo(GetSignature(), GetModule());
    if (sigInfo.GetCallConv() == pmCallConvCdecl)
    {
        m_cbRetPop = cbRetPop;
    }
    else
    {
        // For all calling conventions except cdecl, the callee pops the stack arguments
        m_cbRetPop = cbRetPop + static_cast<UINT16>(m_cbActualArgSize);
    }
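
    // For illustration: a Windows x86 stdcall "void f(int a, int b, int c)"
    // gives cbStackArgSize == 4 (only c is stack-resident in the managed
    // calling convention), m_cbActualArgSize == 12 (all three args live on
    // the native stack), and hence m_cbRetPop == 12. The same signature
    // marked cdecl leaves m_cbRetPop == 0.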
#else // _TARGET_X86_
    //
    // m_cbActualArgSize gets the number of arg bytes for the NATIVE signature
    //
    m_cbActualArgSize =
        (pStubMD != NULL) ? pStubMD->AsDynamicMethodDesc()->GetNativeStackArgSize() : pMD->SizeOfArgStack();

#endif // _TARGET_X86_

#endif // _TARGET_X86_ && !FEATURE_STUBS_AS_IL

    // Must be the last thing we set!
    InterlockedCompareExchangeT<PCODE>(&m_pILStub, pFinalILStub, (PCODE)1);
}

#if defined(_TARGET_X86_) && defined(FEATURE_STUBS_AS_IL)
VOID UMThunkMarshInfo::SetupArguments(char *pSrc, ArgumentRegisters *pArgRegs, char *pDst)
{
    MethodDesc *pMD = GetMethod();

    _ASSERTE(pMD);

    //
    // x86 native uses the following stack layout:
    // | saved eip |
    // | --------- | <- CFA
    // | stkarg 0  |
    // | stkarg 1  |
    // | ...       |
    // | stkarg N  |
    //
    // x86 managed, however, uses a slightly different stack layout:
    // | saved eip |
    // | --------- | <- CFA
    // | stkarg M  | (NATIVE/MANAGED may have a different number of stack arguments)
    // | ...       |
    // | stkarg 1  |
    // | stkarg 0  |
    //
    // This stub bridges the gap between them.
    //
    char *pCurSrc = pSrc;
    char *pCurDst = pDst + m_cbStackArgSize;

    MetaSig sig(pMD);

    int numRegistersUsed = 0;

#ifdef UNIX_X86_ABI
    if (HasRetBuffArgUnmanagedFixup(&sig))
    {
        // Pass the retbuf via ECX
        numRegistersUsed += 1;
        pArgRegs->Ecx = *((UINT32 *)pCurSrc);
        pCurSrc += STACK_ELEM_SIZE;
    }
#endif // UNIX_X86_ABI

    for (UINT i = 0 ; i < sig.NumFixedArgs(); i++)
    {
        TypeHandle thValueType;
        CorElementType type = sig.NextArgNormalized(&thValueType);
        int cbSize = sig.GetElemSize(type, thValueType);
        int elemSize = StackElemSize(cbSize);

        if (ArgIterator::IsArgumentInRegister(&numRegistersUsed, type))
        {
            _ASSERTE(elemSize == STACK_ELEM_SIZE);

            if (numRegistersUsed == 1)
                pArgRegs->Ecx = *((UINT32 *)pCurSrc);
            else if (numRegistersUsed == 2)
                pArgRegs->Edx = *((UINT32 *)pCurSrc);
        }
        else
        {
            pCurDst -= elemSize;
            memcpy(pCurDst, pCurSrc, elemSize);
        }

        pCurSrc += elemSize;
    }

    _ASSERTE(pDst == pCurDst);
}
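
// Worked example (illustrative): for "void f(int a, int b, int c)" with
// m_cbStackArgSize == 4, SetupArguments above reads the native left-to-right
// stack copy at pSrc and produces
//
//   pArgRegs->Ecx = a;   // first managed register argument
//   pArgRegs->Edx = b;   // second managed register argument
//   pDst[0..3]    = c;   // only stack-resident argument, re-laid-out
//
// Each stack argument is copied as one block while pCurDst walks downward, so
// argument order is reversed exactly once but the bytes within a multi-slot
// argument keep their native layout.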

EXTERN_C VOID STDCALL UMThunkStubSetupArgumentsWorker(UMThunkMarshInfo *pMarshInfo,
                                                      char *pSrc,
                                                      UMThunkMarshInfo::ArgumentRegisters *pArgRegs,
                                                      char *pDst)
{
    pMarshInfo->SetupArguments(pSrc, pArgRegs, pDst);
}
#endif // _TARGET_X86_ && FEATURE_STUBS_AS_IL

#ifdef _DEBUG
void STDCALL LogUMTransition(UMEntryThunk* thunk)
{
    CONTRACTL
    {
        NOTHROW;
        DEBUG_ONLY;
        GC_NOTRIGGER;
        ENTRY_POINT;
        if (GetThread()) MODE_PREEMPTIVE; else MODE_ANY;
        PRECONDITION(CheckPointer(thunk));
        PRECONDITION((GetThread() != NULL) ? (!GetThread()->PreemptiveGCDisabled()) : TRUE);
    }
    CONTRACTL_END;

    BEGIN_ENTRYPOINT_VOIDRET;

    void** retESP = ((void**) &thunk) + 4;

    MethodDesc* method = thunk->GetMethod();
    if (method)
    {
        LOG((LF_STUBS, LL_INFO1000000, "UNMANAGED -> MANAGED Stub To Method = %s::%s SIG %s Ret Address ESP = 0x%x ret = 0x%x\n",
            method->m_pszDebugClassName,
            method->m_pszDebugMethodName,
            method->m_pszDebugMethodSignature, retESP, *retESP));
    }

    END_ENTRYPOINT_VOIDRET;
}
#endif

#endif // CROSSGEN_COMPILE