1// Licensed to the .NET Foundation under one or more agreements.
2// The .NET Foundation licenses this file to you under the MIT license.
3// See the LICENSE file in the project root for more information.
4
5
6#include "common.h"
7
8#include "eetwain.h"
9#include "dbginterface.h"
10#include "gcenv.h"
11
12#define RETURN_ADDR_OFFS 1 // in DWORDS
13
14#ifdef USE_GC_INFO_DECODER
15#include "gcinfodecoder.h"
16#endif
17
18#include "argdestination.h"
19
20#define X86_INSTR_W_TEST_ESP 0x4485 // test [esp+N], eax
21#define X86_INSTR_TEST_ESP_SIB 0x24
22#define X86_INSTR_PUSH_0 0x6A // push 00, entire instruction is 0x6A00
23#define X86_INSTR_PUSH_IMM 0x68 // push NNNN,
24#define X86_INSTR_W_PUSH_IND_IMM 0x35FF // push [NNNN]
25#define X86_INSTR_CALL_REL32 0xE8 // call rel32
26#define X86_INSTR_W_CALL_IND_IMM 0x15FF // call [addr32]
27#define X86_INSTR_NOP 0x90 // nop
28#define X86_INSTR_NOP2 0x9090 // 2-byte nop
29#define X86_INSTR_NOP3_1 0x9090 // 1st word of 3-byte nop
30#define X86_INSTR_NOP3_3 0x90 // 3rd byte of 3-byte nop
31#define X86_INSTR_NOP4 0x90909090 // 4-byte nop
32#define X86_INSTR_NOP5_1 0x90909090 // 1st dword of 5-byte nop
33#define X86_INSTR_NOP5_5 0x90 // 5th byte of 5-byte nop
34#define X86_INSTR_INT3 0xCC // int3
35#define X86_INSTR_HLT 0xF4 // hlt
36#define X86_INSTR_PUSH_EBP 0x55 // push ebp
37#define X86_INSTR_W_MOV_EBP_ESP 0xEC8B // mov ebp, esp
38#define X86_INSTR_POP_ECX 0x59 // pop ecx
39#define X86_INSTR_RET 0xC2 // ret imm16
40#define X86_INSTR_RETN 0xC3 // ret
41#define X86_INSTR_w_LEA_ESP_EBP_BYTE_OFFSET 0x658d // lea esp, [ebp-bOffset]
42#define X86_INSTR_w_LEA_ESP_EBP_DWORD_OFFSET 0xa58d // lea esp, [ebp-dwOffset]
43#define X86_INSTR_JMP_NEAR_REL32 0xE9 // near jmp rel32
44#define X86_INSTR_w_JMP_FAR_IND_IMM 0x25FF // far jmp [addr32]
45
46#ifndef USE_GC_INFO_DECODER
47
48
49#ifdef _DEBUG
50// For dumping of verbose info.
51#ifndef DACCESS_COMPILE
52static bool trFixContext = false;
53#endif
54static bool trEnumGCRefs = false;
55static bool dspPtr = false; // prints the live ptrs as reported
56#endif
57
58// NOTE: enabling compiler optimizations, even for debug builds.
59// Comment this out in order to be able to fully debug methods here.
60#if defined(_MSC_VER)
61#pragma optimize("tg", on)
62#endif
63
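// The GC info tables encode integers using a variable-length scheme: big-endian
// groups of 7 value bits per byte, where the high bit (0x80) of each byte is a
// continuation flag. Illustrative example (not from any real table): the byte
// sequence 0x82 0x2C decodes as (0x02 << 7) + 0x2C = 300, and decodeUnsigned
// leaves 'src' advanced past both bytes.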
64__forceinline unsigned decodeUnsigned(PTR_CBYTE& src)
65{
66 LIMITED_METHOD_CONTRACT;
67 SUPPORTS_DAC;
68
69#ifdef DACCESS_COMPILE
70 PTR_CBYTE begin = src;
71#endif
72
73 BYTE byte = *src++;
74 unsigned value = byte & 0x7f;
75 while (byte & 0x80)
76 {
77#ifdef DACCESS_COMPILE
78 // In DAC builds, the target data may be corrupt. Rather than return incorrect data
79 // and risk wasting time in a potentially long loop, we want to fail early and gracefully.
80 // The data is encoded with 7 value-bits per byte, and so we may need to read a maximum
81 // of 5 bytes (7*5=35) to read a full 32-bit integer.
82 if ((src - begin) > 5)
83 {
84 DacError(CORDBG_E_TARGET_INCONSISTENT);
85 }
86#endif
87
88 byte = *src++;
89 value <<= 7;
90 value += byte & 0x7f;
91 }
92 return value;
93}
94
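// decodeSigned uses the same continuation scheme, except that the first byte
// contributes only 6 value bits and its 0x40 bit holds the sign. Illustrative
// example: the single byte 0x43 decodes to -3.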
95__forceinline int decodeSigned(PTR_CBYTE& src)
96{
97 LIMITED_METHOD_CONTRACT;
98 SUPPORTS_DAC;
99
100#ifdef DACCESS_COMPILE
101 PTR_CBYTE begin = src;
102#endif
103
104 BYTE byte = *src++;
105 BYTE first = byte;
106 int value = byte & 0x3f;
107 while (byte & 0x80)
108 {
109#ifdef DACCESS_COMPILE
110 // In DAC builds, the target data may be corrupt. Rather than return incorrect data
111 // and risk wasting time in a potentially long loop, we want to fail early and gracefully.
112 // The data is encoded with 7 value-bits per byte, and so we may need to read a maximum
113 // of 5 bytes (7*5=35) to read a full 32-bit integer.
114 if ((src - begin) > 5)
115 {
116 DacError(CORDBG_E_TARGET_INCONSISTENT);
117 }
118#endif
119
120 byte = *src++;
121 value <<= 7;
122 value += byte & 0x7f;
123 }
124 if (first & 0x40)
125 value = -value;
126 return value;
127}
128
129// Fast versions of the above, with one iteration of the loop unrolled
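// (fastDecodeSigned can take the single-byte fast path only when both the
// continuation bit (0x80) and the sign bit (0x40) are clear, hence the 0xC0 mask.)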
130#define fastDecodeUnsigned(src) (((*(src) & 0x80) == 0) ? (unsigned) (*(src)++) : decodeUnsigned((src)))
131#define fastDecodeSigned(src) (((*(src) & 0xC0) == 0) ? (unsigned) (*(src)++) : decodeSigned((src)))
132
133// Fast skipping past encoded integers
134#ifndef DACCESS_COMPILE
135#define fastSkipUnsigned(src) { while ((*(src)++) & 0x80) { } }
136#define fastSkipSigned(src) { while ((*(src)++) & 0x80) { } }
137#else
// In DAC builds we want to trade off a little perf in the common case for reliability against corrupt data.
139#define fastSkipUnsigned(src) (decodeUnsigned(src))
140#define fastSkipSigned(src) (decodeSigned(src))
141#endif
142
143
144/*****************************************************************************
145 *
146 * Decodes the X86 GcInfo header and returns the decoded information
147 * in the hdrInfo struct.
148 * curOffset is the code offset within the active method used in the
149 * computation of PrologOffs/EpilogOffs.
150 * Returns the size of the header (number of bytes decoded).
151 */
152static size_t DecodeGCHdrInfo(GCInfoToken gcInfoToken,
153 unsigned curOffset,
154 hdrInfo * infoPtr)
155{
156 CONTRACTL {
157 NOTHROW;
158 GC_NOTRIGGER;
159 HOST_NOCALLS;
160 SUPPORTS_DAC;
161 } CONTRACTL_END;
162
163 PTR_CBYTE table = (PTR_CBYTE) gcInfoToken.Info;
164#if VERIFY_GC_TABLES
165 _ASSERTE(*castto(table, unsigned short *)++ == 0xFEEF);
166#endif
167
168 infoPtr->methodSize = fastDecodeUnsigned(table);
169
170 _ASSERTE(curOffset >= 0);
171 _ASSERTE(curOffset <= infoPtr->methodSize);
172
173 /* Decode the InfoHdr */
174
175 InfoHdr header;
176 table = decodeHeader(table, gcInfoToken.Version, &header);
177
178 BOOL hasArgTabOffset = FALSE;
179 if (header.untrackedCnt == HAS_UNTRACKED)
180 {
181 hasArgTabOffset = TRUE;
182 header.untrackedCnt = fastDecodeUnsigned(table);
183 }
184
185 if (header.varPtrTableSize == HAS_VARPTR)
186 {
187 hasArgTabOffset = TRUE;
188 header.varPtrTableSize = fastDecodeUnsigned(table);
189 }
190
191 if (header.gsCookieOffset == HAS_GS_COOKIE_OFFSET)
192 {
193 header.gsCookieOffset = fastDecodeUnsigned(table);
194 }
195
196 if (header.syncStartOffset == HAS_SYNC_OFFSET)
197 {
198 header.syncStartOffset = decodeUnsigned(table);
199 header.syncEndOffset = decodeUnsigned(table);
200
201 _ASSERTE(header.syncStartOffset != INVALID_SYNC_OFFSET && header.syncEndOffset != INVALID_SYNC_OFFSET);
202 _ASSERTE(header.syncStartOffset < header.syncEndOffset);
203 }
204
205 if (header.revPInvokeOffset == HAS_REV_PINVOKE_FRAME_OFFSET)
206 {
207 header.revPInvokeOffset = fastDecodeUnsigned(table);
208 }
209
210 /* Some sanity checks on header */
211
212 _ASSERTE( header.prologSize +
213 (size_t)(header.epilogCount*header.epilogSize) <= infoPtr->methodSize);
214 _ASSERTE( header.epilogCount == 1 || !header.epilogAtEnd);
215
216 _ASSERTE( header.untrackedCnt <= header.argCount+header.frameSize);
217
218 _ASSERTE( header.ebpSaved || !(header.ebpFrame || header.doubleAlign));
219 _ASSERTE(!header.ebpFrame || !header.doubleAlign );
220 _ASSERTE( header.ebpFrame || !header.security );
221 _ASSERTE( header.ebpFrame || !header.handlers );
222 _ASSERTE( header.ebpFrame || !header.localloc );
223 _ASSERTE( header.ebpFrame || !header.editNcontinue); // <TODO> : Esp frames NYI for EnC</TODO>
224
225 /* Initialize the infoPtr struct */
226
227 infoPtr->argSize = header.argCount * 4;
228 infoPtr->ebpFrame = header.ebpFrame;
229 infoPtr->interruptible = header.interruptible;
230 infoPtr->returnKind = (ReturnKind) header.returnKind;
231
232 infoPtr->prologSize = header.prologSize;
233 infoPtr->epilogSize = header.epilogSize;
234 infoPtr->epilogCnt = header.epilogCount;
235 infoPtr->epilogEnd = header.epilogAtEnd;
236
237 infoPtr->untrackedCnt = header.untrackedCnt;
238 infoPtr->varPtrTableSize = header.varPtrTableSize;
239 infoPtr->gsCookieOffset = header.gsCookieOffset;
240
241 infoPtr->syncStartOffset = header.syncStartOffset;
242 infoPtr->syncEndOffset = header.syncEndOffset;
243 infoPtr->revPInvokeOffset = header.revPInvokeOffset;
244
245 infoPtr->doubleAlign = header.doubleAlign;
246 infoPtr->securityCheck = header.security;
247 infoPtr->handlers = header.handlers;
248 infoPtr->localloc = header.localloc;
249 infoPtr->editNcontinue = header.editNcontinue;
250 infoPtr->varargs = header.varargs;
251 infoPtr->profCallbacks = header.profCallbacks;
252 infoPtr->genericsContext = header.genericsContext;
253 infoPtr->genericsContextIsMethodDesc = header.genericsContextIsMethodDesc;
254 infoPtr->isSpeculativeStackWalk = false;
255
256 /* Are we within the prolog of the method? */
257
258 if (curOffset < infoPtr->prologSize)
259 {
260 infoPtr->prologOffs = curOffset;
261 }
262 else
263 {
264 infoPtr->prologOffs = hdrInfo::NOT_IN_PROLOG;
265 }
266
267 /* Assume we're not in the epilog of the method */
268
269 infoPtr->epilogOffs = hdrInfo::NOT_IN_EPILOG;
270
271 /* Are we within an epilog of the method? */
272
273 if (infoPtr->epilogCnt)
274 {
275 unsigned epilogStart;
276
277 if (infoPtr->epilogCnt > 1 || !infoPtr->epilogEnd)
278 {
279#if VERIFY_GC_TABLES
280 _ASSERTE(*castto(table, unsigned short *)++ == 0xFACE);
281#endif
282 epilogStart = 0;
283 for (unsigned i = 0; i < infoPtr->epilogCnt; i++)
284 {
285 epilogStart += fastDecodeUnsigned(table);
286 if (curOffset > epilogStart &&
287 curOffset < epilogStart + infoPtr->epilogSize)
288 {
289 infoPtr->epilogOffs = curOffset - epilogStart;
290 }
291 }
292 }
293 else
294 {
295 epilogStart = infoPtr->methodSize - infoPtr->epilogSize;
296
297 if (curOffset > epilogStart &&
298 curOffset < epilogStart + infoPtr->epilogSize)
299 {
300 infoPtr->epilogOffs = curOffset - epilogStart;
301 }
302 }
303
304 infoPtr->syncEpilogStart = epilogStart;
305 }
306
307 unsigned argTabOffset = INVALID_ARGTAB_OFFSET;
308 if (hasArgTabOffset)
309 {
310 argTabOffset = fastDecodeUnsigned(table);
311 }
312 infoPtr->argTabOffset = argTabOffset;
313
314 size_t frameDwordCount = header.frameSize;
315
316 /* Set the rawStackSize to the number of bytes that it bumps ESP */
317
318 infoPtr->rawStkSize = (UINT)(frameDwordCount * sizeof(size_t));
319
320 /* Calculate the callee saves regMask and adjust stackSize to */
321 /* include the callee saves register spills */
322
323 unsigned savedRegs = RM_NONE;
324 unsigned savedRegsCount = 0;
325
326 if (header.ediSaved)
327 {
328 savedRegsCount++;
329 savedRegs |= RM_EDI;
330 }
331 if (header.esiSaved)
332 {
333 savedRegsCount++;
334 savedRegs |= RM_ESI;
335 }
336 if (header.ebxSaved)
337 {
338 savedRegsCount++;
339 savedRegs |= RM_EBX;
340 }
341 if (header.ebpSaved)
342 {
343 savedRegsCount++;
344 savedRegs |= RM_EBP;
345 }
346
347 infoPtr->savedRegMask = (RegMask)savedRegs;
348
349 infoPtr->savedRegsCountExclFP = savedRegsCount;
350 if (header.ebpFrame || header.doubleAlign)
351 {
352 _ASSERTE(header.ebpSaved);
353 infoPtr->savedRegsCountExclFP = savedRegsCount - 1;
354 }
355
356 frameDwordCount += savedRegsCount;
357
358 infoPtr->stackSize = (UINT)(frameDwordCount * sizeof(size_t));
359
360 _ASSERTE(infoPtr->gsCookieOffset == INVALID_GS_COOKIE_OFFSET ||
361 (infoPtr->gsCookieOffset < infoPtr->stackSize) &&
362 ((header.gsCookieOffset % sizeof(void*)) == 0));
363
364 return table - PTR_CBYTE(gcInfoToken.Info);
365}
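
// Typical usage, as in EECodeManager::IsGcSafe and EECodeManager::FixContext
// below: declare a hdrInfo on the stack and pass the method's GCInfoToken plus
// a code offset.
//
//     hdrInfo info;
//     size_t hdrSize = DecodeGCHdrInfo(pCodeInfo->GetGCInfoToken(), dwRelOffset, &info);
//
// The return value is the number of header bytes decoded; the pointer tables
// used by the routines below follow at that offset.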
366
367/*****************************************************************************/
368
369// We do a "pop eax; jmp eax" to return from a fault or finally handler
370const size_t END_FIN_POP_STACK = sizeof(TADDR);
371
372
// The offset (in bytes) from EBP for the security object on the stack
374inline size_t GetSecurityObjectOffset(hdrInfo * info)
375{
376 LIMITED_METHOD_DAC_CONTRACT;
377
378 _ASSERTE(info->securityCheck && info->ebpFrame);
379
380 unsigned position = info->savedRegsCountExclFP +
381 1;
382 return position * sizeof(TADDR);
383}
384
385inline
386size_t GetLocallocSPOffset(hdrInfo * info)
387{
388 LIMITED_METHOD_DAC_CONTRACT;
389
390 _ASSERTE(info->localloc && info->ebpFrame);
391
392 unsigned position = info->savedRegsCountExclFP +
393 info->securityCheck +
394 1;
395 return position * sizeof(TADDR);
396}
397
398inline
399size_t GetParamTypeArgOffset(hdrInfo * info)
400{
401 LIMITED_METHOD_DAC_CONTRACT;
402
403 _ASSERTE((info->genericsContext || info->handlers) && info->ebpFrame);
404
405 unsigned position = info->savedRegsCountExclFP +
406 info->securityCheck +
407 info->localloc +
408 1; // For CORINFO_GENERICS_CTXT_FROM_PARAMTYPEARG
409 return position * sizeof(TADDR);
410}
411
412inline size_t GetStartShadowSPSlotsOffset(hdrInfo * info)
413{
414 LIMITED_METHOD_DAC_CONTRACT;
415
416 _ASSERTE(info->handlers && info->ebpFrame);
417
418 return GetParamTypeArgOffset(info) +
419 sizeof(TADDR); // Slot for end-of-last-executed-filter
420}
421
422/*****************************************************************************
423 * Returns the start of the hidden slots for the shadowSP for functions
424 * with exception handlers. There is one slot per nesting level starting
 * near Ebp, and the list is zero-terminated after the active slots.
426 */
427
428inline
429PTR_TADDR GetFirstBaseSPslotPtr(TADDR ebp, hdrInfo * info)
430{
431 LIMITED_METHOD_DAC_CONTRACT;
432
433 _ASSERTE(info->handlers && info->ebpFrame);
434
435 size_t offsetFromEBP = GetStartShadowSPSlotsOffset(info)
436 + sizeof(TADDR); // to get to the *start* of the next slot
437
438 return PTR_TADDR(ebp - offsetFromEBP);
439}
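
// Putting the offset helpers above together, the EBP-relative portion of the
// frame for an EBP frame with handlers looks roughly like this (illustrative
// sketch assuming all optional slots are present; each entry is one
// TADDR-sized slot, at increasing offsets below EBP):
//
//      callee-saved registers other than EBP   (savedRegsCountExclFP slots)
//      security object                         (GetSecurityObjectOffset)
//      saved SP for localloc                   (GetLocallocSPOffset)
//      generics context / param type arg       (GetParamTypeArgOffset)
//      end-of-last-executed-filter slot        (GetStartShadowSPSlotsOffset)
//      shadow SP slots, one per EH nesting     (GetFirstBaseSPslotPtr points to
//      level, zero-terminated                   the first of these)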
440
441inline size_t GetEndShadowSPSlotsOffset(hdrInfo * info, unsigned maxHandlerNestingLevel)
442{
443 LIMITED_METHOD_DAC_CONTRACT;
444
445 _ASSERTE(info->handlers && info->ebpFrame);
446
447 unsigned numberOfShadowSPSlots = maxHandlerNestingLevel +
448 1 + // For zero-termination
                                     1; // For a filter (which can be active at the same time as a catch/finally handler)
450
451 return GetStartShadowSPSlotsOffset(info) +
452 (numberOfShadowSPSlots * sizeof(TADDR));
453}
454
455/*****************************************************************************
456 * returns the base frame pointer corresponding to the target nesting level.
457 */
458
459inline
460TADDR GetOutermostBaseFP(TADDR ebp, hdrInfo * info)
461{
462 LIMITED_METHOD_DAC_CONTRACT;
463
464 // we are not taking into account double alignment. We are
465 // safe because the jit currently bails on double alignment if there
    // are handlers or localalloc
467 _ASSERTE(!info->doubleAlign);
468 if (info->localloc)
469 {
470 // If the function uses localloc we will fetch the ESP from the localloc
471 // slot.
472 PTR_TADDR pLocalloc = PTR_TADDR(ebp - GetLocallocSPOffset(info));
473
474 return (*pLocalloc);
475 }
476 else
477 {
478 // Default, go back all the method's local stack size
479 return ebp - info->stackSize + sizeof(int);
480 }
481}
482
483/*****************************************************************************
484 *
485 * For functions with handlers, checks if it is currently in a handler.
486 * Either of unwindESP or unwindLevel will specify the target nesting level.
487 * If unwindLevel is specified, info about the funclet at that nesting level
488 * will be returned. (Use if you are interested in a specific nesting level.)
489 * If unwindESP is specified, info for nesting level invoked before the stack
490 * reached unwindESP will be returned. (Use if you have a specific ESP value
491 * during stack walking.)
492 *
493 * *pBaseSP is set to the base SP (base of the stack on entry to
494 * the current funclet) corresponding to the target nesting level.
 * *pNestLevel is set to the target nesting level (useful
 * if unwindESP != IGNORE_VAL).
497 * *pHasInnerFilter will be set to true (only when unwindESP!=IGNORE_VAL) if a filter
498 * is currently active, but the target nesting level is an outer nesting level.
499 * *pHadInnerFilter - was the last use of the frame to execute a filter.
500 * This mainly affects GC lifetime reporting.
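 *
 * For example (see EECodeManager::FixContext below): for a FILTER_CONTEXT the
 * caller passes unwindESP = ctx->Esp with unwindLevel = IGNORE_VAL, while for
 * a catch/finally context it passes unwindESP = IGNORE_VAL with the explicit
 * nestingLevel.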
501 */
502
503enum FrameType
504{
505 FR_NORMAL, // Normal method frame - no exceptions currently active
506 FR_FILTER, // Frame-let of a filter
507 FR_HANDLER, // Frame-let of a callable catch/fault/finally
508
509 FR_INVALID, // Invalid frame (for speculative stackwalks)
510};
511
512enum { IGNORE_VAL = -1 };
513
514FrameType GetHandlerFrameInfo(hdrInfo * info,
515 TADDR frameEBP,
516 TADDR unwindESP,
517 DWORD unwindLevel,
518 TADDR * pBaseSP = NULL, /* OUT */
519 DWORD * pNestLevel = NULL, /* OUT */
520 bool * pHasInnerFilter = NULL, /* OUT */
521 bool * pHadInnerFilter = NULL) /* OUT */
522{
523 CONTRACTL {
524 NOTHROW;
525 GC_NOTRIGGER;
526 HOST_NOCALLS;
527 SUPPORTS_DAC;
528 } CONTRACTL_END;
529
530 _ASSERTE(info->ebpFrame && info->handlers);
531 // One and only one of them should be IGNORE_VAL
532 _ASSERTE((unwindESP == (TADDR) IGNORE_VAL) !=
533 (unwindLevel == (DWORD) IGNORE_VAL));
534 _ASSERTE(pHasInnerFilter == NULL || unwindESP != (TADDR) IGNORE_VAL);
535
536 // Many of the conditions that we'd like to assert cannot be asserted in the case that we're
537 // in the middle of a stackwalk seeded by a profiler, since such seeds can't be trusted
538 // (profilers are external, untrusted sources). So during profiler walks, we test the condition
539 // and throw an exception if it's not met. Otherwise, we just assert the condition.
540 #define FAIL_IF_SPECULATIVE_WALK(condition) \
541 if (info->isSpeculativeStackWalk) \
542 { \
543 if (!(condition)) \
544 { \
545 return FR_INVALID; \
546 } \
547 } \
548 else \
549 { \
550 _ASSERTE(condition); \
551 }
552
553 PTR_TADDR pFirstBaseSPslot = GetFirstBaseSPslotPtr(frameEBP, info);
554 TADDR baseSP = GetOutermostBaseFP(frameEBP, info);
555 bool nonLocalHandlers = false; // Are the funclets invoked by EE (instead of managed code itself)
556 bool hasInnerFilter = false;
557 bool hadInnerFilter = false;
558
559 /* Get the last non-zero slot >= unwindESP, or lvl<unwindLevel.
560 Also do some sanity checks */
561
562 // The shadow slots contain the SP of the nested EH clauses currently active on the stack.
    // The slots grow towards lower addresses on the stack, and the list is terminated by a NULL entry.
564 // Since each subsequent slot contains the SP of a more nested EH clause, the contents of the slots are
565 // expected to be in decreasing order.
566 size_t lvl = 0;
567#ifndef WIN64EXCEPTIONS
568 PTR_TADDR pSlot;
569 for(lvl = 0, pSlot = pFirstBaseSPslot;
570 *pSlot && lvl < unwindLevel;
571 pSlot--, lvl++)
572 {
        // Filters can't have inner funclets
574 FAIL_IF_SPECULATIVE_WALK(!(baseSP & ICodeManager::SHADOW_SP_IN_FILTER));
575
576 TADDR curSlotVal = *pSlot;
577
        // The shadowSPs have to be less than the previous baseSP, unless the stack has been unwound.
579 FAIL_IF_SPECULATIVE_WALK(baseSP > curSlotVal ||
580 (baseSP == curSlotVal && pSlot == pFirstBaseSPslot));
581
582 if (curSlotVal == LCL_FINALLY_MARK)
583 {
584 // Locally called finally
585 baseSP -= sizeof(TADDR);
586 }
587 else
588 {
589 // Is this a funclet we unwound before (can only happen with filters) ?
590 // If unwindESP is specified, normally we expect it to be the last entry in the shadow slot array.
591 // Or, if there is a filter, we expect unwindESP to be the second last entry. However, this may
592 // not be the case in DAC builds. For example, the user can use .cxr in an EH clause to set a
593 // CONTEXT captured in the try clause. In this case, unwindESP will be the ESP of the parent
594 // function, but the shadow slot array will contain the SP of the EH clause, which is closer to
595 // the leaf than the parent method.
596
597 if (unwindESP != (TADDR) IGNORE_VAL &&
598 unwindESP > END_FIN_POP_STACK +
599 (curSlotVal & ~ICodeManager::SHADOW_SP_BITS))
600 {
601 // In non-DAC builds, the only time unwindESP is closer to the root than entries in the shadow
602 // slot array is when the last entry in the array is for a filter. Also, filters can't have
603 // nested handlers.
604 if ((pSlot[0] & ICodeManager::SHADOW_SP_IN_FILTER) &&
605 (pSlot[-1] == 0) &&
606 !(baseSP & ICodeManager::SHADOW_SP_IN_FILTER))
607 {
608 if (pSlot[0] & ICodeManager::SHADOW_SP_FILTER_DONE)
609 hadInnerFilter = true;
610 else
611 hasInnerFilter = true;
612 break;
613 }
614 else
615 {
616#if defined(DACCESS_COMPILE)
617 // In DAC builds, this could happen. We just need to bail out of this loop early.
618 break;
619#else // !DACCESS_COMPILE
620 // In non-DAC builds, this is an error.
621 FAIL_IF_SPECULATIVE_WALK(FALSE);
622#endif // DACCESS_COMPILE
623 }
624 }
625
626 nonLocalHandlers = true;
627 baseSP = curSlotVal;
628 }
629 }
630#endif // WIN64EXCEPTIONS
631
632 if (unwindESP != (TADDR) IGNORE_VAL)
633 {
634 FAIL_IF_SPECULATIVE_WALK(baseSP >= unwindESP ||
635 baseSP == unwindESP - sizeof(TADDR)); // About to locally call a finally
636
637 if (baseSP < unwindESP) // About to locally call a finally
638 baseSP = unwindESP;
639 }
640 else
641 {
642 FAIL_IF_SPECULATIVE_WALK(lvl == unwindLevel); // unwindLevel must be currently active on stack
643 }
644
645 if (pBaseSP)
646 *pBaseSP = baseSP & ~ICodeManager::SHADOW_SP_BITS;
647
648 if (pNestLevel)
649 {
650 *pNestLevel = (DWORD)lvl;
651 }
652
653 if (pHasInnerFilter)
654 *pHasInnerFilter = hasInnerFilter;
655
656 if (pHadInnerFilter)
657 *pHadInnerFilter = hadInnerFilter;
658
659 if (baseSP & ICodeManager::SHADOW_SP_IN_FILTER)
660 {
661 FAIL_IF_SPECULATIVE_WALK(!hasInnerFilter); // nested filters not allowed
662 return FR_FILTER;
663 }
664 else if (nonLocalHandlers)
665 {
666 return FR_HANDLER;
667 }
668 else
669 {
670 return FR_NORMAL;
671 }
672
673 #undef FAIL_IF_SPECULATIVE_WALK
674}
675
676// Returns the number of bytes at the beginning of the stack frame that shouldn't be
677// modified by an EnC. This is everything except the space for locals and temporaries.
678inline size_t GetSizeOfFrameHeaderForEnC(hdrInfo * info)
679{
680 WRAPPER_NO_CONTRACT;
681
682 // See comment above Compiler::lvaAssignFrameOffsets() in src\jit\il\lclVars.cpp
683 // for frame layout
684
685 // EnC supports increasing the maximum handler nesting level by always
686 // assuming that the max is MAX_EnC_HANDLER_NESTING_LEVEL. Methods with
687 // a higher max cannot be updated by EnC
688
689 // Take the offset (from EBP) of the last slot of the header, plus one for the EBP slot itself
690 // to get the total size of the header.
691 return sizeof(TADDR) +
692 GetEndShadowSPSlotsOffset(info, MAX_EnC_HANDLER_NESTING_LEVEL);
693}
694#endif // !USE_GC_INFO_DECODER
695
696#ifndef DACCESS_COMPILE
697#ifndef WIN64EXCEPTIONS
698
699/*****************************************************************************
700 *
701 * Setup context to enter an exception handler (a 'catch' block).
702 * This is the last chance for the runtime support to do fixups in
703 * the context before execution continues inside a filter, catch handler,
704 * or finally.
705 */
706void EECodeManager::FixContext( ContextType ctxType,
707 EHContext *ctx,
708 EECodeInfo *pCodeInfo,
709 DWORD dwRelOffset,
710 DWORD nestingLevel,
711 OBJECTREF thrownObject,
712 CodeManState *pState,
713 size_t ** ppShadowSP,
714 size_t ** ppEndRegion)
715{
716 CONTRACTL {
717 NOTHROW;
718 GC_NOTRIGGER;
719 } CONTRACTL_END;
720
721 _ASSERTE((ctxType == FINALLY_CONTEXT) == (thrownObject == NULL));
722
723 _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
724 CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
725
726 /* Extract the necessary information from the info block header */
727
728 stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(pCodeInfo->GetGCInfoToken(),
729 dwRelOffset,
730 &stateBuf->hdrInfoBody);
731 pState->dwIsSet = 1;
732
733#ifdef _DEBUG
734 if (trFixContext) {
735 printf("FixContext [%s][%s] for %s.%s: ",
736 stateBuf->hdrInfoBody.ebpFrame?"ebp":" ",
737 stateBuf->hdrInfoBody.interruptible?"int":" ",
738 "UnknownClass","UnknownMethod");
739 fflush(stdout);
740 }
741#endif
742
743 /* make sure that we have an ebp stack frame */
744
745 _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);
    _ASSERTE(stateBuf->hdrInfoBody.handlers); // <TODO>@TODO : This will always be set. Remove it</TODO>
747
748 TADDR baseSP;
749 GetHandlerFrameInfo(&stateBuf->hdrInfoBody, ctx->Ebp,
750 ctxType == FILTER_CONTEXT ? ctx->Esp : IGNORE_VAL,
751 ctxType == FILTER_CONTEXT ? (DWORD) IGNORE_VAL : nestingLevel,
752 &baseSP,
753 &nestingLevel);
754
755 _ASSERTE((size_t)ctx->Ebp >= baseSP);
756 _ASSERTE(baseSP >= (size_t)ctx->Esp);
757
758 ctx->Esp = (DWORD)baseSP;
759
760 // EE will write Esp to **pShadowSP before jumping to handler
761
762 PTR_TADDR pBaseSPslots =
763 GetFirstBaseSPslotPtr(ctx->Ebp, &stateBuf->hdrInfoBody);
764 *ppShadowSP = (size_t *)&pBaseSPslots[-(int) nestingLevel ];
765 pBaseSPslots[-(int)(nestingLevel+1)] = 0; // Zero out the next slot
766
767 // EE will write the end offset of the filter
768 if (ctxType == FILTER_CONTEXT)
769 *ppEndRegion = (size_t *)pBaseSPslots + 1;
770
    /* This is just a simple assignment of thrownObject to ctx->Eax,
772 just pretend the cast goo isn't there.
773 */
774
775 *((OBJECTREF*)&(ctx->Eax)) = thrownObject;
776}
777
778#endif // !WIN64EXCEPTIONS
779
780
781
782
783
784/*****************************************************************************/
785
786bool VarIsInReg(ICorDebugInfo::VarLoc varLoc)
787{
788 LIMITED_METHOD_CONTRACT;
789
790 switch(varLoc.vlType)
791 {
792 case ICorDebugInfo::VLT_REG:
793 case ICorDebugInfo::VLT_REG_REG:
794 case ICorDebugInfo::VLT_REG_STK:
795 return true;
796
797 default:
798 return false;
799 }
800}
801
802#ifdef EnC_SUPPORTED
803/*****************************************************************************
804 * Last chance for the runtime support to do fixups in the context
805 * before execution continues inside an EnC updated function.
 * It also adjusts ESP and munges on the stack. So the caller has to make
 * sure that the stack region isn't needed (by doing a localloc).
 * Also, if this returns EnC_FAIL, we should not have munged the
 * context, i.e. this must be a transacted commit.
810 * The plan of attack is:
811 * 1) Error checking up front. If we get through here, everything
812 * else should work
813 * 2) Get all the info about current variables, registers, etc
814 * 3) zero out the stack frame - this'll initialize _all_ variables
815 * 4) Put the variables from step 3 into their new locations.
816 *
817 * Note that while we use the ShuffleVariablesGet/Set methods, they don't
 * have any info/logic that's internal to the runtime: another code manager
819 * could easily duplicate what they do, which is why we're calling into them.
820 */
821
822HRESULT EECodeManager::FixContextForEnC(PCONTEXT pCtx,
823 EECodeInfo * pOldCodeInfo,
824 const ICorDebugInfo::NativeVarInfo * oldMethodVars,
825 SIZE_T oldMethodVarsCount,
826 EECodeInfo * pNewCodeInfo,
827 const ICorDebugInfo::NativeVarInfo * newMethodVars,
828 SIZE_T newMethodVarsCount)
829{
830 CONTRACTL {
831 DISABLED(NOTHROW);
832 DISABLED(GC_NOTRIGGER);
833 } CONTRACTL_END;
834
835 HRESULT hr = S_OK;
836
837 // Grab a copy of the context before the EnC update.
838 T_CONTEXT oldCtx = *pCtx;
839
840#if defined(_TARGET_X86_)
841 LOG((LF_CORDB, LL_INFO100, "EECM::FixContextForEnC\n"));
842
843 /* Extract the necessary information from the info block header */
844
845 hdrInfo oldInfo, newInfo;
846
847 DecodeGCHdrInfo(pOldCodeInfo->GetGCInfoToken(),
848 pOldCodeInfo->GetRelOffset(),
849 &oldInfo);
850
851 DecodeGCHdrInfo(pNewCodeInfo->GetGCInfoToken(),
852 pNewCodeInfo->GetRelOffset(),
853 &newInfo);
854
855 //1) Error checking up front. If we get through here, everything
856 // else should work
857
858 if (!oldInfo.editNcontinue || !newInfo.editNcontinue) {
859 LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC EnC_INFOLESS_METHOD\n"));
860 return CORDBG_E_ENC_INFOLESS_METHOD;
861 }
862
863 if (!oldInfo.ebpFrame || !newInfo.ebpFrame) {
864 LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC Esp frames NYI\n"));
865 return E_FAIL; // Esp frames NYI
866 }
867
868 if (pCtx->Esp != pCtx->Ebp - oldInfo.stackSize + sizeof(DWORD)) {
869 LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC stack should be empty\n"));
870 return E_FAIL; // stack should be empty - <TODO> @TODO : Barring localloc</TODO>
871 }
872
873 if (oldInfo.handlers)
874 {
875 bool hasInnerFilter;
876 TADDR baseSP;
877 FrameType frameType = GetHandlerFrameInfo(&oldInfo, pCtx->Ebp,
878 pCtx->Esp, IGNORE_VAL,
879 &baseSP, NULL, &hasInnerFilter);
880 _ASSERTE(frameType != FR_INVALID);
881 _ASSERTE(!hasInnerFilter); // FixContextForEnC() is called for bottommost funclet
882
        // If the method is in a funclet, and if the frame size grows, we are in trouble.
884
885 if (frameType != FR_NORMAL)
886 {
            /* <TODO> @TODO : What if the new method offset is in a funclet,
888 and the old is not, or the nesting level changed, etc </TODO> */
889
890 if (oldInfo.stackSize != newInfo.stackSize) {
891 LOG((LF_ENC, LL_INFO100, "**Error** EECM::FixContextForEnC stack size mismatch\n"));
892 return CORDBG_E_ENC_IN_FUNCLET;
893 }
894 }
895 }
896
897 /* @TODO: Check if we have grown out of space for locals, in the face of localloc */
898 _ASSERTE(!oldInfo.localloc && !newInfo.localloc);
899
900 // Always reserve space for the securityCheck slot
901 _ASSERTE(oldInfo.securityCheck && newInfo.securityCheck);
902
903 // @TODO: If nesting level grows above the MAX_EnC_HANDLER_NESTING_LEVEL,
    // we should return EnC_NESTED_HANDLERS
905 _ASSERTE(oldInfo.handlers && newInfo.handlers);
906
907 LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: Checks out\n"));
908
909#elif defined(_TARGET_AMD64_)
910
911 // Strategy for zeroing out the frame on x64:
912 //
913 // The stack frame looks like this (stack grows up)
914 //
915 // =======================================
916 // <--- RSP == RBP (invariant: localalloc disallowed before remap)
917 // Arguments for next call (if there is one)
918 // PSPSym (optional)
919 // JIT temporaries (if any)
920 // Security object (if any)
921 // Local variables (if any)
922 // ---------------------------------------
923 // Frame header (stuff we must preserve, such as bool for synchronized
924 // methods, saved RBP, etc.)
925 // Return address (also included in frame header)
926 // ---------------------------------------
927 // Arguments for this frame (that's getting remapped). Will naturally be preserved
928 // since fixed-frame size doesn't include this.
929 // =======================================
930 //
931 // Goal: Zero out everything AFTER (above) frame header.
932 //
933 // How do we find this stuff?
934 //
935 // EECodeInfo::GetFixedStackSize() gives us the full size from the top ("Arguments
936 // for next call") all the way down to and including Return Address.
937 //
938 // GetSizeOfEditAndContinuePreservedArea() gives us the size in bytes of the
939 // frame header at the bottom.
940 //
941 // So we start at RSP, and zero out:
942 // GetFixedStackSize() - GetSizeOfEditAndContinuePreservedArea() bytes.
943 //
944 // We'll need to restore PSPSym; location gotten from GCInfo.
945 // We'll need to copy security object; location gotten from GCInfo.
946
947 // GCInfo for old method
948 GcInfoDecoder oldGcDecoder(
949 pOldCodeInfo->GetGCInfoToken(),
950 GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
951 0 // Instruction offset (not needed)
952 );
953
954 // GCInfo for new method
955 GcInfoDecoder newGcDecoder(
956 pNewCodeInfo->GetGCInfoToken(),
957 GcInfoDecoderFlags(DECODE_SECURITY_OBJECT | DECODE_PSP_SYM | DECODE_EDIT_AND_CONTINUE),
958 0 // Instruction offset (not needed)
959 );
960
961 UINT32 oldSizeOfPreservedArea = oldGcDecoder.GetSizeOfEditAndContinuePreservedArea();
962 UINT32 newSizeOfPreservedArea = newGcDecoder.GetSizeOfEditAndContinuePreservedArea();
963
964 // This ensures the JIT generated EnC compliant code.
965 if ((oldSizeOfPreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA) ||
966 (newSizeOfPreservedArea == NO_SIZE_OF_EDIT_AND_CONTINUE_PRESERVED_AREA))
967 {
968 _ASSERTE(!"FixContextForEnC called on a non-EnC-compliant method frame");
969 return CORDBG_E_ENC_INFOLESS_METHOD;
970 }
971
972 // JIT is required to emit frame register for EnC-compliant code
973 _ASSERTE(pOldCodeInfo->HasFrameRegister());
974 _ASSERTE(pNewCodeInfo->HasFrameRegister());
975
976 TADDR oldStackBase = GetSP(&oldCtx);
977
978 // This verifies no localallocs were used in the old method. (RBP == RSP for
979 // EnC-compliant x64 code.)
980 if (oldStackBase != oldCtx.Rbp)
981 return E_FAIL;
982
983 // EnC remap inside handlers is not supported
984 if (pOldCodeInfo->IsFunclet() || pNewCodeInfo->IsFunclet())
985 return CORDBG_E_ENC_IN_FUNCLET;
986
987 if (oldSizeOfPreservedArea != newSizeOfPreservedArea)
988 {
989 _ASSERTE(!"FixContextForEnC called with method whose frame header size changed from old to new version.");
990 return E_FAIL;
991 }
992
993 // Note: we cannot assert anything about the relationship between oldFixedStackSize
994 // and newFixedStackSize. It's possible the edited frame grows (new locals) or
995 // shrinks (less temporaries).
996
997 DWORD oldFixedStackSize = pOldCodeInfo->GetFixedStackSize();
998 DWORD newFixedStackSize = pNewCodeInfo->GetFixedStackSize();
999
1000 TADDR callerSP = oldStackBase + oldFixedStackSize;
1001
1002 // If the old code saved a security object, store the object's reference now.
1003 OBJECTREF securityObject = NULL;
1004 INT32 nOldSecurityObjectStackSlot = oldGcDecoder.GetSecurityObjectStackSlot();
1005 if (nOldSecurityObjectStackSlot != NO_SECURITY_OBJECT)
1006 {
1007 securityObject = ObjectToOBJECTREF(*PTR_PTR_Object(callerSP + nOldSecurityObjectStackSlot));
1008 }
1009
1010#ifdef _DEBUG
1011 // If the old method has a PSPSym, then its value should == FP
1012 INT32 nOldPspSymStackSlot = oldGcDecoder.GetPSPSymStackSlot();
1013 if (nOldPspSymStackSlot != NO_PSP_SYM)
1014 {
1015 // Read the PSP.
1016 TADDR oldPSP = *PTR_TADDR(oldStackBase + nOldPspSymStackSlot);
1017
1018 // Now we're set up to assert that PSPSym's value == FP
1019 _ASSERTE(oldPSP == GetFP(&oldCtx));
1020 }
1021#endif // _DEBUG
1022
1023#else
1024 PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
1025#endif
1026
1027 // 2) Get all the info about current variables, registers, etc
1028
1029 const ICorDebugInfo::NativeVarInfo * pOldVar;
1030
1031 // sorted by varNumber
1032 ICorDebugInfo::NativeVarInfo * oldMethodVarsSorted = NULL;
1033 ICorDebugInfo::NativeVarInfo * oldMethodVarsSortedBase = NULL;
1034 ICorDebugInfo::NativeVarInfo *newMethodVarsSorted = NULL;
1035 ICorDebugInfo::NativeVarInfo *newMethodVarsSortedBase = NULL;
1036
1037 SIZE_T *rgVal1 = NULL;
1038 SIZE_T *rgVal2 = NULL;
1039
1040 {
1041 SIZE_T local;
1042
1043 // We'll need to sort the old native var info by variable number, since the
        // order of them isn't necessarily the same. We'll use the number as the key.
1045 // We will assume we may have hidden arguments (which have negative values as the index)
1046
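        // The "sorted" arrays below are indexed by varNumber biased by
        // -UNKNOWN_ILNUM, so the hidden/special vars (negative varNumbers) land
        // at the front of the array and the explicit vars (varNumber >= 0)
        // follow; that is why the counts start at unsigned(-UNKNOWN_ILNUM).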
1047 unsigned oldNumVars = unsigned(-ICorDebugInfo::UNKNOWN_ILNUM);
1048 for (pOldVar = oldMethodVars, local = 0;
1049 local < oldMethodVarsCount;
1050 local++, pOldVar++)
1051 {
1052 DWORD varNumber = pOldVar->varNumber;
1053 if (signed(varNumber) >= 0)
1054 {
1055 // This is an explicit (not special) var, so add its varNumber + 1 to our
1056 // max count ("+1" because varNumber is zero-based).
1057 oldNumVars = max(oldNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1);
1058 }
1059 }
1060
1061 oldMethodVarsSortedBase = new (nothrow) ICorDebugInfo::NativeVarInfo[oldNumVars];
1062 if (!oldMethodVarsSortedBase)
1063 {
1064 hr = E_FAIL;
1065 goto ErrExit;
1066 }
1067 oldMethodVarsSorted = oldMethodVarsSortedBase + (-ICorDebugInfo::UNKNOWN_ILNUM);
1068
1069 memset((void *)oldMethodVarsSortedBase, 0, oldNumVars * sizeof(ICorDebugInfo::NativeVarInfo));
1070
1071 for (local = 0; local < oldNumVars;local++)
1072 oldMethodVarsSortedBase[local].loc.vlType = ICorDebugInfo::VLT_INVALID;
1073
1074 BYTE **rgVCs = NULL;
1075 DWORD oldMethodOffset = pOldCodeInfo->GetRelOffset();
1076
1077 for (pOldVar = oldMethodVars, local = 0;
1078 local < oldMethodVarsCount;
1079 local++, pOldVar++)
1080 {
1081 DWORD varNumber = pOldVar->varNumber;
1082
1083 _ASSERTE(varNumber + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < oldNumVars);
1084
1085 // Only care about old local variables alive at oldMethodOffset
1086 if (pOldVar->startOffset <= oldMethodOffset &&
1087 pOldVar->endOffset > oldMethodOffset)
1088 {
1089 oldMethodVarsSorted[varNumber] = *pOldVar;
1090 }
1091 }
1092
1093 // 3) Next sort the new var info by varNumber. We want to do this here, since
1094 // we're allocating memory (which may fail) - do this before going to step 2
1095
1096 // First, count the new vars the same way we did the old vars above.
1097
1098 const ICorDebugInfo::NativeVarInfo * pNewVar;
1099
1100 unsigned newNumVars = unsigned(-ICorDebugInfo::UNKNOWN_ILNUM);
1101 for (pNewVar = newMethodVars, local = 0;
1102 local < newMethodVarsCount;
1103 local++, pNewVar++)
1104 {
1105 DWORD varNumber = pNewVar->varNumber;
1106 if (signed(varNumber) >= 0)
1107 {
1108 // This is an explicit (not special) var, so add its varNumber + 1 to our
1109 // max count ("+1" because varNumber is zero-based).
1110 newNumVars = max(newNumVars, unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) + varNumber + 1);
1111 }
1112 }
1113
1114 // sorted by varNumber
1115 newMethodVarsSortedBase = new (nothrow) ICorDebugInfo::NativeVarInfo[newNumVars];
1116 if (!newMethodVarsSortedBase)
1117 {
1118 hr = E_FAIL;
1119 goto ErrExit;
1120 }
1121 newMethodVarsSorted = newMethodVarsSortedBase + (-ICorDebugInfo::UNKNOWN_ILNUM);
1122
1123 memset(newMethodVarsSortedBase, 0, newNumVars * sizeof(ICorDebugInfo::NativeVarInfo));
1124 for (local = 0; local < newNumVars;local++)
1125 newMethodVarsSortedBase[local].loc.vlType = ICorDebugInfo::VLT_INVALID;
1126
1127 DWORD newMethodOffset = pNewCodeInfo->GetRelOffset();
1128
1129 for (pNewVar = newMethodVars, local = 0;
1130 local < newMethodVarsCount;
1131 local++, pNewVar++)
1132 {
1133 DWORD varNumber = pNewVar->varNumber;
1134
1135 _ASSERTE(varNumber + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < newNumVars);
1136
1137 // Only care about new local variables alive at newMethodOffset
1138 if (pNewVar->startOffset <= newMethodOffset &&
1139 pNewVar->endOffset > newMethodOffset)
1140 {
1141 newMethodVarsSorted[varNumber] = *pNewVar;
1142 }
1143 }
1144
1145 _ASSERTE(newNumVars >= oldNumVars ||
1146 !"Not allowed to reduce the number of locals between versions!");
1147
1148 LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: gathered info!\n"));
1149
1150 rgVal1 = new (nothrow) SIZE_T[newNumVars];
1151 if (rgVal1 == NULL)
1152 {
1153 hr = E_FAIL;
1154 goto ErrExit;
1155 }
1156
1157 rgVal2 = new (nothrow) SIZE_T[newNumVars];
1158 if (rgVal2 == NULL)
1159 {
1160 hr = E_FAIL;
1161 goto ErrExit;
1162 }
1163
1164 // 4) Next we'll zero them out, so any variables that aren't in scope
1165 // in the old method, but are in scope in the new, will have the
1166 // default, zero, value.
1167
1168 memset(rgVal1, 0, sizeof(SIZE_T) * newNumVars);
1169 memset(rgVal2, 0, sizeof(SIZE_T) * newNumVars);
1170
1171 unsigned varsToGet = (oldNumVars > newNumVars) ? newNumVars
1172 : oldNumVars;
1173
1174 // 2) Get all the info about current variables, registers, etc.
1175
1176 hr = g_pDebugInterface->GetVariablesFromOffset(pOldCodeInfo->GetMethodDesc(),
1177 varsToGet,
1178 oldMethodVarsSortedBase,
1179 oldMethodOffset,
1180 &oldCtx,
1181 rgVal1,
1182 rgVal2,
1183 newNumVars,
1184 &rgVCs);
1185 if (FAILED(hr))
1186 {
1187 goto ErrExit;
1188 }
1189
1190
1191 LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: got vars!\n"));
1192
1193 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*
1194 * IMPORTANT : Once we start munging on the context, we cannot return
1195 * EnC_FAIL, as this should be a transacted commit,
1196 **=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
1197
1198#if defined(_TARGET_X86_)
1199 // Zero out all the registers as some may hold new variables.
1200 pCtx->Eax = pCtx->Ecx = pCtx->Edx = pCtx->Ebx =
1201 pCtx->Esi = pCtx->Edi = 0;
1202
1203 // 3) zero out the stack frame - this'll initialize _all_ variables
1204
1205 /*-------------------------------------------------------------------------
1206 * Adjust the stack height
1207 */
1208 pCtx->Esp -= (newInfo.stackSize - oldInfo.stackSize);
1209
    // Zero-init the local and temporary section of the new stack frame being careful to avoid
1211 // touching anything in the frame header.
1212 // This is necessary to ensure that any JIT temporaries in the old version can't be mistaken
1213 // for ObjRefs now.
1214 size_t frameHeaderSize = GetSizeOfFrameHeaderForEnC( &newInfo );
1215 _ASSERTE( frameHeaderSize <= oldInfo.stackSize );
1216 _ASSERTE( GetSizeOfFrameHeaderForEnC( &oldInfo ) == frameHeaderSize );
1217
1218#elif defined(_TARGET_AMD64_)
1219
1220 // Next few statements zero out all registers that may end up holding new variables.
1221
1222 // volatile int registers (JIT may use these to enregister variables)
1223 pCtx->Rax = pCtx->Rcx = pCtx->Rdx = pCtx->R8 = pCtx->R9 = pCtx->R10 = pCtx->R11 = 0;
1224
1225 // volatile float registers
1226 pCtx->Xmm1.High = pCtx->Xmm1.Low = 0;
1227 pCtx->Xmm2.High = pCtx->Xmm2.Low = 0;
1228 pCtx->Xmm3.High = pCtx->Xmm3.Low = 0;
1229 pCtx->Xmm4.High = pCtx->Xmm4.Low = 0;
1230 pCtx->Xmm5.High = pCtx->Xmm5.Low = 0;
1231
1232 // Any saved nonvolatile registers should also be zeroed out, but there are none
1233 // in EnC-compliant x64 code. Yes, you read that right. Registers like RDI, RSI,
1234 // RBX, etc., which are often saved in the prolog of non-EnC code are NOT saved in
1235 // EnC code. EnC code instead just agrees never to use those registers so they
1236 // remain pristine for the caller (except RBP, which is considered part of the frame
1237 // header, and is thus not zeroed out by us).
1238
1239 // 3) zero out the stack frame - this'll initialize _all_ variables
1240
1241 /*-------------------------------------------------------------------------
1242 * Adjust the stack height
1243 */
1244
1245 TADDR newStackBase = callerSP - newFixedStackSize;
1246
1247 SetSP(pCtx, newStackBase);
1248
1249 // We want to zero-out everything pushed after the frame header. This way we'll zero
1250 // out locals (both old & new) and temporaries. This is necessary to ensure that any
1251 // JIT temporaries in the old version can't be mistaken for ObjRefs now. (I am told
1252 // this last point is less of an issue on x64 as it is on x86, but zeroing out the
1253 // temporaries is still the cleanest, most robust way to go.)
1254 size_t frameHeaderSize = newSizeOfPreservedArea;
1255 _ASSERTE(frameHeaderSize <= oldFixedStackSize);
1256 _ASSERTE(frameHeaderSize <= newFixedStackSize);
1257
1258 // For EnC-compliant x64 code, Rbp == Rsp. Since Rsp changed above, update Rbp now
1259 pCtx->Rbp = newStackBase;
1260#else // !X86, !AMD64
1261 PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
1262#endif
1263
1264 // Perform some debug-only sanity checks on stack variables. Some checks are
1265 // performed differently between X86/AMD64.
1266
1267#ifdef _DEBUG
1268 for( unsigned i = 0; i < newNumVars; i++ )
1269 {
1270 // Make sure that stack variables existing in both old and new methods did not
1271 // move. This matters if the address of a local is used in the remapped method.
1272 // For example:
1273 //
1274 // static unsafe void Main(string[] args)
1275 // {
1276 // int x;
1277 // int* p = &x;
1278 // <- Edit made here - cannot move address of x
1279 // *p = 5;
1280 // }
1281 //
1282 if ((i + unsigned(-ICorDebugInfo::UNKNOWN_ILNUM) < oldNumVars) && // Does variable exist in old method?
1283 (oldMethodVarsSorted[i].loc.vlType == ICorDebugInfo::VLT_STK) && // Is the variable on the stack?
1284 (newMethodVarsSorted[i].loc.vlType == ICorDebugInfo::VLT_STK))
1285 {
1286 SIZE_T * pOldVarStackLocation = NativeVarStackAddr(oldMethodVarsSorted[i].loc, &oldCtx);
1287 SIZE_T * pNewVarStackLocation = NativeVarStackAddr(newMethodVarsSorted[i].loc, pCtx);
1288 _ASSERTE(pOldVarStackLocation == pNewVarStackLocation);
1289 }
1290
1291 // Sanity-check that the range we're clearing contains all of the stack variables
1292
1293#if defined(_TARGET_X86_)
1294 const ICorDebugInfo::VarLoc &varLoc = newMethodVarsSortedBase[i].loc;
1295 if( varLoc.vlType == ICorDebugInfo::VLT_STK )
1296 {
1297 // This is an EBP frame, all stack variables should be EBP relative
1298 _ASSERTE( varLoc.vlStk.vlsBaseReg == ICorDebugInfo::REGNUM_EBP );
1299 // Generic special args may show up as locals with positive offset from EBP, so skip them
1300 if( varLoc.vlStk.vlsOffset <= 0 )
1301 {
1302 // Normal locals must occur after the header on the stack
1303 _ASSERTE( unsigned(-varLoc.vlStk.vlsOffset) >= frameHeaderSize );
1304 // Value must occur before the top of the stack
1305 _ASSERTE( unsigned(-varLoc.vlStk.vlsOffset) < newInfo.stackSize );
1306 }
1307
1308 // Ideally we'd like to verify that the stack locals (if any) start at exactly the end
1309 // of the header. However, we can't easily determine the size of value classes here,
1310 // and so (since the stack grows towards 0) can't easily determine where the end of
1311 // the local lies.
1312 }
1313#elif defined (_TARGET_AMD64_)
1314 switch(newMethodVarsSortedBase[i].loc.vlType)
1315 {
1316 default:
1317 // No validation here for non-stack locals
1318 break;
1319
1320 case ICorDebugInfo::VLT_STK_BYREF:
1321 {
1322 // For byrefs, verify that the ptr will be zeroed out
1323
1324 SIZE_T regOffs = GetRegOffsInCONTEXT(newMethodVarsSortedBase[i].loc.vlStk.vlsBaseReg);
1325 TADDR baseReg = *(TADDR *)(regOffs + (BYTE*)pCtx);
1326 TADDR addrOfPtr = baseReg + newMethodVarsSortedBase[i].loc.vlStk.vlsOffset;
1327
1328 _ASSERTE(
1329 // The ref must exist in the portion we'll zero-out
1330 (
1331 (newStackBase <= addrOfPtr) &&
1332 (addrOfPtr < newStackBase + (newFixedStackSize - frameHeaderSize))
1333 ) ||
1334 // OR in the caller's frame (for parameters)
1335 (addrOfPtr >= newStackBase + newFixedStackSize));
1336
1337 // Deliberately fall through, so that we also verify that the value that the ptr
1338 // points to will be zeroed out
1339 // ...
1340 }
1341
1342 case ICorDebugInfo::VLT_STK:
1343 case ICorDebugInfo::VLT_STK2:
1344 case ICorDebugInfo::VLT_REG_STK:
1345 case ICorDebugInfo::VLT_STK_REG:
1346 SIZE_T * pVarStackLocation = NativeVarStackAddr(newMethodVarsSortedBase[i].loc, pCtx);
1347 _ASSERTE (pVarStackLocation != NULL);
1348 _ASSERTE(
1349 // The value must exist in the portion we'll zero-out
1350 (
1351 (newStackBase <= (TADDR) pVarStackLocation) &&
1352 ((TADDR) pVarStackLocation < newStackBase + (newFixedStackSize - frameHeaderSize))
1353 ) ||
1354 // OR in the caller's frame (for parameters)
1355 ((TADDR) pVarStackLocation >= newStackBase + newFixedStackSize));
1356 break;
1357 }
1358#else // !X86, !X64
1359 PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
1360#endif
1361 }
1362
1363#endif // _DEBUG
1364
1365 // Clear the local and temporary stack space
1366
1367#if defined (_TARGET_X86_)
1368 memset((void*)(size_t)(pCtx->Esp), 0, newInfo.stackSize - frameHeaderSize );
1369#elif defined (_TARGET_AMD64_)
1370 memset((void*)newStackBase, 0, newFixedStackSize - frameHeaderSize);
1371
1372 // On AMD64, after zeroing out the stack, restore the security object and PSPSym...
1373
1374 // There is no relationship we can guarantee between the old code having a security
1375 // object and the new code having a security object. If the new code does have a
1376 // security object, then we copy over the old security object's reference if there
1377 // was one (else we copy over NULL, which is fine). If the new code doesn't have a
1378 // security object, we do nothing.
1379 INT32 nNewSecurityObjectStackSlot = newGcDecoder.GetSecurityObjectStackSlot();
1380 if (nNewSecurityObjectStackSlot != NO_SECURITY_OBJECT)
1381 {
1382 *PTR_PTR_Object(callerSP + nNewSecurityObjectStackSlot) = OBJECTREFToObject(securityObject);
1383 }
1384
1385 // Restore PSPSym for the new function. Its value should be set to our new FP. But
1386 // first, we gotta find PSPSym's location on the stack
1387 INT32 nNewPspSymStackSlot = newGcDecoder.GetPSPSymStackSlot();
1388 if (nNewPspSymStackSlot != NO_PSP_SYM)
1389 {
1390 *PTR_TADDR(newStackBase + nNewPspSymStackSlot) = GetFP(pCtx);
1391 }
1392#else // !X86, !X64
1393 PORTABILITY_ASSERT("Edit-and-continue not enabled on this platform.");
1394#endif
1395
1396 // 4) Put the variables from step 3 into their new locations.
1397
1398 LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: set vars!\n"));
1399
1400 // Move the old variables into their new places.
1401
1402 hr = g_pDebugInterface->SetVariablesAtOffset(pNewCodeInfo->GetMethodDesc(),
1403 newNumVars,
1404 newMethodVarsSortedBase,
1405 newMethodOffset,
1406 pCtx, // place them into the new context
1407 rgVal1,
1408 rgVal2,
1409 rgVCs);
1410
1411 /*-----------------------------------------------------------------------*/
1412 }
1413ErrExit:
1414 if (oldMethodVarsSortedBase)
1415 delete[] oldMethodVarsSortedBase;
1416 if (newMethodVarsSortedBase)
1417 delete[] newMethodVarsSortedBase;
1418 if (rgVal1 != NULL)
1419 delete[] rgVal1;
1420 if (rgVal2 != NULL)
1421 delete[] rgVal2;
1422
1423 LOG((LF_ENC, LL_INFO100, "EECM::FixContextForEnC: exiting!\n"));
1424
1425 return hr;
1426}
#endif // EnC_SUPPORTED
1428
1429#endif // #ifndef DACCESS_COMPILE
1430
1431#ifdef USE_GC_INFO_DECODER
1432/*****************************************************************************
1433 *
1434 * Is the function currently at a "GC safe point" ?
1435 */
1436bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
1437 DWORD dwRelOffset)
1438{
1439 CONTRACTL {
1440 NOTHROW;
1441 GC_NOTRIGGER;
1442 } CONTRACTL_END;
1443
1444 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
1445
1446 GcInfoDecoder gcInfoDecoder(
1447 gcInfoToken,
1448 DECODE_INTERRUPTIBILITY,
1449 dwRelOffset
1450 );
1451
1452 return gcInfoDecoder.IsInterruptible();
1453}
1454
1455#if defined(_TARGET_ARM_) || defined(_TARGET_ARM64_)
1456bool EECodeManager::HasTailCalls( EECodeInfo *pCodeInfo)
1457{
1458 CONTRACTL {
1459 NOTHROW;
1460 GC_NOTRIGGER;
1461 } CONTRACTL_END;
1462
1463 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
1464
1465 GcInfoDecoder gcInfoDecoder(
1466 gcInfoToken,
1467 DECODE_HAS_TAILCALLS,
1468 0
1469 );
1470
1471 return gcInfoDecoder.HasTailCalls();
1472}
1473#endif // _TARGET_ARM_ || _TARGET_ARM64_
1474
1475#if defined(_TARGET_AMD64_) && defined(_DEBUG)
1476
1477struct FindEndOfLastInterruptibleRegionState
1478{
1479 unsigned curOffset;
1480 unsigned endOffset;
1481 unsigned lastRangeOffset;
1482};
1483
1484bool FindEndOfLastInterruptibleRegionCB (
1485 UINT32 startOffset,
1486 UINT32 stopOffset,
1487 LPVOID hCallback)
1488{
1489 FindEndOfLastInterruptibleRegionState *pState = (FindEndOfLastInterruptibleRegionState*)hCallback;
1490
1491 //
1492 // If the current range doesn't overlap the given range, keep searching.
1493 //
1494 if ( startOffset >= pState->endOffset
1495 || stopOffset < pState->curOffset)
1496 {
1497 return false;
1498 }
1499
1500 //
1501 // If the range overlaps the end, then the last point is the end.
1502 //
1503 if ( stopOffset > pState->endOffset
1504 /*&& startOffset < pState->endOffset*/)
1505 {
1506 // The ranges should be sorted in increasing order.
1507 CONSISTENCY_CHECK(startOffset >= pState->lastRangeOffset);
1508
1509 pState->lastRangeOffset = pState->endOffset;
1510 return true;
1511 }
1512
1513 //
    // See if the end of this range is the closest to the end that we've found
1515 // so far.
1516 //
1517 if (stopOffset > pState->lastRangeOffset)
1518 pState->lastRangeOffset = stopOffset;
1519
1520 return false;
1521}
1522
1523/*
1524 Locates the end of the last interruptible region in the given code range.
1525 Returns 0 if the entire range is uninterruptible. Returns the end point
1526 if the entire range is interruptible.
1527*/
1528unsigned EECodeManager::FindEndOfLastInterruptibleRegion(unsigned curOffset,
1529 unsigned endOffset,
1530 GCInfoToken gcInfoToken)
1531{
1532#ifndef DACCESS_COMPILE
1533 GcInfoDecoder gcInfoDecoder(
1534 gcInfoToken,
1535 DECODE_FOR_RANGES_CALLBACK
1536 );
1537
1538 FindEndOfLastInterruptibleRegionState state;
1539 state.curOffset = curOffset;
1540 state.endOffset = endOffset;
1541 state.lastRangeOffset = 0;
1542
1543 gcInfoDecoder.EnumerateInterruptibleRanges(&FindEndOfLastInterruptibleRegionCB, &state);
1544
1545 return state.lastRangeOffset;
1546#else
1547 DacNotImpl();
1548 return NULL;
1549#endif // #ifndef DACCESS_COMPILE
1550}
1551
1552#endif // _TARGET_AMD64_ && _DEBUG
1553
1554
1555#else // !USE_GC_INFO_DECODER
1556
1557/*****************************************************************************
1558 *
1559 * Is the function currently at a "GC safe point" ?
1560 */
1561bool EECodeManager::IsGcSafe( EECodeInfo *pCodeInfo,
1562 DWORD dwRelOffset)
1563{
1564 CONTRACTL {
1565 NOTHROW;
1566 GC_NOTRIGGER;
1567 SUPPORTS_DAC;
1568 } CONTRACTL_END;
1569
1570 hdrInfo info;
1571 BYTE * table;
1572
1573 /* Extract the necessary information from the info block header */
1574
1575 table = (BYTE *)DecodeGCHdrInfo(pCodeInfo->GetGCInfoToken(),
1576 dwRelOffset,
1577 &info);
1578
1579 /* workaround: prevent interruption within prolog/epilog */
1580
1581 if (info.prologOffs != hdrInfo::NOT_IN_PROLOG || info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
1582 return false;
1583
1584#if VERIFY_GC_TABLES
1585 _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
1586#endif
1587
1588 return (info.interruptible);
1589}
1590
1591
1592/*****************************************************************************/
1593static
1594PTR_CBYTE skipToArgReg(const hdrInfo& info, PTR_CBYTE table)
1595{
1596 CONTRACTL {
1597 NOTHROW;
1598 GC_NOTRIGGER;
1599 SUPPORTS_DAC;
1600 } CONTRACTL_END;
1601
1602#ifdef _DEBUG
1603 PTR_CBYTE tableStart = table;
1604#else
1605 if (info.argTabOffset != INVALID_ARGTAB_OFFSET)
1606 {
1607 return table + info.argTabOffset;
1608 }
1609#endif
1610
1611 unsigned count;
1612
1613#if VERIFY_GC_TABLES
1614 _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
1615#endif
1616
1617 /* Skip over the untracked frame variable table */
1618
1619 count = info.untrackedCnt;
1620 while (count-- > 0) {
1621 fastSkipSigned(table);
1622 }
1623
1624#if VERIFY_GC_TABLES
1625 _ASSERTE(*castto(table, unsigned short *)++ == 0xCAFE);
1626#endif
1627
1628 /* Skip over the frame variable lifetime table */
1629
1630 count = info.varPtrTableSize;
1631 while (count-- > 0) {
1632 fastSkipUnsigned(table); fastSkipUnsigned(table); fastSkipUnsigned(table);
1633 }
1634
1635#if VERIFY_GC_TABLES
1636 _ASSERTE(*castto(table, unsigned short *) == 0xBABE);
1637#endif
1638
1639#ifdef _DEBUG
1640 if (info.argTabOffset != INVALID_ARGTAB_OFFSET)
1641 {
1642 CONSISTENCY_CHECK_MSGF((info.argTabOffset == (unsigned) (table - tableStart)),
1643 ("table = %p, tableStart = %p, info.argTabOffset = %d", table, tableStart, info.argTabOffset));
1644 }
1645#endif
1646
1647 return table;
1648}
1649
1650/*****************************************************************************/
1651
1652#define regNumToMask(regNum) RegMask(1<<regNum)
1653
1654/*****************************************************************************
1655 Helper for scanArgRegTable() and scanArgRegTableI() for regMasks
1656 */
1657
1658void * getCalleeSavedReg(PREGDISPLAY pContext, regNum reg)
1659{
1660 LIMITED_METHOD_CONTRACT;
1661 SUPPORTS_DAC;
1662
1663 switch (reg)
1664 {
1665 case REGI_EBP: return pContext->GetEbpLocation();
1666 case REGI_EBX: return pContext->GetEbxLocation();
1667 case REGI_ESI: return pContext->GetEsiLocation();
1668 case REGI_EDI: return pContext->GetEdiLocation();
1669
1670 default: _ASSERTE(!"bad info.thisPtrResult"); return NULL;
1671 }
1672}
1673
1674/*****************************************************************************
1675 These functions converts the bits in the GC encoding to RegMask
1676 */
1677
1678inline
1679RegMask convertCalleeSavedRegsMask(unsigned inMask) // EBP,EBX,ESI,EDI
1680{
1681 LIMITED_METHOD_CONTRACT;
1682 SUPPORTS_DAC;
1683
1684 _ASSERTE((inMask & 0x0F) == inMask);
1685
1686 unsigned outMask = RM_NONE;
1687 if (inMask & 0x1) outMask |= RM_EDI;
1688 if (inMask & 0x2) outMask |= RM_ESI;
1689 if (inMask & 0x4) outMask |= RM_EBX;
1690 if (inMask & 0x8) outMask |= RM_EBP;
1691
1692 return (RegMask) outMask;
1693}
1694
1695inline
1696RegMask convertAllRegsMask(unsigned inMask) // EAX,ECX,EDX,EBX, EBP,ESI,EDI
1697{
1698 LIMITED_METHOD_CONTRACT;
1699 SUPPORTS_DAC;
1700
1701 _ASSERTE((inMask & 0xEF) == inMask);
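    // Note: bit 0x10 (which would correspond to ESP in the usual x86 register
    // ordering) is never set, hence the 0xEF mask in the assert above.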
1702
1703 unsigned outMask = RM_NONE;
1704 if (inMask & 0x01) outMask |= RM_EAX;
1705 if (inMask & 0x02) outMask |= RM_ECX;
1706 if (inMask & 0x04) outMask |= RM_EDX;
1707 if (inMask & 0x08) outMask |= RM_EBX;
1708 if (inMask & 0x20) outMask |= RM_EBP;
1709 if (inMask & 0x40) outMask |= RM_ESI;
1710 if (inMask & 0x80) outMask |= RM_EDI;
1711
1712 return (RegMask)outMask;
1713}
1714
1715/*****************************************************************************
 * Scan the register argument table for the not-fully-interruptible case.
 This function is called to find all live objects (pushed arguments)
 and to get the stack base for EBP-less methods.

 NOTE: If info->argTabResult is NULL, info->argHnumResult indicates
 how many bits in argMask are valid.
 If info->argTabResult is non-NULL, then the argMask field does
 not fit in 32 bits and the value in argMask is meaningless.
 Instead argHnum specifies the number of (variable-length) elements
 in the array, and argTabBytes specifies the total byte size of the
 array. [ Note this is an extremely rare case ]
1727 */
1728
1729#ifdef _PREFAST_
1730#pragma warning(push)
1731#pragma warning(disable:21000) // Suppress PREFast warning about overly large function
1732#endif
1733static
1734unsigned scanArgRegTable(PTR_CBYTE table,
1735 unsigned curOffs,
1736 hdrInfo * info)
1737{
1738 CONTRACTL {
1739 NOTHROW;
1740 GC_NOTRIGGER;
1741 SUPPORTS_DAC;
1742 } CONTRACTL_END;
1743
1744 regNum thisPtrReg = REGI_NA;
1745#ifdef _DEBUG
1746 bool isCall = false;
1747#endif
1748 unsigned regMask = 0; // EBP,EBX,ESI,EDI
1749 unsigned argMask = 0;
1750 unsigned argHnum = 0;
1751 PTR_CBYTE argTab = 0;
1752 unsigned argTabBytes = 0;
1753 unsigned stackDepth = 0;
1754
1755 unsigned iregMask = 0; // EBP,EBX,ESI,EDI
1756 unsigned iargMask = 0;
1757 unsigned iptrMask = 0;
1758
1759#if VERIFY_GC_TABLES
1760 _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
1761#endif
1762
1763 unsigned scanOffs = 0;
1764
1765 _ASSERTE(scanOffs <= info->methodSize);
1766
1767 if (info->ebpFrame) {
1768 /*
1769 Encoding table for methods with an EBP frame and
1770 that are not fully interruptible
1771
1772 The encoding used is as follows:
1773
1774 this pointer encodings:
1775
1776 01000000 this pointer in EBX
1777 00100000 this pointer in ESI
1778 00010000 this pointer in EDI
1779
1780 tiny encoding:
1781
1782 0bsdDDDD
1783 requires code delta < 16 (4-bits)
1784 requires pushed argmask == 0
1785
1786 where DDDD is code delta
1787 b indicates that register EBX is a live pointer
1788 s indicates that register ESI is a live pointer
1789 d indicates that register EDI is a live pointer
1790
1791 small encoding:
1792
1793 1DDDDDDD bsdAAAAA
1794
1795 requires code delta < 120 (7-bits)
1796 requires pushed argmask < 64 (5-bits)
1797
1798 where DDDDDDD is code delta
1799 AAAAA is the pushed args mask
1800 b indicates that register EBX is a live pointer
1801 s indicates that register ESI is a live pointer
1802 d indicates that register EDI is a live pointer
1803
1804 medium encoding
1805
1806 0xFD aaaaaaaa AAAAdddd bseDDDDD
1807
 requires code delta < 0x200 (9-bits)
 requires pushed argmask < 0x1000 (12-bits)
1810
1811 where DDDDD is the upper 5-bits of the code delta
1812 dddd is the low 4-bits of the code delta
1813 AAAA is the upper 4-bits of the pushed arg mask
1814 aaaaaaaa is the low 8-bits of the pushed arg mask
1815 b indicates that register EBX is a live pointer
1816 s indicates that register ESI is a live pointer
1817 e indicates that register EDI is a live pointer
1818
1819 medium encoding with interior pointers
1820
 0xF9 DDDDDDDD bsdAAAAA iiiIIIII

 requires code delta < 0x100 (8-bits)
 requires pushed argmask < 0x20 (5-bits)

 where DDDDDDDD is the code delta
 b indicates that register EBX is a live pointer
 s indicates that register ESI is a live pointer
 d indicates that register EDI is a live pointer
 AAAAA is the pushed arg mask
 iii indicates which of EBX,ESI,EDI are interior pointers
 IIIII indicates which bits in the arg mask are interior
 pointers
1834
1835 large encoding
1836
1837 0xFE [0BSD0bsd][32-bit code delta][32-bit argMask]
1838
1839 b indicates that register EBX is a live pointer
1840 s indicates that register ESI is a live pointer
1841 d indicates that register EDI is a live pointer
1842 B indicates that register EBX is an interior pointer
1843 S indicates that register ESI is an interior pointer
1844 D indicates that register EDI is an interior pointer
1845 requires pushed argmask < 32-bits
1846
1847 large encoding with interior pointers
1848
1849 0xFA [0BSD0bsd][32-bit code delta][32-bit argMask][32-bit interior pointer mask]
1850
1851
1852 b indicates that register EBX is a live pointer
1853 s indicates that register ESI is a live pointer
1854 d indicates that register EDI is a live pointer
1855 B indicates that register EBX is an interior pointer
1856 S indicates that register ESI is an interior pointer
1857 D indicates that register EDI is an interior pointer
1858 requires pushed argmask < 32-bits
1859 requires pushed iArgmask < 32-bits
1860
1861 huge encoding This is the only encoding that supports
1862 a pushed argmask which is greater than
1863 32-bits.
1864
1865 0xFB [0BSD0bsd][32-bit code delta]
1866 [32-bit table count][32-bit table size]
1867 [pushed ptr offsets table...]
1868
1869 b indicates that register EBX is a live pointer
1870 s indicates that register ESI is a live pointer
1871 d indicates that register EDI is a live pointer
1872 B indicates that register EBX is an interior pointer
1873 S indicates that register ESI is an interior pointer
1874 D indicates that register EDI is an interior pointer
1875 the list count is the number of entries in the list
 the list size gives the byte-length of the list
1877 the offsets in the list are variable-length
1878 */
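 /* Worked example (illustrative bytes only, not taken from a real method):
    the pair 0x85 0x62 is a "small" entry:
      0x85 = 1DDDDDDD with DDDDDDD = 5         -> advance scanOffs by 5
      0x62 = bsdAAAAA with b=0, s=1, d=1, AAAAA=00010
    so ESI and EDI hold live GC pointers and bit 1 of the pushed-arg mask
    marks a pushed GC pointer. */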
1879 while (scanOffs < curOffs)
1880 {
1881 iregMask = 0;
1882 iargMask = 0;
1883 argTab = NULL;
1884#ifdef _DEBUG
1885 isCall = true;
1886#endif
1887
1888 /* Get the next byte and check for a 'special' entry */
1889
1890 unsigned encType = *table++;
1891#if defined(DACCESS_COMPILE)
1892 // In this scenario, it is invalid to have a zero byte in the GC info encoding (refer to the
1893 // comments above). At least one bit has to be set. For example, a byte can represent which
1894 // register is the "this" pointer, and this byte has to be 0x10, 0x20, or 0x40. Having a zero
1895 // byte indicates there is most likely some sort of DAC error, and it may lead to problems such as
1896 // infinite loops. So we bail out early instead.
1897 if (encType == 0)
1898 {
1899 DacError(CORDBG_E_TARGET_INCONSISTENT);
1900 UNREACHABLE();
1901 }
1902#endif // DACCESS_COMPILE
1903
1904 switch (encType)
1905 {
1906 unsigned val, nxt;
1907
1908 default:
1909
1910 /* A tiny or small call entry */
1911 val = encType;
1912 if ((val & 0x80) == 0x00) {
1913 if (val & 0x0F) {
1914 /* A tiny call entry */
1915 scanOffs += (val & 0x0F);
1916 regMask = (val & 0x70) >> 4;
1917 argMask = 0;
1918 argHnum = 0;
1919 }
1920 else {
1921 /* This pointer liveness encoding */
1922 regMask = (val & 0x70) >> 4;
1923 if (regMask == 0x1)
1924 thisPtrReg = REGI_EDI;
1925 else if (regMask == 0x2)
1926 thisPtrReg = REGI_ESI;
1927 else if (regMask == 0x4)
1928 thisPtrReg = REGI_EBX;
1929 else
1930 _ASSERTE(!"illegal encoding for 'this' pointer liveness");
1931 }
1932 }
1933 else {
1934 /* A small call entry */
1935 scanOffs += (val & 0x7F);
1936 val = *table++;
1937 regMask = val >> 5;
1938 argMask = val & 0x1F;
1939 argHnum = 5;
1940 }
1941 break;
1942
1943 case 0xFD: // medium encoding
1944
1945 argMask = *table++;
1946 val = *table++;
1947 argMask |= ((val & 0xF0) << 4);
1948 argHnum = 12;
1949 nxt = *table++;
1950 scanOffs += (val & 0x0F) + ((nxt & 0x1F) << 4);
1951 regMask = nxt >> 5; // EBX,ESI,EDI
1952
1953 break;
1954
1955 case 0xF9: // medium encoding with interior pointers
1956
1957 scanOffs += *table++;
1958 val = *table++;
1959 argMask = val & 0x1F;
1960 argHnum = 5;
1961 regMask = val >> 5;
1962 val = *table++;
1963 iargMask = val & 0x1F;
1964 iregMask = val >> 5;
1965
1966 break;
1967
1968 case 0xFE: // large encoding
1969 case 0xFA: // large encoding with interior pointers
1970
1971 val = *table++;
1972 regMask = val & 0x7;
1973 iregMask = val >> 4;
1974 scanOffs += *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1975 argMask = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1976 argHnum = 31;
1977 if (encType == 0xFA) // read iargMask
1978 {
1979 iargMask = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1980 }
1981 break;
1982
1983 case 0xFB: // huge encoding This is the only partially interruptible
1984 // encoding that supports a pushed ArgMask
1985 // which is greater than 32-bits.
1986 // The ArgMask is encoded using the argTab
1987 val = *table++;
1988 regMask = val & 0x7;
1989 iregMask = val >> 4;
1990 scanOffs += *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1991 argHnum = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1992 argTabBytes = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
1993 argTab = table; table += argTabBytes;
1994
1995 argMask = 0;
1996 break;
1997
1998 case 0xFF:
1999 scanOffs = curOffs + 1;
2000 break;
2001
2002 } // end case
2003
2004 // iregMask & iargMask are subsets of regMask & argMask respectively
2005
2006 _ASSERTE((iregMask & regMask) == iregMask);
2007 _ASSERTE((iargMask & argMask) == iargMask);
2008
2009 } // end while
2010
2011 }
2012 else {
2013
2014/*
 * Encoding table for methods with an ESP frame that are not fully interruptible
2016 * This encoding does not support a pushed ArgMask greater than 32
2017 *
2018 * The encoding used is as follows:
2019 *
2020 * push 000DDDDD ESP push one item with 5-bit delta
2021 * push 00100000 [pushCount] ESP push multiple items
2022 * reserved 0011xxxx
2023 * skip 01000000 [Delta] Skip Delta, arbitrary sized delta
2024 * skip 0100DDDD Skip small Delta, for call (DDDD != 0)
2025 * pop 01CCDDDD ESP pop CC items with 4-bit delta (CC != 00)
2026 * call 1PPPPPPP Call Pattern, P=[0..79]
2027 * call 1101pbsd DDCCCMMM Call RegMask=pbsd,ArgCnt=CCC,
2028 * ArgMask=MMM Delta=commonDelta[DD]
2029 * call 1110pbsd [ArgCnt] [ArgMask] Call ArgCnt,RegMask=pbsd,[32-bit ArgMask]
2030 * call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
2031 * [32-bit PndCnt][32-bit PndSize][PndOffs...]
2032 * iptr 11110000 [IPtrMask] Arbitrary 32-bit Interior Pointer Mask
2033 * thisptr 111101RR This pointer is in Register RR
2034 * 00=EDI,01=ESI,10=EBX,11=EBP
2035 * reserved 111100xx xx != 00
2036 * reserved 111110xx xx != 00
2037 * reserved 11111xxx xxx != 000 && xxx != 111(EOT)
2038 *
2039 * The value 11111111 [0xFF] indicates the end of the table.
2040 *
2041 * An offset (at which stack-walking is performed) without an explicit encoding
2042 * is assumed to be a trivial call-site (no GC registers, stack empty before and
2043 * after) to avoid having to encode all trivial calls.
2044 *
2045 * Note on the encoding used for interior pointers
2046 *
 * The iptr encoding must immediately precede a call encoding. It is used to
 * transform normal GC pointer addresses into interior pointers for GC purposes.
 * The mask supplied to the iptr encoding is read from the least significant bit
 * to the most significant bit. (i.e. the lowest bit is read first)
2051 *
2052 * p indicates that register EBP is a live pointer
2053 * b indicates that register EBX is a live pointer
2054 * s indicates that register ESI is a live pointer
2055 * d indicates that register EDI is a live pointer
2056 * P indicates that register EBP is an interior pointer
2057 * B indicates that register EBX is an interior pointer
2058 * S indicates that register ESI is an interior pointer
2059 * D indicates that register EDI is an interior pointer
2060 *
 * As an example, the following sequence indicates that EDI, ESI and the 2nd pushed pointer
 * in ArgMask are really interior pointers. The pointer in EBP is a normal pointer:
2063 *
2064 * iptr 11110000 00010011 => read Interior Ptr, Interior Ptr, Normal Ptr, Normal Ptr, Interior Ptr
2065 * call 11010011 DDCCC011 RRRR=1011 => read EDI is a GC-pointer, ESI is a GC-pointer. EBP is a GC-pointer
2066 * MMM=0011 => read two GC-pointers arguments on the stack (nested call)
2067 *
2068 * Since the call instruction mentions 5 GC-pointers we list them in the required order:
2069 * EDI, ESI, EBP, 1st-pushed pointer, 2nd-pushed pointer
2070 *
 * Applying the interior pointer mask mmmmm=10011 to the above five ordered GC-pointers,
 * we learn that EDI and ESI are interior GC-pointers and that the second pushed arg is an
 * interior GC-pointer.
2074 */
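/*
 * Worked example (illustrative bytes only, not taken from a real method):
 *
 *      0x05 = 000DDDDD, DDDDD=00101      -> one item pushed on ESP, code delta 5
 *      0x65 = 01CCDDDD, CC=10, DDDD=0101 -> two items popped off ESP, code delta 5
 *      0xF5 = 111101RR, RR=01            -> the "this" pointer is currently in ESI
 */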
2075
2076#if defined(DACCESS_COMPILE)
2077 DWORD cbZeroBytes = 0;
2078#endif // DACCESS_COMPILE
2079
2080 while (scanOffs <= curOffs)
2081 {
2082 unsigned callArgCnt;
2083 unsigned skip;
2084 unsigned newRegMask, inewRegMask;
2085 unsigned newArgMask, inewArgMask;
2086 unsigned oldScanOffs = scanOffs;
2087
2088 if (iptrMask)
2089 {
2090 // We found this iptrMask in the previous iteration.
2091 // This iteration must be for a call. Set these variables
2092 // so that they are available at the end of the loop
2093
2094 inewRegMask = iptrMask & 0x0F; // EBP,EBX,ESI,EDI
2095 inewArgMask = iptrMask >> 4;
2096
2097 iptrMask = 0;
2098 }
2099 else
2100 {
2101 // Zero out any stale values.
2102
2103 inewRegMask = 0;
2104 inewArgMask = 0;
2105 }
2106
2107 /* Get the next byte and decode it */
2108
2109 unsigned val = *table++;
2110#if defined(DACCESS_COMPILE)
2111 // In this scenario, a 0 means that there is a push at the current offset. For a struct with
2112 // two double fields, the JIT may use two movq instructions to push the struct onto the stack, and
2113 // the JIT will encode 4 pushes at the same code offset. This means that we can have up to 4
2114 // consecutive bytes of 0 without changing the code offset. Having more than 4 consecutive bytes
2115 // of zero indicates that there is most likely some sort of DAC error, and it may lead to problems
2116 // such as infinite loops. So we bail out early instead.
2117 if (val == 0)
2118 {
2119 cbZeroBytes += 1;
2120 if (cbZeroBytes > 4)
2121 {
2122 DacError(CORDBG_E_TARGET_INCONSISTENT);
2123 UNREACHABLE();
2124 }
2125 }
2126 else
2127 {
2128 cbZeroBytes = 0;
2129 }
2130#endif // DACCESS_COMPILE
2131
2132#ifdef _DEBUG
2133 if (scanOffs != curOffs)
2134 isCall = false;
2135#endif
2136
2137 /* Check pushes, pops, and skips */
2138
2139 if (!(val & 0x80)) {
2140
2141 // iptrMask can immediately precede only calls
2142
2143 _ASSERTE(inewRegMask == 0);
2144 _ASSERTE(inewArgMask == 0);
2145
2146 if (!(val & 0x40)) {
2147
2148 unsigned pushCount;
2149
2150 if (!(val & 0x20))
2151 {
2152 //
2153 // push 000DDDDD ESP push one item, 5-bit delta
2154 //
2155 pushCount = 1;
2156 scanOffs += val & 0x1f;
2157 }
2158 else
2159 {
2160 //
2161 // push 00100000 [pushCount] ESP push multiple items
2162 //
2163 _ASSERTE(val == 0x20);
2164 pushCount = fastDecodeUnsigned(table);
2165 }
2166
2167 if (scanOffs > curOffs)
2168 {
2169 scanOffs = oldScanOffs;
2170 goto FINISHED;
2171 }
2172
2173 stackDepth += pushCount;
2174 }
2175 else if ((val & 0x3f) != 0) {
2176 //
2177 // pop 01CCDDDD pop CC items, 4-bit delta
2178 //
2179 scanOffs += val & 0x0f;
2180 if (scanOffs > curOffs)
2181 {
2182 scanOffs = oldScanOffs;
2183 goto FINISHED;
2184 }
2185 stackDepth -= (val & 0x30) >> 4;
2186
2187 } else if (scanOffs < curOffs) {
2188 //
2189 // skip 01000000 [Delta] Skip arbitrary sized delta
2190 //
2191 skip = fastDecodeUnsigned(table);
2192 scanOffs += skip;
2193 }
2194 else // don't process a skip if we are already at curOffs
2195 goto FINISHED;
2196
2197 /* reset regs and args state since we advance past last call site */
2198
2199 regMask = 0;
2200 iregMask = 0;
2201 argMask = 0;
2202 iargMask = 0;
2203 argHnum = 0;
2204
2205 }
2206 else /* It must be a call, thisptr, or iptr */
2207 {
2208 switch ((val & 0x70) >> 4) {
2209 default: // case 0-4, 1000xxxx through 1100xxxx
2210 //
2211 // call 1PPPPPPP Call Pattern, P=[0..79]
2212 //
2213 decodeCallPattern((val & 0x7f), &callArgCnt,
2214 &newRegMask, &newArgMask, &skip);
2215 // If we've already reached curOffs and the skip amount
2216 // is non-zero then we are done
2217 if ((scanOffs == curOffs) && (skip > 0))
2218 goto FINISHED;
2219 // otherwise process this call pattern
2220 scanOffs += skip;
2221 if (scanOffs > curOffs)
2222 goto FINISHED;
2223#ifdef _DEBUG
2224 isCall = true;
2225#endif
2226 regMask = newRegMask;
2227 argMask = newArgMask; argTab = NULL;
2228 iregMask = inewRegMask;
2229 iargMask = inewArgMask;
2230 stackDepth -= callArgCnt;
2231 argHnum = 2; // argMask is known to be <= 3
2232 break;
2233
2234 case 5:
2235 //
2236 // call 1101RRRR DDCCCMMM Call RegMask=RRRR,ArgCnt=CCC,
2237 // ArgMask=MMM Delta=commonDelta[DD]
2238 //
2239 newRegMask = val & 0xf; // EBP,EBX,ESI,EDI
2240 val = *table++; // read next byte
2241 skip = callCommonDelta[val>>6];
2242 // If we've already reached curOffs and the skip amount
2243 // is non-zero then we are done
2244 if ((scanOffs == curOffs) && (skip > 0))
2245 goto FINISHED;
2246 // otherwise process this call encoding
2247 scanOffs += skip;
2248 if (scanOffs > curOffs)
2249 goto FINISHED;
2250#ifdef _DEBUG
2251 isCall = true;
2252#endif
2253 regMask = newRegMask;
2254 iregMask = inewRegMask;
2255 callArgCnt = (val >> 3) & 0x7;
2256 stackDepth -= callArgCnt;
2257 argMask = (val & 0x7); argTab = NULL;
2258 iargMask = inewArgMask;
2259 argHnum = 3;
2260 break;
2261
2262 case 6:
2263 //
2264 // call 1110RRRR [ArgCnt] [ArgMask]
 // Call ArgCnt,RegMask=RRRR,ArgMask
2266 //
2267#ifdef _DEBUG
2268 isCall = true;
2269#endif
2270 regMask = val & 0xf; // EBP,EBX,ESI,EDI
2271 iregMask = inewRegMask;
2272 callArgCnt = fastDecodeUnsigned(table);
2273 stackDepth -= callArgCnt;
2274 argMask = fastDecodeUnsigned(table); argTab = NULL;
2275 iargMask = inewArgMask;
2276 argHnum = sizeof(argMask) * 8; // The size of argMask in bits
2277 break;
2278
2279 case 7:
2280 switch (val & 0x0C)
2281 {
2282 case 0x00:
2283 //
2284 // 0xF0 iptr 11110000 [IPtrMask] Arbitrary Interior Pointer Mask
2285 //
2286 iptrMask = fastDecodeUnsigned(table);
2287 break;
2288
2289 case 0x04:
2290 //
2291 // 0xF4 thisptr 111101RR This pointer is in Register RR
2292 // 00=EDI,01=ESI,10=EBX,11=EBP
2293 //
2294 {
2295 static const regNum calleeSavedRegs[] =
2296 { REGI_EDI, REGI_ESI, REGI_EBX, REGI_EBP };
2297 thisPtrReg = calleeSavedRegs[val&0x3];
2298 }
2299 break;
2300
2301 case 0x08:
2302 //
2303 // 0xF8 call 11111000 [PBSDpbsd][32-bit delta][32-bit ArgCnt]
2304 // [32-bit PndCnt][32-bit PndSize][PndOffs...]
2305 //
2306 val = *table++;
2307 skip = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
2308// [VSUQFE 4670]
2309 // If we've already reached curOffs and the skip amount
2310 // is non-zero then we are done
2311 if ((scanOffs == curOffs) && (skip > 0))
2312 goto FINISHED;
2313// [VSUQFE 4670]
2314 scanOffs += skip;
2315 if (scanOffs > curOffs)
2316 goto FINISHED;
2317#ifdef _DEBUG
2318 isCall = true;
2319#endif
2320 regMask = val & 0xF;
2321 iregMask = val >> 4;
2322 callArgCnt = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
2323 stackDepth -= callArgCnt;
2324 argHnum = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
2325 argTabBytes = *dac_cast<PTR_DWORD>(table); table += sizeof(DWORD);
2326 argTab = table;
2327 table += argTabBytes;
2328 break;
2329
2330 case 0x0C:
2331 //
2332 // 0xFF end 11111111 End of table marker
2333 //
2334 _ASSERTE(val==0xff);
2335 goto FINISHED;
2336
2337 default:
2338 _ASSERTE(!"reserved GC encoding");
2339 break;
2340 }
2341 break;
2342
2343 } // end switch
2344
2345 } // end else (!(val & 0x80))
2346
2347 // iregMask & iargMask are subsets of regMask & argMask respectively
2348
2349 _ASSERTE((iregMask & regMask) == iregMask);
2350 _ASSERTE((iargMask & argMask) == iargMask);
2351
2352 } // end while
2353
2354 } // end else ebp-less frame
2355
2356FINISHED:
2357
2358 // iregMask & iargMask are subsets of regMask & argMask respectively
2359
2360 _ASSERTE((iregMask & regMask) == iregMask);
2361 _ASSERTE((iargMask & argMask) == iargMask);
2362
2363 if (scanOffs != curOffs)
2364 {
2365 /* must have been a boring call */
2366 info->regMaskResult = RM_NONE;
2367 info->argMaskResult = ptrArgTP(0);
2368 info->iregMaskResult = RM_NONE;
2369 info->iargMaskResult = ptrArgTP(0);
2370 info->argHnumResult = 0;
2371 info->argTabResult = NULL;
2372 info->argTabBytes = 0;
2373 }
2374 else
2375 {
2376 info->regMaskResult = convertCalleeSavedRegsMask(regMask);
2377 info->argMaskResult = ptrArgTP(argMask);
2378 info->argHnumResult = argHnum;
2379 info->iregMaskResult = convertCalleeSavedRegsMask(iregMask);
2380 info->iargMaskResult = ptrArgTP(iargMask);
2381 info->argTabResult = argTab;
2382 info->argTabBytes = argTabBytes;
2383 }
2384
2385#ifdef _DEBUG
2386 if (scanOffs != curOffs) {
2387 isCall = false;
2388 }
2389 _ASSERTE(thisPtrReg == REGI_NA || (!isCall || (regNumToMask(thisPtrReg) & info->regMaskResult)));
2390#endif
2391 info->thisPtrResult = thisPtrReg;
2392
 _ASSERTE(int(stackDepth) >= 0); // check that it did not underflow
2394 return (stackDepth * sizeof(unsigned));
2395}
2396#ifdef _PREFAST_
2397#pragma warning(pop)
2398#endif
2399
2400
2401/*****************************************************************************
 * Scan the register argument table for the fully interruptible case.
 This function is called to find all live objects (pushed arguments)
 and to get the stack base for fully interruptible methods.
2405 Returns size of things pushed on the stack for ESP frames
2406
2407 Arguments:
2408 table - The pointer table
2409 curOffsRegs - The current code offset that should be used for reporting registers
2410 curOffsArgs - The current code offset that should be used for reporting args
2411 info - Incoming arg used to determine if there's a frame, and to save results
2412 */
2413
2414static
2415unsigned scanArgRegTableI(PTR_CBYTE table,
2416 unsigned curOffsRegs,
2417 unsigned curOffsArgs,
2418 hdrInfo * info)
2419{
2420 CONTRACTL {
2421 NOTHROW;
2422 GC_NOTRIGGER;
2423 SUPPORTS_DAC;
2424 } CONTRACTL_END;
2425
2426 regNum thisPtrReg = REGI_NA;
2427 unsigned ptrRegs = 0; // The mask of registers that contain pointers
2428 unsigned iptrRegs = 0; // The subset of ptrRegs that are interior pointers
2429 unsigned ptrOffs = 0; // The code offset of the table entry we are currently looking at
2430 unsigned argCnt = 0; // The number of args that have been pushed
2431
2432 ptrArgTP ptrArgs(0); // The mask of stack values that contain pointers.
2433 ptrArgTP iptrArgs(0); // The subset of ptrArgs that are interior pointers.
2434 ptrArgTP argHigh(0); // The current mask position that corresponds to the top of the stack.
2435
2436 bool isThis = false;
2437 bool iptr = false;
2438
2439 // The comment before the call to scanArgRegTableI in EnumGCRefs
2440 // describes why curOffsRegs can be smaller than curOffsArgs.
2441 _ASSERTE(curOffsRegs <= curOffsArgs);
2442
2443#if VERIFY_GC_TABLES
2444 _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
2445#endif
2446
2447 bool hasPartialArgInfo;
2448
2449#ifndef UNIX_X86_ABI
2450 hasPartialArgInfo = info->ebpFrame;
2451#else
2452 // For x86/Linux, interruptible code always has full arg info
2453 //
2454 // This should be aligned with emitFullArgInfo setting at
2455 // emitter::emitEndCodeGen (in JIT)
2456 hasPartialArgInfo = false;
2457#endif
2458
2459 /*
2460 Encoding table for methods that are fully interruptible
2461
2462 The encoding used is as follows:
2463
2464 ptr reg dead 00RRRDDD [RRR != 100]
2465 ptr reg live 01RRRDDD [RRR != 100]
2466
2467 non-ptr arg push 10110DDD [SSS == 110]
2468 ptr arg push 10SSSDDD [SSS != 110] && [SSS != 111]
2469 ptr arg pop 11CCCDDD [CCC != 000] && [CCC != 110] && [CCC != 111]
2470 little delta skip 11000DDD [CCC == 000]
2471 bigger delta skip 11110BBB [CCC == 110]
2472
2473 The values used in the encodings are as follows:
2474
2475 DDD code offset delta from previous entry (0-7)
2476 BBB bigger delta 000=8,001=16,010=24,...,111=64
2477 RRR register number (EAX=000,ECX=001,EDX=010,EBX=011,
2478 EBP=101,ESI=110,EDI=111), ESP=100 is reserved
2479 SSS argument offset from base of stack. This is
2480 redundant for frameless methods as we can
2481 infer it from the previous pushes+pops. However,
2482 for EBP-methods, we only report GC pushes, and
2483 so we need SSS
2484 CCC argument count being popped (includes only ptrs for EBP methods)
2485
2486 The following are the 'large' versions:
2487
2488 large delta skip 10111000 [0xB8] , encodeUnsigned(delta)
2489
2490 large ptr arg push 11111000 [0xF8] , encodeUnsigned(pushCount)
2491 large non-ptr arg push 11111001 [0xF9] , encodeUnsigned(pushCount)
2492 large ptr arg pop 11111100 [0xFC] , encodeUnsigned(popCount)
2493 large arg dead 11111101 [0xFD] , encodeUnsigned(popCount) for caller-pop args.
2494 Any GC args go dead after the call,
2495 but are still sitting on the stack
2496
2497 this pointer prefix 10111100 [0xBC] the next encoding is a ptr live
2498 or a ptr arg push
2499 and contains the this pointer
2500
2501 interior or by-ref 10111111 [0xBF] the next encoding is a ptr live
2502 pointer prefix or a ptr arg push
2503 and contains an interior
2504 or by-ref pointer
2505
2506
2507 The value 11111111 [0xFF] indicates the end of the table.
2508 */
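 /*
    Worked example (illustrative bytes only, not taken from a real method):

      0x5B = 01RRRDDD, RRR=011, DDD=011 -> at code delta 3, EBX becomes a live GC pointer
      0x1B = 00RRRDDD, RRR=011, DDD=011 -> at code delta 3, EBX goes dead
      0x83 = 10SSSDDD, SSS=000, DDD=011 -> at code delta 3, a GC pointer is pushed
                                           at argument offset 0
  */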
2509
2510#if defined(DACCESS_COMPILE)
2511 bool fLastByteIsZero = false;
2512#endif // DACCESS_COMPILE
2513
2514 /* Have we reached the instruction we're looking for? */
2515
2516 while (ptrOffs <= curOffsArgs)
2517 {
2518 unsigned val;
2519
2520 int isPop;
2521 unsigned argOfs;
2522
2523 unsigned regMask;
2524
2525 // iptrRegs & iptrArgs are subsets of ptrRegs & ptrArgs respectively
2526
2527 _ASSERTE((iptrRegs & ptrRegs) == iptrRegs);
2528 _ASSERTE((iptrArgs & ptrArgs) == iptrArgs);
2529
2530 /* Now find the next 'life' transition */
2531
2532 val = *table++;
2533#if defined(DACCESS_COMPILE)
2534 // In this scenario, a zero byte means that EAX is going dead at the current offset. Since EAX
2535 // can't go dead more than once at any given offset, it's invalid to have two consecutive bytes
2536 // of zero. If this were to happen, then it means that there is most likely some sort of DAC
2537 // error, and it may lead to problems such as infinite loops. So we bail out early instead.
2538 if ((val == 0) && fLastByteIsZero)
2539 {
2540 DacError(CORDBG_E_TARGET_INCONSISTENT);
2541 UNREACHABLE();
2542 }
2543 fLastByteIsZero = (val == 0);
2544#endif // DACCESS_COMPILE
2545
2546 if (!(val & 0x80))
2547 {
2548 /* A small 'regPtr' encoding */
2549
2550 regNum reg;
2551
2552 ptrOffs += (val ) & 0x7;
2553 if (ptrOffs > curOffsArgs) {
2554 iptr = isThis = false;
2555 goto REPORT_REFS;
2556 }
2557 else if (ptrOffs > curOffsRegs) {
2558 iptr = isThis = false;
2559 continue;
2560 }
2561
2562 reg = (regNum)((val >> 3) & 0x7);
2563 regMask = 1 << reg; // EAX,ECX,EDX,EBX,---,EBP,ESI,EDI
2564
2565#if 0
2566 printf("regMask = %04X -> %04X\n", ptrRegs,
2567 (val & 0x40) ? (ptrRegs | regMask)
2568 : (ptrRegs & ~regMask));
2569#endif
2570
2571 /* The register is becoming live/dead here */
2572
2573 if (val & 0x40)
2574 {
2575 /* Becomes Live */
2576 _ASSERTE((ptrRegs & regMask) == 0);
2577
2578 ptrRegs |= regMask;
2579
2580 if (isThis)
2581 {
2582 thisPtrReg = reg;
2583 }
2584 if (iptr)
2585 {
2586 iptrRegs |= regMask;
2587 }
2588 }
2589 else
2590 {
2591 /* Becomes Dead */
2592 _ASSERTE((ptrRegs & regMask) != 0);
2593
2594 ptrRegs &= ~regMask;
2595
2596 if (reg == thisPtrReg)
2597 {
2598 thisPtrReg = REGI_NA;
2599 }
2600 if (iptrRegs & regMask)
2601 {
2602 iptrRegs &= ~regMask;
2603 }
2604 }
2605 iptr = isThis = false;
2606 continue;
2607 }
2608
2609 /* This is probably an argument push/pop */
2610
2611 argOfs = (val & 0x38) >> 3;
2612
2613 /* 6 [110] and 7 [111] are reserved for other encodings */
2614 if (argOfs < 6)
2615 {
2616
2617 /* A small argument encoding */
2618
2619 ptrOffs += (val & 0x07);
2620 if (ptrOffs > curOffsArgs) {
2621 iptr = isThis = false;
2622 goto REPORT_REFS;
2623 }
2624 isPop = (val & 0x40);
2625
2626 ARG:
2627
2628 if (isPop)
2629 {
2630 if (argOfs == 0)
2631 continue; // little skip encoding
2632
2633 /* We remove (pop) the top 'argOfs' entries */
2634
 _ASSERTE(argOfs && argOfs <= argCnt);
2636
2637 /* adjust # of arguments */
2638
2639 argCnt -= argOfs;
2640 _ASSERTE(argCnt < MAX_PTRARG_OFS);
2641
2642// printf("[%04X] popping %u args: mask = %04X\n", ptrOffs, argOfs, (int)ptrArgs);
2643
2644 do
2645 {
2646 _ASSERTE(!isZero(argHigh));
2647
2648 /* Do we have an argument bit that's on? */
2649
2650 if (intersect(ptrArgs, argHigh))
2651 {
2652 /* Turn off the bit */
2653
2654 setDiff(ptrArgs, argHigh);
2655 setDiff(iptrArgs, argHigh);
2656
2657 /* We've removed one more argument bit */
2658
2659 argOfs--;
2660 }
2661 else if (hasPartialArgInfo)
2662 argCnt--;
2663 else /* full arg info && not a ref */
2664 argOfs--;
2665
2666 /* Continue with the next lower bit */
2667
2668 argHigh >>= 1;
2669 }
2670 while (argOfs);
2671
2672 _ASSERTE(!hasPartialArgInfo ||
2673 isZero(argHigh) ||
2674 (argHigh == CONSTRUCT_ptrArgTP(1, (argCnt-1))));
2675
2676 if (hasPartialArgInfo)
2677 {
2678 // We always leave argHigh pointing to the next ptr arg.
2679 // So, while argHigh is non-zero, and not a ptrArg, we shift right (and subtract
2680 // one arg from our argCnt) until it is a ptrArg.
2681 while (!intersect(argHigh, ptrArgs) && (!isZero(argHigh)))
2682 {
2683 argHigh >>= 1;
2684 argCnt--;
2685 }
2686 }
2687
2688 }
2689 else
2690 {
2691 /* Add a new ptr arg entry at stack offset 'argOfs' */
2692
2693 if (argOfs >= MAX_PTRARG_OFS)
2694 {
2695 _ASSERTE_ALL_BUILDS("clr/src/VM/eetwain.cpp", !"scanArgRegTableI: args pushed 'too deep'");
2696 }
2697 else
2698 {
2699 /* Full arg info reports all pushes, and thus
2700 argOffs has to be consistent with argCnt */
2701
2702 _ASSERTE(hasPartialArgInfo || argCnt == argOfs);
2703
2704 /* store arg count */
2705
2706 argCnt = argOfs + 1;
2707 _ASSERTE((argCnt < MAX_PTRARG_OFS));
2708
2709 /* Compute the appropriate argument offset bit */
2710
2711 ptrArgTP argMask = CONSTRUCT_ptrArgTP(1, argOfs);
2712
2713// printf("push arg at offset %02u --> mask = %04X\n", argOfs, (int)argMask);
2714
2715 /* We should never push twice at the same offset */
2716
2717 _ASSERTE(!intersect( ptrArgs, argMask));
2718 _ASSERTE(!intersect(iptrArgs, argMask));
2719
2720 /* We should never push within the current highest offset */
2721
2722 // _ASSERTE(argHigh < argMask);
2723
2724 /* This is now the highest bit we've set */
2725
2726 argHigh = argMask;
2727
2728 /* Set the appropriate bit in the argument mask */
2729
2730 ptrArgs |= argMask;
2731
2732 if (iptr)
2733 iptrArgs |= argMask;
2734 }
2735
2736 iptr = isThis = false;
2737 }
2738 continue;
2739 }
2740 else if (argOfs == 6)
2741 {
2742 if (val & 0x40) {
2743 /* Bigger delta 000=8,001=16,010=24,...,111=64 */
2744 ptrOffs += (((val & 0x07) + 1) << 3);
2745 }
2746 else {
2747 /* non-ptr arg push */
2748 _ASSERTE(!hasPartialArgInfo);
2749 ptrOffs += (val & 0x07);
2750 if (ptrOffs > curOffsArgs) {
2751 iptr = isThis = false;
2752 goto REPORT_REFS;
2753 }
2754 argHigh = CONSTRUCT_ptrArgTP(1, argCnt);
2755 argCnt++;
2756 _ASSERTE(argCnt < MAX_PTRARG_OFS);
2757 }
2758 continue;
2759 }
2760
2761 /* argOfs was 7 [111] which is reserved for the larger encodings */
2762
2763 _ASSERTE(argOfs==7);
2764
2765 switch (val)
2766 {
2767 case 0xFF:
2768 iptr = isThis = false;
2769 goto REPORT_REFS; // the method might loop !!!
2770
2771 case 0xB8:
2772 val = fastDecodeUnsigned(table);
2773 ptrOffs += val;
2774 continue;
2775
2776 case 0xBC:
2777 isThis = true;
2778 break;
2779
2780 case 0xBF:
2781 iptr = true;
2782 break;
2783
2784 case 0xF8:
2785 case 0xFC:
2786 isPop = val & 0x04;
2787 argOfs = fastDecodeUnsigned(table);
2788 goto ARG;
2789
2790 case 0xFD: {
2791 argOfs = fastDecodeUnsigned(table);
2792 _ASSERTE(argOfs && argOfs <= argCnt);
2793
2794 // Kill the top "argOfs" pointers.
2795
2796 ptrArgTP argMask;
2797 for(argMask = CONSTRUCT_ptrArgTP(1, argCnt); (argOfs != 0); argMask >>= 1)
2798 {
2799 _ASSERTE(!isZero(argMask) && !isZero(ptrArgs)); // there should be remaining pointers
2800
2801 if (intersect(ptrArgs, argMask))
2802 {
2803 setDiff(ptrArgs, argMask);
2804 setDiff(iptrArgs, argMask);
2805 argOfs--;
2806 }
2807 }
2808
 // For partial arg info, we need to find the next highest pointer for argHigh
2810
2811 if (hasPartialArgInfo)
2812 {
2813 for(argHigh = ptrArgTP(0); !isZero(argMask); argMask >>= 1)
2814 {
2815 if (intersect(ptrArgs, argMask)) {
2816 argHigh = argMask;
2817 break;
2818 }
2819 }
2820 }
2821 } break;
2822
2823 case 0xF9:
2824 argOfs = fastDecodeUnsigned(table);
2825 argCnt += argOfs;
2826 break;
2827
2828 default:
2829 _ASSERTE(!"Unexpected special code %04X");
2830 }
2831 }
2832
2833 /* Report all live pointer registers */
2834REPORT_REFS:
2835
2836 _ASSERTE((iptrRegs & ptrRegs) == iptrRegs); // iptrRegs is a subset of ptrRegs
2837 _ASSERTE((iptrArgs & ptrArgs) == iptrArgs); // iptrArgs is a subset of ptrArgs
2838
2839 /* Save the current live register, argument set, and argCnt */
2840
2841 info->regMaskResult = convertAllRegsMask(ptrRegs);
2842 info->argMaskResult = ptrArgs;
2843 info->argHnumResult = 0;
2844 info->iregMaskResult = convertAllRegsMask(iptrRegs);
2845 info->iargMaskResult = iptrArgs;
2846
2847 info->thisPtrResult = thisPtrReg;
2848 _ASSERTE(thisPtrReg == REGI_NA || (regNumToMask(thisPtrReg) & info->regMaskResult));
2849
2850 if (hasPartialArgInfo)
2851 {
2852 return 0;
2853 }
2854 else
2855 {
 _ASSERTE(int(argCnt) >= 0); // check that it did not underflow
2857 return (argCnt * sizeof(unsigned));
2858 }
2859}
2860
2861/*****************************************************************************/
2862
2863unsigned GetPushedArgSize(hdrInfo * info, PTR_CBYTE table, DWORD curOffs)
2864{
2865 SUPPORTS_DAC;
2866
2867 unsigned sz;
2868
2869 if (info->interruptible)
2870 {
2871 sz = scanArgRegTableI(skipToArgReg(*info, table),
2872 curOffs,
2873 curOffs,
2874 info);
2875 }
2876 else
2877 {
2878 sz = scanArgRegTable(skipToArgReg(*info, table),
2879 curOffs,
2880 info);
2881 }
2882
2883 return sz;
2884}
2885
2886/*****************************************************************************/
2887
2888inline
2889void TRASH_CALLEE_UNSAVED_REGS(PREGDISPLAY pContext)
2890{
2891 LIMITED_METHOD_DAC_CONTRACT;
2892
2893#ifdef _DEBUG
2894 /* This is not completely correct as we lose the current value, but
2895 it should not really be useful to anyone. */
2896 static DWORD s_badData = 0xDEADBEEF;
2897 pContext->SetEaxLocation(&s_badData);
2898 pContext->SetEcxLocation(&s_badData);
2899 pContext->SetEdxLocation(&s_badData);
2900#endif //_DEBUG
2901}
2902
2903/*****************************************************************************
2904 * Sizes of certain i386 instructions which are used in the prolog/epilog
2905 */
2906
2907// Can we use sign-extended byte to encode the imm value, or do we need a dword
2908#define CAN_COMPRESS(val) ((INT8)(val) == (INT32)(val))
2909
2910#define SZ_ADD_REG(val) ( 2 + (CAN_COMPRESS(val) ? 1 : 4))
2911#define SZ_AND_REG(val) SZ_ADD_REG(val)
2912#define SZ_POP_REG 1
2913#define SZ_LEA(offset) SZ_ADD_REG(offset)
2914#define SZ_MOV_REG_REG 2
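
// For illustration (hypothetical operand values): "add esp, 0x10" encodes as
// 83 C4 10 (3 bytes) because 0x10 fits in a sign-extended byte, whereas
// "add esp, 0x1000" encodes as 81 C4 00 10 00 00 (6 bytes). Correspondingly,
// SZ_ADD_REG(0x10) evaluates to 3 and SZ_ADD_REG(0x1000) to 6.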
2915
2916bool IsMarkerInstr(BYTE val)
2917{
2918 SUPPORTS_DAC;
2919#ifdef _DEBUG
2920 return (val == X86_INSTR_INT3) || // Debugger might stomp with an int3
2921 (val == X86_INSTR_HLT && GCStress<cfg_any>::IsEnabled()); // GcCover might stomp with a Hlt
2922#else
2923 return false;
2924#endif
2925}
2926
/* Check if the given instruction opcode is the one we expect.
   This is a "necessary" but not "sufficient" check, as it also accepts the case
   where the instruction has been replaced by one of our special markers
   (for debugging and GcStress). */
2930
2931bool CheckInstrByte(BYTE val, BYTE expectedValue)
2932{
2933 SUPPORTS_DAC;
2934 return ((val == expectedValue) || IsMarkerInstr(val));
2935}
2936
/* Similar to CheckInstrByte(). Use this to check a masked opcode (ignoring
   optional bits in the opcode encoding).
   valPattern is the opcode with the optional bits masked out.
   expectedPattern is the masked value we expect.
   val is the actual (unmasked) instruction opcode.
 */
2943bool CheckInstrBytePattern(BYTE valPattern, BYTE expectedPattern, BYTE val)
2944{
2945 SUPPORTS_DAC;
2946
2947 _ASSERTE((valPattern & val) == valPattern);
2948
2949 return ((valPattern == expectedPattern) || IsMarkerInstr(val));
2950}
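
// For example, "push reg" is encoded as the single byte 0x50+reg (0x50..0x57), so
// CheckInstrBytePattern(opcode & 0xF8, 0x50, opcode) accepts any of the eight
// push-register opcodes (or a marker byte left behind by the debugger/GcStress);
// SKIP_PUSH_REG below relies on exactly this check.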
2951
2952/* Similar to CheckInstrByte() */
2953
2954bool CheckInstrWord(WORD val, WORD expectedValue)
2955{
2956 LIMITED_METHOD_CONTRACT;
2957 SUPPORTS_DAC;
2958
2959 return ((val == expectedValue) || IsMarkerInstr(val & 0xFF));
2960}
2961
2962// Use this to check if the instruction at offset "walkOffset" has already
2963// been executed
2964// "actualHaltOffset" is the offset when the code was suspended
2965// It is assumed that there is linear control flow from offset 0 to "actualHaltOffset".
2966//
2967// This has been factored out just so that the intent of the comparison
2968// is clear (compared to the opposite intent)
2969
2970bool InstructionAlreadyExecuted(unsigned walkOffset, unsigned actualHaltOffset)
2971{
2972 SUPPORTS_DAC;
2973 return (walkOffset < actualHaltOffset);
2974}
2975
// Skips past an "arith REG, IMM" instruction
2977inline unsigned SKIP_ARITH_REG(int val, PTR_CBYTE base, unsigned offset)
2978{
2979 LIMITED_METHOD_DAC_CONTRACT;
2980
2981 unsigned delta = 0;
2982 if (val != 0)
2983 {
2984#ifdef _DEBUG
2985 // Confirm that arith instruction is at the correct place
2986 _ASSERTE(CheckInstrBytePattern(base[offset ] & 0xFD, 0x81, base[offset]) &&
2987 CheckInstrBytePattern(base[offset+1] & 0xC0, 0xC0, base[offset+1]));
2988 // only use DWORD form if needed
2989 _ASSERTE(((base[offset] & 2) != 0) == CAN_COMPRESS(val) ||
2990 IsMarkerInstr(base[offset]));
2991#endif
2992 delta = 2 + (CAN_COMPRESS(val) ? 1 : 4);
2993 }
2994 return(offset + delta);
2995}
2996
2997inline unsigned SKIP_PUSH_REG(PTR_CBYTE base, unsigned offset)
2998{
2999 LIMITED_METHOD_DAC_CONTRACT;
3000
3001 // Confirm it is a push instruction
3002 _ASSERTE(CheckInstrBytePattern(base[offset] & 0xF8, 0x50, base[offset]));
3003 return(offset + 1);
3004}
3005
3006inline unsigned SKIP_POP_REG(PTR_CBYTE base, unsigned offset)
3007{
3008 LIMITED_METHOD_DAC_CONTRACT;
3009
3010 // Confirm it is a pop instruction
3011 _ASSERTE(CheckInstrBytePattern(base[offset] & 0xF8, 0x58, base[offset]));
3012 return(offset + 1);
3013}
3014
3015inline unsigned SKIP_MOV_REG_REG(PTR_CBYTE base, unsigned offset)
3016{
3017 LIMITED_METHOD_DAC_CONTRACT;
3018
3019 // Confirm it is a move instruction
3020 // Note that only the first byte may have been stomped on by IsMarkerInstr()
3021 // So we can check the second byte directly
3022 _ASSERTE(CheckInstrBytePattern(base[offset] & 0xFD, 0x89, base[offset]) &&
3023 (base[offset+1] & 0xC0) == 0xC0);
3024 return(offset + 2);
3025}
3026
3027inline unsigned SKIP_LEA_ESP_EBP(int val, PTR_CBYTE base, unsigned offset)
3028{
3029 LIMITED_METHOD_DAC_CONTRACT;
3030
3031#ifdef _DEBUG
3032 // Confirm it is the right instruction
3033 // Note that only the first byte may have been stomped on by IsMarkerInstr()
3034 // So we can check the second byte directly
3035 WORD wOpcode = *(PTR_WORD)base;
3036 _ASSERTE((CheckInstrWord(wOpcode, X86_INSTR_w_LEA_ESP_EBP_BYTE_OFFSET) &&
3037 (val == *(PTR_SBYTE)(base+2)) &&
3038 CAN_COMPRESS(val)) ||
3039 (CheckInstrWord(wOpcode, X86_INSTR_w_LEA_ESP_EBP_DWORD_OFFSET) &&
3040 (val == *(PTR_INT32)(base+2)) &&
3041 !CAN_COMPRESS(val)));
3042#endif
3043
3044 unsigned delta = 2 + (CAN_COMPRESS(val) ? 1 : 4);
3045 return(offset + delta);
3046}
3047
3048unsigned SKIP_ALLOC_FRAME(int size, PTR_CBYTE base, unsigned offset)
3049{
3050 CONTRACTL {
3051 NOTHROW;
3052 GC_NOTRIGGER;
3053 SUPPORTS_DAC;
3054 } CONTRACTL_END;
3055
3056 _ASSERTE(size != 0);
3057
3058 if (size == sizeof(void*))
3059 {
3060 // We do "push eax" instead of "sub esp,4"
3061 return (SKIP_PUSH_REG(base, offset));
3062 }
3063
3064 if (size >= (int)GetOsPageSize())
3065 {
3066 if (size < int(3 * GetOsPageSize()))
3067 {
3068 // add 7 bytes for one or two TEST EAX, [ESP+GetOsPageSize()]
3069 offset += (size / GetOsPageSize()) * 7;
3070 }
3071 else
3072 {
3073 // xor eax, eax 2
3074 // [nop] 0-3
3075 // loop:
3076 // test [esp + eax], eax 3
3077 // sub eax, 0x1000 5
3078 // cmp EAX, -size 5
3079 // jge loop 2
3080 offset += 2;
3081
3082 // NGEN images that support rejit may have extra nops we need to skip over
3083 while (offset < 5)
3084 {
3085 if (CheckInstrByte(base[offset], X86_INSTR_NOP))
3086 {
3087 offset++;
3088 }
3089 else
3090 {
3091 break;
3092 }
3093 }
3094 offset += 15;
3095 }
3096 }
3097
3098 // sub ESP, size
3099 return (SKIP_ARITH_REG(size, base, offset));
3100}
3101
3102
3103#endif // !USE_GC_INFO_DECODER
3104
3105
3106#if defined(WIN64EXCEPTIONS) && !defined(CROSSGEN_COMPILE)
3107
3108void EECodeManager::EnsureCallerContextIsValid( PREGDISPLAY pRD, StackwalkCacheEntry* pCacheEntry, EECodeInfo * pCodeInfo /*= NULL*/ )
3109{
3110 CONTRACTL
3111 {
3112 NOTHROW;
3113 GC_NOTRIGGER;
3114 SUPPORTS_DAC;
3115 }
3116 CONTRACTL_END;
3117
3118 if( !pRD->IsCallerContextValid )
3119 {
3120#if !defined(DACCESS_COMPILE) && defined(HAS_QUICKUNWIND)
3121 if (pCacheEntry != NULL)
3122 {
3123 // lightened schema: take stack unwind info from stackwalk cache
3124 QuickUnwindStackFrame(pRD, pCacheEntry, EnsureCallerStackFrameIsValid);
3125 }
3126 else
3127#endif // !DACCESS_COMPILE
3128 {
3129 // We need to make a copy here (instead of switching the pointers), in order to preserve the current context
3130 *(pRD->pCallerContext) = *(pRD->pCurrentContext);
3131 *(pRD->pCallerContextPointers) = *(pRD->pCurrentContextPointers);
3132
3133 Thread::VirtualUnwindCallFrame(pRD->pCallerContext, pRD->pCallerContextPointers, pCodeInfo);
3134 }
3135
3136 pRD->IsCallerContextValid = TRUE;
3137 }
3138
3139 _ASSERTE( pRD->IsCallerContextValid );
3140}
3141
3142size_t EECodeManager::GetCallerSp( PREGDISPLAY pRD )
3143{
3144 CONTRACTL {
3145 NOTHROW;
3146 GC_NOTRIGGER;
3147 SUPPORTS_DAC;
3148 } CONTRACTL_END;
3149
3150 // Don't add usage of this field. This is only temporary.
3151 // See ExceptionTracker::InitializeCrawlFrame() for more information.
3152 if (!pRD->IsCallerSPValid)
3153 {
3154 EnsureCallerContextIsValid(pRD, NULL);
3155 }
3156
3157 return GetSP(pRD->pCallerContext);
3158}
3159
3160#endif // WIN64EXCEPTIONS && !CROSSGEN_COMPILE
3161
3162#ifdef HAS_QUICKUNWIND
3163/*
3164 * Light unwind the current stack frame, using provided cache entry.
3165 * pPC, Esp and pEbp of pContext are updated.
3166 */
3167
3168// static
3169void EECodeManager::QuickUnwindStackFrame(PREGDISPLAY pRD, StackwalkCacheEntry *pCacheEntry, QuickUnwindFlag flag)
3170{
3171 CONTRACTL {
3172 NOTHROW;
3173 GC_NOTRIGGER;
3174 } CONTRACTL_END;
3175
3176 _ASSERTE(pCacheEntry);
3177 _ASSERTE(GetControlPC(pRD) == (PCODE)(pCacheEntry->IP));
3178
3179#if defined(_TARGET_X86_)
3180 _ASSERTE(flag == UnwindCurrentStackFrame);
3181
3182 _ASSERTE(!pCacheEntry->fUseEbp || pCacheEntry->fUseEbpAsFrameReg);
3183
3184 if (pCacheEntry->fUseEbpAsFrameReg)
3185 {
3186 _ASSERTE(pCacheEntry->fUseEbp);
3187 TADDR curEBP = (TADDR)*pRD->GetEbpLocation();
3188
3189 // EBP frame, update ESP through EBP, since ESPOffset may vary
3190 pRD->SetEbpLocation(PTR_DWORD(curEBP));
3191 pRD->SP = curEBP + sizeof(void*);
3192 }
3193 else
3194 {
3195 _ASSERTE(!pCacheEntry->fUseEbp);
3196 // ESP frame, update up to retAddr using ESPOffset
3197 pRD->SP += pCacheEntry->ESPOffset;
3198 }
3199 pRD->PCTAddr = (TADDR)pRD->SP;
3200 pRD->ControlPC = *PTR_PCODE(pRD->PCTAddr);
3201 pRD->SP += sizeof(void*) + pCacheEntry->argSize;
3202
3203#elif defined(_TARGET_AMD64_)
3204 if (pRD->IsCallerContextValid)
3205 {
3206 pRD->pCurrentContext->Rbp = pRD->pCallerContext->Rbp;
3207 pRD->pCurrentContext->Rsp = pRD->pCallerContext->Rsp;
3208 pRD->pCurrentContext->Rip = pRD->pCallerContext->Rip;
3209 }
3210 else
3211 {
3212 PCONTEXT pSourceCtx = NULL;
3213 PCONTEXT pTargetCtx = NULL;
3214 if (flag == UnwindCurrentStackFrame)
3215 {
3216 pTargetCtx = pRD->pCurrentContext;
3217 pSourceCtx = pRD->pCurrentContext;
3218 }
3219 else
3220 {
3221 pTargetCtx = pRD->pCallerContext;
3222 pSourceCtx = pRD->pCurrentContext;
3223 }
3224
3225 // Unwind RBP. The offset is relative to the current sp.
3226 if (pCacheEntry->RBPOffset == 0)
3227 {
3228 pTargetCtx->Rbp = pSourceCtx->Rbp;
3229 }
3230 else
3231 {
3232 pTargetCtx->Rbp = *(UINT_PTR*)(pSourceCtx->Rsp + pCacheEntry->RBPOffset);
3233 }
3234
 // Adjust the sp. From this point onwards pCurrentContext->Rsp is the caller sp.
3236 pTargetCtx->Rsp = pSourceCtx->Rsp + pCacheEntry->RSPOffset;
3237
3238 // Retrieve the return address.
3239 pTargetCtx->Rip = *(UINT_PTR*)((pTargetCtx->Rsp) - sizeof(UINT_PTR));
3240 }
3241
3242 if (flag == UnwindCurrentStackFrame)
3243 {
3244 SyncRegDisplayToCurrentContext(pRD);
3245 pRD->IsCallerContextValid = FALSE;
3246 pRD->IsCallerSPValid = FALSE; // Don't add usage of this field. This is only temporary.
3247 }
3248
3249#else // !_TARGET_X86_ && !_TARGET_AMD64_
3250 PORTABILITY_ASSERT("EECodeManager::QuickUnwindStackFrame is not implemented on this platform.");
3251#endif // !_TARGET_X86_ && !_TARGET_AMD64_
3252}
3253#endif // HAS_QUICKUNWIND
3254
3255/*****************************************************************************/
3256#ifdef _TARGET_X86_ // UnwindStackFrame
3257/*****************************************************************************/
3258
3259const RegMask CALLEE_SAVED_REGISTERS_MASK[] =
3260{
3261 RM_EDI, // first register to be pushed
3262 RM_ESI,
3263 RM_EBX,
3264 RM_EBP // last register to be pushed
3265};
3266
3267static void SetLocation(PREGDISPLAY pRD, int ind, PDWORD loc)
3268{
3269#ifdef WIN64EXCEPTIONS
3270 static const SIZE_T OFFSET_OF_CALLEE_SAVED_REGISTERS[] =
3271 {
3272 offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, Edi), // first register to be pushed
3273 offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, Esi),
3274 offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, Ebx),
3275 offsetof(T_KNONVOLATILE_CONTEXT_POINTERS, Ebp), // last register to be pushed
3276 };
3277
3278 SIZE_T offsetOfRegPtr = OFFSET_OF_CALLEE_SAVED_REGISTERS[ind];
3279 *(LPVOID*)(PBYTE(pRD->pCurrentContextPointers) + offsetOfRegPtr) = loc;
3280#else
3281 static const SIZE_T OFFSET_OF_CALLEE_SAVED_REGISTERS[] =
3282 {
3283 offsetof(REGDISPLAY, pEdi), // first register to be pushed
3284 offsetof(REGDISPLAY, pEsi),
3285 offsetof(REGDISPLAY, pEbx),
3286 offsetof(REGDISPLAY, pEbp), // last register to be pushed
3287 };
3288
3289 SIZE_T offsetOfRegPtr = OFFSET_OF_CALLEE_SAVED_REGISTERS[ind];
3290 *(LPVOID*)(PBYTE(pRD) + offsetOfRegPtr) = loc;
3291#endif
3292}
3293
3294/*****************************************************************************/
3295
3296void UnwindEspFrameEpilog(
3297 PREGDISPLAY pContext,
3298 hdrInfo * info,
3299 PTR_CBYTE epilogBase,
3300 unsigned flags)
3301{
3302 LIMITED_METHOD_CONTRACT;
3303 SUPPORTS_DAC;
3304
3305 _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
3306 _ASSERTE(!info->ebpFrame && !info->doubleAlign);
3307 _ASSERTE(info->epilogOffs > 0);
3308
3309 int offset = 0;
3310 unsigned ESP = pContext->SP;
3311
3312 if (info->rawStkSize)
3313 {
3314 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3315 {
3316 /* We have NOT executed the "ADD ESP, FrameSize",
3317 so manually adjust stack pointer */
3318 ESP += info->rawStkSize;
3319 }
3320
3321 // We have already popped off the frame (excluding the callee-saved registers)
3322
3323 if (epilogBase[0] == X86_INSTR_POP_ECX)
3324 {
3325 // We may use "POP ecx" for doing "ADD ESP, 4",
3326 // or we may not (in the case of JMP epilogs)
3327 _ASSERTE(info->rawStkSize == sizeof(void*));
3328 offset = SKIP_POP_REG(epilogBase, offset);
3329 }
3330 else
3331 {
3332 // "add esp, rawStkSize"
3333 offset = SKIP_ARITH_REG(info->rawStkSize, epilogBase, offset);
3334 }
3335 }
3336
3337 /* Remaining callee-saved regs are at ESP. Need to update
3338 regsMask as well to exclude registers which have already been popped. */
3339
3340 const RegMask regsMask = info->savedRegMask;
3341
3342 /* Increment "offset" in steps to see which callee-saved
3343 registers have already been popped */
3344
3345 for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK); i > 0; i--)
3346 {
3347 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
3348
3349 if (!(regMask & regsMask))
3350 continue;
3351
3352 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3353 {
3354 /* We have NOT yet popped off the register.
3355 Get the value from the stack if needed */
3356 if ((flags & UpdateAllRegs) || (regMask == RM_EBP))
3357 {
3358 SetLocation(pContext, i - 1, PTR_DWORD((TADDR)ESP));
3359 }
3360
3361 /* Adjust ESP */
3362 ESP += sizeof(void*);
3363 }
3364
3365 offset = SKIP_POP_REG(epilogBase, offset);
3366 }
3367
3368 //CEE_JMP generates an epilog similar to a normal CEE_RET epilog except for the last instruction
3369 _ASSERTE(CheckInstrBytePattern(epilogBase[offset] & X86_INSTR_RET, X86_INSTR_RET, epilogBase[offset]) //ret
3370 || CheckInstrBytePattern(epilogBase[offset], X86_INSTR_JMP_NEAR_REL32, epilogBase[offset]) //jmp ret32
3371 || CheckInstrWord(*PTR_WORD(epilogBase + offset), X86_INSTR_w_JMP_FAR_IND_IMM)); //jmp [addr32]
3372
3373 /* Finally we can set pPC */
3374 pContext->PCTAddr = (TADDR)ESP;
3375 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3376
3377 pContext->SP = ESP;
3378}
3379
3380/*****************************************************************************/
3381
3382void UnwindEbpDoubleAlignFrameEpilog(
3383 PREGDISPLAY pContext,
3384 hdrInfo * info,
3385 PTR_CBYTE epilogBase,
3386 unsigned flags)
3387{
3388 LIMITED_METHOD_CONTRACT;
3389 SUPPORTS_DAC;
3390
3391 _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
3392 _ASSERTE(info->ebpFrame || info->doubleAlign);
3393
3394 _ASSERTE(info->argSize < 0x10000); // "ret" only has a 2 byte operand
3395
3396 /* See how many instructions we have executed in the
3397 epilog to determine which callee-saved registers
3398 have already been popped */
3399 int offset = 0;
3400
3401 unsigned ESP = pContext->SP;
3402
3403 bool needMovEspEbp = false;
3404
3405 if (info->doubleAlign)
3406 {
3407 // add esp, rawStkSize
3408
3409 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3410 ESP += info->rawStkSize;
3411 _ASSERTE(info->rawStkSize != 0);
3412 offset = SKIP_ARITH_REG(info->rawStkSize, epilogBase, offset);
3413
3414 // We also need "mov esp, ebp" after popping the callee-saved registers
3415 needMovEspEbp = true;
3416 }
3417 else
3418 {
3419 bool needLea = false;
3420
3421 if (info->localloc)
3422 {
3423 // ESP may be variable if a localloc was actually executed. We will reset it.
3424 // lea esp, [ebp-calleeSavedRegs]
3425
3426 needLea = true;
3427 }
3428 else if (info->savedRegsCountExclFP == 0)
3429 {
3430 // We will just generate "mov esp, ebp" and be done with it.
3431
3432 if (info->rawStkSize != 0)
3433 {
3434 needMovEspEbp = true;
3435 }
3436 }
3437 else if (info->rawStkSize == 0)
3438 {
3439 // do nothing before popping the callee-saved registers
3440 }
3441 else if (info->rawStkSize == sizeof(void*))
3442 {
3443 // "pop ecx" will make ESP point to the callee-saved registers
3444 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3445 ESP += sizeof(void*);
3446 offset = SKIP_POP_REG(epilogBase, offset);
3447 }
3448 else
3449 {
3450 // We need to make ESP point to the callee-saved registers
3451 // lea esp, [ebp-calleeSavedRegs]
3452
3453 needLea = true;
3454 }
3455
3456 if (needLea)
3457 {
3458 // lea esp, [ebp-calleeSavedRegs]
3459
3460 unsigned calleeSavedRegsSize = info->savedRegsCountExclFP * sizeof(void*);
3461
3462 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3463 ESP = *pContext->GetEbpLocation() - calleeSavedRegsSize;
3464
3465 offset = SKIP_LEA_ESP_EBP(-int(calleeSavedRegsSize), epilogBase, offset);
3466 }
3467 }
3468
3469 for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i > 0; i--)
3470 {
3471 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
3472 _ASSERTE(regMask != RM_EBP);
3473
3474 if ((info->savedRegMask & regMask) == 0)
3475 continue;
3476
3477 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3478 {
3479 if (flags & UpdateAllRegs)
3480 {
3481 SetLocation(pContext, i - 1, PTR_DWORD((TADDR)ESP));
3482 }
3483 ESP += sizeof(void*);
3484 }
3485
3486 offset = SKIP_POP_REG(epilogBase, offset);
3487 }
3488
3489 if (needMovEspEbp)
3490 {
3491 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3492 ESP = *pContext->GetEbpLocation();
3493
3494 offset = SKIP_MOV_REG_REG(epilogBase, offset);
3495 }
3496
3497 // Have we executed the pop EBP?
3498 if (!InstructionAlreadyExecuted(offset, info->epilogOffs))
3499 {
3500 pContext->SetEbpLocation(PTR_DWORD(TADDR(ESP)));
3501 ESP += sizeof(void*);
3502 }
3503 offset = SKIP_POP_REG(epilogBase, offset);
3504
3505 pContext->PCTAddr = (TADDR)ESP;
3506 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3507
3508 pContext->SP = ESP;
3509}
3510
3511inline SIZE_T GetStackParameterSize(hdrInfo * info)
3512{
3513 SUPPORTS_DAC;
3514 return (info->varargs ? 0 : info->argSize); // Note varargs is caller-popped
3515}
3516
3517//****************************************************************************
3518// This is the value ESP is incremented by on doing a "return"
3519
3520inline SIZE_T ESPIncrOnReturn(hdrInfo * info)
3521{
3522 SUPPORTS_DAC;
3523 return sizeof(void *) + // pop off the return address
3524 GetStackParameterSize(info);
3525}
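
// For example (hypothetical frame layout): a non-vararg method with 12 bytes of
// stack parameters returns with "ret 12", so ESPIncrOnReturn() yields
// sizeof(void*) + 12 == 16 on x86; for a vararg method the caller pops the
// arguments, so the increment is just sizeof(void*).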
3526
3527/*****************************************************************************/
3528
3529void UnwindEpilog(
3530 PREGDISPLAY pContext,
3531 hdrInfo * info,
3532 PTR_CBYTE epilogBase,
3533 unsigned flags)
3534{
3535 LIMITED_METHOD_CONTRACT;
3536 SUPPORTS_DAC;
3537 _ASSERTE(info->epilogOffs != hdrInfo::NOT_IN_EPILOG);
3538 // _ASSERTE(flags & ActiveStackFrame); // <TODO> Wont work for thread death</TODO>
3539 _ASSERTE(info->epilogOffs > 0);
3540
3541 if (info->ebpFrame || info->doubleAlign)
3542 {
3543 UnwindEbpDoubleAlignFrameEpilog(pContext, info, epilogBase, flags);
3544 }
3545 else
3546 {
3547 UnwindEspFrameEpilog(pContext, info, epilogBase, flags);
3548 }
3549
3550#ifdef _DEBUG
3551 if (flags & UpdateAllRegs)
3552 TRASH_CALLEE_UNSAVED_REGS(pContext);
3553#endif
3554
3555 /* Now adjust stack pointer */
3556
3557 pContext->SP += ESPIncrOnReturn(info);
3558}
3559
3560/*****************************************************************************/
3561
3562void UnwindEspFrameProlog(
3563 PREGDISPLAY pContext,
3564 hdrInfo * info,
3565 PTR_CBYTE methodStart,
3566 unsigned flags)
3567{
3568 LIMITED_METHOD_CONTRACT;
3569 SUPPORTS_DAC;
3570
3571 /* we are in the middle of the prolog */
3572 _ASSERTE(info->prologOffs != hdrInfo::NOT_IN_PROLOG);
3573 _ASSERTE(!info->ebpFrame && !info->doubleAlign);
3574
3575 unsigned offset = 0;
3576
3577#ifdef _DEBUG
3578 // If the first two instructions are 'nop, int3', then we will
 // assume that it is from a JitHalt operation and skip past it
3580 if (methodStart[0] == X86_INSTR_NOP && methodStart[1] == X86_INSTR_INT3)
3581 {
3582 offset += 2;
3583 }
3584#endif
3585
3586 const DWORD curOffs = info->prologOffs;
3587 unsigned ESP = pContext->SP;
3588
3589 // Find out how many callee-saved regs have already been pushed
3590
3591 unsigned regsMask = RM_NONE;
3592 PTR_DWORD savedRegPtr = PTR_DWORD((TADDR)ESP);
3593
3594 for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK); i++)
3595 {
3596 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
3597
3598 if (!(info->savedRegMask & regMask))
3599 continue;
3600
3601 if (InstructionAlreadyExecuted(offset, curOffs))
3602 {
3603 ESP += sizeof(void*);
3604 regsMask |= regMask;
3605 }
3606
3607 offset = SKIP_PUSH_REG(methodStart, offset);
3608 }
3609
3610 if (info->rawStkSize)
3611 {
3612 offset = SKIP_ALLOC_FRAME(info->rawStkSize, methodStart, offset);
3613
3614 // Note that this assumes that only the last instruction in SKIP_ALLOC_FRAME
3615 // actually updates ESP
3616 if (InstructionAlreadyExecuted(offset, curOffs + 1))
3617 {
3618 savedRegPtr += (info->rawStkSize / sizeof(DWORD));
3619 ESP += info->rawStkSize;
3620 }
3621 }
3622
3623 //
3624 // Stack probe checks here
3625 //
3626
3627 // Poison the value, we don't set it properly at the end of the prolog
3628 INDEBUG(offset = 0xCCCCCCCC);
3629
3630
3631 // Always restore EBP
3632 if (regsMask & RM_EBP)
3633 pContext->SetEbpLocation(savedRegPtr++);
3634
3635 if (flags & UpdateAllRegs)
3636 {
3637 if (regsMask & RM_EBX)
3638 pContext->SetEbxLocation(savedRegPtr++);
3639 if (regsMask & RM_ESI)
3640 pContext->SetEsiLocation(savedRegPtr++);
3641 if (regsMask & RM_EDI)
3642 pContext->SetEdiLocation(savedRegPtr++);
3643
3644 TRASH_CALLEE_UNSAVED_REGS(pContext);
3645 }
3646
3647#if 0
3648// NOTE:
3649// THIS IS ONLY TRUE IF PROLOGSIZE DOES NOT INCLUDE REG-VAR INITIALIZATION !!!!
3650//
 /* There is (potentially) only one additional
 instruction in the prolog (push ebp),
 but if we were already past that instruction,
 info->prologOffs would be hdrInfo::NOT_IN_PROLOG!
 */
3656 _ASSERTE(offset == info->prologOffs);
3657#endif
3658
3659 pContext->SP = ESP;
3660}
3661
3662/*****************************************************************************/
3663
3664void UnwindEspFrame(
3665 PREGDISPLAY pContext,
3666 hdrInfo * info,
3667 PTR_CBYTE table,
3668 PTR_CBYTE methodStart,
3669 DWORD curOffs,
3670 unsigned flags)
3671{
3672 LIMITED_METHOD_CONTRACT;
3673 SUPPORTS_DAC;
3674
3675 _ASSERTE(!info->ebpFrame && !info->doubleAlign);
3676 _ASSERTE(info->epilogOffs == hdrInfo::NOT_IN_EPILOG);
3677
3678 unsigned ESP = pContext->SP;
3679
3680
3681 if (info->prologOffs != hdrInfo::NOT_IN_PROLOG)
3682 {
3683 if (info->prologOffs != 0) // Do nothing for the very start of the method
3684 {
3685 UnwindEspFrameProlog(pContext, info, methodStart, flags);
3686 ESP = pContext->SP;
3687 }
3688 }
3689 else
3690 {
3691 /* we are past the prolog, ESP has been set above */
3692
3693 // Are there any arguments pushed on the stack?
3694
3695 ESP += GetPushedArgSize(info, table, curOffs);
3696
3697 ESP += info->rawStkSize;
3698
3699 const RegMask regsMask = info->savedRegMask;
3700
3701 for (unsigned i = NumItems(CALLEE_SAVED_REGISTERS_MASK); i > 0; i--)
3702 {
3703 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i - 1];
3704
3705 if ((regMask & regsMask) == 0)
3706 continue;
3707
3708 SetLocation(pContext, i - 1, PTR_DWORD((TADDR)ESP));
3709
3710 ESP += sizeof(unsigned);
3711 }
3712 }
3713
3714 /* we can now set the (address of the) return address */
3715
3716 pContext->PCTAddr = (TADDR)ESP;
3717 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3718
3719 /* Now adjust stack pointer */
3720
3721 pContext->SP = ESP + ESPIncrOnReturn(info);
3722}
3723
3724
3725/*****************************************************************************/
3726
3727void UnwindEbpDoubleAlignFrameProlog(
3728 PREGDISPLAY pContext,
3729 hdrInfo * info,
3730 PTR_CBYTE methodStart,
3731 unsigned flags)
3732{
3733 LIMITED_METHOD_DAC_CONTRACT;
3734
3735 _ASSERTE(info->prologOffs != hdrInfo::NOT_IN_PROLOG);
3736 _ASSERTE(info->ebpFrame || info->doubleAlign);
3737
3738 DWORD offset = 0;
3739
3740#ifdef _DEBUG
3741 // If the first two instructions are 'nop, int3', then we will
 // assume that it is from a JitHalt operation and skip past it
3743 if (methodStart[0] == X86_INSTR_NOP && methodStart[1] == X86_INSTR_INT3)
3744 {
3745 offset += 2;
3746 }
3747#endif
3748
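    // Rough sketch of the EBP / double-aligned prolog shape assumed below (illustrative
    // only; the exact sequence is chosen by the JIT):
    //     push ebp
    //     mov  ebp, esp
    //     and  esp, -8            ; only for double-aligned frames
    //     push esi                ; callee-saved registers other than EBP
    //     ...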
3749 /* Check for the case where EBP has not been updated yet. */
3750
3751 const DWORD curOffs = info->prologOffs;
3752
 // If we have still not executed "push ebp; mov ebp, esp", then we need to
 // report the frame relative to ESP
3755
3756 if (!InstructionAlreadyExecuted(offset + 1, curOffs))
3757 {
3758 _ASSERTE(CheckInstrByte(methodStart [offset], X86_INSTR_PUSH_EBP) ||
3759 CheckInstrWord(*PTR_WORD(methodStart + offset), X86_INSTR_W_MOV_EBP_ESP) ||
3760 CheckInstrByte(methodStart [offset], X86_INSTR_JMP_NEAR_REL32)); // a rejit jmp-stamp
3761
3762 /* If we're past the "push ebp", adjust ESP to pop EBP off */
3763
3764 if (curOffs == (offset + 1))
3765 pContext->SP += sizeof(TADDR);
3766
3767 /* Stack pointer points to return address */
3768
3769 pContext->PCTAddr = (TADDR)pContext->SP;
3770 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3771
3772 /* EBP and callee-saved registers still have the correct value */
3773
3774 return;
3775 }
3776
 // We are at least past the "push ebp; mov ebp, esp"
3778
3779 offset = SKIP_MOV_REG_REG(methodStart,
3780 SKIP_PUSH_REG(methodStart, offset));
3781
 /* At this point, EBP has been set up. The caller's ESP and the return address
 can be determined using EBP. Since we are still in the prolog,
 we need to know our exact location to determine the callee-saved registers */
3785
3786 const unsigned curEBP = *pContext->GetEbpLocation();
3787
3788 if (flags & UpdateAllRegs)
3789 {
3790 PTR_DWORD pSavedRegs = PTR_DWORD((TADDR)curEBP);
3791
3792 /* make sure that we align ESP just like the method's prolog did */
3793 if (info->doubleAlign)
3794 {
3795 // "and esp,-8"
3796 offset = SKIP_ARITH_REG(-8, methodStart, offset);
3797 if (curEBP & 0x04)
3798 {
3799 pSavedRegs--;
3800#ifdef _DEBUG
3801 if (dspPtr) printf("EnumRef: dblalign ebp: %08X\n", curEBP);
3802#endif
3803 }
3804 }
3805
3806 /* Increment "offset" in steps to see which callee-saved
3807 registers have been pushed already */
3808
3809 for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i++)
3810 {
3811 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
3812 _ASSERTE(regMask != RM_EBP);
3813
3814 if ((info->savedRegMask & regMask) == 0)
3815 continue;
3816
3817 if (InstructionAlreadyExecuted(offset, curOffs))
3818 {
3819 SetLocation(pContext, i, PTR_DWORD(--pSavedRegs));
3820 }
3821
3822 // "push reg"
3823 offset = SKIP_PUSH_REG(methodStart, offset) ;
3824 }
3825
3826 TRASH_CALLEE_UNSAVED_REGS(pContext);
3827 }
3828
3829 /* The caller's saved EBP is pointed to by our EBP */
3830
3831 pContext->SetEbpLocation(PTR_DWORD((TADDR)curEBP));
3832 pContext->SP = DWORD((TADDR)(curEBP + sizeof(void *)));
3833
3834 /* Stack pointer points to return address */
3835
3836 pContext->PCTAddr = (TADDR)pContext->SP;
3837 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3838}
3839
3840/*****************************************************************************/
3841
3842bool UnwindEbpDoubleAlignFrame(
3843 PREGDISPLAY pContext,
3844 EECodeInfo *pCodeInfo,
3845 hdrInfo *info,
3846 PTR_CBYTE table,
3847 PTR_CBYTE methodStart,
3848 DWORD curOffs,
3849 unsigned flags,
3850 StackwalkCacheUnwindInfo *pUnwindInfo) // out-only, perf improvement
3851{
3852 LIMITED_METHOD_CONTRACT;
3853 SUPPORTS_DAC;
3854
3855 _ASSERTE(info->ebpFrame || info->doubleAlign);
3856
3857 const unsigned curESP = pContext->SP;
3858 const unsigned curEBP = *pContext->GetEbpLocation();
3859
3860 /* First check if we are in a filter (which is obviously after the prolog) */
3861
3862 if (info->handlers && info->prologOffs == hdrInfo::NOT_IN_PROLOG)
3863 {
3864 TADDR baseSP;
3865
3866#ifdef WIN64EXCEPTIONS
 // Funclets' frame pointers (EBP) are always restored so they can access the main function's local variables.
 // Therefore the value of EBP is invalid for the unwinder, so we should use ESP instead.
3869 // TODO If funclet frame layout is changed from CodeGen::genFuncletProlog() and genFuncletEpilog(),
3870 // we need to change here accordingly. It is likely to have changes when introducing PSPSym.
3871 // TODO Currently we assume that ESP of funclet frames is always fixed but actually it could change.
3872 if (pCodeInfo->IsFunclet())
3873 {
3874 baseSP = curESP;
3875 // Set baseSP as initial SP
3876 baseSP += GetPushedArgSize(info, table, curOffs);
3877
3878 // 16-byte stack alignment padding (allocated in genFuncletProlog)
3879 // Current funclet frame layout (see CodeGen::genFuncletProlog() and genFuncletEpilog()):
3880 // prolog: sub esp, 12
3881 // epilog: add esp, 12
3882 // ret
3883 // SP alignment padding should be added for all instructions except the first one and the last one.
3884 // Epilog may not exist (unreachable), so we need to check the instruction code.
3885 const TADDR funcletStart = pCodeInfo->GetJitManager()->GetFuncletStartAddress(pCodeInfo);
3886 if (funcletStart != pCodeInfo->GetCodeAddress() && methodStart[pCodeInfo->GetRelOffset()] != X86_INSTR_RETN)
3887 baseSP += 12;
3888
3889 pContext->PCTAddr = baseSP;
3890 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3891
3892 pContext->SP = (DWORD)(baseSP + sizeof(TADDR));
3893
3894 return true;
3895 }
3896#else // WIN64EXCEPTIONS
3897
3898 FrameType frameType = GetHandlerFrameInfo(info, curEBP,
3899 curESP, (DWORD) IGNORE_VAL,
3900 &baseSP);
3901
3902 /* If we are in a filter, we only need to unwind the funclet stack.
3903 For catches/finallies, the normal handling will
 cause the frame to be unwound all the way up to ebp, skipping
3905 other frames above it. This is OK, as those frames will be
3906 dead. Also, the EE will detect that this has happened and it
3907 will handle any EE frames correctly.
3908 */
3909
3910 if (frameType == FR_INVALID)
3911 {
3912 return false;
3913 }
3914
3915 if (frameType == FR_FILTER)
3916 {
3917 pContext->PCTAddr = baseSP;
3918 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3919
3920 pContext->SP = (DWORD)(baseSP + sizeof(TADDR));
3921
3922 // pContext->pEbp = same as before;
3923
3924#ifdef _DEBUG
 /* The filter has to be called by the VM. So we don't need to
3926 update callee-saved registers.
3927 */
3928
3929 if (flags & UpdateAllRegs)
3930 {
3931 static DWORD s_badData = 0xDEADBEEF;
3932
3933 pContext->SetEaxLocation(&s_badData);
3934 pContext->SetEcxLocation(&s_badData);
3935 pContext->SetEdxLocation(&s_badData);
3936
3937 pContext->SetEbxLocation(&s_badData);
3938 pContext->SetEsiLocation(&s_badData);
3939 pContext->SetEdiLocation(&s_badData);
3940 }
3941#endif
3942
3943 if (pUnwindInfo)
3944 {
3945 // The filter funclet is like an ESP-framed-method.
3946 pUnwindInfo->fUseEbp = FALSE;
3947 pUnwindInfo->fUseEbpAsFrameReg = FALSE;
3948 }
3949
3950 return true;
3951 }
3952#endif // !WIN64EXCEPTIONS
3953 }
3954
3955 //
3956 // Prolog of an EBP method
3957 //
3958
3959 if (info->prologOffs != hdrInfo::NOT_IN_PROLOG)
3960 {
3961 UnwindEbpDoubleAlignFrameProlog(pContext, info, methodStart, flags);
3962
3963 /* Now adjust stack pointer. */
3964
3965 pContext->SP += ESPIncrOnReturn(info);
3966 return true;
3967 }
3968
3969 if (flags & UpdateAllRegs)
3970 {
3971 // Get to the first callee-saved register
3972 PTR_DWORD pSavedRegs = PTR_DWORD((TADDR)curEBP);
3973
3974 if (info->doubleAlign && (curEBP & 0x04))
3975 pSavedRegs--;
3976
3977 for (unsigned i = 0; i < NumItems(CALLEE_SAVED_REGISTERS_MASK) - 1; i++)
3978 {
3979 RegMask regMask = CALLEE_SAVED_REGISTERS_MASK[i];
3980 if ((info->savedRegMask & regMask) == 0)
3981 continue;
3982
3983 SetLocation(pContext, i, --pSavedRegs);
3984 }
3985 }
3986
3987 /* The caller's ESP will be equal to EBP + retAddrSize + argSize. */
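    /* For example (illustrative): with 12 bytes of stack parameters the caller's ESP is
       curEBP + 4 (skip the saved EBP) + 4 (return address) + 12. */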
3988
3989 pContext->SP = (DWORD)(curEBP + sizeof(curEBP) + ESPIncrOnReturn(info));
3990
3991 /* The caller's saved EIP is right after our EBP */
3992
3993 pContext->PCTAddr = (TADDR)curEBP + RETURN_ADDR_OFFS * sizeof(TADDR);
3994 pContext->ControlPC = *PTR_PCODE(pContext->PCTAddr);
3995
3996 /* The caller's saved EBP is pointed to by our EBP */
3997
3998 pContext->SetEbpLocation(PTR_DWORD((TADDR)curEBP));
3999
4000 return true;
4001}
4002
4003bool UnwindStackFrame(PREGDISPLAY pContext,
4004 EECodeInfo *pCodeInfo,
4005 unsigned flags,
4006 CodeManState *pState,
4007 StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
4008{
4009 CONTRACTL {
4010 NOTHROW;
4011 GC_NOTRIGGER;
4012 HOST_NOCALLS;
4013 SUPPORTS_DAC;
4014 } CONTRACTL_END;
4015
4016 // Address where the method has been interrupted
4017 PCODE breakPC = pContext->ControlPC;
4018 _ASSERTE(PCODEToPINSTR(breakPC) == pCodeInfo->GetCodeAddress());
4019
4020 PTR_CBYTE methodStart = PTR_CBYTE(pCodeInfo->GetSavedMethodCode());
4021
4022 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
4023 PTR_VOID methodInfoPtr = gcInfoToken.Info;
4024 DWORD curOffs = pCodeInfo->GetRelOffset();
4025
4026 _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
4027 CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
4028
4029 if (pState->dwIsSet == 0)
4030 {
4031 /* Extract the necessary information from the info block header */
4032
4033 stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
4034 curOffs,
4035 &stateBuf->hdrInfoBody);
4036 }
4037
4038 PTR_CBYTE table = dac_cast<PTR_CBYTE>(methodInfoPtr) + stateBuf->hdrInfoSize;
4039
4040 hdrInfo * info = &stateBuf->hdrInfoBody;
4041
4042 info->isSpeculativeStackWalk = ((flags & SpeculativeStackwalk) != 0);
4043
4044 if (pUnwindInfo != NULL)
4045 {
4046 pUnwindInfo->securityObjectOffset = 0;
4047 if (info->securityCheck)
4048 {
4049 _ASSERTE(info->ebpFrame);
4050 SIZE_T securityObjectOffset = (GetSecurityObjectOffset(info) / sizeof(void*));
4051 _ASSERTE(securityObjectOffset != 0);
4052 pUnwindInfo->securityObjectOffset = DWORD(securityObjectOffset);
4053 }
4054
4055 pUnwindInfo->fUseEbpAsFrameReg = info->ebpFrame;
4056 pUnwindInfo->fUseEbp = ((info->savedRegMask & RM_EBP) != 0);
4057 }
4058
4059 if (info->epilogOffs != hdrInfo::NOT_IN_EPILOG)
4060 {
4061 /*---------------------------------------------------------------------
4062 * First, handle the epilog
4063 */
4064
4065 PTR_CBYTE epilogBase = (PTR_CBYTE) (breakPC - info->epilogOffs);
4066 UnwindEpilog(pContext, info, epilogBase, flags);
4067 }
4068 else if (!info->ebpFrame && !info->doubleAlign)
4069 {
4070 /*---------------------------------------------------------------------
4071 * Now handle ESP frames
4072 */
4073
4074 UnwindEspFrame(pContext, info, table, methodStart, curOffs, flags);
4075 return true;
4076 }
4077 else
4078 {
4079 /*---------------------------------------------------------------------
 * Now we know that we have an EBP frame
4081 */
4082
4083 if (!UnwindEbpDoubleAlignFrame(pContext, pCodeInfo, info, table, methodStart, curOffs, flags, pUnwindInfo))
4084 return false;
4085 }
4086
4087 // TODO [DAVBR]: For the full fix for VsWhidbey 450273, all the below
4088 // may be uncommented once isLegalManagedCodeCaller works properly
4089 // with non-return address inputs, and with non-DEBUG builds
4090 /*
4091 // Ensure isLegalManagedCodeCaller succeeds for speculative stackwalks.
4092 // (We just assert this below for non-speculative stackwalks.)
4093 //
4094 FAIL_IF_SPECULATIVE_WALK(isLegalManagedCodeCaller(GetControlPC(pContext)));
4095 */
4096
4097 return true;
4098}
4099
4100#endif // _TARGET_X86_
4101
4102#ifdef WIN64EXCEPTIONS
4103#ifdef _TARGET_X86_
4104size_t EECodeManager::GetResumeSp( PCONTEXT pContext )
4105{
4106 PCODE currentPc = PCODE(pContext->Eip);
4107
4108 _ASSERTE(ExecutionManager::IsManagedCode(currentPc));
4109
4110 EECodeInfo codeInfo(currentPc);
4111
4112 PTR_CBYTE methodStart = PTR_CBYTE(codeInfo.GetSavedMethodCode());
4113
4114 GCInfoToken gcInfoToken = codeInfo.GetGCInfoToken();
4115 PTR_VOID methodInfoPtr = gcInfoToken.Info;
4116 DWORD curOffs = codeInfo.GetRelOffset();
4117
4118 CodeManStateBuf stateBuf;
4119
4120 stateBuf.hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
4121 curOffs,
4122 &stateBuf.hdrInfoBody);
4123
4124 PTR_CBYTE table = dac_cast<PTR_CBYTE>(methodInfoPtr) + stateBuf.hdrInfoSize;
4125
4126 hdrInfo *info = &stateBuf.hdrInfoBody;
4127
4128 _ASSERTE(info->epilogOffs == hdrInfo::NOT_IN_EPILOG && info->prologOffs == hdrInfo::NOT_IN_PROLOG);
4129
4130 bool isESPFrame = !info->ebpFrame && !info->doubleAlign;
4131
4132 if (codeInfo.IsFunclet())
4133 {
4134 // Treat funclet's frame as ESP frame
4135 isESPFrame = true;
4136 }
4137
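    // For ESP frames (and funclets) the resume SP is the current ESP plus any argument
    // bytes pushed at this offset; for EBP frames it is the base of the outermost
    // EBP-based frame (see GetOutermostBaseFP).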
4138 if (isESPFrame)
4139 {
4140 const size_t curESP = (size_t)(pContext->Esp);
4141 return curESP + GetPushedArgSize(info, table, curOffs);
4142 }
4143
4144 const size_t curEBP = (size_t)(pContext->Ebp);
4145 return GetOutermostBaseFP(curEBP, info);
4146}
4147#endif // _TARGET_X86_
4148#endif // WIN64EXCEPTIONS
4149
4150#ifndef CROSSGEN_COMPILE
4151#ifndef WIN64EXCEPTIONS
4152
4153/*****************************************************************************
4154 *
4155 * Unwind the current stack frame, i.e. update the virtual register
4156 * set in pContext. This will be similar to the state after the function
 * returns back to the caller (IP points to after the call, the frame and stack
 * pointers have been reset, callee-saved registers are restored (if UpdateAllRegs),
 * and callee-unsaved registers are trashed).
4160 * Returns success of operation.
4161 */
4162
4163bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
4164 EECodeInfo *pCodeInfo,
4165 unsigned flags,
4166 CodeManState *pState,
4167 StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
4168{
4169#ifdef _TARGET_X86_
4170 return ::UnwindStackFrame(pContext, pCodeInfo, flags, pState, pUnwindInfo);
4171#else // _TARGET_X86_
4172 PORTABILITY_ASSERT("EECodeManager::UnwindStackFrame");
4173 return false;
4174#endif // _TARGET_???_
4175}
4176
4177/*****************************************************************************/
4178#else // !WIN64EXCEPTIONS
4179/*****************************************************************************/
4180
4181bool EECodeManager::UnwindStackFrame(PREGDISPLAY pContext,
4182 EECodeInfo *pCodeInfo,
4183 unsigned flags,
4184 CodeManState *pState,
4185 StackwalkCacheUnwindInfo *pUnwindInfo /* out-only, perf improvement */)
4186{
4187 CONTRACTL {
4188 NOTHROW;
4189 GC_NOTRIGGER;
4190 } CONTRACTL_END;
4191
4192#if defined(_TARGET_AMD64_)
4193 // To avoid unnecessary computation, we only crack the unwind info if pUnwindInfo is not NULL, which only happens
4194 // if the LIGHTUNWIND flag is passed to StackWalkFramesEx().
4195 if (pUnwindInfo != NULL)
4196 {
4197 pCodeInfo->GetOffsetsFromUnwindInfo(&(pUnwindInfo->RSPOffsetFromUnwindInfo),
4198 &(pUnwindInfo->RBPOffset));
4199 }
4200#endif // _TARGET_AMD64_
4201
4202 _ASSERTE(pCodeInfo != NULL);
4203 Thread::VirtualUnwindCallFrame(pContext, pCodeInfo);
4204 return true;
4205}
4206
4207/*****************************************************************************/
4208#endif // WIN64EXCEPTIONS
4209#endif // !CROSSGEN_COMPILE
4210
4211/*****************************************************************************/
4212
4213/* report args in 'msig' to the GC.
4214 'argsStart' is start of the stack-based arguments
4215 'varArgSig' describes the arguments
4216 'ctx' has the GC reporting info
4217*/
4218void promoteVarArgs(PTR_BYTE argsStart, PTR_VASigCookie varArgSig, GCCONTEXT* ctx)
4219{
4220 WRAPPER_NO_CONTRACT;
4221
4222 //Note: no instantiations needed for varargs
4223 MetaSig msig(varArgSig->signature,
4224 varArgSig->pModule,
4225 NULL);
4226
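    // ArgIterator reports offsets relative to a TransitionBlock, so back argsStart up by
    // TransitionBlock::GetOffsetOfArgs() to form a pseudo frame base that those offsets
    // can be applied to directly.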
4227 PTR_BYTE pFrameBase = argsStart - TransitionBlock::GetOffsetOfArgs();
4228
4229 ArgIterator argit(&msig);
4230
4231#ifdef _TARGET_X86_
 // For the X86 target the JIT does not report any of the fixed args for a varargs method,
 // so we report the fixed args in the loop below
4234 bool skipFixedArgs = false;
4235#else
 // For other platforms the JITs do report the fixed args of a varargs method,
 // so below we must skip past the fixed args and report only the varargs
4238 bool skipFixedArgs = true;
4239#endif
4240
4241 bool inVarArgs = false;
4242
4243 int argOffset;
4244 while ((argOffset = argit.GetNextOffset()) != TransitionBlock::InvalidOffset)
4245 {
4246 if (msig.GetArgProps().AtSentinel())
4247 inVarArgs = true;
4248
4249 // if skipFixedArgs is false we report all arguments
4250 // otherwise we just report the varargs.
4251 if (!skipFixedArgs || inVarArgs)
4252 {
4253 ArgDestination argDest(pFrameBase, argOffset, argit.GetArgLocDescForStructInRegs());
4254 msig.GcScanRoots(&argDest, ctx->f, ctx->sc);
4255 }
4256 }
4257}
4258
4259INDEBUG(void* forceStack1;)
4260
4261#ifndef CROSSGEN_COMPILE
4262#ifndef USE_GC_INFO_DECODER
4263
4264/*****************************************************************************
4265 *
4266 * Enumerate all live object references in that function using
4267 * the virtual register set.
4268 * Returns success of operation.
4269 */
4270
4271bool EECodeManager::EnumGcRefs( PREGDISPLAY pContext,
4272 EECodeInfo *pCodeInfo,
4273 unsigned flags,
4274 GCEnumCallback pCallBack,
4275 LPVOID hCallBack,
4276 DWORD relOffsetOverride)
4277{
4278 CONTRACTL {
4279 NOTHROW;
4280 GC_NOTRIGGER;
4281 } CONTRACTL_END;
4282
4283#ifdef WIN64EXCEPTIONS
4284 if (flags & ParentOfFuncletStackFrame)
4285 {
4286 LOG((LF_GCROOTS, LL_INFO100000, "Not reporting this frame because it was already reported via another funclet.\n"));
4287 return true;
4288 }
4289#endif // WIN64EXCEPTIONS
4290
4291 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
4292 unsigned curOffs = pCodeInfo->GetRelOffset();
4293
4294 unsigned EBP = *pContext->GetEbpLocation();
4295 unsigned ESP = pContext->SP;
4296
4297 unsigned ptrOffs;
4298
4299 unsigned count;
4300
4301 hdrInfo info;
4302 PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
4303#if 0
4304 printf("EECodeManager::EnumGcRefs - EIP = %08x ESP = %08x offset = %x GC Info is at %08x\n", *pContext->pPC, ESP, curOffs, table);
4305#endif
4306
4307
4308 /* Extract the necessary information from the info block header */
4309
4310 table += DecodeGCHdrInfo(gcInfoToken,
4311 curOffs,
4312 &info);
4313
4314 _ASSERTE( curOffs <= info.methodSize);
4315
4316#ifdef _DEBUG
4317// if ((gcInfoToken.Info == (void*)0x37760d0) && (curOffs == 0x264))
4318// __asm int 3;
4319
4320 if (trEnumGCRefs) {
4321 static unsigned lastESP = 0;
4322 unsigned diffESP = ESP - lastESP;
4323 if (diffESP > 0xFFFF) {
4324 printf("------------------------------------------------------\n");
4325 }
4326 lastESP = ESP;
4327 printf("EnumGCRefs [%s][%s] at %s.%s + 0x%03X:\n",
4328 info.ebpFrame?"ebp":" ",
4329 info.interruptible?"int":" ",
4330 "UnknownClass","UnknownMethod", curOffs);
4331 fflush(stdout);
4332 }
4333#endif
4334
4335 /* Are we in the prolog or epilog of the method? */
4336
4337 if (info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
4338 info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
4339 {
4340
4341#if !DUMP_PTR_REFS
4342 // Under normal circumstances the system will not suspend a thread
 // if it is in the prolog or epilog of the function. However, a ThreadAbort
 // exception or a stack overflow can cause EH to happen in a prolog.
 // Once in the handler, a GC can happen, so we can get to this code path.
 // However, since we are tearing down this frame, we don't need to report
4347 // anything and we can simply return.
4348
4349 _ASSERTE(flags & ExecutionAborted);
4350#endif
4351 return true;
4352 }
4353
4354#ifdef _DEBUG
4355#define CHK_AND_REPORT_REG(reg, doIt, iptr, regName) \
4356 if (doIt) \
4357 { \
4358 if (dspPtr) \
4359 printf(" Live pointer register %s: ", #regName); \
4360 pCallBack(hCallBack, \
4361 (OBJECTREF*)(pContext->Get##regName##Location()), \
4362 (iptr ? GC_CALL_INTERIOR : 0) \
4363 | CHECK_APP_DOMAIN \
4364 DAC_ARG(DacSlotLocation(reg, 0, false))); \
4365 }
4366#else // !_DEBUG
4367#define CHK_AND_REPORT_REG(reg, doIt, iptr, regName) \
4368 if (doIt) \
4369 pCallBack(hCallBack, \
4370 (OBJECTREF*)(pContext->Get##regName##Location()), \
4371 (iptr ? GC_CALL_INTERIOR : 0) \
4372 | CHECK_APP_DOMAIN \
4373 DAC_ARG(DacSlotLocation(reg, 0, false)));
4374
4375#endif // _DEBUG
4376
4377 /* What kind of a frame is this ? */
4378
4379 FrameType frameType = FR_NORMAL;
4380 TADDR baseSP = 0;
4381
4382 if (info.handlers)
4383 {
4384 _ASSERTE(info.ebpFrame);
4385
4386 bool hasInnerFilter, hadInnerFilter;
4387 frameType = GetHandlerFrameInfo(&info, EBP,
4388 ESP, (DWORD) IGNORE_VAL,
4389 &baseSP, NULL,
4390 &hasInnerFilter, &hadInnerFilter);
4391 _ASSERTE(frameType != FR_INVALID);
4392
4393 /* If this is the parent frame of a filter which is currently
4394 executing, then the filter would have enumerated the frame using
4395 the filter PC.
4396 */
4397
4398 if (hasInnerFilter)
4399 return true;
4400
 /* If we are in a try and we had a filter execute, we may have reported
 GC refs from the filter (and not using the try's offset). So
 we had better use the filter's end offset, as the try is
 effectively dead and its GC refs would be stale */
4405
4406 if (hadInnerFilter)
4407 {
4408 PTR_TADDR pFirstBaseSPslot = GetFirstBaseSPslotPtr(EBP, &info);
4409 curOffs = (unsigned)pFirstBaseSPslot[1] - 1;
4410 _ASSERTE(curOffs < info.methodSize);
4411
4412 /* Extract the necessary information from the info block header */
4413
4414 table = PTR_CBYTE(gcInfoToken.Info);
4415
4416 table += DecodeGCHdrInfo(gcInfoToken,
4417 curOffs,
4418 &info);
4419 }
4420 }
4421
4422 bool willContinueExecution = !(flags & ExecutionAborted);
4423 unsigned pushedSize = 0;
4424
4425 /* if we have been interrupted we don't have to report registers/arguments
4426 * because we are about to lose this context anyway.
 * Alas, if we are in an ebp-less method we have to parse the table
4428 * in order to adjust ESP.
4429 *
4430 * Note that we report "this" for all methods, even if
 * noncontinuable, because of the off chance they may be
4432 * synchronized and we have to release the monitor on unwind. This
4433 * could conceivably be optimized, but it turns out to be more
4434 * expensive to check whether we're synchronized (which involves
4435 * consulting metadata) than to just report "this" all the time in
4436 * our most important scenarios.
4437 */
4438
4439 if (info.interruptible)
4440 {
4441 // If we are not on the active stack frame, we need to report gc registers
4442 // that are live before the call. The reason is that the liveness of gc registers
4443 // may change across a call to a method that does not return. In this case the instruction
4444 // after the call may be a jump target and a register that didn't have a live gc pointer
4445 // before the call may have a live gc pointer after the jump. To make sure we report the
4446 // registers that have live gc pointers before the call we subtract 1 from curOffs.
4447 unsigned curOffsRegs = (flags & ActiveStackFrame) != 0 ? curOffs : curOffs - 1;
4448
4449 pushedSize = scanArgRegTableI(skipToArgReg(info, table), curOffsRegs, curOffs, &info);
4450
4451 RegMask regs = info.regMaskResult;
4452 RegMask iregs = info.iregMaskResult;
4453 ptrArgTP args = info.argMaskResult;
4454 ptrArgTP iargs = info.iargMaskResult;
4455
4456 _ASSERTE((isZero(args) || pushedSize != 0) || info.ebpFrame);
4457 _ASSERTE((args & iargs) == iargs);
4458 // Only synchronized methods and generic code that accesses
4459 // the type context via "this" need to report "this".
 // If it's reported for other methods, it's probably
4461 // done incorrectly. So flag such cases.
4462 _ASSERTE(info.thisPtrResult == REGI_NA ||
4463 pCodeInfo->GetMethodDesc()->IsSynchronized() ||
4464 pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
4465
4466 /* now report registers and arguments if we are not interrupted */
4467
4468 if (willContinueExecution)
4469 {
4470
 /* Propagate unsaved (scratch) registers only in the "current" method */
 /* If this is not the active method, then the callee will
 * trash these registers, and so we won't need to report them */
4474
4475 if (flags & ActiveStackFrame)
4476 {
4477 CHK_AND_REPORT_REG(REGI_EAX, regs & RM_EAX, iregs & RM_EAX, Eax);
4478 CHK_AND_REPORT_REG(REGI_ECX, regs & RM_ECX, iregs & RM_ECX, Ecx);
4479 CHK_AND_REPORT_REG(REGI_EDX, regs & RM_EDX, iregs & RM_EDX, Edx);
4480 }
4481
4482 CHK_AND_REPORT_REG(REGI_EBX, regs & RM_EBX, iregs & RM_EBX, Ebx);
4483 CHK_AND_REPORT_REG(REGI_EBP, regs & RM_EBP, iregs & RM_EBP, Ebp);
4484 CHK_AND_REPORT_REG(REGI_ESI, regs & RM_ESI, iregs & RM_ESI, Esi);
4485 CHK_AND_REPORT_REG(REGI_EDI, regs & RM_EDI, iregs & RM_EDI, Edi);
4486 _ASSERTE(!(regs & RM_ESP));
4487
4488 /* Report any pending pointer arguments */
4489
4490 DWORD * pPendingArgFirst; // points **AT** first parameter
4491 if (!info.ebpFrame)
4492 {
4493 // -sizeof(void*) because we want to point *AT* first parameter
4494 pPendingArgFirst = (DWORD *)(size_t)(ESP + pushedSize - sizeof(void*));
4495 }
4496 else
4497 {
4498 _ASSERTE(willContinueExecution);
4499
4500 if (info.handlers)
4501 {
4502 // -sizeof(void*) because we want to point *AT* first parameter
4503 pPendingArgFirst = (DWORD *)(size_t)(baseSP - sizeof(void*));
4504 }
4505 else if (info.localloc)
4506 {
4507 baseSP = *(DWORD *)(size_t)(EBP - GetLocallocSPOffset(&info));
4508 // -sizeof(void*) because we want to point *AT* first parameter
4509 pPendingArgFirst = (DWORD *)(size_t) (baseSP - sizeof(void*));
4510 }
4511 else
4512 {
 // Note that 'info.stackSize' includes the size for pushing EBP, but EBP is pushed
4514 // BEFORE EBP is set from ESP, thus (EBP - info.stackSize) actually points past
4515 // the frame by one DWORD, and thus points *AT* the first parameter
4516
4517 pPendingArgFirst = (DWORD *)(size_t)(EBP - info.stackSize);
4518 }
4519 }
4520
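            // Bit i of 'args' marks a live pointer in the pointer-sized stack slot at
            // (pPendingArgFirst - i); the matching bit in 'iargs' flags it as an interior
            // pointer.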
4521 if (!isZero(args))
4522 {
4523 unsigned i = 0;
4524 ptrArgTP b(1);
4525 for (; !isZero(args) && (i < MAX_PTRARG_OFS); i += 1, b <<= 1)
4526 {
4527 if (intersect(args,b))
4528 {
4529 unsigned argAddr = (unsigned)(size_t)(pPendingArgFirst - i);
4530 bool iptr = false;
4531
4532 setDiff(args, b);
4533 if (intersect(iargs,b))
4534 {
4535 setDiff(iargs, b);
4536 iptr = true;
4537 }
4538
4539#ifdef _DEBUG
4540 if (dspPtr)
4541 {
4542 printf(" Pushed ptr arg [E");
4543 if (info.ebpFrame)
4544 printf("BP-%02XH]: ", EBP - argAddr);
4545 else
4546 printf("SP+%02XH]: ", argAddr - ESP);
4547 }
4548#endif
4549 _ASSERTE(true == GC_CALL_INTERIOR);
4550 pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, (int)iptr | CHECK_APP_DOMAIN
4551 DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
4552 info.ebpFrame ? EBP - argAddr : argAddr - ESP,
4553 true)));
4554 }
4555 }
4556 }
4557 }
4558 else
4559 {
4560 // Is "this" enregistered. If so, report it as we might need to
4561 // release the monitor for synchronized methods.
4562 // Else, it is on the stack and will be reported below.
4563
4564 if (info.thisPtrResult != REGI_NA)
4565 {
4566 // Synchronized methods and methods satisfying
4567 // MethodDesc::AcquiresInstMethodTableFromThis (i.e. those
4568 // where "this" is reported in thisPtrResult) are
4569 // not supported on value types.
4570 _ASSERTE((regNumToMask(info.thisPtrResult) & info.iregMaskResult)== 0);
4571
4572 void * thisReg = getCalleeSavedReg(pContext, info.thisPtrResult);
4573 pCallBack(hCallBack, (OBJECTREF *)thisReg, CHECK_APP_DOMAIN
4574 DAC_ARG(DacSlotLocation(info.thisPtrResult, 0, false)));
4575 }
4576 }
4577 }
4578 else /* not interruptible */
4579 {
4580 pushedSize = scanArgRegTable(skipToArgReg(info, table), curOffs, &info);
4581
4582 RegMask regMask = info.regMaskResult;
4583 RegMask iregMask = info.iregMaskResult;
4584 ptrArgTP argMask = info.argMaskResult;
4585 ptrArgTP iargMask = info.iargMaskResult;
4586 unsigned argHnum = info.argHnumResult;
4587 PTR_CBYTE argTab = info.argTabResult;
4588
4589 // Only synchronized methods and generic code that accesses
4590 // the type context via "this" need to report "this".
 // If it's reported for other methods, it's probably
4592 // done incorrectly. So flag such cases.
4593 _ASSERTE(info.thisPtrResult == REGI_NA ||
4594 pCodeInfo->GetMethodDesc()->IsSynchronized() ||
4595 pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
4596
4597
4598 /* now report registers and arguments if we are not interrupted */
4599
4600 if (willContinueExecution)
4601 {
4602
4603 /* Report all live pointer registers */
4604
4605 CHK_AND_REPORT_REG(REGI_EDI, regMask & RM_EDI, iregMask & RM_EDI, Edi);
4606 CHK_AND_REPORT_REG(REGI_ESI, regMask & RM_ESI, iregMask & RM_ESI, Esi);
4607 CHK_AND_REPORT_REG(REGI_EBX, regMask & RM_EBX, iregMask & RM_EBX, Ebx);
4608 CHK_AND_REPORT_REG(REGI_EBP, regMask & RM_EBP, iregMask & RM_EBP, Ebp);
4609
 /* Esp can't be reported */
4611 _ASSERTE(!(regMask & RM_ESP));
4612 /* No callee-trashed registers */
4613 _ASSERTE(!(regMask & RM_CALLEE_TRASHED));
4614 /* EBP can't be reported unless we have an EBP-less frame */
4615 _ASSERTE(!(regMask & RM_EBP) || !(info.ebpFrame));
4616
4617 /* Report any pending pointer arguments */
4618
4619 if (argTab != 0)
4620 {
4621 unsigned lowBits, stkOffs, argAddr, val;
4622
4623 // argMask does not fit in 32-bits
4624 // thus arguments are reported via a table
4625 // Both of these are very rare cases
4626
4627 do
4628 {
4629 val = fastDecodeUnsigned(argTab);
4630
4631 lowBits = val & OFFSET_MASK;
4632 stkOffs = val & ~OFFSET_MASK;
4633 _ASSERTE((lowBits == 0) || (lowBits == byref_OFFSET_FLAG));
4634
4635 argAddr = ESP + stkOffs;
4636#ifdef _DEBUG
4637 if (dspPtr)
4638 printf(" Pushed %sptr arg at [ESP+%02XH]",
4639 lowBits ? "iptr " : "", stkOffs);
4640#endif
4641 _ASSERTE(byref_OFFSET_FLAG == GC_CALL_INTERIOR);
4642 pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, lowBits | CHECK_APP_DOMAIN
4643 DAC_ARG(DacSlotLocation(REGI_ESP, stkOffs, true)));
4644 }
4645 while(--argHnum);
4646
4647 _ASSERTE(info.argTabResult + info.argTabBytes == argTab);
4648 }
4649 else
4650 {
4651 unsigned argAddr = ESP;
4652
4653 while (!isZero(argMask))
4654 {
4655 _ASSERTE(argHnum-- > 0);
4656
4657 if (toUnsigned(argMask) & 1)
4658 {
4659 bool iptr = false;
4660
4661 if (toUnsigned(iargMask) & 1)
4662 iptr = true;
4663#ifdef _DEBUG
4664 if (dspPtr)
4665 printf(" Pushed ptr arg at [ESP+%02XH]",
4666 argAddr - ESP);
4667#endif
4668 _ASSERTE(true == GC_CALL_INTERIOR);
4669 pCallBack(hCallBack, (OBJECTREF *)(size_t)argAddr, (int)iptr | CHECK_APP_DOMAIN
4670 DAC_ARG(DacSlotLocation(REGI_ESP, argAddr - ESP, true)));
4671 }
4672
4673 argMask >>= 1;
4674 iargMask >>= 1;
4675 argAddr += 4;
4676 }
4677
4678 }
4679
4680 }
4681 else
4682 {
4683 // Is "this" enregistered. If so, report it as we will need to
4684 // release the monitor. Else, it is on the stack and will be
4685 // reported below.
4686
4687 // For partially interruptible code, info.thisPtrResult will be
4688 // the last known location of "this". So the compiler needs to
4689 // generate information which is correct at every point in the code,
4690 // not just at call sites.
4691
4692 if (info.thisPtrResult != REGI_NA)
4693 {
4694 // Synchronized methods on value types are not supported
4695 _ASSERTE((regNumToMask(info.thisPtrResult) & info.iregMaskResult)== 0);
4696
4697 void * thisReg = getCalleeSavedReg(pContext, info.thisPtrResult);
4698 pCallBack(hCallBack, (OBJECTREF *)thisReg, CHECK_APP_DOMAIN
4699 DAC_ARG(DacSlotLocation(info.thisPtrResult, 0, false)));
4700 }
4701 }
4702
4703 } //info.interruptible
4704
4705 /* compute the argument base (reference point) */
4706
4707 unsigned argBase;
4708
4709 if (info.ebpFrame)
4710 argBase = EBP;
4711 else
4712 argBase = ESP + pushedSize;
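    // argBase is the reference point against which the untracked and lifetime tables
    // encode their offsets: EBP for EBP frames, or ESP plus the currently pushed argument
    // bytes for ESP frames.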
4713
4714#if VERIFY_GC_TABLES
4715 _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
4716#endif
4717
4718 unsigned ptrAddr;
4719 unsigned lowBits;
4720
4721
4722 /* Process the untracked frame variable table */
4723
4724#if defined(WIN64EXCEPTIONS) // funclets
 // Filters are the only funclets that run during the 1st pass, and must have
4726 // both the leaf and the parent frame reported. In order to avoid double
4727 // reporting of the untracked variables, do not report them for the filter.
4728 if (!pCodeInfo->GetJitManager()->IsFilterFunclet(pCodeInfo))
4729#endif // WIN64EXCEPTIONS
4730 {
4731 count = info.untrackedCnt;
4732 int lastStkOffs = 0;
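        // Untracked entries are delta-encoded: each decoded value is subtracted from the
        // previous absolute offset (lastStkOffs) to recover this entry's offset, and the
        // low OFFSET_MASK bits carry the pinned/byref flags.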
4733 while (count-- > 0)
4734 {
4735 int stkOffs = fastDecodeSigned(table);
4736 stkOffs = lastStkOffs - stkOffs;
4737 lastStkOffs = stkOffs;
4738
4739 _ASSERTE(0 == ~OFFSET_MASK % sizeof(void*));
4740
4741 lowBits = OFFSET_MASK & stkOffs;
4742 stkOffs &= ~OFFSET_MASK;
4743
4744 ptrAddr = argBase + stkOffs;
4745 if (info.doubleAlign && stkOffs >= int(info.stackSize - sizeof(void*))) {
 // We encode the arguments as if they were ESP-based variables even though they aren't.
 // If this frame had been an ESP-based frame, this fake frame would be one DWORD
 // smaller than the real frame because it would not have pushed EBP, but the real frame did.
 // Thus to get the correct EBP-relative offset we have to adjust by info.stackSize - sizeof(void*)
4750 ptrAddr = EBP + (stkOffs-(info.stackSize - sizeof(void*)));
4751 }
4752
4753#ifdef _DEBUG
4754 if (dspPtr)
4755 {
4756 printf(" Untracked %s%s local at [E",
4757 (lowBits & pinned_OFFSET_FLAG) ? "pinned " : "",
4758 (lowBits & byref_OFFSET_FLAG) ? "byref" : "");
4759
4760 int dspOffs = ptrAddr;
4761 char frameType;
4762
4763 if (info.ebpFrame) {
4764 dspOffs -= EBP;
4765 frameType = 'B';
4766 }
4767 else {
4768 dspOffs -= ESP;
4769 frameType = 'S';
4770 }
4771
4772 if (dspOffs < 0)
4773 printf("%cP-%02XH]: ", frameType, -dspOffs);
4774 else
4775 printf("%cP+%02XH]: ", frameType, +dspOffs);
4776 }
4777#endif
4778
4779 _ASSERTE((pinned_OFFSET_FLAG == GC_CALL_PINNED) &&
4780 (byref_OFFSET_FLAG == GC_CALL_INTERIOR));
4781 pCallBack(hCallBack, (OBJECTREF*)(size_t)ptrAddr, lowBits | CHECK_APP_DOMAIN
4782 DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
4783 info.ebpFrame ? EBP - ptrAddr : ptrAddr - ESP,
4784 true)));
4785 }
4786
4787 }
4788
4789#if VERIFY_GC_TABLES
4790 _ASSERTE(*castto(table, unsigned short *)++ == 0xCAFE);
4791#endif
4792
4793 /* Process the frame variable lifetime table */
4794 count = info.varPtrTableSize;
4795
4796 /* If we are not in the active method, we are currently pointing
4797 * to the return address; at the return address stack variables
 * can become dead if the call is the last instruction of a try block
 * and the return address is the jump around the catch block. Therefore
 * we simply assume an offset inside of the call instruction.
4801 */
4802
4803 unsigned newCurOffs;
4804
4805 if (willContinueExecution)
4806 {
4807 newCurOffs = (flags & ActiveStackFrame) ? curOffs // after "call"
4808 : curOffs-1; // inside "call"
4809 }
4810 else
4811 {
4812 /* However if ExecutionAborted, then this must be one of the
4813 * ExceptionFrames. Handle accordingly
4814 */
4815 _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame));
4816
4817 newCurOffs = (flags & AbortingCall) ? curOffs-1 // inside "call"
4818 : curOffs; // at faulting instr, or start of "try"
4819 }
4820
4821 ptrOffs = 0;
4822
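    // Lifetime entries are sorted by starting offset and delta-encoded: each entry's start
    // is relative to the previous entry's start (ptrOffs), and its end is relative to its
    // own start.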
4823 while (count-- > 0)
4824 {
4825 int stkOffs;
4826 unsigned begOffs;
4827 unsigned endOffs;
4828
4829 stkOffs = fastDecodeUnsigned(table);
4830 begOffs = ptrOffs + fastDecodeUnsigned(table);
4831 endOffs = begOffs + fastDecodeUnsigned(table);
4832
4833 _ASSERTE(0 == ~OFFSET_MASK % sizeof(void*));
4834
4835 lowBits = OFFSET_MASK & stkOffs;
4836 stkOffs &= ~OFFSET_MASK;
4837
4838 if (info.ebpFrame) {
4839 stkOffs = -stkOffs;
4840 _ASSERTE(stkOffs < 0);
4841 }
4842 else {
4843 _ASSERTE(stkOffs >= 0);
4844 }
4845
4846 ptrAddr = argBase + stkOffs;
4847
4848 /* Is this variable live right now? */
4849
4850 if (newCurOffs >= begOffs)
4851 {
4852 if (newCurOffs < endOffs)
4853 {
4854#ifdef _DEBUG
4855 if (dspPtr) {
4856 printf(" Frame %s%s local at [E",
4857 (lowBits & byref_OFFSET_FLAG) ? "byref " : "",
4858#ifndef WIN64EXCEPTIONS
4859 (lowBits & this_OFFSET_FLAG) ? "this-ptr" : "");
4860#else
4861 (lowBits & pinned_OFFSET_FLAG) ? "pinned" : "");
4862#endif
4863
4864
4865 int dspOffs = ptrAddr;
4866 char frameType;
4867
4868 if (info.ebpFrame) {
4869 dspOffs -= EBP;
4870 frameType = 'B';
4871 }
4872 else {
4873 dspOffs -= ESP;
4874 frameType = 'S';
4875 }
4876
4877 if (dspOffs < 0)
4878 printf("%cP-%02XH]: ", frameType, -dspOffs);
4879 else
4880 printf("%cP+%02XH]: ", frameType, +dspOffs);
4881 }
4882#endif
4883
4884 unsigned flags = CHECK_APP_DOMAIN;
4885#ifndef WIN64EXCEPTIONS
4886 // First Bit : byref
4887 // Second Bit : this
4888 // The second bit means `this` not `pinned`. So we ignore it.
4889 flags |= lowBits & byref_OFFSET_FLAG;
4890#else
4891 // First Bit : byref
4892 // Second Bit : pinned
4893 // Both bits are valid
4894 flags |= lowBits;
4895#endif
4896
4897 _ASSERTE(byref_OFFSET_FLAG == GC_CALL_INTERIOR);
4898 pCallBack(hCallBack, (OBJECTREF*)(size_t)ptrAddr, flags
4899 DAC_ARG(DacSlotLocation(info.ebpFrame ? REGI_EBP : REGI_ESP,
4900 info.ebpFrame ? EBP - ptrAddr : ptrAddr - ESP,
4901 true)));
4902 }
4903 }
4904 // exit loop early if start of live range is beyond PC, as ranges are sorted by lower bound
4905 else break;
4906
4907 ptrOffs = begOffs;
4908 }
4909
4910
4911#if VERIFY_GC_TABLES
4912 _ASSERTE(*castto(table, unsigned short *)++ == 0xBABE);
4913#endif
4914
4915#ifdef WIN64EXCEPTIONS // funclets
4916 //
4917 // If we're in a funclet, we do not want to report the incoming varargs. This is
4918 // taken care of by the parent method and the funclet should access those arguments
4919 // by way of the parent method's stack frame.
4920 //
4921 if(pCodeInfo->IsFunclet())
4922 {
4923 return true;
4924 }
4925#endif // WIN64EXCEPTIONS
4926
 /* Are we a varargs function? If so, we have to report all args
 except 'this' (note that the GC tables created by the x86 jit
 do not contain ANY arguments except 'this', even if they
 were statically declared) */
4931
4932 if (info.varargs) {
4933 LOG((LF_GCINFO, LL_INFO100, "Reporting incoming vararg GC refs\n"));
4934
4935 PTR_BYTE argsStart;
4936
4937 if (info.ebpFrame || info.doubleAlign)
4938 argsStart = PTR_BYTE((size_t)EBP) + 2* sizeof(void*); // pushed EBP and retAddr
4939 else
4940 argsStart = PTR_BYTE((size_t)argBase) + info.stackSize + sizeof(void*); // ESP + locals + retAddr
4941
4942#if defined(_DEBUG) && !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
4943 // Note that I really want to say hCallBack is a GCCONTEXT, but this is pretty close
4944 extern void GcEnumObject(LPVOID pData, OBJECTREF *pObj, uint32_t flags);
4945 _ASSERTE((void*) GcEnumObject == pCallBack);
4946#endif
4947 GCCONTEXT *pCtx = (GCCONTEXT *) hCallBack;
4948
4949 // For varargs, look up the signature using the varArgSig token passed on the stack
4950 PTR_VASigCookie varArgSig = *PTR_PTR_VASigCookie(argsStart);
4951
4952 promoteVarArgs(argsStart, varArgSig, pCtx);
4953 }
4954
4955 return true;
4956}
4957
4958#else // !USE_GC_INFO_DECODER
4959
4960
4961/*****************************************************************************
4962 *
4963 * Enumerate all live object references in that function using
4964 * the virtual register set.
4965 * Returns success of operation.
4966 */
4967
4968bool EECodeManager::EnumGcRefs( PREGDISPLAY pRD,
4969 EECodeInfo *pCodeInfo,
4970 unsigned flags,
4971 GCEnumCallback pCallBack,
4972 LPVOID hCallBack,
4973 DWORD relOffsetOverride)
4974{
4975 CONTRACTL {
4976 NOTHROW;
4977 GC_NOTRIGGER;
4978 } CONTRACTL_END;
4979
4980 unsigned curOffs = pCodeInfo->GetRelOffset();
4981
4982#ifdef _TARGET_ARM_
4983 // On ARM, the low-order bit of an instruction pointer indicates Thumb vs. ARM mode.
4984 // Mask this off; all instructions are two-byte aligned.
4985 curOffs &= (~THUMB_CODE);
4986#endif // _TARGET_ARM_
4987
4988#ifdef _DEBUG
4989 // Get the name of the current method
4990 const char * methodName = pCodeInfo->GetMethodDesc()->GetName();
4991 LOG((LF_GCINFO, LL_INFO1000, "Reporting GC refs for %s at offset %04x.\n",
4992 methodName, curOffs));
4993#endif
4994
4995 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
4996
4997#if defined(STRESS_HEAP) && defined(PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED)
4998 // When we simulate a hijack during gcstress
4999 // we start with ActiveStackFrame and the offset
5000 // after the call
5001 // We need to make it look like a non-leaf frame
5002 // so that it's treated like a regular hijack
5003 if (flags & ActiveStackFrame)
5004 {
5005 GcInfoDecoder _gcInfoDecoder(
5006 gcInfoToken,
5007 DECODE_INTERRUPTIBILITY,
5008 curOffs
5009 );
5010 if(!_gcInfoDecoder.IsInterruptible())
5011 {
5012 // This must be the offset after a call
5013#ifdef _DEBUG
5014 GcInfoDecoder _safePointDecoder(gcInfoToken, (GcInfoDecoderFlags)0, 0);
5015 _ASSERTE(_safePointDecoder.IsSafePoint(curOffs));
5016#endif
5017 flags &= ~((unsigned)ActiveStackFrame);
5018 }
5019 }
5020#endif // STRESS_HEAP && PARTIALLY_INTERRUPTIBLE_GC_SUPPORTED
5021
5022#ifdef _DEBUG
5023 if (flags & ActiveStackFrame)
5024 {
5025 GcInfoDecoder _gcInfoDecoder(
5026 gcInfoToken,
5027 DECODE_INTERRUPTIBILITY,
5028 curOffs
5029 );
5030 _ASSERTE(_gcInfoDecoder.IsInterruptible());
5031 }
5032#endif
5033
5034 /* If we are not in the active method, we are currently pointing
5035 * to the return address; at the return address stack variables
5036 * can become dead if the call is the last instruction of a try block
5037 * and the return address is the jump around the catch block. Therefore
5038 * we simply assume an offset inside of call instruction.
5039 * NOTE: The GcInfoDecoder depends on this; if you change it, you must
5040 * revisit the GcInfoEncoder/Decoder
5041 */
5042
5043 if (!(flags & ExecutionAborted))
5044 {
5045 if (!(flags & ActiveStackFrame))
5046 {
5047 curOffs--;
5048 LOG((LF_GCINFO, LL_INFO1000, "Adjusted GC reporting offset due to flags !ExecutionAborted && !ActiveStackFrame. Now reporting GC refs for %s at offset %04x.\n",
5049 methodName, curOffs));
5050 }
5051 }
5052 else
5053 {
5054 /* However if ExecutionAborted, then this must be one of the
5055 * ExceptionFrames. Handle accordingly
5056 */
5057 _ASSERTE(!(flags & AbortingCall) || !(flags & ActiveStackFrame));
5058
5059 if (flags & AbortingCall)
5060 {
5061 curOffs--;
5062 LOG((LF_GCINFO, LL_INFO1000, "Adjusted GC reporting offset due to flags ExecutionAborted && AbortingCall. Now reporting GC refs for %s at offset %04x.\n",
5063 methodName, curOffs));
5064 }
5065 }
5066
5067 // Check if we have been given an override value for relOffset
5068 if (relOffsetOverride != NO_OVERRIDE_OFFSET)
5069 {
5070 // We've been given an override offset for GC Info
5071#ifdef _DEBUG
5072 GcInfoDecoder _gcInfoDecoder(
5073 gcInfoToken,
5074 DECODE_CODE_LENGTH
5075 );
5076
5077 // We only use override offset for wantsReportOnlyLeaf
5078 _ASSERTE(_gcInfoDecoder.WantsReportOnlyLeaf());
5079#endif // _DEBUG
5080
5081 curOffs = relOffsetOverride;
5082
5083#ifdef _TARGET_ARM_
5084 // On ARM, the low-order bit of an instruction pointer indicates Thumb vs. ARM mode.
5085 // Mask this off; all instructions are two-byte aligned.
5086 curOffs &= (~THUMB_CODE);
5087#endif // _TARGET_ARM_
5088
5089 LOG((LF_GCINFO, LL_INFO1000, "Adjusted GC reporting offset to provided override offset. Now reporting GC refs for %s at offset %04x.\n",
5090 methodName, curOffs));
5091 }
5092
5093
5094#if defined(WIN64EXCEPTIONS) // funclets
5095 if (pCodeInfo->GetJitManager()->IsFilterFunclet(pCodeInfo))
5096 {
 // Filters are the only funclets that run during the 1st pass, and must have
5098 // both the leaf and the parent frame reported. In order to avoid double
5099 // reporting of the untracked variables, do not report them for the filter.
5100 flags |= NoReportUntracked;
5101 }
5102#endif // WIN64EXCEPTIONS
5103
5104 bool reportScratchSlots;
5105
5106 // We report scratch slots only for leaf frames.
5107 // A frame is non-leaf if we are executing a call, or a fault occurred in the function.
5108 // The only case in which we need to report scratch slots for a non-leaf frame
5109 // is when execution has to be resumed at the point of interruption (via ResumableFrame)
5110 //<TODO>Implement ResumableFrame</TODO>
5111 _ASSERTE( sizeof( BOOL ) >= sizeof( ActiveStackFrame ) );
5112 reportScratchSlots = (flags & ActiveStackFrame) != 0;
5113
5114
5115 GcInfoDecoder gcInfoDecoder(
5116 gcInfoToken,
5117 GcInfoDecoderFlags (DECODE_GC_LIFETIMES | DECODE_SECURITY_OBJECT | DECODE_VARARG),
5118 curOffs
5119 );
5120
5121 if (!gcInfoDecoder.EnumerateLiveSlots(
5122 pRD,
5123 reportScratchSlots,
5124 flags,
5125 pCallBack,
5126 hCallBack
5127 ))
5128 {
5129 return false;
5130 }
5131
5132#ifdef WIN64EXCEPTIONS // funclets
5133 //
5134 // If we're in a funclet, we do not want to report the incoming varargs. This is
5135 // taken care of by the parent method and the funclet should access those arguments
5136 // by way of the parent method's stack frame.
5137 //
5138 if(pCodeInfo->IsFunclet())
5139 {
5140 return true;
5141 }
5142#endif // WIN64EXCEPTIONS
5143
5144 if (gcInfoDecoder.GetIsVarArg())
5145 {
5146 MethodDesc* pMD = pCodeInfo->GetMethodDesc();
5147 _ASSERTE(pMD != NULL);
5148
5149 // This does not apply to x86 because of how it handles varargs (it never
5150 // reports the arguments from the explicit method signature).
5151 //
5152#ifndef _TARGET_X86_
5153 //
5154 // SPECIAL CASE:
5155 // IL marshaling stubs have signatures that are marked as vararg,
5156 // but they are callsite sigs that actually contain complete sig
5157 // info. There are two reasons for this:
5158 // 1) the stub callsites expect the method to be vararg
5159 // 2) the marshaling stub must have full sig info so that
5160 // it can do a ldarg.N on the arguments it needs to marshal.
5161 // The result of this is that the code below will report the
5162 // variable arguments twice--once from the va sig cookie and once
5163 // from the explicit method signature (in the method's gc info).
5164 //
 // The fix for this is to early out of the va sig cookie reporting
5166 // in this special case.
5167 //
5168 if (pMD->IsILStub())
5169 {
5170 return true;
5171 }
5172#endif // !_TARGET_X86_
5173
5174 LOG((LF_GCINFO, LL_INFO100, "Reporting incoming vararg GC refs\n"));
5175
 // Find the offset of the VASigCookie. Its offset is relative to
 // the base of a FramedMethodFrame.
5178 int VASigCookieOffset;
5179
5180 {
5181 MetaSig msigFindVASig(pMD);
5182 ArgIterator argit(&msigFindVASig);
5183 VASigCookieOffset = argit.GetVASigCookieOffset() - TransitionBlock::GetOffsetOfArgs();
5184 }
5185
5186 PTR_BYTE prevSP = dac_cast<PTR_BYTE>(GetCallerSp(pRD));
5187
5188 _ASSERTE(prevSP + VASigCookieOffset >= dac_cast<PTR_BYTE>(GetSP(pRD->pCurrentContext)));
5189
5190#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
5191 // Note that I really want to say hCallBack is a GCCONTEXT, but this is pretty close
5192 extern void GcEnumObject(LPVOID pData, OBJECTREF *pObj, uint32_t flags);
5193 _ASSERTE((void*) GcEnumObject == pCallBack);
5194#endif // _DEBUG && !DACCESS_COMPILE
5195 GCCONTEXT *pCtx = (GCCONTEXT *) hCallBack;
5196
5197 // For varargs, look up the signature using the varArgSig token passed on the stack
5198 PTR_VASigCookie varArgSig = *PTR_PTR_VASigCookie(prevSP + VASigCookieOffset);
5199
5200 promoteVarArgs(prevSP, varArgSig, pCtx);
5201 }
5202
5203 return true;
5204
5205}
5206
5207#endif // USE_GC_INFO_DECODER
5208#endif // !CROSSGEN_COMPILE
5209
5210#ifdef _TARGET_X86_
5211/*****************************************************************************
5212 *
5213 * Return the address of the local security object reference
 * using data that was previously cached in UnwindStackFrame
5215 * using StackwalkCacheUnwindInfo
5216 */
5217
5218OBJECTREF* EECodeManager::GetAddrOfSecurityObjectFromCachedInfo(PREGDISPLAY pRD, StackwalkCacheUnwindInfo * stackwalkCacheUnwindInfo)
5219{
5220 LIMITED_METHOD_CONTRACT;
5221 size_t securityObjectOffset = stackwalkCacheUnwindInfo->securityObjectOffset;
5222
5223 _ASSERTE(securityObjectOffset != 0);
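    // securityObjectOffset was cached in pointer-sized slots (see UnwindStackFrame), so
    // scale it back to a byte offset below the EBP value recorded in the register display.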
5224 // We pretend that filters are ESP-based methods in UnwindEbpDoubleAlignFrame().
5225 // Hence we cannot enforce this assert.
5226 // _ASSERTE(stackwalkCacheUnwindInfo->fUseEbpAsFrameReg);
5227 return (OBJECTREF *) (size_t) (*pRD->GetEbpLocation() - (securityObjectOffset * sizeof(void*)));
5228}
5229#endif // _TARGET_X86_
5230
5231#if !defined(DACCESS_COMPILE) && !defined(CROSSGEN_COMPILE)
5232OBJECTREF* EECodeManager::GetAddrOfSecurityObject(CrawlFrame *pCF)
5233{
5234 CONTRACTL {
5235 NOTHROW;
5236 GC_NOTRIGGER;
5237 } CONTRACTL_END;
5238
5239 REGDISPLAY* pRD = pCF->GetRegisterSet();
5240 IJitManager* pJitMan = pCF->GetJitManager();
5241 METHODTOKEN methodToken = pCF->GetMethodToken();
5242 unsigned relOffset = pCF->GetRelOffset();
5243 CodeManState* pState = pCF->GetCodeManState();
5244
5245 GCInfoToken gcInfoToken = pJitMan->GetGCInfoToken(methodToken);
5246
5247 _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
5248
5249#ifndef USE_GC_INFO_DECODER
5250 CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
5251
5252 /* Extract the necessary information from the info block header */
5253 stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, // <TODO>truncation</TODO>
5254 relOffset,
5255 &stateBuf->hdrInfoBody);
5256
5257 pState->dwIsSet = 1;
5258 if (stateBuf->hdrInfoBody.securityCheck)
5259 {
5260 _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);
5261 if(stateBuf->hdrInfoBody.prologOffs == hdrInfo::NOT_IN_PROLOG &&
5262 stateBuf->hdrInfoBody.epilogOffs == hdrInfo::NOT_IN_EPILOG)
5263 {
5264 return (OBJECTREF *)(size_t)(*pRD->GetEbpLocation() - GetSecurityObjectOffset(&stateBuf->hdrInfoBody));
5265 }
5266 }
5267#else // !USE_GC_INFO_DECODER
5268
5269 GcInfoDecoder gcInfoDecoder(
5270 gcInfoToken,
5271 DECODE_SECURITY_OBJECT
5272 );
5273
5274 INT32 spOffset = gcInfoDecoder.GetSecurityObjectStackSlot();
5275 if( spOffset != NO_SECURITY_OBJECT )
5276 {
5277 UINT_PTR uCallerSP = GetCallerSp(pRD);
5278
5279 if (pCF->IsFunclet())
5280 {
5281 if (!pCF->IsFilterFunclet())
5282 {
5283 // Cannot retrieve the security object for a non-filter funclet.
5284 return NULL;
5285 }
5286
5287 DWORD dwParentOffset = 0;
5288 UINT_PTR uParentCallerSP = 0;
5289
5290 // If this is a filter funclet, retrieve the information of the parent method
5291 // and use that to find the security object.
5292 ExceptionTracker::FindParentStackFrameEx(pCF, &dwParentOffset, &uParentCallerSP);
5293
5294 relOffset = dwParentOffset;
5295 uCallerSP = uParentCallerSP;
5296 }
5297
5298 // Security object is always live anyplace we can throw or take a GC
5299 OBJECTREF* pSlot = (OBJECTREF*) (spOffset + uCallerSP);
5300 return pSlot;
5301 }
5302#endif // USE_GC_INFO_DECODER
5303
5304 return NULL;
5305}
5306#endif // !DACCESS_COMPILE && !CROSSGEN_COMPILE
5307
5308#ifndef CROSSGEN_COMPILE
5309/*****************************************************************************
5310 *
5311 * Returns "this" pointer if it is a non-static method
5312 * AND the object is still alive.
5313 * Returns NULL in all other cases.
5314 * Unfortunately, the semantics of this call currently depend on the architecture.
5315 * On non-x86 architectures, where we use GcInfo{En,De}Coder, this returns NULL for
5316 * all cases except the case where the GenericsContext is determined via "this." On x86,
5317 * it will definitely return a non-NULL value in that case, and for synchronized methods;
5318 * it may also return a non-NULL value for other cases, depending on how the method is compiled.
5319 */
5320OBJECTREF EECodeManager::GetInstance( PREGDISPLAY pContext,
5321 EECodeInfo* pCodeInfo)
5322{
5323 CONTRACTL {
5324 NOTHROW;
5325 GC_NOTRIGGER;
5326 MODE_COOPERATIVE;
5327 SUPPORTS_DAC;
5328 } CONTRACTL_END;
5329
5330#ifndef USE_GC_INFO_DECODER
5331 GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
5332 unsigned relOffset = pCodeInfo->GetRelOffset();
5333
5334 PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);
5335 hdrInfo info;
5336 unsigned stackDepth;
5337 TADDR taArgBase;
5338 unsigned count;
5339
5340 /* Extract the necessary information from the info block header */
5341
5342 table += DecodeGCHdrInfo(gcInfoToken,
5343 relOffset,
5344 &info);
5345
5346 // We do not have accurate information in the prolog or the epilog
5347 if (info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
5348 info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
5349 {
5350 return NULL;
5351 }
5352
5353 if (info.interruptible)
5354 {
5355 stackDepth = scanArgRegTableI(skipToArgReg(info, table), relOffset, relOffset, &info);
5356 }
5357 else
5358 {
5359 stackDepth = scanArgRegTable (skipToArgReg(info, table), (unsigned)relOffset, &info);
5360 }
5361
5362 if (info.ebpFrame)
5363 {
5364 _ASSERTE(stackDepth == 0);
5365 taArgBase = GetRegdisplayFP(pContext);
5366 }
5367 else
5368 {
5369 taArgBase = pContext->SP + stackDepth;
5370 }
5371
5372 // Only synchronized methods and generic code that accesses
5373 // the type context via "this" need to report "this".
5374 // If it's reported for other methods, it's probably
5375 // done incorrectly. So flag such cases.
5376 _ASSERTE(info.thisPtrResult == REGI_NA ||
5377 pCodeInfo->GetMethodDesc()->IsSynchronized() ||
5378 pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis());
5379
5380 if (info.thisPtrResult != REGI_NA)
5381 {
5382 // the register contains the Object pointer.
5383 TADDR uRegValue = *(reinterpret_cast<TADDR *>(getCalleeSavedReg(pContext, info.thisPtrResult)));
5384 return ObjectToOBJECTREF(PTR_Object(uRegValue));
5385 }
5386
5387#if VERIFY_GC_TABLES
5388 _ASSERTE(*castto(table, unsigned short *)++ == 0xBEEF);
5389#endif
5390
5391#ifndef WIN64EXCEPTIONS
5392 /* Parse the untracked frame variable table */
5393
5394 /* The 'this' pointer can never be located in the untracked table */
5395 /* as we only allow pinned and byrefs in the untracked table */
5396
5397 count = info.untrackedCnt;
5398 while (count-- > 0)
5399 {
5400 fastSkipSigned(table);
5401 }
5402
5403 /* Look for the 'this' pointer in the frame variable lifetime table */
5404
5405 count = info.varPtrTableSize;
5406 unsigned tmpOffs = 0;
    while (count-- > 0)
    {
        unsigned varOfs = fastDecodeUnsigned(table);
        unsigned begOfs = tmpOffs + fastDecodeUnsigned(table);
        unsigned endOfs = begOfs + fastDecodeUnsigned(table);
        _ASSERTE(!info.ebpFrame || (varOfs!=0));
        /* Is this variable live right now? */
        if (((unsigned)relOffset >= begOfs) && ((unsigned)relOffset < endOfs))
        {
            /* Does it contain the 'this' pointer? */
            if (varOfs & this_OFFSET_FLAG)
            {
                unsigned ofs = varOfs & ~OFFSET_MASK;

                /* Tracked locals for EBP frames are always at negative offsets */

                if (info.ebpFrame)
                    taArgBase -= ofs;
                else
                    taArgBase += ofs;

                return (OBJECTREF)(size_t)(*PTR_DWORD(taArgBase));
            }
        }
        tmpOffs = begOfs;
    }

#if VERIFY_GC_TABLES
    _ASSERTE(*castto(table, unsigned short *) == 0xBABE);
#endif

#else // WIN64EXCEPTIONS
    if (pCodeInfo->GetMethodDesc()->AcquiresInstMethodTableFromThis()) // Generic Context is "this"
    {
        // The untracked table must have at least one entry - the 'this' pointer
        _ASSERTE(info.untrackedCnt > 0);

        // The first entry must be the 'this' pointer
        int stkOffs = fastDecodeSigned(table);
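        // The low bits of the encoded offset are flag bits; strip them
        // before applying the offset.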
        taArgBase -= stkOffs & ~OFFSET_MASK;
        return (OBJECTREF)(size_t)(*PTR_DWORD(taArgBase));
    }
#endif // WIN64EXCEPTIONS

    return NULL;
#else // !USE_GC_INFO_DECODER
    PTR_VOID token = EECodeManager::GetExactGenericsToken(pContext, pCodeInfo);

    OBJECTREF oRef = ObjectToOBJECTREF(PTR_Object(dac_cast<TADDR>(token)));
    VALIDATEOBJECTREF(oRef);
    return oRef;
#endif // USE_GC_INFO_DECODER
}
#endif // !CROSSGEN_COMPILE

GenericParamContextType EECodeManager::GetParamContextType(PREGDISPLAY  pContext,
                                                           EECodeInfo * pCodeInfo)
{
    LIMITED_METHOD_DAC_CONTRACT;

#ifndef USE_GC_INFO_DECODER
    /* Extract the necessary information from the info block header */
    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
    PTR_VOID    methodInfoPtr = pCodeInfo->GetGCInfo();
    unsigned    relOffset = pCodeInfo->GetRelOffset();

    hdrInfo     info;
    PTR_CBYTE   table = PTR_CBYTE(gcInfoToken.Info);
    table += DecodeGCHdrInfo(gcInfoToken,
                             relOffset,
                             &info);

    if (!info.genericsContext ||
        info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
        info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
    {
        return GENERIC_PARAM_CONTEXT_NONE;
    }

    if (info.genericsContextIsMethodDesc)
    {
        return GENERIC_PARAM_CONTEXT_METHODDESC;
    }

    return GENERIC_PARAM_CONTEXT_METHODTABLE;

    // On x86 the generic param context parameter is never this.
#else // !USE_GC_INFO_DECODER
    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();

    GcInfoDecoder gcInfoDecoder(
            gcInfoToken,
            GcInfoDecoderFlags (DECODE_GENERICS_INST_CONTEXT)
            );

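    // If the method reports a generics-instantiation context stack slot, the
    // decoder also tells us which flavor of context lives there (MethodDesc,
    // MethodTable, or the 'this' object); otherwise the method has no shared
    // generic context.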
    INT32 spOffsetGenericsContext = gcInfoDecoder.GetGenericsInstContextStackSlot();
    if (spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT)
    {
        if (gcInfoDecoder.HasMethodDescGenericsInstContext())
        {
            return GENERIC_PARAM_CONTEXT_METHODDESC;
        }
        else if (gcInfoDecoder.HasMethodTableGenericsInstContext())
        {
            return GENERIC_PARAM_CONTEXT_METHODTABLE;
        }
        return GENERIC_PARAM_CONTEXT_THIS;
    }
    return GENERIC_PARAM_CONTEXT_NONE;
#endif // USE_GC_INFO_DECODER
}

#ifndef CROSSGEN_COMPILE
/*****************************************************************************
 *
 * Returns the extra argument passed to shared generic code if it is still alive.
 * Returns NULL in all other cases.
 */
PTR_VOID EECodeManager::GetParamTypeArg(PREGDISPLAY  pContext,
                                        EECodeInfo * pCodeInfo)

{
    LIMITED_METHOD_DAC_CONTRACT;

#ifndef USE_GC_INFO_DECODER
    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
    PTR_VOID    methodInfoPtr = pCodeInfo->GetGCInfo();
    unsigned    relOffset = pCodeInfo->GetRelOffset();

    /* Extract the necessary information from the info block header */
    hdrInfo     info;
    PTR_CBYTE   table = PTR_CBYTE(gcInfoToken.Info);
    table += DecodeGCHdrInfo(gcInfoToken,
                             relOffset,
                             &info);

    if (!info.genericsContext ||
        info.prologOffs != hdrInfo::NOT_IN_PROLOG ||
        info.epilogOffs != hdrInfo::NOT_IN_EPILOG)
    {
        return NULL;
    }

    TADDR fp = GetRegdisplayFP(pContext);
    TADDR taParamTypeArg = *PTR_TADDR(fp - GetParamTypeArgOffset(&info));
    return PTR_VOID(taParamTypeArg);

#else // !USE_GC_INFO_DECODER
    return EECodeManager::GetExactGenericsToken(pContext, pCodeInfo);

#endif // USE_GC_INFO_DECODER
}
#endif // !CROSSGEN_COMPILE

#if defined(WIN64EXCEPTIONS) && defined(USE_GC_INFO_DECODER) && !defined(CROSSGEN_COMPILE)
/*
    Returns the generics token. This is used by GetInstance() and GetParamTypeArg() on WIN64.
*/
//static
PTR_VOID EECodeManager::GetExactGenericsToken(PREGDISPLAY  pContext,
                                              EECodeInfo * pCodeInfo)
{
    LIMITED_METHOD_DAC_CONTRACT;

    return EECodeManager::GetExactGenericsToken(GetCallerSp(pContext), pCodeInfo);
}

//static
PTR_VOID EECodeManager::GetExactGenericsToken(SIZE_T       baseStackSlot,
                                              EECodeInfo * pCodeInfo)
{
    LIMITED_METHOD_DAC_CONTRACT;

    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();

    GcInfoDecoder gcInfoDecoder(
            gcInfoToken,
            GcInfoDecoderFlags (DECODE_PSP_SYM | DECODE_GENERICS_INST_CONTEXT)
            );

    INT32 spOffsetGenericsContext = gcInfoDecoder.GetGenericsInstContextStackSlot();
    if (spOffsetGenericsContext != NO_GENERICS_INST_CONTEXT)
    {

        TADDR taSlot;
        if (pCodeInfo->IsFunclet())
        {
            INT32 spOffsetPSPSym = gcInfoDecoder.GetPSPSymStackSlot();
            _ASSERTE(spOffsetPSPSym != NO_PSP_SYM);
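            // The generics context lives in the main function's frame; for a
            // funclet we first have to locate that frame via the PSPSym before
            // applying the context's stack offset.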

#ifdef _TARGET_AMD64_
            // On AMD64 the spOffsetPSPSym is relative to the "Initial SP": the stack
            // pointer at the end of the prolog, before any dynamic allocations, so it
            // can be the same for funclets and the main function.
            // However, we have a caller SP, so we need to convert it.
            baseStackSlot -= pCodeInfo->GetFixedStackSize();

#endif // _TARGET_AMD64_

            // For funclets we have to do an extra dereference to get the PSPSym first.
            TADDR newBaseStackSlot = *PTR_TADDR(baseStackSlot + spOffsetPSPSym);

#ifdef _TARGET_AMD64_
            // On AMD64 the PSPSym stores the "Initial SP": the stack pointer at the end of
            // the prolog, before any dynamic allocations.
            // However, the GenericsContext offset is relative to the caller SP for all
            // platforms. So here we adjust to convert AMD64's initial sp to a caller SP.
            // But we have to be careful to use the main function's EECodeInfo, not the
            // funclet's EECodeInfo because they have different stack sizes!
            newBaseStackSlot += pCodeInfo->GetMainFunctionInfo().GetFixedStackSize();
#endif // _TARGET_AMD64_

            taSlot = (TADDR)( spOffsetGenericsContext + newBaseStackSlot );
        }
        else
        {
            taSlot = (TADDR)( spOffsetGenericsContext + baseStackSlot );
        }
        TADDR taExactGenericsToken = *PTR_TADDR(taSlot);
        return PTR_VOID(taExactGenericsToken);
    }
    return NULL;
}


#endif // WIN64EXCEPTIONS && USE_GC_INFO_DECODER && !CROSSGEN_COMPILE

#ifndef CROSSGEN_COMPILE
/*****************************************************************************/

void * EECodeManager::GetGSCookieAddr(PREGDISPLAY    pContext,
                                      EECodeInfo   * pCodeInfo,
                                      CodeManState * pState)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));

    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
    unsigned    relOffset = pCodeInfo->GetRelOffset();

#ifdef WIN64EXCEPTIONS
    if (pCodeInfo->IsFunclet())
    {
        return NULL;
    }
#endif

#ifndef USE_GC_INFO_DECODER
    CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;

    /* Extract the necessary information from the info block header */
    hdrInfo * info = &stateBuf->hdrInfoBody;
    stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, // <TODO>truncation</TODO>
                                                   relOffset,
                                                   info);

    pState->dwIsSet = 1;

    if (info->prologOffs != hdrInfo::NOT_IN_PROLOG ||
        info->epilogOffs != hdrInfo::NOT_IN_EPILOG ||
        info->gsCookieOffset == INVALID_GS_COOKIE_OFFSET)
    {
        return NULL;
    }

    if (info->ebpFrame)
    {
        DWORD curEBP = GetRegdisplayFP(pContext);

        return PVOID(SIZE_T(curEBP - info->gsCookieOffset));
    }
    else
    {
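        // For ESP frames the cookie offset is relative to the stack pointer as
        // it was before any outgoing arguments were pushed, so add back the
        // size of the arguments currently pushed at this offset.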
        PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info) + stateBuf->hdrInfoSize;
        unsigned argSize = GetPushedArgSize(info, table, relOffset);

        return PVOID(SIZE_T(pContext->SP + argSize + info->gsCookieOffset));
    }

#else // !USE_GC_INFO_DECODER
    GcInfoDecoder gcInfoDecoder(
            gcInfoToken,
            DECODE_GS_COOKIE
            );

    INT32 spOffsetGSCookie = gcInfoDecoder.GetGSCookieStackSlot();
    if (spOffsetGSCookie != NO_GS_COOKIE)
    {
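        // The cookie address is only meaningful while the cookie is live, i.e.
        // after the prolog has written it and before the epilog tears the
        // frame down; outside that range report no cookie.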
        if(relOffset >= gcInfoDecoder.GetGSCookieValidRangeStart()
            && relOffset < gcInfoDecoder.GetGSCookieValidRangeEnd())
        {
            SIZE_T baseStackSlot = GetCallerSp(pContext);
            return (LPVOID)( spOffsetGSCookie + baseStackSlot );
        }
    }
    return NULL;

#endif // USE_GC_INFO_DECODER
}
#endif // !CROSSGEN_COMPILE

#ifndef USE_GC_INFO_DECODER
/*****************************************************************************
 *
 * Returns true if the given IP is in the given method's prolog or epilog.
 */
bool EECodeManager::IsInPrologOrEpilog(DWORD       relPCoffset,
                                       GCInfoToken gcInfoToken,
                                       size_t*     prologSize)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken, relPCoffset, &info);

    if (prologSize)
        *prologSize = info.prologSize;

    return ((info.prologOffs != hdrInfo::NOT_IN_PROLOG) ||
            (info.epilogOffs != hdrInfo::NOT_IN_EPILOG));
}

/*****************************************************************************
 *
 * Returns true if the given IP is in the synchronized region of the method (valid for synchronized functions only)
 */
bool EECodeManager::IsInSynchronizedRegion(DWORD       relOffset,
                                           GCInfoToken gcInfoToken,
                                           unsigned    flags)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken, relOffset, &info);

    // We should be called only for synchronized methods
    _ASSERTE(info.syncStartOffset != INVALID_SYNC_OFFSET && info.syncEndOffset != INVALID_SYNC_OFFSET);

    _ASSERTE(info.syncStartOffset < info.syncEndOffset);
    _ASSERTE(info.epilogCnt <= 1);
    _ASSERTE(info.epilogCnt == 0 || info.syncEndOffset <= info.syncEpilogStart);

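    // The method is considered to be inside the synchronized region if the
    // offset is strictly between syncStartOffset and syncEndOffset, exactly at
    // syncStartOffset for an active or aborted frame, or at/after the end of
    // the epilog (see below).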
    return (info.syncStartOffset < relOffset && relOffset < info.syncEndOffset) ||
           (info.syncStartOffset == relOffset && (flags & (ActiveStackFrame|ExecutionAborted))) ||
           // Synchronized methods have at most one epilog. The epilog does not have to be at the end of the method though.
           // Everything after the epilog is also in the synchronized region.
           (info.epilogCnt != 0 && info.syncEpilogStart + info.epilogSize <= relOffset);
}
#endif // !USE_GC_INFO_DECODER

/*****************************************************************************
 *
 * Returns the size of a given function.
 */
size_t EECodeManager::GetFunctionSize(GCInfoToken gcInfoToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

#ifndef USE_GC_INFO_DECODER
    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken, 0, &info);

    return info.methodSize;
#else // !USE_GC_INFO_DECODER

    GcInfoDecoder gcInfoDecoder(
            gcInfoToken,
            DECODE_CODE_LENGTH
            );

    UINT32 codeLength = gcInfoDecoder.GetCodeLength();
    _ASSERTE( codeLength > 0 );
    return codeLength;

#endif // USE_GC_INFO_DECODER
}

/*****************************************************************************
 *
 * Returns the ReturnKind of a given function.
 */
ReturnKind EECodeManager::GetReturnKind(GCInfoToken gcInfoToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    if (!gcInfoToken.IsReturnKindAvailable())
    {
        return RT_Illegal;
    }

#ifndef USE_GC_INFO_DECODER
    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken, 0, &info);

    return info.returnKind;
#else // !USE_GC_INFO_DECODER

    GcInfoDecoder gcInfoDecoder(gcInfoToken, DECODE_RETURN_KIND);
    return gcInfoDecoder.GetReturnKind();

#endif // USE_GC_INFO_DECODER
}

#ifndef USE_GC_INFO_DECODER
/*****************************************************************************
 *
 * Returns the size of the frame of the given function.
 */
unsigned int EECodeManager::GetFrameSize(GCInfoToken gcInfoToken)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken, 0, &info);

    // This is currently only used by EnC; in all likelihood such callers
    // would need to know about doubleAlign.
    _ASSERTE(!info.doubleAlign);
    return info.stackSize;
}
#endif // !USE_GC_INFO_DECODER

#ifndef DACCESS_COMPILE

/*****************************************************************************/

#ifndef WIN64EXCEPTIONS
const BYTE* EECodeManager::GetFinallyReturnAddr(PREGDISPLAY pReg)
{
    LIMITED_METHOD_CONTRACT;

    return *(const BYTE**)(size_t)(GetRegdisplaySP(pReg));
}

BOOL EECodeManager::IsInFilter(GCInfoToken gcInfoToken,
                               unsigned    offset,
                               PCONTEXT    pCtx,
                               DWORD       curNestLevel)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

    /* Extract the necessary information from the info block header */

    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken,
                    offset,
                    &info);

    /* make sure that we have an ebp stack frame */

    _ASSERTE(info.ebpFrame);
    _ASSERTE(info.handlers); // <TODO> This will always be set. Remove it</TODO>

    TADDR baseSP;
    DWORD nestingLevel;

    FrameType frameType = GetHandlerFrameInfo(&info, pCtx->Ebp,
                                              pCtx->Esp, (DWORD) IGNORE_VAL,
                                              &baseSP, &nestingLevel);
    _ASSERTE(frameType != FR_INVALID);

//    _ASSERTE(nestingLevel == curNestLevel);

    return frameType == FR_FILTER;
}


BOOL EECodeManager::LeaveFinally(GCInfoToken gcInfoToken,
                                 unsigned    offset,
                                 PCONTEXT    pCtx)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;


    hdrInfo info;

    DecodeGCHdrInfo(gcInfoToken,
                    offset,
                    &info);

    DWORD nestingLevel;
    GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL, NULL, &nestingLevel);

    // Compute an index into the stack-based table of esp values from
    // each level of catch block.
    PTR_TADDR pBaseSPslots = GetFirstBaseSPslotPtr(pCtx->Ebp, &info);
    PTR_TADDR pPrevSlot    = pBaseSPslots - (nestingLevel - 1);
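
    // pPrevSlot is the shadow-SP slot that was marked with LCL_FINALLY_MARK
    // when this finally was invoked locally.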

    /* Currently, LeaveFinally() is not used if the finally is invoked in the
       second pass for unwinding. So we expect the finally to be called locally */
    _ASSERTE(*pPrevSlot == LCL_FINALLY_MARK);

    *pPrevSlot = 0; // Zero out the previous shadow ESP

    pCtx->Esp += sizeof(TADDR); // Pop the return value off the stack
    return TRUE;
}

void EECodeManager::LeaveCatch(GCInfoToken gcInfoToken,
                               unsigned    offset,
                               PCONTEXT    pCtx)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
    } CONTRACTL_END;

#ifdef _DEBUG
    TADDR       baseSP;
    DWORD       nestingLevel;
    bool        hasInnerFilter;
    hdrInfo     info;

    DecodeGCHdrInfo(gcInfoToken, offset, &info);
    GetHandlerFrameInfo(&info, pCtx->Ebp, pCtx->Esp, (DWORD) IGNORE_VAL,
                        &baseSP, &nestingLevel, &hasInnerFilter);
//    _ASSERTE(frameType == FR_HANDLER);
//    _ASSERTE(pCtx->Esp == baseSP);
#endif

    return;
}
#endif // !WIN64EXCEPTIONS
#endif // #ifndef DACCESS_COMPILE

#ifdef DACCESS_COMPILE

void EECodeManager::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
{
    DAC_ENUM_VTHIS();
}

#endif // #ifdef DACCESS_COMPILE


#ifdef _TARGET_X86_
/*
 * GetAmbientSP
 *
 * This function computes the zero-depth stack pointer for the given nesting
 * level within the method given. Nesting level is the depth within
 * try-catch-finally blocks, and is zero based. It is up to the caller to
 * supply a valid nesting level value.
 *
 */

TADDR EECodeManager::GetAmbientSP(PREGDISPLAY pContext,
                                  EECodeInfo *pCodeInfo,
                                  DWORD dwRelOffset,
                                  DWORD nestingLevel,
                                  CodeManState *pState)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();

    _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(pState->stateBuf));
    CodeManStateBuf * stateBuf = (CodeManStateBuf*)pState->stateBuf;
    PTR_CBYTE table = PTR_CBYTE(gcInfoToken.Info);

    /* Extract the necessary information from the info block header */

    stateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken,
                                                   dwRelOffset,
                                                   &stateBuf->hdrInfoBody);
    table += stateBuf->hdrInfoSize;

    pState->dwIsSet = 1;

#if defined(_DEBUG) && !defined(DACCESS_COMPILE)
    if (trFixContext)
    {
        printf("GetAmbientSP [%s][%s] for %s.%s: ",
               stateBuf->hdrInfoBody.ebpFrame?"ebp":" ",
               stateBuf->hdrInfoBody.interruptible?"int":" ",
               "UnknownClass","UnknownMethod");
        fflush(stdout);
    }
#endif // _DEBUG && !DACCESS_COMPILE

    if ((stateBuf->hdrInfoBody.prologOffs != hdrInfo::NOT_IN_PROLOG) ||
        (stateBuf->hdrInfoBody.epilogOffs != hdrInfo::NOT_IN_EPILOG))
    {
        return NULL;
    }

    /* make sure that we have an ebp stack frame */

    if (stateBuf->hdrInfoBody.handlers)
    {
        _ASSERTE(stateBuf->hdrInfoBody.ebpFrame);

        TADDR baseSP;
        GetHandlerFrameInfo(&stateBuf->hdrInfoBody,
                            GetRegdisplayFP(pContext),
                            (DWORD) IGNORE_VAL,
                            nestingLevel,
                            &baseSP);

        _ASSERTE((GetRegdisplayFP(pContext) >= baseSP) && (baseSP >= GetRegdisplaySP(pContext)));

        return baseSP;
    }

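    // Without EH handlers there are no shadow-SP slots, so only nesting
    // level 0 makes sense here.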
    _ASSERTE(nestingLevel == 0);

    if (stateBuf->hdrInfoBody.ebpFrame)
    {
        return GetOutermostBaseFP(GetRegdisplayFP(pContext), &stateBuf->hdrInfoBody);
    }

    TADDR baseSP = GetRegdisplaySP(pContext);
    if (stateBuf->hdrInfoBody.interruptible)
    {
        baseSP += scanArgRegTableI(skipToArgReg(stateBuf->hdrInfoBody, table),
                                   dwRelOffset,
                                   dwRelOffset,
                                   &stateBuf->hdrInfoBody);
    }
    else
    {
        baseSP += scanArgRegTable(skipToArgReg(stateBuf->hdrInfoBody, table),
                                  dwRelOffset,
                                  &stateBuf->hdrInfoBody);
    }

    return baseSP;
}
#endif // _TARGET_X86_

/*
    Get the number of bytes used for stack parameters.
    This is currently only used on x86.
 */

// virtual
ULONG32 EECodeManager::GetStackParameterSize(EECodeInfo * pCodeInfo)
{
    CONTRACTL {
        NOTHROW;
        GC_NOTRIGGER;
        SUPPORTS_DAC;
    } CONTRACTL_END;

#if defined(_TARGET_X86_)
#if defined(WIN64EXCEPTIONS)
    if (pCodeInfo->IsFunclet())
    {
        // Funclets have no stack arguments
        return 0;
    }
#endif // WIN64EXCEPTIONS

    GCInfoToken gcInfoToken = pCodeInfo->GetGCInfoToken();
    unsigned    dwOffset = pCodeInfo->GetRelOffset();

    CodeManState state;
    state.dwIsSet = 0;

    _ASSERTE(sizeof(CodeManStateBuf) <= sizeof(state.stateBuf));
    CodeManStateBuf * pStateBuf = reinterpret_cast<CodeManStateBuf *>(state.stateBuf);

    hdrInfo * pHdrInfo = &(pStateBuf->hdrInfoBody);
    pStateBuf->hdrInfoSize = (DWORD)DecodeGCHdrInfo(gcInfoToken, dwOffset, pHdrInfo);

    // We need to subtract 4 here because ESPIncrOnReturn() includes the stack slot containing the return
    // address.
    return (ULONG32)::GetStackParameterSize(pHdrInfo);

#else
    return 0;

#endif // _TARGET_X86_
}
