// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/* EXCEP.CPP
 *
 * AMD64-specific exception handling support.
 */

| 12 | #include "common.h" |
| 13 | |
| 14 | #include "frames.h" |
| 15 | #include "threads.h" |
| 16 | #include "excep.h" |
| 17 | #include "object.h" |
| 18 | #include "field.h" |
| 19 | #include "dbginterface.h" |
| 20 | #include "cgensys.h" |
| 21 | #include "comutilnative.h" |
| 22 | #include "sigformat.h" |
| 23 | #include "siginfo.hpp" |
| 24 | #include "gcheaputilities.h" |
| 25 | #include "eedbginterfaceimpl.h" //so we can clearexception in COMPlusThrow |
| 26 | #include "perfcounters.h" |
| 27 | #include "asmconstants.h" |
| 28 | |
| 29 | #include "exceptionhandling.h" |
| 30 | |
| 31 | |
| 32 | |
| 33 | #if !defined(DACCESS_COMPILE) |
| 34 | |
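// Nothing to reset on this platform; ResetCurrentContext is a no-op here.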
VOID ResetCurrentContext()
{
    LIMITED_METHOD_CONTRACT;
}

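// Exception filter that never handles the exception: always continue the search.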
LONG CLRNoCatchHandler(EXCEPTION_POINTERS* pExceptionInfo, PVOID pv)
{
    return EXCEPTION_CONTINUE_SEARCH;
}

#endif // !DACCESS_COMPILE

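// The redirection stub saves a pointer to the redirected thread's CONTEXT at a
// fixed offset from its frame pointer. Given the stub's establisher frame,
// recover that CONTEXT pointer using the offsets defined in asmconstants.h.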
inline PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrameWorker(UINT_PTR establisherFrame)
{
    LIMITED_METHOD_DAC_CONTRACT;

    SIZE_T rbp = establisherFrame + REDIRECTSTUB_ESTABLISHER_OFFSET_RBP;
    PTR_PTR_CONTEXT ppContext = dac_cast<PTR_PTR_CONTEXT>((TADDR)rbp + REDIRECTSTUB_RBP_OFFSET_CONTEXT);
    return *ppContext;
}

PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(DISPATCHER_CONTEXT * pDispatcherContext)
{
    LIMITED_METHOD_DAC_CONTRACT;

    return GetCONTEXTFromRedirectedStubStackFrameWorker(pDispatcherContext->EstablisherFrame);
}

PTR_CONTEXT GetCONTEXTFromRedirectedStubStackFrame(CONTEXT * pContext)
{
    LIMITED_METHOD_DAC_CONTRACT;

    return GetCONTEXTFromRedirectedStubStackFrameWorker(pContext->Rbp);
}

#if !defined(DACCESS_COMPILE)

FaultingExceptionFrame *GetFrameFromRedirectedStubStackFrame(DISPATCHER_CONTEXT *pDispatcherContext)
{
    LIMITED_METHOD_CONTRACT;

    return (FaultingExceptionFrame*)(pDispatcherContext->EstablisherFrame + THROWSTUB_ESTABLISHER_OFFSET_FaultingExceptionFrame);
}

#endif // !DACCESS_COMPILE

#if !defined(DACCESS_COMPILE)

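// Byte values for the AMD64 instructions that may legally appear in a function
// epilogue. RtlVirtualUnwind_Worker matches these patterns to decide whether
// ControlPc is inside an epilogue.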
#define AMD64_SIZE64_PREFIX 0x48
#define AMD64_ADD_IMM8_OP 0x83
#define AMD64_ADD_IMM32_OP 0x81
#define AMD64_JMP_IMM8_OP 0xeb
#define AMD64_JMP_IMM32_OP 0xe9
#define AMD64_JMP_IND_OP 0xff
#define AMD64_JMP_IND_RAX 0x20
#define AMD64_LEA_OP 0x8d
#define AMD64_POP_OP 0x58
#define AMD64_RET_OP 0xc3
#define AMD64_RET_OP_2 0xc2
#define AMD64_REP_PREFIX 0xf3
#define AMD64_NOP 0x90
#define AMD64_INT3 0xCC

#define AMD64_IS_REX_PREFIX(x) (((x) & 0xf0) == 0x40)

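// Sizes, in bytes, of the placeholder prologue and function body used when
// building the fake code buffer in RtlVirtualUnwind_Worker below.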
#define FAKE_PROLOG_SIZE 1
#define FAKE_FUNCTION_CODE_SIZE 1

#ifdef DEBUGGING_SUPPORTED
//
// If there is an int 3 (0xCC) opcode at Address, this tries to recover the
// original opcode from the managed patch table. If the address does not
// currently hold an int 3, the opcode at Address is returned unchanged. If
// the patch table has no managed patch for this address, the 0xCC at Address
// is returned as-is; a 0xCC return value therefore indicates an unmanaged
// patch at the address.
//
// If there is a managed patch at the address, *HasManagedBreakpoint is set to TRUE.
//
// If there is a 0xCC at the address before the call to GetPatchedOpcode and
// still a 0xCC when it returns, the patch is considered unmanaged and
// *HasUnmanagedBreakpoint is set to TRUE.
//
UCHAR GetOpcodeFromManagedBPForAddress(ULONG64 Address, BOOL* HasManagedBreakpoint, BOOL* HasUnmanagedBreakpoint)
{
    // If we don't see a breakpoint then quickly return.
    if (((UCHAR)*(BYTE*)Address) != AMD64_INT3)
    {
        return ((UCHAR)*(BYTE*)Address);
    }

    UCHAR PatchedOpcode;
    PatchedOpcode = (UCHAR)g_pDebugInterface->GetPatchedOpcode((CORDB_ADDRESS_TYPE*)(BYTE*)Address);

    // If GetPatchedOpcode returns a non-int 3 opcode then there is a managed
    // breakpoint at this address.
    if (PatchedOpcode != AMD64_INT3)
    {
        (*HasManagedBreakpoint) = TRUE;
    }
    else
    {
        (*HasUnmanagedBreakpoint) = TRUE;
    }

    return PatchedOpcode;
}
#endif // DEBUGGING_SUPPORTED

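// Debugger-aware wrapper over the OS unwinder. When a managed debugger is
// attached, route through RtlVirtualUnwind_Worker so that managed breakpoint
// opcodes patched into an epilogue do not confuse the epilogue detection.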
PEXCEPTION_ROUTINE
RtlVirtualUnwind (
    IN ULONG HandlerType,
    IN ULONG64 ImageBase,
    IN ULONG64 ControlPc,
    IN PT_RUNTIME_FUNCTION FunctionEntry,
    IN OUT PCONTEXT ContextRecord,
    OUT PVOID *HandlerData,
    OUT PULONG64 EstablisherFrame,
    IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
    )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // The indirection should be taken care of by the caller
    _ASSERTE((FunctionEntry->UnwindData & RUNTIME_FUNCTION_INDIRECT) == 0);

#ifdef DEBUGGING_SUPPORTED
    if (CORDebuggerAttached())
    {
        return RtlVirtualUnwind_Worker(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
    }
    else
#endif // DEBUGGING_SUPPORTED
    {
        return RtlVirtualUnwind_Unsafe(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers);
    }
}

#ifdef DEBUGGING_SUPPORTED
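// Identical in effect to RtlVirtualUnwind_Unsafe except when ControlPc sits in
// an epilogue that the CLR debugger has patched with breakpoints. In that case
// the remaining epilogue bytes are copied into a side buffer, the original
// opcodes are restored from the managed patch table, and the unwind is run
// against that cleaned copy instead of the live code stream.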
PEXCEPTION_ROUTINE
RtlVirtualUnwind_Worker (
    IN ULONG HandlerType,
    IN ULONG64 ImageBase,
    IN ULONG64 ControlPc,
    IN PT_RUNTIME_FUNCTION FunctionEntry,
    IN OUT PCONTEXT ContextRecord,
    OUT PVOID *HandlerData,
    OUT PULONG64 EstablisherFrame,
    IN OUT PKNONVOLATILE_CONTEXT_POINTERS ContextPointers OPTIONAL
    )
{
    CONTRACTL
    {
        NOTHROW;
        GC_NOTRIGGER;
        SO_TOLERANT;
    }
    CONTRACTL_END;

    // b/c we're only called by the safe RtlVirtualUnwind we are guaranteed
    // that the debugger is attached when we get here.
    _ASSERTE(CORDebuggerAttached());

    LOG((LF_CORDB, LL_EVERYTHING, "RVU_CBSW: in RtlVirtualUnwind_Worker, ControlPc=0x%p\n", ControlPc));

    BOOL InEpilogue = FALSE;
    BOOL HasManagedBreakpoint = FALSE;
    BOOL HasUnmanagedBreakpoint = FALSE;
    UCHAR TempOpcode = 0;
    PUCHAR NextByte;
    ULONG CurrentOffset;
    ULONG FrameRegister;
    ULONG64 BranchTarget;
    PUNWIND_INFO UnwindInfo;

    // 64-bit Whidbey does NOT support interop debugging, so if this
    // is not managed code, do a normal unwind.
    if (!ExecutionManager::IsManagedCode((PCODE) ControlPc))
    {
        goto NORMAL_UNWIND;
    }

    UnwindInfo = (PUNWIND_INFO)(FunctionEntry->UnwindData + ImageBase);
    CurrentOffset = (ULONG)(ControlPc - (FunctionEntry->BeginAddress + ImageBase));

    // control stopped in prologue, normal unwind
    if (CurrentOffset < UnwindInfo->SizeOfProlog)
    {
        goto NORMAL_UNWIND;
    }

    // ASSUMPTION: only the first byte of an opcode will be patched by the CLR debugging code

    // Determine if we're in an epilogue and if there is at least one managed breakpoint.
    NextByte = (PUCHAR)ControlPc;

    TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);

    // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
    _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);

    // Check for an indication of the start of an epilogue:
    //      add rsp, imm8
    //      add rsp, imm32
    //      lea rsp, -disp8[fp]
    //      lea rsp, -disp32[fp]
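    // With rbp as the frame register, for example, these encode as:
    //      48 83 c4 imm8      add rsp, imm8
    //      48 81 c4 imm32     add rsp, imm32
    //      48 8d 65 disp8     lea rsp, disp8[rbp]
    //      48 8d a5 disp32    lea rsp, disp32[rbp]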
    if ((TempOpcode == AMD64_SIZE64_PREFIX)
        && (NextByte[1] == AMD64_ADD_IMM8_OP)
        && (NextByte[2] == 0xc4))
    {
        // add rsp, imm8.
        NextByte += 4;
    }
    else if ((TempOpcode == AMD64_SIZE64_PREFIX)
        && (NextByte[1] == AMD64_ADD_IMM32_OP)
        && (NextByte[2] == 0xc4))
    {
        // add rsp, imm32.
        NextByte += 7;
    }
    else if (((TempOpcode & 0xf8) == AMD64_SIZE64_PREFIX)
        && (NextByte[1] == AMD64_LEA_OP))
    {
        FrameRegister = ((TempOpcode & 0x7) << 3) | (NextByte[2] & 0x7);

        if ((FrameRegister != 0)
            && (FrameRegister == UnwindInfo->FrameRegister))
        {
            if ((NextByte[2] & 0xf8) == 0x60)
            {
                // lea rsp, disp8[fp].
                NextByte += 4;
            }
            else if ((NextByte[2] & 0xf8) == 0xa0)
            {
                // lea rsp, disp32[fp].
                NextByte += 7;
            }
        }
    }

    // If we haven't eaten any of the code stream detecting a stack adjustment
    // then TempOpcode is still valid; otherwise re-fetch it.
    if (((ULONG64)NextByte) != ControlPc)
    {
        TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
    }

    // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
    _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);

    // Check for any number of:
    //      pop nonvolatile-integer-register[0..15].
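    // (These encode as 58+r for rax..rdi, or a REX prefix followed by 58+r for r8..r15.)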
    while (TRUE)
    {
        if ((TempOpcode & 0xf8) == AMD64_POP_OP)
        {
            NextByte += 1;
        }
        else if (AMD64_IS_REX_PREFIX(TempOpcode)
            && ((NextByte[1] & 0xf8) == AMD64_POP_OP))
        {
            NextByte += 2;
        }
        else
        {
            // when we break out here TempOpcode will hold the next opcode so there
            // is no need to call GetOpcodeFromManagedBPForAddress again
            break;
        }
        TempOpcode = GetOpcodeFromManagedBPForAddress((ULONG64)NextByte, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);

        // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
        _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);
    }

    // TempOpcode == NextByte[0] unless NextByte[0] is a breakpoint
    _ASSERTE(TempOpcode == NextByte[0] || NextByte[0] == AMD64_INT3);

    // If the next instruction is a return, then control is currently in
    // an epilogue and execution of the epilogue should be emulated.
    // Otherwise, execution is not in an epilogue and the prologue should
    // be unwound.
    if (TempOpcode == AMD64_RET_OP || TempOpcode == AMD64_RET_OP_2)
    {
        // A return is an unambiguous indication of an epilogue
        InEpilogue = TRUE;
        NextByte += 1;
    }
    else if (TempOpcode == AMD64_REP_PREFIX && NextByte[1] == AMD64_RET_OP)
    {
        // A return is an unambiguous indication of an epilogue
        InEpilogue = TRUE;
        NextByte += 2;
    }
    else if (TempOpcode == AMD64_JMP_IMM8_OP || TempOpcode == AMD64_JMP_IMM32_OP)
    {
        // An unconditional branch to a target that is equal to the start of
        // or outside of this routine is logically a call to another function.
        BranchTarget = (ULONG64)NextByte - ImageBase;

        if (TempOpcode == AMD64_JMP_IMM8_OP)
        {
            BranchTarget += 2 + (CHAR)NextByte[1];
            NextByte += 2;
        }
        else
        {
            BranchTarget += 5 + *((LONG UNALIGNED *)&NextByte[1]);
            NextByte += 5;
        }

        // Now determine whether the branch target refers to code within this
        // function. If not, then it is an epilogue indicator.
        //
        // A branch to the start of self implies a recursive call, so
        // is treated as an epilogue.
        if (BranchTarget <= FunctionEntry->BeginAddress ||
            BranchTarget >= FunctionEntry->EndAddress)
        {
            _ASSERTE((UnwindInfo->Flags & UNW_FLAG_CHAININFO) == 0);
            InEpilogue = TRUE;
        }
    }
    else if ((TempOpcode == AMD64_JMP_IND_OP) && (NextByte[1] == 0x25))
    {
        // An unconditional jump indirect.

        // This is a jmp outside of the function, probably a tail call
        // to an import function.
        InEpilogue = TRUE;
        NextByte += 2;
    }
    else if (((TempOpcode & 0xf8) == AMD64_SIZE64_PREFIX)
        && (NextByte[1] == AMD64_JMP_IND_OP)
        && (NextByte[2] & 0x38) == AMD64_JMP_IND_RAX)
    {
        //
        // This is an indirect jump opcode: 0x48 0xff /4. The 64-bit
        // flag (REX.W) is always redundant here, so its presence is
        // overloaded to indicate a branch out of the function - a tail
        // call.
        //
        // Such an opcode is an unambiguous epilogue indication.
        //
        InEpilogue = TRUE;
        NextByte += 3;
    }

    if (InEpilogue && HasUnmanagedBreakpoint)
    {
        STRESS_LOG1(LF_CORDB, LL_ERROR, "RtlVirtualUnwind is about to fail b/c the ControlPc (0x%p) is in the epilogue of a function which has a 0xCC in its epilogue.", ControlPc);
        _ASSERTE(!"RtlVirtualUnwind is about to fail b/c you are unwinding through\n"
                  "the epilogue of a function and have a 0xCC in the code stream. This is\n"
                  "probably caused by having set that breakpoint yourself in the debugger;\n"
                  "try removing the breakpoint and ignoring this assert.");
    }

    if (!(InEpilogue && HasManagedBreakpoint))
    {
        goto NORMAL_UNWIND;
    }
    else
    {
        // InEpilogue && HasManagedBreakpoint: we have to build the fake code buffer.

        // We explicitly handle the case where the new below can't allocate, but we're still
        // getting an assert from inside new b/c we can be called within a FAULT_FORBID scope.
        //
        // If new does fail we will still end up crashing, but the debugger doesn't have to
        // be OOM hardened in Whidbey and this is a debugger-only code path, so we're ok in
        // that department.
        FAULT_NOT_FATAL();

        LOG((LF_CORDB, LL_EVERYTHING, "RVU_CBSW: Function has at least one managed bp in the epilogue, and we are in the epilogue; need a code buffer for RtlVirtualUnwind\n"));

        // IMPLEMENTATION NOTE:
        // Note that we significantly prune the function here in making the fake
        // code buffer: all that we make room for is 1 byte for the prologue, 1 byte for
        // function code, and what is left of the epilogue to be executed. This is _very_ closely
        // tied to the implementation of RtlVirtualUnwind and the knowledge that, by passing
        // the test above and having InEpilogue==TRUE, the code path which will be followed
        // through RtlVirtualUnwind is known.
        //
        // In making this fake code buffer we need to ensure that we don't mess with the outcome
        // of the test in RtlVirtualUnwind that determines that control stopped within a function
        // epilogue, or the unwinding that will happen when that test comes out TRUE. To that end
        // we preserve a single byte representing the Prologue as a section of the buffer,
        // as well as a single byte representing the Function code, so that tests to make sure
        // that we're out of the prologue will not fail.

        T_RUNTIME_FUNCTION FakeFunctionEntry;

        //
        // The buffer contains 4 sections:
        //
        // UNWIND_INFO:   The fake UNWIND_INFO comes first. We make a copy within the
        //                buffer because it needs to be addressable through a 32-bit offset
        //                of NewImageBase, like the fake code buffer.
        //
        // Prologue:      A single byte representing the function Prologue
        //
        // Function Code: A single byte representing the Function's code
        //
        // Epilogue:      This contains what is left to be executed of the Epilogue which control
        //                stopped in; it can be as little as a "return" type statement or as much
        //                as the whole Epilogue containing a stack adjustment, pops, and "return"
        //                type statement.
        //
        //
        // Here is the layout of the buffer:
        //
        // UNWIND_INFO copy:
        //      pBuffer[0]
        //      ...
        //      pBuffer[sizeof(UNWIND_INFO) - 1]
        // PROLOGUE:
        //      pBuffer[sizeof(UNWIND_INFO) + 0] <----------------- THIS IS THE START OF pCodeBuffer
        // FUNCTION CODE:
        //      pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE]
        // EPILOGUE:
        //      pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE]
        //      ...
        //      pBuffer[sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue]
        //
        ULONG SizeOfEpilogue = (ULONG)((ULONG64)NextByte - ControlPc);
        ULONG SizeOfBuffer = (ULONG)(sizeof(UNWIND_INFO) + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue);
        BYTE *pBuffer = new (nothrow) BYTE[SizeOfBuffer];
        BYTE *pCodeBuffer;
        ULONG64 NewImageBase;
        ULONG64 NewControlPc;

| 474 | // <TODO> This WILL fail during unwind because we KNOW there is a managed breakpoint |
| 475 | // in the epilog and we're in the epilog, but we could not allocate a buffer to |
| 476 | // put our cleaned up code into, what to do? </TODO> |
| 477 | if (pBuffer == NULL) |
| 478 | { |
| 479 | // TODO: can we throw OOM here? or will we just go recursive b/c that will eventually get to the same place? |
| 480 | _ASSERTE(!"OOM when trying to allocate buffer for virtual unwind cleaned code, BIG PROBLEM!!" ); |
| 481 | goto NORMAL_UNWIND; |
| 482 | } |
| 483 | |
        NewImageBase = ((((ULONG64)pBuffer) >> 32) << 32);
        pCodeBuffer = pBuffer + sizeof(UNWIND_INFO);

#if defined(_DEBUG)
        // Fill the fake prologue and function code bytes with int 3
        for (int i = 0; i < (FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE); i++)
        {
            pCodeBuffer[i] = AMD64_INT3;
        }
#endif

        // Copy the UNWIND_INFO and the epilogue into the buffer
        memcpy(pBuffer, (const void*)UnwindInfo, sizeof(UNWIND_INFO));
        memcpy(&(pCodeBuffer[FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE]), (const void*)(BYTE*)ControlPc, SizeOfEpilogue);

        _ASSERTE((UCHAR)*(BYTE*)ControlPc == (UCHAR)pCodeBuffer[FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE]);

        HasManagedBreakpoint = FALSE;
        HasUnmanagedBreakpoint = FALSE;

        // The buffer-cleaning implementation here just runs through the buffer byte by byte trying
        // to get a real opcode from the patch table for any 0xCC that it finds. There is the
        // possibility that the epilogue will contain a 0xCC in an immediate value for which a
        // patch won't be found and this will report a false positive for HasUnmanagedBreakpoint.
        BYTE* pCleanCodePc = pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE;
        BYTE* pRealCodePc = (BYTE*)ControlPc;
        while (pCleanCodePc < (pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue))
        {
            // If we have a breakpoint at the address then try to get the correct opcode from
            // the managed patch using GetOpcodeFromManagedBPForAddress.
            if (AMD64_INT3 == ((UCHAR)*pCleanCodePc))
            {
                (*pCleanCodePc) = GetOpcodeFromManagedBPForAddress((ULONG64)pRealCodePc, &HasManagedBreakpoint, &HasUnmanagedBreakpoint);
            }

            pCleanCodePc++;
            pRealCodePc++;
        }

        // On the second pass through the epilogue, assuming things are working as
        // they should, we should once again have at least one managed breakpoint...
        // otherwise why are we here?
        _ASSERTE(HasManagedBreakpoint == TRUE);

        // This would be nice to assert, but we can't with the current buffer-cleaning implementation; see note above.
        // _ASSERTE(HasUnmanagedBreakpoint == FALSE);

| 531 | ((PUNWIND_INFO)pBuffer)->SizeOfProlog = FAKE_PROLOG_SIZE; |
| 532 | |
| 533 | FakeFunctionEntry.BeginAddress = (ULONG)((ULONG64)pCodeBuffer - NewImageBase); |
| 534 | FakeFunctionEntry.EndAddress = (ULONG)((ULONG64)(pCodeBuffer + (FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE + SizeOfEpilogue)) - NewImageBase); |
| 535 | FakeFunctionEntry.UnwindData = (ULONG)((ULONG64)pBuffer - NewImageBase); |
| 536 | |
| 537 | NewControlPc = (ULONG64)(pCodeBuffer + FAKE_PROLOG_SIZE + FAKE_FUNCTION_CODE_SIZE); |
| 538 | |
| 539 | RtlVirtualUnwind_Unsafe((ULONG)HandlerType, (ULONG64)NewImageBase, (ULONG64)NewControlPc, &FakeFunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers); |
| 540 | |
| 541 | // Make sure to delete the whole buffer and not just the code buffer |
| 542 | delete[] pBuffer; |
| 543 | |
| 544 | return NULL; // if control left in the epilog then RtlVirtualUnwind will not return an exception handler |
| 545 | } |
| 546 | |
| 547 | NORMAL_UNWIND: |
| 548 | return RtlVirtualUnwind_Unsafe(HandlerType, ImageBase, ControlPc, FunctionEntry, ContextRecord, HandlerData, EstablisherFrame, ContextPointers); |
| 549 | } |
| 550 | #endif // DEBUGGING_SUPPORTED |
| 551 | |
#undef FAKE_PROLOG_SIZE
#undef FAKE_FUNCTION_CODE_SIZE

#undef AMD64_SIZE64_PREFIX
#undef AMD64_ADD_IMM8_OP
#undef AMD64_ADD_IMM32_OP
#undef AMD64_JMP_IMM8_OP
#undef AMD64_JMP_IMM32_OP
#undef AMD64_JMP_IND_OP
#undef AMD64_JMP_IND_RAX
#undef AMD64_LEA_OP
#undef AMD64_POP_OP
#undef AMD64_RET_OP
#undef AMD64_RET_OP_2
#undef AMD64_REP_PREFIX
#undef AMD64_NOP
#undef AMD64_INT3
#undef AMD64_IS_REX_PREFIX

#endif // !DACCESS_COMPILE

#ifndef DACCESS_COMPILE

// Returns TRUE if the caller should resume execution.
BOOL
AdjustContextForVirtualStub(
    EXCEPTION_RECORD *pExceptionRecord,
    CONTEXT *pContext)
{
    LIMITED_METHOD_CONTRACT;

    // Nothing to adjust

    return FALSE;
}

#endif // !DACCESS_COMPILE