// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

/*++



Module Name:

    context.c

Abstract:

    Implementation of GetThreadContext/SetThreadContext/DebugBreak.
    There are a lot of architecture specifics here.



--*/

#include "pal/dbgmsg.h"
SET_DEFAULT_DEBUG_CHANNEL(THREAD); // some headers have code with asserts, so do this first

#include "pal/palinternal.h"
#include "pal/context.h"
#include "pal/debug.h"
#include "pal/thread.hpp"
#include "pal/utils.h"
#include "pal/virtual.h"

#include <sys/ptrace.h>
#include <errno.h>
#include <unistd.h>

extern PGET_GCMARKER_EXCEPTION_CODE g_getGcMarkerExceptionCode;

#define CONTEXT_AREA_MASK 0xffff
#ifdef _X86_
#define CONTEXT_ALL_FLOATING (CONTEXT_FLOATING_POINT | CONTEXT_EXTENDED_REGISTERS)
#elif defined(_AMD64_)
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#elif defined(_ARM_)
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#elif defined(_ARM64_)
#define CONTEXT_ALL_FLOATING CONTEXT_FLOATING_POINT
#else
#error Unexpected architecture.
#endif

#if !HAVE_MACH_EXCEPTIONS

#ifndef __GLIBC__
typedef int __ptrace_request;
#endif

#if HAVE_MACHINE_REG_H
#include <machine/reg.h>
#endif // HAVE_MACHINE_REG_H
#if HAVE_MACHINE_NPX_H
#include <machine/npx.h>
#endif // HAVE_MACHINE_NPX_H

#if HAVE_PT_REGS
#include <asm/ptrace.h>
#endif // HAVE_PT_REGS

#ifdef _AMD64_
#define ASSIGN_CONTROL_REGS \
        ASSIGN_REG(Rbp) \
        ASSIGN_REG(Rip) \
        ASSIGN_REG(SegCs) \
        ASSIGN_REG(EFlags) \
        ASSIGN_REG(Rsp) \

#define ASSIGN_INTEGER_REGS \
        ASSIGN_REG(Rdi) \
        ASSIGN_REG(Rsi) \
        ASSIGN_REG(Rbx) \
        ASSIGN_REG(Rdx) \
        ASSIGN_REG(Rcx) \
        ASSIGN_REG(Rax) \
        ASSIGN_REG(R8) \
        ASSIGN_REG(R9) \
        ASSIGN_REG(R10) \
        ASSIGN_REG(R11) \
        ASSIGN_REG(R12) \
        ASSIGN_REG(R13) \
        ASSIGN_REG(R14) \
        ASSIGN_REG(R15) \

#elif defined(_X86_)
#define ASSIGN_CONTROL_REGS \
        ASSIGN_REG(Ebp) \
        ASSIGN_REG(Eip) \
        ASSIGN_REG(SegCs) \
        ASSIGN_REG(EFlags) \
        ASSIGN_REG(Esp) \
        ASSIGN_REG(SegSs) \

#define ASSIGN_INTEGER_REGS \
        ASSIGN_REG(Edi) \
        ASSIGN_REG(Esi) \
        ASSIGN_REG(Ebx) \
        ASSIGN_REG(Edx) \
        ASSIGN_REG(Ecx) \
        ASSIGN_REG(Eax) \

#elif defined(_ARM_)
#define ASSIGN_CONTROL_REGS \
        ASSIGN_REG(Sp) \
        ASSIGN_REG(Lr) \
        ASSIGN_REG(Pc) \
        ASSIGN_REG(Cpsr) \

#define ASSIGN_INTEGER_REGS \
        ASSIGN_REG(R0) \
        ASSIGN_REG(R1) \
        ASSIGN_REG(R2) \
        ASSIGN_REG(R3) \
        ASSIGN_REG(R4) \
        ASSIGN_REG(R5) \
        ASSIGN_REG(R6) \
        ASSIGN_REG(R7) \
        ASSIGN_REG(R8) \
        ASSIGN_REG(R9) \
        ASSIGN_REG(R10) \
        ASSIGN_REG(R11) \
        ASSIGN_REG(R12)
#elif defined(_ARM64_)
#define ASSIGN_CONTROL_REGS \
        ASSIGN_REG(Cpsr) \
        ASSIGN_REG(Fp) \
        ASSIGN_REG(Sp) \
        ASSIGN_REG(Lr) \
        ASSIGN_REG(Pc)

#define ASSIGN_INTEGER_REGS \
        ASSIGN_REG(X0) \
        ASSIGN_REG(X1) \
        ASSIGN_REG(X2) \
        ASSIGN_REG(X3) \
        ASSIGN_REG(X4) \
        ASSIGN_REG(X5) \
        ASSIGN_REG(X6) \
        ASSIGN_REG(X7) \
        ASSIGN_REG(X8) \
        ASSIGN_REG(X9) \
        ASSIGN_REG(X10) \
        ASSIGN_REG(X11) \
        ASSIGN_REG(X12) \
        ASSIGN_REG(X13) \
        ASSIGN_REG(X14) \
        ASSIGN_REG(X15) \
        ASSIGN_REG(X16) \
        ASSIGN_REG(X17) \
        ASSIGN_REG(X18) \
        ASSIGN_REG(X19) \
        ASSIGN_REG(X20) \
        ASSIGN_REG(X21) \
        ASSIGN_REG(X22) \
        ASSIGN_REG(X23) \
        ASSIGN_REG(X24) \
        ASSIGN_REG(X25) \
        ASSIGN_REG(X26) \
        ASSIGN_REG(X27) \
        ASSIGN_REG(X28)

#else
#error Don't know how to assign registers on this architecture
#endif

#define ASSIGN_ALL_REGS \
        ASSIGN_CONTROL_REGS \
        ASSIGN_INTEGER_REGS \

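// The ASSIGN_*_REGS blocks above are expansion templates rather than code: each
// consumer supplies its own ASSIGN_REG(reg) definition describing where a single
// register should be copied, expands ASSIGN_ALL_REGS (or just the control/integer
// subset), and then #undef's the macro. A minimal illustrative expansion, mirroring
// the pattern used by the functions below (the destination/source expressions are
// taken from this file; the surrounding usage is a sketch, not an additional API):
//
//     #define ASSIGN_REG(reg) MCREG_##reg(native->uc_mcontext) = lpContext->reg;
//         ASSIGN_ALL_REGS        // copies every control and integer register
//     #undef ASSIGN_REG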
/*++
Function:
  CONTEXT_GetRegisters

Abstract
  Retrieve the machine register values of the indicated process.

Parameter
  processId: process ID
  lpContext: context structure in which the machine register values will be returned.
Return
  Returns TRUE if it succeeds, FALSE otherwise
--*/
BOOL CONTEXT_GetRegisters(DWORD processId, LPCONTEXT lpContext)
{
#if HAVE_BSD_REGS_T
    int regFd = -1;
#endif // HAVE_BSD_REGS_T
    BOOL bRet = FALSE;

    if (processId == GetCurrentProcessId())
    {
        CONTEXT_CaptureContext(lpContext);
    }
    else
    {
        ucontext_t registers;
#if HAVE_PT_REGS
        struct pt_regs ptrace_registers;
        if (ptrace((__ptrace_request)PT_GETREGS, processId, (caddr_t) &ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
        struct reg ptrace_registers;
        if (PAL_PTRACE(PT_GETREGS, processId, &ptrace_registers, 0) == -1)
#endif
        {
            ASSERT("Failed ptrace(PT_GETREGS, processId:%d) errno:%d (%s)\n",
                   processId, errno, strerror(errno));
        }

#if HAVE_PT_REGS
#define ASSIGN_REG(reg) MCREG_##reg(registers.uc_mcontext) = PTREG_##reg(ptrace_registers);
#elif HAVE_BSD_REGS_T
#define ASSIGN_REG(reg) MCREG_##reg(registers.uc_mcontext) = BSDREG_##reg(ptrace_registers);
#else
#define ASSIGN_REG(reg)
        ASSERT("Don't know how to get the context of another process on this platform!");
        return bRet;
#endif
        ASSIGN_ALL_REGS
#undef ASSIGN_REG

        CONTEXTFromNativeContext(&registers, lpContext, lpContext->ContextFlags);
    }

    bRet = TRUE;
#if HAVE_BSD_REGS_T
    if (regFd != -1)
    {
        close(regFd);
    }
#endif // HAVE_BSD_REGS_T
    return bRet;
}

/*++
Function:
  GetThreadContext

See MSDN doc.
--*/
BOOL
CONTEXT_GetThreadContext(
    DWORD dwProcessId,
    pthread_t self,
    LPCONTEXT lpContext)
{
    BOOL ret = FALSE;

    if (lpContext == NULL)
    {
        ERROR("Invalid lpContext parameter value\n");
        SetLastError(ERROR_NOACCESS);
        goto EXIT;
    }

    /* How should we handle the case where self is not the current thread of
       its owning process? Machine register values can be retrieved either with
       a ptrace(pid, ...) call or from the "/proc/%pid/reg" file content.
       Unfortunately, both methods are keyed only by process ID, not by
       thread ID. */

    if (dwProcessId == GetCurrentProcessId())
    {
        if (self != pthread_self())
        {
            DWORD flags;
            // There aren't any APIs for this. We can potentially get the
            // context of another thread by using per-thread signals, but
            // on FreeBSD signal handlers that are called as a result
            // of signals raised via pthread_kill don't get a valid
            // sigcontext or ucontext_t. But we need this to return TRUE
            // to avoid an assertion in the CLR in code that manages to
            // cope reasonably well without a valid thread context.
            // Given that, we'll zero out our structure and return TRUE.
            ERROR("GetThreadContext on a thread other than the current "
                  "thread is returning TRUE\n");
            flags = lpContext->ContextFlags;
            memset(lpContext, 0, sizeof(*lpContext));
            lpContext->ContextFlags = flags;
            ret = TRUE;
            goto EXIT;
        }

    }

    if (lpContext->ContextFlags &
        (CONTEXT_CONTROL | CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
    {
        if (CONTEXT_GetRegisters(dwProcessId, lpContext) == FALSE)
        {
            SetLastError(ERROR_INTERNAL_ERROR);
            goto EXIT;
        }
    }

    ret = TRUE;

EXIT:
    return ret;
}

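// Illustrative caller-side sketch for the function above (the local names are
// hypothetical; the real callers are the PAL's public GetThreadContext paths):
//
//     CONTEXT ctx;
//     ctx.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
//     if (CONTEXT_GetThreadContext(GetCurrentProcessId(), pthread_self(), &ctx))
//     {
//         // ctx now holds the current thread's control and integer registers.
//     }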
/*++
Function:
  SetThreadContext

See MSDN doc.
--*/
BOOL
CONTEXT_SetThreadContext(
    DWORD dwProcessId,
    pthread_t self,
    CONST CONTEXT *lpContext)
{
    BOOL ret = FALSE;

#if HAVE_PT_REGS
    struct pt_regs ptrace_registers;
#elif HAVE_BSD_REGS_T
    struct reg ptrace_registers;
#endif

    if (lpContext == NULL)
    {
        ERROR("Invalid lpContext parameter value\n");
        SetLastError(ERROR_NOACCESS);
        goto EXIT;
    }

    /* How should we handle the case where self is not the current thread of
       its owning process? Machine register values can be retrieved either with
       a ptrace(pid, ...) call or from the "/proc/%pid/reg" file content.
       Unfortunately, both methods are keyed only by process ID, not by
       thread ID. */

    if (dwProcessId == GetCurrentProcessId())
    {
#ifdef FEATURE_PAL_SXS
        // Need to implement SetThreadContext(current thread) for the IX architecture; look at common_signal_handler.
        _ASSERT(FALSE);
#endif // FEATURE_PAL_SXS
        ASSERT("SetThreadContext should be called for cross-process only.\n");
        SetLastError(ERROR_INVALID_PARAMETER);
        goto EXIT;
    }

    if (lpContext->ContextFlags &
        (CONTEXT_CONTROL | CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
    {
#if HAVE_PT_REGS
        if (ptrace((__ptrace_request)PT_GETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
        if (PAL_PTRACE(PT_GETREGS, dwProcessId, &ptrace_registers, 0) == -1)
#endif
        {
            ASSERT("Failed ptrace(PT_GETREGS, processId:%d) errno:%d (%s)\n",
                   dwProcessId, errno, strerror(errno));
            SetLastError(ERROR_INTERNAL_ERROR);
            goto EXIT;
        }

#if HAVE_PT_REGS
#define ASSIGN_REG(reg) PTREG_##reg(ptrace_registers) = lpContext->reg;
#elif HAVE_BSD_REGS_T
#define ASSIGN_REG(reg) BSDREG_##reg(ptrace_registers) = lpContext->reg;
#else
#define ASSIGN_REG(reg)
        ASSERT("Don't know how to set the context of another process on this platform!");
        return FALSE;
#endif
        if (lpContext->ContextFlags & CONTEXT_CONTROL & CONTEXT_AREA_MASK)
        {
            ASSIGN_CONTROL_REGS
        }
        if (lpContext->ContextFlags & CONTEXT_INTEGER & CONTEXT_AREA_MASK)
        {
            ASSIGN_INTEGER_REGS
        }
#undef ASSIGN_REG

#if HAVE_PT_REGS
        if (ptrace((__ptrace_request)PT_SETREGS, dwProcessId, (caddr_t)&ptrace_registers, 0) == -1)
#elif HAVE_BSD_REGS_T
        if (PAL_PTRACE(PT_SETREGS, dwProcessId, &ptrace_registers, 0) == -1)
#endif
        {
            ASSERT("Failed ptrace(PT_SETREGS, processId:%d) errno:%d (%s)\n",
                   dwProcessId, errno, strerror(errno));
            SetLastError(ERROR_INTERNAL_ERROR);
            goto EXIT;
        }
    }

    ret = TRUE;
EXIT:
    return ret;
}

/*++
Function :
    CONTEXTToNativeContext

    Converts a CONTEXT record to a native context.

Parameters :
    CONST CONTEXT *lpContext : CONTEXT to convert
    native_context_t *native : native context to fill in

Return value :
    None

--*/
void CONTEXTToNativeContext(CONST CONTEXT *lpContext, native_context_t *native)
{
#define ASSIGN_REG(reg) MCREG_##reg(native->uc_mcontext) = lpContext->reg;
    if ((lpContext->ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        ASSIGN_CONTROL_REGS
    }

    if ((lpContext->ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        ASSIGN_INTEGER_REGS
    }
#undef ASSIGN_REG

#if HAVE_GREGSET_T || HAVE___GREGSET_T
#if HAVE_GREGSET_T
    if (native->uc_mcontext.fpregs == nullptr)
#elif HAVE___GREGSET_T
    if (native->uc_mcontext.__fpregs == nullptr)
#endif
    {
        // If the pointer to the floating point state in the native context
        // is not valid, we can't copy floating point registers regardless of
        // whether CONTEXT_FLOATING_POINT is set in the CONTEXT's flags.
        return;
    }
#endif

    if ((lpContext->ContextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
#ifdef _AMD64_
        FPREG_ControlWord(native) = lpContext->FltSave.ControlWord;
        FPREG_StatusWord(native) = lpContext->FltSave.StatusWord;
        FPREG_TagWord(native) = lpContext->FltSave.TagWord;
        FPREG_ErrorOffset(native) = lpContext->FltSave.ErrorOffset;
        FPREG_ErrorSelector(native) = lpContext->FltSave.ErrorSelector;
        FPREG_DataOffset(native) = lpContext->FltSave.DataOffset;
        FPREG_DataSelector(native) = lpContext->FltSave.DataSelector;
        FPREG_MxCsr(native) = lpContext->FltSave.MxCsr;
        FPREG_MxCsr_Mask(native) = lpContext->FltSave.MxCsr_Mask;

        for (int i = 0; i < 8; i++)
        {
            FPREG_St(native, i) = lpContext->FltSave.FloatRegisters[i];
        }

        for (int i = 0; i < 16; i++)
        {
            FPREG_Xmm(native, i) = lpContext->FltSave.XmmRegisters[i];
        }
#endif
    }

    // TODO: Enable for all Unix systems
#if defined(_AMD64_) && defined(XSTATE_SUPPORTED)
    if ((lpContext->ContextFlags & CONTEXT_XSTATE) == CONTEXT_XSTATE)
    {
        _ASSERTE(FPREG_HasYmmRegisters(native));
        memcpy_s(FPREG_Xstate_Ymmh(native), sizeof(M128A) * 16, lpContext->VectorRegister, sizeof(M128A) * 16);
    }
#endif //_AMD64_ && XSTATE_SUPPORTED
}

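// Sketch of the intended pairing with CONTEXTFromNativeContext (defined below).
// A signal handler typically converts the native ucontext into a CONTEXT, lets
// the runtime inspect or edit it, and then writes the result back before
// returning so execution resumes with the updated registers (the variable names
// and surrounding flow are illustrative assumptions, not code from this file):
//
//     CONTEXT winContext;
//     winContext.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER;
//     CONTEXTFromNativeContext(native, &winContext, winContext.ContextFlags);
//     // ... adjust winContext, e.g. redirect Rip/Pc to a helper ...
//     CONTEXTToNativeContext(&winContext, native);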
/*++
Function :
    CONTEXTFromNativeContext

    Converts a native context to a CONTEXT record.

Parameters :
    const native_context_t *native : native context to convert
    LPCONTEXT lpContext : CONTEXT to fill in
    ULONG contextFlags : flags that determine which registers are valid in
                         native and which ones to set in lpContext

Return value :
    None

--*/
void CONTEXTFromNativeContext(const native_context_t *native, LPCONTEXT lpContext,
                              ULONG contextFlags)
{
    lpContext->ContextFlags = contextFlags;

#define ASSIGN_REG(reg) lpContext->reg = MCREG_##reg(native->uc_mcontext);
    if ((contextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL)
    {
        ASSIGN_CONTROL_REGS
#if defined(_ARM_)
        // WinContext assumes that the least bit of Pc is always 1 (denoting thumb)
        // although the pc value retrieved from the native context might not have the least bit set.
        // This becomes especially problematic if the context is on the JIT_WRITEBARRIER.
        lpContext->Pc |= 0x1;
#endif
    }

    if ((contextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER)
    {
        ASSIGN_INTEGER_REGS
    }
#undef ASSIGN_REG

#if HAVE_GREGSET_T || HAVE___GREGSET_T
#if HAVE_GREGSET_T
    if (native->uc_mcontext.fpregs == nullptr)
#elif HAVE___GREGSET_T
    if (native->uc_mcontext.__fpregs == nullptr)
#endif
    {
        // Reset the CONTEXT_FLOATING_POINT bit(s) and the CONTEXT_XSTATE bit(s) so it's
        // clear that the floating point and extended state data in the CONTEXT is not
        // valid. Since these flags are defined as the architecture bit(s) OR'd with one
        // or more other bits, we first get the bits that are unique to each by resetting
        // the architecture bits. We determine what those are by inverting the union of
        // CONTEXT_CONTROL and CONTEXT_INTEGER, both of which should also have the
        // architecture bit(s) set.
        const ULONG floatingPointFlags = CONTEXT_FLOATING_POINT & ~(CONTEXT_CONTROL & CONTEXT_INTEGER);
        const ULONG xstateFlags = CONTEXT_XSTATE & ~(CONTEXT_CONTROL & CONTEXT_INTEGER);

        lpContext->ContextFlags &= ~(floatingPointFlags | xstateFlags);

        // Bail out regardless of whether the caller wanted CONTEXT_FLOATING_POINT or CONTEXT_XSTATE
        return;
    }
#endif

    if ((contextFlags & CONTEXT_FLOATING_POINT) == CONTEXT_FLOATING_POINT)
    {
#ifdef _AMD64_
        lpContext->FltSave.ControlWord = FPREG_ControlWord(native);
        lpContext->FltSave.StatusWord = FPREG_StatusWord(native);
        lpContext->FltSave.TagWord = FPREG_TagWord(native);
        lpContext->FltSave.ErrorOffset = FPREG_ErrorOffset(native);
        lpContext->FltSave.ErrorSelector = FPREG_ErrorSelector(native);
        lpContext->FltSave.DataOffset = FPREG_DataOffset(native);
        lpContext->FltSave.DataSelector = FPREG_DataSelector(native);
        lpContext->FltSave.MxCsr = FPREG_MxCsr(native);
        lpContext->FltSave.MxCsr_Mask = FPREG_MxCsr_Mask(native);

        for (int i = 0; i < 8; i++)
        {
            lpContext->FltSave.FloatRegisters[i] = FPREG_St(native, i);
        }

        for (int i = 0; i < 16; i++)
        {
            lpContext->FltSave.XmmRegisters[i] = FPREG_Xmm(native, i);
        }
#endif
    }

#ifdef _AMD64_
    if ((contextFlags & CONTEXT_XSTATE) == CONTEXT_XSTATE)
    {
        // TODO: Enable for all Unix systems
#if XSTATE_SUPPORTED
        if (FPREG_HasYmmRegisters(native))
        {
            memcpy_s(lpContext->VectorRegister, sizeof(M128A) * 16, FPREG_Xstate_Ymmh(native), sizeof(M128A) * 16);
        }
        else
#endif // XSTATE_SUPPORTED
        {
            // Reset the CONTEXT_XSTATE bit(s) so it's clear that the extended state data in
            // the CONTEXT is not valid.
            const ULONG xstateFlags = CONTEXT_XSTATE & ~(CONTEXT_CONTROL & CONTEXT_INTEGER);
            lpContext->ContextFlags &= ~xstateFlags;
        }
    }
#endif // _AMD64_
}

/*++
Function :
    GetNativeContextPC

    Returns the program counter from the native context.

Parameters :
    const native_context_t *native : native context

Return value :
    The program counter from the native context.

--*/
LPVOID GetNativeContextPC(const native_context_t *context)
{
#ifdef _AMD64_
    return (LPVOID)MCREG_Rip(context->uc_mcontext);
#elif defined(_X86_)
    return (LPVOID) MCREG_Eip(context->uc_mcontext);
#elif defined(_ARM_)
    return (LPVOID) MCREG_Pc(context->uc_mcontext);
#elif defined(_ARM64_)
    return (LPVOID) MCREG_Pc(context->uc_mcontext);
#else
#   error implement me for this architecture
#endif
}

/*++
Function :
    GetNativeContextSP

    Returns the stack pointer from the native context.

Parameters :
    const native_context_t *native : native context

Return value :
    The stack pointer from the native context.

--*/
LPVOID GetNativeContextSP(const native_context_t *context)
{
#ifdef _AMD64_
    return (LPVOID)MCREG_Rsp(context->uc_mcontext);
#elif defined(_X86_)
    return (LPVOID) MCREG_Esp(context->uc_mcontext);
#elif defined(_ARM_)
    return (LPVOID) MCREG_Sp(context->uc_mcontext);
#elif defined(_ARM64_)
    return (LPVOID) MCREG_Sp(context->uc_mcontext);
#else
#   error implement me for this architecture
#endif
}


/*++
Function :
    CONTEXTGetExceptionCodeForSignal

    Translates signal and context information to a Win32 exception code.

Parameters :
    const siginfo_t *siginfo : signal information from a signal handler
    const native_context_t *context : context information

Return value :
    The Win32 exception code that corresponds to the signal and context
    information.

--*/
#ifdef ILL_ILLOPC
// If si_code values are available for all signals, use those.
DWORD CONTEXTGetExceptionCodeForSignal(const siginfo_t *siginfo,
                                       const native_context_t *context)
{
    // IMPORTANT NOTE: This function must not call any signal unsafe functions
    // since it is called from signal handlers.
    // That includes ASSERT and TRACE macros.

    switch (siginfo->si_signo)
    {
        case SIGILL:
            switch (siginfo->si_code)
            {
                case ILL_ILLOPC:    // Illegal opcode
                case ILL_ILLOPN:    // Illegal operand
                case ILL_ILLADR:    // Illegal addressing mode
                case ILL_ILLTRP:    // Illegal trap
                case ILL_COPROC:    // Co-processor error
                    return EXCEPTION_ILLEGAL_INSTRUCTION;
                case ILL_PRVOPC:    // Privileged opcode
                case ILL_PRVREG:    // Privileged register
                    return EXCEPTION_PRIV_INSTRUCTION;
                case ILL_BADSTK:    // Internal stack error
                    return EXCEPTION_STACK_OVERFLOW;
                default:
                    break;
            }
            break;
        case SIGFPE:
            switch (siginfo->si_code)
            {
                case FPE_INTDIV:
                    return EXCEPTION_INT_DIVIDE_BY_ZERO;
                case FPE_INTOVF:
                    return EXCEPTION_INT_OVERFLOW;
                case FPE_FLTDIV:
                    return EXCEPTION_FLT_DIVIDE_BY_ZERO;
                case FPE_FLTOVF:
                    return EXCEPTION_FLT_OVERFLOW;
                case FPE_FLTUND:
                    return EXCEPTION_FLT_UNDERFLOW;
                case FPE_FLTRES:
                    return EXCEPTION_FLT_INEXACT_RESULT;
                case FPE_FLTINV:
                    return EXCEPTION_FLT_INVALID_OPERATION;
                case FPE_FLTSUB:
                    return EXCEPTION_FLT_INVALID_OPERATION;
                default:
                    break;
            }
            break;
        case SIGSEGV:
            switch (siginfo->si_code)
            {
                case SI_USER:       // User-generated signal, sometimes sent
                                    // for SIGSEGV under normal circumstances
                case SEGV_MAPERR:   // Address not mapped to object
                case SEGV_ACCERR:   // Invalid permissions for mapped object
                    return EXCEPTION_ACCESS_VIOLATION;

#ifdef SI_KERNEL
                case SI_KERNEL:
                {
                    // Identify privileged instructions that are not identified as such by the system
                    if (g_getGcMarkerExceptionCode != nullptr)
                    {
                        DWORD exceptionCode = g_getGcMarkerExceptionCode(GetNativeContextPC(context));
                        if (exceptionCode != 0)
                        {
                            return exceptionCode;
                        }
                    }
                    return EXCEPTION_ACCESS_VIOLATION;
                }
#endif
                default:
                    break;
            }
            break;
        case SIGBUS:
            switch (siginfo->si_code)
            {
                case BUS_ADRALN:    // Invalid address alignment
                    return EXCEPTION_DATATYPE_MISALIGNMENT;
                case BUS_ADRERR:    // Non-existent physical address
                    return EXCEPTION_ACCESS_VIOLATION;
                case BUS_OBJERR:    // Object-specific hardware error
                default:
                    break;
            }
        case SIGTRAP:
            switch (siginfo->si_code)
            {
#ifdef SI_KERNEL
                case SI_KERNEL:
#endif
                case SI_USER:
                case TRAP_BRKPT:    // Process breakpoint
                    return EXCEPTION_BREAKPOINT;
                case TRAP_TRACE:    // Process trace trap
                    return EXCEPTION_SINGLE_STEP;
                default:
                    // Got unknown SIGTRAP signal with code siginfo->si_code;
                    return EXCEPTION_ILLEGAL_INSTRUCTION;
            }
        default:
            break;
    }

    // Got unknown signal number siginfo->si_signo with code siginfo->si_code;
    return EXCEPTION_ILLEGAL_INSTRUCTION;
}
#else // ILL_ILLOPC
DWORD CONTEXTGetExceptionCodeForSignal(const siginfo_t *siginfo,
                                       const native_context_t *context)
{
    // IMPORTANT NOTE: This function must not call any signal unsafe functions
    // since it is called from signal handlers.
    // That includes ASSERT and TRACE macros.

    int trap;

    if (siginfo->si_signo == SIGFPE)
    {
        // Floating point exceptions are mapped by their si_code.
        switch (siginfo->si_code)
        {
            case FPE_INTDIV :
                return EXCEPTION_INT_DIVIDE_BY_ZERO;
            case FPE_INTOVF :
                return EXCEPTION_INT_OVERFLOW;
            case FPE_FLTDIV :
                return EXCEPTION_FLT_DIVIDE_BY_ZERO;
            case FPE_FLTOVF :
                return EXCEPTION_FLT_OVERFLOW;
            case FPE_FLTUND :
                return EXCEPTION_FLT_UNDERFLOW;
            case FPE_FLTRES :
                return EXCEPTION_FLT_INEXACT_RESULT;
            case FPE_FLTINV :
                return EXCEPTION_FLT_INVALID_OPERATION;
            case FPE_FLTSUB :   /* subscript out of range */
                return EXCEPTION_FLT_INVALID_OPERATION;
            default:
                // Got unknown signal code siginfo->si_code;
                return 0;
        }
    }

    trap = context->uc_mcontext.mc_trapno;
    switch (trap)
    {
        case T_PRIVINFLT :  /* privileged instruction */
            return EXCEPTION_PRIV_INSTRUCTION;
        case T_BPTFLT :     /* breakpoint instruction */
            return EXCEPTION_BREAKPOINT;
        case T_ARITHTRAP :  /* arithmetic trap */
            return 0;       /* let the caller pick an exception code */
#ifdef T_ASTFLT
        case T_ASTFLT :     /* system forced exception : ^C, ^\. SIGINT signal
                               handler shouldn't be calling this function, since
                               it doesn't need an exception code */
            // Trap code T_ASTFLT received, shouldn't get here;
            return 0;
#endif // T_ASTFLT
        case T_PROTFLT :    /* protection fault */
            return EXCEPTION_ACCESS_VIOLATION;
        case T_TRCTRAP :    /* debug exception (sic) */
            return EXCEPTION_SINGLE_STEP;
        case T_PAGEFLT :    /* page fault */
            return EXCEPTION_ACCESS_VIOLATION;
        case T_ALIGNFLT :   /* alignment fault */
            return EXCEPTION_DATATYPE_MISALIGNMENT;
        case T_DIVIDE :
            return EXCEPTION_INT_DIVIDE_BY_ZERO;
        case T_NMI :        /* non-maskable trap */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        case T_OFLOW :
            return EXCEPTION_INT_OVERFLOW;
        case T_BOUND :      /* bound instruction fault */
            return EXCEPTION_ARRAY_BOUNDS_EXCEEDED;
        case T_DNA :        /* device not available fault */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        case T_DOUBLEFLT :  /* double fault */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        case T_FPOPFLT :    /* fp coprocessor operand fetch fault */
            return EXCEPTION_FLT_INVALID_OPERATION;
        case T_TSSFLT :     /* invalid tss fault */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        case T_SEGNPFLT :   /* segment not present fault */
            return EXCEPTION_ACCESS_VIOLATION;
        case T_STKFLT :     /* stack fault */
            return EXCEPTION_STACK_OVERFLOW;
        case T_MCHK :       /* machine check trap */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        case T_RESERVED :   /* reserved (unknown) */
            return EXCEPTION_ILLEGAL_INSTRUCTION;
        default:
            // Got unknown trap code trap;
            break;
    }
    return EXCEPTION_ILLEGAL_INSTRUCTION;
}
#endif // ILL_ILLOPC

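// Minimal sketch of how a hardware-exception signal handler is expected to use
// CONTEXTGetExceptionCodeForSignal (the handler below is hypothetical; the real
// wiring lives in the PAL's signal handling code):
//
//     static void sigsegv_handler(int code, siginfo_t *siginfo, void *context)
//     {
//         const native_context_t *ucontext = (const native_context_t *)context;
//         DWORD exceptionCode = CONTEXTGetExceptionCodeForSignal(siginfo, ucontext);
//         // e.g. SEGV_MAPERR/SEGV_ACCERR map to EXCEPTION_ACCESS_VIOLATION.
//     }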
#else // !HAVE_MACH_EXCEPTIONS

#include <mach/message.h>
#include <mach/thread_act.h>
#include "../exception/machexception.h"

/*++
Function:
  CONTEXT_GetThreadContextFromPort

  Helper for GetThreadContext that uses a mach_port
--*/
kern_return_t
CONTEXT_GetThreadContextFromPort(
    mach_port_t Port,
    LPCONTEXT lpContext)
{
    // Extract the CONTEXT from the Mach thread.

    kern_return_t MachRet = KERN_SUCCESS;
    mach_msg_type_number_t StateCount;
    thread_state_flavor_t StateFlavor;

    if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK)
    {
#ifdef _X86_
        x86_thread_state32_t State;
        StateFlavor = x86_THREAD_STATE32;
#elif defined(_AMD64_)
        x86_thread_state64_t State;
        StateFlavor = x86_THREAD_STATE64;
#else
#error Unexpected architecture.
#endif
        StateCount = sizeof(State) / sizeof(natural_t);
        MachRet = thread_get_state(Port, StateFlavor, (thread_state_t)&State, &StateCount);
        if (MachRet != KERN_SUCCESS)
        {
            ASSERT("thread_get_state(THREAD_STATE) failed: %d\n", MachRet);
            goto exit;
        }

        CONTEXT_GetThreadContextFromThreadState(StateFlavor, (thread_state_t)&State, lpContext);
    }

    if (lpContext->ContextFlags & CONTEXT_ALL_FLOATING & CONTEXT_AREA_MASK) {
#ifdef _X86_
        x86_float_state32_t State;
        StateFlavor = x86_FLOAT_STATE32;
#elif defined(_AMD64_)
        x86_float_state64_t State;
        StateFlavor = x86_FLOAT_STATE64;
#else
#error Unexpected architecture.
#endif
        StateCount = sizeof(State) / sizeof(natural_t);
        MachRet = thread_get_state(Port, StateFlavor, (thread_state_t)&State, &StateCount);
        if (MachRet != KERN_SUCCESS)
        {
            ASSERT("thread_get_state(FLOAT_STATE) failed: %d\n", MachRet);
            goto exit;
        }

        CONTEXT_GetThreadContextFromThreadState(StateFlavor, (thread_state_t)&State, lpContext);
    }

#if defined(_AMD64_) && defined(XSTATE_SUPPORTED)
    if (lpContext->ContextFlags & CONTEXT_XSTATE & CONTEXT_AREA_MASK) {
        x86_avx_state64_t State;
        StateFlavor = x86_AVX_STATE64;
        StateCount = sizeof(State) / sizeof(natural_t);
        MachRet = thread_get_state(Port, StateFlavor, (thread_state_t)&State, &StateCount);
        if (MachRet != KERN_SUCCESS)
        {
            ASSERT("thread_get_state(XSTATE) failed: %d\n", MachRet);
            goto exit;
        }

        CONTEXT_GetThreadContextFromThreadState(StateFlavor, (thread_state_t)&State, lpContext);
    }
#endif

exit:
    return MachRet;
}

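// Illustrative use of the port-based helper for another thread in this process
// (the local names are hypothetical; CONTEXT_GetThreadContext below does
// essentially this for the non-current-thread case):
//
//     mach_port_t port = pthread_mach_thread_np(otherPthread);
//     CONTEXT ctx;
//     ctx.ContextFlags = CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT;
//     if (CONTEXT_GetThreadContextFromPort(port, &ctx) == KERN_SUCCESS)
//     {
//         // ctx holds the registers Mach reported for that thread.
//     }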
/*++
Function:
  CONTEXT_GetThreadContextFromThreadState

--*/
void
CONTEXT_GetThreadContextFromThreadState(
    thread_state_flavor_t threadStateFlavor,
    thread_state_t threadState,
    LPCONTEXT lpContext)
{
    switch (threadStateFlavor)
    {
#ifdef _X86_
        case x86_THREAD_STATE32:
            if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK)
            {
                x86_thread_state32_t *pState = (x86_thread_state32_t *)threadState;

                lpContext->Eax = pState->eax;
                lpContext->Ebx = pState->ebx;
                lpContext->Ecx = pState->ecx;
                lpContext->Edx = pState->edx;
                lpContext->Edi = pState->edi;
                lpContext->Esi = pState->esi;
                lpContext->Ebp = pState->ebp;
                lpContext->Esp = pState->esp;
                lpContext->SegSs = pState->ss;
                lpContext->EFlags = pState->eflags;
                lpContext->Eip = pState->eip;
                lpContext->SegCs = pState->cs;
                lpContext->SegDs_PAL_Undefined = pState->ds;
                lpContext->SegEs_PAL_Undefined = pState->es;
                lpContext->SegFs_PAL_Undefined = pState->fs;
                lpContext->SegGs_PAL_Undefined = pState->gs;
            }
            break;

        case x86_FLOAT_STATE32:
        {
            x86_float_state32_t *pState = (x86_float_state32_t *)threadState;

            if (lpContext->ContextFlags & CONTEXT_FLOATING_POINT & CONTEXT_AREA_MASK)
            {
                lpContext->FloatSave.ControlWord = *(DWORD*)&pState->fpu_fcw;
                lpContext->FloatSave.StatusWord = *(DWORD*)&pState->fpu_fsw;
                lpContext->FloatSave.TagWord = pState->fpu_ftw;
                lpContext->FloatSave.ErrorOffset = pState->fpu_ip;
                lpContext->FloatSave.ErrorSelector = pState->fpu_cs;
                lpContext->FloatSave.DataOffset = pState->fpu_dp;
                lpContext->FloatSave.DataSelector = pState->fpu_ds;
                lpContext->FloatSave.Cr0NpxState = pState->fpu_mxcsr;

                // Windows stores the floating point registers in a packed layout (each 10-byte register end to end
                // for a total of 80 bytes). But Mach returns each register in a 16-byte structure (presumably for
                // alignment purposes). So we can't just memcpy the registers over in a single block, we need to copy
                // them individually.
                for (int i = 0; i < 8; i++)
                    memcpy(&lpContext->FloatSave.RegisterArea[i * 10], (&pState->fpu_stmm0)[i].mmst_reg, 10);
            }

            if (lpContext->ContextFlags & CONTEXT_EXTENDED_REGISTERS & CONTEXT_AREA_MASK)
            {
                // The only extended register information that Mach will tell us about are the xmm register values.
                // Both Windows and Mach store the registers in a packed layout (each of the 8 registers is 16 bytes)
                // so we can simply memcpy them across.
                memcpy(lpContext->ExtendedRegisters + CONTEXT_EXREG_XMM_OFFSET, &pState->fpu_xmm0, 8 * 16);
            }
        }
        break;

#elif defined(_AMD64_)
        case x86_THREAD_STATE64:
            if (lpContext->ContextFlags & (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS) & CONTEXT_AREA_MASK)
            {
                x86_thread_state64_t *pState = (x86_thread_state64_t *)threadState;

                lpContext->Rax = pState->__rax;
                lpContext->Rbx = pState->__rbx;
                lpContext->Rcx = pState->__rcx;
                lpContext->Rdx = pState->__rdx;
                lpContext->Rdi = pState->__rdi;
                lpContext->Rsi = pState->__rsi;
                lpContext->Rbp = pState->__rbp;
                lpContext->Rsp = pState->__rsp;
                lpContext->R8 = pState->__r8;
                lpContext->R9 = pState->__r9;
                lpContext->R10 = pState->__r10;
                lpContext->R11 = pState->__r11;
                lpContext->R12 = pState->__r12;
                lpContext->R13 = pState->__r13;
                lpContext->R14 = pState->__r14;
                lpContext->R15 = pState->__r15;
                lpContext->EFlags = pState->__rflags;
                lpContext->Rip = pState->__rip;
                lpContext->SegCs = pState->__cs;
                // RtlRestoreContext uses the actual ss instead of this one
                // to build the iret frame so just set it zero.
                lpContext->SegSs = 0;
                lpContext->SegDs = 0;
                lpContext->SegEs = 0;
                lpContext->SegFs = pState->__fs;
                lpContext->SegGs = pState->__gs;
            }
            break;

        case x86_FLOAT_STATE64:
            if (lpContext->ContextFlags & CONTEXT_FLOATING_POINT & CONTEXT_AREA_MASK)
            {
                x86_float_state64_t *pState = (x86_float_state64_t *)threadState;

                lpContext->FltSave.ControlWord = *(DWORD*)&pState->__fpu_fcw;
                lpContext->FltSave.StatusWord = *(DWORD*)&pState->__fpu_fsw;
                lpContext->FltSave.TagWord = pState->__fpu_ftw;
                lpContext->FltSave.ErrorOffset = pState->__fpu_ip;
                lpContext->FltSave.ErrorSelector = pState->__fpu_cs;
                lpContext->FltSave.DataOffset = pState->__fpu_dp;
                lpContext->FltSave.DataSelector = pState->__fpu_ds;
                lpContext->FltSave.MxCsr = pState->__fpu_mxcsr;
                lpContext->FltSave.MxCsr_Mask = pState->__fpu_mxcsrmask; // note: we don't save the mask for x86

                // Windows stores the floating point registers in a packed layout (each 10-byte register end to end
                // for a total of 80 bytes). But Mach returns each register in a 16-byte structure (presumably for
                // alignment purposes). So we can't just memcpy the registers over in a single block, we need to copy
                // them individually.
                for (int i = 0; i < 8; i++)
                    memcpy(&lpContext->FltSave.FloatRegisters[i], (&pState->__fpu_stmm0)[i].__mmst_reg, 10);

                // AMD64's FLOATING_POINT includes the xmm registers.
                memcpy(&lpContext->Xmm0, &pState->__fpu_xmm0, 16 * 16);
            }
            break;

#ifdef XSTATE_SUPPORTED
        case x86_AVX_STATE64:
            if (lpContext->ContextFlags & CONTEXT_XSTATE & CONTEXT_AREA_MASK)
            {
                x86_avx_state64_t *pState = (x86_avx_state64_t *)threadState;
                memcpy(&lpContext->VectorRegister, &pState->__fpu_ymmh0, 16 * 16);
            }
            break;
#endif
#else
#error Unexpected architecture.
#endif
        case x86_THREAD_STATE:
        {
            x86_thread_state_t *pState = (x86_thread_state_t *)threadState;
            CONTEXT_GetThreadContextFromThreadState((thread_state_flavor_t)pState->tsh.flavor, (thread_state_t)&pState->uts, lpContext);
        }
        break;

        case x86_FLOAT_STATE:
        {
            x86_float_state_t *pState = (x86_float_state_t *)threadState;
            CONTEXT_GetThreadContextFromThreadState((thread_state_flavor_t)pState->fsh.flavor, (thread_state_t)&pState->ufs, lpContext);
        }
        break;

        default:
            ASSERT("Invalid thread state flavor %d\n", threadStateFlavor);
            break;
    }
}

/*++
Function:
  GetThreadContext

See MSDN doc.
--*/
BOOL
CONTEXT_GetThreadContext(
    DWORD dwProcessId,
    pthread_t self,
    LPCONTEXT lpContext)
{
    BOOL ret = FALSE;

    if (lpContext == NULL)
    {
        ERROR("Invalid lpContext parameter value\n");
        SetLastError(ERROR_NOACCESS);
        goto EXIT;
    }

    if (GetCurrentProcessId() == dwProcessId)
    {
        if (self != pthread_self())
        {
            // the target thread is in the current process, but isn't
            // the current one: extract the CONTEXT from the Mach thread.
            mach_port_t mptPort;
            mptPort = pthread_mach_thread_np(self);

            ret = (CONTEXT_GetThreadContextFromPort(mptPort, lpContext) == KERN_SUCCESS);
        }
        else
        {
            CONTEXT_CaptureContext(lpContext);
            ret = TRUE;
        }
    }
    else
    {
        ASSERT("Cross-process GetThreadContext() is not supported on this platform\n");
        SetLastError(ERROR_NOACCESS);
    }

EXIT:
    return ret;
}

/*++
Function:
  SetThreadContextOnPort

  Helper for CONTEXT_SetThreadContext
--*/
kern_return_t
CONTEXT_SetThreadContextOnPort(
    mach_port_t Port,
    IN CONST CONTEXT *lpContext)
{
    kern_return_t MachRet = KERN_SUCCESS;
    mach_msg_type_number_t StateCount;
    thread_state_flavor_t StateFlavor;

    if (lpContext->ContextFlags & (CONTEXT_CONTROL|CONTEXT_INTEGER) & CONTEXT_AREA_MASK)
    {
#ifdef _X86_
        x86_thread_state32_t State;
        StateFlavor = x86_THREAD_STATE32;

        State.eax = lpContext->Eax;
        State.ebx = lpContext->Ebx;
        State.ecx = lpContext->Ecx;
        State.edx = lpContext->Edx;
        State.edi = lpContext->Edi;
        State.esi = lpContext->Esi;
        State.ebp = lpContext->Ebp;
        State.esp = lpContext->Esp;
        State.ss = lpContext->SegSs;
        State.eflags = lpContext->EFlags;
        State.eip = lpContext->Eip;
        State.cs = lpContext->SegCs;
        State.ds = lpContext->SegDs_PAL_Undefined;
        State.es = lpContext->SegEs_PAL_Undefined;
        State.fs = lpContext->SegFs_PAL_Undefined;
        State.gs = lpContext->SegGs_PAL_Undefined;
#elif defined(_AMD64_)
        x86_thread_state64_t State;
        StateFlavor = x86_THREAD_STATE64;

        State.__rax = lpContext->Rax;
        State.__rbx = lpContext->Rbx;
        State.__rcx = lpContext->Rcx;
        State.__rdx = lpContext->Rdx;
        State.__rdi = lpContext->Rdi;
        State.__rsi = lpContext->Rsi;
        State.__rbp = lpContext->Rbp;
        State.__rsp = lpContext->Rsp;
        State.__r8 = lpContext->R8;
        State.__r9 = lpContext->R9;
        State.__r10 = lpContext->R10;
        State.__r11 = lpContext->R11;
        State.__r12 = lpContext->R12;
        State.__r13 = lpContext->R13;
        State.__r14 = lpContext->R14;
        State.__r15 = lpContext->R15;
//      State.ss = lpContext->SegSs;
        State.__rflags = lpContext->EFlags;
        State.__rip = lpContext->Rip;
        State.__cs = lpContext->SegCs;
//      State.ds = lpContext->SegDs_PAL_Undefined;
//      State.es = lpContext->SegEs_PAL_Undefined;
        State.__fs = lpContext->SegFs;
        State.__gs = lpContext->SegGs;
#else
#error Unexpected architecture.
#endif

        StateCount = sizeof(State) / sizeof(natural_t);

        MachRet = thread_set_state(Port,
                                   StateFlavor,
                                   (thread_state_t)&State,
                                   StateCount);
        if (MachRet != KERN_SUCCESS)
        {
            ASSERT("thread_set_state(THREAD_STATE) failed: %d\n", MachRet);
            goto EXIT;
        }
    }

    if (lpContext->ContextFlags & CONTEXT_ALL_FLOATING & CONTEXT_AREA_MASK)
    {

#ifdef _X86_
        x86_float_state32_t State;
        StateFlavor = x86_FLOAT_STATE32;
        StateCount = sizeof(State) / sizeof(natural_t);
#elif defined(_AMD64_)
#ifdef XSTATE_SUPPORTED
        // We're relying on the fact that the initial portion of
        // x86_avx_state64_t is identical to x86_float_state64_t.
        // Check a few fields to make sure the assumption is correct.
        static_assert_no_msg(sizeof(x86_avx_state64_t) > sizeof(x86_float_state64_t));
        static_assert_no_msg(offsetof(x86_avx_state64_t, __fpu_fcw) == offsetof(x86_float_state64_t, __fpu_fcw));
        static_assert_no_msg(offsetof(x86_avx_state64_t, __fpu_xmm0) == offsetof(x86_float_state64_t, __fpu_xmm0));

        x86_avx_state64_t State;
        if (lpContext->ContextFlags & CONTEXT_XSTATE & CONTEXT_AREA_MASK)
        {
            StateFlavor = x86_AVX_STATE64;
            StateCount = sizeof(State) / sizeof(natural_t);
        }
        else
        {
            StateFlavor = x86_FLOAT_STATE64;
            StateCount = sizeof(x86_float_state64_t) / sizeof(natural_t);
        }
#else
        x86_float_state64_t State;
        StateFlavor = x86_FLOAT_STATE64;
        StateCount = sizeof(State) / sizeof(natural_t);
#endif
#else
#error Unexpected architecture.
#endif

        // If we're setting only one of the floating point or extended registers (of which Mach supports only
        // the xmm values) then we don't have values for the other set. This is a problem since Mach only
        // supports setting both groups as a single unit. So in this case we'll need to fetch the current
        // values first.
        if ((lpContext->ContextFlags & CONTEXT_ALL_FLOATING) !=
            CONTEXT_ALL_FLOATING)
        {
            mach_msg_type_number_t StateCountGet = StateCount;
            MachRet = thread_get_state(Port,
                                       StateFlavor,
                                       (thread_state_t)&State,
                                       &StateCountGet);
            if (MachRet != KERN_SUCCESS)
            {
                ASSERT("thread_get_state(FLOAT_STATE) failed: %d\n", MachRet);
                goto EXIT;
            }
            _ASSERTE(StateCountGet == StateCount);
        }

        if (lpContext->ContextFlags & CONTEXT_FLOATING_POINT & CONTEXT_AREA_MASK)
        {
#ifdef _X86_
            *(DWORD*)&State.fpu_fcw = lpContext->FloatSave.ControlWord;
            *(DWORD*)&State.fpu_fsw = lpContext->FloatSave.StatusWord;
            State.fpu_ftw = lpContext->FloatSave.TagWord;
            State.fpu_ip = lpContext->FloatSave.ErrorOffset;
            State.fpu_cs = lpContext->FloatSave.ErrorSelector;
            State.fpu_dp = lpContext->FloatSave.DataOffset;
            State.fpu_ds = lpContext->FloatSave.DataSelector;
            State.fpu_mxcsr = lpContext->FloatSave.Cr0NpxState;

            // Windows stores the floating point registers in a packed layout (each 10-byte register end to
            // end for a total of 80 bytes). But Mach returns each register in a 16-byte structure (presumably
            // for alignment purposes). So we can't just memcpy the registers over in a single block, we need
            // to copy them individually.
            for (int i = 0; i < 8; i++)
                memcpy((&State.fpu_stmm0)[i].mmst_reg, &lpContext->FloatSave.RegisterArea[i * 10], 10);
#elif defined(_AMD64_)
            *(DWORD*)&State.__fpu_fcw = lpContext->FltSave.ControlWord;
            *(DWORD*)&State.__fpu_fsw = lpContext->FltSave.StatusWord;
            State.__fpu_ftw = lpContext->FltSave.TagWord;
            State.__fpu_ip = lpContext->FltSave.ErrorOffset;
            State.__fpu_cs = lpContext->FltSave.ErrorSelector;
            State.__fpu_dp = lpContext->FltSave.DataOffset;
            State.__fpu_ds = lpContext->FltSave.DataSelector;
            State.__fpu_mxcsr = lpContext->FltSave.MxCsr;
            State.__fpu_mxcsrmask = lpContext->FltSave.MxCsr_Mask; // note: we don't save the mask for x86

            // Windows stores the floating point registers in a packed layout (each 10-byte register end to
            // end for a total of 80 bytes). But Mach returns each register in a 16-byte structure (presumably
            // for alignment purposes). So we can't just memcpy the registers over in a single block, we need
            // to copy them individually.
            for (int i = 0; i < 8; i++)
                memcpy((&State.__fpu_stmm0)[i].__mmst_reg, &lpContext->FltSave.FloatRegisters[i], 10);

            memcpy(&State.__fpu_xmm0, &lpContext->Xmm0, 16 * 16);
#else
#error Unexpected architecture.
#endif
        }

#ifdef _X86_
        if (lpContext->ContextFlags & CONTEXT_EXTENDED_REGISTERS & CONTEXT_AREA_MASK)
        {
            // The only extended register information that Mach will tell us about are the xmm register
            // values. Both Windows and Mach store the registers in a packed layout (each of the 8 registers
            // is 16 bytes) so we can simply memcpy them across.
            memcpy(&State.fpu_xmm0, lpContext->ExtendedRegisters + CONTEXT_EXREG_XMM_OFFSET, 8 * 16);
        }
#endif // _X86_

#if defined(_AMD64_) && defined(XSTATE_SUPPORTED)
        if (lpContext->ContextFlags & CONTEXT_XSTATE & CONTEXT_AREA_MASK)
        {
            memcpy(&State.__fpu_ymmh0, lpContext->VectorRegister, 16 * 16);
        }
#endif

        MachRet = thread_set_state(Port,
                                   StateFlavor,
                                   (thread_state_t)&State,
                                   StateCount);
        if (MachRet != KERN_SUCCESS)
        {
            ASSERT("thread_set_state(FLOAT_STATE) failed: %d\n", MachRet);
            goto EXIT;
        }
    }

EXIT:
    return MachRet;
}

/*++
Function:
  SetThreadContext

See MSDN doc.
--*/
BOOL
CONTEXT_SetThreadContext(
    DWORD dwProcessId,
    pthread_t self,
    CONST CONTEXT *lpContext)
{
    BOOL ret = FALSE;

    if (lpContext == NULL)
    {
        ERROR("Invalid lpContext parameter value\n");
        SetLastError(ERROR_NOACCESS);
        goto EXIT;
    }

    if (dwProcessId != GetCurrentProcessId())
    {
        // SetThreadContext() on a thread in another process
        ASSERT("Cross-process SetThreadContext() is not supported\n");
        SetLastError(ERROR_NOACCESS);
        goto EXIT;
    }

    if (self != pthread_self())
    {
        // hThread is in the current process, but isn't the current
        // thread. Extract the CONTEXT from the Mach thread.

        mach_port_t mptPort;

        mptPort = pthread_mach_thread_np(self);

        ret = (CONTEXT_SetThreadContextOnPort(mptPort, lpContext) == KERN_SUCCESS);
    }
    else
    {
        MachSetThreadContext(const_cast<CONTEXT *>(lpContext));
        ASSERT("MachSetThreadContext should never return\n");
    }

EXIT:
    return ret;
}

#endif // !HAVE_MACH_EXCEPTIONS

/*++
Function:
    DBG_FlushInstructionCache: processor-specific portion of
    FlushInstructionCache

See MSDN doc.
--*/
BOOL
DBG_FlushInstructionCache(
    IN LPCVOID lpBaseAddress,
    IN SIZE_T dwSize)
{
#ifndef _ARM_
    // Intrinsic should do the right thing across all platforms (except Linux arm)
    __builtin___clear_cache((char *)lpBaseAddress, (char *)((INT_PTR)lpBaseAddress + dwSize));
#else // _ARM_
    // On Linux/arm (at least on 3.10) we found that there is a problem with __do_cache_op (arch/arm/kernel/traps.c)
    // implementing the cacheflush syscall. cacheflush flushes only the first page in the range [lpBaseAddress, lpBaseAddress + dwSize)
    // and leaves the other pages in an undefined state, which causes random test failures (often due to SIGSEGV) with no particular pattern.
    //
    // As a workaround, we call __builtin___clear_cache on each page separately.

    const SIZE_T pageSize = GetVirtualPageSize();
    INT_PTR begin = (INT_PTR)lpBaseAddress;
    const INT_PTR end = begin + dwSize;

    while (begin < end)
    {
        INT_PTR endOrNextPageBegin = ALIGN_UP(begin + 1, pageSize);
        if (endOrNextPageBegin > end)
            endOrNextPageBegin = end;

        __builtin___clear_cache((char *)begin, (char *)endOrNextPageBegin);
        begin = endOrNextPageBegin;
    }
#endif // _ARM_
    return TRUE;
}

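// Typical call pattern after writing instructions into executable memory (the
// buffer and size names below are illustrative only):
//
//     BYTE* pCode = ...;    // freshly emitted instructions
//     SIZE_T cbCode = ...;  // number of bytes written
//     DBG_FlushInstructionCache(pCode, cbCode); // make them visible to instruction fetch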