1 | /* |
2 | * i386 virtual CPU header |
3 | * |
4 | * Copyright (c) 2003 Fabrice Bellard |
5 | * |
6 | * This library is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU Lesser General Public |
8 | * License as published by the Free Software Foundation; either |
9 | * version 2 of the License, or (at your option) any later version. |
10 | * |
11 | * This library is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
14 | * Lesser General Public License for more details. |
15 | * |
16 | * You should have received a copy of the GNU Lesser General Public |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
18 | */ |
19 | |
20 | #ifndef I386_CPU_H |
21 | #define I386_CPU_H |
22 | |
23 | #include "sysemu/tcg.h" |
24 | #include "cpu-qom.h" |
25 | #include "hyperv-proto.h" |
26 | #include "exec/cpu-defs.h" |
27 | |
28 | /* The x86 has a strong memory model with some store-after-load re-ordering */ |
29 | #define TCG_GUEST_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD) |
30 | |
31 | /* Maximum instruction code size */ |
32 | #define TARGET_MAX_INSN_SIZE 16 |
33 | |
/* Support for self-modifying code even if the modified instruction is
   close to the modifying instruction */
36 | #define TARGET_HAS_PRECISE_SMC |
37 | |
38 | #ifdef TARGET_X86_64 |
39 | #define I386_ELF_MACHINE EM_X86_64 |
40 | #define ELF_MACHINE_UNAME "x86_64" |
41 | #else |
42 | #define I386_ELF_MACHINE EM_386 |
43 | #define ELF_MACHINE_UNAME "i686" |
44 | #endif |
45 | |
46 | enum { |
47 | R_EAX = 0, |
48 | R_ECX = 1, |
49 | R_EDX = 2, |
50 | R_EBX = 3, |
51 | R_ESP = 4, |
52 | R_EBP = 5, |
53 | R_ESI = 6, |
54 | R_EDI = 7, |
55 | R_R8 = 8, |
56 | R_R9 = 9, |
57 | R_R10 = 10, |
58 | R_R11 = 11, |
59 | R_R12 = 12, |
60 | R_R13 = 13, |
61 | R_R14 = 14, |
62 | R_R15 = 15, |
63 | |
64 | R_AL = 0, |
65 | R_CL = 1, |
66 | R_DL = 2, |
67 | R_BL = 3, |
68 | R_AH = 4, |
69 | R_CH = 5, |
70 | R_DH = 6, |
71 | R_BH = 7, |
72 | }; |
73 | |
74 | typedef enum X86Seg { |
75 | R_ES = 0, |
76 | R_CS = 1, |
77 | R_SS = 2, |
78 | R_DS = 3, |
79 | R_FS = 4, |
80 | R_GS = 5, |
81 | R_LDTR = 6, |
82 | R_TR = 7, |
83 | } X86Seg; |
84 | |
85 | /* segment descriptor fields */ |
86 | #define DESC_G_SHIFT 23 |
87 | #define DESC_G_MASK (1 << DESC_G_SHIFT) |
88 | #define DESC_B_SHIFT 22 |
89 | #define DESC_B_MASK (1 << DESC_B_SHIFT) |
90 | #define DESC_L_SHIFT 21 /* x86_64 only : 64 bit code segment */ |
91 | #define DESC_L_MASK (1 << DESC_L_SHIFT) |
92 | #define DESC_AVL_SHIFT 20 |
93 | #define DESC_AVL_MASK (1 << DESC_AVL_SHIFT) |
94 | #define DESC_P_SHIFT 15 |
95 | #define DESC_P_MASK (1 << DESC_P_SHIFT) |
96 | #define DESC_DPL_SHIFT 13 |
97 | #define DESC_DPL_MASK (3 << DESC_DPL_SHIFT) |
98 | #define DESC_S_SHIFT 12 |
99 | #define DESC_S_MASK (1 << DESC_S_SHIFT) |
100 | #define DESC_TYPE_SHIFT 8 |
101 | #define DESC_TYPE_MASK (15 << DESC_TYPE_SHIFT) |
102 | #define DESC_A_MASK (1 << 8) |
103 | |
104 | #define DESC_CS_MASK (1 << 11) /* 1=code segment 0=data segment */ |
105 | #define DESC_C_MASK (1 << 10) /* code: conforming */ |
106 | #define DESC_R_MASK (1 << 9) /* code: readable */ |
107 | |
108 | #define DESC_E_MASK (1 << 10) /* data: expansion direction */ |
109 | #define DESC_W_MASK (1 << 9) /* data: writable */ |
110 | |
111 | #define DESC_TSS_BUSY_MASK (1 << 9) |
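
/*
 * Illustrative sketch (not part of this header): the 'flags' word of a
 * SegmentCache mirrors the high 32 bits of a descriptor, so fields can be
 * pulled out with the masks above, e.g.:
 *
 *     unsigned dpl     = (flags & DESC_DPL_MASK) >> DESC_DPL_SHIFT;
 *     bool     present = (flags & DESC_P_MASK) != 0;
 *     bool     is_code = (flags & DESC_S_MASK) && (flags & DESC_CS_MASK);
 */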
112 | |
113 | /* eflags masks */ |
114 | #define CC_C 0x0001 |
115 | #define CC_P 0x0004 |
116 | #define CC_A 0x0010 |
117 | #define CC_Z 0x0040 |
118 | #define CC_S 0x0080 |
119 | #define CC_O 0x0800 |
120 | |
121 | #define TF_SHIFT 8 |
122 | #define IOPL_SHIFT 12 |
123 | #define VM_SHIFT 17 |
124 | |
125 | #define TF_MASK 0x00000100 |
126 | #define IF_MASK 0x00000200 |
127 | #define DF_MASK 0x00000400 |
128 | #define IOPL_MASK 0x00003000 |
129 | #define NT_MASK 0x00004000 |
130 | #define RF_MASK 0x00010000 |
131 | #define VM_MASK 0x00020000 |
132 | #define AC_MASK 0x00040000 |
133 | #define VIF_MASK 0x00080000 |
134 | #define VIP_MASK 0x00100000 |
135 | #define ID_MASK 0x00200000 |
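
/*
 * Illustrative sketch: the multi-bit eflags fields are decoded with the
 * shifts above, e.g. for an I/O privilege check in vm86 mode:
 *
 *     int  iopl = (eflags & IOPL_MASK) >> IOPL_SHIFT;
 *     bool vm86 = (eflags & VM_MASK) != 0;
 */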
136 | |
/* hidden flags - used internally by QEMU to represent additional cpu
   states. Only the INHIBIT_IRQ, SMM and SVMI are not redundant. We
   avoid using the IOPL_MASK, TF_MASK, VM_MASK and AC_MASK bit
   positions to ease ORing with eflags. */
141 | /* current cpl */ |
142 | #define HF_CPL_SHIFT 0 |
143 | /* true if hardware interrupts must be disabled for next instruction */ |
144 | #define HF_INHIBIT_IRQ_SHIFT 3 |
/* 16 or 32 bit segments */
146 | #define HF_CS32_SHIFT 4 |
147 | #define HF_SS32_SHIFT 5 |
148 | /* zero base for DS, ES and SS : can be '0' only in 32 bit CS segment */ |
149 | #define HF_ADDSEG_SHIFT 6 |
150 | /* copy of CR0.PE (protected mode) */ |
151 | #define HF_PE_SHIFT 7 |
152 | #define HF_TF_SHIFT 8 /* must be same as eflags */ |
153 | #define HF_MP_SHIFT 9 /* the order must be MP, EM, TS */ |
154 | #define HF_EM_SHIFT 10 |
155 | #define HF_TS_SHIFT 11 |
156 | #define HF_IOPL_SHIFT 12 /* must be same as eflags */ |
157 | #define HF_LMA_SHIFT 14 /* only used on x86_64: long mode active */ |
158 | #define HF_CS64_SHIFT 15 /* only used on x86_64: 64 bit code segment */ |
159 | #define HF_RF_SHIFT 16 /* must be same as eflags */ |
160 | #define HF_VM_SHIFT 17 /* must be same as eflags */ |
161 | #define HF_AC_SHIFT 18 /* must be same as eflags */ |
162 | #define HF_SMM_SHIFT 19 /* CPU in SMM mode */ |
163 | #define HF_SVME_SHIFT 20 /* SVME enabled (copy of EFER.SVME) */ |
164 | #define HF_GUEST_SHIFT 21 /* SVM intercepts are active */ |
165 | #define HF_OSFXSR_SHIFT 22 /* CR4.OSFXSR */ |
166 | #define HF_SMAP_SHIFT 23 /* CR4.SMAP */ |
167 | #define HF_IOBPT_SHIFT 24 /* an io breakpoint enabled */ |
168 | #define HF_MPX_EN_SHIFT 25 /* MPX Enabled (CR4+XCR0+BNDCFGx) */ |
169 | #define HF_MPX_IU_SHIFT 26 /* BND registers in-use */ |
170 | |
171 | #define HF_CPL_MASK (3 << HF_CPL_SHIFT) |
172 | #define HF_INHIBIT_IRQ_MASK (1 << HF_INHIBIT_IRQ_SHIFT) |
173 | #define HF_CS32_MASK (1 << HF_CS32_SHIFT) |
174 | #define HF_SS32_MASK (1 << HF_SS32_SHIFT) |
175 | #define HF_ADDSEG_MASK (1 << HF_ADDSEG_SHIFT) |
176 | #define HF_PE_MASK (1 << HF_PE_SHIFT) |
177 | #define HF_TF_MASK (1 << HF_TF_SHIFT) |
178 | #define HF_MP_MASK (1 << HF_MP_SHIFT) |
179 | #define HF_EM_MASK (1 << HF_EM_SHIFT) |
180 | #define HF_TS_MASK (1 << HF_TS_SHIFT) |
181 | #define HF_IOPL_MASK (3 << HF_IOPL_SHIFT) |
182 | #define HF_LMA_MASK (1 << HF_LMA_SHIFT) |
183 | #define HF_CS64_MASK (1 << HF_CS64_SHIFT) |
184 | #define HF_RF_MASK (1 << HF_RF_SHIFT) |
185 | #define HF_VM_MASK (1 << HF_VM_SHIFT) |
186 | #define HF_AC_MASK (1 << HF_AC_SHIFT) |
187 | #define HF_SMM_MASK (1 << HF_SMM_SHIFT) |
188 | #define HF_SVME_MASK (1 << HF_SVME_SHIFT) |
189 | #define HF_GUEST_MASK (1 << HF_GUEST_SHIFT) |
190 | #define HF_OSFXSR_MASK (1 << HF_OSFXSR_SHIFT) |
191 | #define HF_SMAP_MASK (1 << HF_SMAP_SHIFT) |
192 | #define HF_IOBPT_MASK (1 << HF_IOBPT_SHIFT) |
193 | #define HF_MPX_EN_MASK (1 << HF_MPX_EN_SHIFT) |
194 | #define HF_MPX_IU_MASK (1 << HF_MPX_IU_SHIFT) |
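
/*
 * Illustrative sketch: hflags is queried with the masks above; for example,
 * the current privilege level (bits 0-1) and a 64-bit code-segment check
 * look like:
 *
 *     int  cpl    = env->hflags & HF_CPL_MASK;
 *     bool code64 = (env->hflags & HF_CS64_MASK) != 0;
 */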
195 | |
196 | /* hflags2 */ |
197 | |
198 | #define HF2_GIF_SHIFT 0 /* if set CPU takes interrupts */ |
199 | #define HF2_HIF_SHIFT 1 /* value of IF_MASK when entering SVM */ |
200 | #define HF2_NMI_SHIFT 2 /* CPU serving NMI */ |
201 | #define HF2_VINTR_SHIFT 3 /* value of V_INTR_MASKING bit */ |
202 | #define HF2_SMM_INSIDE_NMI_SHIFT 4 /* CPU serving SMI nested inside NMI */ |
203 | #define HF2_MPX_PR_SHIFT 5 /* BNDCFGx.BNDPRESERVE */ |
204 | #define HF2_NPT_SHIFT 6 /* Nested Paging enabled */ |
205 | |
206 | #define HF2_GIF_MASK (1 << HF2_GIF_SHIFT) |
207 | #define HF2_HIF_MASK (1 << HF2_HIF_SHIFT) |
208 | #define HF2_NMI_MASK (1 << HF2_NMI_SHIFT) |
209 | #define HF2_VINTR_MASK (1 << HF2_VINTR_SHIFT) |
210 | #define HF2_SMM_INSIDE_NMI_MASK (1 << HF2_SMM_INSIDE_NMI_SHIFT) |
211 | #define HF2_MPX_PR_MASK (1 << HF2_MPX_PR_SHIFT) |
212 | #define HF2_NPT_MASK (1 << HF2_NPT_SHIFT) |
213 | |
214 | #define CR0_PE_SHIFT 0 |
215 | #define CR0_MP_SHIFT 1 |
216 | |
217 | #define CR0_PE_MASK (1U << 0) |
218 | #define CR0_MP_MASK (1U << 1) |
219 | #define CR0_EM_MASK (1U << 2) |
220 | #define CR0_TS_MASK (1U << 3) |
221 | #define CR0_ET_MASK (1U << 4) |
222 | #define CR0_NE_MASK (1U << 5) |
223 | #define CR0_WP_MASK (1U << 16) |
224 | #define CR0_AM_MASK (1U << 18) |
225 | #define CR0_PG_MASK (1U << 31) |
226 | |
227 | #define CR4_VME_MASK (1U << 0) |
228 | #define CR4_PVI_MASK (1U << 1) |
229 | #define CR4_TSD_MASK (1U << 2) |
230 | #define CR4_DE_MASK (1U << 3) |
231 | #define CR4_PSE_MASK (1U << 4) |
232 | #define CR4_PAE_MASK (1U << 5) |
233 | #define CR4_MCE_MASK (1U << 6) |
234 | #define CR4_PGE_MASK (1U << 7) |
235 | #define CR4_PCE_MASK (1U << 8) |
236 | #define CR4_OSFXSR_SHIFT 9 |
237 | #define CR4_OSFXSR_MASK (1U << CR4_OSFXSR_SHIFT) |
238 | #define CR4_OSXMMEXCPT_MASK (1U << 10) |
239 | #define CR4_LA57_MASK (1U << 12) |
240 | #define CR4_VMXE_MASK (1U << 13) |
241 | #define CR4_SMXE_MASK (1U << 14) |
242 | #define CR4_FSGSBASE_MASK (1U << 16) |
243 | #define CR4_PCIDE_MASK (1U << 17) |
244 | #define CR4_OSXSAVE_MASK (1U << 18) |
245 | #define CR4_SMEP_MASK (1U << 20) |
246 | #define CR4_SMAP_MASK (1U << 21) |
247 | #define CR4_PKE_MASK (1U << 22) |
248 | |
249 | #define DR6_BD (1 << 13) |
250 | #define DR6_BS (1 << 14) |
251 | #define DR6_BT (1 << 15) |
252 | #define DR6_FIXED_1 0xffff0ff0 |
253 | |
254 | #define DR7_GD (1 << 13) |
255 | #define DR7_TYPE_SHIFT 16 |
256 | #define DR7_LEN_SHIFT 18 |
257 | #define DR7_FIXED_1 0x00000400 |
258 | #define DR7_GLOBAL_BP_MASK 0xaa |
259 | #define DR7_LOCAL_BP_MASK 0x55 |
260 | #define DR7_MAX_BP 4 |
261 | #define DR7_TYPE_BP_INST 0x0 |
262 | #define DR7_TYPE_DATA_WR 0x1 |
263 | #define DR7_TYPE_IO_RW 0x2 |
264 | #define DR7_TYPE_DATA_RW 0x3 |
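
/*
 * Illustrative sketch: each of the DR7_MAX_BP breakpoints has 2-bit type
 * and length fields packed above bit 16, plus local/global enable bits in
 * the low byte; for breakpoint n:
 *
 *     int type    = (dr7 >> (DR7_TYPE_SHIFT + n * 4)) & 3;
 *     int len     = (dr7 >> (DR7_LEN_SHIFT + n * 4)) & 3;
 *     int enabled = (dr7 >> (n * 2)) & 3;    ( local and/or global )
 */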
265 | |
266 | #define PG_PRESENT_BIT 0 |
267 | #define PG_RW_BIT 1 |
268 | #define PG_USER_BIT 2 |
269 | #define PG_PWT_BIT 3 |
270 | #define PG_PCD_BIT 4 |
271 | #define PG_ACCESSED_BIT 5 |
272 | #define PG_DIRTY_BIT 6 |
273 | #define PG_PSE_BIT 7 |
274 | #define PG_GLOBAL_BIT 8 |
275 | #define PG_PSE_PAT_BIT 12 |
276 | #define PG_PKRU_BIT 59 |
277 | #define PG_NX_BIT 63 |
278 | |
279 | #define PG_PRESENT_MASK (1 << PG_PRESENT_BIT) |
280 | #define PG_RW_MASK (1 << PG_RW_BIT) |
281 | #define PG_USER_MASK (1 << PG_USER_BIT) |
282 | #define PG_PWT_MASK (1 << PG_PWT_BIT) |
283 | #define PG_PCD_MASK (1 << PG_PCD_BIT) |
284 | #define PG_ACCESSED_MASK (1 << PG_ACCESSED_BIT) |
285 | #define PG_DIRTY_MASK (1 << PG_DIRTY_BIT) |
286 | #define PG_PSE_MASK (1 << PG_PSE_BIT) |
287 | #define PG_GLOBAL_MASK (1 << PG_GLOBAL_BIT) |
288 | #define PG_PSE_PAT_MASK (1 << PG_PSE_PAT_BIT) |
289 | #define PG_ADDRESS_MASK 0x000ffffffffff000LL |
290 | #define PG_HI_RSVD_MASK (PG_ADDRESS_MASK & ~PHYS_ADDR_MASK) |
291 | #define PG_HI_USER_MASK 0x7ff0000000000000LL |
292 | #define PG_PKRU_MASK (15ULL << PG_PKRU_BIT) |
293 | #define PG_NX_MASK (1ULL << PG_NX_BIT) |
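
/*
 * Illustrative sketch: a page-table entry is checked against these masks
 * during a walk, e.g.:
 *
 *     bool present  = pte & PG_PRESENT_MASK;
 *     bool writable = pte & PG_RW_MASK;
 *     bool noexec   = pte & PG_NX_MASK;         ( 64-bit PTEs only )
 *     uint64_t pa   = pte & PG_ADDRESS_MASK;    ( 4 KiB-aligned frame )
 */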
294 | |
295 | #define PG_ERROR_W_BIT 1 |
296 | |
297 | #define PG_ERROR_P_MASK 0x01 |
298 | #define PG_ERROR_W_MASK (1 << PG_ERROR_W_BIT) |
299 | #define PG_ERROR_U_MASK 0x04 |
300 | #define PG_ERROR_RSVD_MASK 0x08 |
301 | #define PG_ERROR_I_D_MASK 0x10 |
302 | #define PG_ERROR_PK_MASK 0x20 |
303 | |
#define MCG_CTL_P (1ULL<<8) /* MCG_CTL register available */
305 | #define MCG_SER_P (1ULL<<24) /* MCA recovery/new status bits */ |
306 | #define MCG_LMCE_P (1ULL<<27) /* Local Machine Check Supported */ |
307 | |
308 | #define MCE_CAP_DEF (MCG_CTL_P|MCG_SER_P) |
309 | #define MCE_BANKS_DEF 10 |
310 | |
311 | #define MCG_CAP_BANKS_MASK 0xff |
312 | |
313 | #define MCG_STATUS_RIPV (1ULL<<0) /* restart ip valid */ |
314 | #define MCG_STATUS_EIPV (1ULL<<1) /* ip points to correct instruction */ |
315 | #define MCG_STATUS_MCIP (1ULL<<2) /* machine check in progress */ |
316 | #define MCG_STATUS_LMCE (1ULL<<3) /* Local MCE signaled */ |
317 | |
318 | #define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Local MCE enabled */ |
319 | |
320 | #define MCI_STATUS_VAL (1ULL<<63) /* valid error */ |
321 | #define MCI_STATUS_OVER (1ULL<<62) /* previous errors lost */ |
322 | #define MCI_STATUS_UC (1ULL<<61) /* uncorrected error */ |
323 | #define MCI_STATUS_EN (1ULL<<60) /* error enabled */ |
324 | #define MCI_STATUS_MISCV (1ULL<<59) /* misc error reg. valid */ |
325 | #define MCI_STATUS_ADDRV (1ULL<<58) /* addr reg. valid */ |
326 | #define MCI_STATUS_PCC (1ULL<<57) /* processor context corrupt */ |
327 | #define MCI_STATUS_S (1ULL<<56) /* Signaled machine check */ |
328 | #define MCI_STATUS_AR (1ULL<<55) /* Action required */ |
329 | |
330 | /* MISC register defines */ |
331 | #define MCM_ADDR_SEGOFF 0 /* segment offset */ |
332 | #define MCM_ADDR_LINEAR 1 /* linear address */ |
333 | #define MCM_ADDR_PHYS 2 /* physical address */ |
334 | #define MCM_ADDR_MEM 3 /* memory address */ |
335 | #define MCM_ADDR_GENERIC 7 /* generic */ |
336 | |
337 | #define MSR_IA32_TSC 0x10 |
338 | #define MSR_IA32_APICBASE 0x1b |
339 | #define MSR_IA32_APICBASE_BSP (1<<8) |
340 | #define MSR_IA32_APICBASE_ENABLE (1<<11) |
341 | #define MSR_IA32_APICBASE_EXTD (1 << 10) |
342 | #define MSR_IA32_APICBASE_BASE (0xfffffU<<12) |
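
/*
 * Illustrative sketch: the APIC base MSR packs the mapping address and the
 * enable bits together, so they are separated as:
 *
 *     uint64_t base    = apicbase & MSR_IA32_APICBASE_BASE;
 *     bool     enabled = apicbase & MSR_IA32_APICBASE_ENABLE;
 *     bool     x2apic  = apicbase & MSR_IA32_APICBASE_EXTD;
 */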
343 | #define MSR_IA32_FEATURE_CONTROL 0x0000003a |
344 | #define MSR_TSC_ADJUST 0x0000003b |
345 | #define MSR_IA32_SPEC_CTRL 0x48 |
346 | #define MSR_VIRT_SSBD 0xc001011f |
347 | #define MSR_IA32_PRED_CMD 0x49 |
348 | #define MSR_IA32_CORE_CAPABILITY 0xcf |
349 | #define MSR_IA32_ARCH_CAPABILITIES 0x10a |
350 | #define MSR_IA32_TSCDEADLINE 0x6e0 |
351 | |
352 | #define FEATURE_CONTROL_LOCKED (1<<0) |
353 | #define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX (1<<2) |
354 | #define FEATURE_CONTROL_LMCE (1<<20) |
355 | |
356 | #define MSR_P6_PERFCTR0 0xc1 |
357 | |
358 | #define MSR_IA32_SMBASE 0x9e |
359 | #define MSR_SMI_COUNT 0x34 |
360 | #define MSR_MTRRcap 0xfe |
361 | #define MSR_MTRRcap_VCNT 8 |
362 | #define MSR_MTRRcap_FIXRANGE_SUPPORT (1 << 8) |
363 | #define MSR_MTRRcap_WC_SUPPORTED (1 << 10) |
364 | |
365 | #define MSR_IA32_SYSENTER_CS 0x174 |
366 | #define MSR_IA32_SYSENTER_ESP 0x175 |
367 | #define MSR_IA32_SYSENTER_EIP 0x176 |
368 | |
369 | #define MSR_MCG_CAP 0x179 |
370 | #define MSR_MCG_STATUS 0x17a |
371 | #define MSR_MCG_CTL 0x17b |
372 | #define MSR_MCG_EXT_CTL 0x4d0 |
373 | |
374 | #define MSR_P6_EVNTSEL0 0x186 |
375 | |
376 | #define MSR_IA32_PERF_STATUS 0x198 |
377 | |
378 | #define MSR_IA32_MISC_ENABLE 0x1a0 |
379 | /* Indicates good rep/movs microcode on some processors: */ |
380 | #define MSR_IA32_MISC_ENABLE_DEFAULT 1 |
381 | #define MSR_IA32_MISC_ENABLE_MWAIT (1ULL << 18) |
382 | |
383 | #define MSR_MTRRphysBase(reg) (0x200 + 2 * (reg)) |
384 | #define MSR_MTRRphysMask(reg) (0x200 + 2 * (reg) + 1) |
385 | |
386 | #define MSR_MTRRphysIndex(addr) ((((addr) & ~1u) - 0x200) / 2) |
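
/*
 * Illustrative note: the variable-range MTRR MSRs come in base/mask pairs
 * starting at 0x200, and MSR_MTRRphysIndex() inverts the mapping:
 *
 *     MSR_MTRRphysBase(1)      = 0x202
 *     MSR_MTRRphysMask(1)      = 0x203
 *     MSR_MTRRphysIndex(0x203) = 1     ( the mask bit is stripped first )
 */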
387 | |
388 | #define MSR_MTRRfix64K_00000 0x250 |
389 | #define MSR_MTRRfix16K_80000 0x258 |
390 | #define MSR_MTRRfix16K_A0000 0x259 |
391 | #define MSR_MTRRfix4K_C0000 0x268 |
392 | #define MSR_MTRRfix4K_C8000 0x269 |
393 | #define MSR_MTRRfix4K_D0000 0x26a |
394 | #define MSR_MTRRfix4K_D8000 0x26b |
395 | #define MSR_MTRRfix4K_E0000 0x26c |
396 | #define MSR_MTRRfix4K_E8000 0x26d |
397 | #define MSR_MTRRfix4K_F0000 0x26e |
398 | #define MSR_MTRRfix4K_F8000 0x26f |
399 | |
400 | #define MSR_PAT 0x277 |
401 | |
402 | #define MSR_MTRRdefType 0x2ff |
403 | |
404 | #define MSR_CORE_PERF_FIXED_CTR0 0x309 |
405 | #define MSR_CORE_PERF_FIXED_CTR1 0x30a |
406 | #define MSR_CORE_PERF_FIXED_CTR2 0x30b |
407 | #define MSR_CORE_PERF_FIXED_CTR_CTRL 0x38d |
408 | #define MSR_CORE_PERF_GLOBAL_STATUS 0x38e |
409 | #define MSR_CORE_PERF_GLOBAL_CTRL 0x38f |
410 | #define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x390 |
411 | |
412 | #define MSR_MC0_CTL 0x400 |
413 | #define MSR_MC0_STATUS 0x401 |
414 | #define MSR_MC0_ADDR 0x402 |
415 | #define MSR_MC0_MISC 0x403 |
416 | |
417 | #define MSR_IA32_RTIT_OUTPUT_BASE 0x560 |
418 | #define MSR_IA32_RTIT_OUTPUT_MASK 0x561 |
419 | #define MSR_IA32_RTIT_CTL 0x570 |
420 | #define MSR_IA32_RTIT_STATUS 0x571 |
421 | #define MSR_IA32_RTIT_CR3_MATCH 0x572 |
422 | #define MSR_IA32_RTIT_ADDR0_A 0x580 |
423 | #define MSR_IA32_RTIT_ADDR0_B 0x581 |
424 | #define MSR_IA32_RTIT_ADDR1_A 0x582 |
425 | #define MSR_IA32_RTIT_ADDR1_B 0x583 |
426 | #define MSR_IA32_RTIT_ADDR2_A 0x584 |
427 | #define MSR_IA32_RTIT_ADDR2_B 0x585 |
428 | #define MSR_IA32_RTIT_ADDR3_A 0x586 |
429 | #define MSR_IA32_RTIT_ADDR3_B 0x587 |
430 | #define MAX_RTIT_ADDRS 8 |
431 | |
432 | #define MSR_EFER 0xc0000080 |
433 | |
434 | #define MSR_EFER_SCE (1 << 0) |
435 | #define MSR_EFER_LME (1 << 8) |
436 | #define MSR_EFER_LMA (1 << 10) |
437 | #define MSR_EFER_NXE (1 << 11) |
438 | #define MSR_EFER_SVME (1 << 12) |
439 | #define MSR_EFER_FFXSR (1 << 14) |
440 | |
441 | #define MSR_STAR 0xc0000081 |
442 | #define MSR_LSTAR 0xc0000082 |
443 | #define MSR_CSTAR 0xc0000083 |
444 | #define MSR_FMASK 0xc0000084 |
445 | #define MSR_FSBASE 0xc0000100 |
446 | #define MSR_GSBASE 0xc0000101 |
447 | #define MSR_KERNELGSBASE 0xc0000102 |
448 | #define MSR_TSC_AUX 0xc0000103 |
449 | |
450 | #define MSR_VM_HSAVE_PA 0xc0010117 |
451 | |
452 | #define MSR_IA32_BNDCFGS 0x00000d90 |
453 | #define MSR_IA32_XSS 0x00000da0 |
454 | |
455 | #define XSTATE_FP_BIT 0 |
456 | #define XSTATE_SSE_BIT 1 |
457 | #define XSTATE_YMM_BIT 2 |
458 | #define XSTATE_BNDREGS_BIT 3 |
459 | #define XSTATE_BNDCSR_BIT 4 |
460 | #define XSTATE_OPMASK_BIT 5 |
461 | #define XSTATE_ZMM_Hi256_BIT 6 |
462 | #define XSTATE_Hi16_ZMM_BIT 7 |
463 | #define XSTATE_PKRU_BIT 9 |
464 | |
465 | #define XSTATE_FP_MASK (1ULL << XSTATE_FP_BIT) |
466 | #define XSTATE_SSE_MASK (1ULL << XSTATE_SSE_BIT) |
467 | #define XSTATE_YMM_MASK (1ULL << XSTATE_YMM_BIT) |
468 | #define XSTATE_BNDREGS_MASK (1ULL << XSTATE_BNDREGS_BIT) |
469 | #define XSTATE_BNDCSR_MASK (1ULL << XSTATE_BNDCSR_BIT) |
470 | #define XSTATE_OPMASK_MASK (1ULL << XSTATE_OPMASK_BIT) |
471 | #define XSTATE_ZMM_Hi256_MASK (1ULL << XSTATE_ZMM_Hi256_BIT) |
472 | #define XSTATE_Hi16_ZMM_MASK (1ULL << XSTATE_Hi16_ZMM_BIT) |
473 | #define XSTATE_PKRU_MASK (1ULL << XSTATE_PKRU_BIT) |
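
/*
 * Illustrative sketch: XCR0 and the XSTATE_*_MASK values share the same bit
 * layout, so "is AVX state enabled" reduces to:
 *
 *     uint64_t need = XSTATE_SSE_MASK | XSTATE_YMM_MASK;
 *     bool avx_on   = (xcr0 & need) == need;
 */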
474 | |
475 | /* CPUID feature words */ |
476 | typedef enum FeatureWord { |
477 | FEAT_1_EDX, /* CPUID[1].EDX */ |
478 | FEAT_1_ECX, /* CPUID[1].ECX */ |
479 | FEAT_7_0_EBX, /* CPUID[EAX=7,ECX=0].EBX */ |
480 | FEAT_7_0_ECX, /* CPUID[EAX=7,ECX=0].ECX */ |
481 | FEAT_7_0_EDX, /* CPUID[EAX=7,ECX=0].EDX */ |
482 | FEAT_7_1_EAX, /* CPUID[EAX=7,ECX=1].EAX */ |
483 | FEAT_8000_0001_EDX, /* CPUID[8000_0001].EDX */ |
484 | FEAT_8000_0001_ECX, /* CPUID[8000_0001].ECX */ |
485 | FEAT_8000_0007_EDX, /* CPUID[8000_0007].EDX */ |
486 | FEAT_8000_0008_EBX, /* CPUID[8000_0008].EBX */ |
487 | FEAT_C000_0001_EDX, /* CPUID[C000_0001].EDX */ |
488 | FEAT_KVM, /* CPUID[4000_0001].EAX (KVM_CPUID_FEATURES) */ |
489 | FEAT_KVM_HINTS, /* CPUID[4000_0001].EDX */ |
490 | FEAT_HYPERV_EAX, /* CPUID[4000_0003].EAX */ |
491 | FEAT_HYPERV_EBX, /* CPUID[4000_0003].EBX */ |
492 | FEAT_HYPERV_EDX, /* CPUID[4000_0003].EDX */ |
493 | FEAT_HV_RECOMM_EAX, /* CPUID[4000_0004].EAX */ |
494 | FEAT_HV_NESTED_EAX, /* CPUID[4000_000A].EAX */ |
495 | FEAT_SVM, /* CPUID[8000_000A].EDX */ |
496 | FEAT_XSAVE, /* CPUID[EAX=0xd,ECX=1].EAX */ |
497 | FEAT_6_EAX, /* CPUID[6].EAX */ |
498 | FEAT_XSAVE_COMP_LO, /* CPUID[EAX=0xd,ECX=0].EAX */ |
499 | FEAT_XSAVE_COMP_HI, /* CPUID[EAX=0xd,ECX=0].EDX */ |
500 | FEAT_ARCH_CAPABILITIES, |
501 | FEAT_CORE_CAPABILITY, |
502 | FEATURE_WORDS, |
503 | } FeatureWord; |
504 | |
505 | typedef uint32_t FeatureWordArray[FEATURE_WORDS]; |
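
/*
 * Illustrative sketch: feature words are indexed by FeatureWord and tested
 * with the CPUID_* bit masks defined below, e.g.:
 *
 *     if (env->features[FEAT_1_EDX] & CPUID_SSE2) {
 *         ... SSE2 is exposed to the guest ...
 *     }
 */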
506 | |
507 | /* cpuid_features bits */ |
508 | #define CPUID_FP87 (1U << 0) |
509 | #define CPUID_VME (1U << 1) |
510 | #define CPUID_DE (1U << 2) |
511 | #define CPUID_PSE (1U << 3) |
512 | #define CPUID_TSC (1U << 4) |
513 | #define CPUID_MSR (1U << 5) |
514 | #define CPUID_PAE (1U << 6) |
515 | #define CPUID_MCE (1U << 7) |
516 | #define CPUID_CX8 (1U << 8) |
517 | #define CPUID_APIC (1U << 9) |
518 | #define CPUID_SEP (1U << 11) /* sysenter/sysexit */ |
519 | #define CPUID_MTRR (1U << 12) |
520 | #define CPUID_PGE (1U << 13) |
521 | #define CPUID_MCA (1U << 14) |
522 | #define CPUID_CMOV (1U << 15) |
523 | #define CPUID_PAT (1U << 16) |
524 | #define CPUID_PSE36 (1U << 17) |
525 | #define CPUID_PN (1U << 18) |
526 | #define CPUID_CLFLUSH (1U << 19) |
527 | #define CPUID_DTS (1U << 21) |
528 | #define CPUID_ACPI (1U << 22) |
529 | #define CPUID_MMX (1U << 23) |
530 | #define CPUID_FXSR (1U << 24) |
531 | #define CPUID_SSE (1U << 25) |
532 | #define CPUID_SSE2 (1U << 26) |
533 | #define CPUID_SS (1U << 27) |
534 | #define CPUID_HT (1U << 28) |
535 | #define CPUID_TM (1U << 29) |
536 | #define CPUID_IA64 (1U << 30) |
537 | #define CPUID_PBE (1U << 31) |
538 | |
539 | #define CPUID_EXT_SSE3 (1U << 0) |
540 | #define CPUID_EXT_PCLMULQDQ (1U << 1) |
541 | #define CPUID_EXT_DTES64 (1U << 2) |
542 | #define CPUID_EXT_MONITOR (1U << 3) |
543 | #define CPUID_EXT_DSCPL (1U << 4) |
544 | #define CPUID_EXT_VMX (1U << 5) |
545 | #define CPUID_EXT_SMX (1U << 6) |
546 | #define CPUID_EXT_EST (1U << 7) |
547 | #define CPUID_EXT_TM2 (1U << 8) |
548 | #define CPUID_EXT_SSSE3 (1U << 9) |
549 | #define CPUID_EXT_CID (1U << 10) |
550 | #define CPUID_EXT_FMA (1U << 12) |
551 | #define CPUID_EXT_CX16 (1U << 13) |
552 | #define CPUID_EXT_XTPR (1U << 14) |
553 | #define CPUID_EXT_PDCM (1U << 15) |
554 | #define CPUID_EXT_PCID (1U << 17) |
555 | #define CPUID_EXT_DCA (1U << 18) |
556 | #define CPUID_EXT_SSE41 (1U << 19) |
557 | #define CPUID_EXT_SSE42 (1U << 20) |
558 | #define CPUID_EXT_X2APIC (1U << 21) |
559 | #define CPUID_EXT_MOVBE (1U << 22) |
560 | #define CPUID_EXT_POPCNT (1U << 23) |
561 | #define CPUID_EXT_TSC_DEADLINE_TIMER (1U << 24) |
562 | #define CPUID_EXT_AES (1U << 25) |
563 | #define CPUID_EXT_XSAVE (1U << 26) |
564 | #define CPUID_EXT_OSXSAVE (1U << 27) |
565 | #define CPUID_EXT_AVX (1U << 28) |
566 | #define CPUID_EXT_F16C (1U << 29) |
567 | #define CPUID_EXT_RDRAND (1U << 30) |
568 | #define CPUID_EXT_HYPERVISOR (1U << 31) |
569 | |
570 | #define CPUID_EXT2_FPU (1U << 0) |
571 | #define CPUID_EXT2_VME (1U << 1) |
572 | #define CPUID_EXT2_DE (1U << 2) |
573 | #define CPUID_EXT2_PSE (1U << 3) |
574 | #define CPUID_EXT2_TSC (1U << 4) |
575 | #define CPUID_EXT2_MSR (1U << 5) |
576 | #define CPUID_EXT2_PAE (1U << 6) |
577 | #define CPUID_EXT2_MCE (1U << 7) |
578 | #define CPUID_EXT2_CX8 (1U << 8) |
579 | #define CPUID_EXT2_APIC (1U << 9) |
580 | #define CPUID_EXT2_SYSCALL (1U << 11) |
581 | #define CPUID_EXT2_MTRR (1U << 12) |
582 | #define CPUID_EXT2_PGE (1U << 13) |
583 | #define CPUID_EXT2_MCA (1U << 14) |
584 | #define CPUID_EXT2_CMOV (1U << 15) |
585 | #define CPUID_EXT2_PAT (1U << 16) |
586 | #define CPUID_EXT2_PSE36 (1U << 17) |
587 | #define CPUID_EXT2_MP (1U << 19) |
588 | #define CPUID_EXT2_NX (1U << 20) |
589 | #define CPUID_EXT2_MMXEXT (1U << 22) |
590 | #define CPUID_EXT2_MMX (1U << 23) |
591 | #define CPUID_EXT2_FXSR (1U << 24) |
592 | #define CPUID_EXT2_FFXSR (1U << 25) |
593 | #define CPUID_EXT2_PDPE1GB (1U << 26) |
594 | #define CPUID_EXT2_RDTSCP (1U << 27) |
595 | #define CPUID_EXT2_LM (1U << 29) |
596 | #define CPUID_EXT2_3DNOWEXT (1U << 30) |
597 | #define CPUID_EXT2_3DNOW (1U << 31) |
598 | |
/* CPUID[8000_0001].EDX bits that are aliases of CPUID[1].EDX bits on AMD CPUs */
600 | #define CPUID_EXT2_AMD_ALIASES (CPUID_EXT2_FPU | CPUID_EXT2_VME | \ |
601 | CPUID_EXT2_DE | CPUID_EXT2_PSE | \ |
602 | CPUID_EXT2_TSC | CPUID_EXT2_MSR | \ |
603 | CPUID_EXT2_PAE | CPUID_EXT2_MCE | \ |
604 | CPUID_EXT2_CX8 | CPUID_EXT2_APIC | \ |
605 | CPUID_EXT2_MTRR | CPUID_EXT2_PGE | \ |
606 | CPUID_EXT2_MCA | CPUID_EXT2_CMOV | \ |
607 | CPUID_EXT2_PAT | CPUID_EXT2_PSE36 | \ |
608 | CPUID_EXT2_MMX | CPUID_EXT2_FXSR) |
609 | |
610 | #define CPUID_EXT3_LAHF_LM (1U << 0) |
611 | #define CPUID_EXT3_CMP_LEG (1U << 1) |
612 | #define CPUID_EXT3_SVM (1U << 2) |
613 | #define CPUID_EXT3_EXTAPIC (1U << 3) |
614 | #define CPUID_EXT3_CR8LEG (1U << 4) |
615 | #define CPUID_EXT3_ABM (1U << 5) |
616 | #define CPUID_EXT3_SSE4A (1U << 6) |
617 | #define CPUID_EXT3_MISALIGNSSE (1U << 7) |
618 | #define CPUID_EXT3_3DNOWPREFETCH (1U << 8) |
619 | #define CPUID_EXT3_OSVW (1U << 9) |
620 | #define CPUID_EXT3_IBS (1U << 10) |
621 | #define CPUID_EXT3_XOP (1U << 11) |
622 | #define CPUID_EXT3_SKINIT (1U << 12) |
623 | #define CPUID_EXT3_WDT (1U << 13) |
624 | #define CPUID_EXT3_LWP (1U << 15) |
625 | #define CPUID_EXT3_FMA4 (1U << 16) |
626 | #define CPUID_EXT3_TCE (1U << 17) |
627 | #define CPUID_EXT3_NODEID (1U << 19) |
628 | #define CPUID_EXT3_TBM (1U << 21) |
629 | #define CPUID_EXT3_TOPOEXT (1U << 22) |
630 | #define CPUID_EXT3_PERFCORE (1U << 23) |
631 | #define CPUID_EXT3_PERFNB (1U << 24) |
632 | |
633 | #define CPUID_SVM_NPT (1U << 0) |
634 | #define CPUID_SVM_LBRV (1U << 1) |
635 | #define CPUID_SVM_SVMLOCK (1U << 2) |
636 | #define CPUID_SVM_NRIPSAVE (1U << 3) |
637 | #define CPUID_SVM_TSCSCALE (1U << 4) |
638 | #define CPUID_SVM_VMCBCLEAN (1U << 5) |
639 | #define CPUID_SVM_FLUSHASID (1U << 6) |
640 | #define CPUID_SVM_DECODEASSIST (1U << 7) |
641 | #define CPUID_SVM_PAUSEFILTER (1U << 10) |
642 | #define CPUID_SVM_PFTHRESHOLD (1U << 12) |
643 | |
644 | #define CPUID_7_0_EBX_FSGSBASE (1U << 0) |
645 | #define CPUID_7_0_EBX_BMI1 (1U << 3) |
646 | #define CPUID_7_0_EBX_HLE (1U << 4) |
647 | #define CPUID_7_0_EBX_AVX2 (1U << 5) |
648 | #define CPUID_7_0_EBX_SMEP (1U << 7) |
649 | #define CPUID_7_0_EBX_BMI2 (1U << 8) |
650 | #define CPUID_7_0_EBX_ERMS (1U << 9) |
651 | #define CPUID_7_0_EBX_INVPCID (1U << 10) |
652 | #define CPUID_7_0_EBX_RTM (1U << 11) |
653 | #define CPUID_7_0_EBX_MPX (1U << 14) |
654 | #define CPUID_7_0_EBX_AVX512F (1U << 16) /* AVX-512 Foundation */ |
655 | #define CPUID_7_0_EBX_AVX512DQ (1U << 17) /* AVX-512 Doubleword & Quadword Instrs */ |
656 | #define CPUID_7_0_EBX_RDSEED (1U << 18) |
657 | #define CPUID_7_0_EBX_ADX (1U << 19) |
658 | #define CPUID_7_0_EBX_SMAP (1U << 20) |
659 | #define CPUID_7_0_EBX_AVX512IFMA (1U << 21) /* AVX-512 Integer Fused Multiply Add */ |
660 | #define CPUID_7_0_EBX_PCOMMIT (1U << 22) /* Persistent Commit */ |
661 | #define CPUID_7_0_EBX_CLFLUSHOPT (1U << 23) /* Flush a Cache Line Optimized */ |
662 | #define CPUID_7_0_EBX_CLWB (1U << 24) /* Cache Line Write Back */ |
663 | #define CPUID_7_0_EBX_INTEL_PT (1U << 25) /* Intel Processor Trace */ |
664 | #define CPUID_7_0_EBX_AVX512PF (1U << 26) /* AVX-512 Prefetch */ |
665 | #define CPUID_7_0_EBX_AVX512ER (1U << 27) /* AVX-512 Exponential and Reciprocal */ |
666 | #define CPUID_7_0_EBX_AVX512CD (1U << 28) /* AVX-512 Conflict Detection */ |
667 | #define CPUID_7_0_EBX_SHA_NI (1U << 29) /* SHA1/SHA256 Instruction Extensions */ |
668 | #define CPUID_7_0_EBX_AVX512BW (1U << 30) /* AVX-512 Byte and Word Instructions */ |
669 | #define CPUID_7_0_EBX_AVX512VL (1U << 31) /* AVX-512 Vector Length Extensions */ |
670 | |
671 | #define CPUID_7_0_ECX_AVX512BMI (1U << 1) |
672 | #define CPUID_7_0_ECX_VBMI (1U << 1) /* AVX-512 Vector Byte Manipulation Instrs */ |
673 | #define CPUID_7_0_ECX_UMIP (1U << 2) |
674 | #define CPUID_7_0_ECX_PKU (1U << 3) |
675 | #define CPUID_7_0_ECX_OSPKE (1U << 4) |
676 | #define CPUID_7_0_ECX_VBMI2 (1U << 6) /* Additional VBMI Instrs */ |
677 | #define CPUID_7_0_ECX_GFNI (1U << 8) |
678 | #define CPUID_7_0_ECX_VAES (1U << 9) |
679 | #define CPUID_7_0_ECX_VPCLMULQDQ (1U << 10) |
680 | #define CPUID_7_0_ECX_AVX512VNNI (1U << 11) |
681 | #define CPUID_7_0_ECX_AVX512BITALG (1U << 12) |
682 | #define CPUID_7_0_ECX_AVX512_VPOPCNTDQ (1U << 14) /* POPCNT for vectors of DW/QW */ |
683 | #define CPUID_7_0_ECX_LA57 (1U << 16) |
684 | #define CPUID_7_0_ECX_RDPID (1U << 22) |
685 | #define CPUID_7_0_ECX_CLDEMOTE (1U << 25) /* CLDEMOTE Instruction */ |
686 | #define CPUID_7_0_ECX_MOVDIRI (1U << 27) /* MOVDIRI Instruction */ |
687 | #define CPUID_7_0_ECX_MOVDIR64B (1U << 28) /* MOVDIR64B Instruction */ |
688 | |
689 | #define CPUID_7_0_EDX_AVX512_4VNNIW (1U << 2) /* AVX512 Neural Network Instructions */ |
690 | #define CPUID_7_0_EDX_AVX512_4FMAPS (1U << 3) /* AVX512 Multiply Accumulation Single Precision */ |
691 | #define CPUID_7_0_EDX_SPEC_CTRL (1U << 26) /* Speculation Control */ |
#define CPUID_7_0_EDX_ARCH_CAPABILITIES (1U << 29) /* Arch Capabilities */
#define CPUID_7_0_EDX_CORE_CAPABILITY (1U << 30) /* Core Capability */
694 | #define CPUID_7_0_EDX_SPEC_CTRL_SSBD (1U << 31) /* Speculative Store Bypass Disable */ |
695 | |
696 | #define CPUID_7_1_EAX_AVX512_BF16 (1U << 5) /* AVX512 BFloat16 Instruction */ |
697 | |
698 | #define CPUID_8000_0008_EBX_WBNOINVD (1U << 9) /* Write back and |
699 | do not invalidate cache */ |
700 | #define CPUID_8000_0008_EBX_IBPB (1U << 12) /* Indirect Branch Prediction Barrier */ |
701 | |
702 | #define CPUID_XSAVE_XSAVEOPT (1U << 0) |
703 | #define CPUID_XSAVE_XSAVEC (1U << 1) |
704 | #define CPUID_XSAVE_XGETBV1 (1U << 2) |
705 | #define CPUID_XSAVE_XSAVES (1U << 3) |
706 | |
707 | #define CPUID_6_EAX_ARAT (1U << 2) |
708 | |
709 | /* CPUID[0x80000007].EDX flags: */ |
710 | #define CPUID_APM_INVTSC (1U << 8) |
711 | |
712 | #define CPUID_VENDOR_SZ 12 |
713 | |
714 | #define CPUID_VENDOR_INTEL_1 0x756e6547 /* "Genu" */ |
715 | #define CPUID_VENDOR_INTEL_2 0x49656e69 /* "ineI" */ |
716 | #define CPUID_VENDOR_INTEL_3 0x6c65746e /* "ntel" */ |
717 | #define CPUID_VENDOR_INTEL "GenuineIntel" |
718 | |
719 | #define CPUID_VENDOR_AMD_1 0x68747541 /* "Auth" */ |
720 | #define CPUID_VENDOR_AMD_2 0x69746e65 /* "enti" */ |
721 | #define CPUID_VENDOR_AMD_3 0x444d4163 /* "cAMD" */ |
722 | #define CPUID_VENDOR_AMD "AuthenticAMD" |
723 | |
724 | #define CPUID_VENDOR_VIA "CentaurHauls" |
725 | |
726 | #define CPUID_VENDOR_HYGON "HygonGenuine" |
727 | |
728 | #define IS_INTEL_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_INTEL_1 && \ |
729 | (env)->cpuid_vendor2 == CPUID_VENDOR_INTEL_2 && \ |
730 | (env)->cpuid_vendor3 == CPUID_VENDOR_INTEL_3) |
731 | #define IS_AMD_CPU(env) ((env)->cpuid_vendor1 == CPUID_VENDOR_AMD_1 && \ |
732 | (env)->cpuid_vendor2 == CPUID_VENDOR_AMD_2 && \ |
733 | (env)->cpuid_vendor3 == CPUID_VENDOR_AMD_3) |
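
/*
 * Illustrative note: the three vendor words are the little-endian 32-bit
 * chunks of the 12-byte vendor string, in the EBX/EDX/ECX order CPUID[0]
 * returns them. For "GenuineIntel":
 *
 *     "Genu" = 'G' | 'e' << 8 | 'n' << 16 | 'u' << 24 = 0x756e6547
 */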
734 | |
735 | #define CPUID_MWAIT_IBE (1U << 1) /* Interrupts can exit capability */ |
736 | #define CPUID_MWAIT_EMX (1U << 0) /* enumeration supported */ |
737 | |
738 | /* CPUID[0xB].ECX level types */ |
739 | #define CPUID_TOPOLOGY_LEVEL_INVALID (0U << 8) |
740 | #define CPUID_TOPOLOGY_LEVEL_SMT (1U << 8) |
741 | #define CPUID_TOPOLOGY_LEVEL_CORE (2U << 8) |
742 | #define CPUID_TOPOLOGY_LEVEL_DIE (5U << 8) |
743 | |
744 | /* MSR Feature Bits */ |
745 | #define MSR_ARCH_CAP_RDCL_NO (1U << 0) |
746 | #define MSR_ARCH_CAP_IBRS_ALL (1U << 1) |
747 | #define MSR_ARCH_CAP_RSBA (1U << 2) |
748 | #define MSR_ARCH_CAP_SKIP_L1DFL_VMENTRY (1U << 3) |
749 | #define MSR_ARCH_CAP_SSB_NO (1U << 4) |
750 | |
751 | #define MSR_CORE_CAP_SPLIT_LOCK_DETECT (1U << 5) |
752 | |
753 | /* Supported Hyper-V Enlightenments */ |
754 | #define HYPERV_FEAT_RELAXED 0 |
755 | #define HYPERV_FEAT_VAPIC 1 |
756 | #define HYPERV_FEAT_TIME 2 |
757 | #define HYPERV_FEAT_CRASH 3 |
758 | #define HYPERV_FEAT_RESET 4 |
759 | #define HYPERV_FEAT_VPINDEX 5 |
760 | #define HYPERV_FEAT_RUNTIME 6 |
761 | #define HYPERV_FEAT_SYNIC 7 |
762 | #define HYPERV_FEAT_STIMER 8 |
763 | #define HYPERV_FEAT_FREQUENCIES 9 |
764 | #define HYPERV_FEAT_REENLIGHTENMENT 10 |
765 | #define HYPERV_FEAT_TLBFLUSH 11 |
766 | #define HYPERV_FEAT_EVMCS 12 |
767 | #define HYPERV_FEAT_IPI 13 |
768 | #define HYPERV_FEAT_STIMER_DIRECT 14 |
769 | |
770 | #ifndef HYPERV_SPINLOCK_NEVER_RETRY |
771 | #define HYPERV_SPINLOCK_NEVER_RETRY 0xFFFFFFFF |
772 | #endif |
773 | |
774 | #define EXCP00_DIVZ 0 |
775 | #define EXCP01_DB 1 |
776 | #define EXCP02_NMI 2 |
777 | #define EXCP03_INT3 3 |
778 | #define EXCP04_INTO 4 |
779 | #define EXCP05_BOUND 5 |
780 | #define EXCP06_ILLOP 6 |
781 | #define EXCP07_PREX 7 |
782 | #define EXCP08_DBLE 8 |
783 | #define EXCP09_XERR 9 |
784 | #define EXCP0A_TSS 10 |
785 | #define EXCP0B_NOSEG 11 |
786 | #define EXCP0C_STACK 12 |
787 | #define EXCP0D_GPF 13 |
788 | #define EXCP0E_PAGE 14 |
789 | #define EXCP10_COPR 16 |
790 | #define EXCP11_ALGN 17 |
791 | #define EXCP12_MCHK 18 |
792 | |
#define EXCP_SYSCALL    0x100 /* only happens in user-only emulation
                                 for the syscall instruction */
795 | #define EXCP_VMEXIT 0x100 |
796 | |
797 | /* i386-specific interrupt pending bits. */ |
798 | #define CPU_INTERRUPT_POLL CPU_INTERRUPT_TGT_EXT_1 |
799 | #define CPU_INTERRUPT_SMI CPU_INTERRUPT_TGT_EXT_2 |
800 | #define CPU_INTERRUPT_NMI CPU_INTERRUPT_TGT_EXT_3 |
801 | #define CPU_INTERRUPT_MCE CPU_INTERRUPT_TGT_EXT_4 |
802 | #define CPU_INTERRUPT_VIRQ CPU_INTERRUPT_TGT_INT_0 |
803 | #define CPU_INTERRUPT_SIPI CPU_INTERRUPT_TGT_INT_1 |
804 | #define CPU_INTERRUPT_TPR CPU_INTERRUPT_TGT_INT_2 |
805 | |
806 | /* Use a clearer name for this. */ |
807 | #define CPU_INTERRUPT_INIT CPU_INTERRUPT_RESET |
808 | |
809 | /* Instead of computing the condition codes after each x86 instruction, |
810 | * QEMU just stores one operand (called CC_SRC), the result |
811 | * (called CC_DST) and the type of operation (called CC_OP). When the |
812 | * condition codes are needed, the condition codes can be calculated |
813 | * using this information. Condition codes are not generated if they |
814 | * are only needed for conditional branches. |
815 | */ |
816 | typedef enum { |
817 | CC_OP_DYNAMIC, /* must use dynamic code to get cc_op */ |
818 | CC_OP_EFLAGS, /* all cc are explicitly computed, CC_SRC = flags */ |
819 | |
820 | CC_OP_MULB, /* modify all flags, C, O = (CC_SRC != 0) */ |
821 | CC_OP_MULW, |
822 | CC_OP_MULL, |
823 | CC_OP_MULQ, |
824 | |
825 | CC_OP_ADDB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
826 | CC_OP_ADDW, |
827 | CC_OP_ADDL, |
828 | CC_OP_ADDQ, |
829 | |
830 | CC_OP_ADCB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
831 | CC_OP_ADCW, |
832 | CC_OP_ADCL, |
833 | CC_OP_ADCQ, |
834 | |
835 | CC_OP_SUBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
836 | CC_OP_SUBW, |
837 | CC_OP_SUBL, |
838 | CC_OP_SUBQ, |
839 | |
840 | CC_OP_SBBB, /* modify all flags, CC_DST = res, CC_SRC = src1 */ |
841 | CC_OP_SBBW, |
842 | CC_OP_SBBL, |
843 | CC_OP_SBBQ, |
844 | |
845 | CC_OP_LOGICB, /* modify all flags, CC_DST = res */ |
846 | CC_OP_LOGICW, |
847 | CC_OP_LOGICL, |
848 | CC_OP_LOGICQ, |
849 | |
    CC_OP_INCB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
851 | CC_OP_INCW, |
852 | CC_OP_INCL, |
853 | CC_OP_INCQ, |
854 | |
    CC_OP_DECB, /* modify all flags except C, CC_DST = res, CC_SRC = C */
856 | CC_OP_DECW, |
857 | CC_OP_DECL, |
858 | CC_OP_DECQ, |
859 | |
860 | CC_OP_SHLB, /* modify all flags, CC_DST = res, CC_SRC.msb = C */ |
861 | CC_OP_SHLW, |
862 | CC_OP_SHLL, |
863 | CC_OP_SHLQ, |
864 | |
865 | CC_OP_SARB, /* modify all flags, CC_DST = res, CC_SRC.lsb = C */ |
866 | CC_OP_SARW, |
867 | CC_OP_SARL, |
868 | CC_OP_SARQ, |
869 | |
870 | CC_OP_BMILGB, /* Z,S via CC_DST, C = SRC==0; O=0; P,A undefined */ |
871 | CC_OP_BMILGW, |
872 | CC_OP_BMILGL, |
873 | CC_OP_BMILGQ, |
874 | |
875 | CC_OP_ADCX, /* CC_DST = C, CC_SRC = rest. */ |
876 | CC_OP_ADOX, /* CC_DST = O, CC_SRC = rest. */ |
877 | CC_OP_ADCOX, /* CC_DST = C, CC_SRC2 = O, CC_SRC = rest. */ |
878 | |
879 | CC_OP_CLR, /* Z set, all other flags clear. */ |
880 | CC_OP_POPCNT, /* Z via CC_SRC, all other flags clear. */ |
881 | |
882 | CC_OP_NB, |
883 | } CCOp; |
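
/*
 * Illustrative sketch of the lazy evaluation described above: after a
 * 32-bit ADD, cc_op is CC_OP_ADDL with CC_DST = result and CC_SRC = src1,
 * so when a flag is finally needed it can be recovered as:
 *
 *     bool zf = (uint32_t)CC_DST == 0;
 *     bool cf = (uint32_t)CC_DST < (uint32_t)CC_SRC;   ( unsigned wrap )
 */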
884 | |
885 | typedef struct SegmentCache { |
886 | uint32_t selector; |
887 | target_ulong base; |
888 | uint32_t limit; |
889 | uint32_t flags; |
890 | } SegmentCache; |
891 | |
892 | #define MMREG_UNION(n, bits) \ |
893 | union n { \ |
894 | uint8_t _b_##n[(bits)/8]; \ |
895 | uint16_t _w_##n[(bits)/16]; \ |
896 | uint32_t _l_##n[(bits)/32]; \ |
897 | uint64_t _q_##n[(bits)/64]; \ |
898 | float32 _s_##n[(bits)/32]; \ |
899 | float64 _d_##n[(bits)/64]; \ |
900 | } |
901 | |
902 | typedef union { |
903 | uint8_t _b[16]; |
904 | uint16_t _w[8]; |
905 | uint32_t _l[4]; |
906 | uint64_t _q[2]; |
907 | } XMMReg; |
908 | |
909 | typedef union { |
910 | uint8_t _b[32]; |
911 | uint16_t _w[16]; |
912 | uint32_t _l[8]; |
913 | uint64_t _q[4]; |
914 | } YMMReg; |
915 | |
916 | typedef MMREG_UNION(ZMMReg, 512) ZMMReg; |
917 | typedef MMREG_UNION(MMXReg, 64) MMXReg; |
918 | |
919 | typedef struct BNDReg { |
920 | uint64_t lb; |
921 | uint64_t ub; |
922 | } BNDReg; |
923 | |
924 | typedef struct BNDCSReg { |
925 | uint64_t cfgu; |
926 | uint64_t sts; |
927 | } BNDCSReg; |
928 | |
929 | #define BNDCFG_ENABLE 1ULL |
930 | #define BNDCFG_BNDPRESERVE 2ULL |
931 | #define BNDCFG_BDIR_MASK TARGET_PAGE_MASK |
932 | |
933 | #ifdef HOST_WORDS_BIGENDIAN |
934 | #define ZMM_B(n) _b_ZMMReg[63 - (n)] |
935 | #define ZMM_W(n) _w_ZMMReg[31 - (n)] |
936 | #define ZMM_L(n) _l_ZMMReg[15 - (n)] |
937 | #define ZMM_S(n) _s_ZMMReg[15 - (n)] |
938 | #define ZMM_Q(n) _q_ZMMReg[7 - (n)] |
939 | #define ZMM_D(n) _d_ZMMReg[7 - (n)] |
940 | |
941 | #define MMX_B(n) _b_MMXReg[7 - (n)] |
942 | #define MMX_W(n) _w_MMXReg[3 - (n)] |
943 | #define MMX_L(n) _l_MMXReg[1 - (n)] |
944 | #define MMX_S(n) _s_MMXReg[1 - (n)] |
945 | #else |
946 | #define ZMM_B(n) _b_ZMMReg[n] |
947 | #define ZMM_W(n) _w_ZMMReg[n] |
948 | #define ZMM_L(n) _l_ZMMReg[n] |
949 | #define ZMM_S(n) _s_ZMMReg[n] |
950 | #define ZMM_Q(n) _q_ZMMReg[n] |
951 | #define ZMM_D(n) _d_ZMMReg[n] |
952 | |
953 | #define MMX_B(n) _b_MMXReg[n] |
954 | #define MMX_W(n) _w_MMXReg[n] |
955 | #define MMX_L(n) _l_MMXReg[n] |
956 | #define MMX_S(n) _s_MMXReg[n] |
957 | #endif |
958 | #define MMX_Q(n) _q_MMXReg[n] |
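
/*
 * Illustrative sketch: the element accessors hide host byte order, so a
 * given guest lane is always selected the same way; e.g. the third 32-bit
 * lane of XMM0 on either host endianness:
 *
 *     uint32_t lane2 = env->xmm_regs[0].ZMM_L(2);
 */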
959 | |
960 | typedef union { |
961 | floatx80 d __attribute__((aligned(16))); |
962 | MMXReg mmx; |
963 | } FPReg; |
964 | |
965 | typedef struct { |
966 | uint64_t base; |
967 | uint64_t mask; |
968 | } MTRRVar; |
969 | |
970 | #define CPU_NB_REGS64 16 |
971 | #define CPU_NB_REGS32 8 |
972 | |
973 | #ifdef TARGET_X86_64 |
974 | #define CPU_NB_REGS CPU_NB_REGS64 |
975 | #else |
976 | #define CPU_NB_REGS CPU_NB_REGS32 |
977 | #endif |
978 | |
979 | #define MAX_FIXED_COUNTERS 3 |
980 | #define MAX_GP_COUNTERS (MSR_IA32_PERF_STATUS - MSR_P6_EVNTSEL0) |
981 | |
#define TARGET_INSN_START_EXTRA_WORDS 1
983 | |
984 | #define NB_OPMASK_REGS 8 |
985 | |
/* A CPU can't have 0xFFFFFFFF as its APIC ID; use that value to indicate
 * that the APIC ID hasn't been set yet.
 */
989 | #define UNASSIGNED_APIC_ID 0xFFFFFFFF |
990 | |
991 | typedef union X86LegacyXSaveArea { |
992 | struct { |
993 | uint16_t fcw; |
994 | uint16_t fsw; |
995 | uint8_t ftw; |
996 | uint8_t reserved; |
997 | uint16_t fpop; |
998 | uint64_t fpip; |
999 | uint64_t fpdp; |
1000 | uint32_t mxcsr; |
1001 | uint32_t mxcsr_mask; |
1002 | FPReg fpregs[8]; |
1003 | uint8_t xmm_regs[16][16]; |
1004 | }; |
1005 | uint8_t data[512]; |
1006 | } X86LegacyXSaveArea; |
1007 | |
typedef struct X86XSaveHeader {
    uint64_t xstate_bv;
    uint64_t xcomp_bv;
    uint64_t reserve0;
    uint8_t reserved[40];
} X86XSaveHeader;
1014 | |
1015 | /* Ext. save area 2: AVX State */ |
1016 | typedef struct XSaveAVX { |
1017 | uint8_t ymmh[16][16]; |
1018 | } XSaveAVX; |
1019 | |
1020 | /* Ext. save area 3: BNDREG */ |
1021 | typedef struct XSaveBNDREG { |
1022 | BNDReg bnd_regs[4]; |
1023 | } XSaveBNDREG; |
1024 | |
1025 | /* Ext. save area 4: BNDCSR */ |
1026 | typedef union XSaveBNDCSR { |
1027 | BNDCSReg bndcsr; |
1028 | uint8_t data[64]; |
1029 | } XSaveBNDCSR; |
1030 | |
1031 | /* Ext. save area 5: Opmask */ |
1032 | typedef struct XSaveOpmask { |
1033 | uint64_t opmask_regs[NB_OPMASK_REGS]; |
1034 | } XSaveOpmask; |
1035 | |
1036 | /* Ext. save area 6: ZMM_Hi256 */ |
1037 | typedef struct XSaveZMM_Hi256 { |
1038 | uint8_t zmm_hi256[16][32]; |
1039 | } XSaveZMM_Hi256; |
1040 | |
1041 | /* Ext. save area 7: Hi16_ZMM */ |
1042 | typedef struct XSaveHi16_ZMM { |
1043 | uint8_t hi16_zmm[16][64]; |
1044 | } XSaveHi16_ZMM; |
1045 | |
1046 | /* Ext. save area 9: PKRU state */ |
1047 | typedef struct XSavePKRU { |
1048 | uint32_t pkru; |
1049 | uint32_t padding; |
1050 | } XSavePKRU; |
1051 | |
1052 | typedef struct X86XSaveArea { |
1053 | X86LegacyXSaveArea legacy; |
    X86XSaveHeader header;
1055 | |
1056 | /* Extended save areas: */ |
1057 | |
1058 | /* AVX State: */ |
1059 | XSaveAVX avx_state; |
1060 | uint8_t padding[960 - 576 - sizeof(XSaveAVX)]; |
1061 | /* MPX State: */ |
1062 | XSaveBNDREG bndreg_state; |
1063 | XSaveBNDCSR bndcsr_state; |
1064 | /* AVX-512 State: */ |
1065 | XSaveOpmask opmask_state; |
1066 | XSaveZMM_Hi256 zmm_hi256_state; |
1067 | XSaveHi16_ZMM hi16_zmm_state; |
1068 | /* PKRU State: */ |
1069 | XSavePKRU pkru_state; |
1070 | } X86XSaveArea; |
1071 | |
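/*
 * Layout check: the legacy FXSAVE image is 512 bytes and the XSAVE header
 * is 64 bytes, so the first extended area (AVX) must start at
 * 512 + 64 = 576 = 0x240, matching the standard (non-compacted) XSAVE
 * format.
 */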
1072 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, avx_state) != 0x240); |
1073 | QEMU_BUILD_BUG_ON(sizeof(XSaveAVX) != 0x100); |
1074 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndreg_state) != 0x3c0); |
1075 | QEMU_BUILD_BUG_ON(sizeof(XSaveBNDREG) != 0x40); |
1076 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, bndcsr_state) != 0x400); |
1077 | QEMU_BUILD_BUG_ON(sizeof(XSaveBNDCSR) != 0x40); |
1078 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, opmask_state) != 0x440); |
1079 | QEMU_BUILD_BUG_ON(sizeof(XSaveOpmask) != 0x40); |
1080 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, zmm_hi256_state) != 0x480); |
1081 | QEMU_BUILD_BUG_ON(sizeof(XSaveZMM_Hi256) != 0x200); |
1082 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, hi16_zmm_state) != 0x680); |
1083 | QEMU_BUILD_BUG_ON(sizeof(XSaveHi16_ZMM) != 0x400); |
1084 | QEMU_BUILD_BUG_ON(offsetof(X86XSaveArea, pkru_state) != 0xA80); |
1085 | QEMU_BUILD_BUG_ON(sizeof(XSavePKRU) != 0x8); |
1086 | |
1087 | typedef enum TPRAccess { |
1088 | TPR_ACCESS_READ, |
1089 | TPR_ACCESS_WRITE, |
1090 | } TPRAccess; |
1091 | |
1092 | /* Cache information data structures: */ |
1093 | |
1094 | enum CacheType { |
1095 | DATA_CACHE, |
1096 | INSTRUCTION_CACHE, |
1097 | UNIFIED_CACHE |
1098 | }; |
1099 | |
1100 | typedef struct CPUCacheInfo { |
1101 | enum CacheType type; |
1102 | uint8_t level; |
1103 | /* Size in bytes */ |
1104 | uint32_t size; |
1105 | /* Line size, in bytes */ |
1106 | uint16_t line_size; |
1107 | /* |
1108 | * Associativity. |
1109 | * Note: representation of fully-associative caches is not implemented |
1110 | */ |
1111 | uint8_t associativity; |
1112 | /* Physical line partitions. CPUID[0x8000001D].EBX, CPUID[4].EBX */ |
1113 | uint8_t partitions; |
1114 | /* Number of sets. CPUID[0x8000001D].ECX, CPUID[4].ECX */ |
1115 | uint32_t sets; |
1116 | /* |
1117 | * Lines per tag. |
1118 | * AMD-specific: CPUID[0x80000005], CPUID[0x80000006]. |
     * (Is this a synonym for @partitions?)
1120 | */ |
1121 | uint8_t lines_per_tag; |
1122 | |
1123 | /* Self-initializing cache */ |
1124 | bool self_init; |
1125 | /* |
1126 | * WBINVD/INVD is not guaranteed to act upon lower level caches of |
1127 | * non-originating threads sharing this cache. |
1128 | * CPUID[4].EDX[bit 0], CPUID[0x8000001D].EDX[bit 0] |
1129 | */ |
1130 | bool no_invd_sharing; |
1131 | /* |
1132 | * Cache is inclusive of lower cache levels. |
1133 | * CPUID[4].EDX[bit 1], CPUID[0x8000001D].EDX[bit 1]. |
1134 | */ |
1135 | bool inclusive; |
1136 | /* |
1137 | * A complex function is used to index the cache, potentially using all |
1138 | * address bits. CPUID[4].EDX[bit 2]. |
1139 | */ |
1140 | bool complex_indexing; |
1141 | } CPUCacheInfo; |
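
/*
 * Illustrative note: following the CPUID leaf 4 encoding, the fields of a
 * CPUCacheInfo are expected to satisfy
 *
 *     size = line_size * associativity * partitions * sets
 *
 * e.g. a 32 KiB, 8-way cache with 64-byte lines and one partition has
 * 32768 / (64 * 8 * 1) = 64 sets.
 */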
1142 | |
1143 | |
1144 | typedef struct CPUCaches { |
1145 | CPUCacheInfo *l1d_cache; |
1146 | CPUCacheInfo *l1i_cache; |
1147 | CPUCacheInfo *l2_cache; |
1148 | CPUCacheInfo *l3_cache; |
1149 | } CPUCaches; |
1150 | |
1151 | typedef struct CPUX86State { |
1152 | /* standard registers */ |
1153 | target_ulong regs[CPU_NB_REGS]; |
1154 | target_ulong eip; |
1155 | target_ulong eflags; /* eflags register. During CPU emulation, CC |
1156 | flags and DF are set to zero because they are |
1157 | stored elsewhere */ |
1158 | |
1159 | /* emulator internal eflags handling */ |
1160 | target_ulong cc_dst; |
1161 | target_ulong cc_src; |
1162 | target_ulong cc_src2; |
1163 | uint32_t cc_op; |
1164 | int32_t df; /* D flag : 1 if D = 0, -1 if D = 1 */ |
1165 | uint32_t hflags; /* TB flags, see HF_xxx constants. These flags |
1166 | are known at translation time. */ |
1167 | uint32_t hflags2; /* various other flags, see HF2_xxx constants. */ |
1168 | |
1169 | /* segments */ |
1170 | SegmentCache segs[6]; /* selector values */ |
1171 | SegmentCache ldt; |
1172 | SegmentCache tr; |
1173 | SegmentCache gdt; /* only base and limit are used */ |
1174 | SegmentCache idt; /* only base and limit are used */ |
1175 | |
1176 | target_ulong cr[5]; /* NOTE: cr1 is unused */ |
1177 | int32_t a20_mask; |
1178 | |
1179 | BNDReg bnd_regs[4]; |
1180 | BNDCSReg bndcs_regs; |
1181 | uint64_t msr_bndcfgs; |
1182 | uint64_t efer; |
1183 | |
1184 | /* Beginning of state preserved by INIT (dummy marker). */ |
1185 | struct {} start_init_save; |
1186 | |
1187 | /* FPU state */ |
1188 | unsigned int fpstt; /* top of stack index */ |
1189 | uint16_t fpus; |
1190 | uint16_t fpuc; |
1191 | uint8_t fptags[8]; /* 0 = valid, 1 = empty */ |
1192 | FPReg fpregs[8]; |
1193 | /* KVM-only so far */ |
1194 | uint16_t fpop; |
1195 | uint64_t fpip; |
1196 | uint64_t fpdp; |
1197 | |
1198 | /* emulator internal variables */ |
1199 | float_status fp_status; |
1200 | floatx80 ft0; |
1201 | |
1202 | float_status mmx_status; /* for 3DNow! float ops */ |
1203 | float_status sse_status; |
1204 | uint32_t mxcsr; |
1205 | ZMMReg xmm_regs[CPU_NB_REGS == 8 ? 8 : 32]; |
1206 | ZMMReg xmm_t0; |
1207 | MMXReg mmx_t0; |
1208 | |
1209 | XMMReg ymmh_regs[CPU_NB_REGS]; |
1210 | |
1211 | uint64_t opmask_regs[NB_OPMASK_REGS]; |
1212 | YMMReg zmmh_regs[CPU_NB_REGS]; |
1213 | ZMMReg hi16_zmm_regs[CPU_NB_REGS]; |
1214 | |
1215 | /* sysenter registers */ |
1216 | uint32_t sysenter_cs; |
1217 | target_ulong sysenter_esp; |
1218 | target_ulong sysenter_eip; |
1219 | uint64_t star; |
1220 | |
1221 | uint64_t vm_hsave; |
1222 | |
1223 | #ifdef TARGET_X86_64 |
1224 | target_ulong lstar; |
1225 | target_ulong cstar; |
1226 | target_ulong fmask; |
1227 | target_ulong kernelgsbase; |
1228 | #endif |
1229 | |
1230 | uint64_t tsc; |
1231 | uint64_t tsc_adjust; |
1232 | uint64_t tsc_deadline; |
1233 | uint64_t tsc_aux; |
1234 | |
1235 | uint64_t xcr0; |
1236 | |
1237 | uint64_t mcg_status; |
1238 | uint64_t msr_ia32_misc_enable; |
1239 | uint64_t msr_ia32_feature_control; |
1240 | |
1241 | uint64_t msr_fixed_ctr_ctrl; |
1242 | uint64_t msr_global_ctrl; |
1243 | uint64_t msr_global_status; |
1244 | uint64_t msr_global_ovf_ctrl; |
1245 | uint64_t msr_fixed_counters[MAX_FIXED_COUNTERS]; |
1246 | uint64_t msr_gp_counters[MAX_GP_COUNTERS]; |
1247 | uint64_t msr_gp_evtsel[MAX_GP_COUNTERS]; |
1248 | |
1249 | uint64_t pat; |
1250 | uint32_t smbase; |
1251 | uint64_t msr_smi_count; |
1252 | |
1253 | uint32_t pkru; |
1254 | |
1255 | uint64_t spec_ctrl; |
1256 | uint64_t virt_ssbd; |
1257 | |
1258 | /* End of state preserved by INIT (dummy marker). */ |
1259 | struct {} end_init_save; |
1260 | |
1261 | uint64_t system_time_msr; |
1262 | uint64_t wall_clock_msr; |
1263 | uint64_t steal_time_msr; |
1264 | uint64_t async_pf_en_msr; |
1265 | uint64_t pv_eoi_en_msr; |
1266 | uint64_t poll_control_msr; |
1267 | |
1268 | /* Partition-wide HV MSRs, will be updated only on the first vcpu */ |
1269 | uint64_t msr_hv_hypercall; |
1270 | uint64_t msr_hv_guest_os_id; |
1271 | uint64_t msr_hv_tsc; |
1272 | |
1273 | /* Per-VCPU HV MSRs */ |
1274 | uint64_t msr_hv_vapic; |
1275 | uint64_t msr_hv_crash_params[HV_CRASH_PARAMS]; |
1276 | uint64_t msr_hv_runtime; |
1277 | uint64_t msr_hv_synic_control; |
1278 | uint64_t msr_hv_synic_evt_page; |
1279 | uint64_t msr_hv_synic_msg_page; |
1280 | uint64_t msr_hv_synic_sint[HV_SINT_COUNT]; |
1281 | uint64_t msr_hv_stimer_config[HV_STIMER_COUNT]; |
1282 | uint64_t msr_hv_stimer_count[HV_STIMER_COUNT]; |
1283 | uint64_t msr_hv_reenlightenment_control; |
1284 | uint64_t msr_hv_tsc_emulation_control; |
1285 | uint64_t msr_hv_tsc_emulation_status; |
1286 | |
1287 | uint64_t msr_rtit_ctrl; |
1288 | uint64_t msr_rtit_status; |
1289 | uint64_t msr_rtit_output_base; |
1290 | uint64_t msr_rtit_output_mask; |
1291 | uint64_t msr_rtit_cr3_match; |
1292 | uint64_t msr_rtit_addrs[MAX_RTIT_ADDRS]; |
1293 | |
1294 | /* exception/interrupt handling */ |
1295 | int error_code; |
1296 | int exception_is_int; |
1297 | target_ulong exception_next_eip; |
1298 | target_ulong dr[8]; /* debug registers; note dr4 and dr5 are unused */ |
1299 | union { |
1300 | struct CPUBreakpoint *cpu_breakpoint[4]; |
1301 | struct CPUWatchpoint *cpu_watchpoint[4]; |
1302 | }; /* break/watchpoints for dr[0..3] */ |
1303 | int old_exception; /* exception in flight */ |
1304 | |
1305 | uint64_t vm_vmcb; |
1306 | uint64_t tsc_offset; |
1307 | uint64_t intercept; |
1308 | uint16_t intercept_cr_read; |
1309 | uint16_t intercept_cr_write; |
1310 | uint16_t intercept_dr_read; |
1311 | uint16_t intercept_dr_write; |
1312 | uint32_t intercept_exceptions; |
1313 | uint64_t nested_cr3; |
1314 | uint32_t nested_pg_mode; |
1315 | uint8_t v_tpr; |
1316 | |
1317 | /* KVM states, automatically cleared on reset */ |
1318 | uint8_t nmi_injected; |
1319 | uint8_t nmi_pending; |
1320 | |
1321 | uintptr_t retaddr; |
1322 | |
1323 | /* Fields up to this point are cleared by a CPU reset */ |
1324 | struct {} end_reset_fields; |
1325 | |
1326 | /* Fields after this point are preserved across CPU reset. */ |
1327 | |
1328 | /* processor features (e.g. for CPUID insn) */ |
    /* Actual cpuid leaf 7 value */
    uint32_t cpuid_level_func7;
    /* Minimum cpuid leaf 7 value */
    uint32_t cpuid_min_level_func7;
1333 | /* Minimum level/xlevel/xlevel2, based on CPU model + features */ |
1334 | uint32_t cpuid_min_level, cpuid_min_xlevel, cpuid_min_xlevel2; |
1335 | /* Maximum level/xlevel/xlevel2 value for auto-assignment: */ |
1336 | uint32_t cpuid_max_level, cpuid_max_xlevel, cpuid_max_xlevel2; |
1337 | /* Actual level/xlevel/xlevel2 value: */ |
1338 | uint32_t cpuid_level, cpuid_xlevel, cpuid_xlevel2; |
1339 | uint32_t cpuid_vendor1; |
1340 | uint32_t cpuid_vendor2; |
1341 | uint32_t cpuid_vendor3; |
1342 | uint32_t cpuid_version; |
1343 | FeatureWordArray features; |
1344 | /* Features that were explicitly enabled/disabled */ |
1345 | FeatureWordArray user_features; |
1346 | uint32_t cpuid_model[12]; |
1347 | /* Cache information for CPUID. When legacy-cache=on, the cache data |
1348 | * on each CPUID leaf will be different, because we keep compatibility |
1349 | * with old QEMU versions. |
1350 | */ |
1351 | CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd; |
1352 | |
1353 | /* MTRRs */ |
1354 | uint64_t mtrr_fixed[11]; |
1355 | uint64_t mtrr_deftype; |
1356 | MTRRVar mtrr_var[MSR_MTRRcap_VCNT]; |
1357 | |
1358 | /* For KVM */ |
1359 | uint32_t mp_state; |
1360 | int32_t exception_nr; |
1361 | int32_t interrupt_injected; |
1362 | uint8_t soft_interrupt; |
1363 | uint8_t exception_pending; |
1364 | uint8_t exception_injected; |
1365 | uint8_t has_error_code; |
1366 | uint8_t exception_has_payload; |
1367 | uint64_t exception_payload; |
1368 | uint32_t ins_len; |
1369 | uint32_t sipi_vector; |
1370 | bool tsc_valid; |
1371 | int64_t tsc_khz; |
1372 | int64_t user_tsc_khz; /* for sanity check only */ |
1373 | #if defined(CONFIG_KVM) || defined(CONFIG_HVF) |
1374 | void *xsave_buf; |
1375 | #endif |
1376 | #if defined(CONFIG_KVM) |
1377 | struct kvm_nested_state *nested_state; |
1378 | #endif |
1379 | #if defined(CONFIG_HVF) |
1380 | HVFX86EmulatorState *hvf_emul; |
1381 | #endif |
1382 | |
1383 | uint64_t mcg_cap; |
1384 | uint64_t mcg_ctl; |
1385 | uint64_t mcg_ext_ctl; |
1386 | uint64_t mce_banks[MCE_BANKS_DEF*4]; |
1387 | uint64_t xstate_bv; |
1388 | |
1389 | /* vmstate */ |
1390 | uint16_t fpus_vmstate; |
1391 | uint16_t fptag_vmstate; |
1392 | uint16_t fpregs_format_vmstate; |
1393 | |
1394 | uint64_t xss; |
1395 | |
1396 | TPRAccess tpr_access_type; |
1397 | |
1398 | unsigned nr_dies; |
1399 | } CPUX86State; |
1400 | |
1401 | struct kvm_msrs; |
1402 | |
1403 | /** |
1404 | * X86CPU: |
1405 | * @env: #CPUX86State |
1406 | * @migratable: If set, only migratable flags will be accepted when "enforce" |
1407 | * mode is used, and only migratable flags will be included in the "host" |
1408 | * CPU model. |
1409 | * |
1410 | * An x86 CPU. |
1411 | */ |
1412 | struct X86CPU { |
1413 | /*< private >*/ |
1414 | CPUState parent_obj; |
1415 | /*< public >*/ |
1416 | |
1417 | CPUNegativeOffsetState neg; |
1418 | CPUX86State env; |
1419 | |
1420 | uint32_t hyperv_spinlock_attempts; |
1421 | char *hyperv_vendor_id; |
1422 | bool hyperv_synic_kvm_only; |
1423 | uint64_t hyperv_features; |
1424 | bool hyperv_passthrough; |
1425 | |
1426 | bool check_cpuid; |
1427 | bool enforce_cpuid; |
1428 | /* |
1429 | * Force features to be enabled even if the host doesn't support them. |
1430 | * This is dangerous and should be done only for testing CPUID |
1431 | * compatibility. |
1432 | */ |
1433 | bool force_features; |
1434 | bool expose_kvm; |
1435 | bool expose_tcg; |
1436 | bool migratable; |
1437 | bool migrate_smi_count; |
1438 | bool max_features; /* Enable all supported features automatically */ |
1439 | uint32_t apic_id; |
1440 | |
    /* Enables publishing of TSC increment and Local APIC bus frequencies to
     * the guest OS in CPUID page 0x40000010, the same way that VMware does. */
1443 | bool vmware_cpuid_freq; |
1444 | |
    /* if true the CPUID code directly forwards host cache leaves to the guest */
1446 | bool cache_info_passthrough; |
1447 | |
1448 | /* if true the CPUID code directly forwards |
1449 | * host monitor/mwait leaves to the guest */ |
1450 | struct { |
1451 | uint32_t eax; |
1452 | uint32_t ebx; |
1453 | uint32_t ecx; |
1454 | uint32_t edx; |
1455 | } mwait; |
1456 | |
1457 | /* Features that were filtered out because of missing host capabilities */ |
1458 | FeatureWordArray filtered_features; |
1459 | |
1460 | /* Enable PMU CPUID bits. This can't be enabled by default yet because |
1461 | * it doesn't have ABI stability guarantees, as it passes all PMU CPUID |
1462 | * bits returned by GET_SUPPORTED_CPUID (that depend on host CPU and kernel |
1463 | * capabilities) directly to the guest. |
1464 | */ |
1465 | bool enable_pmu; |
1466 | |
1467 | /* LMCE support can be enabled/disabled via cpu option 'lmce=on/off'. It is |
1468 | * disabled by default to avoid breaking migration between QEMU with |
1469 | * different LMCE configurations. |
1470 | */ |
1471 | bool enable_lmce; |
1472 | |
    /* Compatibility bits for old machine types.
     * If true, present a virtual L3 cache to the VM; the vCPUs in the same
     * virtual socket share a virtual L3 cache.
     */
1477 | bool enable_l3_cache; |
1478 | |
    /* Compatibility bits for old machine types.
     * If true, present the old cache topology information.
     */
1482 | bool legacy_cache; |
1483 | |
1484 | /* Compatibility bits for old machine types: */ |
1485 | bool enable_cpuid_0xb; |
1486 | |
1487 | /* Enable auto level-increase for all CPUID leaves */ |
1488 | bool full_cpuid_auto_level; |
1489 | |
1490 | /* Enable auto level-increase for Intel Processor Trace leave */ |
1491 | bool intel_pt_auto_level; |
1492 | |
1493 | /* if true fill the top bits of the MTRR_PHYSMASKn variable range */ |
1494 | bool fill_mtrr_mask; |
1495 | |
1496 | /* if true override the phys_bits value with a value read from the host */ |
1497 | bool host_phys_bits; |
1498 | |
1499 | /* if set, limit maximum value for phys_bits when host_phys_bits is true */ |
1500 | uint8_t host_phys_bits_limit; |
1501 | |
1502 | /* Stop SMI delivery for migration compatibility with old machines */ |
1503 | bool kvm_no_smi_migration; |
1504 | |
1505 | /* Number of physical address bits supported */ |
1506 | uint32_t phys_bits; |
1507 | |
1508 | /* in order to simplify APIC support, we leave this pointer to the |
1509 | user */ |
1510 | struct DeviceState *apic_state; |
1511 | struct MemoryRegion *cpu_as_root, *cpu_as_mem, *smram; |
1512 | Notifier machine_done; |
1513 | |
1514 | struct kvm_msrs *kvm_msr_buf; |
1515 | |
1516 | int32_t node_id; /* NUMA node this CPU belongs to */ |
1517 | int32_t socket_id; |
1518 | int32_t die_id; |
1519 | int32_t core_id; |
1520 | int32_t thread_id; |
1521 | |
1522 | int32_t hv_max_vps; |
1523 | }; |
1524 | |
1525 | |
1526 | #ifndef CONFIG_USER_ONLY |
1527 | extern VMStateDescription vmstate_x86_cpu; |
1528 | #endif |
1529 | |
1530 | /** |
1531 | * x86_cpu_do_interrupt: |
1532 | * @cpu: vCPU the interrupt is to be handled by. |
1533 | */ |
1534 | void x86_cpu_do_interrupt(CPUState *cpu); |
1535 | bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req); |
1536 | int x86_cpu_pending_interrupt(CPUState *cs, int interrupt_request); |
1537 | |
1538 | int x86_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cpu, |
1539 | int cpuid, void *opaque); |
1540 | int x86_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cpu, |
1541 | int cpuid, void *opaque); |
1542 | int x86_cpu_write_elf64_qemunote(WriteCoreDumpFunction f, CPUState *cpu, |
1543 | void *opaque); |
1544 | int x86_cpu_write_elf32_qemunote(WriteCoreDumpFunction f, CPUState *cpu, |
1545 | void *opaque); |
1546 | |
1547 | void x86_cpu_get_memory_mapping(CPUState *cpu, MemoryMappingList *list, |
1548 | Error **errp); |
1549 | |
1550 | void x86_cpu_dump_state(CPUState *cs, FILE *f, int flags); |
1551 | |
1552 | hwaddr x86_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr); |
1553 | |
1554 | int x86_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg); |
1555 | int x86_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg); |
1556 | |
1557 | void x86_cpu_exec_enter(CPUState *cpu); |
1558 | void x86_cpu_exec_exit(CPUState *cpu); |
1559 | |
1560 | void x86_cpu_list(void); |
1561 | int cpu_x86_support_mca_broadcast(CPUX86State *env); |
1562 | |
1563 | int cpu_get_pic_interrupt(CPUX86State *s); |
1564 | /* MSDOS compatibility mode FPU exception support */ |
1565 | void cpu_set_ferr(CPUX86State *s); |
1566 | /* mpx_helper.c */ |
1567 | void cpu_sync_bndcs_hflags(CPUX86State *env); |
1568 | |
/* this function must always be used to load data into the segment
   cache: it synchronizes the hflags with the segment cache values */
1571 | static inline void cpu_x86_load_seg_cache(CPUX86State *env, |
1572 | int seg_reg, unsigned int selector, |
1573 | target_ulong base, |
1574 | unsigned int limit, |
1575 | unsigned int flags) |
1576 | { |
1577 | SegmentCache *sc; |
1578 | unsigned int new_hflags; |
1579 | |
1580 | sc = &env->segs[seg_reg]; |
1581 | sc->selector = selector; |
1582 | sc->base = base; |
1583 | sc->limit = limit; |
1584 | sc->flags = flags; |
1585 | |
1586 | /* update the hidden flags */ |
1587 | { |
1588 | if (seg_reg == R_CS) { |
1589 | #ifdef TARGET_X86_64 |
1590 | if ((env->hflags & HF_LMA_MASK) && (flags & DESC_L_MASK)) { |
1591 | /* long mode */ |
1592 | env->hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK; |
1593 | env->hflags &= ~(HF_ADDSEG_MASK); |
1594 | } else |
1595 | #endif |
1596 | { |
1597 | /* legacy / compatibility case */ |
1598 | new_hflags = (env->segs[R_CS].flags & DESC_B_MASK) |
1599 | >> (DESC_B_SHIFT - HF_CS32_SHIFT); |
1600 | env->hflags = (env->hflags & ~(HF_CS32_MASK | HF_CS64_MASK)) | |
1601 | new_hflags; |
1602 | } |
1603 | } |
1604 | if (seg_reg == R_SS) { |
1605 | int cpl = (flags >> DESC_DPL_SHIFT) & 3; |
1606 | #if HF_CPL_MASK != 3 |
1607 | #error HF_CPL_MASK is hardcoded |
1608 | #endif |
1609 | env->hflags = (env->hflags & ~HF_CPL_MASK) | cpl; |
1610 | /* Possibly switch between BNDCFGS and BNDCFGU */ |
1611 | cpu_sync_bndcs_hflags(env); |
1612 | } |
1613 | new_hflags = (env->segs[R_SS].flags & DESC_B_MASK) |
1614 | >> (DESC_B_SHIFT - HF_SS32_SHIFT); |
1615 | if (env->hflags & HF_CS64_MASK) { |
1616 | /* zero base assumed for DS, ES and SS in long mode */ |
1617 | } else if (!(env->cr[0] & CR0_PE_MASK) || |
1618 | (env->eflags & VM_MASK) || |
1619 | !(env->hflags & HF_CS32_MASK)) { |
/* XXX: try to avoid this test. The problem comes from the
fact that in real mode or vm86 mode we only modify the
'base' and 'selector' fields of the segment cache to go
faster. A solution may be to force addseg to one in
translate-i386.c. */
1625 | new_hflags |= HF_ADDSEG_MASK; |
1626 | } else { |
1627 | new_hflags |= ((env->segs[R_DS].base | |
1628 | env->segs[R_ES].base | |
1629 | env->segs[R_SS].base) != 0) << |
1630 | HF_ADDSEG_SHIFT; |
1631 | } |
1632 | env->hflags = (env->hflags & |
1633 | ~(HF_SS32_MASK | HF_ADDSEG_MASK)) | new_hflags; |
1634 | } |
1635 | } |
1636 | |
1637 | static inline void cpu_x86_load_seg_cache_sipi(X86CPU *cpu, |
1638 | uint8_t sipi_vector) |
1639 | { |
1640 | CPUState *cs = CPU(cpu); |
1641 | CPUX86State *env = &cpu->env; |
1642 | |
1643 | env->eip = 0; |
1644 | cpu_x86_load_seg_cache(env, R_CS, sipi_vector << 8, |
1645 | sipi_vector << 12, |
1646 | env->segs[R_CS].limit, |
1647 | env->segs[R_CS].flags); |
1648 | cs->halted = 0; |
1649 | } |
1650 | |
1651 | int cpu_x86_get_descr_debug(CPUX86State *env, unsigned int selector, |
1652 | target_ulong *base, unsigned int *limit, |
1653 | unsigned int *flags); |
1654 | |
1655 | /* op_helper.c */ |
1656 | /* used for debug or cpu save/restore */ |
1657 | |
1658 | /* cpu-exec.c */ |
1659 | /* the following helpers are only usable in user mode simulation as |
1660 | they can trigger unexpected exceptions */ |
1661 | void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector); |
1662 | void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32); |
1663 | void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32); |
1664 | void cpu_x86_fxsave(CPUX86State *s, target_ulong ptr); |
1665 | void cpu_x86_fxrstor(CPUX86State *s, target_ulong ptr); |
1666 | |
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. Non-zero
   is returned if the signal was handled by the virtual CPU. */
1670 | int cpu_x86_signal_handler(int host_signum, void *pinfo, |
1671 | void *puc); |
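
/*
 * Minimal host-side sketch (user-mode emulation, POSIX host assumed;
 * the handler name is illustrative). Abort when the fault did not
 * belong to the guest:
 *
 *   static void host_segv_handler(int sig, siginfo_t *info, void *puc)
 *   {
 *       if (!cpu_x86_signal_handler(sig, info, puc)) {
 *           abort();
 *       }
 *   }
 */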
1672 | |
1673 | /* cpu.c */ |
1674 | void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count, |
1675 | uint32_t *eax, uint32_t *ebx, |
1676 | uint32_t *ecx, uint32_t *edx); |
1677 | void cpu_clear_apic_feature(CPUX86State *env); |
1678 | void host_cpuid(uint32_t function, uint32_t count, |
1679 | uint32_t *eax, uint32_t *ebx, uint32_t *ecx, uint32_t *edx); |
1680 | void host_vendor_fms(char *vendor, int *family, int *model, int *stepping); |
1681 | |
1682 | /* helper.c */ |
1683 | bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size, |
1684 | MMUAccessType access_type, int mmu_idx, |
1685 | bool probe, uintptr_t retaddr); |
1686 | void x86_cpu_set_a20(X86CPU *cpu, int a20_state); |
1687 | |
1688 | #ifndef CONFIG_USER_ONLY |
1689 | static inline int x86_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs) |
1690 | { |
1691 | return !!attrs.secure; |
1692 | } |
1693 | |
1694 | static inline AddressSpace *cpu_addressspace(CPUState *cs, MemTxAttrs attrs) |
1695 | { |
1696 | return cpu_get_address_space(cs, cpu_asidx_from_attrs(cs, attrs)); |
1697 | } |
1698 | |
1699 | uint8_t x86_ldub_phys(CPUState *cs, hwaddr addr); |
1700 | uint32_t x86_lduw_phys(CPUState *cs, hwaddr addr); |
1701 | uint32_t x86_ldl_phys(CPUState *cs, hwaddr addr); |
1702 | uint64_t x86_ldq_phys(CPUState *cs, hwaddr addr); |
1703 | void x86_stb_phys(CPUState *cs, hwaddr addr, uint8_t val); |
1704 | void x86_stl_phys_notdirty(CPUState *cs, hwaddr addr, uint32_t val); |
1705 | void x86_stw_phys(CPUState *cs, hwaddr addr, uint32_t val); |
1706 | void x86_stl_phys(CPUState *cs, hwaddr addr, uint32_t val); |
1707 | void x86_stq_phys(CPUState *cs, hwaddr addr, uint64_t val); |
1708 | #endif |
1709 | |
1710 | void breakpoint_handler(CPUState *cs); |
1711 | |
/* will be removed */
1713 | void cpu_x86_update_cr0(CPUX86State *env, uint32_t new_cr0); |
1714 | void cpu_x86_update_cr3(CPUX86State *env, target_ulong new_cr3); |
1715 | void cpu_x86_update_cr4(CPUX86State *env, uint32_t new_cr4); |
1716 | void cpu_x86_update_dr7(CPUX86State *env, uint32_t new_dr7); |
1717 | |
1718 | /* hw/pc.c */ |
1719 | uint64_t cpu_get_tsc(CPUX86State *env); |
1720 | |
1721 | /* XXX: This value should match the one returned by CPUID |
1722 | * and in exec.c */ |
1723 | # if defined(TARGET_X86_64) |
1724 | # define TCG_PHYS_ADDR_BITS 40 |
1725 | # else |
1726 | # define TCG_PHYS_ADDR_BITS 36 |
1727 | # endif |
1728 | |
1729 | #define PHYS_ADDR_MASK MAKE_64BIT_MASK(0, TCG_PHYS_ADDR_BITS) |
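/* e.g. with TARGET_X86_64, MAKE_64BIT_MASK(0, 40) == 0xffffffffffULL */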
1730 | |
1731 | #define X86_CPU_TYPE_SUFFIX "-" TYPE_X86_CPU |
1732 | #define X86_CPU_TYPE_NAME(name) (name X86_CPU_TYPE_SUFFIX) |
1733 | #define CPU_RESOLVING_TYPE TYPE_X86_CPU |
1734 | |
1735 | #ifdef TARGET_X86_64 |
1736 | #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu64") |
1737 | #else |
1738 | #define TARGET_DEFAULT_CPU_TYPE X86_CPU_TYPE_NAME("qemu32") |
1739 | #endif |
1740 | |
1741 | #define cpu_signal_handler cpu_x86_signal_handler |
1742 | #define cpu_list x86_cpu_list |
1743 | |
1744 | /* MMU modes definitions */ |
1745 | #define MMU_MODE0_SUFFIX _ksmap |
1746 | #define MMU_MODE1_SUFFIX _user |
1747 | #define MMU_MODE2_SUFFIX _knosmap /* SMAP disabled or CPL<3 && AC=1 */ |
1748 | #define MMU_KSMAP_IDX 0 |
1749 | #define MMU_USER_IDX 1 |
1750 | #define MMU_KNOSMAP_IDX 2 |
1751 | static inline int cpu_mmu_index(CPUX86State *env, bool ifetch) |
1752 | { |
1753 | return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX : |
1754 | (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK)) |
1755 | ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; |
1756 | } |
1757 | |
1758 | static inline int cpu_mmu_index_kernel(CPUX86State *env) |
1759 | { |
1760 | return !(env->hflags & HF_SMAP_MASK) ? MMU_KNOSMAP_IDX : |
1761 | ((env->hflags & HF_CPL_MASK) < 3 && (env->eflags & AC_MASK)) |
1762 | ? MMU_KNOSMAP_IDX : MMU_KSMAP_IDX; |
1763 | } |
1764 | |
1765 | #define CC_DST (env->cc_dst) |
1766 | #define CC_SRC (env->cc_src) |
1767 | #define CC_SRC2 (env->cc_src2) |
1768 | #define CC_OP (env->cc_op) |
1769 | |
1770 | /* n must be a constant to be efficient */ |
1771 | static inline target_long lshift(target_long x, int n) |
1772 | { |
1773 | if (n >= 0) { |
1774 | return x << n; |
1775 | } else { |
1776 | return x >> (-n); |
1777 | } |
1778 | } |
1779 | |
1780 | /* float macros */ |
1781 | #define FT0 (env->ft0) |
1782 | #define ST0 (env->fpregs[env->fpstt].d) |
1783 | #define ST(n) (env->fpregs[(env->fpstt + (n)) & 7].d) |
1784 | #define ST1 ST(1) |
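
/*
 * The FPU register file is a circular stack indexed by fpstt: with
 * fpstt == 6, ST0 is fpregs[6].d and ST(3) is fpregs[(6 + 3) & 7].d,
 * i.e. fpregs[1].d, wrapping around the 8-entry array.
 */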
1785 | |
1786 | /* translate.c */ |
1787 | void tcg_x86_init(void); |
1788 | |
1789 | typedef CPUX86State CPUArchState; |
1790 | typedef X86CPU ArchCPU; |
1791 | |
1792 | #include "exec/cpu-all.h" |
1793 | #include "svm.h" |
1794 | |
1795 | #if !defined(CONFIG_USER_ONLY) |
1796 | #include "hw/i386/apic.h" |
1797 | #endif |
1798 | |
1799 | static inline void cpu_get_tb_cpu_state(CPUX86State *env, target_ulong *pc, |
1800 | target_ulong *cs_base, uint32_t *flags) |
1801 | { |
1802 | *cs_base = env->segs[R_CS].base; |
1803 | *pc = *cs_base + env->eip; |
1804 | *flags = env->hflags | |
1805 | (env->eflags & (IOPL_MASK | TF_MASK | RF_MASK | VM_MASK | AC_MASK)); |
1806 | } |
1807 | |
1808 | void do_cpu_init(X86CPU *cpu); |
1809 | void do_cpu_sipi(X86CPU *cpu); |
1810 | |
1811 | #define MCE_INJECT_BROADCAST 1 |
1812 | #define MCE_INJECT_UNCOND_AO 2 |
1813 | |
1814 | void cpu_x86_inject_mce(Monitor *mon, X86CPU *cpu, int bank, |
1815 | uint64_t status, uint64_t mcg_status, uint64_t addr, |
1816 | uint64_t misc, int flags); |
1817 | |
1818 | /* excp_helper.c */ |
1819 | void QEMU_NORETURN raise_exception(CPUX86State *env, int exception_index); |
1820 | void QEMU_NORETURN raise_exception_ra(CPUX86State *env, int exception_index, |
1821 | uintptr_t retaddr); |
1822 | void QEMU_NORETURN raise_exception_err(CPUX86State *env, int exception_index, |
1823 | int error_code); |
1824 | void QEMU_NORETURN raise_exception_err_ra(CPUX86State *env, int exception_index, |
1825 | int error_code, uintptr_t retaddr); |
void QEMU_NORETURN raise_interrupt(CPUX86State *env, int intno, int is_int,
int error_code, int next_eip_addend);
1828 | |
1829 | /* cc_helper.c */ |
1830 | extern const uint8_t parity_table[256]; |
1831 | uint32_t cpu_cc_compute_all(CPUX86State *env1, int op); |
1832 | |
1833 | static inline uint32_t cpu_compute_eflags(CPUX86State *env) |
1834 | { |
1835 | uint32_t eflags = env->eflags; |
1836 | if (tcg_enabled()) { |
1837 | eflags |= cpu_cc_compute_all(env, CC_OP) | (env->df & DF_MASK); |
1838 | } |
1839 | return eflags; |
1840 | } |
1841 | |
1842 | /* NOTE: the translator must set DisasContext.cc_op to CC_OP_EFLAGS |
1843 | * after generating a call to a helper that uses this. |
1844 | */ |
1845 | static inline void cpu_load_eflags(CPUX86State *env, int eflags, |
1846 | int update_mask) |
1847 | { |
1848 | CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C); |
1849 | CC_OP = CC_OP_EFLAGS; |
1850 | env->df = 1 - (2 * ((eflags >> 10) & 1)); |
1851 | env->eflags = (env->eflags & ~update_mask) | |
1852 | (eflags & update_mask) | 0x2; |
1853 | } |
1854 | |
/* load EFER and update the corresponding hflags. XXX: do consistency
   checks with CPUID bits? */
1857 | static inline void cpu_load_efer(CPUX86State *env, uint64_t val) |
1858 | { |
1859 | env->efer = val; |
1860 | env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK); |
1861 | if (env->efer & MSR_EFER_LMA) { |
1862 | env->hflags |= HF_LMA_MASK; |
1863 | } |
1864 | if (env->efer & MSR_EFER_SVME) { |
1865 | env->hflags |= HF_SVME_MASK; |
1866 | } |
1867 | } |
1868 | |
1869 | static inline MemTxAttrs cpu_get_mem_attrs(CPUX86State *env) |
1870 | { |
1871 | return ((MemTxAttrs) { .secure = (env->hflags & HF_SMM_MASK) != 0 }); |
1872 | } |
1873 | |
1874 | static inline int32_t x86_get_a20_mask(CPUX86State *env) |
1875 | { |
1876 | if (env->hflags & HF_SMM_MASK) { |
1877 | return -1; |
1878 | } else { |
1879 | return env->a20_mask; |
1880 | } |
1881 | } |
1882 | |
1883 | static inline bool cpu_has_vmx(CPUX86State *env) |
1884 | { |
1885 | return env->features[FEAT_1_ECX] & CPUID_EXT_VMX; |
1886 | } |
1887 | |
1888 | /* |
1889 | * In order for a vCPU to enter VMX operation it must have CR4.VMXE set. |
1890 | * Since it was set, CR4.VMXE must remain set as long as vCPU is in |
1891 | * VMX operation. This is because CR4.VMXE is one of the bits set |
1892 | * in MSR_IA32_VMX_CR4_FIXED1. |
1893 | * |
1894 | * There is one exception to above statement when vCPU enters SMM mode. |
1895 | * When a vCPU enters SMM mode, it temporarily exit VMX operation and |
1896 | * may also reset CR4.VMXE during execution in SMM mode. |
1897 | * When vCPU exits SMM mode, vCPU state is restored to be in VMX operation |
1898 | * and CR4.VMXE is restored to it's original value of being set. |
1899 | * |
1900 | * Therefore, when vCPU is not in SMM mode, we can infer whether |
1901 | * VMX is being used by examining CR4.VMXE. Otherwise, we cannot |
1902 | * know for certain. |
1903 | */ |
1904 | static inline bool cpu_vmx_maybe_enabled(CPUX86State *env) |
1905 | { |
1906 | return cpu_has_vmx(env) && |
1907 | ((env->cr[4] & CR4_VMXE_MASK) || (env->hflags & HF_SMM_MASK)); |
1908 | } |
1909 | |
1910 | /* fpu_helper.c */ |
1911 | void update_fp_status(CPUX86State *env); |
1912 | void update_mxcsr_status(CPUX86State *env); |
1913 | |
1914 | static inline void cpu_set_mxcsr(CPUX86State *env, uint32_t mxcsr) |
1915 | { |
1916 | env->mxcsr = mxcsr; |
1917 | if (tcg_enabled()) { |
1918 | update_mxcsr_status(env); |
1919 | } |
1920 | } |
1921 | |
1922 | static inline void cpu_set_fpuc(CPUX86State *env, uint16_t fpuc) |
1923 | { |
1924 | env->fpuc = fpuc; |
1925 | if (tcg_enabled()) { |
1926 | update_fp_status(env); |
1927 | } |
1928 | } |
1929 | |
1930 | /* mem_helper.c */ |
1931 | void helper_lock_init(void); |
1932 | |
1933 | /* svm_helper.c */ |
1934 | void cpu_svm_check_intercept_param(CPUX86State *env1, uint32_t type, |
1935 | uint64_t param, uintptr_t retaddr); |
void QEMU_NORETURN cpu_vmexit(CPUX86State *env, uint32_t exit_code,
uint64_t exit_info_1, uintptr_t retaddr);
1938 | void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1); |
1939 | |
1940 | /* seg_helper.c */ |
1941 | void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw); |
1942 | |
1943 | /* smm_helper.c */ |
1944 | void do_smm_enter(X86CPU *cpu); |
1945 | |
1946 | /* apic.c */ |
1947 | void cpu_report_tpr_access(CPUX86State *env, TPRAccess access); |
1948 | void apic_handle_tpr_access_report(DeviceState *d, target_ulong ip, |
1949 | TPRAccess access); |
1950 | |
1951 | |
1952 | /* Change the value of a KVM-specific default |
1953 | * |
1954 | * If value is NULL, no default will be set and the original |
1955 | * value from the CPU model table will be kept. |
1956 | * |
1957 | * It is valid to call this function only for properties that |
1958 | * are already present in the kvm_default_props table. |
1959 | */ |
1960 | void x86_cpu_change_kvm_default(const char *prop, const char *value); |
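
/*
 * Usage sketch (the property name is illustrative and assumed to be
 * present in kvm_default_props):
 *
 *   x86_cpu_change_kvm_default("kvm-pv-eoi", "off");
 *
 * Passing a NULL value instead drops the KVM-specific default so the
 * CPU model table value applies again.
 */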
1961 | |
1962 | /* Special values for X86CPUVersion: */ |
1963 | |
1964 | /* Resolve to latest CPU version */ |
1965 | #define CPU_VERSION_LATEST -1 |
1966 | |
1967 | /* |
1968 | * Resolve to version defined by current machine type. |
1969 | * See x86_cpu_set_default_version() |
1970 | */ |
1971 | #define CPU_VERSION_AUTO -2 |
1972 | |
1973 | /* Don't resolve to any versioned CPU models, like old QEMU versions */ |
1974 | #define CPU_VERSION_LEGACY 0 |
1975 | |
1976 | typedef int X86CPUVersion; |
1977 | |
1978 | /* |
1979 | * Set default CPU model version for CPU models having |
1980 | * version == CPU_VERSION_AUTO. |
1981 | */ |
1982 | void x86_cpu_set_default_version(X86CPUVersion version); |
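
/*
 * For example (model name illustrative): with "-cpu Skylake-Client",
 * CPU_VERSION_AUTO resolves through the version set here,
 * CPU_VERSION_LATEST picks the newest Skylake-Client-vN variant, and
 * CPU_VERSION_LEGACY keeps the unversioned definition, as old QEMU
 * versions did.
 */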
1983 | |
/* Return the name of a 32-bit register, from an R_* constant */
1985 | const char *get_register_name_32(unsigned int reg); |
1986 | |
1987 | void enable_compat_apic_id_mode(void); |
1988 | |
1989 | #define APIC_DEFAULT_ADDRESS 0xfee00000 |
1990 | #define APIC_SPACE_SIZE 0x100000 |
1991 | |
1992 | void x86_cpu_dump_local_apic_state(CPUState *cs, int flags); |
1993 | |
1994 | /* cpu.c */ |
1995 | bool cpu_is_bsp(X86CPU *cpu); |
1996 | |
1997 | void x86_cpu_xrstor_all_areas(X86CPU *cpu, const X86XSaveArea *buf); |
1998 | void x86_cpu_xsave_all_areas(X86CPU *cpu, X86XSaveArea *buf); |
void x86_update_hflags(CPUX86State *env);
2000 | |
2001 | static inline bool hyperv_feat_enabled(X86CPU *cpu, int feat) |
2002 | { |
2003 | return !!(cpu->hyperv_features & BIT(feat)); |
2004 | } |
2005 | |
2006 | #endif /* I386_CPU_H */ |
2007 | |