1 | /* |
2 | * QEMU KVM support |
3 | * |
4 | * Copyright (C) 2006-2008 Qumranet Technologies |
5 | * Copyright IBM, Corp. 2008 |
6 | * |
7 | * Authors: |
8 | * Anthony Liguori <aliguori@us.ibm.com> |
9 | * |
10 | * This work is licensed under the terms of the GNU GPL, version 2 or later. |
11 | * See the COPYING file in the top-level directory. |
12 | * |
13 | */ |
14 | |
15 | #include "qemu/osdep.h" |
16 | #include "qapi/error.h" |
17 | #include <sys/ioctl.h> |
18 | #include <sys/utsname.h> |
19 | |
20 | #include <linux/kvm.h> |
21 | #include "standard-headers/asm-x86/kvm_para.h" |
22 | |
23 | #include "cpu.h" |
24 | #include "sysemu/sysemu.h" |
25 | #include "sysemu/hw_accel.h" |
26 | #include "sysemu/kvm_int.h" |
27 | #include "sysemu/reset.h" |
28 | #include "sysemu/runstate.h" |
29 | #include "kvm_i386.h" |
30 | #include "hyperv.h" |
31 | #include "hyperv-proto.h" |
32 | |
33 | #include "exec/gdbstub.h" |
34 | #include "qemu/host-utils.h" |
35 | #include "qemu/main-loop.h" |
36 | #include "qemu/config-file.h" |
37 | #include "qemu/error-report.h" |
38 | #include "hw/i386/pc.h" |
39 | #include "hw/i386/apic.h" |
40 | #include "hw/i386/apic_internal.h" |
41 | #include "hw/i386/apic-msidef.h" |
42 | #include "hw/i386/intel_iommu.h" |
43 | #include "hw/i386/x86-iommu.h" |
44 | |
45 | #include "hw/pci/pci.h" |
46 | #include "hw/pci/msi.h" |
47 | #include "hw/pci/msix.h" |
48 | #include "migration/blocker.h" |
49 | #include "exec/memattrs.h" |
50 | #include "trace.h" |
51 | |
52 | //#define DEBUG_KVM |
53 | |
54 | #ifdef DEBUG_KVM |
55 | #define DPRINTF(fmt, ...) \ |
56 | do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) |
57 | #else |
58 | #define DPRINTF(fmt, ...) \ |
59 | do { } while (0) |
60 | #endif |
61 | |
62 | #define MSR_KVM_WALL_CLOCK 0x11 |
63 | #define MSR_KVM_SYSTEM_TIME 0x12 |
64 | |
65 | /* A 4096-byte buffer can hold the 8-byte kvm_msrs header, plus |
66 | * 255 kvm_msr_entry structs */ |
67 | #define MSR_BUF_SIZE 4096 |
68 | |
69 | const KVMCapabilityInfo kvm_arch_required_capabilities[] = { |
70 | KVM_CAP_INFO(SET_TSS_ADDR), |
71 | KVM_CAP_INFO(EXT_CPUID), |
72 | KVM_CAP_INFO(MP_STATE), |
73 | KVM_CAP_LAST_INFO |
74 | }; |
75 | |
76 | static bool has_msr_star; |
77 | static bool has_msr_hsave_pa; |
78 | static bool has_msr_tsc_aux; |
79 | static bool has_msr_tsc_adjust; |
80 | static bool has_msr_tsc_deadline; |
81 | static bool has_msr_feature_control; |
82 | static bool has_msr_misc_enable; |
83 | static bool has_msr_smbase; |
84 | static bool has_msr_bndcfgs; |
85 | static int lm_capable_kernel; |
86 | static bool has_msr_hv_hypercall; |
87 | static bool has_msr_hv_crash; |
88 | static bool has_msr_hv_reset; |
89 | static bool has_msr_hv_vpindex; |
90 | static bool hv_vpindex_settable; |
91 | static bool has_msr_hv_runtime; |
92 | static bool has_msr_hv_synic; |
93 | static bool has_msr_hv_stimer; |
94 | static bool has_msr_hv_frequencies; |
95 | static bool has_msr_hv_reenlightenment; |
96 | static bool has_msr_xss; |
97 | static bool has_msr_spec_ctrl; |
98 | static bool has_msr_virt_ssbd; |
99 | static bool has_msr_smi_count; |
100 | static bool has_msr_arch_capabs; |
101 | static bool has_msr_core_capabs; |
102 | |
103 | static uint32_t has_architectural_pmu_version; |
104 | static uint32_t num_architectural_pmu_gp_counters; |
105 | static uint32_t num_architectural_pmu_fixed_counters; |
106 | |
107 | static int has_xsave; |
108 | static int has_xcrs; |
109 | static int has_pit_state2; |
110 | static int has_exception_payload; |
111 | |
112 | static bool has_msr_mcg_ext_ctl; |
113 | |
114 | static struct kvm_cpuid2 *cpuid_cache; |
115 | static struct kvm_msr_list *kvm_feature_msrs; |
116 | |
117 | int kvm_has_pit_state2(void) |
118 | { |
119 | return has_pit_state2; |
120 | } |
121 | |
122 | bool kvm_has_smm(void) |
123 | { |
124 | return kvm_check_extension(kvm_state, KVM_CAP_X86_SMM); |
125 | } |
126 | |
127 | bool kvm_has_adjust_clock_stable(void) |
128 | { |
129 | int ret = kvm_check_extension(kvm_state, KVM_CAP_ADJUST_CLOCK); |
130 | |
131 | return (ret == KVM_CLOCK_TSC_STABLE); |
132 | } |
133 | |
134 | bool kvm_has_exception_payload(void) |
135 | { |
136 | return has_exception_payload; |
137 | } |
138 | |
139 | bool kvm_allows_irq0_override(void) |
140 | { |
141 | return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing(); |
142 | } |
143 | |
144 | static bool kvm_x2apic_api_set_flags(uint64_t flags) |
145 | { |
146 | KVMState *s = KVM_STATE(current_machine->accelerator); |
147 | |
148 | return !kvm_vm_enable_cap(s, KVM_CAP_X2APIC_API, 0, flags); |
149 | } |
150 | |
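/*
 * Evaluate 'fn' once and cache the result in '_result'.  This is a
 * statement expression: on subsequent calls the cached value is returned
 * directly from the *calling* function, so MEMORIZE() may only be used
 * as the final statement of a function returning _result's type (see
 * kvm_enable_x2apic() below).
 */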
151 | #define MEMORIZE(fn, _result) \ |
152 | ({ \ |
153 | static bool _memorized; \ |
154 | \ |
155 | if (_memorized) { \ |
156 | return _result; \ |
157 | } \ |
158 | _memorized = true; \ |
159 | _result = fn; \ |
160 | }) |
161 | |
162 | static bool has_x2apic_api; |
163 | |
164 | bool kvm_has_x2apic_api(void) |
165 | { |
166 | return has_x2apic_api; |
167 | } |
168 | |
169 | bool kvm_enable_x2apic(void) |
170 | { |
171 | return MEMORIZE( |
172 | kvm_x2apic_api_set_flags(KVM_X2APIC_API_USE_32BIT_IDS | |
173 | KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK), |
174 | has_x2apic_api); |
175 | } |
176 | |
177 | bool kvm_hv_vpindex_settable(void) |
178 | { |
179 | return hv_vpindex_settable; |
180 | } |
181 | |
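/*
 * Read the guest TSC via KVM_GET_MSRS.  While the VM is stopped the value
 * cannot change, so it is read at most once per stop and marked valid until
 * the next transition back to the running state (see cpu_update_state()).
 */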
182 | static int kvm_get_tsc(CPUState *cs) |
183 | { |
184 | X86CPU *cpu = X86_CPU(cs); |
185 | CPUX86State *env = &cpu->env; |
186 | struct { |
187 | struct kvm_msrs info; |
188 | struct kvm_msr_entry entries[1]; |
189 | } msr_data; |
190 | int ret; |
191 | |
192 | if (env->tsc_valid) { |
193 | return 0; |
194 | } |
195 | |
196 | memset(&msr_data, 0, sizeof(msr_data)); |
197 | msr_data.info.nmsrs = 1; |
198 | msr_data.entries[0].index = MSR_IA32_TSC; |
199 | env->tsc_valid = !runstate_is_running(); |
200 | |
201 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, &msr_data); |
202 | if (ret < 0) { |
203 | return ret; |
204 | } |
205 | |
206 | assert(ret == 1); |
207 | env->tsc = msr_data.entries[0].data; |
208 | return 0; |
209 | } |
210 | |
211 | static inline void do_kvm_synchronize_tsc(CPUState *cpu, run_on_cpu_data arg) |
212 | { |
213 | kvm_get_tsc(cpu); |
214 | } |
215 | |
216 | void kvm_synchronize_all_tsc(void) |
217 | { |
218 | CPUState *cpu; |
219 | |
220 | if (kvm_enabled()) { |
221 | CPU_FOREACH(cpu) { |
222 | run_on_cpu(cpu, do_kvm_synchronize_tsc, RUN_ON_CPU_NULL); |
223 | } |
224 | } |
225 | } |
226 | |
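/*
 * KVM_GET_SUPPORTED_CPUID requires the caller to guess a sufficient buffer
 * size.  Return NULL if 'max' entries do not fit so that the caller can
 * retry with a bigger buffer.
 */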
227 | static struct kvm_cpuid2 *try_get_cpuid(KVMState *s, int max) |
228 | { |
229 | struct kvm_cpuid2 *cpuid; |
230 | int r, size; |
231 | |
232 | size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); |
233 | cpuid = g_malloc0(size); |
234 | cpuid->nent = max; |
235 | r = kvm_ioctl(s, KVM_GET_SUPPORTED_CPUID, cpuid); |
236 | if (r == 0 && cpuid->nent >= max) { |
237 | r = -E2BIG; |
238 | } |
239 | if (r < 0) { |
240 | if (r == -E2BIG) { |
241 | g_free(cpuid); |
242 | return NULL; |
243 | } else { |
            fprintf(stderr, "KVM_GET_SUPPORTED_CPUID failed: %s\n",
245 | strerror(-r)); |
246 | exit(1); |
247 | } |
248 | } |
249 | return cpuid; |
250 | } |
251 | |
252 | /* Run KVM_GET_SUPPORTED_CPUID ioctl(), allocating a buffer large enough |
253 | * for all entries. |
254 | */ |
255 | static struct kvm_cpuid2 *get_supported_cpuid(KVMState *s) |
256 | { |
257 | struct kvm_cpuid2 *cpuid; |
258 | int max = 1; |
259 | |
260 | if (cpuid_cache != NULL) { |
261 | return cpuid_cache; |
262 | } |
263 | while ((cpuid = try_get_cpuid(s, max)) == NULL) { |
264 | max *= 2; |
265 | } |
266 | cpuid_cache = cpuid; |
267 | return cpuid; |
268 | } |
269 | |
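/*
 * Mapping from KVM capabilities to KVM_CPUID_FEATURES bits, used as a
 * fallback on old kernels whose KVM_GET_SUPPORTED_CPUID does not report
 * the paravirt feature leaf (see the end of kvm_arch_get_supported_cpuid).
 */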
270 | static const struct kvm_para_features { |
271 | int cap; |
272 | int feature; |
273 | } para_features[] = { |
274 | { KVM_CAP_CLOCKSOURCE, KVM_FEATURE_CLOCKSOURCE }, |
275 | { KVM_CAP_NOP_IO_DELAY, KVM_FEATURE_NOP_IO_DELAY }, |
276 | { KVM_CAP_PV_MMU, KVM_FEATURE_MMU_OP }, |
277 | { KVM_CAP_ASYNC_PF, KVM_FEATURE_ASYNC_PF }, |
278 | }; |
279 | |
280 | static int get_para_features(KVMState *s) |
281 | { |
282 | int i, features = 0; |
283 | |
284 | for (i = 0; i < ARRAY_SIZE(para_features); i++) { |
285 | if (kvm_check_extension(s, para_features[i].cap)) { |
286 | features |= (1 << para_features[i].feature); |
287 | } |
288 | } |
289 | |
290 | return features; |
291 | } |
292 | |
293 | static bool host_tsx_blacklisted(void) |
294 | { |
    int family, model, stepping;
296 | char vendor[CPUID_VENDOR_SZ + 1]; |
297 | |
298 | host_vendor_fms(vendor, &family, &model, &stepping); |
299 | |
300 | /* Check if we are running on a Haswell host known to have broken TSX */ |
301 | return !strcmp(vendor, CPUID_VENDOR_INTEL) && |
302 | (family == 6) && |
303 | ((model == 63 && stepping < 4) || |
304 | model == 60 || model == 69 || model == 70); |
305 | } |
306 | |
307 | /* Returns the value for a specific register on the cpuid entry |
308 | */ |
309 | static uint32_t cpuid_entry_get_reg(struct kvm_cpuid_entry2 *entry, int reg) |
310 | { |
311 | uint32_t ret = 0; |
312 | switch (reg) { |
313 | case R_EAX: |
314 | ret = entry->eax; |
315 | break; |
316 | case R_EBX: |
317 | ret = entry->ebx; |
318 | break; |
319 | case R_ECX: |
320 | ret = entry->ecx; |
321 | break; |
322 | case R_EDX: |
323 | ret = entry->edx; |
324 | break; |
325 | } |
326 | return ret; |
327 | } |
328 | |
329 | /* Find matching entry for function/index on kvm_cpuid2 struct |
330 | */ |
331 | static struct kvm_cpuid_entry2 *cpuid_find_entry(struct kvm_cpuid2 *cpuid, |
332 | uint32_t function, |
333 | uint32_t index) |
334 | { |
335 | int i; |
336 | for (i = 0; i < cpuid->nent; ++i) { |
337 | if (cpuid->entries[i].function == function && |
338 | cpuid->entries[i].index == index) { |
339 | return &cpuid->entries[i]; |
340 | } |
341 | } |
342 | /* not found: */ |
343 | return NULL; |
344 | } |
345 | |
346 | uint32_t kvm_arch_get_supported_cpuid(KVMState *s, uint32_t function, |
347 | uint32_t index, int reg) |
348 | { |
349 | struct kvm_cpuid2 *cpuid; |
350 | uint32_t ret = 0; |
351 | uint32_t cpuid_1_edx; |
352 | bool found = false; |
353 | |
354 | cpuid = get_supported_cpuid(s); |
355 | |
356 | struct kvm_cpuid_entry2 *entry = cpuid_find_entry(cpuid, function, index); |
357 | if (entry) { |
358 | found = true; |
359 | ret = cpuid_entry_get_reg(entry, reg); |
360 | } |
361 | |
362 | /* Fixups for the data returned by KVM, below */ |
363 | |
364 | if (function == 1 && reg == R_EDX) { |
365 | /* KVM before 2.6.30 misreports the following features */ |
366 | ret |= CPUID_MTRR | CPUID_PAT | CPUID_MCE | CPUID_MCA; |
367 | } else if (function == 1 && reg == R_ECX) { |
368 | /* We can set the hypervisor flag, even if KVM does not return it on |
369 | * GET_SUPPORTED_CPUID |
370 | */ |
371 | ret |= CPUID_EXT_HYPERVISOR; |
372 | /* tsc-deadline flag is not returned by GET_SUPPORTED_CPUID, but it |
373 | * can be enabled if the kernel has KVM_CAP_TSC_DEADLINE_TIMER, |
374 | * and the irqchip is in the kernel. |
375 | */ |
376 | if (kvm_irqchip_in_kernel() && |
377 | kvm_check_extension(s, KVM_CAP_TSC_DEADLINE_TIMER)) { |
378 | ret |= CPUID_EXT_TSC_DEADLINE_TIMER; |
379 | } |
380 | |
381 | /* x2apic is reported by GET_SUPPORTED_CPUID, but it can't be enabled |
382 | * without the in-kernel irqchip |
383 | */ |
384 | if (!kvm_irqchip_in_kernel()) { |
385 | ret &= ~CPUID_EXT_X2APIC; |
386 | } |
387 | |
388 | if (enable_cpu_pm) { |
389 | int disable_exits = kvm_check_extension(s, |
390 | KVM_CAP_X86_DISABLE_EXITS); |
391 | |
392 | if (disable_exits & KVM_X86_DISABLE_EXITS_MWAIT) { |
393 | ret |= CPUID_EXT_MONITOR; |
394 | } |
395 | } |
396 | } else if (function == 6 && reg == R_EAX) { |
397 | ret |= CPUID_6_EAX_ARAT; /* safe to allow because of emulated APIC */ |
398 | } else if (function == 7 && index == 0 && reg == R_EBX) { |
399 | if (host_tsx_blacklisted()) { |
400 | ret &= ~(CPUID_7_0_EBX_RTM | CPUID_7_0_EBX_HLE); |
401 | } |
402 | } else if (function == 7 && index == 0 && reg == R_EDX) { |
403 | /* |
404 | * Linux v4.17-v4.20 incorrectly return ARCH_CAPABILITIES on SVM hosts. |
405 | * We can detect the bug by checking if MSR_IA32_ARCH_CAPABILITIES is |
406 | * returned by KVM_GET_MSR_INDEX_LIST. |
407 | */ |
408 | if (!has_msr_arch_capabs) { |
409 | ret &= ~CPUID_7_0_EDX_ARCH_CAPABILITIES; |
410 | } |
411 | } else if (function == 0x80000001 && reg == R_ECX) { |
412 | /* |
413 | * It's safe to enable TOPOEXT even if it's not returned by |
414 | * GET_SUPPORTED_CPUID. Unconditionally enabling TOPOEXT here allows |
415 | * us to keep CPU models including TOPOEXT runnable on older kernels. |
416 | */ |
417 | ret |= CPUID_EXT3_TOPOEXT; |
418 | } else if (function == 0x80000001 && reg == R_EDX) { |
419 | /* On Intel, kvm returns cpuid according to the Intel spec, |
420 | * so add missing bits according to the AMD spec: |
421 | */ |
422 | cpuid_1_edx = kvm_arch_get_supported_cpuid(s, 1, 0, R_EDX); |
423 | ret |= cpuid_1_edx & CPUID_EXT2_AMD_ALIASES; |
424 | } else if (function == KVM_CPUID_FEATURES && reg == R_EAX) { |
425 | /* kvm_pv_unhalt is reported by GET_SUPPORTED_CPUID, but it can't |
426 | * be enabled without the in-kernel irqchip |
427 | */ |
428 | if (!kvm_irqchip_in_kernel()) { |
429 | ret &= ~(1U << KVM_FEATURE_PV_UNHALT); |
430 | } |
431 | } else if (function == KVM_CPUID_FEATURES && reg == R_EDX) { |
432 | ret |= 1U << KVM_HINTS_REALTIME; |
        found = true;
434 | } |
435 | |
436 | /* fallback for older kernels */ |
437 | if ((function == KVM_CPUID_FEATURES) && !found) { |
438 | ret = get_para_features(s); |
439 | } |
440 | |
441 | return ret; |
442 | } |
443 | |
444 | uint32_t kvm_arch_get_supported_msr_feature(KVMState *s, uint32_t index) |
445 | { |
446 | struct { |
447 | struct kvm_msrs info; |
448 | struct kvm_msr_entry entries[1]; |
449 | } msr_data; |
    int ret;
451 | |
452 | if (kvm_feature_msrs == NULL) { /* Host doesn't support feature MSRs */ |
453 | return 0; |
454 | } |
455 | |
456 | /* Check if requested MSR is supported feature MSR */ |
457 | int i; |
    for (i = 0; i < kvm_feature_msrs->nmsrs; i++) {
        if (kvm_feature_msrs->indices[i] == index) {
            break;
        }
    }
462 | if (i == kvm_feature_msrs->nmsrs) { |
463 | return 0; /* if the feature MSR is not supported, simply return 0 */ |
464 | } |
465 | |
466 | msr_data.info.nmsrs = 1; |
467 | msr_data.entries[0].index = index; |
468 | |
469 | ret = kvm_ioctl(s, KVM_GET_MSRS, &msr_data); |
470 | if (ret != 1) { |
        error_report("KVM get MSR (index=0x%x) feature failed, %s",
472 | index, strerror(-ret)); |
473 | exit(1); |
474 | } |
475 | |
476 | return msr_data.entries[0].data; |
477 | } |
478 | |
479 | |
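/*
 * Guest RAM pages that hardware has reported as poisoned.  They are
 * remapped with fresh memory on VM reset so that a rebooted guest does not
 * inherit the poison (see kvm_unpoison_all()).
 */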
480 | typedef struct HWPoisonPage { |
481 | ram_addr_t ram_addr; |
482 | QLIST_ENTRY(HWPoisonPage) list; |
483 | } HWPoisonPage; |
484 | |
485 | static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list = |
486 | QLIST_HEAD_INITIALIZER(hwpoison_page_list); |
487 | |
488 | static void kvm_unpoison_all(void *param) |
489 | { |
490 | HWPoisonPage *page, *next_page; |
491 | |
492 | QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) { |
493 | QLIST_REMOVE(page, list); |
494 | qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE); |
495 | g_free(page); |
496 | } |
497 | } |
498 | |
499 | static void kvm_hwpoison_page_add(ram_addr_t ram_addr) |
500 | { |
501 | HWPoisonPage *page; |
502 | |
503 | QLIST_FOREACH(page, &hwpoison_page_list, list) { |
504 | if (page->ram_addr == ram_addr) { |
505 | return; |
506 | } |
507 | } |
508 | page = g_new(HWPoisonPage, 1); |
509 | page->ram_addr = ram_addr; |
510 | QLIST_INSERT_HEAD(&hwpoison_page_list, page, list); |
511 | } |
512 | |
513 | static int kvm_get_mce_cap_supported(KVMState *s, uint64_t *mce_cap, |
514 | int *max_banks) |
515 | { |
516 | int r; |
517 | |
518 | r = kvm_check_extension(s, KVM_CAP_MCE); |
519 | if (r > 0) { |
520 | *max_banks = r; |
521 | return kvm_ioctl(s, KVM_X86_GET_MCE_CAP_SUPPORTED, mce_cap); |
522 | } |
523 | return -ENOSYS; |
524 | } |
525 | |
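/*
 * Inject a machine check corresponding to a SIGBUS code: BUS_MCEERR_AR
 * (action required) is reported as an unrecoverable data-load error at
 * 'paddr', BUS_MCEERR_AO (action optional) as a recoverable memory
 * scrubbing error.  The event is broadcast to all vCPUs unless the guest
 * has enabled local machine check (LMCE).
 */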
526 | static void kvm_mce_inject(X86CPU *cpu, hwaddr paddr, int code) |
527 | { |
528 | CPUState *cs = CPU(cpu); |
529 | CPUX86State *env = &cpu->env; |
530 | uint64_t status = MCI_STATUS_VAL | MCI_STATUS_UC | MCI_STATUS_EN | |
531 | MCI_STATUS_MISCV | MCI_STATUS_ADDRV | MCI_STATUS_S; |
532 | uint64_t mcg_status = MCG_STATUS_MCIP; |
533 | int flags = 0; |
534 | |
535 | if (code == BUS_MCEERR_AR) { |
536 | status |= MCI_STATUS_AR | 0x134; |
537 | mcg_status |= MCG_STATUS_EIPV; |
538 | } else { |
539 | status |= 0xc0; |
540 | mcg_status |= MCG_STATUS_RIPV; |
541 | } |
542 | |
543 | flags = cpu_x86_support_mca_broadcast(env) ? MCE_INJECT_BROADCAST : 0; |
544 | /* We need to read back the value of MSR_EXT_MCG_CTL that was set by the |
545 | * guest kernel back into env->mcg_ext_ctl. |
546 | */ |
547 | cpu_synchronize_state(cs); |
548 | if (env->mcg_ext_ctl & MCG_EXT_CTL_LMCE_EN) { |
549 | mcg_status |= MCG_STATUS_LMCE; |
550 | flags = 0; |
551 | } |
552 | |
553 | cpu_x86_inject_mce(NULL, cpu, 9, status, mcg_status, paddr, |
554 | (MCM_ADDR_PHYS << 6) | 0xc, flags); |
555 | } |
556 | |
557 | static void hardware_memory_error(void) |
558 | { |
    fprintf(stderr, "Hardware memory error!\n");
560 | exit(1); |
561 | } |
562 | |
563 | void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr) |
564 | { |
565 | X86CPU *cpu = X86_CPU(c); |
566 | CPUX86State *env = &cpu->env; |
567 | ram_addr_t ram_addr; |
568 | hwaddr paddr; |
569 | |
570 | /* If we get an action required MCE, it has been injected by KVM |
571 | * while the VM was running. An action optional MCE instead should |
572 | * be coming from the main thread, which qemu_init_sigbus identifies |
573 | * as the "early kill" thread. |
574 | */ |
575 | assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO); |
576 | |
577 | if ((env->mcg_cap & MCG_SER_P) && addr) { |
578 | ram_addr = qemu_ram_addr_from_host(addr); |
579 | if (ram_addr != RAM_ADDR_INVALID && |
580 | kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) { |
581 | kvm_hwpoison_page_add(ram_addr); |
582 | kvm_mce_inject(cpu, paddr, code); |
583 | return; |
584 | } |
585 | |
586 | fprintf(stderr, "Hardware memory error for memory used by " |
                "QEMU itself instead of guest system!\n");
588 | } |
589 | |
590 | if (code == BUS_MCEERR_AR) { |
591 | hardware_memory_error(); |
592 | } |
593 | |
594 | /* Hope we are lucky for AO MCE */ |
595 | } |
596 | |
597 | static void kvm_reset_exception(CPUX86State *env) |
598 | { |
599 | env->exception_nr = -1; |
600 | env->exception_pending = 0; |
601 | env->exception_injected = 0; |
602 | env->exception_has_payload = false; |
603 | env->exception_payload = 0; |
604 | } |
605 | |
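/*
 * Record an exception for injection on the next vcpu entry.  With
 * KVM_CAP_EXCEPTION_PAYLOAD the exception stays "pending" and the payload
 * (DR6 for #DB, CR2 for #PF) is passed to the kernel as such; without it
 * the exception is marked "injected" and the payload has to be folded into
 * the architectural registers right away.
 */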
606 | static void kvm_queue_exception(CPUX86State *env, |
607 | int32_t exception_nr, |
608 | uint8_t exception_has_payload, |
609 | uint64_t exception_payload) |
610 | { |
611 | assert(env->exception_nr == -1); |
612 | assert(!env->exception_pending); |
613 | assert(!env->exception_injected); |
614 | assert(!env->exception_has_payload); |
615 | |
616 | env->exception_nr = exception_nr; |
617 | |
618 | if (has_exception_payload) { |
619 | env->exception_pending = 1; |
620 | |
621 | env->exception_has_payload = exception_has_payload; |
622 | env->exception_payload = exception_payload; |
623 | } else { |
624 | env->exception_injected = 1; |
625 | |
626 | if (exception_nr == EXCP01_DB) { |
627 | assert(exception_has_payload); |
628 | env->dr[6] = exception_payload; |
629 | } else if (exception_nr == EXCP0E_PAGE) { |
630 | assert(exception_has_payload); |
631 | env->cr[2] = exception_payload; |
632 | } else { |
633 | assert(!exception_has_payload); |
634 | } |
635 | } |
636 | } |
637 | |
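/*
 * On kernels without KVM_CAP_VCPU_EVENTS, a pending machine check cannot
 * be delivered through the usual exception injection path, so use
 * KVM_X86_SET_MCE with the data from the first MCE bank holding a valid
 * event.
 */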
638 | static int kvm_inject_mce_oldstyle(X86CPU *cpu) |
639 | { |
640 | CPUX86State *env = &cpu->env; |
641 | |
642 | if (!kvm_has_vcpu_events() && env->exception_nr == EXCP12_MCHK) { |
643 | unsigned int bank, bank_num = env->mcg_cap & 0xff; |
644 | struct kvm_x86_mce mce; |
645 | |
646 | kvm_reset_exception(env); |
647 | |
648 | /* |
649 | * There must be at least one bank in use if an MCE is pending. |
650 | * Find it and use its values for the event injection. |
651 | */ |
652 | for (bank = 0; bank < bank_num; bank++) { |
653 | if (env->mce_banks[bank * 4 + 1] & MCI_STATUS_VAL) { |
654 | break; |
655 | } |
656 | } |
657 | assert(bank < bank_num); |
658 | |
659 | mce.bank = bank; |
660 | mce.status = env->mce_banks[bank * 4 + 1]; |
661 | mce.mcg_status = env->mcg_status; |
662 | mce.addr = env->mce_banks[bank * 4 + 2]; |
663 | mce.misc = env->mce_banks[bank * 4 + 3]; |
664 | |
665 | return kvm_vcpu_ioctl(CPU(cpu), KVM_X86_SET_MCE, &mce); |
666 | } |
667 | return 0; |
668 | } |
669 | |
670 | static void cpu_update_state(void *opaque, int running, RunState state) |
671 | { |
672 | CPUX86State *env = opaque; |
673 | |
674 | if (running) { |
675 | env->tsc_valid = false; |
676 | } |
677 | } |
678 | |
679 | unsigned long kvm_arch_vcpu_id(CPUState *cs) |
680 | { |
681 | X86CPU *cpu = X86_CPU(cs); |
682 | return cpu->apic_id; |
683 | } |
684 | |
685 | #ifndef KVM_CPUID_SIGNATURE_NEXT |
686 | #define KVM_CPUID_SIGNATURE_NEXT 0x40000100 |
687 | #endif |
688 | |
689 | static bool hyperv_enabled(X86CPU *cpu) |
690 | { |
691 | CPUState *cs = CPU(cpu); |
692 | return kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0 && |
693 | ((cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) || |
694 | cpu->hyperv_features || cpu->hyperv_passthrough); |
695 | } |
696 | |
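/*
 * Ask KVM to run the vCPU at env->tsc_khz.  When TSC scaling is not
 * available, a failure is only fatal if the host TSC does not already run
 * at the requested frequency.
 */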
697 | static int kvm_arch_set_tsc_khz(CPUState *cs) |
698 | { |
699 | X86CPU *cpu = X86_CPU(cs); |
700 | CPUX86State *env = &cpu->env; |
701 | int r; |
702 | |
703 | if (!env->tsc_khz) { |
704 | return 0; |
705 | } |
706 | |
707 | r = kvm_check_extension(cs->kvm_state, KVM_CAP_TSC_CONTROL) ? |
708 | kvm_vcpu_ioctl(cs, KVM_SET_TSC_KHZ, env->tsc_khz) : |
709 | -ENOTSUP; |
710 | if (r < 0) { |
711 | /* When KVM_SET_TSC_KHZ fails, it's an error only if the current |
712 | * TSC frequency doesn't match the one we want. |
713 | */ |
714 | int cur_freq = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? |
715 | kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : |
716 | -ENOTSUP; |
717 | if (cur_freq <= 0 || cur_freq != env->tsc_khz) { |
718 | warn_report("TSC frequency mismatch between " |
719 | "VM (%" PRId64 " kHz) and host (%d kHz), " |
                        "and TSC scaling unavailable",
721 | env->tsc_khz, cur_freq); |
722 | return r; |
723 | } |
724 | } |
725 | |
726 | return 0; |
727 | } |
728 | |
729 | static bool tsc_is_stable_and_known(CPUX86State *env) |
730 | { |
731 | if (!env->tsc_khz) { |
732 | return false; |
733 | } |
734 | return (env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) |
735 | || env->user_tsc_khz; |
736 | } |
737 | |
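/*
 * Table of Hyper-V enlightenments.  For each feature, 'flags' lists the
 * CPUID feature words ('fw') and bits within them that the kernel must
 * expose for the feature to be usable, and 'dependencies' is a bitmap of
 * other HYPERV_FEAT_* features that have to be enabled together with it.
 */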
738 | static struct { |
739 | const char *desc; |
740 | struct { |
741 | uint32_t fw; |
742 | uint32_t bits; |
743 | } flags[2]; |
744 | uint64_t dependencies; |
745 | } kvm_hyperv_properties[] = { |
746 | [HYPERV_FEAT_RELAXED] = { |
        .desc = "relaxed timing (hv-relaxed)",
748 | .flags = { |
749 | {.fw = FEAT_HYPERV_EAX, |
750 | .bits = HV_HYPERCALL_AVAILABLE}, |
751 | {.fw = FEAT_HV_RECOMM_EAX, |
752 | .bits = HV_RELAXED_TIMING_RECOMMENDED} |
753 | } |
754 | }, |
755 | [HYPERV_FEAT_VAPIC] = { |
        .desc = "virtual APIC (hv-vapic)",
757 | .flags = { |
758 | {.fw = FEAT_HYPERV_EAX, |
759 | .bits = HV_HYPERCALL_AVAILABLE | HV_APIC_ACCESS_AVAILABLE}, |
760 | {.fw = FEAT_HV_RECOMM_EAX, |
761 | .bits = HV_APIC_ACCESS_RECOMMENDED} |
762 | } |
763 | }, |
764 | [HYPERV_FEAT_TIME] = { |
        .desc = "clocksources (hv-time)",
766 | .flags = { |
767 | {.fw = FEAT_HYPERV_EAX, |
768 | .bits = HV_HYPERCALL_AVAILABLE | HV_TIME_REF_COUNT_AVAILABLE | |
769 | HV_REFERENCE_TSC_AVAILABLE} |
770 | } |
771 | }, |
772 | [HYPERV_FEAT_CRASH] = { |
        .desc = "crash MSRs (hv-crash)",
774 | .flags = { |
775 | {.fw = FEAT_HYPERV_EDX, |
776 | .bits = HV_GUEST_CRASH_MSR_AVAILABLE} |
777 | } |
778 | }, |
779 | [HYPERV_FEAT_RESET] = { |
        .desc = "reset MSR (hv-reset)",
781 | .flags = { |
782 | {.fw = FEAT_HYPERV_EAX, |
783 | .bits = HV_RESET_AVAILABLE} |
784 | } |
785 | }, |
786 | [HYPERV_FEAT_VPINDEX] = { |
        .desc = "VP_INDEX MSR (hv-vpindex)",
788 | .flags = { |
789 | {.fw = FEAT_HYPERV_EAX, |
790 | .bits = HV_VP_INDEX_AVAILABLE} |
791 | } |
792 | }, |
793 | [HYPERV_FEAT_RUNTIME] = { |
        .desc = "VP_RUNTIME MSR (hv-runtime)",
795 | .flags = { |
796 | {.fw = FEAT_HYPERV_EAX, |
797 | .bits = HV_VP_RUNTIME_AVAILABLE} |
798 | } |
799 | }, |
800 | [HYPERV_FEAT_SYNIC] = { |
        .desc = "synthetic interrupt controller (hv-synic)",
802 | .flags = { |
803 | {.fw = FEAT_HYPERV_EAX, |
804 | .bits = HV_SYNIC_AVAILABLE} |
805 | } |
806 | }, |
807 | [HYPERV_FEAT_STIMER] = { |
        .desc = "synthetic timers (hv-stimer)",
809 | .flags = { |
810 | {.fw = FEAT_HYPERV_EAX, |
811 | .bits = HV_SYNTIMERS_AVAILABLE} |
812 | }, |
813 | .dependencies = BIT(HYPERV_FEAT_SYNIC) | BIT(HYPERV_FEAT_TIME) |
814 | }, |
815 | [HYPERV_FEAT_FREQUENCIES] = { |
        .desc = "frequency MSRs (hv-frequencies)",
817 | .flags = { |
818 | {.fw = FEAT_HYPERV_EAX, |
819 | .bits = HV_ACCESS_FREQUENCY_MSRS}, |
820 | {.fw = FEAT_HYPERV_EDX, |
821 | .bits = HV_FREQUENCY_MSRS_AVAILABLE} |
822 | } |
823 | }, |
824 | [HYPERV_FEAT_REENLIGHTENMENT] = { |
        .desc = "reenlightenment MSRs (hv-reenlightenment)",
826 | .flags = { |
827 | {.fw = FEAT_HYPERV_EAX, |
828 | .bits = HV_ACCESS_REENLIGHTENMENTS_CONTROL} |
829 | } |
830 | }, |
831 | [HYPERV_FEAT_TLBFLUSH] = { |
        .desc = "paravirtualized TLB flush (hv-tlbflush)",
833 | .flags = { |
834 | {.fw = FEAT_HV_RECOMM_EAX, |
835 | .bits = HV_REMOTE_TLB_FLUSH_RECOMMENDED | |
836 | HV_EX_PROCESSOR_MASKS_RECOMMENDED} |
837 | }, |
838 | .dependencies = BIT(HYPERV_FEAT_VPINDEX) |
839 | }, |
840 | [HYPERV_FEAT_EVMCS] = { |
        .desc = "enlightened VMCS (hv-evmcs)",
842 | .flags = { |
843 | {.fw = FEAT_HV_RECOMM_EAX, |
844 | .bits = HV_ENLIGHTENED_VMCS_RECOMMENDED} |
845 | }, |
846 | .dependencies = BIT(HYPERV_FEAT_VAPIC) |
847 | }, |
848 | [HYPERV_FEAT_IPI] = { |
        .desc = "paravirtualized IPI (hv-ipi)",
850 | .flags = { |
851 | {.fw = FEAT_HV_RECOMM_EAX, |
852 | .bits = HV_CLUSTER_IPI_RECOMMENDED | |
853 | HV_EX_PROCESSOR_MASKS_RECOMMENDED} |
854 | }, |
855 | .dependencies = BIT(HYPERV_FEAT_VPINDEX) |
856 | }, |
857 | [HYPERV_FEAT_STIMER_DIRECT] = { |
        .desc = "direct mode synthetic timers (hv-stimer-direct)",
859 | .flags = { |
860 | {.fw = FEAT_HYPERV_EDX, |
861 | .bits = HV_STIMER_DIRECT_MODE_AVAILABLE} |
862 | }, |
863 | .dependencies = BIT(HYPERV_FEAT_STIMER) |
864 | }, |
865 | }; |
866 | |
867 | static struct kvm_cpuid2 *try_get_hv_cpuid(CPUState *cs, int max) |
868 | { |
869 | struct kvm_cpuid2 *cpuid; |
870 | int r, size; |
871 | |
872 | size = sizeof(*cpuid) + max * sizeof(*cpuid->entries); |
873 | cpuid = g_malloc0(size); |
874 | cpuid->nent = max; |
875 | |
876 | r = kvm_vcpu_ioctl(cs, KVM_GET_SUPPORTED_HV_CPUID, cpuid); |
877 | if (r == 0 && cpuid->nent >= max) { |
878 | r = -E2BIG; |
879 | } |
880 | if (r < 0) { |
881 | if (r == -E2BIG) { |
882 | g_free(cpuid); |
883 | return NULL; |
884 | } else { |
            fprintf(stderr, "KVM_GET_SUPPORTED_HV_CPUID failed: %s\n",
886 | strerror(-r)); |
887 | exit(1); |
888 | } |
889 | } |
890 | return cpuid; |
891 | } |
892 | |
893 | /* |
894 | * Run KVM_GET_SUPPORTED_HV_CPUID ioctl(), allocating a buffer large enough |
895 | * for all entries. |
896 | */ |
897 | static struct kvm_cpuid2 *get_supported_hv_cpuid(CPUState *cs) |
898 | { |
899 | struct kvm_cpuid2 *cpuid; |
900 | int max = 7; /* 0x40000000..0x40000005, 0x4000000A */ |
901 | |
902 | /* |
903 | * When the buffer is too small, KVM_GET_SUPPORTED_HV_CPUID fails with |
904 | * -E2BIG, however, it doesn't report back the right size. Keep increasing |
905 | * it and re-trying until we succeed. |
906 | */ |
907 | while ((cpuid = try_get_hv_cpuid(cs, max)) == NULL) { |
908 | max++; |
909 | } |
910 | return cpuid; |
911 | } |
912 | |
913 | /* |
914 | * When KVM_GET_SUPPORTED_HV_CPUID is not supported we fill CPUID feature |
915 | * leaves from KVM_CAP_HYPERV* and present MSRs data. |
916 | */ |
917 | static struct kvm_cpuid2 *get_supported_hv_cpuid_legacy(CPUState *cs) |
918 | { |
919 | X86CPU *cpu = X86_CPU(cs); |
920 | struct kvm_cpuid2 *cpuid; |
921 | struct kvm_cpuid_entry2 *entry_feat, *entry_recomm; |
922 | |
923 | /* HV_CPUID_FEATURES, HV_CPUID_ENLIGHTMENT_INFO */ |
924 | cpuid = g_malloc0(sizeof(*cpuid) + 2 * sizeof(*cpuid->entries)); |
925 | cpuid->nent = 2; |
926 | |
    /* HV_CPUID_FEATURES */
928 | entry_feat = &cpuid->entries[0]; |
929 | entry_feat->function = HV_CPUID_FEATURES; |
930 | |
931 | entry_recomm = &cpuid->entries[1]; |
932 | entry_recomm->function = HV_CPUID_ENLIGHTMENT_INFO; |
933 | entry_recomm->ebx = cpu->hyperv_spinlock_attempts; |
934 | |
935 | if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV) > 0) { |
936 | entry_feat->eax |= HV_HYPERCALL_AVAILABLE; |
937 | entry_feat->eax |= HV_APIC_ACCESS_AVAILABLE; |
938 | entry_feat->edx |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; |
939 | entry_recomm->eax |= HV_RELAXED_TIMING_RECOMMENDED; |
940 | entry_recomm->eax |= HV_APIC_ACCESS_RECOMMENDED; |
941 | } |
942 | |
943 | if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_TIME) > 0) { |
944 | entry_feat->eax |= HV_TIME_REF_COUNT_AVAILABLE; |
945 | entry_feat->eax |= HV_REFERENCE_TSC_AVAILABLE; |
946 | } |
947 | |
948 | if (has_msr_hv_frequencies) { |
949 | entry_feat->eax |= HV_ACCESS_FREQUENCY_MSRS; |
950 | entry_feat->edx |= HV_FREQUENCY_MSRS_AVAILABLE; |
951 | } |
952 | |
953 | if (has_msr_hv_crash) { |
954 | entry_feat->edx |= HV_GUEST_CRASH_MSR_AVAILABLE; |
955 | } |
956 | |
957 | if (has_msr_hv_reenlightenment) { |
958 | entry_feat->eax |= HV_ACCESS_REENLIGHTENMENTS_CONTROL; |
959 | } |
960 | |
961 | if (has_msr_hv_reset) { |
962 | entry_feat->eax |= HV_RESET_AVAILABLE; |
963 | } |
964 | |
965 | if (has_msr_hv_vpindex) { |
966 | entry_feat->eax |= HV_VP_INDEX_AVAILABLE; |
967 | } |
968 | |
969 | if (has_msr_hv_runtime) { |
970 | entry_feat->eax |= HV_VP_RUNTIME_AVAILABLE; |
971 | } |
972 | |
973 | if (has_msr_hv_synic) { |
974 | unsigned int cap = cpu->hyperv_synic_kvm_only ? |
975 | KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2; |
976 | |
977 | if (kvm_check_extension(cs->kvm_state, cap) > 0) { |
978 | entry_feat->eax |= HV_SYNIC_AVAILABLE; |
979 | } |
980 | } |
981 | |
982 | if (has_msr_hv_stimer) { |
983 | entry_feat->eax |= HV_SYNTIMERS_AVAILABLE; |
984 | } |
985 | |
986 | if (kvm_check_extension(cs->kvm_state, |
987 | KVM_CAP_HYPERV_TLBFLUSH) > 0) { |
988 | entry_recomm->eax |= HV_REMOTE_TLB_FLUSH_RECOMMENDED; |
989 | entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; |
990 | } |
991 | |
992 | if (kvm_check_extension(cs->kvm_state, |
993 | KVM_CAP_HYPERV_ENLIGHTENED_VMCS) > 0) { |
994 | entry_recomm->eax |= HV_ENLIGHTENED_VMCS_RECOMMENDED; |
995 | } |
996 | |
997 | if (kvm_check_extension(cs->kvm_state, |
998 | KVM_CAP_HYPERV_SEND_IPI) > 0) { |
999 | entry_recomm->eax |= HV_CLUSTER_IPI_RECOMMENDED; |
1000 | entry_recomm->eax |= HV_EX_PROCESSOR_MASKS_RECOMMENDED; |
1001 | } |
1002 | |
1003 | return cpuid; |
1004 | } |
1005 | |
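/*
 * Map one of QEMU's Hyper-V feature words (e.g. FEAT_HYPERV_EAX or
 * FEAT_HV_RECOMM_EAX) to the corresponding CPUID leaf and register, and
 * fetch its value from the supported-CPUID data in 'cpuid'.
 */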
1006 | static int hv_cpuid_get_fw(struct kvm_cpuid2 *cpuid, int fw, uint32_t *r) |
1007 | { |
1008 | struct kvm_cpuid_entry2 *entry; |
1009 | uint32_t func; |
1010 | int reg; |
1011 | |
1012 | switch (fw) { |
1013 | case FEAT_HYPERV_EAX: |
1014 | reg = R_EAX; |
1015 | func = HV_CPUID_FEATURES; |
1016 | break; |
1017 | case FEAT_HYPERV_EDX: |
1018 | reg = R_EDX; |
1019 | func = HV_CPUID_FEATURES; |
1020 | break; |
1021 | case FEAT_HV_RECOMM_EAX: |
1022 | reg = R_EAX; |
1023 | func = HV_CPUID_ENLIGHTMENT_INFO; |
1024 | break; |
1025 | default: |
1026 | return -EINVAL; |
1027 | } |
1028 | |
1029 | entry = cpuid_find_entry(cpuid, func, 0); |
1030 | if (!entry) { |
1031 | return -ENOENT; |
1032 | } |
1033 | |
1034 | switch (reg) { |
1035 | case R_EAX: |
1036 | *r = entry->eax; |
1037 | break; |
1038 | case R_EDX: |
1039 | *r = entry->edx; |
1040 | break; |
1041 | default: |
1042 | return -EINVAL; |
1043 | } |
1044 | |
1045 | return 0; |
1046 | } |
1047 | |
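/*
 * Check that the kernel exposes all CPUID bits required by 'feature' and
 * that the feature's dependencies are enabled, then merge the bits into
 * env->features[].  Returns 0 on success and 1 when an explicitly requested
 * feature is unavailable.
 */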
1048 | static int hv_cpuid_check_and_set(CPUState *cs, struct kvm_cpuid2 *cpuid, |
1049 | int feature) |
1050 | { |
1051 | X86CPU *cpu = X86_CPU(cs); |
1052 | CPUX86State *env = &cpu->env; |
1053 | uint32_t r, fw, bits; |
1054 | uint64_t deps; |
1055 | int i, dep_feat; |
1056 | |
1057 | if (!hyperv_feat_enabled(cpu, feature) && !cpu->hyperv_passthrough) { |
1058 | return 0; |
1059 | } |
1060 | |
1061 | deps = kvm_hyperv_properties[feature].dependencies; |
1062 | while (deps) { |
1063 | dep_feat = ctz64(deps); |
1064 | if (!(hyperv_feat_enabled(cpu, dep_feat))) { |
1065 | fprintf(stderr, |
                "Hyper-V %s requires Hyper-V %s\n",
1067 | kvm_hyperv_properties[feature].desc, |
1068 | kvm_hyperv_properties[dep_feat].desc); |
1069 | return 1; |
1070 | } |
1071 | deps &= ~(1ull << dep_feat); |
1072 | } |
1073 | |
1074 | for (i = 0; i < ARRAY_SIZE(kvm_hyperv_properties[feature].flags); i++) { |
1075 | fw = kvm_hyperv_properties[feature].flags[i].fw; |
1076 | bits = kvm_hyperv_properties[feature].flags[i].bits; |
1077 | |
1078 | if (!fw) { |
1079 | continue; |
1080 | } |
1081 | |
1082 | if (hv_cpuid_get_fw(cpuid, fw, &r) || (r & bits) != bits) { |
1083 | if (hyperv_feat_enabled(cpu, feature)) { |
1084 | fprintf(stderr, |
                    "Hyper-V %s is not supported by kernel\n",
1086 | kvm_hyperv_properties[feature].desc); |
1087 | return 1; |
1088 | } else { |
1089 | return 0; |
1090 | } |
1091 | } |
1092 | |
1093 | env->features[fw] |= bits; |
1094 | } |
1095 | |
1096 | if (cpu->hyperv_passthrough) { |
1097 | cpu->hyperv_features |= BIT(feature); |
1098 | } |
1099 | |
1100 | return 0; |
1101 | } |
1102 | |
1103 | /* |
1104 | * Fill in Hyper-V CPUIDs. Returns the number of entries filled in cpuid_ent in |
1105 | * case of success, errno < 0 in case of failure and 0 when no Hyper-V |
 * extensions are enabled.
1107 | */ |
1108 | static int hyperv_handle_properties(CPUState *cs, |
1109 | struct kvm_cpuid_entry2 *cpuid_ent) |
1110 | { |
1111 | X86CPU *cpu = X86_CPU(cs); |
1112 | CPUX86State *env = &cpu->env; |
1113 | struct kvm_cpuid2 *cpuid; |
1114 | struct kvm_cpuid_entry2 *c; |
1115 | uint32_t signature[3]; |
1116 | uint32_t cpuid_i = 0; |
1117 | int r; |
1118 | |
    if (!hyperv_enabled(cpu)) {
        return 0;
    }
1121 | |
1122 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) || |
1123 | cpu->hyperv_passthrough) { |
1124 | uint16_t evmcs_version; |
1125 | |
1126 | r = kvm_vcpu_enable_cap(cs, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, 0, |
1127 | (uintptr_t)&evmcs_version); |
1128 | |
1129 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) && r) { |
            fprintf(stderr, "Hyper-V %s is not supported by kernel\n",
1131 | kvm_hyperv_properties[HYPERV_FEAT_EVMCS].desc); |
1132 | return -ENOSYS; |
1133 | } |
1134 | |
1135 | if (!r) { |
1136 | env->features[FEAT_HV_RECOMM_EAX] |= |
1137 | HV_ENLIGHTENED_VMCS_RECOMMENDED; |
1138 | env->features[FEAT_HV_NESTED_EAX] = evmcs_version; |
1139 | } |
1140 | } |
1141 | |
1142 | if (kvm_check_extension(cs->kvm_state, KVM_CAP_HYPERV_CPUID) > 0) { |
1143 | cpuid = get_supported_hv_cpuid(cs); |
1144 | } else { |
1145 | cpuid = get_supported_hv_cpuid_legacy(cs); |
1146 | } |
1147 | |
1148 | if (cpu->hyperv_passthrough) { |
1149 | memcpy(cpuid_ent, &cpuid->entries[0], |
1150 | cpuid->nent * sizeof(cpuid->entries[0])); |
1151 | |
1152 | c = cpuid_find_entry(cpuid, HV_CPUID_FEATURES, 0); |
1153 | if (c) { |
1154 | env->features[FEAT_HYPERV_EAX] = c->eax; |
1155 | env->features[FEAT_HYPERV_EBX] = c->ebx; |
            env->features[FEAT_HYPERV_EDX] = c->edx;
1157 | } |
1158 | c = cpuid_find_entry(cpuid, HV_CPUID_ENLIGHTMENT_INFO, 0); |
1159 | if (c) { |
1160 | env->features[FEAT_HV_RECOMM_EAX] = c->eax; |
1161 | |
            /* hv-spinlocks may have been overridden */
1163 | if (cpu->hyperv_spinlock_attempts != HYPERV_SPINLOCK_NEVER_RETRY) { |
1164 | c->ebx = cpu->hyperv_spinlock_attempts; |
1165 | } |
1166 | } |
1167 | c = cpuid_find_entry(cpuid, HV_CPUID_NESTED_FEATURES, 0); |
1168 | if (c) { |
1169 | env->features[FEAT_HV_NESTED_EAX] = c->eax; |
1170 | } |
1171 | } |
1172 | |
1173 | /* Features */ |
1174 | r = hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RELAXED); |
1175 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VAPIC); |
1176 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TIME); |
1177 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_CRASH); |
1178 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RESET); |
1179 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_VPINDEX); |
1180 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_RUNTIME); |
1181 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_SYNIC); |
1182 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER); |
1183 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_FREQUENCIES); |
1184 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_REENLIGHTENMENT); |
1185 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_TLBFLUSH); |
1186 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_EVMCS); |
1187 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_IPI); |
1188 | r |= hv_cpuid_check_and_set(cs, cpuid, HYPERV_FEAT_STIMER_DIRECT); |
1189 | |
1190 | /* Additional dependencies not covered by kvm_hyperv_properties[] */ |
1191 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC) && |
1192 | !cpu->hyperv_synic_kvm_only && |
1193 | !hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX)) { |
        fprintf(stderr, "Hyper-V %s requires Hyper-V %s\n",
1195 | kvm_hyperv_properties[HYPERV_FEAT_SYNIC].desc, |
1196 | kvm_hyperv_properties[HYPERV_FEAT_VPINDEX].desc); |
1197 | r |= 1; |
1198 | } |
1199 | |
1200 | /* Not exposed by KVM but needed to make CPU hotplug in Windows work */ |
1201 | env->features[FEAT_HYPERV_EDX] |= HV_CPU_DYNAMIC_PARTITIONING_AVAILABLE; |
1202 | |
1203 | if (r) { |
1204 | r = -ENOSYS; |
1205 | goto free; |
1206 | } |
1207 | |
1208 | if (cpu->hyperv_passthrough) { |
1209 | /* We already copied all feature words from KVM as is */ |
1210 | r = cpuid->nent; |
1211 | goto free; |
1212 | } |
1213 | |
1214 | c = &cpuid_ent[cpuid_i++]; |
1215 | c->function = HV_CPUID_VENDOR_AND_MAX_FUNCTIONS; |
1216 | if (!cpu->hyperv_vendor_id) { |
        memcpy(signature, "Microsoft Hv", 12);
1218 | } else { |
1219 | size_t len = strlen(cpu->hyperv_vendor_id); |
1220 | |
1221 | if (len > 12) { |
            error_report("hv-vendor-id truncated to 12 characters");
1223 | len = 12; |
1224 | } |
1225 | memset(signature, 0, 12); |
1226 | memcpy(signature, cpu->hyperv_vendor_id, len); |
1227 | } |
1228 | c->eax = hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS) ? |
1229 | HV_CPUID_NESTED_FEATURES : HV_CPUID_IMPLEMENT_LIMITS; |
1230 | c->ebx = signature[0]; |
1231 | c->ecx = signature[1]; |
1232 | c->edx = signature[2]; |
1233 | |
1234 | c = &cpuid_ent[cpuid_i++]; |
1235 | c->function = HV_CPUID_INTERFACE; |
    memcpy(signature, "Hv#1\0\0\0\0\0\0\0\0", 12);
1237 | c->eax = signature[0]; |
1238 | c->ebx = 0; |
1239 | c->ecx = 0; |
1240 | c->edx = 0; |
1241 | |
1242 | c = &cpuid_ent[cpuid_i++]; |
1243 | c->function = HV_CPUID_VERSION; |
1244 | c->eax = 0x00001bbc; |
1245 | c->ebx = 0x00060001; |
1246 | |
1247 | c = &cpuid_ent[cpuid_i++]; |
1248 | c->function = HV_CPUID_FEATURES; |
1249 | c->eax = env->features[FEAT_HYPERV_EAX]; |
1250 | c->ebx = env->features[FEAT_HYPERV_EBX]; |
1251 | c->edx = env->features[FEAT_HYPERV_EDX]; |
1252 | |
1253 | c = &cpuid_ent[cpuid_i++]; |
1254 | c->function = HV_CPUID_ENLIGHTMENT_INFO; |
1255 | c->eax = env->features[FEAT_HV_RECOMM_EAX]; |
1256 | c->ebx = cpu->hyperv_spinlock_attempts; |
1257 | |
1258 | c = &cpuid_ent[cpuid_i++]; |
1259 | c->function = HV_CPUID_IMPLEMENT_LIMITS; |
1260 | c->eax = cpu->hv_max_vps; |
1261 | c->ebx = 0x40; |
1262 | |
1263 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_EVMCS)) { |
1264 | __u32 function; |
1265 | |
1266 | /* Create zeroed 0x40000006..0x40000009 leaves */ |
1267 | for (function = HV_CPUID_IMPLEMENT_LIMITS + 1; |
1268 | function < HV_CPUID_NESTED_FEATURES; function++) { |
1269 | c = &cpuid_ent[cpuid_i++]; |
1270 | c->function = function; |
1271 | } |
1272 | |
1273 | c = &cpuid_ent[cpuid_i++]; |
1274 | c->function = HV_CPUID_NESTED_FEATURES; |
1275 | c->eax = env->features[FEAT_HV_NESTED_EAX]; |
1276 | } |
1277 | r = cpuid_i; |
1278 | |
1279 | free: |
1280 | g_free(cpuid); |
1281 | |
1282 | return r; |
1283 | } |
1284 | |
1285 | static Error *hv_passthrough_mig_blocker; |
1286 | |
1287 | static int hyperv_init_vcpu(X86CPU *cpu) |
1288 | { |
1289 | CPUState *cs = CPU(cpu); |
1290 | Error *local_err = NULL; |
1291 | int ret; |
1292 | |
1293 | if (cpu->hyperv_passthrough && hv_passthrough_mig_blocker == NULL) { |
1294 | error_setg(&hv_passthrough_mig_blocker, |
1295 | "'hv-passthrough' CPU flag prevents migration, use explicit" |
                   " set of hv-* flags instead");
1297 | ret = migrate_add_blocker(hv_passthrough_mig_blocker, &local_err); |
1298 | if (local_err) { |
1299 | error_report_err(local_err); |
1300 | error_free(hv_passthrough_mig_blocker); |
1301 | return ret; |
1302 | } |
1303 | } |
1304 | |
1305 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) && !hv_vpindex_settable) { |
1306 | /* |
1307 | * the kernel doesn't support setting vp_index; assert that its value |
1308 | * is in sync |
1309 | */ |
1310 | struct { |
1311 | struct kvm_msrs info; |
1312 | struct kvm_msr_entry entries[1]; |
1313 | } msr_data = { |
1314 | .info.nmsrs = 1, |
1315 | .entries[0].index = HV_X64_MSR_VP_INDEX, |
1316 | }; |
1317 | |
1318 | ret = kvm_vcpu_ioctl(cs, KVM_GET_MSRS, &msr_data); |
1319 | if (ret < 0) { |
1320 | return ret; |
1321 | } |
1322 | assert(ret == 1); |
1323 | |
1324 | if (msr_data.entries[0].data != hyperv_vp_index(CPU(cpu))) { |
            error_report("kernel's vp_index != QEMU's vp_index");
1326 | return -ENXIO; |
1327 | } |
1328 | } |
1329 | |
1330 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { |
1331 | uint32_t synic_cap = cpu->hyperv_synic_kvm_only ? |
1332 | KVM_CAP_HYPERV_SYNIC : KVM_CAP_HYPERV_SYNIC2; |
1333 | ret = kvm_vcpu_enable_cap(cs, synic_cap, 0); |
1334 | if (ret < 0) { |
            error_report("failed to turn on HyperV SynIC in KVM: %s",
1336 | strerror(-ret)); |
1337 | return ret; |
1338 | } |
1339 | |
1340 | if (!cpu->hyperv_synic_kvm_only) { |
1341 | ret = hyperv_x86_synic_add(cpu); |
1342 | if (ret < 0) { |
                error_report("failed to create HyperV SynIC: %s",
1344 | strerror(-ret)); |
1345 | return ret; |
1346 | } |
1347 | } |
1348 | } |
1349 | |
1350 | return 0; |
1351 | } |
1352 | |
1353 | static Error *invtsc_mig_blocker; |
1354 | |
1355 | #define KVM_MAX_CPUID_ENTRIES 100 |
1356 | |
1357 | int kvm_arch_init_vcpu(CPUState *cs) |
1358 | { |
1359 | struct { |
1360 | struct kvm_cpuid2 cpuid; |
1361 | struct kvm_cpuid_entry2 entries[KVM_MAX_CPUID_ENTRIES]; |
1362 | } cpuid_data; |
1363 | /* |
1364 | * The kernel defines these structs with padding fields so there |
1365 | * should be no extra padding in our cpuid_data struct. |
1366 | */ |
1367 | QEMU_BUILD_BUG_ON(sizeof(cpuid_data) != |
1368 | sizeof(struct kvm_cpuid2) + |
1369 | sizeof(struct kvm_cpuid_entry2) * KVM_MAX_CPUID_ENTRIES); |
1370 | |
1371 | X86CPU *cpu = X86_CPU(cs); |
1372 | CPUX86State *env = &cpu->env; |
1373 | uint32_t limit, i, j, cpuid_i; |
1374 | uint32_t unused; |
1375 | struct kvm_cpuid_entry2 *c; |
1376 | uint32_t signature[3]; |
1377 | int kvm_base = KVM_CPUID_SIGNATURE; |
1378 | int max_nested_state_len; |
1379 | int r; |
1380 | Error *local_err = NULL; |
1381 | |
1382 | memset(&cpuid_data, 0, sizeof(cpuid_data)); |
1383 | |
1384 | cpuid_i = 0; |
1385 | |
1386 | r = kvm_arch_set_tsc_khz(cs); |
1387 | if (r < 0) { |
1388 | return r; |
1389 | } |
1390 | |
1391 | /* vcpu's TSC frequency is either specified by user, or following |
1392 | * the value used by KVM if the former is not present. In the |
1393 | * latter case, we query it from KVM and record in env->tsc_khz, |
1394 | * so that vcpu's TSC frequency can be migrated later via this field. |
1395 | */ |
1396 | if (!env->tsc_khz) { |
1397 | r = kvm_check_extension(cs->kvm_state, KVM_CAP_GET_TSC_KHZ) ? |
1398 | kvm_vcpu_ioctl(cs, KVM_GET_TSC_KHZ) : |
1399 | -ENOTSUP; |
1400 | if (r > 0) { |
1401 | env->tsc_khz = r; |
1402 | } |
1403 | } |
1404 | |
1405 | /* Paravirtualization CPUIDs */ |
1406 | r = hyperv_handle_properties(cs, cpuid_data.entries); |
1407 | if (r < 0) { |
1408 | return r; |
1409 | } else if (r > 0) { |
1410 | cpuid_i = r; |
1411 | kvm_base = KVM_CPUID_SIGNATURE_NEXT; |
1412 | has_msr_hv_hypercall = true; |
1413 | } |
1414 | |
1415 | if (cpu->expose_kvm) { |
        memcpy(signature, "KVMKVMKVM\0\0\0", 12);
1417 | c = &cpuid_data.entries[cpuid_i++]; |
1418 | c->function = KVM_CPUID_SIGNATURE | kvm_base; |
1419 | c->eax = KVM_CPUID_FEATURES | kvm_base; |
1420 | c->ebx = signature[0]; |
1421 | c->ecx = signature[1]; |
1422 | c->edx = signature[2]; |
1423 | |
1424 | c = &cpuid_data.entries[cpuid_i++]; |
1425 | c->function = KVM_CPUID_FEATURES | kvm_base; |
1426 | c->eax = env->features[FEAT_KVM]; |
1427 | c->edx = env->features[FEAT_KVM_HINTS]; |
1428 | } |
1429 | |
1430 | cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused); |
1431 | |
1432 | for (i = 0; i <= limit; i++) { |
1433 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
            fprintf(stderr, "unsupported level value: 0x%x\n", limit);
1435 | abort(); |
1436 | } |
1437 | c = &cpuid_data.entries[cpuid_i++]; |
1438 | |
1439 | switch (i) { |
1440 | case 2: { |
1441 | /* Keep reading function 2 till all the input is received */ |
1442 | int times; |
1443 | |
1444 | c->function = i; |
1445 | c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC | |
1446 | KVM_CPUID_FLAG_STATE_READ_NEXT; |
1447 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1448 | times = c->eax & 0xff; |
1449 | |
1450 | for (j = 1; j < times; ++j) { |
1451 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
1452 | fprintf(stderr, "cpuid_data is full, no space for " |
                            "cpuid(eax:2):eax & 0xff = 0x%x\n", times);
1454 | abort(); |
1455 | } |
1456 | c = &cpuid_data.entries[cpuid_i++]; |
1457 | c->function = i; |
1458 | c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC; |
1459 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1460 | } |
1461 | break; |
1462 | } |
1463 | case 0x1f: |
1464 | if (env->nr_dies < 2) { |
1465 | break; |
1466 | } |
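            /* fallthrough */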
1467 | case 4: |
1468 | case 0xb: |
1469 | case 0xd: |
1470 | for (j = 0; ; j++) { |
1471 | if (i == 0xd && j == 64) { |
1472 | break; |
1473 | } |
1474 | |
1475 | if (i == 0x1f && j == 64) { |
1476 | break; |
1477 | } |
1478 | |
1479 | c->function = i; |
1480 | c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
1481 | c->index = j; |
1482 | cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1483 | |
1484 | if (i == 4 && c->eax == 0) { |
1485 | break; |
1486 | } |
1487 | if (i == 0xb && !(c->ecx & 0xff00)) { |
1488 | break; |
1489 | } |
1490 | if (i == 0x1f && !(c->ecx & 0xff00)) { |
1491 | break; |
1492 | } |
1493 | if (i == 0xd && c->eax == 0) { |
1494 | continue; |
1495 | } |
1496 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
1497 | fprintf(stderr, "cpuid_data is full, no space for " |
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1499 | abort(); |
1500 | } |
1501 | c = &cpuid_data.entries[cpuid_i++]; |
1502 | } |
1503 | break; |
1504 | case 0x7: |
1505 | case 0x14: { |
1506 | uint32_t times; |
1507 | |
1508 | c->function = i; |
1509 | c->index = 0; |
1510 | c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
1511 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1512 | times = c->eax; |
1513 | |
1514 | for (j = 1; j <= times; ++j) { |
1515 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
1516 | fprintf(stderr, "cpuid_data is full, no space for " |
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1518 | abort(); |
1519 | } |
1520 | c = &cpuid_data.entries[cpuid_i++]; |
1521 | c->function = i; |
1522 | c->index = j; |
1523 | c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
1524 | cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1525 | } |
1526 | break; |
1527 | } |
1528 | default: |
1529 | c->function = i; |
1530 | c->flags = 0; |
1531 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1532 | break; |
1533 | } |
1534 | } |
1535 | |
1536 | if (limit >= 0x0a) { |
1537 | uint32_t eax, edx; |
1538 | |
1539 | cpu_x86_cpuid(env, 0x0a, 0, &eax, &unused, &unused, &edx); |
1540 | |
1541 | has_architectural_pmu_version = eax & 0xff; |
1542 | if (has_architectural_pmu_version > 0) { |
1543 | num_architectural_pmu_gp_counters = (eax & 0xff00) >> 8; |
1544 | |
1545 | /* Shouldn't be more than 32, since that's the number of bits |
1546 | * available in EBX to tell us _which_ counters are available. |
1547 | * Play it safe. |
1548 | */ |
1549 | if (num_architectural_pmu_gp_counters > MAX_GP_COUNTERS) { |
1550 | num_architectural_pmu_gp_counters = MAX_GP_COUNTERS; |
1551 | } |
1552 | |
1553 | if (has_architectural_pmu_version > 1) { |
1554 | num_architectural_pmu_fixed_counters = edx & 0x1f; |
1555 | |
1556 | if (num_architectural_pmu_fixed_counters > MAX_FIXED_COUNTERS) { |
1557 | num_architectural_pmu_fixed_counters = MAX_FIXED_COUNTERS; |
1558 | } |
1559 | } |
1560 | } |
1561 | } |
1562 | |
1563 | cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused); |
1564 | |
1565 | for (i = 0x80000000; i <= limit; i++) { |
1566 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
            fprintf(stderr, "unsupported xlevel value: 0x%x\n", limit);
1568 | abort(); |
1569 | } |
1570 | c = &cpuid_data.entries[cpuid_i++]; |
1571 | |
1572 | switch (i) { |
1573 | case 0x8000001d: |
1574 | /* Query for all AMD cache information leaves */ |
1575 | for (j = 0; ; j++) { |
1576 | c->function = i; |
1577 | c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX; |
1578 | c->index = j; |
1579 | cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1580 | |
1581 | if (c->eax == 0) { |
1582 | break; |
1583 | } |
1584 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
1585 | fprintf(stderr, "cpuid_data is full, no space for " |
                            "cpuid(eax:0x%x,ecx:0x%x)\n", i, j);
1587 | abort(); |
1588 | } |
1589 | c = &cpuid_data.entries[cpuid_i++]; |
1590 | } |
1591 | break; |
1592 | default: |
1593 | c->function = i; |
1594 | c->flags = 0; |
1595 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1596 | break; |
1597 | } |
1598 | } |
1599 | |
    /* Call Centaur's CPUID instructions if they are supported. */
1601 | if (env->cpuid_xlevel2 > 0) { |
1602 | cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused); |
1603 | |
1604 | for (i = 0xC0000000; i <= limit; i++) { |
1605 | if (cpuid_i == KVM_MAX_CPUID_ENTRIES) { |
                fprintf(stderr, "unsupported xlevel2 value: 0x%x\n", limit);
1607 | abort(); |
1608 | } |
1609 | c = &cpuid_data.entries[cpuid_i++]; |
1610 | |
1611 | c->function = i; |
1612 | c->flags = 0; |
1613 | cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx); |
1614 | } |
1615 | } |
1616 | |
1617 | cpuid_data.cpuid.nent = cpuid_i; |
1618 | |
    if (((env->cpuid_version >> 8) & 0xF) >= 6
1620 | && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == |
1621 | (CPUID_MCE | CPUID_MCA) |
1622 | && kvm_check_extension(cs->kvm_state, KVM_CAP_MCE) > 0) { |
1623 | uint64_t mcg_cap, unsupported_caps; |
1624 | int banks; |
1625 | int ret; |
1626 | |
1627 | ret = kvm_get_mce_cap_supported(cs->kvm_state, &mcg_cap, &banks); |
1628 | if (ret < 0) { |
            fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
1630 | return ret; |
1631 | } |
1632 | |
1633 | if (banks < (env->mcg_cap & MCG_CAP_BANKS_MASK)) { |
            error_report("kvm: Unsupported MCE bank count (QEMU = %d, KVM = %d)",
1635 | (int)(env->mcg_cap & MCG_CAP_BANKS_MASK), banks); |
1636 | return -ENOTSUP; |
1637 | } |
1638 | |
1639 | unsupported_caps = env->mcg_cap & ~(mcg_cap | MCG_CAP_BANKS_MASK); |
1640 | if (unsupported_caps) { |
1641 | if (unsupported_caps & MCG_LMCE_P) { |
                error_report("kvm: LMCE not supported");
1643 | return -ENOTSUP; |
1644 | } |
1645 | warn_report("Unsupported MCG_CAP bits: 0x%" PRIx64, |
1646 | unsupported_caps); |
1647 | } |
1648 | |
1649 | env->mcg_cap &= mcg_cap | MCG_CAP_BANKS_MASK; |
1650 | ret = kvm_vcpu_ioctl(cs, KVM_X86_SETUP_MCE, &env->mcg_cap); |
1651 | if (ret < 0) { |
            fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
1653 | return ret; |
1654 | } |
1655 | } |
1656 | |
1657 | qemu_add_vm_change_state_handler(cpu_update_state, env); |
1658 | |
1659 | c = cpuid_find_entry(&cpuid_data.cpuid, 1, 0); |
1660 | if (c) { |
1661 | has_msr_feature_control = !!(c->ecx & CPUID_EXT_VMX) || |
1662 | !!(c->ecx & CPUID_EXT_SMX); |
1663 | } |
1664 | |
1665 | if (env->mcg_cap & MCG_LMCE_P) { |
1666 | has_msr_mcg_ext_ctl = has_msr_feature_control = true; |
1667 | } |
1668 | |
1669 | if (!env->user_tsc_khz) { |
1670 | if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) && |
1671 | invtsc_mig_blocker == NULL) { |
1672 | error_setg(&invtsc_mig_blocker, |
1673 | "State blocked by non-migratable CPU device" |
                       " (invtsc flag)");
1675 | r = migrate_add_blocker(invtsc_mig_blocker, &local_err); |
1676 | if (local_err) { |
1677 | error_report_err(local_err); |
1678 | error_free(invtsc_mig_blocker); |
1679 | return r; |
1680 | } |
1681 | } |
1682 | } |
1683 | |
1684 | if (cpu->vmware_cpuid_freq |
1685 | /* Guests depend on 0x40000000 to detect this feature, so only expose |
1686 | * it if KVM exposes leaf 0x40000000. (Conflicts with Hyper-V) */ |
1687 | && cpu->expose_kvm |
1688 | && kvm_base == KVM_CPUID_SIGNATURE |
1689 | /* TSC clock must be stable and known for this feature. */ |
1690 | && tsc_is_stable_and_known(env)) { |
1691 | |
1692 | c = &cpuid_data.entries[cpuid_i++]; |
1693 | c->function = KVM_CPUID_SIGNATURE | 0x10; |
1694 | c->eax = env->tsc_khz; |
1695 | /* LAPIC resolution of 1ns (freq: 1GHz) is hardcoded in KVM's |
1696 | * APIC_BUS_CYCLE_NS */ |
1697 | c->ebx = 1000000; |
1698 | c->ecx = c->edx = 0; |
1699 | |
1700 | c = cpuid_find_entry(&cpuid_data.cpuid, kvm_base, 0); |
1701 | c->eax = MAX(c->eax, KVM_CPUID_SIGNATURE | 0x10); |
1702 | } |
1703 | |
1704 | cpuid_data.cpuid.nent = cpuid_i; |
1705 | |
1706 | cpuid_data.cpuid.padding = 0; |
1707 | r = kvm_vcpu_ioctl(cs, KVM_SET_CPUID2, &cpuid_data); |
1708 | if (r) { |
1709 | goto fail; |
1710 | } |
1711 | |
1712 | if (has_xsave) { |
1713 | env->xsave_buf = qemu_memalign(4096, sizeof(struct kvm_xsave)); |
1714 | memset(env->xsave_buf, 0, sizeof(struct kvm_xsave)); |
1715 | } |
1716 | |
1717 | max_nested_state_len = kvm_max_nested_state_length(); |
1718 | if (max_nested_state_len > 0) { |
1719 | assert(max_nested_state_len >= offsetof(struct kvm_nested_state, data)); |
1720 | |
1721 | if (cpu_has_vmx(env)) { |
1722 | struct kvm_vmx_nested_state_hdr *vmx_hdr; |
1723 | |
1724 | env->nested_state = g_malloc0(max_nested_state_len); |
1725 | env->nested_state->size = max_nested_state_len; |
1726 | env->nested_state->format = KVM_STATE_NESTED_FORMAT_VMX; |
1727 | |
1728 | vmx_hdr = &env->nested_state->hdr.vmx; |
1729 | vmx_hdr->vmxon_pa = -1ull; |
1730 | vmx_hdr->vmcs12_pa = -1ull; |
1731 | } |
1732 | } |
1733 | |
1734 | cpu->kvm_msr_buf = g_malloc0(MSR_BUF_SIZE); |
1735 | |
1736 | if (!(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_RDTSCP)) { |
1737 | has_msr_tsc_aux = false; |
1738 | } |
1739 | |
1740 | r = hyperv_init_vcpu(cpu); |
1741 | if (r) { |
1742 | goto fail; |
1743 | } |
1744 | |
1745 | return 0; |
1746 | |
1747 | fail: |
1748 | migrate_del_blocker(invtsc_mig_blocker); |
1749 | |
1750 | return r; |
1751 | } |
1752 | |
1753 | int kvm_arch_destroy_vcpu(CPUState *cs) |
1754 | { |
1755 | X86CPU *cpu = X86_CPU(cs); |
1756 | CPUX86State *env = &cpu->env; |
1757 | |
1758 | if (cpu->kvm_msr_buf) { |
1759 | g_free(cpu->kvm_msr_buf); |
1760 | cpu->kvm_msr_buf = NULL; |
1761 | } |
1762 | |
1763 | if (env->nested_state) { |
1764 | g_free(env->nested_state); |
1765 | env->nested_state = NULL; |
1766 | } |
1767 | |
1768 | return 0; |
1769 | } |
1770 | |
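/*
 * Restore the architectural reset state that KVM does not handle itself:
 * XCR0 falls back to x87 state only, and with the in-kernel irqchip only
 * the BSP comes up runnable while APs wait for INIT/SIPI.
 */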
1771 | void kvm_arch_reset_vcpu(X86CPU *cpu) |
1772 | { |
1773 | CPUX86State *env = &cpu->env; |
1774 | |
1775 | env->xcr0 = 1; |
1776 | if (kvm_irqchip_in_kernel()) { |
1777 | env->mp_state = cpu_is_bsp(cpu) ? KVM_MP_STATE_RUNNABLE : |
1778 | KVM_MP_STATE_UNINITIALIZED; |
1779 | } else { |
1780 | env->mp_state = KVM_MP_STATE_RUNNABLE; |
1781 | } |
1782 | |
1783 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { |
1784 | int i; |
1785 | for (i = 0; i < ARRAY_SIZE(env->msr_hv_synic_sint); i++) { |
1786 | env->msr_hv_synic_sint[i] = HV_SINT_MASKED; |
1787 | } |
1788 | |
1789 | hyperv_x86_synic_reset(cpu); |
1790 | } |
1791 | /* enabled by default */ |
1792 | env->poll_control_msr = 1; |
1793 | } |
1794 | |
1795 | void kvm_arch_do_init_vcpu(X86CPU *cpu) |
1796 | { |
1797 | CPUX86State *env = &cpu->env; |
1798 | |
1799 | /* APs get directly into wait-for-SIPI state. */ |
1800 | if (env->mp_state == KVM_MP_STATE_UNINITIALIZED) { |
1801 | env->mp_state = KVM_MP_STATE_INIT_RECEIVED; |
1802 | } |
1803 | } |
1804 | |
1805 | static int kvm_get_supported_feature_msrs(KVMState *s) |
1806 | { |
1807 | int ret = 0; |
1808 | |
1809 | if (kvm_feature_msrs != NULL) { |
1810 | return 0; |
1811 | } |
1812 | |
1813 | if (!kvm_check_extension(s, KVM_CAP_GET_MSR_FEATURES)) { |
1814 | return 0; |
1815 | } |
1816 | |
1817 | struct kvm_msr_list msr_list; |
1818 | |
1819 | msr_list.nmsrs = 0; |
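    /*
     * Probe pass: with nmsrs == 0 the ioctl fails with -E2BIG but fills in
     * the number of feature MSRs, which sizes the second call below.
     */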
1820 | ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, &msr_list); |
1821 | if (ret < 0 && ret != -E2BIG) { |
1822 | error_report("Fetch KVM feature MSR list failed: %s" , |
1823 | strerror(-ret)); |
1824 | return ret; |
1825 | } |
1826 | |
1827 | assert(msr_list.nmsrs > 0); |
    kvm_feature_msrs = (struct kvm_msr_list *)
        g_malloc0(sizeof(msr_list) +
                  msr_list.nmsrs * sizeof(msr_list.indices[0]));
1831 | |
1832 | kvm_feature_msrs->nmsrs = msr_list.nmsrs; |
1833 | ret = kvm_ioctl(s, KVM_GET_MSR_FEATURE_INDEX_LIST, kvm_feature_msrs); |
1834 | |
1835 | if (ret < 0) { |
1836 | error_report("Fetch KVM feature MSR list failed: %s" , |
1837 | strerror(-ret)); |
1838 | g_free(kvm_feature_msrs); |
1839 | kvm_feature_msrs = NULL; |
1840 | return ret; |
1841 | } |
1842 | |
1843 | return 0; |
1844 | } |
1845 | |
1846 | static int kvm_get_supported_msrs(KVMState *s) |
1847 | { |
1848 | int ret = 0; |
1849 | struct kvm_msr_list msr_list, *kvm_msr_list; |
1850 | |
1851 | /* |
1852 | * Obtain MSR list from KVM. These are the MSRs that we must |
1853 | * save/restore. |
1854 | */ |
1855 | msr_list.nmsrs = 0; |
1856 | ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, &msr_list); |
1857 | if (ret < 0 && ret != -E2BIG) { |
1858 | return ret; |
1859 | } |
1860 | /* |
1861 | * Old kernel modules had a bug and could write beyond the provided |
1862 | * memory. Allocate at least a safe amount of 1K. |
1863 | */ |
1864 | kvm_msr_list = g_malloc0(MAX(1024, sizeof(msr_list) + |
1865 | msr_list.nmsrs * |
1866 | sizeof(msr_list.indices[0]))); |
1867 | |
1868 | kvm_msr_list->nmsrs = msr_list.nmsrs; |
1869 | ret = kvm_ioctl(s, KVM_GET_MSR_INDEX_LIST, kvm_msr_list); |
1870 | if (ret >= 0) { |
1871 | int i; |
1872 | |
1873 | for (i = 0; i < kvm_msr_list->nmsrs; i++) { |
1874 | switch (kvm_msr_list->indices[i]) { |
1875 | case MSR_STAR: |
1876 | has_msr_star = true; |
1877 | break; |
1878 | case MSR_VM_HSAVE_PA: |
1879 | has_msr_hsave_pa = true; |
1880 | break; |
1881 | case MSR_TSC_AUX: |
1882 | has_msr_tsc_aux = true; |
1883 | break; |
1884 | case MSR_TSC_ADJUST: |
1885 | has_msr_tsc_adjust = true; |
1886 | break; |
1887 | case MSR_IA32_TSCDEADLINE: |
1888 | has_msr_tsc_deadline = true; |
1889 | break; |
1890 | case MSR_IA32_SMBASE: |
1891 | has_msr_smbase = true; |
1892 | break; |
1893 | case MSR_SMI_COUNT: |
1894 | has_msr_smi_count = true; |
1895 | break; |
1896 | case MSR_IA32_MISC_ENABLE: |
1897 | has_msr_misc_enable = true; |
1898 | break; |
1899 | case MSR_IA32_BNDCFGS: |
1900 | has_msr_bndcfgs = true; |
1901 | break; |
1902 | case MSR_IA32_XSS: |
1903 | has_msr_xss = true; |
1904 | break; |
1905 | case HV_X64_MSR_CRASH_CTL: |
1906 | has_msr_hv_crash = true; |
1907 | break; |
1908 | case HV_X64_MSR_RESET: |
1909 | has_msr_hv_reset = true; |
1910 | break; |
1911 | case HV_X64_MSR_VP_INDEX: |
1912 | has_msr_hv_vpindex = true; |
1913 | break; |
1914 | case HV_X64_MSR_VP_RUNTIME: |
1915 | has_msr_hv_runtime = true; |
1916 | break; |
1917 | case HV_X64_MSR_SCONTROL: |
1918 | has_msr_hv_synic = true; |
1919 | break; |
1920 | case HV_X64_MSR_STIMER0_CONFIG: |
1921 | has_msr_hv_stimer = true; |
1922 | break; |
1923 | case HV_X64_MSR_TSC_FREQUENCY: |
1924 | has_msr_hv_frequencies = true; |
1925 | break; |
1926 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
1927 | has_msr_hv_reenlightenment = true; |
1928 | break; |
1929 | case MSR_IA32_SPEC_CTRL: |
1930 | has_msr_spec_ctrl = true; |
1931 | break; |
1932 | case MSR_VIRT_SSBD: |
1933 | has_msr_virt_ssbd = true; |
1934 | break; |
1935 | case MSR_IA32_ARCH_CAPABILITIES: |
1936 | has_msr_arch_capabs = true; |
1937 | break; |
1938 | case MSR_IA32_CORE_CAPABILITY: |
1939 | has_msr_core_capabs = true; |
1940 | break; |
1941 | } |
1942 | } |
1943 | } |
1944 | |
1945 | g_free(kvm_msr_list); |
1946 | |
1947 | return ret; |
1948 | } |
1949 | |
1950 | static Notifier smram_machine_done; |
1951 | static KVMMemoryListener smram_listener; |
1952 | static AddressSpace smram_address_space; |
1953 | static MemoryRegion smram_as_root; |
1954 | static MemoryRegion smram_as_mem; |
1955 | |
1956 | static void register_smram_listener(Notifier *n, void *unused) |
1957 | { |
1958 | MemoryRegion *smram = |
        (MemoryRegion *) object_resolve_path("/machine/smram", NULL);
1960 | |
1961 | /* Outer container... */ |
    memory_region_init(&smram_as_root, OBJECT(kvm_state),
                       "mem-container-smram", ~0ull);
1963 | memory_region_set_enabled(&smram_as_root, true); |
1964 | |
1965 | /* ... with two regions inside: normal system memory with low |
1966 | * priority, and... |
1967 | */ |
    memory_region_init_alias(&smram_as_mem, OBJECT(kvm_state), "mem-smram",
1969 | get_system_memory(), 0, ~0ull); |
1970 | memory_region_add_subregion_overlap(&smram_as_root, 0, &smram_as_mem, 0); |
1971 | memory_region_set_enabled(&smram_as_mem, true); |
1972 | |
1973 | if (smram) { |
1974 | /* ... SMRAM with higher priority */ |
1975 | memory_region_add_subregion_overlap(&smram_as_root, 0, smram, 10); |
1976 | memory_region_set_enabled(smram, true); |
1977 | } |
1978 | |
    address_space_init(&smram_address_space, &smram_as_root, "KVM-SMRAM");
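    /* Listener slot 1 is KVM's SMM address space on x86 (see
     * KVM_CAP_MULTI_ADDRESS_SPACE). */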
1980 | kvm_memory_listener_register(kvm_state, &smram_listener, |
1981 | &smram_address_space, 1); |
1982 | } |
1983 | |
1984 | int kvm_arch_init(MachineState *ms, KVMState *s) |
1985 | { |
1986 | uint64_t identity_base = 0xfffbc000; |
1987 | uint64_t shadow_mem; |
1988 | int ret; |
1989 | struct utsname utsname; |
1990 | |
1991 | has_xsave = kvm_check_extension(s, KVM_CAP_XSAVE); |
1992 | has_xcrs = kvm_check_extension(s, KVM_CAP_XCRS); |
1993 | has_pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2); |
1994 | |
1995 | hv_vpindex_settable = kvm_check_extension(s, KVM_CAP_HYPERV_VP_INDEX); |
1996 | |
1997 | has_exception_payload = kvm_check_extension(s, KVM_CAP_EXCEPTION_PAYLOAD); |
1998 | if (has_exception_payload) { |
1999 | ret = kvm_vm_enable_cap(s, KVM_CAP_EXCEPTION_PAYLOAD, 0, true); |
2000 | if (ret < 0) { |
2001 | error_report("kvm: Failed to enable exception payload cap: %s" , |
2002 | strerror(-ret)); |
2003 | return ret; |
2004 | } |
2005 | } |
2006 | |
2007 | ret = kvm_get_supported_msrs(s); |
2008 | if (ret < 0) { |
2009 | return ret; |
2010 | } |
2011 | |
2012 | kvm_get_supported_feature_msrs(s); |
2013 | |
2014 | uname(&utsname); |
    lm_capable_kernel = strcmp(utsname.machine, "x86_64") == 0;
2016 | |
2017 | /* |
2018 | * On older Intel CPUs, KVM uses vm86 mode to emulate 16-bit code directly. |
2019 | * In order to use vm86 mode, an EPT identity map and a TSS are needed. |
2020 | * Since these must be part of guest physical memory, we need to allocate |
2021 | * them, both by setting their start addresses in the kernel and by |
2022 | * creating a corresponding e820 entry. We need 4 pages before the BIOS. |
2023 | * |
2024 | * Older KVM versions may not support setting the identity map base. In |
2025 | * that case we need to stick with the default, i.e. a 256K maximum BIOS |
2026 | * size. |
2027 | */ |
2028 | if (kvm_check_extension(s, KVM_CAP_SET_IDENTITY_MAP_ADDR)) { |
        /* Allows BIOS images of up to 16 MB. */
2030 | identity_base = 0xfeffc000; |
2031 | |
2032 | ret = kvm_vm_ioctl(s, KVM_SET_IDENTITY_MAP_ADDR, &identity_base); |
2033 | if (ret < 0) { |
2034 | return ret; |
2035 | } |
2036 | } |
2037 | |
2038 | /* Set TSS base one page after EPT identity map. */ |
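    /*
     * The TSS itself spans three pages, so the identity-map page plus the
     * TSS fill the four-page (0x4000) area reserved via e820 below.
     */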
2039 | ret = kvm_vm_ioctl(s, KVM_SET_TSS_ADDR, identity_base + 0x1000); |
2040 | if (ret < 0) { |
2041 | return ret; |
2042 | } |
2043 | |
2044 | /* Tell fw_cfg to notify the BIOS to reserve the range. */ |
2045 | ret = e820_add_entry(identity_base, 0x4000, E820_RESERVED); |
2046 | if (ret < 0) { |
        fprintf(stderr, "e820_add_entry() table is full\n");
2048 | return ret; |
2049 | } |
2050 | qemu_register_reset(kvm_unpoison_all, NULL); |
2051 | |
2052 | shadow_mem = machine_kvm_shadow_mem(ms); |
2053 | if (shadow_mem != -1) { |
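        /* machine_kvm_shadow_mem() returns bytes; KVM_SET_NR_MMU_PAGES
         * takes a count of 4 KiB pages. */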
2054 | shadow_mem /= 4096; |
2055 | ret = kvm_vm_ioctl(s, KVM_SET_NR_MMU_PAGES, shadow_mem); |
2056 | if (ret < 0) { |
2057 | return ret; |
2058 | } |
2059 | } |
2060 | |
2061 | if (kvm_check_extension(s, KVM_CAP_X86_SMM) && |
2062 | object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE) && |
2063 | pc_machine_is_smm_enabled(PC_MACHINE(ms))) { |
2064 | smram_machine_done.notify = register_smram_listener; |
2065 | qemu_add_machine_init_done_notifier(&smram_machine_done); |
2066 | } |
2067 | |
2068 | if (enable_cpu_pm) { |
2069 | int disable_exits = kvm_check_extension(s, KVM_CAP_X86_DISABLE_EXITS); |
2070 | int ret; |
2071 | |
        /* Workaround for a kernel header typo. TODO: fix header and drop. */
2073 | #if defined(KVM_X86_DISABLE_EXITS_HTL) && !defined(KVM_X86_DISABLE_EXITS_HLT) |
2074 | #define KVM_X86_DISABLE_EXITS_HLT KVM_X86_DISABLE_EXITS_HTL |
2075 | #endif |
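        /*
         * Letting the guest execute MWAIT/HLT/PAUSE without a VM exit
         * trades host-side idle accounting for lower guest wakeup latency.
         */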
2076 | if (disable_exits) { |
2077 | disable_exits &= (KVM_X86_DISABLE_EXITS_MWAIT | |
2078 | KVM_X86_DISABLE_EXITS_HLT | |
2079 | KVM_X86_DISABLE_EXITS_PAUSE); |
2080 | } |
2081 | |
2082 | ret = kvm_vm_enable_cap(s, KVM_CAP_X86_DISABLE_EXITS, 0, |
2083 | disable_exits); |
2084 | if (ret < 0) { |
2085 | error_report("kvm: guest stopping CPU not supported: %s" , |
2086 | strerror(-ret)); |
2087 | } |
2088 | } |
2089 | |
2090 | return 0; |
2091 | } |
2092 | |
2093 | static void set_v8086_seg(struct kvm_segment *lhs, const SegmentCache *rhs) |
2094 | { |
2095 | lhs->selector = rhs->selector; |
2096 | lhs->base = rhs->base; |
2097 | lhs->limit = rhs->limit; |
2098 | lhs->type = 3; |
2099 | lhs->present = 1; |
2100 | lhs->dpl = 3; |
2101 | lhs->db = 0; |
2102 | lhs->s = 1; |
2103 | lhs->l = 0; |
2104 | lhs->g = 0; |
2105 | lhs->avl = 0; |
2106 | lhs->unusable = 0; |
2107 | } |
2108 | |
2109 | static void set_seg(struct kvm_segment *lhs, const SegmentCache *rhs) |
2110 | { |
2111 | unsigned flags = rhs->flags; |
2112 | lhs->selector = rhs->selector; |
2113 | lhs->base = rhs->base; |
2114 | lhs->limit = rhs->limit; |
2115 | lhs->type = (flags >> DESC_TYPE_SHIFT) & 15; |
2116 | lhs->present = (flags & DESC_P_MASK) != 0; |
2117 | lhs->dpl = (flags >> DESC_DPL_SHIFT) & 3; |
2118 | lhs->db = (flags >> DESC_B_SHIFT) & 1; |
2119 | lhs->s = (flags & DESC_S_MASK) != 0; |
2120 | lhs->l = (flags >> DESC_L_SHIFT) & 1; |
2121 | lhs->g = (flags & DESC_G_MASK) != 0; |
2122 | lhs->avl = (flags & DESC_AVL_MASK) != 0; |
2123 | lhs->unusable = !lhs->present; |
2124 | lhs->padding = 0; |
2125 | } |
2126 | |
2127 | static void get_seg(SegmentCache *lhs, const struct kvm_segment *rhs) |
2128 | { |
2129 | lhs->selector = rhs->selector; |
2130 | lhs->base = rhs->base; |
2131 | lhs->limit = rhs->limit; |
2132 | lhs->flags = (rhs->type << DESC_TYPE_SHIFT) | |
2133 | ((rhs->present && !rhs->unusable) * DESC_P_MASK) | |
2134 | (rhs->dpl << DESC_DPL_SHIFT) | |
2135 | (rhs->db << DESC_B_SHIFT) | |
2136 | (rhs->s * DESC_S_MASK) | |
2137 | (rhs->l << DESC_L_SHIFT) | |
2138 | (rhs->g * DESC_G_MASK) | |
2139 | (rhs->avl * DESC_AVL_MASK); |
2140 | } |
2141 | |
2142 | static void kvm_getput_reg(__u64 *kvm_reg, target_ulong *qemu_reg, int set) |
2143 | { |
2144 | if (set) { |
2145 | *kvm_reg = *qemu_reg; |
2146 | } else { |
2147 | *qemu_reg = *kvm_reg; |
2148 | } |
2149 | } |
2150 | |
2151 | static int kvm_getput_regs(X86CPU *cpu, int set) |
2152 | { |
2153 | CPUX86State *env = &cpu->env; |
2154 | struct kvm_regs regs; |
2155 | int ret = 0; |
2156 | |
2157 | if (!set) { |
2158 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_REGS, ®s); |
2159 | if (ret < 0) { |
2160 | return ret; |
2161 | } |
2162 | } |
2163 | |
2164 | kvm_getput_reg(®s.rax, &env->regs[R_EAX], set); |
2165 | kvm_getput_reg(®s.rbx, &env->regs[R_EBX], set); |
2166 | kvm_getput_reg(®s.rcx, &env->regs[R_ECX], set); |
2167 | kvm_getput_reg(®s.rdx, &env->regs[R_EDX], set); |
2168 | kvm_getput_reg(®s.rsi, &env->regs[R_ESI], set); |
2169 | kvm_getput_reg(®s.rdi, &env->regs[R_EDI], set); |
2170 | kvm_getput_reg(®s.rsp, &env->regs[R_ESP], set); |
2171 | kvm_getput_reg(®s.rbp, &env->regs[R_EBP], set); |
2172 | #ifdef TARGET_X86_64 |
2173 | kvm_getput_reg(®s.r8, &env->regs[8], set); |
2174 | kvm_getput_reg(®s.r9, &env->regs[9], set); |
2175 | kvm_getput_reg(®s.r10, &env->regs[10], set); |
2176 | kvm_getput_reg(®s.r11, &env->regs[11], set); |
2177 | kvm_getput_reg(®s.r12, &env->regs[12], set); |
2178 | kvm_getput_reg(®s.r13, &env->regs[13], set); |
2179 | kvm_getput_reg(®s.r14, &env->regs[14], set); |
2180 | kvm_getput_reg(®s.r15, &env->regs[15], set); |
2181 | #endif |
2182 | |
2183 | kvm_getput_reg(®s.rflags, &env->eflags, set); |
2184 | kvm_getput_reg(®s.rip, &env->eip, set); |
2185 | |
2186 | if (set) { |
2187 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_REGS, ®s); |
2188 | } |
2189 | |
2190 | return ret; |
2191 | } |
2192 | |
2193 | static int kvm_put_fpu(X86CPU *cpu) |
2194 | { |
2195 | CPUX86State *env = &cpu->env; |
2196 | struct kvm_fpu fpu; |
2197 | int i; |
2198 | |
2199 | memset(&fpu, 0, sizeof fpu); |
2200 | fpu.fsw = env->fpus & ~(7 << 11); |
2201 | fpu.fsw |= (env->fpstt & 7) << 11; |
2202 | fpu.fcw = env->fpuc; |
2203 | fpu.last_opcode = env->fpop; |
2204 | fpu.last_ip = env->fpip; |
2205 | fpu.last_dp = env->fpdp; |
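    /*
     * ftwx is the abridged FP tag word: one bit per register, set when the
     * register is valid. QEMU's fptags uses the opposite sense (1 == empty),
     * hence the negation below.
     */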
2206 | for (i = 0; i < 8; ++i) { |
2207 | fpu.ftwx |= (!env->fptags[i]) << i; |
2208 | } |
2209 | memcpy(fpu.fpr, env->fpregs, sizeof env->fpregs); |
2210 | for (i = 0; i < CPU_NB_REGS; i++) { |
2211 | stq_p(&fpu.xmm[i][0], env->xmm_regs[i].ZMM_Q(0)); |
2212 | stq_p(&fpu.xmm[i][8], env->xmm_regs[i].ZMM_Q(1)); |
2213 | } |
2214 | fpu.mxcsr = env->mxcsr; |
2215 | |
2216 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_FPU, &fpu); |
2217 | } |
2218 | |
2219 | #define XSAVE_FCW_FSW 0 |
2220 | #define XSAVE_FTW_FOP 1 |
2221 | #define XSAVE_CWD_RIP 2 |
2222 | #define XSAVE_CWD_RDP 4 |
2223 | #define XSAVE_MXCSR 6 |
2224 | #define XSAVE_ST_SPACE 8 |
2225 | #define XSAVE_XMM_SPACE 40 |
2226 | #define XSAVE_XSTATE_BV 128 |
2227 | #define XSAVE_YMMH_SPACE 144 |
2228 | #define XSAVE_BNDREGS 240 |
2229 | #define XSAVE_BNDCSR 256 |
2230 | #define XSAVE_OPMASK 272 |
2231 | #define XSAVE_ZMM_Hi256 288 |
2232 | #define XSAVE_Hi16_ZMM 416 |
2233 | #define XSAVE_PKRU 672 |
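/*
 * The offsets above index struct kvm_xsave.region[], an array of 32-bit
 * words; XSAVE_BYTE_OFFSET converts them to byte offsets so the build-time
 * asserts below can check them against the X86XSaveArea layout.
 */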
2234 | |
2235 | #define XSAVE_BYTE_OFFSET(word_offset) \ |
2236 | ((word_offset) * sizeof_field(struct kvm_xsave, region[0])) |
2237 | |
2238 | #define ASSERT_OFFSET(word_offset, field) \ |
2239 | QEMU_BUILD_BUG_ON(XSAVE_BYTE_OFFSET(word_offset) != \ |
2240 | offsetof(X86XSaveArea, field)) |
2241 | |
2242 | ASSERT_OFFSET(XSAVE_FCW_FSW, legacy.fcw); |
2243 | ASSERT_OFFSET(XSAVE_FTW_FOP, legacy.ftw); |
2244 | ASSERT_OFFSET(XSAVE_CWD_RIP, legacy.fpip); |
2245 | ASSERT_OFFSET(XSAVE_CWD_RDP, legacy.fpdp); |
2246 | ASSERT_OFFSET(XSAVE_MXCSR, legacy.mxcsr); |
2247 | ASSERT_OFFSET(XSAVE_ST_SPACE, legacy.fpregs); |
2248 | ASSERT_OFFSET(XSAVE_XMM_SPACE, legacy.xmm_regs); |
2249 | ASSERT_OFFSET(XSAVE_XSTATE_BV, header.xstate_bv); |
2250 | ASSERT_OFFSET(XSAVE_YMMH_SPACE, avx_state); |
2251 | ASSERT_OFFSET(XSAVE_BNDREGS, bndreg_state); |
2252 | ASSERT_OFFSET(XSAVE_BNDCSR, bndcsr_state); |
2253 | ASSERT_OFFSET(XSAVE_OPMASK, opmask_state); |
2254 | ASSERT_OFFSET(XSAVE_ZMM_Hi256, zmm_hi256_state); |
2255 | ASSERT_OFFSET(XSAVE_Hi16_ZMM, hi16_zmm_state); |
2256 | ASSERT_OFFSET(XSAVE_PKRU, pkru_state); |
2257 | |
2258 | static int kvm_put_xsave(X86CPU *cpu) |
2259 | { |
2260 | CPUX86State *env = &cpu->env; |
2261 | X86XSaveArea *xsave = env->xsave_buf; |
2262 | |
2263 | if (!has_xsave) { |
2264 | return kvm_put_fpu(cpu); |
2265 | } |
2266 | x86_cpu_xsave_all_areas(cpu, xsave); |
2267 | |
2268 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XSAVE, xsave); |
2269 | } |
2270 | |
2271 | static int kvm_put_xcrs(X86CPU *cpu) |
2272 | { |
2273 | CPUX86State *env = &cpu->env; |
2274 | struct kvm_xcrs xcrs = {}; |
2275 | |
2276 | if (!has_xcrs) { |
2277 | return 0; |
2278 | } |
2279 | |
2280 | xcrs.nr_xcrs = 1; |
2281 | xcrs.flags = 0; |
2282 | xcrs.xcrs[0].xcr = 0; |
2283 | xcrs.xcrs[0].value = env->xcr0; |
2284 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_XCRS, &xcrs); |
2285 | } |
2286 | |
2287 | static int kvm_put_sregs(X86CPU *cpu) |
2288 | { |
2289 | CPUX86State *env = &cpu->env; |
2290 | struct kvm_sregs sregs; |
2291 | |
2292 | memset(sregs.interrupt_bitmap, 0, sizeof(sregs.interrupt_bitmap)); |
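    /* At most one interrupt can be pending at a time; mirror it into the
     * bitmap (kvm_get_sregs() performs the inverse lookup). */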
2293 | if (env->interrupt_injected >= 0) { |
2294 | sregs.interrupt_bitmap[env->interrupt_injected / 64] |= |
2295 | (uint64_t)1 << (env->interrupt_injected % 64); |
2296 | } |
2297 | |
2298 | if ((env->eflags & VM_MASK)) { |
2299 | set_v8086_seg(&sregs.cs, &env->segs[R_CS]); |
2300 | set_v8086_seg(&sregs.ds, &env->segs[R_DS]); |
2301 | set_v8086_seg(&sregs.es, &env->segs[R_ES]); |
2302 | set_v8086_seg(&sregs.fs, &env->segs[R_FS]); |
2303 | set_v8086_seg(&sregs.gs, &env->segs[R_GS]); |
2304 | set_v8086_seg(&sregs.ss, &env->segs[R_SS]); |
2305 | } else { |
2306 | set_seg(&sregs.cs, &env->segs[R_CS]); |
2307 | set_seg(&sregs.ds, &env->segs[R_DS]); |
2308 | set_seg(&sregs.es, &env->segs[R_ES]); |
2309 | set_seg(&sregs.fs, &env->segs[R_FS]); |
2310 | set_seg(&sregs.gs, &env->segs[R_GS]); |
2311 | set_seg(&sregs.ss, &env->segs[R_SS]); |
2312 | } |
2313 | |
2314 | set_seg(&sregs.tr, &env->tr); |
2315 | set_seg(&sregs.ldt, &env->ldt); |
2316 | |
2317 | sregs.idt.limit = env->idt.limit; |
2318 | sregs.idt.base = env->idt.base; |
2319 | memset(sregs.idt.padding, 0, sizeof sregs.idt.padding); |
2320 | sregs.gdt.limit = env->gdt.limit; |
2321 | sregs.gdt.base = env->gdt.base; |
2322 | memset(sregs.gdt.padding, 0, sizeof sregs.gdt.padding); |
2323 | |
2324 | sregs.cr0 = env->cr[0]; |
2325 | sregs.cr2 = env->cr[2]; |
2326 | sregs.cr3 = env->cr[3]; |
2327 | sregs.cr4 = env->cr[4]; |
2328 | |
2329 | sregs.cr8 = cpu_get_apic_tpr(cpu->apic_state); |
2330 | sregs.apic_base = cpu_get_apic_base(cpu->apic_state); |
2331 | |
2332 | sregs.efer = env->efer; |
2333 | |
2334 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs); |
2335 | } |
2336 | |
2337 | static void kvm_msr_buf_reset(X86CPU *cpu) |
2338 | { |
2339 | memset(cpu->kvm_msr_buf, 0, MSR_BUF_SIZE); |
2340 | } |
2341 | |
2342 | static void kvm_msr_entry_add(X86CPU *cpu, uint32_t index, uint64_t value) |
2343 | { |
2344 | struct kvm_msrs *msrs = cpu->kvm_msr_buf; |
2345 | void *limit = ((void *)msrs) + MSR_BUF_SIZE; |
2346 | struct kvm_msr_entry *entry = &msrs->entries[msrs->nmsrs]; |
2347 | |
2348 | assert((void *)(entry + 1) <= limit); |
2349 | |
2350 | entry->index = index; |
2351 | entry->reserved = 0; |
2352 | entry->data = value; |
2353 | msrs->nmsrs++; |
2354 | } |
2355 | |
2356 | static int kvm_put_one_msr(X86CPU *cpu, int index, uint64_t value) |
2357 | { |
2358 | kvm_msr_buf_reset(cpu); |
2359 | kvm_msr_entry_add(cpu, index, value); |
2360 | |
2361 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); |
2362 | } |
2363 | |
2364 | void kvm_put_apicbase(X86CPU *cpu, uint64_t value) |
2365 | { |
2366 | int ret; |
2367 | |
2368 | ret = kvm_put_one_msr(cpu, MSR_IA32_APICBASE, value); |
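    /* KVM_SET_MSRS returns the number of MSRs successfully written. */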
2369 | assert(ret == 1); |
2370 | } |
2371 | |
2372 | static int kvm_put_tscdeadline_msr(X86CPU *cpu) |
2373 | { |
2374 | CPUX86State *env = &cpu->env; |
2375 | int ret; |
2376 | |
2377 | if (!has_msr_tsc_deadline) { |
2378 | return 0; |
2379 | } |
2380 | |
2381 | ret = kvm_put_one_msr(cpu, MSR_IA32_TSCDEADLINE, env->tsc_deadline); |
2382 | if (ret < 0) { |
2383 | return ret; |
2384 | } |
2385 | |
2386 | assert(ret == 1); |
2387 | return 0; |
2388 | } |
2389 | |
2390 | /* |
2391 | * Provide a separate write service for the feature control MSR in order to |
2392 | * kick the VCPU out of VMXON or even guest mode on reset. This has to be done |
2393 | * before writing any other state because forcibly leaving nested mode |
2394 | * invalidates the VCPU state. |
2395 | */ |
2396 | static int kvm_put_msr_feature_control(X86CPU *cpu) |
2397 | { |
2398 | int ret; |
2399 | |
2400 | if (!has_msr_feature_control) { |
2401 | return 0; |
2402 | } |
2403 | |
2404 | ret = kvm_put_one_msr(cpu, MSR_IA32_FEATURE_CONTROL, |
2405 | cpu->env.msr_ia32_feature_control); |
2406 | if (ret < 0) { |
2407 | return ret; |
2408 | } |
2409 | |
2410 | assert(ret == 1); |
2411 | return 0; |
2412 | } |
2413 | |
2414 | static int kvm_put_msrs(X86CPU *cpu, int level) |
2415 | { |
2416 | CPUX86State *env = &cpu->env; |
2417 | int i; |
2418 | int ret; |
2419 | |
2420 | kvm_msr_buf_reset(cpu); |
2421 | |
2422 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, env->sysenter_cs); |
2423 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, env->sysenter_esp); |
2424 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, env->sysenter_eip); |
2425 | kvm_msr_entry_add(cpu, MSR_PAT, env->pat); |
2426 | if (has_msr_star) { |
2427 | kvm_msr_entry_add(cpu, MSR_STAR, env->star); |
2428 | } |
2429 | if (has_msr_hsave_pa) { |
2430 | kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, env->vm_hsave); |
2431 | } |
2432 | if (has_msr_tsc_aux) { |
2433 | kvm_msr_entry_add(cpu, MSR_TSC_AUX, env->tsc_aux); |
2434 | } |
2435 | if (has_msr_tsc_adjust) { |
2436 | kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, env->tsc_adjust); |
2437 | } |
2438 | if (has_msr_misc_enable) { |
2439 | kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, |
2440 | env->msr_ia32_misc_enable); |
2441 | } |
2442 | if (has_msr_smbase) { |
2443 | kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, env->smbase); |
2444 | } |
2445 | if (has_msr_smi_count) { |
2446 | kvm_msr_entry_add(cpu, MSR_SMI_COUNT, env->msr_smi_count); |
2447 | } |
2448 | if (has_msr_bndcfgs) { |
2449 | kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, env->msr_bndcfgs); |
2450 | } |
2451 | if (has_msr_xss) { |
2452 | kvm_msr_entry_add(cpu, MSR_IA32_XSS, env->xss); |
2453 | } |
2454 | if (has_msr_spec_ctrl) { |
2455 | kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, env->spec_ctrl); |
2456 | } |
2457 | if (has_msr_virt_ssbd) { |
2458 | kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, env->virt_ssbd); |
2459 | } |
2460 | |
2461 | #ifdef TARGET_X86_64 |
2462 | if (lm_capable_kernel) { |
2463 | kvm_msr_entry_add(cpu, MSR_CSTAR, env->cstar); |
2464 | kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, env->kernelgsbase); |
2465 | kvm_msr_entry_add(cpu, MSR_FMASK, env->fmask); |
2466 | kvm_msr_entry_add(cpu, MSR_LSTAR, env->lstar); |
2467 | } |
2468 | #endif |
2469 | |
    /* If the host supports the feature MSR, write the value through. */
2471 | if (has_msr_arch_capabs) { |
2472 | kvm_msr_entry_add(cpu, MSR_IA32_ARCH_CAPABILITIES, |
2473 | env->features[FEAT_ARCH_CAPABILITIES]); |
2474 | } |
2475 | |
2476 | if (has_msr_core_capabs) { |
2477 | kvm_msr_entry_add(cpu, MSR_IA32_CORE_CAPABILITY, |
2478 | env->features[FEAT_CORE_CAPABILITY]); |
2479 | } |
2480 | |
2481 | /* |
2482 | * The following MSRs have side effects on the guest or are too heavy |
2483 | * for normal writeback. Limit them to reset or full state updates. |
2484 | */ |
2485 | if (level >= KVM_PUT_RESET_STATE) { |
2486 | kvm_msr_entry_add(cpu, MSR_IA32_TSC, env->tsc); |
2487 | kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, env->system_time_msr); |
2488 | kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, env->wall_clock_msr); |
2489 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { |
2490 | kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, env->async_pf_en_msr); |
2491 | } |
2492 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { |
2493 | kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, env->pv_eoi_en_msr); |
2494 | } |
2495 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { |
2496 | kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, env->steal_time_msr); |
2497 | } |
2498 | |
2499 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { |
2500 | kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, env->poll_control_msr); |
2501 | } |
2502 | |
2503 | if (has_architectural_pmu_version > 0) { |
2504 | if (has_architectural_pmu_version > 1) { |
                /* Stop the counters. */
2506 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); |
2507 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); |
2508 | } |
2509 | |
2510 | /* Set the counter values. */ |
2511 | for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { |
2512 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, |
2513 | env->msr_fixed_counters[i]); |
2514 | } |
2515 | for (i = 0; i < num_architectural_pmu_gp_counters; i++) { |
2516 | kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, |
2517 | env->msr_gp_counters[i]); |
2518 | kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, |
2519 | env->msr_gp_evtsel[i]); |
2520 | } |
2521 | if (has_architectural_pmu_version > 1) { |
2522 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, |
2523 | env->msr_global_status); |
2524 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, |
2525 | env->msr_global_ovf_ctrl); |
2526 | |
2527 | /* Now start the PMU. */ |
2528 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, |
2529 | env->msr_fixed_ctr_ctrl); |
2530 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, |
2531 | env->msr_global_ctrl); |
2532 | } |
2533 | } |
2534 | /* |
2535 | * Hyper-V partition-wide MSRs: to avoid clearing them on cpu hot-add, |
2536 | * only sync them to KVM on the first cpu |
2537 | */ |
2538 | if (current_cpu == first_cpu) { |
2539 | if (has_msr_hv_hypercall) { |
2540 | kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, |
2541 | env->msr_hv_guest_os_id); |
2542 | kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, |
2543 | env->msr_hv_hypercall); |
2544 | } |
2545 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { |
2546 | kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, |
2547 | env->msr_hv_tsc); |
2548 | } |
2549 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { |
2550 | kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, |
2551 | env->msr_hv_reenlightenment_control); |
2552 | kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, |
2553 | env->msr_hv_tsc_emulation_control); |
2554 | kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, |
2555 | env->msr_hv_tsc_emulation_status); |
2556 | } |
2557 | } |
2558 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { |
2559 | kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, |
2560 | env->msr_hv_vapic); |
2561 | } |
2562 | if (has_msr_hv_crash) { |
2563 | int j; |
2564 | |
2565 | for (j = 0; j < HV_CRASH_PARAMS; j++) |
2566 | kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, |
2567 | env->msr_hv_crash_params[j]); |
2568 | |
2569 | kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_CTL, HV_CRASH_CTL_NOTIFY); |
2570 | } |
2571 | if (has_msr_hv_runtime) { |
2572 | kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, env->msr_hv_runtime); |
2573 | } |
2574 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VPINDEX) |
2575 | && hv_vpindex_settable) { |
2576 | kvm_msr_entry_add(cpu, HV_X64_MSR_VP_INDEX, |
2577 | hyperv_vp_index(CPU(cpu))); |
2578 | } |
2579 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { |
2580 | int j; |
2581 | |
2582 | kvm_msr_entry_add(cpu, HV_X64_MSR_SVERSION, HV_SYNIC_VERSION); |
2583 | |
2584 | kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, |
2585 | env->msr_hv_synic_control); |
2586 | kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, |
2587 | env->msr_hv_synic_evt_page); |
2588 | kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, |
2589 | env->msr_hv_synic_msg_page); |
2590 | |
2591 | for (j = 0; j < ARRAY_SIZE(env->msr_hv_synic_sint); j++) { |
2592 | kvm_msr_entry_add(cpu, HV_X64_MSR_SINT0 + j, |
2593 | env->msr_hv_synic_sint[j]); |
2594 | } |
2595 | } |
2596 | if (has_msr_hv_stimer) { |
2597 | int j; |
2598 | |
2599 | for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_config); j++) { |
2600 | kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_CONFIG + j * 2, |
2601 | env->msr_hv_stimer_config[j]); |
2602 | } |
2603 | |
2604 | for (j = 0; j < ARRAY_SIZE(env->msr_hv_stimer_count); j++) { |
2605 | kvm_msr_entry_add(cpu, HV_X64_MSR_STIMER0_COUNT + j * 2, |
2606 | env->msr_hv_stimer_count[j]); |
2607 | } |
2608 | } |
2609 | if (env->features[FEAT_1_EDX] & CPUID_MTRR) { |
2610 | uint64_t phys_mask = MAKE_64BIT_MASK(0, cpu->phys_bits); |
2611 | |
2612 | kvm_msr_entry_add(cpu, MSR_MTRRdefType, env->mtrr_deftype); |
2613 | kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, env->mtrr_fixed[0]); |
2614 | kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, env->mtrr_fixed[1]); |
2615 | kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, env->mtrr_fixed[2]); |
2616 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, env->mtrr_fixed[3]); |
2617 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, env->mtrr_fixed[4]); |
2618 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, env->mtrr_fixed[5]); |
2619 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, env->mtrr_fixed[6]); |
2620 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, env->mtrr_fixed[7]); |
2621 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, env->mtrr_fixed[8]); |
2622 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, env->mtrr_fixed[9]); |
2623 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, env->mtrr_fixed[10]); |
2624 | for (i = 0; i < MSR_MTRRcap_VCNT; i++) { |
                /* The CPU raises #GP if we write to a mask bit above the
                 * physical limit of the host CPU (and KVM emulates that
                 * check).
                 */
2628 | uint64_t mask = env->mtrr_var[i].mask; |
2629 | mask &= phys_mask; |
2630 | |
2631 | kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), |
2632 | env->mtrr_var[i].base); |
2633 | kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), mask); |
2634 | } |
2635 | } |
2636 | if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { |
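        /* CPUID.(EAX=14H,ECX=1):EAX[2:0] enumerates the number of
         * configurable Processor Trace address ranges. */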
2637 | int addr_num = kvm_arch_get_supported_cpuid(kvm_state, |
2638 | 0x14, 1, R_EAX) & 0x7; |
2639 | |
2640 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, |
2641 | env->msr_rtit_ctrl); |
2642 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, |
2643 | env->msr_rtit_status); |
2644 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, |
2645 | env->msr_rtit_output_base); |
2646 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, |
2647 | env->msr_rtit_output_mask); |
2648 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, |
2649 | env->msr_rtit_cr3_match); |
2650 | for (i = 0; i < addr_num; i++) { |
2651 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, |
2652 | env->msr_rtit_addrs[i]); |
2653 | } |
2654 | } |
2655 | |
2656 | /* Note: MSR_IA32_FEATURE_CONTROL is written separately, see |
2657 | * kvm_put_msr_feature_control. */ |
2658 | } |
2659 | if (env->mcg_cap) { |
2660 | int i; |
2661 | |
2662 | kvm_msr_entry_add(cpu, MSR_MCG_STATUS, env->mcg_status); |
2663 | kvm_msr_entry_add(cpu, MSR_MCG_CTL, env->mcg_ctl); |
2664 | if (has_msr_mcg_ext_ctl) { |
2665 | kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, env->mcg_ext_ctl); |
2666 | } |
2667 | for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { |
2668 | kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, env->mce_banks[i]); |
2669 | } |
2670 | } |
2671 | |
2672 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MSRS, cpu->kvm_msr_buf); |
2673 | if (ret < 0) { |
2674 | return ret; |
2675 | } |
2676 | |
2677 | if (ret < cpu->kvm_msr_buf->nmsrs) { |
2678 | struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; |
2679 | error_report("error: failed to set MSR 0x%" PRIx32 " to 0x%" PRIx64, |
2680 | (uint32_t)e->index, (uint64_t)e->data); |
2681 | } |
2682 | |
2683 | assert(ret == cpu->kvm_msr_buf->nmsrs); |
2684 | return 0; |
2685 | } |
2686 | |
2687 | |
2688 | static int kvm_get_fpu(X86CPU *cpu) |
2689 | { |
2690 | CPUX86State *env = &cpu->env; |
2691 | struct kvm_fpu fpu; |
2692 | int i, ret; |
2693 | |
2694 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_FPU, &fpu); |
2695 | if (ret < 0) { |
2696 | return ret; |
2697 | } |
2698 | |
2699 | env->fpstt = (fpu.fsw >> 11) & 7; |
2700 | env->fpus = fpu.fsw; |
2701 | env->fpuc = fpu.fcw; |
2702 | env->fpop = fpu.last_opcode; |
2703 | env->fpip = fpu.last_ip; |
2704 | env->fpdp = fpu.last_dp; |
2705 | for (i = 0; i < 8; ++i) { |
2706 | env->fptags[i] = !((fpu.ftwx >> i) & 1); |
2707 | } |
2708 | memcpy(env->fpregs, fpu.fpr, sizeof env->fpregs); |
2709 | for (i = 0; i < CPU_NB_REGS; i++) { |
2710 | env->xmm_regs[i].ZMM_Q(0) = ldq_p(&fpu.xmm[i][0]); |
2711 | env->xmm_regs[i].ZMM_Q(1) = ldq_p(&fpu.xmm[i][8]); |
2712 | } |
2713 | env->mxcsr = fpu.mxcsr; |
2714 | |
2715 | return 0; |
2716 | } |
2717 | |
2718 | static int kvm_get_xsave(X86CPU *cpu) |
2719 | { |
2720 | CPUX86State *env = &cpu->env; |
2721 | X86XSaveArea *xsave = env->xsave_buf; |
2722 | int ret; |
2723 | |
2724 | if (!has_xsave) { |
2725 | return kvm_get_fpu(cpu); |
2726 | } |
2727 | |
2728 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XSAVE, xsave); |
2729 | if (ret < 0) { |
2730 | return ret; |
2731 | } |
2732 | x86_cpu_xrstor_all_areas(cpu, xsave); |
2733 | |
2734 | return 0; |
2735 | } |
2736 | |
2737 | static int kvm_get_xcrs(X86CPU *cpu) |
2738 | { |
2739 | CPUX86State *env = &cpu->env; |
2740 | int i, ret; |
2741 | struct kvm_xcrs xcrs; |
2742 | |
2743 | if (!has_xcrs) { |
2744 | return 0; |
2745 | } |
2746 | |
2747 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_XCRS, &xcrs); |
2748 | if (ret < 0) { |
2749 | return ret; |
2750 | } |
2751 | |
2752 | for (i = 0; i < xcrs.nr_xcrs; i++) { |
        /* Only XCR0 is supported for now. */
2754 | if (xcrs.xcrs[i].xcr == 0) { |
2755 | env->xcr0 = xcrs.xcrs[i].value; |
2756 | break; |
2757 | } |
2758 | } |
2759 | return 0; |
2760 | } |
2761 | |
2762 | static int kvm_get_sregs(X86CPU *cpu) |
2763 | { |
2764 | CPUX86State *env = &cpu->env; |
2765 | struct kvm_sregs sregs; |
2766 | int bit, i, ret; |
2767 | |
2768 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs); |
2769 | if (ret < 0) { |
2770 | return ret; |
2771 | } |
2772 | |
    /*
     * There can only be one pending IRQ set in the bitmap at a time, so try
     * to find it and save its number instead (-1 for none).
     */
2775 | env->interrupt_injected = -1; |
2776 | for (i = 0; i < ARRAY_SIZE(sregs.interrupt_bitmap); i++) { |
2777 | if (sregs.interrupt_bitmap[i]) { |
2778 | bit = ctz64(sregs.interrupt_bitmap[i]); |
2779 | env->interrupt_injected = i * 64 + bit; |
2780 | break; |
2781 | } |
2782 | } |
2783 | |
2784 | get_seg(&env->segs[R_CS], &sregs.cs); |
2785 | get_seg(&env->segs[R_DS], &sregs.ds); |
2786 | get_seg(&env->segs[R_ES], &sregs.es); |
2787 | get_seg(&env->segs[R_FS], &sregs.fs); |
2788 | get_seg(&env->segs[R_GS], &sregs.gs); |
2789 | get_seg(&env->segs[R_SS], &sregs.ss); |
2790 | |
2791 | get_seg(&env->tr, &sregs.tr); |
2792 | get_seg(&env->ldt, &sregs.ldt); |
2793 | |
2794 | env->idt.limit = sregs.idt.limit; |
2795 | env->idt.base = sregs.idt.base; |
2796 | env->gdt.limit = sregs.gdt.limit; |
2797 | env->gdt.base = sregs.gdt.base; |
2798 | |
2799 | env->cr[0] = sregs.cr0; |
2800 | env->cr[2] = sregs.cr2; |
2801 | env->cr[3] = sregs.cr3; |
2802 | env->cr[4] = sregs.cr4; |
2803 | |
2804 | env->efer = sregs.efer; |
2805 | |
2806 | /* changes to apic base and cr8/tpr are read back via kvm_arch_post_run */ |
2807 | x86_update_hflags(env); |
2808 | |
2809 | return 0; |
2810 | } |
2811 | |
2812 | static int kvm_get_msrs(X86CPU *cpu) |
2813 | { |
2814 | CPUX86State *env = &cpu->env; |
2815 | struct kvm_msr_entry *msrs = cpu->kvm_msr_buf->entries; |
2816 | int ret, i; |
2817 | uint64_t mtrr_top_bits; |
2818 | |
2819 | kvm_msr_buf_reset(cpu); |
2820 | |
2821 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_CS, 0); |
2822 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_ESP, 0); |
2823 | kvm_msr_entry_add(cpu, MSR_IA32_SYSENTER_EIP, 0); |
2824 | kvm_msr_entry_add(cpu, MSR_PAT, 0); |
2825 | if (has_msr_star) { |
2826 | kvm_msr_entry_add(cpu, MSR_STAR, 0); |
2827 | } |
2828 | if (has_msr_hsave_pa) { |
2829 | kvm_msr_entry_add(cpu, MSR_VM_HSAVE_PA, 0); |
2830 | } |
2831 | if (has_msr_tsc_aux) { |
2832 | kvm_msr_entry_add(cpu, MSR_TSC_AUX, 0); |
2833 | } |
2834 | if (has_msr_tsc_adjust) { |
2835 | kvm_msr_entry_add(cpu, MSR_TSC_ADJUST, 0); |
2836 | } |
2837 | if (has_msr_tsc_deadline) { |
2838 | kvm_msr_entry_add(cpu, MSR_IA32_TSCDEADLINE, 0); |
2839 | } |
2840 | if (has_msr_misc_enable) { |
2841 | kvm_msr_entry_add(cpu, MSR_IA32_MISC_ENABLE, 0); |
2842 | } |
2843 | if (has_msr_smbase) { |
2844 | kvm_msr_entry_add(cpu, MSR_IA32_SMBASE, 0); |
2845 | } |
2846 | if (has_msr_smi_count) { |
2847 | kvm_msr_entry_add(cpu, MSR_SMI_COUNT, 0); |
2848 | } |
2849 | if (has_msr_feature_control) { |
2850 | kvm_msr_entry_add(cpu, MSR_IA32_FEATURE_CONTROL, 0); |
2851 | } |
2852 | if (has_msr_bndcfgs) { |
2853 | kvm_msr_entry_add(cpu, MSR_IA32_BNDCFGS, 0); |
2854 | } |
2855 | if (has_msr_xss) { |
2856 | kvm_msr_entry_add(cpu, MSR_IA32_XSS, 0); |
2857 | } |
2858 | if (has_msr_spec_ctrl) { |
2859 | kvm_msr_entry_add(cpu, MSR_IA32_SPEC_CTRL, 0); |
2860 | } |
2861 | if (has_msr_virt_ssbd) { |
2862 | kvm_msr_entry_add(cpu, MSR_VIRT_SSBD, 0); |
2863 | } |
2864 | if (!env->tsc_valid) { |
2865 | kvm_msr_entry_add(cpu, MSR_IA32_TSC, 0); |
2866 | env->tsc_valid = !runstate_is_running(); |
2867 | } |
2868 | |
2869 | #ifdef TARGET_X86_64 |
2870 | if (lm_capable_kernel) { |
2871 | kvm_msr_entry_add(cpu, MSR_CSTAR, 0); |
2872 | kvm_msr_entry_add(cpu, MSR_KERNELGSBASE, 0); |
2873 | kvm_msr_entry_add(cpu, MSR_FMASK, 0); |
2874 | kvm_msr_entry_add(cpu, MSR_LSTAR, 0); |
2875 | } |
2876 | #endif |
2877 | kvm_msr_entry_add(cpu, MSR_KVM_SYSTEM_TIME, 0); |
2878 | kvm_msr_entry_add(cpu, MSR_KVM_WALL_CLOCK, 0); |
2879 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_ASYNC_PF)) { |
2880 | kvm_msr_entry_add(cpu, MSR_KVM_ASYNC_PF_EN, 0); |
2881 | } |
2882 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_PV_EOI)) { |
2883 | kvm_msr_entry_add(cpu, MSR_KVM_PV_EOI_EN, 0); |
2884 | } |
2885 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_STEAL_TIME)) { |
2886 | kvm_msr_entry_add(cpu, MSR_KVM_STEAL_TIME, 0); |
2887 | } |
2888 | if (env->features[FEAT_KVM] & (1 << KVM_FEATURE_POLL_CONTROL)) { |
2889 | kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); |
2890 | } |
2891 | if (has_architectural_pmu_version > 0) { |
2892 | if (has_architectural_pmu_version > 1) { |
2893 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); |
2894 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); |
2895 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_STATUS, 0); |
2896 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_OVF_CTRL, 0); |
2897 | } |
2898 | for (i = 0; i < num_architectural_pmu_fixed_counters; i++) { |
2899 | kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR0 + i, 0); |
2900 | } |
2901 | for (i = 0; i < num_architectural_pmu_gp_counters; i++) { |
2902 | kvm_msr_entry_add(cpu, MSR_P6_PERFCTR0 + i, 0); |
2903 | kvm_msr_entry_add(cpu, MSR_P6_EVNTSEL0 + i, 0); |
2904 | } |
2905 | } |
2906 | |
2907 | if (env->mcg_cap) { |
2908 | kvm_msr_entry_add(cpu, MSR_MCG_STATUS, 0); |
2909 | kvm_msr_entry_add(cpu, MSR_MCG_CTL, 0); |
2910 | if (has_msr_mcg_ext_ctl) { |
2911 | kvm_msr_entry_add(cpu, MSR_MCG_EXT_CTL, 0); |
2912 | } |
2913 | for (i = 0; i < (env->mcg_cap & 0xff) * 4; i++) { |
2914 | kvm_msr_entry_add(cpu, MSR_MC0_CTL + i, 0); |
2915 | } |
2916 | } |
2917 | |
2918 | if (has_msr_hv_hypercall) { |
2919 | kvm_msr_entry_add(cpu, HV_X64_MSR_HYPERCALL, 0); |
2920 | kvm_msr_entry_add(cpu, HV_X64_MSR_GUEST_OS_ID, 0); |
2921 | } |
2922 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_VAPIC)) { |
2923 | kvm_msr_entry_add(cpu, HV_X64_MSR_APIC_ASSIST_PAGE, 0); |
2924 | } |
2925 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_TIME)) { |
2926 | kvm_msr_entry_add(cpu, HV_X64_MSR_REFERENCE_TSC, 0); |
2927 | } |
2928 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_REENLIGHTENMENT)) { |
2929 | kvm_msr_entry_add(cpu, HV_X64_MSR_REENLIGHTENMENT_CONTROL, 0); |
2930 | kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_CONTROL, 0); |
2931 | kvm_msr_entry_add(cpu, HV_X64_MSR_TSC_EMULATION_STATUS, 0); |
2932 | } |
2933 | if (has_msr_hv_crash) { |
2934 | int j; |
2935 | |
2936 | for (j = 0; j < HV_CRASH_PARAMS; j++) { |
2937 | kvm_msr_entry_add(cpu, HV_X64_MSR_CRASH_P0 + j, 0); |
2938 | } |
2939 | } |
2940 | if (has_msr_hv_runtime) { |
2941 | kvm_msr_entry_add(cpu, HV_X64_MSR_VP_RUNTIME, 0); |
2942 | } |
2943 | if (hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { |
2944 | uint32_t msr; |
2945 | |
2946 | kvm_msr_entry_add(cpu, HV_X64_MSR_SCONTROL, 0); |
2947 | kvm_msr_entry_add(cpu, HV_X64_MSR_SIEFP, 0); |
2948 | kvm_msr_entry_add(cpu, HV_X64_MSR_SIMP, 0); |
2949 | for (msr = HV_X64_MSR_SINT0; msr <= HV_X64_MSR_SINT15; msr++) { |
2950 | kvm_msr_entry_add(cpu, msr, 0); |
2951 | } |
2952 | } |
2953 | if (has_msr_hv_stimer) { |
2954 | uint32_t msr; |
2955 | |
2956 | for (msr = HV_X64_MSR_STIMER0_CONFIG; msr <= HV_X64_MSR_STIMER3_COUNT; |
2957 | msr++) { |
2958 | kvm_msr_entry_add(cpu, msr, 0); |
2959 | } |
2960 | } |
2961 | if (env->features[FEAT_1_EDX] & CPUID_MTRR) { |
2962 | kvm_msr_entry_add(cpu, MSR_MTRRdefType, 0); |
2963 | kvm_msr_entry_add(cpu, MSR_MTRRfix64K_00000, 0); |
2964 | kvm_msr_entry_add(cpu, MSR_MTRRfix16K_80000, 0); |
2965 | kvm_msr_entry_add(cpu, MSR_MTRRfix16K_A0000, 0); |
2966 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C0000, 0); |
2967 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_C8000, 0); |
2968 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D0000, 0); |
2969 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_D8000, 0); |
2970 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E0000, 0); |
2971 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_E8000, 0); |
2972 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F0000, 0); |
2973 | kvm_msr_entry_add(cpu, MSR_MTRRfix4K_F8000, 0); |
2974 | for (i = 0; i < MSR_MTRRcap_VCNT; i++) { |
2975 | kvm_msr_entry_add(cpu, MSR_MTRRphysBase(i), 0); |
2976 | kvm_msr_entry_add(cpu, MSR_MTRRphysMask(i), 0); |
2977 | } |
2978 | } |
2979 | |
2980 | if (env->features[FEAT_7_0_EBX] & CPUID_7_0_EBX_INTEL_PT) { |
2981 | int addr_num = |
2982 | kvm_arch_get_supported_cpuid(kvm_state, 0x14, 1, R_EAX) & 0x7; |
2983 | |
2984 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CTL, 0); |
2985 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_STATUS, 0); |
2986 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_BASE, 0); |
2987 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_OUTPUT_MASK, 0); |
2988 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_CR3_MATCH, 0); |
2989 | for (i = 0; i < addr_num; i++) { |
2990 | kvm_msr_entry_add(cpu, MSR_IA32_RTIT_ADDR0_A + i, 0); |
2991 | } |
2992 | } |
2993 | |
2994 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MSRS, cpu->kvm_msr_buf); |
2995 | if (ret < 0) { |
2996 | return ret; |
2997 | } |
2998 | |
2999 | if (ret < cpu->kvm_msr_buf->nmsrs) { |
3000 | struct kvm_msr_entry *e = &cpu->kvm_msr_buf->entries[ret]; |
3001 | error_report("error: failed to get MSR 0x%" PRIx32, |
3002 | (uint32_t)e->index); |
3003 | } |
3004 | |
3005 | assert(ret == cpu->kvm_msr_buf->nmsrs); |
3006 | /* |
3007 | * MTRR masks: Each mask consists of 5 parts |
3008 | * a 10..0: must be zero |
3009 | * b 11 : valid bit |
3010 | * c n-1.12: actual mask bits |
3011 | * d 51..n: reserved must be zero |
3012 | * e 63.52: reserved must be zero |
3013 | * |
3014 | * 'n' is the number of physical bits supported by the CPU and is |
3015 | * apparently always <= 52. We know our 'n' but don't know what |
3016 | * the destinations 'n' is; it might be smaller, in which case |
3017 | * it masks (c) on loading. It might be larger, in which case |
3018 | * we fill 'd' so that d..c is consistent irrespetive of the 'n' |
3019 | * we're migrating to. |
3020 | */ |
3021 | |
3022 | if (cpu->fill_mtrr_mask) { |
3023 | QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 52); |
3024 | assert(cpu->phys_bits <= TARGET_PHYS_ADDR_SPACE_BITS); |
3025 | mtrr_top_bits = MAKE_64BIT_MASK(cpu->phys_bits, 52 - cpu->phys_bits); |
3026 | } else { |
3027 | mtrr_top_bits = 0; |
3028 | } |
3029 | |
3030 | for (i = 0; i < ret; i++) { |
3031 | uint32_t index = msrs[i].index; |
3032 | switch (index) { |
3033 | case MSR_IA32_SYSENTER_CS: |
3034 | env->sysenter_cs = msrs[i].data; |
3035 | break; |
3036 | case MSR_IA32_SYSENTER_ESP: |
3037 | env->sysenter_esp = msrs[i].data; |
3038 | break; |
3039 | case MSR_IA32_SYSENTER_EIP: |
3040 | env->sysenter_eip = msrs[i].data; |
3041 | break; |
3042 | case MSR_PAT: |
3043 | env->pat = msrs[i].data; |
3044 | break; |
3045 | case MSR_STAR: |
3046 | env->star = msrs[i].data; |
3047 | break; |
3048 | #ifdef TARGET_X86_64 |
3049 | case MSR_CSTAR: |
3050 | env->cstar = msrs[i].data; |
3051 | break; |
3052 | case MSR_KERNELGSBASE: |
3053 | env->kernelgsbase = msrs[i].data; |
3054 | break; |
3055 | case MSR_FMASK: |
3056 | env->fmask = msrs[i].data; |
3057 | break; |
3058 | case MSR_LSTAR: |
3059 | env->lstar = msrs[i].data; |
3060 | break; |
3061 | #endif |
3062 | case MSR_IA32_TSC: |
3063 | env->tsc = msrs[i].data; |
3064 | break; |
3065 | case MSR_TSC_AUX: |
3066 | env->tsc_aux = msrs[i].data; |
3067 | break; |
3068 | case MSR_TSC_ADJUST: |
3069 | env->tsc_adjust = msrs[i].data; |
3070 | break; |
3071 | case MSR_IA32_TSCDEADLINE: |
3072 | env->tsc_deadline = msrs[i].data; |
3073 | break; |
3074 | case MSR_VM_HSAVE_PA: |
3075 | env->vm_hsave = msrs[i].data; |
3076 | break; |
3077 | case MSR_KVM_SYSTEM_TIME: |
3078 | env->system_time_msr = msrs[i].data; |
3079 | break; |
3080 | case MSR_KVM_WALL_CLOCK: |
3081 | env->wall_clock_msr = msrs[i].data; |
3082 | break; |
3083 | case MSR_MCG_STATUS: |
3084 | env->mcg_status = msrs[i].data; |
3085 | break; |
3086 | case MSR_MCG_CTL: |
3087 | env->mcg_ctl = msrs[i].data; |
3088 | break; |
3089 | case MSR_MCG_EXT_CTL: |
3090 | env->mcg_ext_ctl = msrs[i].data; |
3091 | break; |
3092 | case MSR_IA32_MISC_ENABLE: |
3093 | env->msr_ia32_misc_enable = msrs[i].data; |
3094 | break; |
3095 | case MSR_IA32_SMBASE: |
3096 | env->smbase = msrs[i].data; |
3097 | break; |
3098 | case MSR_SMI_COUNT: |
3099 | env->msr_smi_count = msrs[i].data; |
3100 | break; |
3101 | case MSR_IA32_FEATURE_CONTROL: |
3102 | env->msr_ia32_feature_control = msrs[i].data; |
3103 | break; |
3104 | case MSR_IA32_BNDCFGS: |
3105 | env->msr_bndcfgs = msrs[i].data; |
3106 | break; |
3107 | case MSR_IA32_XSS: |
3108 | env->xss = msrs[i].data; |
3109 | break; |
3110 | default: |
3111 | if (msrs[i].index >= MSR_MC0_CTL && |
3112 | msrs[i].index < MSR_MC0_CTL + (env->mcg_cap & 0xff) * 4) { |
3113 | env->mce_banks[msrs[i].index - MSR_MC0_CTL] = msrs[i].data; |
3114 | } |
3115 | break; |
3116 | case MSR_KVM_ASYNC_PF_EN: |
3117 | env->async_pf_en_msr = msrs[i].data; |
3118 | break; |
3119 | case MSR_KVM_PV_EOI_EN: |
3120 | env->pv_eoi_en_msr = msrs[i].data; |
3121 | break; |
3122 | case MSR_KVM_STEAL_TIME: |
3123 | env->steal_time_msr = msrs[i].data; |
3124 | break; |
        case MSR_KVM_POLL_CONTROL:
            env->poll_control_msr = msrs[i].data;
            break;
3129 | case MSR_CORE_PERF_FIXED_CTR_CTRL: |
3130 | env->msr_fixed_ctr_ctrl = msrs[i].data; |
3131 | break; |
3132 | case MSR_CORE_PERF_GLOBAL_CTRL: |
3133 | env->msr_global_ctrl = msrs[i].data; |
3134 | break; |
3135 | case MSR_CORE_PERF_GLOBAL_STATUS: |
3136 | env->msr_global_status = msrs[i].data; |
3137 | break; |
3138 | case MSR_CORE_PERF_GLOBAL_OVF_CTRL: |
3139 | env->msr_global_ovf_ctrl = msrs[i].data; |
3140 | break; |
3141 | case MSR_CORE_PERF_FIXED_CTR0 ... MSR_CORE_PERF_FIXED_CTR0 + MAX_FIXED_COUNTERS - 1: |
3142 | env->msr_fixed_counters[index - MSR_CORE_PERF_FIXED_CTR0] = msrs[i].data; |
3143 | break; |
3144 | case MSR_P6_PERFCTR0 ... MSR_P6_PERFCTR0 + MAX_GP_COUNTERS - 1: |
3145 | env->msr_gp_counters[index - MSR_P6_PERFCTR0] = msrs[i].data; |
3146 | break; |
3147 | case MSR_P6_EVNTSEL0 ... MSR_P6_EVNTSEL0 + MAX_GP_COUNTERS - 1: |
3148 | env->msr_gp_evtsel[index - MSR_P6_EVNTSEL0] = msrs[i].data; |
3149 | break; |
3150 | case HV_X64_MSR_HYPERCALL: |
3151 | env->msr_hv_hypercall = msrs[i].data; |
3152 | break; |
3153 | case HV_X64_MSR_GUEST_OS_ID: |
3154 | env->msr_hv_guest_os_id = msrs[i].data; |
3155 | break; |
3156 | case HV_X64_MSR_APIC_ASSIST_PAGE: |
3157 | env->msr_hv_vapic = msrs[i].data; |
3158 | break; |
3159 | case HV_X64_MSR_REFERENCE_TSC: |
3160 | env->msr_hv_tsc = msrs[i].data; |
3161 | break; |
3162 | case HV_X64_MSR_CRASH_P0 ... HV_X64_MSR_CRASH_P4: |
3163 | env->msr_hv_crash_params[index - HV_X64_MSR_CRASH_P0] = msrs[i].data; |
3164 | break; |
3165 | case HV_X64_MSR_VP_RUNTIME: |
3166 | env->msr_hv_runtime = msrs[i].data; |
3167 | break; |
3168 | case HV_X64_MSR_SCONTROL: |
3169 | env->msr_hv_synic_control = msrs[i].data; |
3170 | break; |
3171 | case HV_X64_MSR_SIEFP: |
3172 | env->msr_hv_synic_evt_page = msrs[i].data; |
3173 | break; |
3174 | case HV_X64_MSR_SIMP: |
3175 | env->msr_hv_synic_msg_page = msrs[i].data; |
3176 | break; |
3177 | case HV_X64_MSR_SINT0 ... HV_X64_MSR_SINT15: |
3178 | env->msr_hv_synic_sint[index - HV_X64_MSR_SINT0] = msrs[i].data; |
3179 | break; |
3180 | case HV_X64_MSR_STIMER0_CONFIG: |
3181 | case HV_X64_MSR_STIMER1_CONFIG: |
3182 | case HV_X64_MSR_STIMER2_CONFIG: |
3183 | case HV_X64_MSR_STIMER3_CONFIG: |
3184 | env->msr_hv_stimer_config[(index - HV_X64_MSR_STIMER0_CONFIG)/2] = |
3185 | msrs[i].data; |
3186 | break; |
3187 | case HV_X64_MSR_STIMER0_COUNT: |
3188 | case HV_X64_MSR_STIMER1_COUNT: |
3189 | case HV_X64_MSR_STIMER2_COUNT: |
3190 | case HV_X64_MSR_STIMER3_COUNT: |
3191 | env->msr_hv_stimer_count[(index - HV_X64_MSR_STIMER0_COUNT)/2] = |
3192 | msrs[i].data; |
3193 | break; |
3194 | case HV_X64_MSR_REENLIGHTENMENT_CONTROL: |
3195 | env->msr_hv_reenlightenment_control = msrs[i].data; |
3196 | break; |
3197 | case HV_X64_MSR_TSC_EMULATION_CONTROL: |
3198 | env->msr_hv_tsc_emulation_control = msrs[i].data; |
3199 | break; |
3200 | case HV_X64_MSR_TSC_EMULATION_STATUS: |
3201 | env->msr_hv_tsc_emulation_status = msrs[i].data; |
3202 | break; |
3203 | case MSR_MTRRdefType: |
3204 | env->mtrr_deftype = msrs[i].data; |
3205 | break; |
3206 | case MSR_MTRRfix64K_00000: |
3207 | env->mtrr_fixed[0] = msrs[i].data; |
3208 | break; |
3209 | case MSR_MTRRfix16K_80000: |
3210 | env->mtrr_fixed[1] = msrs[i].data; |
3211 | break; |
3212 | case MSR_MTRRfix16K_A0000: |
3213 | env->mtrr_fixed[2] = msrs[i].data; |
3214 | break; |
3215 | case MSR_MTRRfix4K_C0000: |
3216 | env->mtrr_fixed[3] = msrs[i].data; |
3217 | break; |
3218 | case MSR_MTRRfix4K_C8000: |
3219 | env->mtrr_fixed[4] = msrs[i].data; |
3220 | break; |
3221 | case MSR_MTRRfix4K_D0000: |
3222 | env->mtrr_fixed[5] = msrs[i].data; |
3223 | break; |
3224 | case MSR_MTRRfix4K_D8000: |
3225 | env->mtrr_fixed[6] = msrs[i].data; |
3226 | break; |
3227 | case MSR_MTRRfix4K_E0000: |
3228 | env->mtrr_fixed[7] = msrs[i].data; |
3229 | break; |
3230 | case MSR_MTRRfix4K_E8000: |
3231 | env->mtrr_fixed[8] = msrs[i].data; |
3232 | break; |
3233 | case MSR_MTRRfix4K_F0000: |
3234 | env->mtrr_fixed[9] = msrs[i].data; |
3235 | break; |
3236 | case MSR_MTRRfix4K_F8000: |
3237 | env->mtrr_fixed[10] = msrs[i].data; |
3238 | break; |
3239 | case MSR_MTRRphysBase(0) ... MSR_MTRRphysMask(MSR_MTRRcap_VCNT - 1): |
3240 | if (index & 1) { |
3241 | env->mtrr_var[MSR_MTRRphysIndex(index)].mask = msrs[i].data | |
3242 | mtrr_top_bits; |
3243 | } else { |
3244 | env->mtrr_var[MSR_MTRRphysIndex(index)].base = msrs[i].data; |
3245 | } |
3246 | break; |
3247 | case MSR_IA32_SPEC_CTRL: |
3248 | env->spec_ctrl = msrs[i].data; |
3249 | break; |
3250 | case MSR_VIRT_SSBD: |
3251 | env->virt_ssbd = msrs[i].data; |
3252 | break; |
3253 | case MSR_IA32_RTIT_CTL: |
3254 | env->msr_rtit_ctrl = msrs[i].data; |
3255 | break; |
3256 | case MSR_IA32_RTIT_STATUS: |
3257 | env->msr_rtit_status = msrs[i].data; |
3258 | break; |
3259 | case MSR_IA32_RTIT_OUTPUT_BASE: |
3260 | env->msr_rtit_output_base = msrs[i].data; |
3261 | break; |
3262 | case MSR_IA32_RTIT_OUTPUT_MASK: |
3263 | env->msr_rtit_output_mask = msrs[i].data; |
3264 | break; |
3265 | case MSR_IA32_RTIT_CR3_MATCH: |
3266 | env->msr_rtit_cr3_match = msrs[i].data; |
3267 | break; |
3268 | case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: |
3269 | env->msr_rtit_addrs[index - MSR_IA32_RTIT_ADDR0_A] = msrs[i].data; |
3270 | break; |
3271 | } |
3272 | } |
3273 | |
3274 | return 0; |
3275 | } |
3276 | |
3277 | static int kvm_put_mp_state(X86CPU *cpu) |
3278 | { |
3279 | struct kvm_mp_state mp_state = { .mp_state = cpu->env.mp_state }; |
3280 | |
3281 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state); |
3282 | } |
3283 | |
3284 | static int kvm_get_mp_state(X86CPU *cpu) |
3285 | { |
3286 | CPUState *cs = CPU(cpu); |
3287 | CPUX86State *env = &cpu->env; |
3288 | struct kvm_mp_state mp_state; |
3289 | int ret; |
3290 | |
3291 | ret = kvm_vcpu_ioctl(cs, KVM_GET_MP_STATE, &mp_state); |
3292 | if (ret < 0) { |
3293 | return ret; |
3294 | } |
3295 | env->mp_state = mp_state.mp_state; |
3296 | if (kvm_irqchip_in_kernel()) { |
3297 | cs->halted = (mp_state.mp_state == KVM_MP_STATE_HALTED); |
3298 | } |
3299 | return 0; |
3300 | } |
3301 | |
3302 | static int kvm_get_apic(X86CPU *cpu) |
3303 | { |
3304 | DeviceState *apic = cpu->apic_state; |
3305 | struct kvm_lapic_state kapic; |
3306 | int ret; |
3307 | |
3308 | if (apic && kvm_irqchip_in_kernel()) { |
3309 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_LAPIC, &kapic); |
3310 | if (ret < 0) { |
3311 | return ret; |
3312 | } |
3313 | |
3314 | kvm_get_apic_state(apic, &kapic); |
3315 | } |
3316 | return 0; |
3317 | } |
3318 | |
3319 | static int kvm_put_vcpu_events(X86CPU *cpu, int level) |
3320 | { |
3321 | CPUState *cs = CPU(cpu); |
3322 | CPUX86State *env = &cpu->env; |
3323 | struct kvm_vcpu_events events = {}; |
3324 | |
3325 | if (!kvm_has_vcpu_events()) { |
3326 | return 0; |
3327 | } |
3328 | |
3329 | events.flags = 0; |
3330 | |
3331 | if (has_exception_payload) { |
3332 | events.flags |= KVM_VCPUEVENT_VALID_PAYLOAD; |
3333 | events.exception.pending = env->exception_pending; |
3334 | events.exception_has_payload = env->exception_has_payload; |
3335 | events.exception_payload = env->exception_payload; |
3336 | } |
3337 | events.exception.nr = env->exception_nr; |
3338 | events.exception.injected = env->exception_injected; |
3339 | events.exception.has_error_code = env->has_error_code; |
3340 | events.exception.error_code = env->error_code; |
3341 | |
3342 | events.interrupt.injected = (env->interrupt_injected >= 0); |
3343 | events.interrupt.nr = env->interrupt_injected; |
3344 | events.interrupt.soft = env->soft_interrupt; |
3345 | |
3346 | events.nmi.injected = env->nmi_injected; |
3347 | events.nmi.pending = env->nmi_pending; |
3348 | events.nmi.masked = !!(env->hflags2 & HF2_NMI_MASK); |
3349 | |
3350 | events.sipi_vector = env->sipi_vector; |
3351 | |
3352 | if (has_msr_smbase) { |
3353 | events.smi.smm = !!(env->hflags & HF_SMM_MASK); |
3354 | events.smi.smm_inside_nmi = !!(env->hflags2 & HF2_SMM_INSIDE_NMI_MASK); |
3355 | if (kvm_irqchip_in_kernel()) { |
3356 | /* As soon as these are moved to the kernel, remove them |
3357 | * from cs->interrupt_request. |
3358 | */ |
3359 | events.smi.pending = cs->interrupt_request & CPU_INTERRUPT_SMI; |
3360 | events.smi.latched_init = cs->interrupt_request & CPU_INTERRUPT_INIT; |
3361 | cs->interrupt_request &= ~(CPU_INTERRUPT_INIT | CPU_INTERRUPT_SMI); |
3362 | } else { |
3363 | /* Keep these in cs->interrupt_request. */ |
3364 | events.smi.pending = 0; |
3365 | events.smi.latched_init = 0; |
3366 | } |
        /* Stop SMI delivery on old machine types to avoid a reboot
         * on an incoming migration of an old VM.
         */
3370 | if (!cpu->kvm_no_smi_migration) { |
3371 | events.flags |= KVM_VCPUEVENT_VALID_SMM; |
3372 | } |
3373 | } |
3374 | |
3375 | if (level >= KVM_PUT_RESET_STATE) { |
3376 | events.flags |= KVM_VCPUEVENT_VALID_NMI_PENDING; |
3377 | if (env->mp_state == KVM_MP_STATE_SIPI_RECEIVED) { |
3378 | events.flags |= KVM_VCPUEVENT_VALID_SIPI_VECTOR; |
3379 | } |
3380 | } |
3381 | |
3382 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events); |
3383 | } |
3384 | |
3385 | static int kvm_get_vcpu_events(X86CPU *cpu) |
3386 | { |
3387 | CPUX86State *env = &cpu->env; |
3388 | struct kvm_vcpu_events events; |
3389 | int ret; |
3390 | |
3391 | if (!kvm_has_vcpu_events()) { |
3392 | return 0; |
3393 | } |
3394 | |
3395 | memset(&events, 0, sizeof(events)); |
3396 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_VCPU_EVENTS, &events); |
3397 | if (ret < 0) { |
3398 | return ret; |
3399 | } |
3400 | |
3401 | if (events.flags & KVM_VCPUEVENT_VALID_PAYLOAD) { |
3402 | env->exception_pending = events.exception.pending; |
3403 | env->exception_has_payload = events.exception_has_payload; |
3404 | env->exception_payload = events.exception_payload; |
3405 | } else { |
3406 | env->exception_pending = 0; |
3407 | env->exception_has_payload = false; |
3408 | } |
3409 | env->exception_injected = events.exception.injected; |
3410 | env->exception_nr = |
3411 | (env->exception_pending || env->exception_injected) ? |
3412 | events.exception.nr : -1; |
3413 | env->has_error_code = events.exception.has_error_code; |
3414 | env->error_code = events.exception.error_code; |
3415 | |
3416 | env->interrupt_injected = |
3417 | events.interrupt.injected ? events.interrupt.nr : -1; |
3418 | env->soft_interrupt = events.interrupt.soft; |
3419 | |
3420 | env->nmi_injected = events.nmi.injected; |
3421 | env->nmi_pending = events.nmi.pending; |
3422 | if (events.nmi.masked) { |
3423 | env->hflags2 |= HF2_NMI_MASK; |
3424 | } else { |
3425 | env->hflags2 &= ~HF2_NMI_MASK; |
3426 | } |
3427 | |
3428 | if (events.flags & KVM_VCPUEVENT_VALID_SMM) { |
3429 | if (events.smi.smm) { |
3430 | env->hflags |= HF_SMM_MASK; |
3431 | } else { |
3432 | env->hflags &= ~HF_SMM_MASK; |
3433 | } |
3434 | if (events.smi.pending) { |
3435 | cpu_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); |
3436 | } else { |
3437 | cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_SMI); |
3438 | } |
3439 | if (events.smi.smm_inside_nmi) { |
3440 | env->hflags2 |= HF2_SMM_INSIDE_NMI_MASK; |
3441 | } else { |
3442 | env->hflags2 &= ~HF2_SMM_INSIDE_NMI_MASK; |
3443 | } |
3444 | if (events.smi.latched_init) { |
3445 | cpu_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); |
3446 | } else { |
3447 | cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_INIT); |
3448 | } |
3449 | } |
3450 | |
3451 | env->sipi_vector = events.sipi_vector; |
3452 | |
3453 | return 0; |
3454 | } |
3455 | |
3456 | static int kvm_guest_debug_workarounds(X86CPU *cpu) |
3457 | { |
3458 | CPUState *cs = CPU(cpu); |
3459 | CPUX86State *env = &cpu->env; |
3460 | int ret = 0; |
3461 | unsigned long reinject_trap = 0; |
3462 | |
3463 | if (!kvm_has_vcpu_events()) { |
3464 | if (env->exception_nr == EXCP01_DB) { |
3465 | reinject_trap = KVM_GUESTDBG_INJECT_DB; |
3466 | } else if (env->exception_injected == EXCP03_INT3) { |
3467 | reinject_trap = KVM_GUESTDBG_INJECT_BP; |
3468 | } |
3469 | kvm_reset_exception(env); |
3470 | } |
3471 | |
3472 | /* |
3473 | * Kernels before KVM_CAP_X86_ROBUST_SINGLESTEP overwrote flags.TF |
3474 | * injected via SET_GUEST_DEBUG while updating GP regs. Work around this |
3475 | * by updating the debug state once again if single-stepping is on. |
     * Another reason to call kvm_update_guest_debug here is a pending debug
     * trap raised by the guest. On kernels without SET_VCPU_EVENTS we have
     * to reinject such traps via SET_GUEST_DEBUG.
3479 | */ |
3480 | if (reinject_trap || |
3481 | (!kvm_has_robust_singlestep() && cs->singlestep_enabled)) { |
3482 | ret = kvm_update_guest_debug(cs, reinject_trap); |
3483 | } |
3484 | return ret; |
3485 | } |
3486 | |
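/* Write the guest debug registers to KVM: DR0-DR3 hold the breakpoint
 * addresses, DR6 is the status register and DR7 the control register.
 */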
3487 | static int kvm_put_debugregs(X86CPU *cpu) |
3488 | { |
3489 | CPUX86State *env = &cpu->env; |
3490 | struct kvm_debugregs dbgregs; |
3491 | int i; |
3492 | |
3493 | if (!kvm_has_debugregs()) { |
3494 | return 0; |
3495 | } |
3496 | |
3497 | memset(&dbgregs, 0, sizeof(dbgregs)); |
3498 | for (i = 0; i < 4; i++) { |
3499 | dbgregs.db[i] = env->dr[i]; |
3500 | } |
3501 | dbgregs.dr6 = env->dr[6]; |
3502 | dbgregs.dr7 = env->dr[7]; |
3503 | dbgregs.flags = 0; |
3504 | |
3505 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEBUGREGS, &dbgregs); |
3506 | } |
3507 | |
3508 | static int kvm_get_debugregs(X86CPU *cpu) |
3509 | { |
3510 | CPUX86State *env = &cpu->env; |
3511 | struct kvm_debugregs dbgregs; |
3512 | int i, ret; |
3513 | |
3514 | if (!kvm_has_debugregs()) { |
3515 | return 0; |
3516 | } |
3517 | |
3518 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_DEBUGREGS, &dbgregs); |
3519 | if (ret < 0) { |
3520 | return ret; |
3521 | } |
3522 | for (i = 0; i < 4; i++) { |
3523 | env->dr[i] = dbgregs.db[i]; |
3524 | } |
3525 | env->dr[4] = env->dr[6] = dbgregs.dr6; |
3526 | env->dr[5] = env->dr[7] = dbgregs.dr7; |
3527 | |
3528 | return 0; |
3529 | } |
3530 | |
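/* KVM_{GET,SET}_NESTED_STATE transfer the opaque nested virtualization
 * (VMX/SVM) state blob, which is needed to migrate a vCPU while it is
 * running a nested guest.
 */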
3531 | static int kvm_put_nested_state(X86CPU *cpu) |
3532 | { |
3533 | CPUX86State *env = &cpu->env; |
3534 | int max_nested_state_len = kvm_max_nested_state_length(); |
3535 | |
3536 | if (!env->nested_state) { |
3537 | return 0; |
3538 | } |
3539 | |
3540 | assert(env->nested_state->size <= max_nested_state_len); |
3541 | return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_NESTED_STATE, env->nested_state); |
3542 | } |
3543 | |
3544 | static int kvm_get_nested_state(X86CPU *cpu) |
3545 | { |
3546 | CPUX86State *env = &cpu->env; |
3547 | int max_nested_state_len = kvm_max_nested_state_length(); |
3548 | int ret; |
3549 | |
3550 | if (!env->nested_state) { |
3551 | return 0; |
3552 | } |
3553 | |
3554 | /* |
3555 | * It is possible that migration restored a smaller size into |
3556 | * nested_state->hdr.size than what our kernel support. |
3557 | * We preserve migration origin nested_state->hdr.size for |
3558 | * call to KVM_SET_NESTED_STATE but wish that our next call |
3559 | * to KVM_GET_NESTED_STATE will use max size our kernel support. |
3560 | */ |
3561 | env->nested_state->size = max_nested_state_len; |
3562 | |
3563 | ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_NESTED_STATE, env->nested_state); |
3564 | if (ret < 0) { |
3565 | return ret; |
3566 | } |
3567 | |
3568 | if (env->nested_state->flags & KVM_STATE_NESTED_GUEST_MODE) { |
3569 | env->hflags |= HF_GUEST_MASK; |
3570 | } else { |
3571 | env->hflags &= ~HF_GUEST_MASK; |
3572 | } |
3573 | |
3574 | return ret; |
3575 | } |
3576 | |
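/* Write vCPU state into KVM. The level argument selects how much state is
 * transferred: ascending levels are KVM_PUT_RUNTIME_STATE,
 * KVM_PUT_RESET_STATE and KVM_PUT_FULL_STATE.
 */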
3577 | int kvm_arch_put_registers(CPUState *cpu, int level) |
3578 | { |
3579 | X86CPU *x86_cpu = X86_CPU(cpu); |
3580 | int ret; |
3581 | |
3582 | assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); |
3583 | |
3584 | if (level >= KVM_PUT_RESET_STATE) { |
3585 | ret = kvm_put_nested_state(x86_cpu); |
3586 | if (ret < 0) { |
3587 | return ret; |
3588 | } |
3589 | |
3590 | ret = kvm_put_msr_feature_control(x86_cpu); |
3591 | if (ret < 0) { |
3592 | return ret; |
3593 | } |
3594 | } |
3595 | |
3596 | if (level == KVM_PUT_FULL_STATE) { |
3597 | /* We don't check for kvm_arch_set_tsc_khz() errors here, |
3598 | * because TSC frequency mismatch shouldn't abort migration, |
3599 | * unless the user explicitly asked for a more strict TSC |
3600 | * setting (e.g. using an explicit "tsc-freq" option). |
3601 | */ |
3602 | kvm_arch_set_tsc_khz(cpu); |
3603 | } |
3604 | |
3605 | ret = kvm_getput_regs(x86_cpu, 1); |
3606 | if (ret < 0) { |
3607 | return ret; |
3608 | } |
3609 | ret = kvm_put_xsave(x86_cpu); |
3610 | if (ret < 0) { |
3611 | return ret; |
3612 | } |
3613 | ret = kvm_put_xcrs(x86_cpu); |
3614 | if (ret < 0) { |
3615 | return ret; |
3616 | } |
3617 | ret = kvm_put_sregs(x86_cpu); |
3618 | if (ret < 0) { |
3619 | return ret; |
3620 | } |
3621 | /* must be before kvm_put_msrs */ |
3622 | ret = kvm_inject_mce_oldstyle(x86_cpu); |
3623 | if (ret < 0) { |
3624 | return ret; |
3625 | } |
3626 | ret = kvm_put_msrs(x86_cpu, level); |
3627 | if (ret < 0) { |
3628 | return ret; |
3629 | } |
3630 | ret = kvm_put_vcpu_events(x86_cpu, level); |
3631 | if (ret < 0) { |
3632 | return ret; |
3633 | } |
3634 | if (level >= KVM_PUT_RESET_STATE) { |
3635 | ret = kvm_put_mp_state(x86_cpu); |
3636 | if (ret < 0) { |
3637 | return ret; |
3638 | } |
3639 | } |
3640 | |
3641 | ret = kvm_put_tscdeadline_msr(x86_cpu); |
3642 | if (ret < 0) { |
3643 | return ret; |
3644 | } |
3645 | ret = kvm_put_debugregs(x86_cpu); |
3646 | if (ret < 0) { |
3647 | return ret; |
3648 | } |
3649 | /* must be last */ |
3650 | ret = kvm_guest_debug_workarounds(x86_cpu); |
3651 | if (ret < 0) { |
3652 | return ret; |
3653 | } |
3654 | return 0; |
3655 | } |
3656 | |
3657 | int kvm_arch_get_registers(CPUState *cs) |
3658 | { |
3659 | X86CPU *cpu = X86_CPU(cs); |
3660 | int ret; |
3661 | |
3662 | assert(cpu_is_stopped(cs) || qemu_cpu_is_self(cs)); |
3663 | |
3664 | ret = kvm_get_vcpu_events(cpu); |
3665 | if (ret < 0) { |
3666 | goto out; |
3667 | } |
3668 | /* |
3669 | * KVM_GET_MPSTATE can modify CS and RIP, call it before |
3670 | * KVM_GET_REGS and KVM_GET_SREGS. |
3671 | */ |
3672 | ret = kvm_get_mp_state(cpu); |
3673 | if (ret < 0) { |
3674 | goto out; |
3675 | } |
3676 | ret = kvm_getput_regs(cpu, 0); |
3677 | if (ret < 0) { |
3678 | goto out; |
3679 | } |
3680 | ret = kvm_get_xsave(cpu); |
3681 | if (ret < 0) { |
3682 | goto out; |
3683 | } |
3684 | ret = kvm_get_xcrs(cpu); |
3685 | if (ret < 0) { |
3686 | goto out; |
3687 | } |
3688 | ret = kvm_get_sregs(cpu); |
3689 | if (ret < 0) { |
3690 | goto out; |
3691 | } |
3692 | ret = kvm_get_msrs(cpu); |
3693 | if (ret < 0) { |
3694 | goto out; |
3695 | } |
3696 | ret = kvm_get_apic(cpu); |
3697 | if (ret < 0) { |
3698 | goto out; |
3699 | } |
3700 | ret = kvm_get_debugregs(cpu); |
3701 | if (ret < 0) { |
3702 | goto out; |
3703 | } |
3704 | ret = kvm_get_nested_state(cpu); |
3705 | if (ret < 0) { |
3706 | goto out; |
3707 | } |
3708 | ret = 0; |
3709 | out: |
3710 | cpu_sync_bndcs_hflags(&cpu->env); |
3711 | return ret; |
3712 | } |
3713 | |
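/* Runs on the vCPU thread just before entering KVM_RUN. The iothread lock
 * is not held here, so it is taken explicitly around accesses to shared
 * state (interrupt queue flags, userspace APIC).
 */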
3714 | void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run) |
3715 | { |
3716 | X86CPU *x86_cpu = X86_CPU(cpu); |
3717 | CPUX86State *env = &x86_cpu->env; |
3718 | int ret; |
3719 | |
3720 | /* Inject NMI */ |
3721 | if (cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { |
3722 | if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { |
3723 | qemu_mutex_lock_iothread(); |
3724 | cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; |
3725 | qemu_mutex_unlock_iothread(); |
3726 | DPRINTF("injected NMI\n" ); |
3727 | ret = kvm_vcpu_ioctl(cpu, KVM_NMI); |
3728 | if (ret < 0) { |
3729 | fprintf(stderr, "KVM: injection failed, NMI lost (%s)\n" , |
3730 | strerror(-ret)); |
3731 | } |
3732 | } |
3733 | if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { |
3734 | qemu_mutex_lock_iothread(); |
3735 | cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; |
3736 | qemu_mutex_unlock_iothread(); |
3737 | DPRINTF("injected SMI\n" ); |
3738 | ret = kvm_vcpu_ioctl(cpu, KVM_SMI); |
3739 | if (ret < 0) { |
3740 | fprintf(stderr, "KVM: injection failed, SMI lost (%s)\n" , |
3741 | strerror(-ret)); |
3742 | } |
3743 | } |
3744 | } |
3745 | |
3746 | if (!kvm_pic_in_kernel()) { |
3747 | qemu_mutex_lock_iothread(); |
3748 | } |
3749 | |
3750 | /* Force the VCPU out of its inner loop to process any INIT requests |
3751 | * or (for userspace APIC, but it is cheap to combine the checks here) |
3752 | * pending TPR access reports. |
3753 | */ |
3754 | if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { |
3755 | if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && |
3756 | !(env->hflags & HF_SMM_MASK)) { |
3757 | cpu->exit_request = 1; |
3758 | } |
3759 | if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { |
3760 | cpu->exit_request = 1; |
3761 | } |
3762 | } |
3763 | |
3764 | if (!kvm_pic_in_kernel()) { |
3765 | /* Try to inject an interrupt if the guest can accept it */ |
3766 | if (run->ready_for_interrupt_injection && |
3767 | (cpu->interrupt_request & CPU_INTERRUPT_HARD) && |
3768 | (env->eflags & IF_MASK)) { |
3769 | int irq; |
3770 | |
3771 | cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; |
3772 | irq = cpu_get_pic_interrupt(env); |
3773 | if (irq >= 0) { |
3774 | struct kvm_interrupt intr; |
3775 | |
3776 | intr.irq = irq; |
3777 | DPRINTF("injected interrupt %d\n" , irq); |
3778 | ret = kvm_vcpu_ioctl(cpu, KVM_INTERRUPT, &intr); |
3779 | if (ret < 0) { |
3780 | fprintf(stderr, |
3781 | "KVM: injection failed, interrupt lost (%s)\n" , |
3782 | strerror(-ret)); |
3783 | } |
3784 | } |
3785 | } |
3786 | |
        /* If we have an interrupt pending but the guest is not ready to
         * receive it, request an interrupt window exit. This will cause
         * a return to userspace as soon as the guest is ready to receive
         * interrupts. */
3791 | if ((cpu->interrupt_request & CPU_INTERRUPT_HARD)) { |
3792 | run->request_interrupt_window = 1; |
3793 | } else { |
3794 | run->request_interrupt_window = 0; |
3795 | } |
3796 | |
3797 | DPRINTF("setting tpr\n" ); |
3798 | run->cr8 = cpu_get_apic_tpr(x86_cpu->apic_state); |
3799 | |
3800 | qemu_mutex_unlock_iothread(); |
3801 | } |
3802 | } |
3803 | |
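/* Runs right after KVM_RUN returns: fold the SMM and IF state reported by
 * KVM back into env, and resynchronize the userspace APIC's TPR and base
 * with values the guest may have changed.
 */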
3804 | MemTxAttrs kvm_arch_post_run(CPUState *cpu, struct kvm_run *run) |
3805 | { |
3806 | X86CPU *x86_cpu = X86_CPU(cpu); |
3807 | CPUX86State *env = &x86_cpu->env; |
3808 | |
3809 | if (run->flags & KVM_RUN_X86_SMM) { |
3810 | env->hflags |= HF_SMM_MASK; |
3811 | } else { |
3812 | env->hflags &= ~HF_SMM_MASK; |
3813 | } |
3814 | if (run->if_flag) { |
3815 | env->eflags |= IF_MASK; |
3816 | } else { |
3817 | env->eflags &= ~IF_MASK; |
3818 | } |
3819 | |
3820 | /* We need to protect the apic state against concurrent accesses from |
3821 | * different threads in case the userspace irqchip is used. */ |
3822 | if (!kvm_irqchip_in_kernel()) { |
3823 | qemu_mutex_lock_iothread(); |
3824 | } |
3825 | cpu_set_apic_tpr(x86_cpu->apic_state, run->cr8); |
3826 | cpu_set_apic_base(x86_cpu->apic_state, run->apic_base); |
3827 | if (!kvm_irqchip_in_kernel()) { |
3828 | qemu_mutex_unlock_iothread(); |
3829 | } |
3830 | return cpu_get_mem_attrs(env); |
3831 | } |
3832 | |
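/* Handle events that must be processed outside of KVM_RUN. Returns
 * non-zero if the vCPU should remain halted rather than re-enter the
 * kernel.
 */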
3833 | int kvm_arch_process_async_events(CPUState *cs) |
3834 | { |
3835 | X86CPU *cpu = X86_CPU(cs); |
3836 | CPUX86State *env = &cpu->env; |
3837 | |
3838 | if (cs->interrupt_request & CPU_INTERRUPT_MCE) { |
3839 | /* We must not raise CPU_INTERRUPT_MCE if it's not supported. */ |
3840 | assert(env->mcg_cap); |
3841 | |
3842 | cs->interrupt_request &= ~CPU_INTERRUPT_MCE; |
3843 | |
3844 | kvm_cpu_synchronize_state(cs); |
3845 | |
3846 | if (env->exception_nr == EXCP08_DBLE) { |
3847 | /* this means triple fault */ |
3848 | qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); |
3849 | cs->exit_request = 1; |
3850 | return 0; |
3851 | } |
3852 | kvm_queue_exception(env, EXCP12_MCHK, 0, 0); |
3853 | env->has_error_code = 0; |
3854 | |
3855 | cs->halted = 0; |
3856 | if (kvm_irqchip_in_kernel() && env->mp_state == KVM_MP_STATE_HALTED) { |
3857 | env->mp_state = KVM_MP_STATE_RUNNABLE; |
3858 | } |
3859 | } |
3860 | |
3861 | if ((cs->interrupt_request & CPU_INTERRUPT_INIT) && |
3862 | !(env->hflags & HF_SMM_MASK)) { |
3863 | kvm_cpu_synchronize_state(cs); |
3864 | do_cpu_init(cpu); |
3865 | } |
3866 | |
3867 | if (kvm_irqchip_in_kernel()) { |
3868 | return 0; |
3869 | } |
3870 | |
3871 | if (cs->interrupt_request & CPU_INTERRUPT_POLL) { |
3872 | cs->interrupt_request &= ~CPU_INTERRUPT_POLL; |
3873 | apic_poll_irq(cpu->apic_state); |
3874 | } |
3875 | if (((cs->interrupt_request & CPU_INTERRUPT_HARD) && |
3876 | (env->eflags & IF_MASK)) || |
3877 | (cs->interrupt_request & CPU_INTERRUPT_NMI)) { |
3878 | cs->halted = 0; |
3879 | } |
3880 | if (cs->interrupt_request & CPU_INTERRUPT_SIPI) { |
3881 | kvm_cpu_synchronize_state(cs); |
3882 | do_cpu_sipi(cpu); |
3883 | } |
3884 | if (cs->interrupt_request & CPU_INTERRUPT_TPR) { |
3885 | cs->interrupt_request &= ~CPU_INTERRUPT_TPR; |
3886 | kvm_cpu_synchronize_state(cs); |
3887 | apic_handle_tpr_access_report(cpu->apic_state, env->eip, |
3888 | env->tpr_access_type); |
3889 | } |
3890 | |
3891 | return cs->halted; |
3892 | } |
3893 | |
3894 | static int kvm_handle_halt(X86CPU *cpu) |
3895 | { |
3896 | CPUState *cs = CPU(cpu); |
3897 | CPUX86State *env = &cpu->env; |
3898 | |
3899 | if (!((cs->interrupt_request & CPU_INTERRUPT_HARD) && |
3900 | (env->eflags & IF_MASK)) && |
3901 | !(cs->interrupt_request & CPU_INTERRUPT_NMI)) { |
3902 | cs->halted = 1; |
3903 | return EXCP_HLT; |
3904 | } |
3905 | |
3906 | return 0; |
3907 | } |
3908 | |
3909 | static int kvm_handle_tpr_access(X86CPU *cpu) |
3910 | { |
3911 | CPUState *cs = CPU(cpu); |
3912 | struct kvm_run *run = cs->kvm_run; |
3913 | |
3914 | apic_handle_tpr_access_report(cpu->apic_state, run->tpr_access.rip, |
3915 | run->tpr_access.is_write ? TPR_ACCESS_WRITE |
3916 | : TPR_ACCESS_READ); |
3917 | return 1; |
3918 | } |
3919 | |
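/* Software breakpoints are implemented by patching an int3 opcode (0xcc)
 * into guest memory, saving the original byte so that it can be restored
 * when the breakpoint is removed.
 */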
3920 | int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) |
3921 | { |
3922 | static const uint8_t int3 = 0xcc; |
3923 | |
3924 | if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 0) || |
3925 | cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&int3, 1, 1)) { |
3926 | return -EINVAL; |
3927 | } |
3928 | return 0; |
3929 | } |
3930 | |
3931 | int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) |
3932 | { |
3933 | uint8_t int3; |
3934 | |
3935 | if (cpu_memory_rw_debug(cs, bp->pc, &int3, 1, 0) || int3 != 0xcc || |
3936 | cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 1, 1)) { |
3937 | return -EINVAL; |
3938 | } |
3939 | return 0; |
3940 | } |
3941 | |
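/* x86 has four debug address registers (DR0-DR3), so at most four hardware
 * breakpoints/watchpoints can be active at any time.
 */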
3942 | static struct { |
3943 | target_ulong addr; |
3944 | int len; |
3945 | int type; |
3946 | } hw_breakpoint[4]; |
3947 | |
3948 | static int nb_hw_breakpoint; |
3949 | |
3950 | static int find_hw_breakpoint(target_ulong addr, int len, int type) |
3951 | { |
3952 | int n; |
3953 | |
3954 | for (n = 0; n < nb_hw_breakpoint; n++) { |
3955 | if (hw_breakpoint[n].addr == addr && hw_breakpoint[n].type == type && |
3956 | (hw_breakpoint[n].len == len || len == -1)) { |
3957 | return n; |
3958 | } |
3959 | } |
3960 | return -1; |
3961 | } |
3962 | |
3963 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, |
3964 | target_ulong len, int type) |
3965 | { |
3966 | switch (type) { |
3967 | case GDB_BREAKPOINT_HW: |
3968 | len = 1; |
3969 | break; |
3970 | case GDB_WATCHPOINT_WRITE: |
3971 | case GDB_WATCHPOINT_ACCESS: |
3972 | switch (len) { |
3973 | case 1: |
3974 | break; |
3975 | case 2: |
3976 | case 4: |
3977 | case 8: |
3978 | if (addr & (len - 1)) { |
3979 | return -EINVAL; |
3980 | } |
3981 | break; |
3982 | default: |
3983 | return -EINVAL; |
3984 | } |
3985 | break; |
3986 | default: |
3987 | return -ENOSYS; |
3988 | } |
3989 | |
3990 | if (nb_hw_breakpoint == 4) { |
3991 | return -ENOBUFS; |
3992 | } |
3993 | if (find_hw_breakpoint(addr, len, type) >= 0) { |
3994 | return -EEXIST; |
3995 | } |
3996 | hw_breakpoint[nb_hw_breakpoint].addr = addr; |
3997 | hw_breakpoint[nb_hw_breakpoint].len = len; |
3998 | hw_breakpoint[nb_hw_breakpoint].type = type; |
3999 | nb_hw_breakpoint++; |
4000 | |
4001 | return 0; |
4002 | } |
4003 | |
4004 | int kvm_arch_remove_hw_breakpoint(target_ulong addr, |
4005 | target_ulong len, int type) |
4006 | { |
4007 | int n; |
4008 | |
4009 | n = find_hw_breakpoint(addr, (type == GDB_BREAKPOINT_HW) ? 1 : len, type); |
4010 | if (n < 0) { |
4011 | return -ENOENT; |
4012 | } |
4013 | nb_hw_breakpoint--; |
4014 | hw_breakpoint[n] = hw_breakpoint[nb_hw_breakpoint]; |
4015 | |
4016 | return 0; |
4017 | } |
4018 | |
4019 | void kvm_arch_remove_all_hw_breakpoints(void) |
4020 | { |
4021 | nb_hw_breakpoint = 0; |
4022 | } |
4023 | |
4024 | static CPUWatchpoint hw_watchpoint; |
4025 | |
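/* Decode a KVM debug exit: DR6.BS flags a single-step trap, while DR6 bits
 * 0-3 identify which hardware breakpoint fired; the corresponding DR7 R/W
 * field tells whether it was an execution breakpoint (0), a write
 * watchpoint (1) or an access watchpoint (3).
 */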
4026 | static int kvm_handle_debug(X86CPU *cpu, |
4027 | struct kvm_debug_exit_arch *arch_info) |
4028 | { |
4029 | CPUState *cs = CPU(cpu); |
4030 | CPUX86State *env = &cpu->env; |
4031 | int ret = 0; |
4032 | int n; |
4033 | |
4034 | if (arch_info->exception == EXCP01_DB) { |
4035 | if (arch_info->dr6 & DR6_BS) { |
4036 | if (cs->singlestep_enabled) { |
4037 | ret = EXCP_DEBUG; |
4038 | } |
4039 | } else { |
4040 | for (n = 0; n < 4; n++) { |
4041 | if (arch_info->dr6 & (1 << n)) { |
4042 | switch ((arch_info->dr7 >> (16 + n*4)) & 0x3) { |
4043 | case 0x0: |
4044 | ret = EXCP_DEBUG; |
4045 | break; |
4046 | case 0x1: |
4047 | ret = EXCP_DEBUG; |
4048 | cs->watchpoint_hit = &hw_watchpoint; |
4049 | hw_watchpoint.vaddr = hw_breakpoint[n].addr; |
4050 | hw_watchpoint.flags = BP_MEM_WRITE; |
4051 | break; |
4052 | case 0x3: |
4053 | ret = EXCP_DEBUG; |
4054 | cs->watchpoint_hit = &hw_watchpoint; |
4055 | hw_watchpoint.vaddr = hw_breakpoint[n].addr; |
4056 | hw_watchpoint.flags = BP_MEM_ACCESS; |
4057 | break; |
4058 | } |
4059 | } |
4060 | } |
4061 | } |
4062 | } else if (kvm_find_sw_breakpoint(cs, arch_info->pc)) { |
4063 | ret = EXCP_DEBUG; |
4064 | } |
4065 | if (ret == 0) { |
4066 | cpu_synchronize_state(cs); |
4067 | assert(env->exception_nr == -1); |
4068 | |
4069 | /* pass to guest */ |
4070 | kvm_queue_exception(env, arch_info->exception, |
4071 | arch_info->exception == EXCP01_DB, |
4072 | arch_info->dr6); |
4073 | env->has_error_code = 0; |
4074 | } |
4075 | |
4076 | return ret; |
4077 | } |
4078 | |
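/* Build the DR7 value for KVM from our breakpoint list: the global-enable
 * bit for breakpoint n is bit 2n+1, its type goes in bits 16+4n and its
 * length encoding in bits 18+4n. The base value 0x0600 sets the GE flag
 * and DR7 bit 10, which always reads as 1.
 */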
4079 | void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) |
4080 | { |
4081 | const uint8_t type_code[] = { |
4082 | [GDB_BREAKPOINT_HW] = 0x0, |
4083 | [GDB_WATCHPOINT_WRITE] = 0x1, |
4084 | [GDB_WATCHPOINT_ACCESS] = 0x3 |
4085 | }; |
4086 | const uint8_t len_code[] = { |
4087 | [1] = 0x0, [2] = 0x1, [4] = 0x3, [8] = 0x2 |
4088 | }; |
4089 | int n; |
4090 | |
4091 | if (kvm_sw_breakpoints_active(cpu)) { |
4092 | dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP; |
4093 | } |
4094 | if (nb_hw_breakpoint > 0) { |
4095 | dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; |
4096 | dbg->arch.debugreg[7] = 0x0600; |
4097 | for (n = 0; n < nb_hw_breakpoint; n++) { |
4098 | dbg->arch.debugreg[n] = hw_breakpoint[n].addr; |
4099 | dbg->arch.debugreg[7] |= (2 << (n * 2)) | |
4100 | (type_code[hw_breakpoint[n].type] << (16 + n*4)) | |
4101 | ((uint32_t)len_code[hw_breakpoint[n].len] << (18 + n*4)); |
4102 | } |
4103 | } |
4104 | } |
4105 | |
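/* Check CPUID.1:ECX.VMX (bit 5) to see whether the host CPU supports
 * Intel VT-x.
 */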
4106 | static bool host_supports_vmx(void) |
4107 | { |
4108 | uint32_t ecx, unused; |
4109 | |
4110 | host_cpuid(1, 0, &unused, &unused, &ecx, &unused); |
4111 | return ecx & CPUID_EXT_VMX; |
4112 | } |
4113 | |
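/* VMX basic exit reason 33, "VM-entry failure due to invalid guest state",
 * with bit 31 set to mark the failed entry.
 */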
4114 | #define VMX_INVALID_GUEST_STATE 0x80000021 |
4115 | |
4116 | int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) |
4117 | { |
4118 | X86CPU *cpu = X86_CPU(cs); |
4119 | uint64_t code; |
4120 | int ret; |
4121 | |
4122 | switch (run->exit_reason) { |
4123 | case KVM_EXIT_HLT: |
4124 | DPRINTF("handle_hlt\n" ); |
4125 | qemu_mutex_lock_iothread(); |
4126 | ret = kvm_handle_halt(cpu); |
4127 | qemu_mutex_unlock_iothread(); |
4128 | break; |
4129 | case KVM_EXIT_SET_TPR: |
4130 | ret = 0; |
4131 | break; |
4132 | case KVM_EXIT_TPR_ACCESS: |
4133 | qemu_mutex_lock_iothread(); |
4134 | ret = kvm_handle_tpr_access(cpu); |
4135 | qemu_mutex_unlock_iothread(); |
4136 | break; |
4137 | case KVM_EXIT_FAIL_ENTRY: |
4138 | code = run->fail_entry.hardware_entry_failure_reason; |
4139 | fprintf(stderr, "KVM: entry failed, hardware error 0x%" PRIx64 "\n" , |
4140 | code); |
4141 | if (host_supports_vmx() && code == VMX_INVALID_GUEST_STATE) { |
            fprintf(stderr,
                    "\nIf you're running a guest on an Intel machine without "
                    "unrestricted mode\n"
                    "support, the failure is most likely due to the guest "
                    "entering an invalid\n"
                    "state for Intel VT. For example, the guest may be running "
                    "in big real mode,\n"
                    "which is not supported on older Intel processors."
                    "\n\n");
4151 | } |
4152 | ret = -1; |
4153 | break; |
4154 | case KVM_EXIT_EXCEPTION: |
4155 | fprintf(stderr, "KVM: exception %d exit (error code 0x%x)\n" , |
4156 | run->ex.exception, run->ex.error_code); |
4157 | ret = -1; |
4158 | break; |
4159 | case KVM_EXIT_DEBUG: |
4160 | DPRINTF("kvm_exit_debug\n" ); |
4161 | qemu_mutex_lock_iothread(); |
4162 | ret = kvm_handle_debug(cpu, &run->debug.arch); |
4163 | qemu_mutex_unlock_iothread(); |
4164 | break; |
4165 | case KVM_EXIT_HYPERV: |
4166 | ret = kvm_hv_handle_exit(cpu, &run->hyperv); |
4167 | break; |
4168 | case KVM_EXIT_IOAPIC_EOI: |
4169 | ioapic_eoi_broadcast(run->eoi.vector); |
4170 | ret = 0; |
4171 | break; |
4172 | default: |
4173 | fprintf(stderr, "KVM: unknown exit reason %d\n" , run->exit_reason); |
4174 | ret = -1; |
4175 | break; |
4176 | } |
4177 | |
4178 | return ret; |
4179 | } |
4180 | |
4181 | bool kvm_arch_stop_on_emulation_error(CPUState *cs) |
4182 | { |
4183 | X86CPU *cpu = X86_CPU(cs); |
4184 | CPUX86State *env = &cpu->env; |
4185 | |
4186 | kvm_cpu_synchronize_state(cs); |
4187 | return !(env->cr[0] & CR0_PE_MASK) || |
4188 | ((env->segs[R_CS].selector & 3) != 3); |
4189 | } |
4190 | |
4191 | void kvm_arch_init_irq_routing(KVMState *s) |
4192 | { |
4193 | if (!kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) { |
        /* If the kernel can't do IRQ routing, the interrupt source
         * override 0->2 required by HPET cannot be set up, so we
         * have to disable the HPET.
         */
4198 | no_hpet = 1; |
4199 | } |
4200 | /* We know at this point that we're using the in-kernel |
4201 | * irqchip, so we can use irqfds, and on x86 we know |
4202 | * we can use msi via irqfd and GSI routing. |
4203 | */ |
4204 | kvm_msi_via_irqfd_allowed = true; |
4205 | kvm_gsi_routing_allowed = true; |
4206 | |
4207 | if (kvm_irqchip_is_split()) { |
4208 | int i; |
4209 | |
4210 | /* If the ioapic is in QEMU and the lapics are in KVM, reserve |
4211 | MSI routes for signaling interrupts to the local apics. */ |
4212 | for (i = 0; i < IOAPIC_NUM_PINS; i++) { |
4213 | if (kvm_irqchip_add_msi_route(s, 0, NULL) < 0) { |
4214 | error_report("Could not enable split IRQ mode." ); |
4215 | exit(1); |
4216 | } |
4217 | } |
4218 | } |
4219 | } |
4220 | |
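/* Returns 1 if split irqchip mode was enabled, 0 to keep the fully
 * in-kernel irqchip. The cap argument of 24 reserves one GSI per IOAPIC
 * pin for the userspace IOAPIC.
 */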
4221 | int kvm_arch_irqchip_create(MachineState *ms, KVMState *s) |
4222 | { |
4223 | int ret; |
4224 | if (machine_kernel_irqchip_split(ms)) { |
4225 | ret = kvm_vm_enable_cap(s, KVM_CAP_SPLIT_IRQCHIP, 0, 24); |
4226 | if (ret) { |
4227 | error_report("Could not enable split irqchip mode: %s" , |
4228 | strerror(-ret)); |
4229 | exit(1); |
4230 | } else { |
4231 | DPRINTF("Enabled KVM_CAP_SPLIT_IRQCHIP\n" ); |
4232 | kvm_split_irqchip = true; |
4233 | return 1; |
4234 | } |
4235 | } else { |
4236 | return 0; |
4237 | } |
4238 | } |
4239 | |
4240 | /* Classic KVM device assignment interface. Will remain x86 only. */ |
4241 | int kvm_device_pci_assign(KVMState *s, PCIHostDeviceAddress *dev_addr, |
4242 | uint32_t flags, uint32_t *dev_id) |
4243 | { |
4244 | struct kvm_assigned_pci_dev dev_data = { |
4245 | .segnr = dev_addr->domain, |
4246 | .busnr = dev_addr->bus, |
4247 | .devfn = PCI_DEVFN(dev_addr->slot, dev_addr->function), |
4248 | .flags = flags, |
4249 | }; |
4250 | int ret; |
4251 | |
4252 | dev_data.assigned_dev_id = |
4253 | (dev_addr->domain << 16) | (dev_addr->bus << 8) | dev_data.devfn; |
4254 | |
4255 | ret = kvm_vm_ioctl(s, KVM_ASSIGN_PCI_DEVICE, &dev_data); |
4256 | if (ret < 0) { |
4257 | return ret; |
4258 | } |
4259 | |
4260 | *dev_id = dev_data.assigned_dev_id; |
4261 | |
4262 | return 0; |
4263 | } |
4264 | |
4265 | int kvm_device_pci_deassign(KVMState *s, uint32_t dev_id) |
4266 | { |
4267 | struct kvm_assigned_pci_dev dev_data = { |
4268 | .assigned_dev_id = dev_id, |
4269 | }; |
4270 | |
4271 | return kvm_vm_ioctl(s, KVM_DEASSIGN_PCI_DEVICE, &dev_data); |
4272 | } |
4273 | |
4274 | static int kvm_assign_irq_internal(KVMState *s, uint32_t dev_id, |
4275 | uint32_t irq_type, uint32_t guest_irq) |
4276 | { |
4277 | struct kvm_assigned_irq assigned_irq = { |
4278 | .assigned_dev_id = dev_id, |
4279 | .guest_irq = guest_irq, |
4280 | .flags = irq_type, |
4281 | }; |
4282 | |
4283 | if (kvm_check_extension(s, KVM_CAP_ASSIGN_DEV_IRQ)) { |
4284 | return kvm_vm_ioctl(s, KVM_ASSIGN_DEV_IRQ, &assigned_irq); |
4285 | } else { |
4286 | return kvm_vm_ioctl(s, KVM_ASSIGN_IRQ, &assigned_irq); |
4287 | } |
4288 | } |
4289 | |
4290 | int kvm_device_intx_assign(KVMState *s, uint32_t dev_id, bool use_host_msi, |
4291 | uint32_t guest_irq) |
4292 | { |
4293 | uint32_t irq_type = KVM_DEV_IRQ_GUEST_INTX | |
4294 | (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX); |
4295 | |
4296 | return kvm_assign_irq_internal(s, dev_id, irq_type, guest_irq); |
4297 | } |
4298 | |
4299 | int kvm_device_intx_set_mask(KVMState *s, uint32_t dev_id, bool masked) |
4300 | { |
4301 | struct kvm_assigned_pci_dev dev_data = { |
4302 | .assigned_dev_id = dev_id, |
4303 | .flags = masked ? KVM_DEV_ASSIGN_MASK_INTX : 0, |
4304 | }; |
4305 | |
4306 | return kvm_vm_ioctl(s, KVM_ASSIGN_SET_INTX_MASK, &dev_data); |
4307 | } |
4308 | |
4309 | static int kvm_deassign_irq_internal(KVMState *s, uint32_t dev_id, |
4310 | uint32_t type) |
4311 | { |
4312 | struct kvm_assigned_irq assigned_irq = { |
4313 | .assigned_dev_id = dev_id, |
4314 | .flags = type, |
4315 | }; |
4316 | |
4317 | return kvm_vm_ioctl(s, KVM_DEASSIGN_DEV_IRQ, &assigned_irq); |
4318 | } |
4319 | |
4320 | int kvm_device_intx_deassign(KVMState *s, uint32_t dev_id, bool use_host_msi) |
4321 | { |
4322 | return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_INTX | |
4323 | (use_host_msi ? KVM_DEV_IRQ_HOST_MSI : KVM_DEV_IRQ_HOST_INTX)); |
4324 | } |
4325 | |
4326 | int kvm_device_msi_assign(KVMState *s, uint32_t dev_id, int virq) |
4327 | { |
4328 | return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSI | |
4329 | KVM_DEV_IRQ_GUEST_MSI, virq); |
4330 | } |
4331 | |
4332 | int kvm_device_msi_deassign(KVMState *s, uint32_t dev_id) |
4333 | { |
4334 | return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSI | |
4335 | KVM_DEV_IRQ_HOST_MSI); |
4336 | } |
4337 | |
4338 | bool kvm_device_msix_supported(KVMState *s) |
4339 | { |
4340 | /* The kernel lacks a corresponding KVM_CAP, so we probe by calling |
4341 | * KVM_ASSIGN_SET_MSIX_NR with an invalid parameter. */ |
4342 | return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, NULL) == -EFAULT; |
4343 | } |
4344 | |
4345 | int kvm_device_msix_init_vectors(KVMState *s, uint32_t dev_id, |
4346 | uint32_t nr_vectors) |
4347 | { |
4348 | struct kvm_assigned_msix_nr msix_nr = { |
4349 | .assigned_dev_id = dev_id, |
4350 | .entry_nr = nr_vectors, |
4351 | }; |
4352 | |
4353 | return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_NR, &msix_nr); |
4354 | } |
4355 | |
4356 | int kvm_device_msix_set_vector(KVMState *s, uint32_t dev_id, uint32_t vector, |
4357 | int virq) |
4358 | { |
4359 | struct kvm_assigned_msix_entry msix_entry = { |
4360 | .assigned_dev_id = dev_id, |
4361 | .gsi = virq, |
4362 | .entry = vector, |
4363 | }; |
4364 | |
4365 | return kvm_vm_ioctl(s, KVM_ASSIGN_SET_MSIX_ENTRY, &msix_entry); |
4366 | } |
4367 | |
4368 | int kvm_device_msix_assign(KVMState *s, uint32_t dev_id) |
4369 | { |
4370 | return kvm_assign_irq_internal(s, dev_id, KVM_DEV_IRQ_HOST_MSIX | |
4371 | KVM_DEV_IRQ_GUEST_MSIX, 0); |
4372 | } |
4373 | |
4374 | int kvm_device_msix_deassign(KVMState *s, uint32_t dev_id) |
4375 | { |
4376 | return kvm_deassign_irq_internal(s, dev_id, KVM_DEV_IRQ_GUEST_MSIX | |
4377 | KVM_DEV_IRQ_HOST_MSIX); |
4378 | } |
4379 | |
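/* If a vIOMMU with interrupt remapping is present, MSI messages programmed
 * by the guest must be translated through it before they are installed in
 * the KVM routing table.
 */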
4380 | int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route, |
4381 | uint64_t address, uint32_t data, PCIDevice *dev) |
4382 | { |
4383 | X86IOMMUState *iommu = x86_iommu_get_default(); |
4384 | |
4385 | if (iommu) { |
4386 | int ret; |
4387 | MSIMessage src, dst; |
4388 | X86IOMMUClass *class = X86_IOMMU_GET_CLASS(iommu); |
4389 | |
4390 | if (!class->int_remap) { |
4391 | return 0; |
4392 | } |
4393 | |
4394 | src.address = route->u.msi.address_hi; |
4395 | src.address <<= VTD_MSI_ADDR_HI_SHIFT; |
4396 | src.address |= route->u.msi.address_lo; |
4397 | src.data = route->u.msi.data; |
4398 | |
        ret = class->int_remap(iommu, &src, &dst, dev ?
                               pci_requester_id(dev) :
                               X86_IOMMU_SID_INVALID);
4402 | if (ret) { |
4403 | trace_kvm_x86_fixup_msi_error(route->gsi); |
4404 | return 1; |
4405 | } |
4406 | |
4407 | route->u.msi.address_hi = dst.address >> VTD_MSI_ADDR_HI_SHIFT; |
4408 | route->u.msi.address_lo = dst.address & VTD_MSI_ADDR_LO_MASK; |
4409 | route->u.msi.data = dst.data; |
4410 | } |
4411 | |
4412 | return 0; |
4413 | } |
4414 | |
4415 | typedef struct MSIRouteEntry MSIRouteEntry; |
4416 | |
4417 | struct MSIRouteEntry { |
4418 | PCIDevice *dev; /* Device pointer */ |
4419 | int vector; /* MSI/MSIX vector index */ |
4420 | int virq; /* Virtual IRQ index */ |
4421 | QLIST_ENTRY(MSIRouteEntry) list; |
4422 | }; |
4423 | |
4424 | /* List of used GSI routes */ |
static QLIST_HEAD(, MSIRouteEntry) msi_route_list =
4426 | QLIST_HEAD_INITIALIZER(msi_route_list); |
4427 | |
4428 | static void kvm_update_msi_routes_all(void *private, bool global, |
4429 | uint32_t index, uint32_t mask) |
4430 | { |
4431 | int cnt = 0, vector; |
4432 | MSIRouteEntry *entry; |
4433 | MSIMessage msg; |
4434 | PCIDevice *dev; |
4435 | |
4436 | /* TODO: explicit route update */ |
4437 | QLIST_FOREACH(entry, &msi_route_list, list) { |
4438 | cnt++; |
4439 | vector = entry->vector; |
4440 | dev = entry->dev; |
4441 | if (msix_enabled(dev) && !msix_is_masked(dev, vector)) { |
4442 | msg = msix_get_message(dev, vector); |
4443 | } else if (msi_enabled(dev) && !msi_is_masked(dev, vector)) { |
4444 | msg = msi_get_message(dev, vector); |
4445 | } else { |
4446 | /* |
4447 | * Either MSI/MSIX is disabled for the device, or the |
4448 | * specific message was masked out. Skip this one. |
4449 | */ |
4450 | continue; |
4451 | } |
4452 | kvm_irqchip_update_msi_route(kvm_state, entry->virq, msg, dev); |
4453 | } |
4454 | kvm_irqchip_commit_routes(kvm_state); |
4455 | trace_kvm_x86_update_msi_routes(cnt); |
4456 | } |
4457 | |
4458 | int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route, |
4459 | int vector, PCIDevice *dev) |
4460 | { |
4461 | static bool notify_list_inited = false; |
4462 | MSIRouteEntry *entry; |
4463 | |
4464 | if (!dev) { |
        /* These are (possibly) IOAPIC routes that are only used in split
         * kernel irqchip mode, whereas we only keep track of PCI devices
         * here. */
4468 | return 0; |
4469 | } |
4470 | |
4471 | entry = g_new0(MSIRouteEntry, 1); |
4472 | entry->dev = dev; |
4473 | entry->vector = vector; |
4474 | entry->virq = route->gsi; |
4475 | QLIST_INSERT_HEAD(&msi_route_list, entry, list); |
4476 | |
4477 | trace_kvm_x86_add_msi_route(route->gsi); |
4478 | |
4479 | if (!notify_list_inited) { |
        /* The first time a route is added, register ourselves on the
         * IOMMU's IEC notifier list if needed. */
4482 | X86IOMMUState *iommu = x86_iommu_get_default(); |
4483 | if (iommu) { |
4484 | x86_iommu_iec_register_notifier(iommu, |
4485 | kvm_update_msi_routes_all, |
4486 | NULL); |
4487 | } |
4488 | notify_list_inited = true; |
4489 | } |
4490 | return 0; |
4491 | } |
4492 | |
4493 | int kvm_arch_release_virq_post(int virq) |
4494 | { |
4495 | MSIRouteEntry *entry, *next; |
4496 | QLIST_FOREACH_SAFE(entry, &msi_route_list, list, next) { |
4497 | if (entry->virq == virq) { |
4498 | trace_kvm_x86_remove_msi_route(virq); |
4499 | QLIST_REMOVE(entry, list); |
4500 | g_free(entry); |
4501 | break; |
4502 | } |
4503 | } |
4504 | return 0; |
4505 | } |
4506 | |
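/* On x86 MSIs are always delivered through explicitly allocated GSI routes,
 * so this data-to-GSI translation should never be reached.
 */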
4507 | int kvm_arch_msi_data_to_gsi(uint32_t data) |
4508 | { |
4509 | abort(); |
4510 | } |
4511 | |