/*
 * QEMU KVM Hyper-V support
 *
 * Copyright (C) 2015 Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * Authors:
 *  Andrey Smetanin <asmetanin@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
13 | |
14 | #include "qemu/osdep.h" |
15 | #include "qemu/main-loop.h" |
16 | #include "hyperv.h" |
17 | #include "hw/hyperv/hyperv.h" |
18 | #include "hyperv-proto.h" |
19 | |
20 | int hyperv_x86_synic_add(X86CPU *cpu) |
21 | { |
22 | hyperv_synic_add(CPU(cpu)); |
23 | return 0; |
24 | } |
25 | |
26 | void hyperv_x86_synic_reset(X86CPU *cpu) |
27 | { |
28 | hyperv_synic_reset(CPU(cpu)); |
29 | } |
30 | |
31 | void hyperv_x86_synic_update(X86CPU *cpu) |
32 | { |
33 | CPUX86State *env = &cpu->env; |
34 | bool enable = env->msr_hv_synic_control & HV_SYNIC_ENABLE; |
35 | hwaddr msg_page_addr = (env->msr_hv_synic_msg_page & HV_SIMP_ENABLE) ? |
36 | (env->msr_hv_synic_msg_page & TARGET_PAGE_MASK) : 0; |
37 | hwaddr event_page_addr = (env->msr_hv_synic_evt_page & HV_SIEFP_ENABLE) ? |
38 | (env->msr_hv_synic_evt_page & TARGET_PAGE_MASK) : 0; |
39 | hyperv_synic_update(CPU(cpu), enable, msg_page_addr, event_page_addr); |
40 | } |
41 | |
42 | static void async_synic_update(CPUState *cs, run_on_cpu_data data) |
43 | { |
44 | qemu_mutex_lock_iothread(); |
45 | hyperv_x86_synic_update(X86_CPU(cs)); |
46 | qemu_mutex_unlock_iothread(); |
47 | } |
48 | |
49 | int kvm_hv_handle_exit(X86CPU *cpu, struct kvm_hyperv_exit *exit) |
50 | { |
51 | CPUX86State *env = &cpu->env; |
52 | |
53 | switch (exit->type) { |
54 | case KVM_EXIT_HYPERV_SYNIC: |
55 | if (!hyperv_feat_enabled(cpu, HYPERV_FEAT_SYNIC)) { |
56 | return -1; |
57 | } |
58 | |
59 | switch (exit->u.synic.msr) { |
60 | case HV_X64_MSR_SCONTROL: |
61 | env->msr_hv_synic_control = exit->u.synic.control; |
62 | break; |
63 | case HV_X64_MSR_SIMP: |
64 | env->msr_hv_synic_msg_page = exit->u.synic.msg_page; |
65 | break; |
66 | case HV_X64_MSR_SIEFP: |
67 | env->msr_hv_synic_evt_page = exit->u.synic.evt_page; |
68 | break; |
69 | default: |
70 | return -1; |
71 | } |
72 | |
73 | /* |
74 | * this will run in this cpu thread before it returns to KVM, but in a |
75 | * safe environment (i.e. when all cpus are quiescent) -- this is |
76 | * necessary because memory hierarchy is being changed |
77 | */ |
78 | async_safe_run_on_cpu(CPU(cpu), async_synic_update, RUN_ON_CPU_NULL); |
79 | |
80 | return 0; |
81 | case KVM_EXIT_HYPERV_HCALL: { |
82 | uint16_t code = exit->u.hcall.input & 0xffff; |
83 | bool fast = exit->u.hcall.input & HV_HYPERCALL_FAST; |
84 | uint64_t param = exit->u.hcall.params[0]; |
85 | |
86 | switch (code) { |
87 | case HV_POST_MESSAGE: |
88 | exit->u.hcall.result = hyperv_hcall_post_message(param, fast); |
89 | break; |
90 | case HV_SIGNAL_EVENT: |
91 | exit->u.hcall.result = hyperv_hcall_signal_event(param, fast); |
92 | break; |
93 | default: |
94 | exit->u.hcall.result = HV_STATUS_INVALID_HYPERCALL_CODE; |
95 | } |
96 | return 0; |
97 | } |
98 | default: |
99 | return -1; |
100 | } |
101 | } |
102 | |