/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */
24 | ||
25 | #ifndef VMX_H | |
26 | #define VMX_H | |
27 | ||
c97d6d2c SAGDR |
28 | #include <Hypervisor/hv.h> |
29 | #include <Hypervisor/hv_vmx.h> | |
30 | #include "vmcs.h" | |
31 | #include "cpu.h" | |
32 | #include "x86.h" | |
33 | ||
34 | #include "exec/address-spaces.h" | |
35 | ||
36 | static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg) | |
37 | { | |
38 | uint64_t v; | |
39 | ||
40 | if (hv_vcpu_read_register(vcpu, reg, &v)) { | |
41 | abort(); | |
42 | } | |
43 | ||
44 | return v; | |
45 | } | |
46 | ||
47 | /* write GPR */ | |
48 | static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v) | |
49 | { | |
50 | if (hv_vcpu_write_register(vcpu, reg, v)) { | |
51 | abort(); | |
52 | } | |
53 | } | |
54 | ||
55 | /* read VMCS field */ | |
56 | static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field) | |
57 | { | |
58 | uint64_t v; | |
59 | ||
60 | hv_vmx_vcpu_read_vmcs(vcpu, field, &v); | |
61 | ||
62 | return v; | |
63 | } | |
64 | ||
65 | /* write VMCS field */ | |
66 | static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v) | |
67 | { | |
68 | hv_vmx_vcpu_write_vmcs(vcpu, field, v); | |
69 | } | |
70 | ||
/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    /*
     * Capability MSR layout: low 32 bits are the bits that must be 1,
     * high 32 bits are the bits that are allowed to be 1.
     */
    uint64_t must_be_one = cap & 0xffffffffULL;
    uint64_t allowed_one = cap >> 32;

    return (ctrl | must_be_one) & allowed_one;
}
76 | ||
/* VM-entry controls: "IA-32e mode guest" control (bit 9) */
#define VM_ENTRY_GUEST_LMA (1LL << 9)

/* Segment access-rights "type" field (low 4 bits of the AR word) */
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
/* System-descriptor type encodings for TSS/LDT descriptors */
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2
89 | static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer) | |
90 | { | |
91 | uint64_t entry_ctls; | |
92 | ||
6701d81d | 93 | efer |= MSR_EFER_LMA; |
c97d6d2c SAGDR |
94 | wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer); |
95 | entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS); | |
96 | wvmcs(vcpu, VMCS_ENTRY_CTLS, rvmcs(vcpu, VMCS_ENTRY_CTLS) | | |
97 | VM_ENTRY_GUEST_LMA); | |
98 | ||
99 | uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS); | |
6701d81d | 100 | if ((efer & MSR_EFER_LME) && |
c97d6d2c SAGDR |
101 | (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) { |
102 | wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS, | |
103 | (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS); | |
104 | } | |
105 | } | |
106 | ||
107 | static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer) | |
108 | { | |
109 | uint64_t entry_ctls; | |
110 | ||
111 | entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS); | |
112 | wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA); | |
113 | ||
6701d81d | 114 | efer &= ~MSR_EFER_LMA; |
c97d6d2c SAGDR |
115 | wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer); |
116 | } | |
117 | ||
/*
 * Update the guest CR0 and all state that hangs off it: PDPTEs when
 * entering PAE paging, the CR0 mask/shadow, and long-mode entry/exit
 * when EFER.LME is set and CR0.PG toggles.
 */
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    /* CR0 bits the hypervisor intercepts (writes trap, reads use shadow). */
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;

    /*
     * PAE paging without long mode: load the four PDPTEs from the
     * 32-byte-aligned table CR3 points at, as hardware would on a
     * CR0.PG/CR3 write.
     */
    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_read(&address_space_memory,
                           rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                           MEMTXATTRS_UNSPECIFIED, pdpte, 32);
        /* Only set PDPTE when appropriate. */
        for (i = 0; i < 4; i++) {
            /* PDPTE fields are 64-bit, hence the stride of 2 encodings. */
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    /* Shadow holds the value the guest wrote, so guest reads see it. */
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    /* With EFER.LME set, toggling CR0.PG enters/leaves long mode. */
    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG));
    /* NE and ET are forced on in the real CR0 VMX runs with. */
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}
156 | ||
157 | static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4) | |
158 | { | |
159 | uint64_t guest_cr4 = cr4 | CR4_VMXE; | |
160 | ||
161 | wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4); | |
162 | wvmcs(vcpu, VMCS_CR4_SHADOW, cr4); | |
163 | ||
164 | hv_vcpu_invalidate_tlb(vcpu); | |
165 | hv_vcpu_flush(vcpu); | |
166 | } | |
167 | ||
/*
 * Set the guest RIP and, since execution has moved past the current
 * instruction, clear any STI/MOV-SS interrupt-shadow state in both the
 * VMCS and QEMU's cached hflags.
 */
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t val;

    /* BUG, should take considering overlap.. */
    /* NOTE(review): upstream marker — presumably refers to overlapping
     * register writes/wrap-around not being handled here; verify. */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /* after moving forward in rip, we need to clean INTERRUPTABILITY */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        /* Keep QEMU's view of the interrupt shadow in sync. */
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}
187 | ||
188 | static inline void vmx_clear_nmi_blocking(CPUState *cpu) | |
189 | { | |
b7394c83 SAGDR |
190 | X86CPU *x86_cpu = X86_CPU(cpu); |
191 | CPUX86State *env = &x86_cpu->env; | |
192 | ||
193 | env->hflags2 &= ~HF2_NMI_MASK; | |
c97d6d2c SAGDR |
194 | uint32_t gi = (uint32_t) rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY); |
195 | gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING; | |
196 | wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi); | |
197 | } | |
198 | ||
199 | static inline void vmx_set_nmi_blocking(CPUState *cpu) | |
200 | { | |
b7394c83 SAGDR |
201 | X86CPU *x86_cpu = X86_CPU(cpu); |
202 | CPUX86State *env = &x86_cpu->env; | |
203 | ||
204 | env->hflags2 |= HF2_NMI_MASK; | |
c97d6d2c SAGDR |
205 | uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY); |
206 | gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING; | |
207 | wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi); | |
208 | } | |
209 | ||
210 | static inline void vmx_set_nmi_window_exiting(CPUState *cpu) | |
211 | { | |
212 | uint64_t val; | |
213 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
214 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | | |
215 | VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); | |
216 | ||
217 | } | |
218 | ||
219 | static inline void vmx_clear_nmi_window_exiting(CPUState *cpu) | |
220 | { | |
221 | ||
222 | uint64_t val; | |
223 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
224 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & | |
225 | ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING); | |
226 | } | |
227 | ||
228 | #endif |