/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code in the public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */
#ifndef VMX_H
#define VMX_H

#include <stdint.h>
#include <stdlib.h>

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

/*
 * The original include list is truncated here; vmcs.h (VMCS field
 * encodings), cpu.h and x86.h (CPUState, X86CPU, CR0/CR4/EFER bit
 * definitions) are assumed from the surrounding QEMU tree.
 */
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"

#include "exec/address-spaces.h"
/* read GPR */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}
/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}
/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}
/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}
/*
 * Desired control word constrained by hardware/hypervisor capabilities:
 * the low 32 bits of a VMX capability value are the bits that must be 1,
 * the high 32 bits are the bits that may be 1.
 */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}
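
/*
 * Illustrative usage (not part of the original file): a VMX capability
 * value from Hypervisor.framework carries the must-be-one bits in its low
 * half and the may-be-one bits in its high half, so a desired pin-based
 * control word (here the hypothetical "desired") would be constrained
 * before being programmed into the VMCS:
 *
 *     uint64_t cap, desired = 0;
 *     if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap)) {
 *         abort();
 *     }
 *     wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, desired));
 */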
/* "IA-32e mode guest" VM-entry control (bit 9 of the VM-entry controls) */
#define VM_ENTRY_GUEST_LMA (1LL << 9)

/* segment access-rights byte: type field masks and common type values */
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    /* VM entry in IA-32e mode requires TR to reference a busy 64-bit TSS */
    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;

    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        /* PAE paging without long mode: read the four PDPTEs from CR3 */
        address_space_rw(&address_space_memory,
                         rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                         MEMTXATTRS_UNSPECIFIED,
                         (uint8_t *)pdpte, 32, 0);
        /* Only set the PDPTEs when PAE paging is in effect. */
        for (i = 0; i < 4; i++) {
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if ((old_cr0 & CR0_PG) && !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG));
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    /* CR4.VMXE must stay set while in VMX operation; the shadow hides it */
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}
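
/*
 * Illustrative usage (not part of the original file): a CR-access VM-exit
 * handler would decode the exit qualification (bit layout per the Intel
 * SDM) and route mov-to-CR writes through the helpers above:
 *
 *     uint64_t qual = rvmcs(vcpu, VMCS_EXIT_QUALIFICATION);
 *     int cr  = qual & 0xf;         // which control register
 *     int gpr = (qual >> 8) & 0xf;  // source GPR, SDM encoding
 *     if (cr == 0) {
 *         macvm_set_cr0(vcpu, rreg(vcpu, HV_X86_RAX + gpr));
 *     } else if (cr == 4) {
 *         macvm_set_cr4(vcpu, rreg(vcpu, HV_X86_RAX + gpr));
 *     }
 *
 * HV_X86_RAX + gpr assumes the hv_x86_reg_t GPR enumerators follow the SDM
 * register numbering; a real exit handler would map them explicitly.
 */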
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    uint64_t val;

    /* BUG: should take overlap into consideration */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /* after moving RIP forward, clear any interruptibility-state blocking */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}
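
/*
 * Illustrative usage (not part of the original file): after emulating the
 * instruction that caused a VM exit, advance RIP past it using the exit's
 * recorded instruction length:
 *
 *     uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP);
 *     uint64_t len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH);
 *     macvm_set_rip(cpu, rip + len);
 */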
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}
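
/*
 * Illustrative usage (not part of the original file): when an NMI cannot be
 * injected because the guest is currently blocking NMIs, request an
 * NMI-window exit; once the VM exits with that reason, inject the NMI and
 * turn window exiting off again with the helper defined below:
 *
 *     if (env->hflags2 & HF2_NMI_MASK) {
 *         vmx_set_nmi_window_exiting(cpu);
 *     }
 *     // ...later, in the NMI-window exit handler:
 *     vmx_clear_nmi_window_exiting(cpu);
 */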
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif /* VMX_H */