/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

35 | void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg, | |
36 | SegmentCache *qseg, bool is_tr) | |
37 | { | |
38 | vmx_seg->sel = qseg->selector; | |
39 | vmx_seg->base = qseg->base; | |
40 | vmx_seg->limit = qseg->limit; | |
41 | ||
42 | if (!qseg->selector && !x86_is_real(cpu) && !is_tr) { | |
43 | /* the TR register is usable after processor reset despite | |
44 | * having a null selector */ | |
45 | vmx_seg->ar = 1 << 16; | |
46 | return; | |
47 | } | |
48 | vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf; | |
49 | vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15; | |
50 | vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14; | |
51 | vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13; | |
52 | vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12; | |
53 | vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7; | |
54 | vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5; | |
55 | vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4; | |
56 | } | |
57 | ||
58 | void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg) | |
59 | { | |
60 | qseg->limit = vmx_seg->limit; | |
61 | qseg->base = vmx_seg->base; | |
62 | qseg->selector = vmx_seg->sel; | |
63 | qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) | | |
64 | (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) | | |
65 | (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) | | |
66 | (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) | | |
67 | (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) | | |
68 | (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) | | |
69 | (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) | | |
70 | (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT); | |
71 | } | |
72 | ||
73 | void hvf_put_xsave(CPUState *cpu_state) | |
74 | { | |
75 | ||
f585195e SAGDR |
76 | struct X86XSaveArea *xsave; |
77 | ||
5b8063c4 | 78 | xsave = X86_CPU(cpu_state)->env.xsave_buf; |
f585195e SAGDR |
79 | |
80 | x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave); | |
81 | ||
82 | if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) { | |
c97d6d2c SAGDR |
83 | abort(); |
84 | } | |
85 | } | |
86 | ||
87 | void hvf_put_segments(CPUState *cpu_state) | |
88 | { | |
89 | CPUX86State *env = &X86_CPU(cpu_state)->env; | |
90 | struct vmx_segment seg; | |
91 | ||
92 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit); | |
93 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base); | |
94 | ||
95 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit); | |
96 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base); | |
97 | ||
98 | /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */ | |
99 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]); | |
100 | vmx_update_tpr(cpu_state); | |
101 | wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer); | |
102 | ||
103 | macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]); | |
104 | macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]); | |
105 | ||
106 | hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false); | |
6701d81d | 107 | vmx_write_segment_descriptor(cpu_state, &seg, R_CS); |
c97d6d2c SAGDR |
108 | |
109 | hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false); | |
6701d81d | 110 | vmx_write_segment_descriptor(cpu_state, &seg, R_DS); |
c97d6d2c SAGDR |
111 | |
112 | hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false); | |
6701d81d | 113 | vmx_write_segment_descriptor(cpu_state, &seg, R_ES); |
c97d6d2c SAGDR |
114 | |
115 | hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false); | |
6701d81d | 116 | vmx_write_segment_descriptor(cpu_state, &seg, R_SS); |
c97d6d2c SAGDR |
117 | |
118 | hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false); | |
6701d81d | 119 | vmx_write_segment_descriptor(cpu_state, &seg, R_FS); |
c97d6d2c SAGDR |
120 | |
121 | hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false); | |
6701d81d | 122 | vmx_write_segment_descriptor(cpu_state, &seg, R_GS); |
c97d6d2c SAGDR |
123 | |
124 | hvf_set_segment(cpu_state, &seg, &env->tr, true); | |
6701d81d | 125 | vmx_write_segment_descriptor(cpu_state, &seg, R_TR); |
c97d6d2c SAGDR |
126 | |
127 | hvf_set_segment(cpu_state, &seg, &env->ldt, false); | |
6701d81d | 128 | vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR); |
c97d6d2c SAGDR |
129 | |
130 | hv_vcpu_flush(cpu_state->hvf_fd); | |
131 | } | |
132 | ||
/* Push the guest MSRs QEMU tracks (SYSENTER/syscall/GS-FS base) to hvf. */
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    /* SYSENTER fast-syscall entry points. */
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    /* 64-bit SYSCALL/SWAPGS state only exists on x86_64 targets. */
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    /* FS/GS bases live in the segment cache on the QEMU side. */
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);
}

158 | void hvf_get_xsave(CPUState *cpu_state) | |
159 | { | |
f585195e SAGDR |
160 | struct X86XSaveArea *xsave; |
161 | ||
5b8063c4 | 162 | xsave = X86_CPU(cpu_state)->env.xsave_buf; |
f585195e SAGDR |
163 | |
164 | if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void*)xsave, 4096)) { | |
c97d6d2c SAGDR |
165 | abort(); |
166 | } | |
167 | ||
f585195e | 168 | x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave); |
c97d6d2c SAGDR |
169 | } |
170 | ||
171 | void hvf_get_segments(CPUState *cpu_state) | |
172 | { | |
173 | CPUX86State *env = &X86_CPU(cpu_state)->env; | |
174 | ||
175 | struct vmx_segment seg; | |
176 | ||
177 | env->interrupt_injected = -1; | |
178 | ||
6701d81d | 179 | vmx_read_segment_descriptor(cpu_state, &seg, R_CS); |
c97d6d2c SAGDR |
180 | hvf_get_segment(&env->segs[R_CS], &seg); |
181 | ||
6701d81d | 182 | vmx_read_segment_descriptor(cpu_state, &seg, R_DS); |
c97d6d2c SAGDR |
183 | hvf_get_segment(&env->segs[R_DS], &seg); |
184 | ||
6701d81d | 185 | vmx_read_segment_descriptor(cpu_state, &seg, R_ES); |
c97d6d2c SAGDR |
186 | hvf_get_segment(&env->segs[R_ES], &seg); |
187 | ||
6701d81d | 188 | vmx_read_segment_descriptor(cpu_state, &seg, R_FS); |
c97d6d2c SAGDR |
189 | hvf_get_segment(&env->segs[R_FS], &seg); |
190 | ||
6701d81d | 191 | vmx_read_segment_descriptor(cpu_state, &seg, R_GS); |
c97d6d2c SAGDR |
192 | hvf_get_segment(&env->segs[R_GS], &seg); |
193 | ||
6701d81d | 194 | vmx_read_segment_descriptor(cpu_state, &seg, R_SS); |
c97d6d2c SAGDR |
195 | hvf_get_segment(&env->segs[R_SS], &seg); |
196 | ||
6701d81d | 197 | vmx_read_segment_descriptor(cpu_state, &seg, R_TR); |
c97d6d2c SAGDR |
198 | hvf_get_segment(&env->tr, &seg); |
199 | ||
6701d81d | 200 | vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR); |
c97d6d2c SAGDR |
201 | hvf_get_segment(&env->ldt, &seg); |
202 | ||
203 | env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT); | |
204 | env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE); | |
205 | env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT); | |
206 | env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE); | |
207 | ||
208 | env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0); | |
209 | env->cr[2] = 0; | |
210 | env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3); | |
211 | env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4); | |
212 | ||
213 | env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER); | |
214 | } | |
215 | ||
/* Read back the guest MSRs QEMU tracks, plus the guest TSC, from hvf. */
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    /* 64-bit SYSCALL/SWAPGS state only exists on x86_64 targets. */
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    /* NOTE(review): the APICBASE value read here is discarded — tmp is
     * never stored into env. Looks like env->apicbase (or equivalent)
     * should receive it; confirm against the apic emulation before
     * changing behavior. */
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    /* Guest TSC = host TSC (rdtscp) plus the VMCS TSC offset. */
    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}

/*
 * Push the full QEMU CPU state (GPRs, flags, RIP, XCR0, xsave, segments,
 * MSRs, debug registers) into the hvf vcpu. Always returns 0.
 */
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    /* General-purpose registers. */
    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    /* XCR0 before the xsave image so the enabled feature set matches. */
    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    /* Debug registers (DR4/DR5 are legacy aliases of DR6/DR7). */
    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}

/*
 * Read the full vcpu state (GPRs, flags, RIP, xsave, XCR0, segments, MSRs,
 * debug registers) from hvf into the QEMU CPU state, then refresh hflags.
 * Always returns 0.
 */
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    /* General-purpose registers. */
    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    /* Debug registers (DR4/DR5 are legacy aliases of DR6/DR7). */
    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    /* Recompute derived hflags from the freshly-read state. */
    x86_update_hflags(env);
    return 0;
}

332 | static void vmx_set_int_window_exiting(CPUState *cpu) | |
333 | { | |
334 | uint64_t val; | |
335 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
336 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val | | |
337 | VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); | |
338 | } | |
339 | ||
340 | void vmx_clear_int_window_exiting(CPUState *cpu) | |
341 | { | |
342 | uint64_t val; | |
343 | val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS); | |
344 | wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val & | |
345 | ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING); | |
346 | } | |
347 | ||
c97d6d2c SAGDR |
348 | bool hvf_inject_interrupts(CPUState *cpu_state) |
349 | { | |
c97d6d2c SAGDR |
350 | X86CPU *x86cpu = X86_CPU(cpu_state); |
351 | CPUX86State *env = &x86cpu->env; | |
352 | ||
b7394c83 SAGDR |
353 | uint8_t vector; |
354 | uint64_t intr_type; | |
355 | bool have_event = true; | |
356 | if (env->interrupt_injected != -1) { | |
357 | vector = env->interrupt_injected; | |
64bef038 CE |
358 | if (env->ins_len) { |
359 | intr_type = VMCS_INTR_T_SWINTR; | |
360 | } else { | |
361 | intr_type = VMCS_INTR_T_HWINTR; | |
362 | } | |
fd13f23b LA |
363 | } else if (env->exception_nr != -1) { |
364 | vector = env->exception_nr; | |
b7394c83 SAGDR |
365 | if (vector == EXCP03_INT3 || vector == EXCP04_INTO) { |
366 | intr_type = VMCS_INTR_T_SWEXCEPTION; | |
367 | } else { | |
368 | intr_type = VMCS_INTR_T_HWEXCEPTION; | |
369 | } | |
370 | } else if (env->nmi_injected) { | |
64bef038 | 371 | vector = EXCP02_NMI; |
b7394c83 SAGDR |
372 | intr_type = VMCS_INTR_T_NMI; |
373 | } else { | |
374 | have_event = false; | |
375 | } | |
376 | ||
c97d6d2c | 377 | uint64_t info = 0; |
b7394c83 SAGDR |
378 | if (have_event) { |
379 | info = vector | intr_type | VMCS_INTR_VALID; | |
c97d6d2c | 380 | uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON); |
b7394c83 | 381 | if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) { |
c97d6d2c SAGDR |
382 | vmx_clear_nmi_blocking(cpu_state); |
383 | } | |
b7394c83 SAGDR |
384 | |
385 | if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) { | |
c97d6d2c SAGDR |
386 | info &= ~(1 << 12); /* clear undefined bit */ |
387 | if (intr_type == VMCS_INTR_T_SWINTR || | |
c97d6d2c | 388 | intr_type == VMCS_INTR_T_SWEXCEPTION) { |
b7394c83 | 389 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len); |
c97d6d2c SAGDR |
390 | } |
391 | ||
b7394c83 SAGDR |
392 | if (env->has_error_code) { |
393 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR, | |
394 | env->error_code); | |
64bef038 CE |
395 | /* Indicate that VMCS_ENTRY_EXCEPTION_ERROR is valid */ |
396 | info |= VMCS_INTR_DEL_ERRCODE; | |
c97d6d2c SAGDR |
397 | } |
398 | /*printf("reinject %lx err %d\n", info, err);*/ | |
399 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); | |
400 | }; | |
401 | } | |
402 | ||
403 | if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) { | |
b7394c83 | 404 | if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) { |
c97d6d2c | 405 | cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI; |
64bef038 | 406 | info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | EXCP02_NMI; |
c97d6d2c SAGDR |
407 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info); |
408 | } else { | |
409 | vmx_set_nmi_window_exiting(cpu_state); | |
410 | } | |
411 | } | |
412 | ||
b7394c83 | 413 | if (!(env->hflags & HF_INHIBIT_IRQ_MASK) && |
c97d6d2c | 414 | (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) && |
967f4da2 | 415 | (env->eflags & IF_MASK) && !(info & VMCS_INTR_VALID)) { |
c97d6d2c SAGDR |
416 | int line = cpu_get_pic_interrupt(&x86cpu->env); |
417 | cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD; | |
418 | if (line >= 0) { | |
419 | wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line | | |
420 | VMCS_INTR_VALID | VMCS_INTR_T_HWINTR); | |
421 | } | |
422 | } | |
423 | if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) { | |
424 | vmx_set_int_window_exiting(cpu_state); | |
425 | } | |
b7394c83 SAGDR |
426 | return (cpu_state->interrupt_request |
427 | & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)); | |
c97d6d2c SAGDR |
428 | } |
429 | ||
/*
 * Handle pending interrupt_request events (INIT, POLL, HARD/NMI wakeup,
 * SIPI, TPR access reports) outside of guest execution.
 * Returns the vcpu's halted flag.
 */
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    /* Refresh eflags: the IF test below must see the guest's live value. */
    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        /* INIT resets the CPU; needs the full state synchronized first. */
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    /* A deliverable interrupt or an NMI wakes a halted vcpu. */
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (env->eflags & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }
    return cpu_state->halted;
}