/*
 * x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

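/* User-mode emulation has no system state to virtualize, so every SVM
   helper compiles to an empty stub here; the real implementations below
   are built only for the softmmu targets. */
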
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

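/* The VMCB and the host save area are referenced by guest-physical
   address, so all accesses below go through the ld*_phys/st*_phys
   accessors rather than through the virtual-address softmmu path. */
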
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
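
/* The VMCB keeps segment attributes in a packed 12-bit format: bits 7:0
 * hold descriptor byte 5 (type, S, DPL, P) and bits 11:8 hold the high
 * nibble of byte 6 (AVL, L, D/B, G).  QEMU keeps the same bits at
 * positions 8-15 and 20-23 of SegmentCache.flags, hence the two shift
 * pairs above and in svm_load_seg() below.  Worked example (illustrative
 * value): flags 0x00c09b00, a flat 32-bit code segment, packs to attrib
 * 0x0c9b, and unpacking restores exactly 0x00c09b00. */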

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

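/* VMRUN performs the world switch into the guest.  In outline, following
   the code below: the intercept check may exit to an outer hypervisor
   first; host state is stashed in the hsave page; the intercept bitmaps
   are cached in CPUX86State so SVM-mode execution does not have to touch
   the VMCB; guest control and save state are loaded; and finally any
   event pending in EVENTINJ is injected before guest execution resumes. */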
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
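    /* With V_INTR_MASKING the guest gets its own copy of EFLAGS.IF and a
       TPR shadow (v_tpr); the host's IF is remembered in HF2_HIF_MASK so
       it can be honoured while the guest runs. */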
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

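    /* If the hypervisor asserted a virtual interrupt, only flag it here;
       delivery happens through the normal interrupt path once the
       guest's virtual-interrupt masking allows it. */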
    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

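/* VMLOAD/VMSAVE transfer the pieces of state that VMRUN and #VMEXIT
   deliberately leave alone: the hidden FS/GS/TR/LDTR segment state, the
   SYSCALL/SYSENTER MSRs and, on 64-bit targets, KERNEL_GS_BASE.  Note
   that both take the VMCB address from rAX rather than from env->vm_vmcb. */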
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
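
/* STGI/CLGI toggle the global interrupt flag, tracked in HF2_GIF_MASK;
   while it is clear, interrupt delivery is held off in the main
   execution loop until the hypervisor raises GIF again. */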

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

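/* Check one potentially intercepted operation against the intercept bits
   cached at VMRUN time and #VMEXIT if the hypervisor asked for it.  For
   MSR accesses the permission map holds two bits per MSR (read, then
   write) in three 2K-byte blocks covering 0-0x1fff, 0xc0000000-0xc0001fff
   and 0xc0010000-0xc0011fff (the map itself is still read from guest
   memory each time, see the FIXME below).  Worked example: ECX =
   0xc0000080 (EFER) maps to bit offset (8192 + 0x80) * 2 = 16640, i.e.
   bit 0 of byte 0x820 for reads and bit 1 for writes; 'param' selects
   which of the two bits is tested. */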
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

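/* The I/O permission map has one bit per port.  'param' already carries
   the IOIO exit-information word, whose size field (bits 6:4) is turned
   into a contiguous bit mask here, so a multi-byte access that straddles
   a byte boundary is still caught by the 16-bit load.  Example: a 2-byte
   access to port 0x3f9 tests bits 1-2 of the word at offset 0x7f. */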
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

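/* #VMEXIT is the reverse world switch: guest state is written back to
   the VMCB together with the exit code and exit information, host state
   is reloaded from the hsave page, GIF is cleared, and control returns
   to the hypervisor at the instruction after its VMRUN. */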
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3.  If the PDPEs
       contain illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif