 * Copyright (c) 2003 Fabrice Bellard
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)

void helper_vmmcall(CPUX86State *env)

void helper_vmload(CPUX86State *env, int aflag)

void helper_vmsave(CPUX86State *env, int aflag)

void helper_stgi(CPUX86State *env)

void helper_clgi(CPUX86State *env)

void helper_skinit(CPUX86State *env)

void helper_invlpga(CPUX86State *env, int aflag)

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
                 ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
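    /*
     * Note: the VMCB keeps segment attributes in AMD's packed 12-bit form:
     * bits 7:0 are the type/S/DPL/P byte (descriptor bits 47:40) and bits
     * 11:8 are AVL/L/D/G (descriptor bits 55:52).  QEMU's cached segment
     * flags hold those same fields at bits 15:8 and 23:20, hence the two
     * shift-and-mask terms above; e.g. flags 0x00cf9b00 (a flat 32-bit
     * code segment) pack down to attrib 0x0c9b.
     */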

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
    CPUState *cs = CPU(x86_env_get_cpu(env));

    sc->selector = x86_lduw_phys(cs,
                                 addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
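
/*
 * Loading guest selectors through cpu_x86_load_seg_cache() rather than by
 * writing env->segs[] directly keeps the derived hflags (code and stack
 * segment size, address handling) consistent with the descriptor that was
 * just read from the VMCB.
 */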

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

        addr = env->regs[R_EAX];
        addr = (uint32_t)env->regs[R_EAX];

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),

                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
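
    /*
     * Everything written to the hsave page above is the host context that
     * do_vmexit() reads back when the guest exits to the host.
     */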

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;
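    /*
     * HF_SVMI_MASK marks the CPU as executing guest code under SVM; it is
     * what makes cpu_svm_check_intercept_param() consult the intercept
     * bitmaps loaded above instead of returning immediately.
     */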

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,

    env->idt.base = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,

    /* clear exit_info_2 so we behave like the real hardware */
        env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_vmcb + offsetof(struct vmcb,
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
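    /*
     * With V_INTR_MASKING set, the guest's EFLAGS.IF only gates virtual
     * interrupts (tracked by HF2_VINTR_MASK); the host's interrupt-enable
     * state at VMRUN time is remembered in HF2_HIF_MASK so that physical
     * interrupts can still be taken on behalf of the host.
     */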
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;

                  env->vm_vmcb + offsetof(struct vmcb, save.efer)));

    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));
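
        /*
         * EVENTINJ layout: bits 7:0 vector, bits 10:8 event type, bit 11
         * "deliver error code", bit 31 valid; the error code itself comes
         * from the separate event_inj_err field read above.
         */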
        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");

        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,

void helper_vmmcall(CPUX86State *env)
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);

void helper_vmload(CPUX86State *env, int aflag)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

        addr = env->regs[R_EAX];
        addr = (uint32_t)env->regs[R_EAX];

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                         save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                    addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,

void helper_vmsave(CPUX86State *env, int aflag)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

        addr = env->regs[R_EAX];
        addr = (uint32_t)env->regs[R_EAX];

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                     addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),

    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
                 addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),

void helper_stgi(CPUX86State *env)
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;

void helper_clgi(CPUX86State *env)
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;

void helper_skinit(CPUX86State *env)
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);

void helper_invlpga(CPUX86State *env, int aflag)
    X86CPU *cpu = x86_env_get_cpu(env);

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

        addr = env->regs[R_EAX];
        addr = (uint32_t)env->regs[R_EAX];

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {

    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.msrpm_base_pa));
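
            /*
             * The MSR permission map holds two bits per MSR (read, then
             * write) in 2 KB blocks for the 0x00000000, 0xc0000000 and
             * 0xc0010000 MSR ranges; t1 becomes the byte offset into the
             * map and t0 the bit offset, so "(1 << param) << t0" selects
             * the read (param == 0) or write (param == 1) intercept bit.
             */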
            switch ((uint32_t)env->regs[R_ECX]) {
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;

                cpu_vmexit(env, type, param, retaddr);

            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);

        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
    cpu_svm_check_intercept_param(env, type, param, GETPC());

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
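
        /*
         * param carries the IOIO intercept information: bits 6:4 encode the
         * access size in bytes, so "mask" has one bit per byte accessed and
         * the 16-bit load below also catches accesses that straddle a byte
         * boundary of the I/O permission map (one bit per port).
         */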
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
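
    /*
     * cpu_vmexit() only records the exit: EXCP_VMEXIT + exit_code lands in
     * exception_index and is delivered through the exception path, where
     * do_vmexit() below performs the actual switch back to the host state
     * saved in the hsave page.
     */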

void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),

                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,

    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr7));

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),

                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */