/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */
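
/* These helpers emulate AMD SVM for TCG: VMRUN/#VMEXIT world switches via
   the VMCB and the host save area, VMLOAD/VMSAVE, global interrupt flag
   handling (STGI/CLGI) and the intercept checks.  In user-mode emulation
   there is no system state to virtualize, so the helpers are stubs. */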

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
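
/* Note: the VMCB 'attrib' field packs the descriptor attribute bits into
   bits 0-11, while QEMU's SegmentCache keeps them at their native
   descriptor positions (bits 8-15 and 20-23 of 'flags'); hence the
   shift/mask pairs in svm_save_seg() above and svm_load_seg() below. */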

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
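
/* VMRUN: save the host state into the hsave page, cache the intercept
   bitmaps in CPUX86State, load the guest state from the VMCB pointed to
   by rAX and, if requested, inject a pending event before resuming the
   guest. */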

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;
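
    /* From here on HF_SVMI_MASK is set, so helper_svm_check_intercept_param()
       actually consults the bitmaps cached above. */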

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
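
    /* With V_INTR_MASKING the guest sees a virtualized TPR/IF; remember the
       host's EFLAGS.IF in HF2_HIF_MASK while the guest is running. */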
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}
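
/* VMMCALL is only useful when intercepted by the hypervisor; if it is not
   intercepted it raises #UD, as the architecture requires. */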

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
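
/* STGI/CLGI set and clear the global interrupt flag; while GIF is clear the
   virtual CPU does not take interrupts. */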

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}
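
/* Dispatch on the intercept type: CR/DR accesses and exceptions use the
   per-bit bitmaps cached at VMRUN time, MSR accesses consult the MSR
   permission map in guest memory, and everything else is checked against
   the general 64-bit intercept vector. */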

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
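
/* The I/O permission map holds one bit per port; bits 4-6 of 'param' (the
   IOIO exit information) encode the access size, so 'mask' covers one bit
   per byte of the access, which may straddle a byte boundary (hence the
   16-bit load). */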

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
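
/* #VMEXIT: write the guest state and the exit code/info back into the VMCB,
   reload the host state from the hsave page, clear GIF and return to the
   main loop with cpu_loop_exit(). */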

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif