/*
 *  x86 SVM helpers
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "helper.h"

/* Secure Virtual Machine helpers */
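
/*
 * All of the code below transfers state between the CPU and guest physical
 * memory: the guest VMCB lives at the physical address held in env->vm_vmcb
 * and the host save area at env->vm_hsave, and both are accessed with the
 * ld*_phys()/st*_phys() helpers.
 */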
#if defined(CONFIG_USER_ONLY)
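
/*
 * User-mode emulation has no system state to virtualize; the SVM helpers
 * are therefore empty stubs.
 */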
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}

#else
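
/*
 * System emulation.  svm_save_seg()/svm_load_seg() convert between the
 * CPU's SegmentCache representation and the packed vmcb_seg layout used in
 * the VMCB and the host save area.
 */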
static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
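
/*
 * The 12-bit VMCB attrib field holds descriptor flag bits 8..15 (type, S,
 * DPL, P) in its low byte and bits 20..23 (AVL, L, D/B, G) in bits 8..11;
 * svm_load_seg() undoes the packing done by svm_save_seg() above.
 */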
static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, target_phys_addr_t addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);

    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}
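
/*
 * VMRUN: the world switch into the guest.  The host state is written to the
 * hsave page, the guest state is read from the VMCB, intercept checking
 * (HF_SVMI_MASK) and the global interrupt flag (GIF) are enabled, and any
 * event pending in control.event_inj is injected.
 */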
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                     save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;
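
    /*
     * The guest segment registers go through cpu_x86_load_seg_cache() so
     * that the derived hflags (code/stack size, etc.) are recomputed.
     */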
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
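
/*
 * VMLOAD/VMSAVE transfer the state that VMRUN/#VMEXIT leave alone:
 * FS, GS, TR, LDTR and the SYSCALL/SYSENTER MSRs.
 */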

void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb,
                                                 save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
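
/*
 * Intercept check, called before interceptable instructions and events.
 * If the corresponding bit was set in the bitmaps read at VMRUN time, this
 * ends in helper_vmexit() and does not return.  For MSR accesses the MSR
 * permission bitmap at control.msrpm_base_pa is consulted: each of the
 * three architectural MSR ranges gets two bits (read/write) per MSR in a
 * consecutive 2 KB region of the bitmap.
 */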

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
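
/*
 * I/O intercept check used by the IN/OUT helpers: the port is looked up in
 * the I/O permission bitmap at control.iopm_base_pa.  param carries the
 * IOIO exit_info_1 bits (direction, operand size, ...); the rIP of the
 * following instruction is stored in exit_info_2 before the #VMEXIT.
 */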

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
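
/*
 * #VMEXIT: the guest state and the exit code/exit_info are written back to
 * the VMCB, the host state is reloaded from the hsave page and control
 * returns to the host through cpu_loop_exit(); this function never returns
 * to its caller.
 */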
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif