/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "cpu-all.h"
#include "helper.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

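/* The VMCB "attrib" field packs the segment descriptor attributes into
   12 bits: the access byte (type, S, DPL, P) in bits 0-7 and the
   AVL/L/D/G nibble in bits 8-11.  QEMU's SegmentCache keeps the same
   bits at their descriptor positions (flags bits 8-15 and 20-23), so the
   two helpers below shift between the layouts when copying a segment
   register to or from guest physical memory. */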
static inline void svm_save_seg(CPUX86State *env, target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, target_phys_addr_t addr,
                                SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env,
                                      target_phys_addr_t addr, int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

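/* VMRUN: save the current (host) state to the host save area at
   env->vm_hsave, load the intercept bitmaps and guest state from the VMCB
   whose physical address is in (E/R)AX, set GIF, and inject any event
   pending in the VMCB's event_inj field before resuming guest execution. */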
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
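    /* control.event_inj describes an event the hypervisor wants delivered
       on this VMRUN: SVM_EVTINJ_VEC_MASK holds the vector,
       SVM_EVTINJ_TYPE_MASK the delivery type, SVM_EVTINJ_VALID marks the
       field as pending, and SVM_EVTINJ_VALID_ERR indicates that
       control.event_inj_err carries an error code. */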
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

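/* VMLOAD: load the "extra" guest state that VMRUN does not touch --
   FS, GS, TR and LDTR plus the KernelGSBase, STAR/LSTAR/CSTAR/SFMASK
   and SYSENTER MSRs -- from the VMCB addressed by (E/R)AX. */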
void helper_vmload(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

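/* VMSAVE: the mirror image of VMLOAD -- store the same set of registers
   from the CPU back into the VMCB addressed by (E/R)AX. */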
void helper_vmsave(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

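/* INVLPGA: invalidate the TLB mapping for the virtual address in (E/R)AX.
   The ASID argument is ignored here; QEMU simply flushes the page in the
   current context. */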
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = EAX;
    } else {
        addr = (uint32_t)EAX;
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

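/* Check whether the access described by (type, param) is intercepted by
   the guest's VMCB.  CR/DR accesses and exceptions are looked up in the
   bitmaps cached at VMRUN time; MSR accesses additionally consult the
   MSR permission map in guest memory.  If the intercept is active this
   helper does not return: it raises #VMEXIT via helper_vmexit(). */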
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

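            /* The MSR permission map holds two bits per MSR (the low bit of
               the pair for reads, the high bit for writes); MSRs
               0x00000000-0x00001fff, 0xc0000000-0xc0001fff and
               0xc0010000-0xc0011fff each map to a 2 KB region of the map.
               Compute the byte offset t1 and the bit offset t0 of the pair,
               then test the bit selected by param (0 = read, 1 = write). */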
            switch ((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

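/* Check an I/O instruction against the I/O permission map: one bit per
   port, with an N-byte access required to test N consecutive bits.  The
   size is taken from the SZ8/SZ16/SZ32 bits of the IOIO exit-info encoding
   passed in param; on an intercept, exit_info_2 is set to the address of
   the next instruction before raising #VMEXIT. */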
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

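/* #VMEXIT: store the guest state, exit code and exit info into the VMCB,
   reload the host state saved by VMRUN from the hsave area, clear GIF and
   return to the host by longjmp-ing back to the cpu loop. */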
/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  EIP);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif