qemu.git: target-i386/svm_helper.c
/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/cpu-all.h"
#include "helper.h"

#if !defined(CONFIG_USER_ONLY)
#include "exec/softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)
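/* User-mode emulation has no system state to virtualize, so the SVM
   helpers are no-op stubs in CONFIG_USER_ONLY builds. */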

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

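/* QEMU stores segment attributes in CPUX86State in the layout of the
   descriptor's second longword, while the VMCB uses AMD's packed 12-bit
   attrib format (type/S/DPL/P in bits 0-7, AVL/L/DB/G in bits 8-11).
   The two helpers below convert between the formats while copying a
   segment to or from guest physical memory. */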
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = lduw_phys(cs->as,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

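/* VMRUN: env->regs[R_EAX] holds the physical address of the VMCB;
   aflag == 2 selects a 64-bit address size, anything else truncates the
   address to 32 bits.  next_eip_addend is the length of the VMRUN
   instruction, used to compute the rIP saved in the host save area. */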
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
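    /* V_INTR_MASKING virtualizes the guest's IF: track the virtual
       interrupt state in HF2_VINTR_MASK and remember the host's IF in
       HF2_HIF_MASK. */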
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  ldq_phys(cs->as,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(cs->as,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(cs->as,
                                   env->vm_vmcb + offsetof(struct vmcb,
                                                           save.cpl)));

    /* FIXME: guest state consistency checks */

    switch (ldub_phys(cs->as,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

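    /* Entering the guest sets the global interrupt flag (GIF). */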
    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index,
                      env->error_code);
    }
}

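/* A VMMCALL that the hypervisor does not intercept is an invalid
   opcode (#UD) for the guest. */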
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}

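/* VMLOAD: load the state that VMRUN leaves untouched from the VMCB at
   the physical address in rAX: FS, GS, TR, LDTR, the STAR and SYSENTER
   MSRs and, on 64-bit targets, KernelGSBase, LSTAR, CSTAR and SFMASK. */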
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

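/* VMSAVE: the inverse of VMLOAD, storing the same registers back into
   the VMCB at the physical address in rAX. */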
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

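/* INVLPGA: rAX holds the virtual address to invalidate and ECX the
   ASID; this implementation ignores the ASID and simply flushes the
   page for the current context (see the XXX below). */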
void helper_invlpga(CPUX86State *env, int aflag)
{
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
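    /* The MSR permission map allocates two bits (read, then write) per
       MSR, in three 2KB regions covering MSRs 0-0x1fff,
       0xc0000000-0xc0001fff and 0xc0010000-0xc0011fff.  Below, t1 is
       the byte offset into the map, t0 the bit offset within that byte,
       and param selects the read (0) or write (1) bit. */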
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}

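/* The I/O permission map has one bit per port.  param is the IOIO
   exit-info word, whose bits 4-6 encode the access size in bytes, so a
   multi-byte access checks one bit per byte via the derived mask. */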
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}

/* Note: currently only 32 bits of exit_code are used */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

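    /* Record the guest's interrupt shadow (e.g. after MOV SS or STI) in
       the VMCB's int_state field so the hypervisor can see it. */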
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

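    /* EXITINTINFO reports an event that was being injected when the
       #VMEXIT occurred; here the pending EVENTINJ contents are simply
       copied out before EVENTINJ is cleared. */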
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}

#endif