/*
 *  x86 SVM helpers
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/cpu-all.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
}

void helper_vmmcall(CPUX86State *env)
{
}

void helper_vmload(CPUX86State *env, int aflag)
{
}

void helper_vmsave(CPUX86State *env, int aflag)
{
}

void helper_stgi(CPUX86State *env)
{
}

void helper_clgi(CPUX86State *env)
{
}

void helper_skinit(CPUX86State *env)
{
}

void helper_invlpga(CPUX86State *env, int aflag)
{
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
}

void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else

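/* Store a segment-register cache into a vmcb_seg slot.  The 16-bit
   "attrib" field uses the SVM encoding: the descriptor attribute bits
   taken from bits 8..15 and 20..23 of the QEMU segment flags. */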
static inline void svm_save_seg(CPUX86State *env, hwaddr addr,
                                const SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    x86_stq_phys(cs, addr + offsetof(struct vmcb_seg, base),
             sc->base);
    x86_stl_phys(cs, addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    x86_stw_phys(cs, addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}

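/* Load a vmcb_seg slot back into a segment-register cache, undoing the
   attrib packing done by svm_save_seg(). */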
static inline void svm_load_seg(CPUX86State *env, hwaddr addr,
                                SegmentCache *sc)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    unsigned int flags;

    sc->selector = x86_lduw_phys(cs,
                             addr + offsetof(struct vmcb_seg, selector));
    sc->base = x86_ldq_phys(cs, addr + offsetof(struct vmcb_seg, base));
    sc->limit = x86_ldl_phys(cs, addr + offsetof(struct vmcb_seg, limit));
    flags = x86_lduw_phys(cs, addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}

static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr,
                                      int seg_reg)
{
    SegmentCache sc1, *sc = &sc1;

    svm_load_seg(env, addr, sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
                           sc->base, sc->limit, sc->flags);
}

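/* VMRUN: save the current (host) state to the hsave area, load the guest
   state from the VMCB pointed to by rAX, enable intercepts and enter the
   guest; optionally inject the event described by control.event_inj. */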
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr0)));
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_vmcb + offsetof(struct vmcb,
                                                             save.cr3)));
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

    /* FIXME: guest state consistency checks */

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        CPUState *cs = CPU(x86_env_get_cpu(env));

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

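/* VMLOAD: load the additional guest state (FS, GS, TR, LDTR and the
   syscall/sysenter MSRs) from the VMCB at the address given by rAX. */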
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                          save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = x86_ldq_phys(cs, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = x86_ldq_phys(cs,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = x86_ldq_phys(cs, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}

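/* VMSAVE: the converse of VMLOAD, storing the same state back to the
   VMCB at the address given by rAX. */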
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, x86_ldq_phys(cs,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.star), env->star);
    x86_stq_phys(cs,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    x86_stq_phys(cs, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}

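/* STGI/CLGI set and clear the Global Interrupt Flag, which is tracked in
   hflags2 as HF2_GIF_MASK. */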
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());
    env->hflags2 |= HF2_GIF_MASK;
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());
    env->hflags2 &= ~HF2_GIF_MASK;
}

void helper_skinit(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0, GETPC());
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}

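/* INVLPGA: invalidate the TLB mapping for the virtual address in rAX;
   the ASID in ECX is currently ignored (see the XXX note below). */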
void helper_invlpga(CPUX86State *env, int aflag)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(CPU(cpu), addr);
}

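/* Check whether an intercept for the given exit code is enabled in the
   bitmaps cached at VMRUN time and, if so, trigger a #VMEXIT.  MSR
   accesses additionally consult the MSR permission map (MSRPM). */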
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                cpu_vmexit(env, type, param, retaddr);
                t0 = 0;
                t1 = 0;
                break;
            }
            if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
                cpu_vmexit(env, type, param, retaddr);
            }
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            cpu_vmexit(env, type, param, retaddr);
        }
        break;
    }
}

void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    cpu_svm_check_intercept_param(env, type, param, GETPC());
}

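/* Check the I/O permission map (IOPM) for an intercepted port access;
   param carries the IOIO exit-info bits, and the port ends up in bits
   16..31 of the resulting exit_info_1. */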
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

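/* Begin a #VMEXIT: record the exit code and exit_info_1, then longjmp back
   to the main cpu loop, which completes the exit via do_vmexit(). */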
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (retaddr) {
        cpu_restore_state(cs, retaddr);
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT + exit_code;
    env->error_code = exit_info_1;

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

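/* Complete a #VMEXIT: save the guest state back to the VMCB, clear the
   cached intercepts and reload the host state from the hsave area. */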
void do_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base  = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}

#endif