]>
Commit | Line | Data |
---|---|---|
6bada5e8 BS |
1 | /* |
2 | * x86 SVM helpers | |
3 | * | |
4 | * Copyright (c) 2003 Fabrice Bellard | |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
9 | * version 2 of the License, or (at your option) any later version. | |
10 | * | |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
20 | #include "cpu.h" | |
022c62cb | 21 | #include "exec/cpu-all.h" |
6bada5e8 BS |
22 | #include "helper.h" |
23 | ||
92fc4b58 | 24 | #if !defined(CONFIG_USER_ONLY) |
022c62cb | 25 | #include "exec/softmmu_exec.h" |
92fc4b58 BS |
26 | #endif /* !defined(CONFIG_USER_ONLY) */ |
27 | ||
6bada5e8 BS |
28 | /* Secure Virtual Machine helpers */ |
29 | ||
30 | #if defined(CONFIG_USER_ONLY) | |
31 | ||
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
35 | ||
void helper_vmmcall(CPUX86State *env)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
39 | ||
void helper_vmload(CPUX86State *env, int aflag)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
43 | ||
void helper_vmsave(CPUX86State *env, int aflag)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
47 | ||
void helper_stgi(CPUX86State *env)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
51 | ||
void helper_clgi(CPUX86State *env)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
55 | ||
void helper_skinit(CPUX86State *env)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
59 | ||
void helper_invlpga(CPUX86State *env, int aflag)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
63 | ||
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
67 | ||
68 | void cpu_vmexit(CPUX86State *nenv, uint32_t exit_code, uint64_t exit_info_1) | |
69 | { | |
70 | } | |
71 | ||
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
76 | ||
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
81 | ||
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    /* SVM is not emulated in user-only mode: no-op stub. */
}
86 | #else | |
87 | ||
a8170e5e | 88 | static inline void svm_save_seg(CPUX86State *env, hwaddr addr, |
6bada5e8 BS |
89 | const SegmentCache *sc) |
90 | { | |
19d6ca16 AF |
91 | CPUState *cs = CPU(x86_env_get_cpu(env)); |
92 | ||
5ce5944d | 93 | stw_phys(cs->as, addr + offsetof(struct vmcb_seg, selector), |
6bada5e8 | 94 | sc->selector); |
f606604f | 95 | stq_phys(cs->as, addr + offsetof(struct vmcb_seg, base), |
6bada5e8 | 96 | sc->base); |
ab1da857 | 97 | stl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit), |
6bada5e8 | 98 | sc->limit); |
5ce5944d | 99 | stw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib), |
6bada5e8 BS |
100 | ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00)); |
101 | } | |
102 | ||
a8170e5e | 103 | static inline void svm_load_seg(CPUX86State *env, hwaddr addr, |
052e80d5 | 104 | SegmentCache *sc) |
6bada5e8 | 105 | { |
19d6ca16 | 106 | CPUState *cs = CPU(x86_env_get_cpu(env)); |
6bada5e8 BS |
107 | unsigned int flags; |
108 | ||
41701aa4 EI |
109 | sc->selector = lduw_phys(cs->as, |
110 | addr + offsetof(struct vmcb_seg, selector)); | |
2c17449b | 111 | sc->base = ldq_phys(cs->as, addr + offsetof(struct vmcb_seg, base)); |
fdfba1a2 | 112 | sc->limit = ldl_phys(cs->as, addr + offsetof(struct vmcb_seg, limit)); |
41701aa4 | 113 | flags = lduw_phys(cs->as, addr + offsetof(struct vmcb_seg, attrib)); |
6bada5e8 BS |
114 | sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12); |
115 | } | |
116 | ||
a8170e5e | 117 | static inline void svm_load_seg_cache(CPUX86State *env, hwaddr addr, |
052e80d5 | 118 | int seg_reg) |
6bada5e8 BS |
119 | { |
120 | SegmentCache sc1, *sc = &sc1; | |
121 | ||
052e80d5 | 122 | svm_load_seg(env, addr, sc); |
6bada5e8 BS |
123 | cpu_x86_load_seg_cache(env, seg_reg, sc->selector, |
124 | sc->base, sc->limit, sc->flags); | |
125 | } | |
126 | ||
052e80d5 | 127 | void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend) |
6bada5e8 | 128 | { |
19d6ca16 | 129 | CPUState *cs = CPU(x86_env_get_cpu(env)); |
6bada5e8 BS |
130 | target_ulong addr; |
131 | uint32_t event_inj; | |
132 | uint32_t int_ctl; | |
133 | ||
052e80d5 | 134 | cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0); |
6bada5e8 BS |
135 | |
136 | if (aflag == 2) { | |
4b34e3ad | 137 | addr = env->regs[R_EAX]; |
6bada5e8 | 138 | } else { |
4b34e3ad | 139 | addr = (uint32_t)env->regs[R_EAX]; |
6bada5e8 BS |
140 | } |
141 | ||
142 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr); | |
143 | ||
144 | env->vm_vmcb = addr; | |
145 | ||
146 | /* save the current CPU state in the hsave page */ | |
f606604f | 147 | stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), |
6bada5e8 | 148 | env->gdt.base); |
ab1da857 | 149 | stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), |
6bada5e8 BS |
150 | env->gdt.limit); |
151 | ||
f606604f | 152 | stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.base), |
6bada5e8 | 153 | env->idt.base); |
ab1da857 | 154 | stl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), |
6bada5e8 BS |
155 | env->idt.limit); |
156 | ||
f606604f EI |
157 | stq_phys(cs->as, |
158 | env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]); | |
159 | stq_phys(cs->as, | |
160 | env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]); | |
161 | stq_phys(cs->as, | |
162 | env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]); | |
163 | stq_phys(cs->as, | |
164 | env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]); | |
165 | stq_phys(cs->as, | |
166 | env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]); | |
167 | stq_phys(cs->as, | |
168 | env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]); | |
169 | ||
170 | stq_phys(cs->as, | |
171 | env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer); | |
172 | stq_phys(cs->as, | |
173 | env->vm_hsave + offsetof(struct vmcb, save.rflags), | |
6bada5e8 BS |
174 | cpu_compute_eflags(env)); |
175 | ||
052e80d5 | 176 | svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.es), |
6bada5e8 | 177 | &env->segs[R_ES]); |
052e80d5 | 178 | svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.cs), |
6bada5e8 | 179 | &env->segs[R_CS]); |
052e80d5 | 180 | svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ss), |
6bada5e8 | 181 | &env->segs[R_SS]); |
052e80d5 | 182 | svm_save_seg(env, env->vm_hsave + offsetof(struct vmcb, save.ds), |
6bada5e8 BS |
183 | &env->segs[R_DS]); |
184 | ||
f606604f | 185 | stq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb, save.rip), |
a78d0eab | 186 | env->eip + next_eip_addend); |
f606604f EI |
187 | stq_phys(cs->as, |
188 | env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]); | |
189 | stq_phys(cs->as, | |
190 | env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]); | |
6bada5e8 BS |
191 | |
192 | /* load the interception bitmaps so we do not need to access the | |
193 | vmcb in svm mode */ | |
2c17449b | 194 | env->intercept = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 | 195 | control.intercept)); |
41701aa4 | 196 | env->intercept_cr_read = lduw_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
197 | offsetof(struct vmcb, |
198 | control.intercept_cr_read)); | |
41701aa4 | 199 | env->intercept_cr_write = lduw_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
200 | offsetof(struct vmcb, |
201 | control.intercept_cr_write)); | |
41701aa4 | 202 | env->intercept_dr_read = lduw_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
203 | offsetof(struct vmcb, |
204 | control.intercept_dr_read)); | |
41701aa4 | 205 | env->intercept_dr_write = lduw_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
206 | offsetof(struct vmcb, |
207 | control.intercept_dr_write)); | |
fdfba1a2 | 208 | env->intercept_exceptions = ldl_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
209 | offsetof(struct vmcb, |
210 | control.intercept_exceptions | |
211 | )); | |
212 | ||
213 | /* enable intercepts */ | |
214 | env->hflags |= HF_SVMI_MASK; | |
215 | ||
2c17449b | 216 | env->tsc_offset = ldq_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
217 | offsetof(struct vmcb, control.tsc_offset)); |
218 | ||
2c17449b | 219 | env->gdt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 | 220 | save.gdtr.base)); |
fdfba1a2 | 221 | env->gdt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 BS |
222 | save.gdtr.limit)); |
223 | ||
2c17449b | 224 | env->idt.base = ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 | 225 | save.idtr.base)); |
fdfba1a2 | 226 | env->idt.limit = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 BS |
227 | save.idtr.limit)); |
228 | ||
229 | /* clear exit_info_2 so we behave like the real hardware */ | |
f606604f EI |
230 | stq_phys(cs->as, |
231 | env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0); | |
6bada5e8 | 232 | |
2c17449b EI |
233 | cpu_x86_update_cr0(env, ldq_phys(cs->as, |
234 | env->vm_vmcb + offsetof(struct vmcb, | |
6bada5e8 | 235 | save.cr0))); |
2c17449b EI |
236 | cpu_x86_update_cr4(env, ldq_phys(cs->as, |
237 | env->vm_vmcb + offsetof(struct vmcb, | |
6bada5e8 | 238 | save.cr4))); |
2c17449b EI |
239 | cpu_x86_update_cr3(env, ldq_phys(cs->as, |
240 | env->vm_vmcb + offsetof(struct vmcb, | |
6bada5e8 | 241 | save.cr3))); |
2c17449b EI |
242 | env->cr[2] = ldq_phys(cs->as, |
243 | env->vm_vmcb + offsetof(struct vmcb, save.cr2)); | |
fdfba1a2 EI |
244 | int_ctl = ldl_phys(cs->as, |
245 | env->vm_vmcb + offsetof(struct vmcb, control.int_ctl)); | |
6bada5e8 BS |
246 | env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK); |
247 | if (int_ctl & V_INTR_MASKING_MASK) { | |
248 | env->v_tpr = int_ctl & V_TPR_MASK; | |
249 | env->hflags2 |= HF2_VINTR_MASK; | |
250 | if (env->eflags & IF_MASK) { | |
251 | env->hflags2 |= HF2_HIF_MASK; | |
252 | } | |
253 | } | |
254 | ||
255 | cpu_load_efer(env, | |
2c17449b EI |
256 | ldq_phys(cs->as, |
257 | env->vm_vmcb + offsetof(struct vmcb, save.efer))); | |
6bada5e8 | 258 | env->eflags = 0; |
2c17449b EI |
259 | cpu_load_eflags(env, ldq_phys(cs->as, |
260 | env->vm_vmcb + offsetof(struct vmcb, | |
6bada5e8 BS |
261 | save.rflags)), |
262 | ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK)); | |
263 | CC_OP = CC_OP_EFLAGS; | |
264 | ||
052e80d5 BS |
265 | svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.es), |
266 | R_ES); | |
267 | svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.cs), | |
268 | R_CS); | |
269 | svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ss), | |
270 | R_SS); | |
271 | svm_load_seg_cache(env, env->vm_vmcb + offsetof(struct vmcb, save.ds), | |
272 | R_DS); | |
6bada5e8 | 273 | |
2c17449b EI |
274 | env->eip = ldq_phys(cs->as, |
275 | env->vm_vmcb + offsetof(struct vmcb, save.rip)); | |
276 | ||
277 | env->regs[R_ESP] = ldq_phys(cs->as, | |
278 | env->vm_vmcb + offsetof(struct vmcb, save.rsp)); | |
279 | env->regs[R_EAX] = ldq_phys(cs->as, | |
280 | env->vm_vmcb + offsetof(struct vmcb, save.rax)); | |
281 | env->dr[7] = ldq_phys(cs->as, | |
282 | env->vm_vmcb + offsetof(struct vmcb, save.dr7)); | |
283 | env->dr[6] = ldq_phys(cs->as, | |
284 | env->vm_vmcb + offsetof(struct vmcb, save.dr6)); | |
285 | cpu_x86_set_cpl(env, ldub_phys(cs->as, | |
286 | env->vm_vmcb + offsetof(struct vmcb, | |
6bada5e8 BS |
287 | save.cpl))); |
288 | ||
289 | /* FIXME: guest state consistency checks */ | |
290 | ||
2c17449b EI |
291 | switch (ldub_phys(cs->as, |
292 | env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) { | |
6bada5e8 BS |
293 | case TLB_CONTROL_DO_NOTHING: |
294 | break; | |
295 | case TLB_CONTROL_FLUSH_ALL_ASID: | |
296 | /* FIXME: this is not 100% correct but should work for now */ | |
00c8cb0a | 297 | tlb_flush(cs, 1); |
6bada5e8 BS |
298 | break; |
299 | } | |
300 | ||
301 | env->hflags2 |= HF2_GIF_MASK; | |
302 | ||
303 | if (int_ctl & V_IRQ_MASK) { | |
259186a7 AF |
304 | CPUState *cs = CPU(x86_env_get_cpu(env)); |
305 | ||
306 | cs->interrupt_request |= CPU_INTERRUPT_VIRQ; | |
6bada5e8 BS |
307 | } |
308 | ||
309 | /* maybe we need to inject an event */ | |
fdfba1a2 | 310 | event_inj = ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, |
6bada5e8 BS |
311 | control.event_inj)); |
312 | if (event_inj & SVM_EVTINJ_VALID) { | |
313 | uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK; | |
314 | uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR; | |
fdfba1a2 | 315 | uint32_t event_inj_err = ldl_phys(cs->as, env->vm_vmcb + |
6bada5e8 BS |
316 | offsetof(struct vmcb, |
317 | control.event_inj_err)); | |
318 | ||
319 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err); | |
320 | /* FIXME: need to implement valid_err */ | |
321 | switch (event_inj & SVM_EVTINJ_TYPE_MASK) { | |
322 | case SVM_EVTINJ_TYPE_INTR: | |
27103424 | 323 | cs->exception_index = vector; |
6bada5e8 BS |
324 | env->error_code = event_inj_err; |
325 | env->exception_is_int = 0; | |
326 | env->exception_next_eip = -1; | |
327 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR"); | |
328 | /* XXX: is it always correct? */ | |
329 | do_interrupt_x86_hardirq(env, vector, 1); | |
330 | break; | |
331 | case SVM_EVTINJ_TYPE_NMI: | |
27103424 | 332 | cs->exception_index = EXCP02_NMI; |
6bada5e8 BS |
333 | env->error_code = event_inj_err; |
334 | env->exception_is_int = 0; | |
a78d0eab | 335 | env->exception_next_eip = env->eip; |
6bada5e8 | 336 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI"); |
5638d180 | 337 | cpu_loop_exit(cs); |
6bada5e8 BS |
338 | break; |
339 | case SVM_EVTINJ_TYPE_EXEPT: | |
27103424 | 340 | cs->exception_index = vector; |
6bada5e8 BS |
341 | env->error_code = event_inj_err; |
342 | env->exception_is_int = 0; | |
343 | env->exception_next_eip = -1; | |
344 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT"); | |
5638d180 | 345 | cpu_loop_exit(cs); |
6bada5e8 BS |
346 | break; |
347 | case SVM_EVTINJ_TYPE_SOFT: | |
27103424 | 348 | cs->exception_index = vector; |
6bada5e8 BS |
349 | env->error_code = event_inj_err; |
350 | env->exception_is_int = 1; | |
a78d0eab | 351 | env->exception_next_eip = env->eip; |
6bada5e8 | 352 | qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT"); |
5638d180 | 353 | cpu_loop_exit(cs); |
6bada5e8 BS |
354 | break; |
355 | } | |
27103424 | 356 | qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index, |
6bada5e8 BS |
357 | env->error_code); |
358 | } | |
359 | } | |
360 | ||
void helper_vmmcall(CPUX86State *env)
{
    /* VMMCALL is only meaningful when the hypervisor intercepts it;
       check the intercept (which may #VMEXIT), otherwise raise #UD. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0);
    raise_exception(env, EXCP06_ILLOP);
}
366 | ||
/* VMLOAD: load the extra guest state (FS, GS, TR, LDTR plus the
 * syscall/sysenter MSR values) from the VMCB whose guest-physical
 * address is in rAX (full RAX when @aflag == 2, else the low 32 bits).
 */
void helper_vmload(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                         save.fs.base)),
                  env->segs[R_FS].base);

    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    /* 64-bit-only MSR state. */
    env->kernelgsbase = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.kernel_gs_base));
    env->lstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(cs->as, addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(cs->as,
                                addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_esp));
    env->sysenter_eip = ldq_phys(cs->as, addr + offsetof(struct vmcb,
                                                 save.sysenter_eip));
}
406 | ||
/* VMSAVE: mirror of helper_vmload() — store the extra guest state
 * (FS, GS, TR, LDTR plus the syscall/sysenter MSR values) into the
 * VMCB whose guest-physical address is in rAX (full RAX when
 * @aflag == 2, else the low 32 bits).
 */
void helper_vmsave(CPUX86State *env, int aflag)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0);

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx
                  "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                  addr, ldq_phys(cs->as,
                                 addr + offsetof(struct vmcb, save.fs.base)),
                  env->segs[R_FS].base);

    svm_save_seg(env, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    /* 64-bit-only MSR state. */
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.kernel_gs_base),
             env->kernelgsbase);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(cs->as,
             addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_esp),
             env->sysenter_esp);
    stq_phys(cs->as, addr + offsetof(struct vmcb, save.sysenter_eip),
             env->sysenter_eip);
}
450 | ||
void helper_stgi(CPUX86State *env)
{
    /* STGI: set the Global Interrupt Flag (after the intercept check,
       which may #VMEXIT instead). */
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
456 | ||
void helper_clgi(CPUX86State *env)
{
    /* CLGI: clear the Global Interrupt Flag (after the intercept check,
       which may #VMEXIT instead). */
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
462 | ||
void helper_skinit(CPUX86State *env)
{
    /* SKINIT (secure init): only the intercept check is implemented;
       the instruction itself raises #UD. */
    cpu_svm_check_intercept_param(env, SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(env, EXCP06_ILLOP);
}
469 | ||
052e80d5 | 470 | void helper_invlpga(CPUX86State *env, int aflag) |
6bada5e8 | 471 | { |
31b030d4 | 472 | X86CPU *cpu = x86_env_get_cpu(env); |
6bada5e8 BS |
473 | target_ulong addr; |
474 | ||
052e80d5 | 475 | cpu_svm_check_intercept_param(env, SVM_EXIT_INVLPGA, 0); |
6bada5e8 BS |
476 | |
477 | if (aflag == 2) { | |
4b34e3ad | 478 | addr = env->regs[R_EAX]; |
6bada5e8 | 479 | } else { |
4b34e3ad | 480 | addr = (uint32_t)env->regs[R_EAX]; |
6bada5e8 BS |
481 | } |
482 | ||
483 | /* XXX: could use the ASID to see if it is needed to do the | |
484 | flush */ | |
31b030d4 | 485 | tlb_flush_page(CPU(cpu), addr); |
6bada5e8 BS |
486 | } |
487 | ||
/* Check whether the event described by (@type, @param) is intercepted
 * by the hypervisor; if so, this function does not return — it calls
 * helper_vmexit(), which longjmps back to the cpu loop.  A no-op when
 * the CPU is not in SVM guest mode (HF_SVMI_MASK clear).
 */
void helper_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                      uint64_t param)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (likely(!(env->hflags & HF_SVMI_MASK))) {
        return;
    }
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(env, type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
            uint32_t t0, t1;

            /* Locate the 2-bit entry for MSR ECX in the MSR permission
               bitmap: t1 is the byte offset, t0 the bit offset within
               that byte.  Each of the three MSR ranges maps to its own
               2K region of the bitmap. */
            switch ((uint32_t)env->regs[R_ECX]) {
            case 0 ... 0x1fff:
                t0 = (env->regs[R_ECX] * 2) % 8;
                t1 = (env->regs[R_ECX] * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside all mapped ranges: always exit. */
                helper_vmexit(env, type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param selects read (0) vs write (1) within the 2-bit pair. */
            if (ldub_phys(cs->as, addr + t1) & ((1 << param) << t0)) {
                helper_vmexit(env, type, param);
            }
        }
        break;
    default:
        /* All remaining exit codes are tested against the generic
           intercept word, indexed from SVM_EXIT_INTR. */
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(env, type, param);
        }
        break;
    }
}
563 | ||
/* Non-helper entry point wrapping helper_svm_check_intercept_param()
 * for callers outside generated code. */
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param)
{
    helper_svm_check_intercept_param(env, type, param);
}
569 | ||
/* Check the I/O permission bitmap for an intercepted access to @port.
 * @param is the IOIO exit_info_1 encoding (bits 4..6 hold the access
 * size, from which the byte-width mask is derived); @next_eip_addend
 * is added to EIP to record the resume address in exit_info_2 before
 * the #VMEXIT.  Does not return if the access is intercepted.
 */
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(cs->as, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        /* One bitmap bit per port; the access width selects how many
           consecutive bits must be tested. */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (lduw_phys(cs->as, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip */
            stq_phys(cs->as,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(env, SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
590 | ||
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: leave guest mode.
 *
 * Records @exit_code/@exit_info_1 and the full guest state into the
 * VMCB, then reloads the host state previously stashed in the hsave
 * page by helper_vmrun().  Never returns: it finishes with
 * cpu_loop_exit() back to the execution loop.
 */
void helper_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  ldq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    /* Record (and consume) a pending interrupt shadow in int_state. */
    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(cs->as,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    stl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* Write back the current virtual-interrupt state (V_TPR, V_IRQ). */
    int_ctl = ldl_phys(cs->as,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (cs->interrupt_request & CPU_INTERRUPT_VIRQ) {
        int_ctl |= V_IRQ_MASK;
    }
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = ldl_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(cs->as,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, ldq_phys(cs->as, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, ldq_phys(cs->as,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.es),
                       R_ES);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.cs),
                       R_CS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ss),
                       R_SS);
    svm_load_seg_cache(env, env->vm_hsave + offsetof(struct vmcb, save.ds),
                       R_DS);

    env->eip = ldq_phys(cs->as,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = ldq_phys(cs->as, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(cs->as,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);
    stq_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1),
             exit_info_1);

    /* Copy any pending event injection into exit_int_info and clear it. */
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(cs->as, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.event_inj_err)));
    stl_phys(cs->as,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */

    /* remove any pending exception */
    cs->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(cs);
}
778 | ||
/* Non-helper entry point wrapping helper_vmexit() for callers outside
 * generated code.  Like helper_vmexit(), does not return. */
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1)
{
    helper_vmexit(env, exit_code, exit_info_1);
}
783 | ||
784 | #endif |