4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 #include "host-utils.h"
26 #define raise_exception_err(a, b)\
29 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
30 (raise_exception_err)(a, b);\
34 const uint8_t parity_table[256] = {
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
48 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
63 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
64 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
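/* Note: the table above simply encodes the x86 PF definition: entry i is
   CC_P when byte value i contains an even number of set bits, 0 otherwise.
   It could equivalently be generated at startup, e.g. (sketch only, using a
   hypothetical count_set_bits() popcount helper not defined in this file):

       for (i = 0; i < 256; i++)
           table[i] = (count_set_bits(i) & 1) ? 0 : CC_P;

   keeping it as a static const table avoids the init-time work. */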
70 const uint8_t rclw_table[32] = {
71 0, 1, 2, 3, 4, 5, 6, 7,
72 8, 9,10,11,12,13,14,15,
73 16, 0, 1, 2, 3, 4, 5, 6,
74 7, 8, 9,10,11,12,13,14,
78 const uint8_t rclb_table[32] = {
79 0, 1, 2, 3, 4, 5, 6, 7,
80 8, 0, 1, 2, 3, 4, 5, 6,
81 7, 8, 0, 1, 2, 3, 4, 5,
82 6, 7, 8, 0, 1, 2, 3, 4,
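/* Note: RCL/RCR rotate through CF, so a 16-bit rotate effectively works on
   17 bits and an 8-bit rotate on 9 bits.  The two tables above precompute
   (count % 17) and (count % 9) for the 32 possible masked rotate counts. */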
85 const CPU86_LDouble f15rk[7] =
87 0.00000000000000000000L,
88 1.00000000000000000000L,
89 3.14159265358979323851L, /*pi*/
90 0.30102999566398119523L, /*lg2*/
91 0.69314718055994530943L, /*ln2*/
92 1.44269504088896340739L, /*l2e*/
93 3.32192809488736234781L, /*l2t*/
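/* Note: these are the constants loaded by the FPU instructions FLDZ, FLD1,
   FLDPI, FLDLG2, FLDLN2, FLDL2E and FLDL2T, in that order. */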
98 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
102 spin_lock(&global_cpu_lock);
105 void cpu_unlock(void)
107 spin_unlock(&global_cpu_lock);
110 /* return non-zero on error */
111 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
122 index = selector & ~7;
123 if ((index + 7) > dt->limit)
125 ptr = dt->base + index;
126 *e1_ptr = ldl_kernel(ptr);
127 *e2_ptr = ldl_kernel(ptr + 4);
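/* The helpers below decode a raw 8-byte segment descriptor: e1 is the low
   32 bits and e2 the high 32 bits.  The limit is split across e1[15:0] and
   e2[19:16] and is scaled by 4K when the granularity bit is set; the base
   is split across e1[31:16], e2[7:0] and e2[31:24]. */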
131 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
134 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
135 if (e2 & DESC_G_MASK)
136 limit = (limit << 12) | 0xfff;
140 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
142 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
145 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
147 sc->base = get_seg_base(e1, e2);
148 sc->limit = get_seg_limit(e1, e2);
152 /* init the segment cache in vm86 mode. */
153 static inline void load_seg_vm(int seg, int selector)
156 cpu_x86_load_seg_cache(env, seg, selector,
157 (selector << 4), 0xffff, 0);
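/* The helper below fetches the inner-level stack pointer from the current
   TSS: in a 32-bit TSS the SS:ESP pair for privilege level dpl lives at
   offset 8 * dpl + 4, in a 16-bit TSS at offset 4 * dpl + 2, which is what
   the (dpl * 4 + 2) << shift computation produces. */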
160 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
161 uint32_t *esp_ptr, int dpl)
163 int type, index, shift;
168 printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
169 for(i=0;i<env->tr.limit;i++) {
170 printf("%02x ", env->tr.base[i]);
171 if ((i & 7) == 7) printf("\n");
177 if (!(env->tr.flags & DESC_P_MASK))
178 cpu_abort(env, "invalid tss");
179 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
181 cpu_abort(env, "invalid tss type");
183 index = (dpl * 4 + 2) << shift;
184 if (index + (4 << shift) - 1 > env->tr.limit)
185 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
187 *esp_ptr = lduw_kernel(env->tr.base + index);
188 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
190 *esp_ptr = ldl_kernel(env->tr.base + index);
191 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
195 /* XXX: merge with load_seg() */
196 static void tss_load_seg(int seg_reg, int selector)
201 if ((selector & 0xfffc) != 0) {
202 if (load_segment(&e1, &e2, selector) != 0)
203 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
204 if (!(e2 & DESC_S_MASK))
205 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
207 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
208 cpl = env->hflags & HF_CPL_MASK;
209 if (seg_reg == R_CS) {
210 if (!(e2 & DESC_CS_MASK))
211 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
212 /* XXX: is it correct ? */
214 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
215 if ((e2 & DESC_C_MASK) && dpl > rpl)
216 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
217 } else if (seg_reg == R_SS) {
218 /* SS must be writable data */
219 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
220 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
221 if (dpl != cpl || dpl != rpl)
222 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
224 /* not readable code */
225 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
226 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
227 /* if data or non-conforming code, check the access rights */
228 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
229 if (dpl < cpl || dpl < rpl)
230 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
233 if (!(e2 & DESC_P_MASK))
234 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
235 cpu_x86_load_seg_cache(env, seg_reg, selector,
236 get_seg_base(e1, e2),
237 get_seg_limit(e1, e2),
240 if (seg_reg == R_SS || seg_reg == R_CS)
241 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
245 #define SWITCH_TSS_JMP 0
246 #define SWITCH_TSS_IRET 1
247 #define SWITCH_TSS_CALL 2
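/* Note on the three task switch sources: for JMP and IRET the busy bit of
   the outgoing TSS is cleared, while CALL (and interrupts through a task
   gate, which also use SWITCH_TSS_CALL) leaves it set; only CALL sets NT in
   the new task's EFLAGS and stores the old TSS selector as the back link,
   and only IRET clears NT in the saved flags of the outgoing task. */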
249 /* XXX: restore CPU state in registers (PowerPC case) */
250 static void switch_tss(int tss_selector,
251 uint32_t e1, uint32_t e2, int source,
254 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
255 target_ulong tss_base;
256 uint32_t new_regs[8], new_segs[6];
257 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
258 uint32_t old_eflags, eflags_mask;
263 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
265 if (loglevel & CPU_LOG_PCALL)
266 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
269 /* if it is a task gate, we read the TSS segment and load it */
271 if (!(e2 & DESC_P_MASK))
272 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
273 tss_selector = e1 >> 16;
274 if (tss_selector & 4)
275 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
276 if (load_segment(&e1, &e2, tss_selector) != 0)
277 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
278 if (e2 & DESC_S_MASK)
279 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
280 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
282 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
285 if (!(e2 & DESC_P_MASK))
286 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
292 tss_limit = get_seg_limit(e1, e2);
293 tss_base = get_seg_base(e1, e2);
294 if ((tss_selector & 4) != 0 ||
295 tss_limit < tss_limit_max)
296 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
297 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
299 old_tss_limit_max = 103;
301 old_tss_limit_max = 43;
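/* Layout reminder: a 32-bit TSS keeps CR3 at 0x1c, EIP at 0x20, EFLAGS at
   0x24, the eight general registers at 0x28 with a 4-byte stride, the six
   segment selectors at 0x48, the LDT selector at 0x60, the debug trap word
   at 0x64 and the I/O map base at 0x66; its minimal limit is 103.  A 16-bit
   TSS keeps IP at 0x0e, FLAGS at 0x10, the registers at 0x12 with a 2-byte
   stride, the four selectors at 0x22 and the LDT selector at 0x2a; its
   minimal limit is 43. */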
303 /* read all the registers from the new TSS */
306 new_cr3 = ldl_kernel(tss_base + 0x1c);
307 new_eip = ldl_kernel(tss_base + 0x20);
308 new_eflags = ldl_kernel(tss_base + 0x24);
309 for(i = 0; i < 8; i++)
310 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
311 for(i = 0; i < 6; i++)
312 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
313 new_ldt = lduw_kernel(tss_base + 0x60);
314 new_trap = ldl_kernel(tss_base + 0x64);
318 new_eip = lduw_kernel(tss_base + 0x0e);
319 new_eflags = lduw_kernel(tss_base + 0x10);
320 for(i = 0; i < 8; i++)
321 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
322 for(i = 0; i < 4; i++)
323 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
324 new_ldt = lduw_kernel(tss_base + 0x2a);
330 /* NOTE: we must avoid memory exceptions during the task switch,
331 so we make dummy accesses beforehand */
332 /* XXX: this can still fail in some cases, so a bigger hack would be
333 needed to validate the TLB after the accesses have been done */
335 v1 = ldub_kernel(env->tr.base);
336 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
337 stb_kernel(env->tr.base, v1);
338 stb_kernel(env->tr.base + old_tss_limit_max, v2);
340 /* clear the busy bit (the switch is still restartable at this point) */
341 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
344 ptr = env->gdt.base + (env->tr.selector & ~7);
345 e2 = ldl_kernel(ptr + 4);
346 e2 &= ~DESC_TSS_BUSY_MASK;
347 stl_kernel(ptr + 4, e2);
349 old_eflags = compute_eflags();
350 if (source == SWITCH_TSS_IRET)
351 old_eflags &= ~NT_MASK;
353 /* save the current state in the old TSS */
356 stl_kernel(env->tr.base + 0x20, next_eip);
357 stl_kernel(env->tr.base + 0x24, old_eflags);
358 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
359 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
360 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
361 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
362 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
363 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
364 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
365 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
366 for(i = 0; i < 6; i++)
367 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
370 stw_kernel(env->tr.base + 0x0e, next_eip);
371 stw_kernel(env->tr.base + 0x10, old_eflags);
372 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
373 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
374 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
375 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
376 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
377 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
378 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
379 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
380 for(i = 0; i < 4; i++)
381 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
384 /* now if an exception occurs, it will occur in the next task
387 if (source == SWITCH_TSS_CALL) {
388 stw_kernel(tss_base, env->tr.selector);
389 new_eflags |= NT_MASK;
393 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
396 ptr = env->gdt.base + (tss_selector & ~7);
397 e2 = ldl_kernel(ptr + 4);
398 e2 |= DESC_TSS_BUSY_MASK;
399 stl_kernel(ptr + 4, e2);
402 /* set the new CPU state */
403 /* from this point, any exception that occurs can cause problems */
404 env->cr[0] |= CR0_TS_MASK;
405 env->hflags |= HF_TS_MASK;
406 env->tr.selector = tss_selector;
407 env->tr.base = tss_base;
408 env->tr.limit = tss_limit;
409 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
411 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
412 cpu_x86_update_cr3(env, new_cr3);
415 /* first load all the registers without raising exceptions, then
416 reload them so that possible exceptions are taken */
418 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
419 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
421 eflags_mask &= 0xffff;
422 load_eflags(new_eflags, eflags_mask);
423 /* XXX: what to do in the 16-bit case? */
432 if (new_eflags & VM_MASK) {
433 for(i = 0; i < 6; i++)
434 load_seg_vm(i, new_segs[i]);
435 /* in vm86, CPL is always 3 */
436 cpu_x86_set_cpl(env, 3);
438 /* CPL is set to the RPL of CS */
439 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
440 /* load just the selectors first, as the rest may trigger exceptions */
441 for(i = 0; i < 6; i++)
442 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
445 env->ldt.selector = new_ldt & ~4;
452 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
454 if ((new_ldt & 0xfffc) != 0) {
456 index = new_ldt & ~7;
457 if ((index + 7) > dt->limit)
458 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
459 ptr = dt->base + index;
460 e1 = ldl_kernel(ptr);
461 e2 = ldl_kernel(ptr + 4);
462 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
463 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
464 if (!(e2 & DESC_P_MASK))
465 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
466 load_seg_cache_raw_dt(&env->ldt, e1, e2);
469 /* load the segments */
470 if (!(new_eflags & VM_MASK)) {
471 tss_load_seg(R_CS, new_segs[R_CS]);
472 tss_load_seg(R_SS, new_segs[R_SS]);
473 tss_load_seg(R_ES, new_segs[R_ES]);
474 tss_load_seg(R_DS, new_segs[R_DS]);
475 tss_load_seg(R_FS, new_segs[R_FS]);
476 tss_load_seg(R_GS, new_segs[R_GS]);
479 /* check that EIP is in the CS segment limits */
480 if (new_eip > env->segs[R_CS].limit) {
481 /* XXX: different exception if CALL ? */
482 raise_exception_err(EXCP0D_GPF, 0);
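/* The I/O permission bitmap starts at the 16-bit offset stored at TSS
   offset 0x66; each bit covers one port and an access is allowed only if
   every bit it touches is clear.  A 16-bit word is read so that checks
   spanning a byte boundary work without special casing. */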
486 /* check whether port I/O is allowed by the TSS */
487 static inline void check_io(int addr, int size)
489 int io_offset, val, mask;
491 /* the TSS must be a valid 32-bit TSS */
492 if (!(env->tr.flags & DESC_P_MASK) ||
493 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
496 io_offset = lduw_kernel(env->tr.base + 0x66);
497 io_offset += (addr >> 3);
498 /* Note: the check needs two bytes */
499 if ((io_offset + 1) > env->tr.limit)
501 val = lduw_kernel(env->tr.base + io_offset);
503 mask = (1 << size) - 1;
504 /* all bits must be zero to allow the I/O */
505 if ((val & mask) != 0) {
507 raise_exception_err(EXCP0D_GPF, 0);
511 void check_iob_T0(void)
516 void check_iow_T0(void)
521 void check_iol_T0(void)
526 void check_iob_DX(void)
528 check_io(EDX & 0xffff, 1);
531 void check_iow_DX(void)
533 check_io(EDX & 0xffff, 2);
536 void check_iol_DX(void)
538 check_io(EDX & 0xffff, 4);
541 static inline unsigned int get_sp_mask(unsigned int e2)
543 if (e2 & DESC_B_MASK)
550 #define SET_ESP(val, sp_mask)\
552 if ((sp_mask) == 0xffff)\
553 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
554 else if ((sp_mask) == 0xffffffffLL)\
555 ESP = (uint32_t)(val);\
560 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
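/* SET_ESP only updates the bits of ESP selected by sp_mask, so a 16-bit
   stack segment leaves the upper half of ESP untouched.  The 64-bit build
   above additionally zero-extends 32-bit stack updates, matching how a
   32-bit operand size updates RSP in long mode. */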
563 /* XXX: add an is_user flag to get proper security support */
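/* The PUSHW/PUSHL/POPW/POPL macros below work on a local copy of the stack
   pointer and mask every access with sp_mask, so pushes and pops on a
   16-bit stack wrap within the low 64K of the segment; the caller commits
   the final value with SET_ESP once the whole frame has been built. */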
564 #define PUSHW(ssp, sp, sp_mask, val)\
567 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
570 #define PUSHL(ssp, sp, sp_mask, val)\
573 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
576 #define POPW(ssp, sp, sp_mask, val)\
578 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
582 #define POPL(ssp, sp, sp_mask, val)\
584 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
588 /* protected mode interrupt */
589 static void do_interrupt_protected(int intno, int is_int, int error_code,
590 unsigned int next_eip, int is_hw)
593 target_ulong ptr, ssp;
594 int type, dpl, selector, ss_dpl, cpl;
595 int has_error_code, new_stack, shift;
596 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
597 uint32_t old_eip, sp_mask;
598 int svm_should_check = 1;
600 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
602 svm_should_check = 0;
606 && (INTERCEPTEDl(_exceptions, 1 << intno)
608 raise_interrupt(intno, is_int, error_code, 0);
611 if (!is_int && !is_hw) {
630 if (intno * 8 + 7 > dt->limit)
631 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
632 ptr = dt->base + intno * 8;
633 e1 = ldl_kernel(ptr);
634 e2 = ldl_kernel(ptr + 4);
635 /* check gate type */
636 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
638 case 5: /* task gate */
639 /* must do this check here to return the correct error code */
640 if (!(e2 & DESC_P_MASK))
641 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
642 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
643 if (has_error_code) {
646 /* push the error code */
647 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
649 if (env->segs[R_SS].flags & DESC_B_MASK)
653 esp = (ESP - (2 << shift)) & mask;
654 ssp = env->segs[R_SS].base + esp;
656 stl_kernel(ssp, error_code);
658 stw_kernel(ssp, error_code);
662 case 6: /* 286 interrupt gate */
663 case 7: /* 286 trap gate */
664 case 14: /* 386 interrupt gate */
665 case 15: /* 386 trap gate */
668 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
671 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
672 cpl = env->hflags & HF_CPL_MASK;
673 /* check privilege if software int */
674 if (is_int && dpl < cpl)
675 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
676 /* check valid bit */
677 if (!(e2 & DESC_P_MASK))
678 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
680 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
681 if ((selector & 0xfffc) == 0)
682 raise_exception_err(EXCP0D_GPF, 0);
684 if (load_segment(&e1, &e2, selector) != 0)
685 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
686 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
687 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
690 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
691 if (!(e2 & DESC_P_MASK))
692 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
693 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
694 /* to inner privilege */
695 get_ss_esp_from_tss(&ss, &esp, dpl);
696 if ((ss & 0xfffc) == 0)
697 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
699 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
700 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
701 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
702 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
704 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
705 if (!(ss_e2 & DESC_S_MASK) ||
706 (ss_e2 & DESC_CS_MASK) ||
707 !(ss_e2 & DESC_W_MASK))
708 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
709 if (!(ss_e2 & DESC_P_MASK))
710 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
712 sp_mask = get_sp_mask(ss_e2);
713 ssp = get_seg_base(ss_e1, ss_e2);
714 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
715 /* to same privilege */
716 if (env->eflags & VM_MASK)
717 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
719 sp_mask = get_sp_mask(env->segs[R_SS].flags);
720 ssp = env->segs[R_SS].base;
724 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
725 new_stack = 0; /* avoid warning */
726 sp_mask = 0; /* avoid warning */
727 ssp = 0; /* avoid warning */
728 esp = 0; /* avoid warning */
734 /* XXX: check that enough room is available */
735 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
736 if (env->eflags & VM_MASK)
742 if (env->eflags & VM_MASK) {
743 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
744 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
745 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
746 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
748 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
749 PUSHL(ssp, esp, sp_mask, ESP);
751 PUSHL(ssp, esp, sp_mask, compute_eflags());
752 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
753 PUSHL(ssp, esp, sp_mask, old_eip);
754 if (has_error_code) {
755 PUSHL(ssp, esp, sp_mask, error_code);
759 if (env->eflags & VM_MASK) {
760 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
761 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
762 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
763 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
765 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
766 PUSHW(ssp, esp, sp_mask, ESP);
768 PUSHW(ssp, esp, sp_mask, compute_eflags());
769 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
770 PUSHW(ssp, esp, sp_mask, old_eip);
771 if (has_error_code) {
772 PUSHW(ssp, esp, sp_mask, error_code);
777 if (env->eflags & VM_MASK) {
778 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
779 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
780 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
781 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
783 ss = (ss & ~3) | dpl;
784 cpu_x86_load_seg_cache(env, R_SS, ss,
785 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
787 SET_ESP(esp, sp_mask);
789 selector = (selector & ~3) | dpl;
790 cpu_x86_load_seg_cache(env, R_CS, selector,
791 get_seg_base(e1, e2),
792 get_seg_limit(e1, e2),
794 cpu_x86_set_cpl(env, dpl);
797 /* an interrupt gate clears the IF flag */
798 if ((type & 1) == 0) {
799 env->eflags &= ~IF_MASK;
801 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
806 #define PUSHQ(sp, val)\
809 stq_kernel(sp, (val));\
812 #define POPQ(sp, val)\
814 val = ldq_kernel(sp);\
818 static inline target_ulong get_rsp_from_tss(int level)
823 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
824 env->tr.base, env->tr.limit);
827 if (!(env->tr.flags & DESC_P_MASK))
828 cpu_abort(env, "invalid tss");
829 index = 8 * level + 4;
830 if ((index + 7) > env->tr.limit)
831 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
832 return ldq_kernel(env->tr.base + index);
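/* In the 64-bit TSS the stack pointers are 8-byte fields starting at
   offset 4: levels 0-2 select RSP0-RSP2 and, since callers pass ist + 3 for
   a non-zero IST index, levels 4-10 select IST1-IST7.  Both cases reduce to
   the 8 * level + 4 computation above. */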
835 /* 64-bit interrupt */
836 static void do_interrupt64(int intno, int is_int, int error_code,
837 target_ulong next_eip, int is_hw)
841 int type, dpl, selector, cpl, ist;
842 int has_error_code, new_stack;
843 uint32_t e1, e2, e3, ss;
844 target_ulong old_eip, esp, offset;
845 int svm_should_check = 1;
847 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
849 svm_should_check = 0;
852 && INTERCEPTEDl(_exceptions, 1 << intno)
854 raise_interrupt(intno, is_int, error_code, 0);
857 if (!is_int && !is_hw) {
876 if (intno * 16 + 15 > dt->limit)
877 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
878 ptr = dt->base + intno * 16;
879 e1 = ldl_kernel(ptr);
880 e2 = ldl_kernel(ptr + 4);
881 e3 = ldl_kernel(ptr + 8);
882 /* check gate type */
883 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
885 case 14: /* 386 interrupt gate */
886 case 15: /* 386 trap gate */
889 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
892 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
893 cpl = env->hflags & HF_CPL_MASK;
894 /* check privilege if software int */
895 if (is_int && dpl < cpl)
896 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
897 /* check valid bit */
898 if (!(e2 & DESC_P_MASK))
899 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
901 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
903 if ((selector & 0xfffc) == 0)
904 raise_exception_err(EXCP0D_GPF, 0);
906 if (load_segment(&e1, &e2, selector) != 0)
907 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
908 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
909 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
910 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
913 if (!(e2 & DESC_P_MASK))
914 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
915 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
916 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
917 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
918 /* to inner privilege */
920 esp = get_rsp_from_tss(ist + 3);
922 esp = get_rsp_from_tss(dpl);
923 esp &= ~0xfLL; /* align stack */
926 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
927 /* to same privilege */
928 if (env->eflags & VM_MASK)
929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
932 esp = get_rsp_from_tss(ist + 3);
935 esp &= ~0xfLL; /* align stack */
938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
939 new_stack = 0; /* avoid warning */
940 esp = 0; /* avoid warning */
943 PUSHQ(esp, env->segs[R_SS].selector);
945 PUSHQ(esp, compute_eflags());
946 PUSHQ(esp, env->segs[R_CS].selector);
948 if (has_error_code) {
949 PUSHQ(esp, error_code);
954 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
958 selector = (selector & ~3) | dpl;
959 cpu_x86_load_seg_cache(env, R_CS, selector,
960 get_seg_base(e1, e2),
961 get_seg_limit(e1, e2),
963 cpu_x86_set_cpl(env, dpl);
966 /* an interrupt gate clears the IF flag */
967 if ((type & 1) == 0) {
968 env->eflags &= ~IF_MASK;
970 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
974 #if defined(CONFIG_USER_ONLY)
975 void helper_syscall(int next_eip_addend)
977 env->exception_index = EXCP_SYSCALL;
978 env->exception_next_eip = env->eip + next_eip_addend;
982 void helper_syscall(int next_eip_addend)
986 if (!(env->efer & MSR_EFER_SCE)) {
987 raise_exception_err(EXCP06_ILLOP, 0);
989 selector = (env->star >> 32) & 0xffff;
991 if (env->hflags & HF_LMA_MASK) {
994 ECX = env->eip + next_eip_addend;
995 env->regs[11] = compute_eflags();
997 code64 = env->hflags & HF_CS64_MASK;
999 cpu_x86_set_cpl(env, 0);
1000 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1002 DESC_G_MASK | DESC_P_MASK |
1004 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1005 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1007 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1009 DESC_W_MASK | DESC_A_MASK);
1010 env->eflags &= ~env->fmask;
1011 load_eflags(env->eflags, 0);
1013 env->eip = env->lstar;
1015 env->eip = env->cstar;
1019 ECX = (uint32_t)(env->eip + next_eip_addend);
1021 cpu_x86_set_cpl(env, 0);
1022 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1024 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1026 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1027 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1029 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1031 DESC_W_MASK | DESC_A_MASK);
1032 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1033 env->eip = (uint32_t)env->star;
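/* SYSCALL/SYSRET take their selectors from the STAR MSR: bits 47:32 give
   the kernel CS (the kernel SS is that value + 8) and bits 63:48 the base
   used to derive the user CS/SS on SYSRET.  In long mode the target RIP
   comes from LSTAR (or CSTAR for compatibility-mode callers) and the flag
   bits listed in FMASK are cleared; legacy mode jumps to the low 32 bits of
   STAR and just clears IF/RF/VM, as done above. */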
1038 void helper_sysret(int dflag)
1042 if (!(env->efer & MSR_EFER_SCE)) {
1043 raise_exception_err(EXCP06_ILLOP, 0);
1045 cpl = env->hflags & HF_CPL_MASK;
1046 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1047 raise_exception_err(EXCP0D_GPF, 0);
1049 selector = (env->star >> 48) & 0xffff;
1050 #ifdef TARGET_X86_64
1051 if (env->hflags & HF_LMA_MASK) {
1053 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1055 DESC_G_MASK | DESC_P_MASK |
1056 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1057 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1061 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1063 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1064 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1065 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1066 env->eip = (uint32_t)ECX;
1068 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1070 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1071 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1072 DESC_W_MASK | DESC_A_MASK);
1073 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1074 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1075 cpu_x86_set_cpl(env, 3);
1079 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1081 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1082 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1083 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1084 env->eip = (uint32_t)ECX;
1085 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1087 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1088 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1089 DESC_W_MASK | DESC_A_MASK);
1090 env->eflags |= IF_MASK;
1091 cpu_x86_set_cpl(env, 3);
1094 if (kqemu_is_ok(env)) {
1095 if (env->hflags & HF_LMA_MASK)
1096 CC_OP = CC_OP_EFLAGS;
1097 env->exception_index = -1;
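/* In real mode the IDT is the classic interrupt vector table: four bytes
   per vector with the offset at +0 and the segment at +2, and the handler
   only gets FLAGS, CS and IP pushed on the stack, which is what
   do_interrupt_real() below implements. */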
1103 /* real mode interrupt */
1104 static void do_interrupt_real(int intno, int is_int, int error_code,
1105 unsigned int next_eip)
1108 target_ulong ptr, ssp;
1110 uint32_t offset, esp;
1111 uint32_t old_cs, old_eip;
1112 int svm_should_check = 1;
1114 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1116 svm_should_check = 0;
1118 if (svm_should_check
1119 && INTERCEPTEDl(_exceptions, 1 << intno)
1121 raise_interrupt(intno, is_int, error_code, 0);
1123 /* real mode (simpler!) */
1125 if (intno * 4 + 3 > dt->limit)
1126 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1127 ptr = dt->base + intno * 4;
1128 offset = lduw_kernel(ptr);
1129 selector = lduw_kernel(ptr + 2);
1131 ssp = env->segs[R_SS].base;
1136 old_cs = env->segs[R_CS].selector;
1137 /* XXX: use SS segment size ? */
1138 PUSHW(ssp, esp, 0xffff, compute_eflags());
1139 PUSHW(ssp, esp, 0xffff, old_cs);
1140 PUSHW(ssp, esp, 0xffff, old_eip);
1142 /* update processor state */
1143 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1145 env->segs[R_CS].selector = selector;
1146 env->segs[R_CS].base = (selector << 4);
1147 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1150 /* fake user mode interrupt */
1151 void do_interrupt_user(int intno, int is_int, int error_code,
1152 target_ulong next_eip)
1156 int dpl, cpl, shift;
1160 if (env->hflags & HF_LMA_MASK) {
1165 ptr = dt->base + (intno << shift);
1166 e2 = ldl_kernel(ptr + 4);
1168 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1169 cpl = env->hflags & HF_CPL_MASK;
1170 /* check privilege if software int */
1171 if (is_int && dpl < cpl)
1172 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1174 /* Since we only emulate user space, we cannot do more than
1175 exit the emulation with the appropriate exception and error
1182 * Begin execution of an interrupt or exception. is_int is TRUE if coming from
1183 * the int instruction. next_eip is the EIP value AFTER the interrupt
1184 * instruction. It is only relevant if is_int is TRUE.
1186 void do_interrupt(int intno, int is_int, int error_code,
1187 target_ulong next_eip, int is_hw)
1189 if (loglevel & CPU_LOG_INT) {
1190 if ((env->cr[0] & CR0_PE_MASK)) {
1192 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1193 count, intno, error_code, is_int,
1194 env->hflags & HF_CPL_MASK,
1195 env->segs[R_CS].selector, EIP,
1196 (int)env->segs[R_CS].base + EIP,
1197 env->segs[R_SS].selector, ESP);
1198 if (intno == 0x0e) {
1199 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1201 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1203 fprintf(logfile, "\n");
1204 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1209 fprintf(logfile, " code=");
1210 ptr = env->segs[R_CS].base + env->eip;
1211 for(i = 0; i < 16; i++) {
1212 fprintf(logfile, " %02x", ldub(ptr + i));
1214 fprintf(logfile, "\n");
1220 if (env->cr[0] & CR0_PE_MASK) {
1222 if (env->hflags & HF_LMA_MASK) {
1223 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1227 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1230 do_interrupt_real(intno, is_int, error_code, next_eip);
1235 * Check nested exceptions and change to double or triple fault if
1236 * needed. It should only be called if this is not an interrupt.
1237 * Returns the new exception number.
1239 static int check_exception(int intno, int *error_code)
1241 char first_contributory = env->old_exception == 0 ||
1242 (env->old_exception >= 10 &&
1243 env->old_exception <= 13);
1244 char second_contributory = intno == 0 ||
1245 (intno >= 10 && intno <= 13);
1247 if (loglevel & CPU_LOG_INT)
1248 fprintf(logfile, "check_exception old: %x new %x\n",
1249 env->old_exception, intno);
1251 if (env->old_exception == EXCP08_DBLE)
1252 cpu_abort(env, "triple fault");
1254 if ((first_contributory && second_contributory)
1255 || (env->old_exception == EXCP0E_PAGE &&
1256 (second_contributory || (intno == EXCP0E_PAGE)))) {
1257 intno = EXCP08_DBLE;
1261 if (second_contributory || (intno == EXCP0E_PAGE) ||
1262 (intno == EXCP08_DBLE))
1263 env->old_exception = intno;
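/* The rules encoded above: exceptions 0 and 10-13 are "contributory".  A
   contributory exception raised while handling another contributory one, or
   any contributory exception or page fault raised while handling a page
   fault, escalates to a double fault; a further fault while handling the
   double fault is a triple fault and aborts emulation. */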
1269 * Signal an interrupt or exception. It is executed in the main CPU loop.
1270 * is_int is TRUE if coming from the int instruction. next_eip is the
1271 * EIP value AFTER the interrupt instruction. It is only relevant if
1274 void raise_interrupt(int intno, int is_int, int error_code,
1275 int next_eip_addend)
1278 svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1279 intno = check_exception(intno, &error_code);
1282 env->exception_index = intno;
1283 env->error_code = error_code;
1284 env->exception_is_int = is_int;
1285 env->exception_next_eip = env->eip + next_eip_addend;
1289 /* same as raise_exception_err, but do not restore global registers */
1290 static void raise_exception_err_norestore(int exception_index, int error_code)
1292 exception_index = check_exception(exception_index, &error_code);
1294 env->exception_index = exception_index;
1295 env->error_code = error_code;
1296 env->exception_is_int = 0;
1297 env->exception_next_eip = 0;
1298 longjmp(env->jmp_env, 1);
1301 /* shortcuts to generate exceptions */
1303 void (raise_exception_err)(int exception_index, int error_code)
1305 raise_interrupt(exception_index, 0, error_code, 0);
1308 void raise_exception(int exception_index)
1310 raise_interrupt(exception_index, 0, 0, 0);
1315 #if defined(CONFIG_USER_ONLY)
1317 void do_smm_enter(void)
1321 void helper_rsm(void)
1327 #ifdef TARGET_X86_64
1328 #define SMM_REVISION_ID 0x00020064
1330 #define SMM_REVISION_ID 0x00020000
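/* The SMM revision ID is stored in the save state area at offset 0x7efc;
   bit 17 (0x20000) advertises SMBASE relocation, which is why helper_rsm()
   below only reloads env->smbase when that bit is set in the saved value. */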
1333 void do_smm_enter(void)
1335 target_ulong sm_state;
1339 if (loglevel & CPU_LOG_INT) {
1340 fprintf(logfile, "SMM: enter\n");
1341 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1344 env->hflags |= HF_SMM_MASK;
1345 cpu_smm_update(env);
1347 sm_state = env->smbase + 0x8000;
1349 #ifdef TARGET_X86_64
1350 for(i = 0; i < 6; i++) {
1352 offset = 0x7e00 + i * 16;
1353 stw_phys(sm_state + offset, dt->selector);
1354 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1355 stl_phys(sm_state + offset + 4, dt->limit);
1356 stq_phys(sm_state + offset + 8, dt->base);
1359 stq_phys(sm_state + 0x7e68, env->gdt.base);
1360 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1362 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1363 stq_phys(sm_state + 0x7e78, env->ldt.base);
1364 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1365 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1367 stq_phys(sm_state + 0x7e88, env->idt.base);
1368 stl_phys(sm_state + 0x7e84, env->idt.limit);
1370 stw_phys(sm_state + 0x7e90, env->tr.selector);
1371 stq_phys(sm_state + 0x7e98, env->tr.base);
1372 stl_phys(sm_state + 0x7e94, env->tr.limit);
1373 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1375 stq_phys(sm_state + 0x7ed0, env->efer);
1377 stq_phys(sm_state + 0x7ff8, EAX);
1378 stq_phys(sm_state + 0x7ff0, ECX);
1379 stq_phys(sm_state + 0x7fe8, EDX);
1380 stq_phys(sm_state + 0x7fe0, EBX);
1381 stq_phys(sm_state + 0x7fd8, ESP);
1382 stq_phys(sm_state + 0x7fd0, EBP);
1383 stq_phys(sm_state + 0x7fc8, ESI);
1384 stq_phys(sm_state + 0x7fc0, EDI);
1385 for(i = 8; i < 16; i++)
1386 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1387 stq_phys(sm_state + 0x7f78, env->eip);
1388 stl_phys(sm_state + 0x7f70, compute_eflags());
1389 stl_phys(sm_state + 0x7f68, env->dr[6]);
1390 stl_phys(sm_state + 0x7f60, env->dr[7]);
1392 stl_phys(sm_state + 0x7f48, env->cr[4]);
1393 stl_phys(sm_state + 0x7f50, env->cr[3]);
1394 stl_phys(sm_state + 0x7f58, env->cr[0]);
1396 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1397 stl_phys(sm_state + 0x7f00, env->smbase);
1399 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1400 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1401 stl_phys(sm_state + 0x7ff4, compute_eflags());
1402 stl_phys(sm_state + 0x7ff0, env->eip);
1403 stl_phys(sm_state + 0x7fec, EDI);
1404 stl_phys(sm_state + 0x7fe8, ESI);
1405 stl_phys(sm_state + 0x7fe4, EBP);
1406 stl_phys(sm_state + 0x7fe0, ESP);
1407 stl_phys(sm_state + 0x7fdc, EBX);
1408 stl_phys(sm_state + 0x7fd8, EDX);
1409 stl_phys(sm_state + 0x7fd4, ECX);
1410 stl_phys(sm_state + 0x7fd0, EAX);
1411 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1412 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1414 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1415 stl_phys(sm_state + 0x7f64, env->tr.base);
1416 stl_phys(sm_state + 0x7f60, env->tr.limit);
1417 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1419 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1420 stl_phys(sm_state + 0x7f80, env->ldt.base);
1421 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1422 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1424 stl_phys(sm_state + 0x7f74, env->gdt.base);
1425 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1427 stl_phys(sm_state + 0x7f58, env->idt.base);
1428 stl_phys(sm_state + 0x7f54, env->idt.limit);
1430 for(i = 0; i < 6; i++) {
1433 offset = 0x7f84 + i * 12;
1435 offset = 0x7f2c + (i - 3) * 12;
1436 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1437 stl_phys(sm_state + offset + 8, dt->base);
1438 stl_phys(sm_state + offset + 4, dt->limit);
1439 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1441 stl_phys(sm_state + 0x7f14, env->cr[4]);
1443 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1444 stl_phys(sm_state + 0x7ef8, env->smbase);
1446 /* init SMM cpu state */
1448 #ifdef TARGET_X86_64
1450 env->hflags &= ~HF_LMA_MASK;
1452 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1453 env->eip = 0x00008000;
1454 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1456 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1457 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1458 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1459 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1460 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1462 cpu_x86_update_cr0(env,
1463 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1464 cpu_x86_update_cr4(env, 0);
1465 env->dr[7] = 0x00000400;
1466 CC_OP = CC_OP_EFLAGS;
1469 void helper_rsm(void)
1471 target_ulong sm_state;
1475 sm_state = env->smbase + 0x8000;
1476 #ifdef TARGET_X86_64
1477 env->efer = ldq_phys(sm_state + 0x7ed0);
1478 if (env->efer & MSR_EFER_LMA)
1479 env->hflags |= HF_LMA_MASK;
1481 env->hflags &= ~HF_LMA_MASK;
1483 for(i = 0; i < 6; i++) {
1484 offset = 0x7e00 + i * 16;
1485 cpu_x86_load_seg_cache(env, i,
1486 lduw_phys(sm_state + offset),
1487 ldq_phys(sm_state + offset + 8),
1488 ldl_phys(sm_state + offset + 4),
1489 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1492 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1493 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1495 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1496 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1497 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1498 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1500 env->idt.base = ldq_phys(sm_state + 0x7e88);
1501 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1503 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1504 env->tr.base = ldq_phys(sm_state + 0x7e98);
1505 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1506 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1508 EAX = ldq_phys(sm_state + 0x7ff8);
1509 ECX = ldq_phys(sm_state + 0x7ff0);
1510 EDX = ldq_phys(sm_state + 0x7fe8);
1511 EBX = ldq_phys(sm_state + 0x7fe0);
1512 ESP = ldq_phys(sm_state + 0x7fd8);
1513 EBP = ldq_phys(sm_state + 0x7fd0);
1514 ESI = ldq_phys(sm_state + 0x7fc8);
1515 EDI = ldq_phys(sm_state + 0x7fc0);
1516 for(i = 8; i < 16; i++)
1517 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1518 env->eip = ldq_phys(sm_state + 0x7f78);
1519 load_eflags(ldl_phys(sm_state + 0x7f70),
1520 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1521 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1522 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1524 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1525 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1526 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1528 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1529 if (val & 0x20000) {
1530 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1533 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1534 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1535 load_eflags(ldl_phys(sm_state + 0x7ff4),
1536 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1537 env->eip = ldl_phys(sm_state + 0x7ff0);
1538 EDI = ldl_phys(sm_state + 0x7fec);
1539 ESI = ldl_phys(sm_state + 0x7fe8);
1540 EBP = ldl_phys(sm_state + 0x7fe4);
1541 ESP = ldl_phys(sm_state + 0x7fe0);
1542 EBX = ldl_phys(sm_state + 0x7fdc);
1543 EDX = ldl_phys(sm_state + 0x7fd8);
1544 ECX = ldl_phys(sm_state + 0x7fd4);
1545 EAX = ldl_phys(sm_state + 0x7fd0);
1546 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1547 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1549 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1550 env->tr.base = ldl_phys(sm_state + 0x7f64);
1551 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1552 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1554 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1555 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1556 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1557 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1559 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1560 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1562 env->idt.base = ldl_phys(sm_state + 0x7f58);
1563 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1565 for(i = 0; i < 6; i++) {
1567 offset = 0x7f84 + i * 12;
1569 offset = 0x7f2c + (i - 3) * 12;
1570 cpu_x86_load_seg_cache(env, i,
1571 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1572 ldl_phys(sm_state + offset + 8),
1573 ldl_phys(sm_state + offset + 4),
1574 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1576 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1578 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1579 if (val & 0x20000) {
1580 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1583 CC_OP = CC_OP_EFLAGS;
1584 env->hflags &= ~HF_SMM_MASK;
1585 cpu_smm_update(env);
1587 if (loglevel & CPU_LOG_INT) {
1588 fprintf(logfile, "SMM: after RSM\n");
1589 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1593 #endif /* !CONFIG_USER_ONLY */
1596 #ifdef BUGGY_GCC_DIV64
1597 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1598 call it from another function */
1599 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1605 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1612 void helper_divl_EAX_T0(target_ulong t0)
1614 unsigned int den, r;
1617 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1620 raise_exception(EXCP00_DIVZ);
1622 #ifdef BUGGY_GCC_DIV64
1623 r = div32(&q, num, den);
1629 raise_exception(EXCP00_DIVZ);
1634 void helper_idivl_EAX_T0(target_ulong t0)
1639 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1642 raise_exception(EXCP00_DIVZ);
1644 #ifdef BUGGY_GCC_DIV64
1645 r = idiv32(&q, num, den);
1650 if (q != (int32_t)q)
1651 raise_exception(EXCP00_DIVZ);
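/* CMPXCHG8B compares EDX:EAX with the 64-bit memory operand; on a match ZF
   is set and ECX:EBX is stored back, otherwise ZF is cleared and the memory
   value is loaded into EDX:EAX.  The helper below reproduces that, with A0
   holding the already computed operand address. */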
1656 void helper_cmpxchg8b(void)
1661 eflags = cc_table[CC_OP].compute_all();
1663 if (d == (((uint64_t)EDX << 32) | EAX)) {
1664 stq(A0, ((uint64_t)ECX << 32) | EBX);
1674 void helper_single_step(void)
1676 env->dr[6] |= 0x4000;
1677 raise_exception(EXCP01_SSTP);
1680 void helper_cpuid(void)
1683 index = (uint32_t)EAX;
1685 /* test if maximum index reached */
1686 if (index & 0x80000000) {
1687 if (index > env->cpuid_xlevel)
1688 index = env->cpuid_level;
1690 if (index > env->cpuid_level)
1691 index = env->cpuid_level;
1696 EAX = env->cpuid_level;
1697 EBX = env->cpuid_vendor1;
1698 EDX = env->cpuid_vendor2;
1699 ECX = env->cpuid_vendor3;
1702 EAX = env->cpuid_version;
1703 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1704 ECX = env->cpuid_ext_features;
1705 EDX = env->cpuid_features;
1708 /* cache info: needed for Pentium Pro compatibility */
1715 EAX = env->cpuid_xlevel;
1716 EBX = env->cpuid_vendor1;
1717 EDX = env->cpuid_vendor2;
1718 ECX = env->cpuid_vendor3;
1721 EAX = env->cpuid_features;
1723 ECX = env->cpuid_ext3_features;
1724 EDX = env->cpuid_ext2_features;
1729 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1730 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1731 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1732 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1735 /* cache info (L1 cache) */
1742 /* cache info (L2 cache) */
1749 /* virtual & phys address size in low 2 bytes. */
1750 /* XXX: This value must match the one used in the MMU code. */
1751 #if defined(TARGET_X86_64)
1752 # if defined(USE_KQEMU)
1753 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1755 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1756 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1759 # if defined(USE_KQEMU)
1760 EAX = 0x00000020; /* 32 bits physical */
1762 EAX = 0x00000024; /* 36 bits physical */
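/* CPUID leaf 0x80000008 reports the physical address width in EAX[7:0] and
   the linear address width in EAX[15:8], so 0x00003028 above means 48-bit
   virtual / 40-bit physical and 0x00000024 means 36-bit physical. */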
1776 /* reserved values: zero */
1785 void helper_enter_level(int level, int data32)
1788 uint32_t esp_mask, esp, ebp;
1790 esp_mask = get_sp_mask(env->segs[R_SS].flags);
1791 ssp = env->segs[R_SS].base;
1800 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
1803 stl(ssp + (esp & esp_mask), T1);
1810 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
1813 stw(ssp + (esp & esp_mask), T1);
1817 #ifdef TARGET_X86_64
1818 void helper_enter64_level(int level, int data64)
1820 target_ulong esp, ebp;
1840 stw(esp, lduw(ebp));
1848 void helper_lldt_T0(void)
1853 int index, entry_limit;
1856 selector = T0 & 0xffff;
1857 if ((selector & 0xfffc) == 0) {
1858 /* XXX: NULL selector case: invalid LDT */
1863 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1865 index = selector & ~7;
1866 #ifdef TARGET_X86_64
1867 if (env->hflags & HF_LMA_MASK)
1872 if ((index + entry_limit) > dt->limit)
1873 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1874 ptr = dt->base + index;
1875 e1 = ldl_kernel(ptr);
1876 e2 = ldl_kernel(ptr + 4);
1877 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
1878 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1879 if (!(e2 & DESC_P_MASK))
1880 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1881 #ifdef TARGET_X86_64
1882 if (env->hflags & HF_LMA_MASK) {
1884 e3 = ldl_kernel(ptr + 8);
1885 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1886 env->ldt.base |= (target_ulong)e3 << 32;
1890 load_seg_cache_raw_dt(&env->ldt, e1, e2);
1893 env->ldt.selector = selector;
1896 void helper_ltr_T0(void)
1901 int index, type, entry_limit;
1904 selector = T0 & 0xffff;
1905 if ((selector & 0xfffc) == 0) {
1906 /* NULL selector case: invalid TR */
1912 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1914 index = selector & ~7;
1915 #ifdef TARGET_X86_64
1916 if (env->hflags & HF_LMA_MASK)
1921 if ((index + entry_limit) > dt->limit)
1922 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1923 ptr = dt->base + index;
1924 e1 = ldl_kernel(ptr);
1925 e2 = ldl_kernel(ptr + 4);
1926 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
1927 if ((e2 & DESC_S_MASK) ||
1928 (type != 1 && type != 9))
1929 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1930 if (!(e2 & DESC_P_MASK))
1931 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1932 #ifdef TARGET_X86_64
1933 if (env->hflags & HF_LMA_MASK) {
1935 e3 = ldl_kernel(ptr + 8);
1936 e4 = ldl_kernel(ptr + 12);
1937 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
1938 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1939 load_seg_cache_raw_dt(&env->tr, e1, e2);
1940 env->tr.base |= (target_ulong)e3 << 32;
1944 load_seg_cache_raw_dt(&env->tr, e1, e2);
1946 e2 |= DESC_TSS_BUSY_MASK;
1947 stl_kernel(ptr + 4, e2);
1949 env->tr.selector = selector;
1952 /* only works in protected mode, outside VM86. seg_reg must be != R_CS */
1953 void load_seg(int seg_reg, int selector)
1962 cpl = env->hflags & HF_CPL_MASK;
1963 if ((selector & 0xfffc) == 0) {
1964 /* null selector case */
1966 #ifdef TARGET_X86_64
1967 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
1970 raise_exception_err(EXCP0D_GPF, 0);
1971 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
1978 index = selector & ~7;
1979 if ((index + 7) > dt->limit)
1980 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1981 ptr = dt->base + index;
1982 e1 = ldl_kernel(ptr);
1983 e2 = ldl_kernel(ptr + 4);
1985 if (!(e2 & DESC_S_MASK))
1986 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1988 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1989 if (seg_reg == R_SS) {
1990 /* must be a writable data segment */
1991 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
1992 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1993 if (rpl != cpl || dpl != cpl)
1994 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1996 /* must be readable segment */
1997 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
1998 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2000 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2001 /* if not a conforming code segment, check the access rights */
2002 if (dpl < cpl || dpl < rpl)
2003 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2007 if (!(e2 & DESC_P_MASK)) {
2008 if (seg_reg == R_SS)
2009 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2011 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2014 /* set the access bit if not already set */
2015 if (!(e2 & DESC_A_MASK)) {
2017 stl_kernel(ptr + 4, e2);
2020 cpu_x86_load_seg_cache(env, seg_reg, selector,
2021 get_seg_base(e1, e2),
2022 get_seg_limit(e1, e2),
2025 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2026 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2031 /* protected mode jump */
2032 void helper_ljmp_protected_T0_T1(int next_eip_addend)
2034 int new_cs, gate_cs, type;
2035 uint32_t e1, e2, cpl, dpl, rpl, limit;
2036 target_ulong new_eip, next_eip;
2040 if ((new_cs & 0xfffc) == 0)
2041 raise_exception_err(EXCP0D_GPF, 0);
2042 if (load_segment(&e1, &e2, new_cs) != 0)
2043 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2044 cpl = env->hflags & HF_CPL_MASK;
2045 if (e2 & DESC_S_MASK) {
2046 if (!(e2 & DESC_CS_MASK))
2047 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2048 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2049 if (e2 & DESC_C_MASK) {
2050 /* conforming code segment */
2052 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2054 /* non conforming code segment */
2057 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2059 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2061 if (!(e2 & DESC_P_MASK))
2062 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2063 limit = get_seg_limit(e1, e2);
2064 if (new_eip > limit &&
2065 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2066 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2067 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2068 get_seg_base(e1, e2), limit, e2);
2071 /* jump to call or task gate */
2072 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2074 cpl = env->hflags & HF_CPL_MASK;
2075 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2077 case 1: /* 286 TSS */
2078 case 9: /* 386 TSS */
2079 case 5: /* task gate */
2080 if (dpl < cpl || dpl < rpl)
2081 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2082 next_eip = env->eip + next_eip_addend;
2083 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2084 CC_OP = CC_OP_EFLAGS;
2086 case 4: /* 286 call gate */
2087 case 12: /* 386 call gate */
2088 if ((dpl < cpl) || (dpl < rpl))
2089 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2090 if (!(e2 & DESC_P_MASK))
2091 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2093 new_eip = (e1 & 0xffff);
2095 new_eip |= (e2 & 0xffff0000);
2096 if (load_segment(&e1, &e2, gate_cs) != 0)
2097 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2098 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2099 /* must be code segment */
2100 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2101 (DESC_S_MASK | DESC_CS_MASK)))
2102 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2103 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2104 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2105 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2106 if (!(e2 & DESC_P_MASK))
2107 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2108 limit = get_seg_limit(e1, e2);
2109 if (new_eip > limit)
2110 raise_exception_err(EXCP0D_GPF, 0);
2111 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2112 get_seg_base(e1, e2), limit, e2);
2116 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2122 /* real mode call */
2123 void helper_lcall_real_T0_T1(int shift, int next_eip)
2125 int new_cs, new_eip;
2126 uint32_t esp, esp_mask;
2132 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2133 ssp = env->segs[R_SS].base;
2135 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2136 PUSHL(ssp, esp, esp_mask, next_eip);
2138 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2139 PUSHW(ssp, esp, esp_mask, next_eip);
2142 SET_ESP(esp, esp_mask);
2144 env->segs[R_CS].selector = new_cs;
2145 env->segs[R_CS].base = (new_cs << 4);
2148 /* protected mode call */
2149 void helper_lcall_protected_T0_T1(int shift, int next_eip_addend)
2151 int new_cs, new_stack, i;
2152 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2153 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2154 uint32_t val, limit, old_sp_mask;
2155 target_ulong ssp, old_ssp, next_eip, new_eip;
2159 next_eip = env->eip + next_eip_addend;
2161 if (loglevel & CPU_LOG_PCALL) {
2162 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2163 new_cs, (uint32_t)new_eip, shift);
2164 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2167 if ((new_cs & 0xfffc) == 0)
2168 raise_exception_err(EXCP0D_GPF, 0);
2169 if (load_segment(&e1, &e2, new_cs) != 0)
2170 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2171 cpl = env->hflags & HF_CPL_MASK;
2173 if (loglevel & CPU_LOG_PCALL) {
2174 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2177 if (e2 & DESC_S_MASK) {
2178 if (!(e2 & DESC_CS_MASK))
2179 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2180 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2181 if (e2 & DESC_C_MASK) {
2182 /* conforming code segment */
2184 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2186 /* non conforming code segment */
2189 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2191 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2193 if (!(e2 & DESC_P_MASK))
2194 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2196 #ifdef TARGET_X86_64
2197 /* XXX: check 16/32 bit cases in long mode */
2202 PUSHQ(rsp, env->segs[R_CS].selector);
2203 PUSHQ(rsp, next_eip);
2204 /* from this point, not restartable */
2206 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2207 get_seg_base(e1, e2),
2208 get_seg_limit(e1, e2), e2);
2214 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2215 ssp = env->segs[R_SS].base;
2217 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2218 PUSHL(ssp, sp, sp_mask, next_eip);
2220 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2221 PUSHW(ssp, sp, sp_mask, next_eip);
2224 limit = get_seg_limit(e1, e2);
2225 if (new_eip > limit)
2226 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2227 /* from this point, not restartable */
2228 SET_ESP(sp, sp_mask);
2229 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2230 get_seg_base(e1, e2), limit, e2);
2234 /* check gate type */
2235 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2236 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2239 case 1: /* available 286 TSS */
2240 case 9: /* available 386 TSS */
2241 case 5: /* task gate */
2242 if (dpl < cpl || dpl < rpl)
2243 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2244 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2245 CC_OP = CC_OP_EFLAGS;
2247 case 4: /* 286 call gate */
2248 case 12: /* 386 call gate */
2251 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2256 if (dpl < cpl || dpl < rpl)
2257 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2258 /* check valid bit */
2259 if (!(e2 & DESC_P_MASK))
2260 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2261 selector = e1 >> 16;
2262 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2263 param_count = e2 & 0x1f;
2264 if ((selector & 0xfffc) == 0)
2265 raise_exception_err(EXCP0D_GPF, 0);
2267 if (load_segment(&e1, &e2, selector) != 0)
2268 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2269 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2270 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2271 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2273 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2274 if (!(e2 & DESC_P_MASK))
2275 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2277 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2278 /* to inner privilege */
2279 get_ss_esp_from_tss(&ss, &sp, dpl);
2281 if (loglevel & CPU_LOG_PCALL)
2282 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2283 ss, sp, param_count, ESP);
2285 if ((ss & 0xfffc) == 0)
2286 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2287 if ((ss & 3) != dpl)
2288 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2289 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2290 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2291 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2293 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2294 if (!(ss_e2 & DESC_S_MASK) ||
2295 (ss_e2 & DESC_CS_MASK) ||
2296 !(ss_e2 & DESC_W_MASK))
2297 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2298 if (!(ss_e2 & DESC_P_MASK))
2299 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2301 // push_size = ((param_count * 2) + 8) << shift;
2303 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2304 old_ssp = env->segs[R_SS].base;
2306 sp_mask = get_sp_mask(ss_e2);
2307 ssp = get_seg_base(ss_e1, ss_e2);
2309 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2310 PUSHL(ssp, sp, sp_mask, ESP);
2311 for(i = param_count - 1; i >= 0; i--) {
2312 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2313 PUSHL(ssp, sp, sp_mask, val);
2316 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2317 PUSHW(ssp, sp, sp_mask, ESP);
2318 for(i = param_count - 1; i >= 0; i--) {
2319 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2320 PUSHW(ssp, sp, sp_mask, val);
2325 /* to same privilege */
2327 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2328 ssp = env->segs[R_SS].base;
2329 // push_size = (4 << shift);
2334 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2335 PUSHL(ssp, sp, sp_mask, next_eip);
2337 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2338 PUSHW(ssp, sp, sp_mask, next_eip);
2341 /* from this point, not restartable */
2344 ss = (ss & ~3) | dpl;
2345 cpu_x86_load_seg_cache(env, R_SS, ss,
2347 get_seg_limit(ss_e1, ss_e2),
2351 selector = (selector & ~3) | dpl;
2352 cpu_x86_load_seg_cache(env, R_CS, selector,
2353 get_seg_base(e1, e2),
2354 get_seg_limit(e1, e2),
2356 cpu_x86_set_cpl(env, dpl);
2357 SET_ESP(sp, sp_mask);
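/* SET_ESP (a macro defined earlier in this file, not shown here) is
   assumed to update only the bits of ESP selected by sp_mask, i.e.
   conceptually ESP = (ESP & ~sp_mask) | (sp & sp_mask), matching the
   explicit form used in helper_iret_real below.  Example: with a
   16-bit stack segment (sp_mask = 0xffff), ESP = 0x12345678 and
   sp = 0xff00 yields ESP = 0x1234ff00, so the upper half of ESP is
   preserved. */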
2361 if (kqemu_is_ok(env)) {
2362 env->exception_index = -1;
2368 /* real and vm86 mode iret */
2369 void helper_iret_real(int shift)
2371 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2375 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2377 ssp = env->segs[R_SS].base;
2380 POPL(ssp, sp, sp_mask, new_eip);
2381 POPL(ssp, sp, sp_mask, new_cs);
2383 POPL(ssp, sp, sp_mask, new_eflags);
2386 POPW(ssp, sp, sp_mask, new_eip);
2387 POPW(ssp, sp, sp_mask, new_cs);
2388 POPW(ssp, sp, sp_mask, new_eflags);
2390 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2391 load_seg_vm(R_CS, new_cs);
2393 if (env->eflags & VM_MASK)
2394 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2396 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2398 eflags_mask &= 0xffff;
2399 load_eflags(new_eflags, eflags_mask);
2400 env->hflags &= ~HF_NMI_MASK;
2403 static inline void validate_seg(int seg_reg, int cpl)
2408 /* XXX: on x86_64, we do not want to nullify FS and GS because
2409 they may still contain a valid base. I would be interested to
2410 know how a real x86_64 CPU behaves */
2411 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2412 (env->segs[seg_reg].selector & 0xfffc) == 0)
2415 e2 = env->segs[seg_reg].flags;
2416 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2417 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2418 /* data or non conforming code segment */
2420 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2425 /* protected mode iret */
2426 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2428 uint32_t new_cs, new_eflags, new_ss;
2429 uint32_t new_es, new_ds, new_fs, new_gs;
2430 uint32_t e1, e2, ss_e1, ss_e2;
2431 int cpl, dpl, rpl, eflags_mask, iopl;
2432 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2434 #ifdef TARGET_X86_64
2439 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2441 ssp = env->segs[R_SS].base;
2442 new_eflags = 0; /* avoid warning */
2443 #ifdef TARGET_X86_64
2449 POPQ(sp, new_eflags);
2455 POPL(ssp, sp, sp_mask, new_eip);
2456 POPL(ssp, sp, sp_mask, new_cs);
2459 POPL(ssp, sp, sp_mask, new_eflags);
2460 if (new_eflags & VM_MASK)
2461 goto return_to_vm86;
2465 POPW(ssp, sp, sp_mask, new_eip);
2466 POPW(ssp, sp, sp_mask, new_cs);
2468 POPW(ssp, sp, sp_mask, new_eflags);
2471 if (loglevel & CPU_LOG_PCALL) {
2472 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2473 new_cs, new_eip, shift, addend);
2474 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2477 if ((new_cs & 0xfffc) == 0)
2478 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2479 if (load_segment(&e1, &e2, new_cs) != 0)
2480 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2481 if (!(e2 & DESC_S_MASK) ||
2482 !(e2 & DESC_CS_MASK))
2483 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2484 cpl = env->hflags & HF_CPL_MASK;
2487 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2488 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 if (e2 & DESC_C_MASK) {
2491 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2494 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2496 if (!(e2 & DESC_P_MASK))
2497 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2500 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2501 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2502 /* return to same privilege level */
2503 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2504 get_seg_base(e1, e2),
2505 get_seg_limit(e1, e2),
2508 /* return to different privilege level */
2509 #ifdef TARGET_X86_64
2518 POPL(ssp, sp, sp_mask, new_esp);
2519 POPL(ssp, sp, sp_mask, new_ss);
2523 POPW(ssp, sp, sp_mask, new_esp);
2524 POPW(ssp, sp, sp_mask, new_ss);
2527 if (loglevel & CPU_LOG_PCALL) {
2528 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2532 if ((new_ss & 0xfffc) == 0) {
2533 #ifdef TARGET_X86_64
2534 /* NULL ss is allowed in long mode if cpl != 3 */
2535 /* XXX: test CS64 ? */
2536 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2537 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2539 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2540 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2541 DESC_W_MASK | DESC_A_MASK);
2542 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2546 raise_exception_err(EXCP0D_GPF, 0);
2549 if ((new_ss & 3) != rpl)
2550 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2551 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2552 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2553 if (!(ss_e2 & DESC_S_MASK) ||
2554 (ss_e2 & DESC_CS_MASK) ||
2555 !(ss_e2 & DESC_W_MASK))
2556 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2557 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2559 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2560 if (!(ss_e2 & DESC_P_MASK))
2561 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2562 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2563 get_seg_base(ss_e1, ss_e2),
2564 get_seg_limit(ss_e1, ss_e2),
2568 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2569 get_seg_base(e1, e2),
2570 get_seg_limit(e1, e2),
2572 cpu_x86_set_cpl(env, rpl);
2574 #ifdef TARGET_X86_64
2575 if (env->hflags & HF_CS64_MASK)
2579 sp_mask = get_sp_mask(ss_e2);
2581 /* validate data segments */
2582 validate_seg(R_ES, rpl);
2583 validate_seg(R_DS, rpl);
2584 validate_seg(R_FS, rpl);
2585 validate_seg(R_GS, rpl);
2589 SET_ESP(sp, sp_mask);
2592 /* NOTE: 'cpl' is the _old_ CPL */
2593 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2595 eflags_mask |= IOPL_MASK;
2596 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2598 eflags_mask |= IF_MASK;
2600 eflags_mask &= 0xffff;
2601 load_eflags(new_eflags, eflags_mask);
2606 POPL(ssp, sp, sp_mask, new_esp);
2607 POPL(ssp, sp, sp_mask, new_ss);
2608 POPL(ssp, sp, sp_mask, new_es);
2609 POPL(ssp, sp, sp_mask, new_ds);
2610 POPL(ssp, sp, sp_mask, new_fs);
2611 POPL(ssp, sp, sp_mask, new_gs);
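/* Layout of the iret frame consumed when returning to vm86 mode (all
   elements 32 bits, lowest address first):
       EIP, CS, EFLAGS, ESP, SS, ES, DS, FS, GS
   EIP/CS/EFLAGS were already popped earlier in helper_ret_protected;
   the remaining six values are popped here before the vm86 segment
   registers are reloaded below. */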
2613 /* modify processor state */
2614 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2615 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2616 load_seg_vm(R_CS, new_cs & 0xffff);
2617 cpu_x86_set_cpl(env, 3);
2618 load_seg_vm(R_SS, new_ss & 0xffff);
2619 load_seg_vm(R_ES, new_es & 0xffff);
2620 load_seg_vm(R_DS, new_ds & 0xffff);
2621 load_seg_vm(R_FS, new_fs & 0xffff);
2622 load_seg_vm(R_GS, new_gs & 0xffff);
2624 env->eip = new_eip & 0xffff;
2628 void helper_iret_protected(int shift, int next_eip)
2630 int tss_selector, type;
2633 /* specific case for TSS */
2634 if (env->eflags & NT_MASK) {
2635 #ifdef TARGET_X86_64
2636 if (env->hflags & HF_LMA_MASK)
2637 raise_exception_err(EXCP0D_GPF, 0);
2639 tss_selector = lduw_kernel(env->tr.base + 0);
2640 if (tss_selector & 4)
2641 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2642 if (load_segment(&e1, &e2, tss_selector) != 0)
2643 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2644 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2645 /* NOTE: we check both segment and busy TSS */
2647 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2648 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2650 helper_ret_protected(shift, 1, 0);
2652 env->hflags &= ~HF_NMI_MASK;
2654 if (kqemu_is_ok(env)) {
2655 CC_OP = CC_OP_EFLAGS;
2656 env->exception_index = -1;
2662 void helper_lret_protected(int shift, int addend)
2664 helper_ret_protected(shift, 0, addend);
2666 if (kqemu_is_ok(env)) {
2667 env->exception_index = -1;
2673 void helper_sysenter(void)
2675 if (env->sysenter_cs == 0) {
2676 raise_exception_err(EXCP0D_GPF, 0);
2678 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2679 cpu_x86_set_cpl(env, 0);
2680 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2682 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2684 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2685 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2687 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2689 DESC_W_MASK | DESC_A_MASK);
2690 ESP = env->sysenter_esp;
2691 EIP = env->sysenter_eip;
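/* The SYSENTER/SYSEXIT target segments are derived from a single MSR
   (IA32_SYSENTER_CS) by fixed offsets, as in the selector arithmetic
   above and in helper_sysexit below.  Illustrative example, assuming
   sysenter_cs = 0x0008:
       SYSENTER:  CS = 0x0008,      SS = 0x0010
       SYSEXIT:   CS = 0x0018 | 3,  SS = 0x0020 | 3
   i.e. the kernel and user code/stack descriptors are expected to be
   four consecutive GDT entries. */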
2694 void helper_sysexit(void)
2698 cpl = env->hflags & HF_CPL_MASK;
2699 if (env->sysenter_cs == 0 || cpl != 0) {
2700 raise_exception_err(EXCP0D_GPF, 0);
2702 cpu_x86_set_cpl(env, 3);
2703 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2705 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2706 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2707 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2708 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2710 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2711 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2712 DESC_W_MASK | DESC_A_MASK);
2716 if (kqemu_is_ok(env)) {
2717 env->exception_index = -1;
2723 void helper_movl_crN_T0(int reg)
2725 #if !defined(CONFIG_USER_ONLY)
2728 cpu_x86_update_cr0(env, T0);
2731 cpu_x86_update_cr3(env, T0);
2734 cpu_x86_update_cr4(env, T0);
2737 cpu_set_apic_tpr(env, T0);
2748 void helper_movl_drN_T0(int reg)
2753 void helper_invlpg(target_ulong addr)
2755 cpu_x86_flush_tlb(env, addr);
2758 void helper_rdtsc(void)
2762 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2763 raise_exception(EXCP0D_GPF);
2765 val = cpu_get_tsc(env);
2766 EAX = (uint32_t)(val);
2767 EDX = (uint32_t)(val >> 32);
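/* RDTSC returns the 64-bit time stamp counter split across EDX:EAX, as
   done above.  Example: val = 0x0000001234567890 gives
   EAX = 0x34567890 and EDX = 0x00000012. */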
2770 void helper_rdpmc(void)
2772 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
2773 raise_exception(EXCP0D_GPF);
2776 if (!svm_check_intercept_param(SVM_EXIT_RDPMC, 0)) {
2777 /* currently unimplemented */
2778 raise_exception_err(EXCP06_ILLOP, 0);
2782 #if defined(CONFIG_USER_ONLY)
2783 void helper_wrmsr(void)
2787 void helper_rdmsr(void)
2791 void helper_wrmsr(void)
2795 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
2797 switch((uint32_t)ECX) {
2798 case MSR_IA32_SYSENTER_CS:
2799 env->sysenter_cs = val & 0xffff;
2801 case MSR_IA32_SYSENTER_ESP:
2802 env->sysenter_esp = val;
2804 case MSR_IA32_SYSENTER_EIP:
2805 env->sysenter_eip = val;
2807 case MSR_IA32_APICBASE:
2808 cpu_set_apic_base(env, val);
2812 uint64_t update_mask;
2814 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
2815 update_mask |= MSR_EFER_SCE;
2816 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
2817 update_mask |= MSR_EFER_LME;
2818 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
2819 update_mask |= MSR_EFER_FFXSR;
2820 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
2821 update_mask |= MSR_EFER_NXE;
2822 env->efer = (env->efer & ~update_mask) |
2823 (val & update_mask);
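/* Writes to MSR_EFER only take effect for bits whose corresponding
   feature is advertised in CPUID (update_mask built above); everything
   else is silently preserved.  For instance, on a guest CPU model
   without CPUID_EXT2_LM, an attempt to set MSR_EFER_LME is dropped,
   because MSR_EFER_LME is never added to update_mask and
       env->efer = (env->efer & ~update_mask) | (val & update_mask);
   keeps the old value of that bit. */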
2832 case MSR_VM_HSAVE_PA:
2833 env->vm_hsave = val;
2835 #ifdef TARGET_X86_64
2846 env->segs[R_FS].base = val;
2849 env->segs[R_GS].base = val;
2851 case MSR_KERNELGSBASE:
2852 env->kernelgsbase = val;
2856 /* XXX: exception ? */
2861 void helper_rdmsr(void)
2864 switch((uint32_t)ECX) {
2865 case MSR_IA32_SYSENTER_CS:
2866 val = env->sysenter_cs;
2868 case MSR_IA32_SYSENTER_ESP:
2869 val = env->sysenter_esp;
2871 case MSR_IA32_SYSENTER_EIP:
2872 val = env->sysenter_eip;
2874 case MSR_IA32_APICBASE:
2875 val = cpu_get_apic_base(env);
2886 case MSR_VM_HSAVE_PA:
2887 val = env->vm_hsave;
2889 #ifdef TARGET_X86_64
2900 val = env->segs[R_FS].base;
2903 val = env->segs[R_GS].base;
2905 case MSR_KERNELGSBASE:
2906 val = env->kernelgsbase;
2910 /* XXX: exception ? */
2914 EAX = (uint32_t)(val);
2915 EDX = (uint32_t)(val >> 32);
2919 void helper_lsl(void)
2921 unsigned int selector, limit;
2922 uint32_t e1, e2, eflags;
2923 int rpl, dpl, cpl, type;
2925 eflags = cc_table[CC_OP].compute_all();
2926 selector = T0 & 0xffff;
2927 if (load_segment(&e1, &e2, selector) != 0)
2930 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2931 cpl = env->hflags & HF_CPL_MASK;
2932 if (e2 & DESC_S_MASK) {
2933 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2936 if (dpl < cpl || dpl < rpl)
2940 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2951 if (dpl < cpl || dpl < rpl) {
2953 CC_SRC = eflags & ~CC_Z;
2957 limit = get_seg_limit(e1, e2);
2959 CC_SRC = eflags | CC_Z;
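/* LSL reports its outcome only through ZF: on success the limit
   computed above is handed back to the caller and ZF is forced to 1
   (eflags | CC_Z); on any failed descriptor check ZF is cleared
   (eflags & ~CC_Z) and the destination register is left unchanged.
   helper_lar, helper_verr and helper_verw below use the same CC_SRC
   pattern. */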
2962 void helper_lar(void)
2964 unsigned int selector;
2965 uint32_t e1, e2, eflags;
2966 int rpl, dpl, cpl, type;
2968 eflags = cc_table[CC_OP].compute_all();
2969 selector = T0 & 0xffff;
2970 if ((selector & 0xfffc) == 0)
2972 if (load_segment(&e1, &e2, selector) != 0)
2975 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2976 cpl = env->hflags & HF_CPL_MASK;
2977 if (e2 & DESC_S_MASK) {
2978 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
2981 if (dpl < cpl || dpl < rpl)
2985 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2999 if (dpl < cpl || dpl < rpl) {
3001 CC_SRC = eflags & ~CC_Z;
3005 T1 = e2 & 0x00f0ff00;
3006 CC_SRC = eflags | CC_Z;
3009 void helper_verr(void)
3011 unsigned int selector;
3012 uint32_t e1, e2, eflags;
3015 eflags = cc_table[CC_OP].compute_all();
3016 selector = T0 & 0xffff;
3017 if ((selector & 0xfffc) == 0)
3019 if (load_segment(&e1, &e2, selector) != 0)
3021 if (!(e2 & DESC_S_MASK))
3024 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3025 cpl = env->hflags & HF_CPL_MASK;
3026 if (e2 & DESC_CS_MASK) {
3027 if (!(e2 & DESC_R_MASK))
3029 if (!(e2 & DESC_C_MASK)) {
3030 if (dpl < cpl || dpl < rpl)
3034 if (dpl < cpl || dpl < rpl) {
3036 CC_SRC = eflags & ~CC_Z;
3040 CC_SRC = eflags | CC_Z;
3043 void helper_verw(void)
3045 unsigned int selector;
3046 uint32_t e1, e2, eflags;
3049 eflags = cc_table[CC_OP].compute_all();
3050 selector = T0 & 0xffff;
3051 if ((selector & 0xfffc) == 0)
3053 if (load_segment(&e1, &e2, selector) != 0)
3055 if (!(e2 & DESC_S_MASK))
3058 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3059 cpl = env->hflags & HF_CPL_MASK;
3060 if (e2 & DESC_CS_MASK) {
3063 if (dpl < cpl || dpl < rpl)
3065 if (!(e2 & DESC_W_MASK)) {
3067 CC_SRC = eflags & ~CC_Z;
3071 CC_SRC = eflags | CC_Z;
3076 void helper_fldt_ST0_A0(void)
3079 new_fpstt = (env->fpstt - 1) & 7;
3080 env->fpregs[new_fpstt].d = helper_fldt(A0);
3081 env->fpstt = new_fpstt;
3082 env->fptags[new_fpstt] = 0; /* validate stack entry */
3085 void helper_fstt_ST0_A0(void)
3087 helper_fstt(ST0, A0);
3090 static void fpu_set_exception(int mask)
3093 if (env->fpus & (~env->fpuc & FPUC_EM))
3094 env->fpus |= FPUS_SE | FPUS_B;
3097 CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3100 fpu_set_exception(FPUS_ZE);
3104 void fpu_raise_exception(void)
3106 if (env->cr[0] & CR0_NE_MASK) {
3107 raise_exception(EXCP10_COPR);
3109 #if !defined(CONFIG_USER_ONLY)
3118 void helper_fbld_ST0_A0(void)
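/* FBLD loads a ten-byte packed-BCD operand: bytes 0..8 each hold two
   decimal digits (low digit in the low nibble, least significant byte
   first) and bit 7 of byte 9 carries the sign.  The loop below walks
   from the most significant byte down and accumulates in binary, e.g.
   bytes {0x34, 0x12, 0, ...} give ((1*10 + 2) * 100) + 3*10 + 4 = 1234,
   negated afterwards if the sign bit is set. */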
3126 for(i = 8; i >= 0; i--) {
3128 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3131 if (ldub(A0 + 9) & 0x80)
3137 void helper_fbst_ST0_A0(void)
3140 target_ulong mem_ref, mem_end;
3143 val = floatx_to_int64(ST0, &env->fp_status);
3145 mem_end = mem_ref + 9;
3152 while (mem_ref < mem_end) {
3157 v = ((v / 10) << 4) | (v % 10);
3160 while (mem_ref < mem_end) {
3165 void helper_f2xm1(void)
3167 ST0 = pow(2.0,ST0) - 1.0;
3170 void helper_fyl2x(void)
3172 CPU86_LDouble fptemp;
3176 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3180 env->fpus &= (~0x4700);
3185 void helper_fptan(void)
3187 CPU86_LDouble fptemp;
3190 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3196 env->fpus &= (~0x400); /* C2 <-- 0 */
3197 /* the above code is for |arg| < 2**52 only */
3201 void helper_fpatan(void)
3203 CPU86_LDouble fptemp, fpsrcop;
3207 ST1 = atan2(fpsrcop,fptemp);
3211 void helper_fxtract(void)
3213 CPU86_LDoubleU temp;
3214 unsigned int expdif;
3217 expdif = EXPD(temp) - EXPBIAS;
3218 /* DP exponent bias */
3225 void helper_fprem1(void)
3227 CPU86_LDouble dblq, fpsrcop, fptemp;
3228 CPU86_LDoubleU fpsrcop1, fptemp1;
3230 signed long long int q;
3232 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3233 ST0 = 0.0 / 0.0; /* NaN */
3234 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3240 fpsrcop1.d = fpsrcop;
3242 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3245 /* optimisation? taken from the AMD docs */
3246 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3247 /* ST0 is unchanged */
3252 dblq = fpsrcop / fptemp;
3253 /* round dblq towards nearest integer */
3255 ST0 = fpsrcop - fptemp * dblq;
3257 /* convert dblq to q by truncating towards zero */
3259 q = (signed long long int)(-dblq);
3261 q = (signed long long int)dblq;
3263 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3264 /* (C0,C3,C1) <-- (q2,q1,q0) */
3265 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3266 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3267 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
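/* The three low bits of the quotient are reported in the FPU status
   word exactly as the shifts above encode them: C0 = q2 (bit 8),
   C3 = q1 (bit 14), C1 = q0 (bit 9).  Example: q = 5 (binary 101)
   sets C0 and C1 and clears C3.  helper_fprem below uses the same
   encoding. */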
3269 env->fpus |= 0x400; /* C2 <-- 1 */
3270 fptemp = pow(2.0, expdif - 50);
3271 fpsrcop = (ST0 / ST1) / fptemp;
3272 /* fpsrcop = integer obtained by chopping */
3273 fpsrcop = (fpsrcop < 0.0) ?
3274 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3275 ST0 -= (ST1 * fpsrcop * fptemp);
3279 void helper_fprem(void)
3281 CPU86_LDouble dblq, fpsrcop, fptemp;
3282 CPU86_LDoubleU fpsrcop1, fptemp1;
3284 signed long long int q;
3286 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3287 ST0 = 0.0 / 0.0; /* NaN */
3288 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3292 fpsrcop = (CPU86_LDouble)ST0;
3293 fptemp = (CPU86_LDouble)ST1;
3294 fpsrcop1.d = fpsrcop;
3296 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3299 /* optimisation? taken from the AMD docs */
3300 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3301 /* ST0 is unchanged */
3305 if ( expdif < 53 ) {
3306 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3307 /* round dblq towards zero */
3308 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3309 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3311 /* convert dblq to q by truncating towards zero */
3313 q = (signed long long int)(-dblq);
3315 q = (signed long long int)dblq;
3317 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3318 /* (C0,C3,C1) <-- (q2,q1,q0) */
3319 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3320 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3321 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3323 int N = 32 + (expdif % 32); /* as per AMD docs */
3324 env->fpus |= 0x400; /* C2 <-- 1 */
3325 fptemp = pow(2.0, (double)(expdif - N));
3326 fpsrcop = (ST0 / ST1) / fptemp;
3327 /* fpsrcop = integer obtained by chopping */
3328 fpsrcop = (fpsrcop < 0.0) ?
3329 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3330 ST0 -= (ST1 * fpsrcop * fptemp);
3334 void helper_fyl2xp1(void)
3336 CPU86_LDouble fptemp;
3339 if ((fptemp+1.0)>0.0) {
3340 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
3344 env->fpus &= (~0x4700);
3349 void helper_fsqrt(void)
3351 CPU86_LDouble fptemp;
3355 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3361 void helper_fsincos(void)
3363 CPU86_LDouble fptemp;
3366 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3372 env->fpus &= (~0x400); /* C2 <-- 0 */
3373 /* the above code is for |arg| < 2**63 only */
3377 void helper_frndint(void)
3379 ST0 = floatx_round_to_int(ST0, &env->fp_status);
3382 void helper_fscale(void)
3384 ST0 = ldexp (ST0, (int)(ST1));
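/* FSCALE multiplies ST0 by two raised to ST1 truncated toward zero,
   which is what the ldexp() call above computes.  Example: ST0 = 3.0
   and ST1 = 4.0 give ldexp(3.0, 4) = 48.0. */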
3387 void helper_fsin(void)
3389 CPU86_LDouble fptemp;
3392 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3396 env->fpus &= (~0x400); /* C2 <-- 0 */
3397 /* the above code is for |arg| < 2**53 only */
3401 void helper_fcos(void)
3403 CPU86_LDouble fptemp;
3406 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3410 env->fpus &= (~0x400); /* C2 <-- 0 */
3411 /* the above code is for |arg| < 2**63 only */
3415 void helper_fxam_ST0(void)
3417 CPU86_LDoubleU temp;
3422 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3424 env->fpus |= 0x200; /* C1 <-- 1 */
3426 /* XXX: test fptags too */
3427 expdif = EXPD(temp);
3428 if (expdif == MAXEXPD) {
3429 #ifdef USE_X86LDOUBLE
3430 if (MANTD(temp) == 0x8000000000000000ULL)
3432 if (MANTD(temp) == 0)
3434 env->fpus |= 0x500 /*Infinity*/;
3436 env->fpus |= 0x100 /*NaN*/;
3437 } else if (expdif == 0) {
3438 if (MANTD(temp) == 0)
3439 env->fpus |= 0x4000 /*Zero*/;
3441 env->fpus |= 0x4400 /*Denormal*/;
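/* FXAM condition-code encoding used above (C3 = bit 14, C2 = bit 10,
   C1 = bit 9, C0 = bit 8 of fpus):
       0x100  (C0)      NaN
       0x500  (C2|C0)   Infinity
       0x4000 (C3)      Zero
       0x4400 (C3|C2)   Denormal
   C1 (0x200) is set earlier, above, for negative values. */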
3447 void helper_fstenv(target_ulong ptr, int data32)
3449 int fpus, fptag, exp, i;
3453 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
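/* The stored status-word image merges the live status flags with the
   current top-of-stack pointer: TOP occupies bits 11-13, so the line
   above clears 0x3800 and ORs in fpstt << 11.  Example: fpstt = 5
   contributes 0x2800. */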
3455 for (i=7; i>=0; i--) {
3457 if (env->fptags[i]) {
3460 tmp.d = env->fpregs[i].d;
3463 if (exp == 0 && mant == 0) {
3466 } else if (exp == 0 || exp == MAXEXPD
3467 #ifdef USE_X86LDOUBLE
3468 || (mant & (1LL << 63)) == 0
3471 /* NaNs, infinity, denormal */
3478 stl(ptr, env->fpuc);
3480 stl(ptr + 8, fptag);
3481 stl(ptr + 12, 0); /* fpip */
3482 stl(ptr + 16, 0); /* fpcs */
3483 stl(ptr + 20, 0); /* fpoo */
3484 stl(ptr + 24, 0); /* fpos */
3487 stw(ptr, env->fpuc);
3489 stw(ptr + 4, fptag);
3497 void helper_fldenv(target_ulong ptr, int data32)
3502 env->fpuc = lduw(ptr);
3503 fpus = lduw(ptr + 4);
3504 fptag = lduw(ptr + 8);
3507 env->fpuc = lduw(ptr);
3508 fpus = lduw(ptr + 2);
3509 fptag = lduw(ptr + 4);
3511 env->fpstt = (fpus >> 11) & 7;
3512 env->fpus = fpus & ~0x3800;
3513 for(i = 0;i < 8; i++) {
3514 env->fptags[i] = ((fptag & 3) == 3);
3519 void helper_fsave(target_ulong ptr, int data32)
3524 helper_fstenv(ptr, data32);
3526 ptr += (14 << data32);
3527 for(i = 0;i < 8; i++) {
3529 helper_fstt(tmp, ptr);
3547 void helper_frstor(target_ulong ptr, int data32)
3552 helper_fldenv(ptr, data32);
3553 ptr += (14 << data32);
3555 for(i = 0;i < 8; i++) {
3556 tmp = helper_fldt(ptr);
3562 void helper_fxsave(target_ulong ptr, int data64)
3564 int fpus, fptag, i, nb_xmm_regs;
3568 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
3570 for(i = 0; i < 8; i++) {
3571 fptag |= (env->fptags[i] << i);
3573 stw(ptr, env->fpuc);
3575 stw(ptr + 4, fptag ^ 0xff);
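/* FXSAVE stores an abbreviated tag word: one bit per register, with 1
   meaning valid (non-empty).  env->fptags uses the opposite sense
   (1 = empty), hence the fptag ^ 0xff above; helper_fxrstor below
   undoes the same transformation. */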
3578 for(i = 0;i < 8; i++) {
3580 helper_fstt(tmp, addr);
3584 if (env->cr[4] & CR4_OSFXSR_MASK) {
3585 /* XXX: finish it */
3586 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
3587 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
3588 nb_xmm_regs = 8 << data64;
3590 for(i = 0; i < nb_xmm_regs; i++) {
3591 stq(addr, env->xmm_regs[i].XMM_Q(0));
3592 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
3598 void helper_fxrstor(target_ulong ptr, int data64)
3600 int i, fpus, fptag, nb_xmm_regs;
3604 env->fpuc = lduw(ptr);
3605 fpus = lduw(ptr + 2);
3606 fptag = lduw(ptr + 4);
3607 env->fpstt = (fpus >> 11) & 7;
3608 env->fpus = fpus & ~0x3800;
3610 for(i = 0;i < 8; i++) {
3611 env->fptags[i] = ((fptag >> i) & 1);
3615 for(i = 0;i < 8; i++) {
3616 tmp = helper_fldt(addr);
3621 if (env->cr[4] & CR4_OSFXSR_MASK) {
3622 /* XXX: finish it */
3623 env->mxcsr = ldl(ptr + 0x18);
3625 nb_xmm_regs = 8 << data64;
3627 for(i = 0; i < nb_xmm_regs; i++) {
3628 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
3629 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
3635 #ifndef USE_X86LDOUBLE
3637 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3639 CPU86_LDoubleU temp;
3644 *pmant = (MANTD(temp) << 11) | (1LL << 63);
3645 /* exponent + sign */
3646 e = EXPD(temp) - EXPBIAS + 16383;
3647 e |= SIGND(temp) >> 16;
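/* Illustrative conversion, assuming EXPBIAS is the usual IEEE double
   bias of 1023: for f = 1.0 the double has a zero mantissa field and a
   biased exponent equal to EXPBIAS, so the code above produces
       mant = (0 << 11) | (1LL << 63) = 0x8000000000000000
       exp  = EXPBIAS - EXPBIAS + 16383 = 0x3fff
   i.e. the 80-bit form makes the integer bit explicit and re-biases the
   exponent to 16383.  cpu_set_fp80 below performs the inverse. */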
3651 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3653 CPU86_LDoubleU temp;
3657 /* XXX: handle overflow ? */
3658 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
3659 e |= (upper >> 4) & 0x800; /* sign */
3660 ll = (mant >> 11) & ((1LL << 52) - 1);
3662 temp.l.upper = (e << 20) | (ll >> 32);
3665 temp.ll = ll | ((uint64_t)e << 52);
3672 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
3674 CPU86_LDoubleU temp;
3677 *pmant = temp.l.lower;
3678 *pexp = temp.l.upper;
3681 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
3683 CPU86_LDoubleU temp;
3685 temp.l.upper = upper;
3686 temp.l.lower = mant;
3691 #ifdef TARGET_X86_64
3693 //#define DEBUG_MULDIV
3695 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
3704 static void neg128(uint64_t *plow, uint64_t *phigh)
3708 add128(plow, phigh, 1, 0);
3711 /* return TRUE if overflow */
3712 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
3714 uint64_t q, r, a1, a0;
3727 /* XXX: use a better algorithm */
3728 for(i = 0; i < 64; i++) {
3730 a1 = (a1 << 1) | (a0 >> 63);
3731 if (ab || a1 >= b) {
3737 a0 = (a0 << 1) | qb;
3739 #if defined(DEBUG_MULDIV)
3740 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
3741 *phigh, *plow, b, a0, a1);
3749 /* return TRUE if overflow */
3750 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
3753 sa = ((int64_t)*phigh < 0);
3755 neg128(plow, phigh);
3759 if (div64(plow, phigh, b) != 0)
3762 if (*plow > (1ULL << 63))
3766 if (*plow >= (1ULL << 63))
3774 void helper_mulq_EAX_T0(void)
3778 mulu64(&r0, &r1, EAX, T0);
3785 void helper_imulq_EAX_T0(void)
3789 muls64(&r0, &r1, EAX, T0);
3793 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
3796 void helper_imulq_T0_T1(void)
3800 muls64(&r0, &r1, T0, T1);
3803 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
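/* Signed 64x64 -> 128 bit multiply: the result fits in 64 bits exactly
   when the high half equals the sign-extension of the low half, so the
   CC_SRC expression above is non-zero (signalling overflow to the flag
   computation) only when it does not.  Example: (-1) * (-1) gives
   r0 = 1, r1 = 0 and (int64_t)r0 >> 63 = 0, so no overflow is
   reported. */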
3806 void helper_divq_EAX_T0(void)
3810 raise_exception(EXCP00_DIVZ);
3814 if (div64(&r0, &r1, T0))
3815 raise_exception(EXCP00_DIVZ);
3820 void helper_idivq_EAX_T0(void)
3824 raise_exception(EXCP00_DIVZ);
3828 if (idiv64(&r0, &r1, T0))
3829 raise_exception(EXCP00_DIVZ);
3834 void helper_bswapq_T0(void)
3840 void helper_hlt(void)
3842 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
3843 env->hflags |= HF_HALTED_MASK;
3844 env->exception_index = EXCP_HLT;
3848 void helper_monitor(void)
3850 if ((uint32_t)ECX != 0)
3851 raise_exception(EXCP0D_GPF);
3852 /* XXX: store address ? */
3855 void helper_mwait(void)
3857 if ((uint32_t)ECX != 0)
3858 raise_exception(EXCP0D_GPF);
3859 /* XXX: not complete but not completely erroneous */
3860 if (env->cpu_index != 0 || env->next_cpu != NULL) {
3861 /* more than one CPU: do not sleep because another CPU may
3868 float approx_rsqrt(float a)
3870 return 1.0 / sqrt(a);
3873 float approx_rcp(float a)
3878 void update_fp_status(void)
3882 /* set rounding mode */
3883 switch(env->fpuc & RC_MASK) {
3886 rnd_type = float_round_nearest_even;
3889 rnd_type = float_round_down;
3892 rnd_type = float_round_up;
3895 rnd_type = float_round_to_zero;
3898 set_float_rounding_mode(rnd_type, &env->fp_status);
3900 switch((env->fpuc >> 8) & 3) {
3912 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
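/* The x87 control word packs the rounding control in bits 10-11
   (selected by RC_MASK above) and the precision control in bits 8-9
   (the (fpuc >> 8) & 3 switch).  With the power-on default control
   word 0x037f, both fields select round-to-nearest-even and 80-bit
   (extended) precision. */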
3916 #if !defined(CONFIG_USER_ONLY)
3918 #define MMUSUFFIX _mmu
3920 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
3922 # define GETPC() (__builtin_return_address(0))
3926 #include "softmmu_template.h"
3929 #include "softmmu_template.h"
3932 #include "softmmu_template.h"
3935 #include "softmmu_template.h"
3939 /* try to fill the TLB and return an exception if error. If retaddr is
3940 NULL, it means that the function was called in C code (i.e. not
3941 from generated code or from helper.c) */
3942 /* XXX: fix it to restore all registers */
3943 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
3945 TranslationBlock *tb;
3948 CPUX86State *saved_env;
3950 /* XXX: hack to restore env in all cases, even if not called from
3953 env = cpu_single_env;
3955 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
3958 /* now we have a real cpu fault */
3959 pc = (unsigned long)retaddr;
3960 tb = tb_find_pc(pc);
3962 /* the PC is inside the translated code. It means that we have
3963 a virtual CPU fault */
3964 cpu_restore_state(tb, env, pc, NULL);
3968 raise_exception_err(env->exception_index, env->error_code);
3970 raise_exception_err_norestore(env->exception_index, env->error_code);
3976 /* Secure Virtual Machine helpers */
3978 void helper_stgi(void)
3980 env->hflags |= HF_GIF_MASK;
3983 void helper_clgi(void)
3985 env->hflags &= ~HF_GIF_MASK;
3988 #if defined(CONFIG_USER_ONLY)
3990 void helper_vmrun(target_ulong addr) { }
3991 void helper_vmmcall(void) { }
3992 void helper_vmload(target_ulong addr) { }
3993 void helper_vmsave(target_ulong addr) { }
3994 void helper_skinit(void) { }
3995 void helper_invlpga(void) { }
3996 void vmexit(uint64_t exit_code, uint64_t exit_info_1) { }
3997 int svm_check_intercept_param(uint32_t type, uint64_t param)
4004 static inline uint32_t
4005 vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
4007 return ((vmcb_attrib & 0x00ff) << 8) /* Type, S, DPL, P */
4008 | ((vmcb_attrib & 0x0f00) << 12) /* AVL, L, DB, G */
4009 | ((vmcb_base >> 16) & 0xff) /* Base 23-16 */
4010 | (vmcb_base & 0xff000000) /* Base 31-24 */
4011 | (vmcb_limit & 0xf0000); /* Limit 19-16 */
4014 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
4016 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
4017 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
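/* The VMCB stores segment attributes in a packed 12-bit form; the two
   helpers above translate between that form and the flags word kept in
   the segment cache.  Round-trip example (hypothetical descriptor): a
   present 32-bit code segment with G=1 has vmcb_attrib = 0x0c9b, which
   vmcb2cpu_attrib() expands to attribute bits 0x00c09b00 (plus the
   base/limit bits merged in), and cpu2vmcb_attrib(0x00c09b00) packs
   back to 0x0c9b. */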
4020 void helper_vmrun(target_ulong addr)
4025 if (loglevel & CPU_LOG_TB_IN_ASM)
4026 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4028 env->vm_vmcb = addr;
4031 /* save the current CPU state in the hsave page */
4032 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4033 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4035 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4036 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4038 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4039 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4040 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4041 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4042 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4043 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4044 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4046 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4047 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4049 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4050 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4051 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4052 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4054 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4055 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4056 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4058 /* load the interception bitmaps so we do not need to access the
4060 /* We shift all the intercept bits so we can OR them with the TB
4062 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4063 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4064 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4065 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4066 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4067 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4069 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4070 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4072 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4073 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4075 /* clear exit_info_2 so we behave like the real hardware */
4076 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4078 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4079 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4080 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4081 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4082 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4083 if (int_ctl & V_INTR_MASKING_MASK) {
4084 env->cr[8] = int_ctl & V_TPR_MASK;
4085 cpu_set_apic_tpr(env, env->cr[8]);
4086 if (env->eflags & IF_MASK)
4087 env->hflags |= HF_HIF_MASK;
4090 #ifdef TARGET_X86_64
4091 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4092 env->hflags &= ~HF_LMA_MASK;
4093 if (env->efer & MSR_EFER_LMA)
4094 env->hflags |= HF_LMA_MASK;
4097 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4098 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4099 CC_OP = CC_OP_EFLAGS;
4100 CC_DST = 0xffffffff;
4102 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4103 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4104 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4105 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4107 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4109 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4110 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4111 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4112 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4113 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4115 /* FIXME: guest state consistency checks */
4117 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4118 case TLB_CONTROL_DO_NOTHING:
4120 case TLB_CONTROL_FLUSH_ALL_ASID:
4121 /* FIXME: this is not 100% correct but should work for now */
4130 /* maybe we need to inject an event */
4131 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4132 if (event_inj & SVM_EVTINJ_VALID) {
4133 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4134 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4135 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4136 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4138 if (loglevel & CPU_LOG_TB_IN_ASM)
4139 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4140 /* FIXME: need to implement valid_err */
4141 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4142 case SVM_EVTINJ_TYPE_INTR:
4143 env->exception_index = vector;
4144 env->error_code = event_inj_err;
4145 env->exception_is_int = 0;
4146 env->exception_next_eip = -1;
4147 if (loglevel & CPU_LOG_TB_IN_ASM)
4148 fprintf(logfile, "INTR");
4150 case SVM_EVTINJ_TYPE_NMI:
4151 env->exception_index = vector;
4152 env->error_code = event_inj_err;
4153 env->exception_is_int = 0;
4154 env->exception_next_eip = EIP;
4155 if (loglevel & CPU_LOG_TB_IN_ASM)
4156 fprintf(logfile, "NMI");
4158 case SVM_EVTINJ_TYPE_EXEPT:
4159 env->exception_index = vector;
4160 env->error_code = event_inj_err;
4161 env->exception_is_int = 0;
4162 env->exception_next_eip = -1;
4163 if (loglevel & CPU_LOG_TB_IN_ASM)
4164 fprintf(logfile, "EXEPT");
4166 case SVM_EVTINJ_TYPE_SOFT:
4167 env->exception_index = vector;
4168 env->error_code = event_inj_err;
4169 env->exception_is_int = 1;
4170 env->exception_next_eip = EIP;
4171 if (loglevel & CPU_LOG_TB_IN_ASM)
4172 fprintf(logfile, "SOFT");
4175 if (loglevel & CPU_LOG_TB_IN_ASM)
4176 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4178 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4179 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4185 void helper_vmmcall(void)
4187 if (loglevel & CPU_LOG_TB_IN_ASM)
4188 fprintf(logfile,"vmmcall!\n");
4191 void helper_vmload(target_ulong addr)
4193 if (loglevel & CPU_LOG_TB_IN_ASM)
4194 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4195 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4196 env->segs[R_FS].base);
4198 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4199 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4200 SVM_LOAD_SEG2(addr, tr, tr);
4201 SVM_LOAD_SEG2(addr, ldt, ldtr);
4203 #ifdef TARGET_X86_64
4204 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4205 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4206 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4207 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4209 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4210 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4211 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4212 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4215 void helper_vmsave(target_ulong addr)
4217 if (loglevel & CPU_LOG_TB_IN_ASM)
4218 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4219 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4220 env->segs[R_FS].base);
4222 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4223 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4224 SVM_SAVE_SEG(addr, tr, tr);
4225 SVM_SAVE_SEG(addr, ldt, ldtr);
4227 #ifdef TARGET_X86_64
4228 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4229 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4230 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4231 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4233 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4234 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4235 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4236 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4239 void helper_skinit(void)
4241 if (loglevel & CPU_LOG_TB_IN_ASM)
4242 fprintf(logfile,"skinit!\n");
4245 void helper_invlpga(void)
4250 int svm_check_intercept_param(uint32_t type, uint64_t param)
4253 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4254 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4255 vmexit(type, param);
4259 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4260 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4261 vmexit(type, param);
4265 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4266 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4267 vmexit(type, param);
4271 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4272 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4273 vmexit(type, param);
4277 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
4278 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
4279 vmexit(type, param);
4284 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
4285 /* FIXME: this should be read in at vmrun (faster this way?) */
4286 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
4287 uint16_t port = (uint16_t) (param >> 16);
4289 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
4290 if(lduw_phys(addr + port / 8) & (mask << (port & 7)))
4291 vmexit(type, param);
4296 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
4297 /* FIXME: this should be read in at vmrun (faster this way?) */
4298 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
4299 switch((uint32_t)ECX) {
4304 case 0xc0000000 ... 0xc0001fff:
4305 T0 = (8192 + ECX - 0xc0000000) * 2;
4309 case 0xc0010000 ... 0xc0011fff:
4310 T0 = (16384 + ECX - 0xc0010000) * 2;
4315 vmexit(type, param);
4318 if (ldub_phys(addr + T1) & ((1 << param) << T0))
4319 vmexit(type, param);
4324 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
4325 vmexit(type, param);
4333 void vmexit(uint64_t exit_code, uint64_t exit_info_1)
4337 if (loglevel & CPU_LOG_TB_IN_ASM)
4338 fprintf(logfile,"vmexit(%016" PRIx64 ", %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
4339 exit_code, exit_info_1,
4340 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
4343 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
4344 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
4345 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4347 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
4350 /* Save the VM state in the vmcb */
4351 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
4352 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
4353 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
4354 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
4356 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4357 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4359 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4360 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4362 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
4363 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
4364 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
4365 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
4366 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
4368 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
4369 int_ctl &= ~V_TPR_MASK;
4370 int_ctl |= env->cr[8] & V_TPR_MASK;
4371 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
4374 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
4375 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
4376 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
4377 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
4378 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
4379 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
4380 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
4382 /* Reload the host state from vm_hsave */
4383 env->hflags &= ~HF_HIF_MASK;
4385 env->intercept_exceptions = 0;
4386 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
4388 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
4389 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
4391 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
4392 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
4394 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
4395 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
4396 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
4397 if (int_ctl & V_INTR_MASKING_MASK) {
4398 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
4399 cpu_set_apic_tpr(env, env->cr[8]);
4401 /* we need to set the efer after the crs so the hidden flags get set properly */
4402 #ifdef TARGET_X86_64
4403 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
4404 env->hflags &= ~HF_LMA_MASK;
4405 if (env->efer & MSR_EFER_LMA)
4406 env->hflags |= HF_LMA_MASK;
4410 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
4411 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4412 CC_OP = CC_OP_EFLAGS;
4414 SVM_LOAD_SEG(env->vm_hsave, ES, es);
4415 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
4416 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
4417 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
4419 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
4420 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
4421 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
4423 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
4424 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
4427 cpu_x86_set_cpl(env, 0);
4428 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code_hi), (uint32_t)(exit_code >> 32));
4429 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
4430 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
4433 /* FIXME: Resets the current ASID register to zero (host ASID). */
4435 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
4437 /* Clears the TSC_OFFSET inside the processor. */
4439 /* If the host is in PAE mode, the processor reloads the host's PDPEs
4440 from the page table indicated by the host's CR3. If the PDPEs contain
4441 illegal state, the processor causes a shutdown. */
4443 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
4444 env->cr[0] |= CR0_PE_MASK;
4445 env->eflags &= ~VM_MASK;
4447 /* Disables all breakpoints in the host DR7 register. */
4449 /* Checks the reloaded host state for consistency. */
4451 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
4452 host's code segment or non-canonical (in the case of long mode), a
4453 #GP fault is delivered inside the host. */
4455 /* remove any pending exception */
4456 env->exception_index = -1;
4457 env->error_code = 0;
4458 env->old_exception = -1;