4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #define CPU_NO_GLOBAL_REGS
22 #include "host-utils.h"
27 #define raise_exception_err(a, b)\
30 fprintf(logfile, "raise_exception line=%d\n", __LINE__);\
31 (raise_exception_err)(a, b);\
35 const uint8_t parity_table[256] = {
36 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
37 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
40 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
41 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
52 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
53 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
56 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
57 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
58 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
59 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
60 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
61 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
62 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
63 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
64 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
65 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
66 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
67 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
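/* modulo 17 table: 16-bit RCL/RCR rotate through 17 positions (the operand
   plus CF), so the shift count is reduced modulo 17 */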
71 const uint8_t rclw_table[32] = {
72 0, 1, 2, 3, 4, 5, 6, 7,
73 8, 9,10,11,12,13,14,15,
74 16, 0, 1, 2, 3, 4, 5, 6,
75 7, 8, 9,10,11,12,13,14,
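/* modulo 9 table: likewise, 8-bit RCL/RCR rotate through 9 positions (the
   operand plus CF), so the count is reduced modulo 9 */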
79 const uint8_t rclb_table[32] = {
80 0, 1, 2, 3, 4, 5, 6, 7,
81 8, 0, 1, 2, 3, 4, 5, 6,
82 7, 8, 0, 1, 2, 3, 4, 5,
83 6, 7, 8, 0, 1, 2, 3, 4,
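/* constants loaded by the x87 constant-load instructions
   (FLDZ, FLD1, FLDPI, FLDLG2, FLDLN2, FLDL2E, FLDL2T) */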
86 const CPU86_LDouble f15rk[7] =
88 0.00000000000000000000L,
89 1.00000000000000000000L,
90 3.14159265358979323851L, /*pi*/
91 0.30102999566398119523L, /*lg2*/
92 0.69314718055994530943L, /*ln2*/
93 1.44269504088896340739L, /*l2e*/
94 3.32192809488736234781L, /*l2t*/
97 /* broken thread support */
99 spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;
101 void helper_lock(void)
103 spin_lock(&global_cpu_lock);
106 void helper_unlock(void)
108 spin_unlock(&global_cpu_lock);
111 /* return non-zero if error */
112 static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
123 index = selector & ~7;
124 if ((index + 7) > dt->limit)
126 ptr = dt->base + index;
127 *e1_ptr = ldl_kernel(ptr);
128 *e2_ptr = ldl_kernel(ptr + 4);
132 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
135 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
136 if (e2 & DESC_G_MASK)
137 limit = (limit << 12) | 0xfff;
141 static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
143 return ((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
146 static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
148 sc->base = get_seg_base(e1, e2);
149 sc->limit = get_seg_limit(e1, e2);
153 /* init the segment cache in vm86 mode. */
154 static inline void load_seg_vm(int seg, int selector)
157 cpu_x86_load_seg_cache(env, seg, selector,
158 (selector << 4), 0xffff, 0);
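/* fetch the privileged stack pointer (SS:ESP) for ring 'dpl' from the
   current TSS; entry size and offsets differ between 16-bit and 32-bit TSS
   types, hence the shift computed from the descriptor type below */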
161 static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
162 uint32_t *esp_ptr, int dpl)
164 int type, index, shift;
169 printf("TR: base=" TARGET_FMT_lx " limit=%x\n", env->tr.base, env->tr.limit);
170 for(i=0;i<env->tr.limit;i++) {
171 printf("%02x ", env->tr.base[i]);
172 if ((i & 7) == 7) printf("\n");
178 if (!(env->tr.flags & DESC_P_MASK))
179 cpu_abort(env, "invalid tss");
180 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
182 cpu_abort(env, "invalid tss type");
184 index = (dpl * 4 + 2) << shift;
185 if (index + (4 << shift) - 1 > env->tr.limit)
186 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
188 *esp_ptr = lduw_kernel(env->tr.base + index);
189 *ss_ptr = lduw_kernel(env->tr.base + index + 2);
191 *esp_ptr = ldl_kernel(env->tr.base + index);
192 *ss_ptr = lduw_kernel(env->tr.base + index + 4);
196 /* XXX: merge with load_seg() */
197 static void tss_load_seg(int seg_reg, int selector)
202 if ((selector & 0xfffc) != 0) {
203 if (load_segment(&e1, &e2, selector) != 0)
204 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
205 if (!(e2 & DESC_S_MASK))
206 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
208 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
209 cpl = env->hflags & HF_CPL_MASK;
210 if (seg_reg == R_CS) {
211 if (!(e2 & DESC_CS_MASK))
212 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
213 /* XXX: is it correct ? */
215 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
216 if ((e2 & DESC_C_MASK) && dpl > rpl)
217 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
218 } else if (seg_reg == R_SS) {
219 /* SS must be writable data */
220 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
221 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
222 if (dpl != cpl || dpl != rpl)
223 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
225 /* a code segment must be readable */
226 if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
227 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
228 /* if data or non-conforming code, check the rights */
229 if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
230 if (dpl < cpl || dpl < rpl)
231 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
234 if (!(e2 & DESC_P_MASK))
235 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
236 cpu_x86_load_seg_cache(env, seg_reg, selector,
237 get_seg_base(e1, e2),
238 get_seg_limit(e1, e2),
241 if (seg_reg == R_SS || seg_reg == R_CS)
242 raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
246 #define SWITCH_TSS_JMP 0
247 #define SWITCH_TSS_IRET 1
248 #define SWITCH_TSS_CALL 2
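/* Perform a hardware task switch: validate the new TSS, save the outgoing
   context into the old TSS, then load the incoming context. 'source' tells
   whether we got here via JMP, CALL or IRET, which controls the busy bit of
   the TSS descriptors and the NT flag / back-link handling. */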
250 /* XXX: restore CPU state in registers (PowerPC case) */
251 static void switch_tss(int tss_selector,
252 uint32_t e1, uint32_t e2, int source,
255 int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
256 target_ulong tss_base;
257 uint32_t new_regs[8], new_segs[6];
258 uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
259 uint32_t old_eflags, eflags_mask;
264 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
266 if (loglevel & CPU_LOG_PCALL)
267 fprintf(logfile, "switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);
270 /* if task gate, we read the TSS segment and we load it */
272 if (!(e2 & DESC_P_MASK))
273 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
274 tss_selector = e1 >> 16;
275 if (tss_selector & 4)
276 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
277 if (load_segment(&e1, &e2, tss_selector) != 0)
278 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
279 if (e2 & DESC_S_MASK)
280 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
281 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
283 raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
286 if (!(e2 & DESC_P_MASK))
287 raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
293 tss_limit = get_seg_limit(e1, e2);
294 tss_base = get_seg_base(e1, e2);
295 if ((tss_selector & 4) != 0 ||
296 tss_limit < tss_limit_max)
297 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
298 old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
300 old_tss_limit_max = 103;
302 old_tss_limit_max = 43;
304 /* read all the registers from the new TSS */
307 new_cr3 = ldl_kernel(tss_base + 0x1c);
308 new_eip = ldl_kernel(tss_base + 0x20);
309 new_eflags = ldl_kernel(tss_base + 0x24);
310 for(i = 0; i < 8; i++)
311 new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
312 for(i = 0; i < 6; i++)
313 new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
314 new_ldt = lduw_kernel(tss_base + 0x60);
315 new_trap = ldl_kernel(tss_base + 0x64);
319 new_eip = lduw_kernel(tss_base + 0x0e);
320 new_eflags = lduw_kernel(tss_base + 0x10);
321 for(i = 0; i < 8; i++)
322 new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
323 for(i = 0; i < 4; i++)
324 new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
325 new_ldt = lduw_kernel(tss_base + 0x2a);
331 /* NOTE: we must avoid memory exceptions during the task switch,
332 so we make dummy accesses before */
333 /* XXX: it can still fail in some cases, so a bigger hack is
334 necessary to validate the TLB after having done the accesses */
336 v1 = ldub_kernel(env->tr.base);
337 v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
338 stb_kernel(env->tr.base, v1);
339 stb_kernel(env->tr.base + old_tss_limit_max, v2);
341 /* clear busy bit (it is restartable) */
342 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
345 ptr = env->gdt.base + (env->tr.selector & ~7);
346 e2 = ldl_kernel(ptr + 4);
347 e2 &= ~DESC_TSS_BUSY_MASK;
348 stl_kernel(ptr + 4, e2);
350 old_eflags = compute_eflags();
351 if (source == SWITCH_TSS_IRET)
352 old_eflags &= ~NT_MASK;
354 /* save the current state in the old TSS */
357 stl_kernel(env->tr.base + 0x20, next_eip);
358 stl_kernel(env->tr.base + 0x24, old_eflags);
359 stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
360 stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
361 stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
362 stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
363 stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
364 stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
365 stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
366 stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
367 for(i = 0; i < 6; i++)
368 stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
371 stw_kernel(env->tr.base + 0x0e, next_eip);
372 stw_kernel(env->tr.base + 0x10, old_eflags);
373 stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
374 stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
375 stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
376 stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
377 stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
378 stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
379 stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
380 stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
381 for(i = 0; i < 4; i++)
382 stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
385 /* now if an exception occurs, it will occur in the next task
388 if (source == SWITCH_TSS_CALL) {
389 stw_kernel(tss_base, env->tr.selector);
390 new_eflags |= NT_MASK;
394 if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
397 ptr = env->gdt.base + (tss_selector & ~7);
398 e2 = ldl_kernel(ptr + 4);
399 e2 |= DESC_TSS_BUSY_MASK;
400 stl_kernel(ptr + 4, e2);
403 /* set the new CPU state */
404 /* from this point, any exception which occurs can give problems */
405 env->cr[0] |= CR0_TS_MASK;
406 env->hflags |= HF_TS_MASK;
407 env->tr.selector = tss_selector;
408 env->tr.base = tss_base;
409 env->tr.limit = tss_limit;
410 env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;
412 if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
413 cpu_x86_update_cr3(env, new_cr3);
416 /* load all registers without an exception, then reload them with
417 possible exception */
419 eflags_mask = TF_MASK | AC_MASK | ID_MASK |
420 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
422 eflags_mask &= 0xffff;
423 load_eflags(new_eflags, eflags_mask);
424 /* XXX: what to do in 16 bit case ? */
433 if (new_eflags & VM_MASK) {
434 for(i = 0; i < 6; i++)
435 load_seg_vm(i, new_segs[i]);
436 /* in vm86, CPL is always 3 */
437 cpu_x86_set_cpl(env, 3);
439 /* CPL is set to the RPL of CS */
440 cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
441 /* load just the selectors first, as the rest may trigger exceptions */
442 for(i = 0; i < 6; i++)
443 cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
446 env->ldt.selector = new_ldt & ~4;
453 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
455 if ((new_ldt & 0xfffc) != 0) {
457 index = new_ldt & ~7;
458 if ((index + 7) > dt->limit)
459 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
460 ptr = dt->base + index;
461 e1 = ldl_kernel(ptr);
462 e2 = ldl_kernel(ptr + 4);
463 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
464 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
465 if (!(e2 & DESC_P_MASK))
466 raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
467 load_seg_cache_raw_dt(&env->ldt, e1, e2);
470 /* load the segments */
471 if (!(new_eflags & VM_MASK)) {
472 tss_load_seg(R_CS, new_segs[R_CS]);
473 tss_load_seg(R_SS, new_segs[R_SS]);
474 tss_load_seg(R_ES, new_segs[R_ES]);
475 tss_load_seg(R_DS, new_segs[R_DS]);
476 tss_load_seg(R_FS, new_segs[R_FS]);
477 tss_load_seg(R_GS, new_segs[R_GS]);
480 /* check that EIP is in the CS segment limits */
481 if (new_eip > env->segs[R_CS].limit) {
482 /* XXX: different exception if CALL ? */
483 raise_exception_err(EXCP0D_GPF, 0);
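/* I/O permission bitmap: the 16-bit value at TSS offset 0x66 is the offset
   of the bitmap inside the TSS. One bit per I/O port; an access of 'size'
   bytes is allowed only if every bit covering ports addr..addr+size-1 is
   clear, otherwise #GP(0) is raised. */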
487 /* check if Port I/O is allowed in TSS */
488 static inline void check_io(int addr, int size)
490 int io_offset, val, mask;
492 /* TSS must be a valid 32 bit one */
493 if (!(env->tr.flags & DESC_P_MASK) ||
494 ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
497 io_offset = lduw_kernel(env->tr.base + 0x66);
498 io_offset += (addr >> 3);
499 /* Note: the check needs two bytes */
500 if ((io_offset + 1) > env->tr.limit)
502 val = lduw_kernel(env->tr.base + io_offset);
504 mask = (1 << size) - 1;
505 /* all bits must be zero to allow the I/O */
506 if ((val & mask) != 0) {
508 raise_exception_err(EXCP0D_GPF, 0);
512 void helper_check_iob(uint32_t t0)
517 void helper_check_iow(uint32_t t0)
522 void helper_check_iol(uint32_t t0)
527 void helper_outb(uint32_t port, uint32_t data)
529 cpu_outb(env, port, data & 0xff);
532 target_ulong helper_inb(uint32_t port)
534 return cpu_inb(env, port);
537 void helper_outw(uint32_t port, uint32_t data)
539 cpu_outw(env, port, data & 0xffff);
542 target_ulong helper_inw(uint32_t port)
544 return cpu_inw(env, port);
547 void helper_outl(uint32_t port, uint32_t data)
549 cpu_outl(env, port, data);
552 target_ulong helper_inl(uint32_t port)
554 return cpu_inl(env, port);
557 static inline unsigned int get_sp_mask(unsigned int e2)
559 if (e2 & DESC_B_MASK)
566 #define SET_ESP(val, sp_mask)\
568 if ((sp_mask) == 0xffff)\
569 ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
570 else if ((sp_mask) == 0xffffffffLL)\
571 ESP = (uint32_t)(val);\
576 #define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
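/* SET_ESP only updates the bits selected by sp_mask, so a 16-bit stack
   segment leaves the upper bits of ESP untouched. The PUSHW/PUSHL and
   POPW/POPL macros below work on a local 'sp' copy which is committed to
   ESP with SET_ESP only after the push/pop sequence has succeeded. */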
579 /* XXX: add an is_user flag to have proper security support */
580 #define PUSHW(ssp, sp, sp_mask, val)\
583 stw_kernel((ssp) + (sp & (sp_mask)), (val));\
586 #define PUSHL(ssp, sp, sp_mask, val)\
589 stl_kernel((ssp) + (sp & (sp_mask)), (val));\
592 #define POPW(ssp, sp, sp_mask, val)\
594 val = lduw_kernel((ssp) + (sp & (sp_mask)));\
598 #define POPL(ssp, sp, sp_mask, val)\
600 val = (uint32_t)ldl_kernel((ssp) + (sp & (sp_mask)));\
604 /* protected mode interrupt */
605 static void do_interrupt_protected(int intno, int is_int, int error_code,
606 unsigned int next_eip, int is_hw)
609 target_ulong ptr, ssp;
610 int type, dpl, selector, ss_dpl, cpl;
611 int has_error_code, new_stack, shift;
612 uint32_t e1, e2, offset, ss, esp, ss_e1, ss_e2;
613 uint32_t old_eip, sp_mask;
614 int svm_should_check = 1;
616 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
618 svm_should_check = 0;
622 && (INTERCEPTEDl(_exceptions, 1 << intno)
624 raise_interrupt(intno, is_int, error_code, 0);
627 if (!is_int && !is_hw) {
646 if (intno * 8 + 7 > dt->limit)
647 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
648 ptr = dt->base + intno * 8;
649 e1 = ldl_kernel(ptr);
650 e2 = ldl_kernel(ptr + 4);
651 /* check gate type */
652 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
654 case 5: /* task gate */
655 /* must do that check here to return the correct error code */
656 if (!(e2 & DESC_P_MASK))
657 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
658 switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
659 if (has_error_code) {
662 /* push the error code */
663 type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
665 if (env->segs[R_SS].flags & DESC_B_MASK)
669 esp = (ESP - (2 << shift)) & mask;
670 ssp = env->segs[R_SS].base + esp;
672 stl_kernel(ssp, error_code);
674 stw_kernel(ssp, error_code);
678 case 6: /* 286 interrupt gate */
679 case 7: /* 286 trap gate */
680 case 14: /* 386 interrupt gate */
681 case 15: /* 386 trap gate */
684 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
687 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
688 cpl = env->hflags & HF_CPL_MASK;
689 /* check privilege if software int */
690 if (is_int && dpl < cpl)
691 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
692 /* check valid bit */
693 if (!(e2 & DESC_P_MASK))
694 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
696 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
697 if ((selector & 0xfffc) == 0)
698 raise_exception_err(EXCP0D_GPF, 0);
700 if (load_segment(&e1, &e2, selector) != 0)
701 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
702 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
703 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
704 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
706 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
707 if (!(e2 & DESC_P_MASK))
708 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
709 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
710 /* to inner privilege */
711 get_ss_esp_from_tss(&ss, &esp, dpl);
712 if ((ss & 0xfffc) == 0)
713 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
715 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
716 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
717 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
718 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
720 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
721 if (!(ss_e2 & DESC_S_MASK) ||
722 (ss_e2 & DESC_CS_MASK) ||
723 !(ss_e2 & DESC_W_MASK))
724 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
725 if (!(ss_e2 & DESC_P_MASK))
726 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
728 sp_mask = get_sp_mask(ss_e2);
729 ssp = get_seg_base(ss_e1, ss_e2);
730 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
731 /* to same privilege */
732 if (env->eflags & VM_MASK)
733 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
735 sp_mask = get_sp_mask(env->segs[R_SS].flags);
736 ssp = env->segs[R_SS].base;
740 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
741 new_stack = 0; /* avoid warning */
742 sp_mask = 0; /* avoid warning */
743 ssp = 0; /* avoid warning */
744 esp = 0; /* avoid warning */
750 /* XXX: check that enough room is available */
751 push_size = 6 + (new_stack << 2) + (has_error_code << 1);
752 if (env->eflags & VM_MASK)
758 if (env->eflags & VM_MASK) {
759 PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
760 PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
761 PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
762 PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
764 PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
765 PUSHL(ssp, esp, sp_mask, ESP);
767 PUSHL(ssp, esp, sp_mask, compute_eflags());
768 PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
769 PUSHL(ssp, esp, sp_mask, old_eip);
770 if (has_error_code) {
771 PUSHL(ssp, esp, sp_mask, error_code);
775 if (env->eflags & VM_MASK) {
776 PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
777 PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
778 PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
779 PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
781 PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
782 PUSHW(ssp, esp, sp_mask, ESP);
784 PUSHW(ssp, esp, sp_mask, compute_eflags());
785 PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
786 PUSHW(ssp, esp, sp_mask, old_eip);
787 if (has_error_code) {
788 PUSHW(ssp, esp, sp_mask, error_code);
793 if (env->eflags & VM_MASK) {
794 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
795 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
796 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
797 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
799 ss = (ss & ~3) | dpl;
800 cpu_x86_load_seg_cache(env, R_SS, ss,
801 ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
803 SET_ESP(esp, sp_mask);
805 selector = (selector & ~3) | dpl;
806 cpu_x86_load_seg_cache(env, R_CS, selector,
807 get_seg_base(e1, e2),
808 get_seg_limit(e1, e2),
810 cpu_x86_set_cpl(env, dpl);
813 /* an interrupt gate clears the IF flag */
814 if ((type & 1) == 0) {
815 env->eflags &= ~IF_MASK;
817 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
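/* 64-bit (long mode) variants: IDT entries are 16 bytes, stack pushes are
   always 8 bytes wide, and the 64-bit TSS supplies RSP0-RSP2 and IST1-IST7
   stacks instead of SS:ESP pairs. */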
822 #define PUSHQ(sp, val)\
825 stq_kernel(sp, (val));\
828 #define POPQ(sp, val)\
830 val = ldq_kernel(sp);\
834 static inline target_ulong get_rsp_from_tss(int level)
839 printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
840 env->tr.base, env->tr.limit);
843 if (!(env->tr.flags & DESC_P_MASK))
844 cpu_abort(env, "invalid tss");
845 index = 8 * level + 4;
846 if ((index + 7) > env->tr.limit)
847 raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
848 return ldq_kernel(env->tr.base + index);
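/* Note: callers pass level = dpl (0-2) for RSP0-RSP2 or level = ist + 3 for
   IST1-IST7; index = 8 * level + 4 matches the 64-bit TSS layout, where
   these 8-byte entries start at offset 4. */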
851 /* 64 bit interrupt */
852 static void do_interrupt64(int intno, int is_int, int error_code,
853 target_ulong next_eip, int is_hw)
857 int type, dpl, selector, cpl, ist;
858 int has_error_code, new_stack;
859 uint32_t e1, e2, e3, ss;
860 target_ulong old_eip, esp, offset;
861 int svm_should_check = 1;
863 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
865 svm_should_check = 0;
868 && INTERCEPTEDl(_exceptions, 1 << intno)
870 raise_interrupt(intno, is_int, error_code, 0);
873 if (!is_int && !is_hw) {
892 if (intno * 16 + 15 > dt->limit)
893 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
894 ptr = dt->base + intno * 16;
895 e1 = ldl_kernel(ptr);
896 e2 = ldl_kernel(ptr + 4);
897 e3 = ldl_kernel(ptr + 8);
898 /* check gate type */
899 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
901 case 14: /* 386 interrupt gate */
902 case 15: /* 386 trap gate */
905 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
908 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
909 cpl = env->hflags & HF_CPL_MASK;
910 /* check privilege if software int */
911 if (is_int && dpl < cpl)
912 raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
913 /* check valid bit */
914 if (!(e2 & DESC_P_MASK))
915 raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
917 offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
919 if ((selector & 0xfffc) == 0)
920 raise_exception_err(EXCP0D_GPF, 0);
922 if (load_segment(&e1, &e2, selector) != 0)
923 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
924 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
925 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
926 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
928 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
929 if (!(e2 & DESC_P_MASK))
930 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
931 if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
932 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
933 if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
934 /* to inner privilege */
936 esp = get_rsp_from_tss(ist + 3);
938 esp = get_rsp_from_tss(dpl);
939 esp &= ~0xfLL; /* align stack */
942 } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
943 /* to same privilege */
944 if (env->eflags & VM_MASK)
945 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
948 esp = get_rsp_from_tss(ist + 3);
951 esp &= ~0xfLL; /* align stack */
954 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
955 new_stack = 0; /* avoid warning */
956 esp = 0; /* avoid warning */
959 PUSHQ(esp, env->segs[R_SS].selector);
961 PUSHQ(esp, compute_eflags());
962 PUSHQ(esp, env->segs[R_CS].selector);
964 if (has_error_code) {
965 PUSHQ(esp, error_code);
970 cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
974 selector = (selector & ~3) | dpl;
975 cpu_x86_load_seg_cache(env, R_CS, selector,
976 get_seg_base(e1, e2),
977 get_seg_limit(e1, e2),
979 cpu_x86_set_cpl(env, dpl);
982 /* an interrupt gate clears the IF flag */
983 if ((type & 1) == 0) {
984 env->eflags &= ~IF_MASK;
986 env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
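/* SYSCALL: the new CS/SS selectors come from MSR_STAR bits 47:32. In long
   mode the return RIP is saved in RCX and RFLAGS in R11, RFLAGS is masked
   with the SYSCALL flag-mask MSR (env->fmask) and RIP is loaded from
   MSR_LSTAR (or MSR_CSTAR for a compatibility-mode caller); in legacy mode
   EIP comes from STAR[31:0]. */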
990 #if defined(CONFIG_USER_ONLY)
991 void helper_syscall(int next_eip_addend)
993 env->exception_index = EXCP_SYSCALL;
994 env->exception_next_eip = env->eip + next_eip_addend;
998 void helper_syscall(int next_eip_addend)
1002 if (!(env->efer & MSR_EFER_SCE)) {
1003 raise_exception_err(EXCP06_ILLOP, 0);
1005 selector = (env->star >> 32) & 0xffff;
1006 #ifdef TARGET_X86_64
1007 if (env->hflags & HF_LMA_MASK) {
1010 ECX = env->eip + next_eip_addend;
1011 env->regs[11] = compute_eflags();
1013 code64 = env->hflags & HF_CS64_MASK;
1015 cpu_x86_set_cpl(env, 0);
1016 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1018 DESC_G_MASK | DESC_P_MASK |
1020 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
1021 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1023 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1025 DESC_W_MASK | DESC_A_MASK);
1026 env->eflags &= ~env->fmask;
1027 load_eflags(env->eflags, 0);
1029 env->eip = env->lstar;
1031 env->eip = env->cstar;
1035 ECX = (uint32_t)(env->eip + next_eip_addend);
1037 cpu_x86_set_cpl(env, 0);
1038 cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
1040 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1042 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1043 cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
1045 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1047 DESC_W_MASK | DESC_A_MASK);
1048 env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
1049 env->eip = (uint32_t)env->star;
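/* SYSRET: returns to CPL 3. The CS selector comes from MSR_STAR bits 63:48
   (+16 for a 64-bit return), SS is that selector + 8; in long mode RFLAGS is
   restored from R11, in legacy mode only IF is set again. */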
1054 void helper_sysret(int dflag)
1058 if (!(env->efer & MSR_EFER_SCE)) {
1059 raise_exception_err(EXCP06_ILLOP, 0);
1061 cpl = env->hflags & HF_CPL_MASK;
1062 if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
1063 raise_exception_err(EXCP0D_GPF, 0);
1065 selector = (env->star >> 48) & 0xffff;
1066 #ifdef TARGET_X86_64
1067 if (env->hflags & HF_LMA_MASK) {
1069 cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
1071 DESC_G_MASK | DESC_P_MASK |
1072 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1073 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
1077 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1079 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1080 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1081 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1082 env->eip = (uint32_t)ECX;
1084 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1086 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1087 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1088 DESC_W_MASK | DESC_A_MASK);
1089 load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
1090 IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
1091 cpu_x86_set_cpl(env, 3);
1095 cpu_x86_load_seg_cache(env, R_CS, selector | 3,
1097 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1098 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1099 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
1100 env->eip = (uint32_t)ECX;
1101 cpu_x86_load_seg_cache(env, R_SS, selector + 8,
1103 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
1104 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
1105 DESC_W_MASK | DESC_A_MASK);
1106 env->eflags |= IF_MASK;
1107 cpu_x86_set_cpl(env, 3);
1110 if (kqemu_is_ok(env)) {
1111 if (env->hflags & HF_LMA_MASK)
1112 CC_OP = CC_OP_EFLAGS;
1113 env->exception_index = -1;
1119 /* real mode interrupt */
1120 static void do_interrupt_real(int intno, int is_int, int error_code,
1121 unsigned int next_eip)
1124 target_ulong ptr, ssp;
1126 uint32_t offset, esp;
1127 uint32_t old_cs, old_eip;
1128 int svm_should_check = 1;
1130 if ((env->intercept & INTERCEPT_SVM_MASK) && !is_int && next_eip==-1) {
1132 svm_should_check = 0;
1134 if (svm_should_check
1135 && INTERCEPTEDl(_exceptions, 1 << intno)
1137 raise_interrupt(intno, is_int, error_code, 0);
1139 /* real mode (simpler !) */
1141 if (intno * 4 + 3 > dt->limit)
1142 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
1143 ptr = dt->base + intno * 4;
1144 offset = lduw_kernel(ptr);
1145 selector = lduw_kernel(ptr + 2);
1147 ssp = env->segs[R_SS].base;
1152 old_cs = env->segs[R_CS].selector;
1153 /* XXX: use SS segment size ? */
1154 PUSHW(ssp, esp, 0xffff, compute_eflags());
1155 PUSHW(ssp, esp, 0xffff, old_cs);
1156 PUSHW(ssp, esp, 0xffff, old_eip);
1158 /* update processor state */
1159 ESP = (ESP & ~0xffff) | (esp & 0xffff);
1161 env->segs[R_CS].selector = selector;
1162 env->segs[R_CS].base = (selector << 4);
1163 env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
1166 /* fake user mode interrupt */
1167 void do_interrupt_user(int intno, int is_int, int error_code,
1168 target_ulong next_eip)
1172 int dpl, cpl, shift;
1176 if (env->hflags & HF_LMA_MASK) {
1181 ptr = dt->base + (intno << shift);
1182 e2 = ldl_kernel(ptr + 4);
1184 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
1185 cpl = env->hflags & HF_CPL_MASK;
1186 /* check privilege if software int */
1187 if (is_int && dpl < cpl)
1188 raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);
1190 /* Since we emulate only user space, we cannot do more than
1191 exit the emulation with the suitable exception and error
1198 * Begin execution of an interrupt. is_int is TRUE if coming from
1199 * the int instruction. next_eip is the EIP value AFTER the interrupt
1200 * instruction. It is only relevant if is_int is TRUE.
1202 void do_interrupt(int intno, int is_int, int error_code,
1203 target_ulong next_eip, int is_hw)
1205 if (loglevel & CPU_LOG_INT) {
1206 if ((env->cr[0] & CR0_PE_MASK)) {
1208 fprintf(logfile, "%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
1209 count, intno, error_code, is_int,
1210 env->hflags & HF_CPL_MASK,
1211 env->segs[R_CS].selector, EIP,
1212 (int)env->segs[R_CS].base + EIP,
1213 env->segs[R_SS].selector, ESP);
1214 if (intno == 0x0e) {
1215 fprintf(logfile, " CR2=" TARGET_FMT_lx, env->cr[2]);
1217 fprintf(logfile, " EAX=" TARGET_FMT_lx, EAX);
1219 fprintf(logfile, "\n");
1220 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1225 fprintf(logfile, " code=");
1226 ptr = env->segs[R_CS].base + env->eip;
1227 for(i = 0; i < 16; i++) {
1228 fprintf(logfile, " %02x", ldub(ptr + i));
1230 fprintf(logfile, "\n");
1236 if (env->cr[0] & CR0_PE_MASK) {
1238 if (env->hflags & HF_LMA_MASK) {
1239 do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
1243 do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
1246 do_interrupt_real(intno, is_int, error_code, next_eip);
1251 * Check nested exceptions and change to double or triple fault if
1252 * needed. It should only be called if this is not an interrupt.
1253 * Returns the new exception number.
1255 static int check_exception(int intno, int *error_code)
1257 int first_contributory = env->old_exception == 0 ||
1258 (env->old_exception >= 10 &&
1259 env->old_exception <= 13);
1260 int second_contributory = intno == 0 ||
1261 (intno >= 10 && intno <= 13);
1263 if (loglevel & CPU_LOG_INT)
1264 fprintf(logfile, "check_exception old: 0x%x new 0x%x\n",
1265 env->old_exception, intno);
1267 if (env->old_exception == EXCP08_DBLE)
1268 cpu_abort(env, "triple fault");
1270 if ((first_contributory && second_contributory)
1271 || (env->old_exception == EXCP0E_PAGE &&
1272 (second_contributory || (intno == EXCP0E_PAGE)))) {
1273 intno = EXCP08_DBLE;
1277 if (second_contributory || (intno == EXCP0E_PAGE) ||
1278 (intno == EXCP08_DBLE))
1279 env->old_exception = intno;
1285 * Signal an interrupt. It is executed in the main CPU loop.
1286 * is_int is TRUE if coming from the int instruction. next_eip is the
1287 * EIP value AFTER the interrupt instruction. It is only relevant if
1290 void raise_interrupt(int intno, int is_int, int error_code,
1291 int next_eip_addend)
1294 helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
1295 intno = check_exception(intno, &error_code);
1298 env->exception_index = intno;
1299 env->error_code = error_code;
1300 env->exception_is_int = is_int;
1301 env->exception_next_eip = env->eip + next_eip_addend;
1305 /* same as raise_exception_err, but do not restore global registers */
1306 static void raise_exception_err_norestore(int exception_index, int error_code)
1308 exception_index = check_exception(exception_index, &error_code);
1310 env->exception_index = exception_index;
1311 env->error_code = error_code;
1312 env->exception_is_int = 0;
1313 env->exception_next_eip = 0;
1314 longjmp(env->jmp_env, 1);
1317 /* shortcuts to generate exceptions */
1319 void (raise_exception_err)(int exception_index, int error_code)
1321 raise_interrupt(exception_index, 0, error_code, 0);
1324 void raise_exception(int exception_index)
1326 raise_interrupt(exception_index, 0, 0, 0);
1331 #if defined(CONFIG_USER_ONLY)
1333 void do_smm_enter(void)
1337 void helper_rsm(void)
1343 #ifdef TARGET_X86_64
1344 #define SMM_REVISION_ID 0x00020064
1346 #define SMM_REVISION_ID 0x00020000
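/* SMM entry saves the CPU state into SMRAM starting at SMBASE + 0x8000 and
   restarts execution at offset 0x8000 with a flat, real-mode-like segment
   setup; helper_rsm() reloads that saved state and leaves SMM. */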
1349 void do_smm_enter(void)
1351 target_ulong sm_state;
1355 if (loglevel & CPU_LOG_INT) {
1356 fprintf(logfile, "SMM: enter\n");
1357 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1360 env->hflags |= HF_SMM_MASK;
1361 cpu_smm_update(env);
1363 sm_state = env->smbase + 0x8000;
1365 #ifdef TARGET_X86_64
1366 for(i = 0; i < 6; i++) {
1368 offset = 0x7e00 + i * 16;
1369 stw_phys(sm_state + offset, dt->selector);
1370 stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
1371 stl_phys(sm_state + offset + 4, dt->limit);
1372 stq_phys(sm_state + offset + 8, dt->base);
1375 stq_phys(sm_state + 0x7e68, env->gdt.base);
1376 stl_phys(sm_state + 0x7e64, env->gdt.limit);
1378 stw_phys(sm_state + 0x7e70, env->ldt.selector);
1379 stq_phys(sm_state + 0x7e78, env->ldt.base);
1380 stl_phys(sm_state + 0x7e74, env->ldt.limit);
1381 stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);
1383 stq_phys(sm_state + 0x7e88, env->idt.base);
1384 stl_phys(sm_state + 0x7e84, env->idt.limit);
1386 stw_phys(sm_state + 0x7e90, env->tr.selector);
1387 stq_phys(sm_state + 0x7e98, env->tr.base);
1388 stl_phys(sm_state + 0x7e94, env->tr.limit);
1389 stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);
1391 stq_phys(sm_state + 0x7ed0, env->efer);
1393 stq_phys(sm_state + 0x7ff8, EAX);
1394 stq_phys(sm_state + 0x7ff0, ECX);
1395 stq_phys(sm_state + 0x7fe8, EDX);
1396 stq_phys(sm_state + 0x7fe0, EBX);
1397 stq_phys(sm_state + 0x7fd8, ESP);
1398 stq_phys(sm_state + 0x7fd0, EBP);
1399 stq_phys(sm_state + 0x7fc8, ESI);
1400 stq_phys(sm_state + 0x7fc0, EDI);
1401 for(i = 8; i < 16; i++)
1402 stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
1403 stq_phys(sm_state + 0x7f78, env->eip);
1404 stl_phys(sm_state + 0x7f70, compute_eflags());
1405 stl_phys(sm_state + 0x7f68, env->dr[6]);
1406 stl_phys(sm_state + 0x7f60, env->dr[7]);
1408 stl_phys(sm_state + 0x7f48, env->cr[4]);
1409 stl_phys(sm_state + 0x7f50, env->cr[3]);
1410 stl_phys(sm_state + 0x7f58, env->cr[0]);
1412 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1413 stl_phys(sm_state + 0x7f00, env->smbase);
1415 stl_phys(sm_state + 0x7ffc, env->cr[0]);
1416 stl_phys(sm_state + 0x7ff8, env->cr[3]);
1417 stl_phys(sm_state + 0x7ff4, compute_eflags());
1418 stl_phys(sm_state + 0x7ff0, env->eip);
1419 stl_phys(sm_state + 0x7fec, EDI);
1420 stl_phys(sm_state + 0x7fe8, ESI);
1421 stl_phys(sm_state + 0x7fe4, EBP);
1422 stl_phys(sm_state + 0x7fe0, ESP);
1423 stl_phys(sm_state + 0x7fdc, EBX);
1424 stl_phys(sm_state + 0x7fd8, EDX);
1425 stl_phys(sm_state + 0x7fd4, ECX);
1426 stl_phys(sm_state + 0x7fd0, EAX);
1427 stl_phys(sm_state + 0x7fcc, env->dr[6]);
1428 stl_phys(sm_state + 0x7fc8, env->dr[7]);
1430 stl_phys(sm_state + 0x7fc4, env->tr.selector);
1431 stl_phys(sm_state + 0x7f64, env->tr.base);
1432 stl_phys(sm_state + 0x7f60, env->tr.limit);
1433 stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);
1435 stl_phys(sm_state + 0x7fc0, env->ldt.selector);
1436 stl_phys(sm_state + 0x7f80, env->ldt.base);
1437 stl_phys(sm_state + 0x7f7c, env->ldt.limit);
1438 stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);
1440 stl_phys(sm_state + 0x7f74, env->gdt.base);
1441 stl_phys(sm_state + 0x7f70, env->gdt.limit);
1443 stl_phys(sm_state + 0x7f58, env->idt.base);
1444 stl_phys(sm_state + 0x7f54, env->idt.limit);
1446 for(i = 0; i < 6; i++) {
1449 offset = 0x7f84 + i * 12;
1451 offset = 0x7f2c + (i - 3) * 12;
1452 stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
1453 stl_phys(sm_state + offset + 8, dt->base);
1454 stl_phys(sm_state + offset + 4, dt->limit);
1455 stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
1457 stl_phys(sm_state + 0x7f14, env->cr[4]);
1459 stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
1460 stl_phys(sm_state + 0x7ef8, env->smbase);
1462 /* init SMM cpu state */
1464 #ifdef TARGET_X86_64
1466 env->hflags &= ~HF_LMA_MASK;
1468 load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1469 env->eip = 0x00008000;
1470 cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
1472 cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
1473 cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
1474 cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
1475 cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
1476 cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);
1478 cpu_x86_update_cr0(env,
1479 env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
1480 cpu_x86_update_cr4(env, 0);
1481 env->dr[7] = 0x00000400;
1482 CC_OP = CC_OP_EFLAGS;
1485 void helper_rsm(void)
1487 target_ulong sm_state;
1491 sm_state = env->smbase + 0x8000;
1492 #ifdef TARGET_X86_64
1493 env->efer = ldq_phys(sm_state + 0x7ed0);
1494 if (env->efer & MSR_EFER_LMA)
1495 env->hflags |= HF_LMA_MASK;
1497 env->hflags &= ~HF_LMA_MASK;
1499 for(i = 0; i < 6; i++) {
1500 offset = 0x7e00 + i * 16;
1501 cpu_x86_load_seg_cache(env, i,
1502 lduw_phys(sm_state + offset),
1503 ldq_phys(sm_state + offset + 8),
1504 ldl_phys(sm_state + offset + 4),
1505 (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
1508 env->gdt.base = ldq_phys(sm_state + 0x7e68);
1509 env->gdt.limit = ldl_phys(sm_state + 0x7e64);
1511 env->ldt.selector = lduw_phys(sm_state + 0x7e70);
1512 env->ldt.base = ldq_phys(sm_state + 0x7e78);
1513 env->ldt.limit = ldl_phys(sm_state + 0x7e74);
1514 env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;
1516 env->idt.base = ldq_phys(sm_state + 0x7e88);
1517 env->idt.limit = ldl_phys(sm_state + 0x7e84);
1519 env->tr.selector = lduw_phys(sm_state + 0x7e90);
1520 env->tr.base = ldq_phys(sm_state + 0x7e98);
1521 env->tr.limit = ldl_phys(sm_state + 0x7e94);
1522 env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;
1524 EAX = ldq_phys(sm_state + 0x7ff8);
1525 ECX = ldq_phys(sm_state + 0x7ff0);
1526 EDX = ldq_phys(sm_state + 0x7fe8);
1527 EBX = ldq_phys(sm_state + 0x7fe0);
1528 ESP = ldq_phys(sm_state + 0x7fd8);
1529 EBP = ldq_phys(sm_state + 0x7fd0);
1530 ESI = ldq_phys(sm_state + 0x7fc8);
1531 EDI = ldq_phys(sm_state + 0x7fc0);
1532 for(i = 8; i < 16; i++)
1533 env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
1534 env->eip = ldq_phys(sm_state + 0x7f78);
1535 load_eflags(ldl_phys(sm_state + 0x7f70),
1536 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1537 env->dr[6] = ldl_phys(sm_state + 0x7f68);
1538 env->dr[7] = ldl_phys(sm_state + 0x7f60);
1540 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
1541 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
1542 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));
1544 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1545 if (val & 0x20000) {
1546 env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
1549 cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
1550 cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
1551 load_eflags(ldl_phys(sm_state + 0x7ff4),
1552 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
1553 env->eip = ldl_phys(sm_state + 0x7ff0);
1554 EDI = ldl_phys(sm_state + 0x7fec);
1555 ESI = ldl_phys(sm_state + 0x7fe8);
1556 EBP = ldl_phys(sm_state + 0x7fe4);
1557 ESP = ldl_phys(sm_state + 0x7fe0);
1558 EBX = ldl_phys(sm_state + 0x7fdc);
1559 EDX = ldl_phys(sm_state + 0x7fd8);
1560 ECX = ldl_phys(sm_state + 0x7fd4);
1561 EAX = ldl_phys(sm_state + 0x7fd0);
1562 env->dr[6] = ldl_phys(sm_state + 0x7fcc);
1563 env->dr[7] = ldl_phys(sm_state + 0x7fc8);
1565 env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
1566 env->tr.base = ldl_phys(sm_state + 0x7f64);
1567 env->tr.limit = ldl_phys(sm_state + 0x7f60);
1568 env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;
1570 env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
1571 env->ldt.base = ldl_phys(sm_state + 0x7f80);
1572 env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
1573 env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;
1575 env->gdt.base = ldl_phys(sm_state + 0x7f74);
1576 env->gdt.limit = ldl_phys(sm_state + 0x7f70);
1578 env->idt.base = ldl_phys(sm_state + 0x7f58);
1579 env->idt.limit = ldl_phys(sm_state + 0x7f54);
1581 for(i = 0; i < 6; i++) {
1583 offset = 0x7f84 + i * 12;
1585 offset = 0x7f2c + (i - 3) * 12;
1586 cpu_x86_load_seg_cache(env, i,
1587 ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
1588 ldl_phys(sm_state + offset + 8),
1589 ldl_phys(sm_state + offset + 4),
1590 (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
1592 cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));
1594 val = ldl_phys(sm_state + 0x7efc); /* revision ID */
1595 if (val & 0x20000) {
1596 env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
1599 CC_OP = CC_OP_EFLAGS;
1600 env->hflags &= ~HF_SMM_MASK;
1601 cpu_smm_update(env);
1603 if (loglevel & CPU_LOG_INT) {
1604 fprintf(logfile, "SMM: after RSM\n");
1605 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
1609 #endif /* !CONFIG_USER_ONLY */
1612 #ifdef BUGGY_GCC_DIV64
1613 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
1614 call it from another function */
1615 uint32_t div32(uint64_t *q_ptr, uint64_t num, uint32_t den)
1621 int32_t idiv32(int64_t *q_ptr, int64_t num, int32_t den)
1628 /* division, flags are undefined */
1630 void helper_divb_AL(target_ulong t0)
1632 unsigned int num, den, q, r;
1634 num = (EAX & 0xffff);
1637 raise_exception(EXCP00_DIVZ);
1641 raise_exception(EXCP00_DIVZ);
1643 r = (num % den) & 0xff;
1644 EAX = (EAX & ~0xffff) | (r << 8) | q;
1647 void helper_idivb_AL(target_ulong t0)
1654 raise_exception(EXCP00_DIVZ);
1658 raise_exception(EXCP00_DIVZ);
1660 r = (num % den) & 0xff;
1661 EAX = (EAX & ~0xffff) | (r << 8) | q;
1664 void helper_divw_AX(target_ulong t0)
1666 unsigned int num, den, q, r;
1668 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1669 den = (t0 & 0xffff);
1671 raise_exception(EXCP00_DIVZ);
1675 raise_exception(EXCP00_DIVZ);
1677 r = (num % den) & 0xffff;
1678 EAX = (EAX & ~0xffff) | q;
1679 EDX = (EDX & ~0xffff) | r;
1682 void helper_idivw_AX(target_ulong t0)
1686 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1689 raise_exception(EXCP00_DIVZ);
1692 if (q != (int16_t)q)
1693 raise_exception(EXCP00_DIVZ);
1695 r = (num % den) & 0xffff;
1696 EAX = (EAX & ~0xffff) | q;
1697 EDX = (EDX & ~0xffff) | r;
1700 void helper_divl_EAX(target_ulong t0)
1702 unsigned int den, r;
1705 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1708 raise_exception(EXCP00_DIVZ);
1710 #ifdef BUGGY_GCC_DIV64
1711 r = div32(&q, num, den);
1717 raise_exception(EXCP00_DIVZ);
1722 void helper_idivl_EAX(target_ulong t0)
1727 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1730 raise_exception(EXCP00_DIVZ);
1732 #ifdef BUGGY_GCC_DIV64
1733 r = idiv32(&q, num, den);
1738 if (q != (int32_t)q)
1739 raise_exception(EXCP00_DIVZ);
1746 /* XXX: exception */
1747 void helper_aam(int base)
1753 EAX = (EAX & ~0xffff) | al | (ah << 8);
1757 void helper_aad(int base)
1761 ah = (EAX >> 8) & 0xff;
1762 al = ((ah * base) + al) & 0xff;
1763 EAX = (EAX & ~0xffff) | al;
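/* ASCII/BCD adjust helpers: AAA/AAS correct AL after a decimal add/subtract
   when the low nibble exceeds 9 or AF is set, propagating a carry/borrow
   into AH and updating AF and CF; DAA/DAS do the same for packed BCD in AL. */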
1767 void helper_aaa(void)
1773 eflags = cc_table[CC_OP].compute_all();
1776 ah = (EAX >> 8) & 0xff;
1778 icarry = (al > 0xf9);
1779 if (((al & 0x0f) > 9 ) || af) {
1780 al = (al + 6) & 0x0f;
1781 ah = (ah + 1 + icarry) & 0xff;
1782 eflags |= CC_C | CC_A;
1784 eflags &= ~(CC_C | CC_A);
1787 EAX = (EAX & ~0xffff) | al | (ah << 8);
1792 void helper_aas(void)
1798 eflags = cc_table[CC_OP].compute_all();
1801 ah = (EAX >> 8) & 0xff;
1804 if (((al & 0x0f) > 9 ) || af) {
1805 al = (al - 6) & 0x0f;
1806 ah = (ah - 1 - icarry) & 0xff;
1807 eflags |= CC_C | CC_A;
1809 eflags &= ~(CC_C | CC_A);
1812 EAX = (EAX & ~0xffff) | al | (ah << 8);
1817 void helper_daa(void)
1822 eflags = cc_table[CC_OP].compute_all();
1828 if (((al & 0x0f) > 9 ) || af) {
1829 al = (al + 6) & 0xff;
1832 if ((al > 0x9f) || cf) {
1833 al = (al + 0x60) & 0xff;
1836 EAX = (EAX & ~0xff) | al;
1837 /* well, speed is not an issue here, so we compute the flags by hand */
1838 eflags |= (al == 0) << 6; /* zf */
1839 eflags |= parity_table[al]; /* pf */
1840 eflags |= (al & 0x80); /* sf */
1845 void helper_das(void)
1847 int al, al1, af, cf;
1850 eflags = cc_table[CC_OP].compute_all();
1857 if (((al & 0x0f) > 9 ) || af) {
1861 al = (al - 6) & 0xff;
1863 if ((al1 > 0x99) || cf) {
1864 al = (al - 0x60) & 0xff;
1867 EAX = (EAX & ~0xff) | al;
1868 /* well, speed is not an issue here, so we compute the flags by hand */
1869 eflags |= (al == 0) << 6; /* zf */
1870 eflags |= parity_table[al]; /* pf */
1871 eflags |= (al & 0x80); /* sf */
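/* CMPXCHG8B: compare EDX:EAX with the 64-bit operand at a0; if equal, store
   ECX:EBX there and set ZF, otherwise load the operand into EDX:EAX and
   clear ZF. */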
1876 void helper_cmpxchg8b(target_ulong a0)
1881 eflags = cc_table[CC_OP].compute_all();
1883 if (d == (((uint64_t)EDX << 32) | EAX)) {
1884 stq(a0, ((uint64_t)ECX << 32) | EBX);
1887 EDX = (uint32_t)(d >> 32);
1894 void helper_single_step(void)
1896 env->dr[6] |= 0x4000;
1897 raise_exception(EXCP01_SSTP);
1900 void helper_cpuid(void)
1903 index = (uint32_t)EAX;
1905 /* test if maximum index reached */
1906 if (index & 0x80000000) {
1907 if (index > env->cpuid_xlevel)
1908 index = env->cpuid_level;
1910 if (index > env->cpuid_level)
1911 index = env->cpuid_level;
1916 EAX = env->cpuid_level;
1917 EBX = env->cpuid_vendor1;
1918 EDX = env->cpuid_vendor2;
1919 ECX = env->cpuid_vendor3;
1922 EAX = env->cpuid_version;
1923 EBX = (env->cpuid_apic_id << 24) | 8 << 8; /* CLFLUSH size in quad words, Linux wants it. */
1924 ECX = env->cpuid_ext_features;
1925 EDX = env->cpuid_features;
1928 /* cache info: needed for Pentium Pro compatibility */
1935 EAX = env->cpuid_xlevel;
1936 EBX = env->cpuid_vendor1;
1937 EDX = env->cpuid_vendor2;
1938 ECX = env->cpuid_vendor3;
1941 EAX = env->cpuid_features;
1943 ECX = env->cpuid_ext3_features;
1944 EDX = env->cpuid_ext2_features;
1949 EAX = env->cpuid_model[(index - 0x80000002) * 4 + 0];
1950 EBX = env->cpuid_model[(index - 0x80000002) * 4 + 1];
1951 ECX = env->cpuid_model[(index - 0x80000002) * 4 + 2];
1952 EDX = env->cpuid_model[(index - 0x80000002) * 4 + 3];
1955 /* cache info (L1 cache) */
1962 /* cache info (L2 cache) */
1969 /* virtual & phys address size in low 2 bytes. */
1970 /* XXX: This value must match the one used in the MMU code. */
1971 #if defined(TARGET_X86_64)
1972 # if defined(USE_KQEMU)
1973 EAX = 0x00003020; /* 48 bits virtual, 32 bits physical */
1975 /* XXX: The physical address space is limited to 42 bits in exec.c. */
1976 EAX = 0x00003028; /* 48 bits virtual, 40 bits physical */
1979 # if defined(USE_KQEMU)
1980 EAX = 0x00000020; /* 32 bits physical */
1982 EAX = 0x00000024; /* 36 bits physical */
1996 /* reserved values: zero */
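/* ENTER with a non-zero nesting level: copy the chain of enclosing frame
   pointers into the new stack frame, then push the new frame pointer passed
   in t1. */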
2005 void helper_enter_level(int level, int data32, target_ulong t1)
2008 uint32_t esp_mask, esp, ebp;
2010 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2011 ssp = env->segs[R_SS].base;
2020 stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
2023 stl(ssp + (esp & esp_mask), t1);
2030 stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
2033 stw(ssp + (esp & esp_mask), t1);
2037 #ifdef TARGET_X86_64
2038 void helper_enter64_level(int level, int data64, target_ulong t1)
2040 target_ulong esp, ebp;
2060 stw(esp, lduw(ebp));
2068 void helper_lldt(int selector)
2072 int index, entry_limit;
2076 if ((selector & 0xfffc) == 0) {
2077 /* XXX: NULL selector case: invalid LDT */
2082 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2084 index = selector & ~7;
2085 #ifdef TARGET_X86_64
2086 if (env->hflags & HF_LMA_MASK)
2091 if ((index + entry_limit) > dt->limit)
2092 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2093 ptr = dt->base + index;
2094 e1 = ldl_kernel(ptr);
2095 e2 = ldl_kernel(ptr + 4);
2096 if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
2097 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2098 if (!(e2 & DESC_P_MASK))
2099 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2100 #ifdef TARGET_X86_64
2101 if (env->hflags & HF_LMA_MASK) {
2103 e3 = ldl_kernel(ptr + 8);
2104 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2105 env->ldt.base |= (target_ulong)e3 << 32;
2109 load_seg_cache_raw_dt(&env->ldt, e1, e2);
2112 env->ldt.selector = selector;
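/* LTR: like LLDT but for the task register; the descriptor must be an
   available 16-bit or 32-bit TSS (type 1 or 9), and its busy bit is set in
   the descriptor table on success. */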
2115 void helper_ltr(int selector)
2119 int index, type, entry_limit;
2123 if ((selector & 0xfffc) == 0) {
2124 /* NULL selector case: invalid TR */
2130 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2132 index = selector & ~7;
2133 #ifdef TARGET_X86_64
2134 if (env->hflags & HF_LMA_MASK)
2139 if ((index + entry_limit) > dt->limit)
2140 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2141 ptr = dt->base + index;
2142 e1 = ldl_kernel(ptr);
2143 e2 = ldl_kernel(ptr + 4);
2144 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2145 if ((e2 & DESC_S_MASK) ||
2146 (type != 1 && type != 9))
2147 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2148 if (!(e2 & DESC_P_MASK))
2149 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2150 #ifdef TARGET_X86_64
2151 if (env->hflags & HF_LMA_MASK) {
2153 e3 = ldl_kernel(ptr + 8);
2154 e4 = ldl_kernel(ptr + 12);
2155 if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
2156 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2157 load_seg_cache_raw_dt(&env->tr, e1, e2);
2158 env->tr.base |= (target_ulong)e3 << 32;
2162 load_seg_cache_raw_dt(&env->tr, e1, e2);
2164 e2 |= DESC_TSS_BUSY_MASK;
2165 stl_kernel(ptr + 4, e2);
2167 env->tr.selector = selector;
2170 /* only works in protected mode and not in VM86 mode. seg_reg must be != R_CS */
2171 void helper_load_seg(int seg_reg, int selector)
2180 cpl = env->hflags & HF_CPL_MASK;
2181 if ((selector & 0xfffc) == 0) {
2182 /* null selector case */
2184 #ifdef TARGET_X86_64
2185 && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
2188 raise_exception_err(EXCP0D_GPF, 0);
2189 cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
2196 index = selector & ~7;
2197 if ((index + 7) > dt->limit)
2198 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2199 ptr = dt->base + index;
2200 e1 = ldl_kernel(ptr);
2201 e2 = ldl_kernel(ptr + 4);
2203 if (!(e2 & DESC_S_MASK))
2204 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2206 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2207 if (seg_reg == R_SS) {
2208 /* must be writable segment */
2209 if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
2210 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2211 if (rpl != cpl || dpl != cpl)
2212 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2214 /* must be readable segment */
2215 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
2216 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2218 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2219 /* if not conforming code, test rights */
2220 if (dpl < cpl || dpl < rpl)
2221 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2225 if (!(e2 & DESC_P_MASK)) {
2226 if (seg_reg == R_SS)
2227 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
2229 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2232 /* set the access bit if not already set */
2233 if (!(e2 & DESC_A_MASK)) {
2235 stl_kernel(ptr + 4, e2);
2238 cpu_x86_load_seg_cache(env, seg_reg, selector,
2239 get_seg_base(e1, e2),
2240 get_seg_limit(e1, e2),
2243 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
2244 selector, (unsigned long)sc->base, sc->limit, sc->flags);
2249 /* protected mode jump */
2250 void helper_ljmp_protected(int new_cs, target_ulong new_eip,
2251 int next_eip_addend)
2254 uint32_t e1, e2, cpl, dpl, rpl, limit;
2255 target_ulong next_eip;
2257 if ((new_cs & 0xfffc) == 0)
2258 raise_exception_err(EXCP0D_GPF, 0);
2259 if (load_segment(&e1, &e2, new_cs) != 0)
2260 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2261 cpl = env->hflags & HF_CPL_MASK;
2262 if (e2 & DESC_S_MASK) {
2263 if (!(e2 & DESC_CS_MASK))
2264 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2265 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2266 if (e2 & DESC_C_MASK) {
2267 /* conforming code segment */
2269 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2271 /* non-conforming code segment */
2274 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2276 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2278 if (!(e2 & DESC_P_MASK))
2279 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2280 limit = get_seg_limit(e1, e2);
2281 if (new_eip > limit &&
2282 !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
2283 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2284 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2285 get_seg_base(e1, e2), limit, e2);
2288 /* jump to call or task gate */
2289 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2291 cpl = env->hflags & HF_CPL_MASK;
2292 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
2294 case 1: /* 286 TSS */
2295 case 9: /* 386 TSS */
2296 case 5: /* task gate */
2297 if (dpl < cpl || dpl < rpl)
2298 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2299 next_eip = env->eip + next_eip_addend;
2300 switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
2301 CC_OP = CC_OP_EFLAGS;
2303 case 4: /* 286 call gate */
2304 case 12: /* 386 call gate */
2305 if ((dpl < cpl) || (dpl < rpl))
2306 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2307 if (!(e2 & DESC_P_MASK))
2308 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2310 new_eip = (e1 & 0xffff);
2312 new_eip |= (e2 & 0xffff0000);
2313 if (load_segment(&e1, &e2, gate_cs) != 0)
2314 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2315 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2316 /* must be code segment */
2317 if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
2318 (DESC_S_MASK | DESC_CS_MASK)))
2319 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2320 if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
2321 (!(e2 & DESC_C_MASK) && (dpl != cpl)))
2322 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2323 if (!(e2 & DESC_P_MASK))
2324 raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
2325 limit = get_seg_limit(e1, e2);
2326 if (new_eip > limit)
2327 raise_exception_err(EXCP0D_GPF, 0);
2328 cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
2329 get_seg_base(e1, e2), limit, e2);
2333 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2339 /* real mode call */
2340 void helper_lcall_real(int new_cs, target_ulong new_eip1,
2341 int shift, int next_eip)
2344 uint32_t esp, esp_mask;
2349 esp_mask = get_sp_mask(env->segs[R_SS].flags);
2350 ssp = env->segs[R_SS].base;
2352 PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
2353 PUSHL(ssp, esp, esp_mask, next_eip);
2355 PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
2356 PUSHW(ssp, esp, esp_mask, next_eip);
2359 SET_ESP(esp, esp_mask);
2361 env->segs[R_CS].selector = new_cs;
2362 env->segs[R_CS].base = (new_cs << 4);
2365 /* protected mode call */
2366 void helper_lcall_protected(int new_cs, target_ulong new_eip,
2367 int shift, int next_eip_addend)
2370 uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
2371 uint32_t ss, ss_e1, ss_e2, sp, type, ss_dpl, sp_mask;
2372 uint32_t val, limit, old_sp_mask;
2373 target_ulong ssp, old_ssp, next_eip;
2375 next_eip = env->eip + next_eip_addend;
2377 if (loglevel & CPU_LOG_PCALL) {
2378 fprintf(logfile, "lcall %04x:%08x s=%d\n",
2379 new_cs, (uint32_t)new_eip, shift);
2380 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2383 if ((new_cs & 0xfffc) == 0)
2384 raise_exception_err(EXCP0D_GPF, 0);
2385 if (load_segment(&e1, &e2, new_cs) != 0)
2386 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2387 cpl = env->hflags & HF_CPL_MASK;
2389 if (loglevel & CPU_LOG_PCALL) {
2390 fprintf(logfile, "desc=%08x:%08x\n", e1, e2);
2393 if (e2 & DESC_S_MASK) {
2394 if (!(e2 & DESC_CS_MASK))
2395 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2396 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2397 if (e2 & DESC_C_MASK) {
2398 /* conforming code segment */
2400 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2402 /* non-conforming code segment */
2405 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2407 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2409 if (!(e2 & DESC_P_MASK))
2410 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2412 #ifdef TARGET_X86_64
2413 /* XXX: check 16/32 bit cases in long mode */
2418 PUSHQ(rsp, env->segs[R_CS].selector);
2419 PUSHQ(rsp, next_eip);
2420 /* from this point, not restartable */
2422 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2423 get_seg_base(e1, e2),
2424 get_seg_limit(e1, e2), e2);
2430 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2431 ssp = env->segs[R_SS].base;
2433 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2434 PUSHL(ssp, sp, sp_mask, next_eip);
2436 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2437 PUSHW(ssp, sp, sp_mask, next_eip);
2440 limit = get_seg_limit(e1, e2);
2441 if (new_eip > limit)
2442 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2443 /* from this point, not restartable */
2444 SET_ESP(sp, sp_mask);
2445 cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
2446 get_seg_base(e1, e2), limit, e2);
2450 /* check gate type */
2451 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
2452 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2455 case 1: /* available 286 TSS */
2456 case 9: /* available 386 TSS */
2457 case 5: /* task gate */
2458 if (dpl < cpl || dpl < rpl)
2459 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2460 switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
2461 CC_OP = CC_OP_EFLAGS;
2463 case 4: /* 286 call gate */
2464 case 12: /* 386 call gate */
2467 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2472 if (dpl < cpl || dpl < rpl)
2473 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2474 /* check valid bit */
2475 if (!(e2 & DESC_P_MASK))
2476 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2477 selector = e1 >> 16;
2478 offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
2479 param_count = e2 & 0x1f;
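    /* a call gate supplies the target selector in e1[31:16], the entry
       offset split between e1 and e2, and in the low 5 bits of e2 the
       number of parameters to copy to the new stack */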
2480 if ((selector & 0xfffc) == 0)
2481 raise_exception_err(EXCP0D_GPF, 0);
2483 if (load_segment(&e1, &e2, selector) != 0)
2484 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2485 if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
2486 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2487 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2489 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
2490 if (!(e2 & DESC_P_MASK))
2491 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
2493 if (!(e2 & DESC_C_MASK) && dpl < cpl) {
2494 /* to inner privilege */
2495 get_ss_esp_from_tss(&ss, &sp, dpl);
2497 if (loglevel & CPU_LOG_PCALL)
2498 fprintf(logfile, "new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
2499 ss, sp, param_count, ESP);
2501 if ((ss & 0xfffc) == 0)
2502 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2503 if ((ss & 3) != dpl)
2504 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2505 if (load_segment(&ss_e1, &ss_e2, ss) != 0)
2506 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2507 ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2509 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2510 if (!(ss_e2 & DESC_S_MASK) ||
2511 (ss_e2 & DESC_CS_MASK) ||
2512 !(ss_e2 & DESC_W_MASK))
2513 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2514 if (!(ss_e2 & DESC_P_MASK))
2515 raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
2517 // push_size = ((param_count * 2) + 8) << shift;
2519 old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
2520 old_ssp = env->segs[R_SS].base;
2522 sp_mask = get_sp_mask(ss_e2);
2523 ssp = get_seg_base(ss_e1, ss_e2);
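    /* push the caller's SS:ESP on the inner stack, then copy param_count
       parameters across from the old stack */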
2525 PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
2526 PUSHL(ssp, sp, sp_mask, ESP);
2527 for(i = param_count - 1; i >= 0; i--) {
2528 val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
2529 PUSHL(ssp, sp, sp_mask, val);
2532 PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
2533 PUSHW(ssp, sp, sp_mask, ESP);
2534 for(i = param_count - 1; i >= 0; i--) {
2535 val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
2536 PUSHW(ssp, sp, sp_mask, val);
2541 /* to same privilege */
2543 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2544 ssp = env->segs[R_SS].base;
2545 // push_size = (4 << shift);
2550 PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
2551 PUSHL(ssp, sp, sp_mask, next_eip);
2553 PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
2554 PUSHW(ssp, sp, sp_mask, next_eip);
2557 /* from this point, not restartable */
2560 ss = (ss & ~3) | dpl;
2561 cpu_x86_load_seg_cache(env, R_SS, ss,
2563 get_seg_limit(ss_e1, ss_e2),
2567 selector = (selector & ~3) | dpl;
2568 cpu_x86_load_seg_cache(env, R_CS, selector,
2569 get_seg_base(e1, e2),
2570 get_seg_limit(e1, e2),
2572 cpu_x86_set_cpl(env, dpl);
2573 SET_ESP(sp, sp_mask);
2577 if (kqemu_is_ok(env)) {
2578 env->exception_index = -1;
2584 /* real and vm86 mode iret */
2585 void helper_iret_real(int shift)
2587 uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
2591 sp_mask = 0xffff; /* XXXX: use SS segment size ? */
2593 ssp = env->segs[R_SS].base;
2596 POPL(ssp, sp, sp_mask, new_eip);
2597 POPL(ssp, sp, sp_mask, new_cs);
2599 POPL(ssp, sp, sp_mask, new_eflags);
2602 POPW(ssp, sp, sp_mask, new_eip);
2603 POPW(ssp, sp, sp_mask, new_cs);
2604 POPW(ssp, sp, sp_mask, new_eflags);
2606 ESP = (ESP & ~sp_mask) | (sp & sp_mask);
2607 load_seg_vm(R_CS, new_cs);
2609 if (env->eflags & VM_MASK)
2610 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
2612 eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
2614 eflags_mask &= 0xffff;
2615 load_eflags(new_eflags, eflags_mask);
2616 env->hflags &= ~HF_NMI_MASK;
2619 static inline void validate_seg(int seg_reg, int cpl)
2624 /* XXX: on x86_64, we do not want to nullify FS and GS because
2625 they may still contain a valid base. I would be interested to
2626 know how a real x86_64 CPU behaves */
2627 if ((seg_reg == R_FS || seg_reg == R_GS) &&
2628 (env->segs[seg_reg].selector & 0xfffc) == 0)
2631 e2 = env->segs[seg_reg].flags;
2632 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2633 if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
2634 /* data or non-conforming code segment */
2636 cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
2641 /* protected mode iret */
2642 static inline void helper_ret_protected(int shift, int is_iret, int addend)
2644 uint32_t new_cs, new_eflags, new_ss;
2645 uint32_t new_es, new_ds, new_fs, new_gs;
2646 uint32_t e1, e2, ss_e1, ss_e2;
2647 int cpl, dpl, rpl, eflags_mask, iopl;
2648 target_ulong ssp, sp, new_eip, new_esp, sp_mask;
2650 #ifdef TARGET_X86_64
2655 sp_mask = get_sp_mask(env->segs[R_SS].flags);
2657 ssp = env->segs[R_SS].base;
2658 new_eflags = 0; /* avoid warning */
2659 #ifdef TARGET_X86_64
2665 POPQ(sp, new_eflags);
2671 POPL(ssp, sp, sp_mask, new_eip);
2672 POPL(ssp, sp, sp_mask, new_cs);
2675 POPL(ssp, sp, sp_mask, new_eflags);
2676 if (new_eflags & VM_MASK)
2677 goto return_to_vm86;
2681 POPW(ssp, sp, sp_mask, new_eip);
2682 POPW(ssp, sp, sp_mask, new_cs);
2684 POPW(ssp, sp, sp_mask, new_eflags);
2687 if (loglevel & CPU_LOG_PCALL) {
2688 fprintf(logfile, "lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
2689 new_cs, new_eip, shift, addend);
2690 cpu_dump_state(env, logfile, fprintf, X86_DUMP_CCOP);
2693 if ((new_cs & 0xfffc) == 0)
2694 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2695 if (load_segment(&e1, &e2, new_cs) != 0)
2696 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2697 if (!(e2 & DESC_S_MASK) ||
2698 !(e2 & DESC_CS_MASK))
2699 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2700 cpl = env->hflags & HF_CPL_MASK;
2703 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2704 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
2705 if (e2 & DESC_C_MASK) {
2707 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2710 raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
2712 if (!(e2 & DESC_P_MASK))
2713 raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
2716 if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
2717 ((env->hflags & HF_CS64_MASK) && !is_iret))) {
2718 /* return to same privilege level */
2719 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2720 get_seg_base(e1, e2),
2721 get_seg_limit(e1, e2),
2724 /* return to different privilege level */
2725 #ifdef TARGET_X86_64
2734 POPL(ssp, sp, sp_mask, new_esp);
2735 POPL(ssp, sp, sp_mask, new_ss);
2739 POPW(ssp, sp, sp_mask, new_esp);
2740 POPW(ssp, sp, sp_mask, new_ss);
2743 if (loglevel & CPU_LOG_PCALL) {
2744 fprintf(logfile, "new ss:esp=%04x:" TARGET_FMT_lx "\n",
2748 if ((new_ss & 0xfffc) == 0) {
2749 #ifdef TARGET_X86_64
2750 /* NULL ss is allowed in long mode if cpl != 3 */
2751 /* XXX: test CS64 ? */
2752 if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
2753 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2755 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2756 DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
2757 DESC_W_MASK | DESC_A_MASK);
2758 ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
2762 raise_exception_err(EXCP0D_GPF, 0);
2765 if ((new_ss & 3) != rpl)
2766 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2767 if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
2768 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2769 if (!(ss_e2 & DESC_S_MASK) ||
2770 (ss_e2 & DESC_CS_MASK) ||
2771 !(ss_e2 & DESC_W_MASK))
2772 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2773 dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
2775 raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
2776 if (!(ss_e2 & DESC_P_MASK))
2777 raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
2778 cpu_x86_load_seg_cache(env, R_SS, new_ss,
2779 get_seg_base(ss_e1, ss_e2),
2780 get_seg_limit(ss_e1, ss_e2),
2784 cpu_x86_load_seg_cache(env, R_CS, new_cs,
2785 get_seg_base(e1, e2),
2786 get_seg_limit(e1, e2),
2788 cpu_x86_set_cpl(env, rpl);
2790 #ifdef TARGET_X86_64
2791 if (env->hflags & HF_CS64_MASK)
2795 sp_mask = get_sp_mask(ss_e2);
2797 /* validate data segments */
2798 validate_seg(R_ES, rpl);
2799 validate_seg(R_DS, rpl);
2800 validate_seg(R_FS, rpl);
2801 validate_seg(R_GS, rpl);
2805 SET_ESP(sp, sp_mask);
2808 /* NOTE: 'cpl' is the _old_ CPL */
2809 eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
2811 eflags_mask |= IOPL_MASK;
2812 iopl = (env->eflags >> IOPL_SHIFT) & 3;
2814 eflags_mask |= IF_MASK;
2816 eflags_mask &= 0xffff;
2817 load_eflags(new_eflags, eflags_mask);
2822 POPL(ssp, sp, sp_mask, new_esp);
2823 POPL(ssp, sp, sp_mask, new_ss);
2824 POPL(ssp, sp, sp_mask, new_es);
2825 POPL(ssp, sp, sp_mask, new_ds);
2826 POPL(ssp, sp, sp_mask, new_fs);
2827 POPL(ssp, sp, sp_mask, new_gs);
2829 /* modify processor state */
2830 load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
2831 IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
2832 load_seg_vm(R_CS, new_cs & 0xffff);
2833 cpu_x86_set_cpl(env, 3);
2834 load_seg_vm(R_SS, new_ss & 0xffff);
2835 load_seg_vm(R_ES, new_es & 0xffff);
2836 load_seg_vm(R_DS, new_ds & 0xffff);
2837 load_seg_vm(R_FS, new_fs & 0xffff);
2838 load_seg_vm(R_GS, new_gs & 0xffff);
2840 env->eip = new_eip & 0xffff;
2844 void helper_iret_protected(int shift, int next_eip)
2846 int tss_selector, type;
2849 /* specific case for TSS */
2850 if (env->eflags & NT_MASK) {
2851 #ifdef TARGET_X86_64
2852 if (env->hflags & HF_LMA_MASK)
2853 raise_exception_err(EXCP0D_GPF, 0);
2855 tss_selector = lduw_kernel(env->tr.base + 0);
2856 if (tss_selector & 4)
2857 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2858 if (load_segment(&e1, &e2, tss_selector) != 0)
2859 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2860 type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
2861 /* NOTE: we check both segment and busy TSS */
2863 raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
2864 switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
2866 helper_ret_protected(shift, 1, 0);
2868 env->hflags &= ~HF_NMI_MASK;
2870 if (kqemu_is_ok(env)) {
2871 CC_OP = CC_OP_EFLAGS;
2872 env->exception_index = -1;
2878 void helper_lret_protected(int shift, int addend)
2880 helper_ret_protected(shift, 0, addend);
2882 if (kqemu_is_ok(env)) {
2883 env->exception_index = -1;
2889 void helper_sysenter(void)
2891 if (env->sysenter_cs == 0) {
2892 raise_exception_err(EXCP0D_GPF, 0);
2894 env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
2895 cpu_x86_set_cpl(env, 0);
2896 cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
2898 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2900 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2901 cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
2903 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2905 DESC_W_MASK | DESC_A_MASK);
2906 ESP = env->sysenter_esp;
2907 EIP = env->sysenter_eip;
2910 void helper_sysexit(void)
2914 cpl = env->hflags & HF_CPL_MASK;
2915 if (env->sysenter_cs == 0 || cpl != 0) {
2916 raise_exception_err(EXCP0D_GPF, 0);
2918 cpu_x86_set_cpl(env, 3);
2919 cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
2921 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2922 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2923 DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
2924 cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
2926 DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
2927 DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
2928 DESC_W_MASK | DESC_A_MASK);
2932 if (kqemu_is_ok(env)) {
2933 env->exception_index = -1;
2939 void helper_movl_crN_T0(int reg, target_ulong t0)
2941 #if !defined(CONFIG_USER_ONLY)
2944 cpu_x86_update_cr0(env, t0);
2947 cpu_x86_update_cr3(env, t0);
2950 cpu_x86_update_cr4(env, t0);
2953 cpu_set_apic_tpr(env, t0);
2963 void helper_lmsw(target_ulong t0)
2965 /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
2966 if already set to one. */
2967 t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
2968 helper_movl_crN_T0(0, t0);
2971 void helper_clts(void)
2973 env->cr[0] &= ~CR0_TS_MASK;
2974 env->hflags &= ~HF_TS_MASK;
2977 #if !defined(CONFIG_USER_ONLY)
2978 target_ulong helper_movtl_T0_cr8(void)
2980 return cpu_get_apic_tpr(env);
2985 void helper_movl_drN_T0(int reg, target_ulong t0)
2990 void helper_invlpg(target_ulong addr)
2992 cpu_x86_flush_tlb(env, addr);
2995 void helper_rdtsc(void)
2999 if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3000 raise_exception(EXCP0D_GPF);
3002 val = cpu_get_tsc(env);
3003 EAX = (uint32_t)(val);
3004 EDX = (uint32_t)(val >> 32);
3007 void helper_rdpmc(void)
3009 if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
3010 raise_exception(EXCP0D_GPF);
3013 helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);
3015 /* currently unimplemented */
3016 raise_exception_err(EXCP06_ILLOP, 0);
3019 #if defined(CONFIG_USER_ONLY)
3020 void helper_wrmsr(void)
3024 void helper_rdmsr(void)
3028 void helper_wrmsr(void)
3032 val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
3034 switch((uint32_t)ECX) {
3035 case MSR_IA32_SYSENTER_CS:
3036 env->sysenter_cs = val & 0xffff;
3038 case MSR_IA32_SYSENTER_ESP:
3039 env->sysenter_esp = val;
3041 case MSR_IA32_SYSENTER_EIP:
3042 env->sysenter_eip = val;
3044 case MSR_IA32_APICBASE:
3045 cpu_set_apic_base(env, val);
3049 uint64_t update_mask;
3051 if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
3052 update_mask |= MSR_EFER_SCE;
3053 if (env->cpuid_ext2_features & CPUID_EXT2_LM)
3054 update_mask |= MSR_EFER_LME;
3055 if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
3056 update_mask |= MSR_EFER_FFXSR;
3057 if (env->cpuid_ext2_features & CPUID_EXT2_NX)
3058 update_mask |= MSR_EFER_NXE;
3059 env->efer = (env->efer & ~update_mask) |
3060 (val & update_mask);
3069 case MSR_VM_HSAVE_PA:
3070 env->vm_hsave = val;
3072 #ifdef TARGET_X86_64
3083 env->segs[R_FS].base = val;
3086 env->segs[R_GS].base = val;
3088 case MSR_KERNELGSBASE:
3089 env->kernelgsbase = val;
3093 /* XXX: exception ? */
3098 void helper_rdmsr(void)
3101 switch((uint32_t)ECX) {
3102 case MSR_IA32_SYSENTER_CS:
3103 val = env->sysenter_cs;
3105 case MSR_IA32_SYSENTER_ESP:
3106 val = env->sysenter_esp;
3108 case MSR_IA32_SYSENTER_EIP:
3109 val = env->sysenter_eip;
3111 case MSR_IA32_APICBASE:
3112 val = cpu_get_apic_base(env);
3123 case MSR_VM_HSAVE_PA:
3124 val = env->vm_hsave;
3126 #ifdef TARGET_X86_64
3137 val = env->segs[R_FS].base;
3140 val = env->segs[R_GS].base;
3142 case MSR_KERNELGSBASE:
3143 val = env->kernelgsbase;
3147 /* XXX: exception ? */
3151 EAX = (uint32_t)(val);
3152 EDX = (uint32_t)(val >> 32);
3156 uint32_t helper_lsl(uint32_t selector)
3159 uint32_t e1, e2, eflags;
3160 int rpl, dpl, cpl, type;
3163 eflags = cc_table[CC_OP].compute_all();
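    /* LSL only updates ZF: cleared when the selector cannot be used from
       the current privilege level or has the wrong type, set when the
       limit is returned */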
3164 if (load_segment(&e1, &e2, selector) != 0)
3167 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3168 cpl = env->hflags & HF_CPL_MASK;
3169 if (e2 & DESC_S_MASK) {
3170 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3173 if (dpl < cpl || dpl < rpl)
3177 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3188 if (dpl < cpl || dpl < rpl) {
3190 CC_SRC = eflags & ~CC_Z;
3194 limit = get_seg_limit(e1, e2);
3195 CC_SRC = eflags | CC_Z;
3199 uint32_t helper_lar(uint32_t selector)
3201 uint32_t e1, e2, eflags;
3202 int rpl, dpl, cpl, type;
3205 eflags = cc_table[CC_OP].compute_all();
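    /* LAR behaves like LSL but returns the access rights bytes
       (e2 & 0x00f0ff00) instead of the segment limit */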
3206 if ((selector & 0xfffc) == 0)
3208 if (load_segment(&e1, &e2, selector) != 0)
3211 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3212 cpl = env->hflags & HF_CPL_MASK;
3213 if (e2 & DESC_S_MASK) {
3214 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3217 if (dpl < cpl || dpl < rpl)
3221 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3235 if (dpl < cpl || dpl < rpl) {
3237 CC_SRC = eflags & ~CC_Z;
3241 CC_SRC = eflags | CC_Z;
3242 return e2 & 0x00f0ff00;
3245 void helper_verr(uint32_t selector)
3247 uint32_t e1, e2, eflags;
3251 eflags = cc_table[CC_OP].compute_all();
3252 if ((selector & 0xfffc) == 0)
3254 if (load_segment(&e1, &e2, selector) != 0)
3256 if (!(e2 & DESC_S_MASK))
3259 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3260 cpl = env->hflags & HF_CPL_MASK;
3261 if (e2 & DESC_CS_MASK) {
3262 if (!(e2 & DESC_R_MASK))
3264 if (!(e2 & DESC_C_MASK)) {
3265 if (dpl < cpl || dpl < rpl)
3269 if (dpl < cpl || dpl < rpl) {
3271 CC_SRC = eflags & ~CC_Z;
3275 CC_SRC = eflags | CC_Z;
3278 void helper_verw(uint32_t selector)
3280 uint32_t e1, e2, eflags;
3284 eflags = cc_table[CC_OP].compute_all();
3285 if ((selector & 0xfffc) == 0)
3287 if (load_segment(&e1, &e2, selector) != 0)
3289 if (!(e2 & DESC_S_MASK))
3292 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3293 cpl = env->hflags & HF_CPL_MASK;
3294 if (e2 & DESC_CS_MASK) {
3297 if (dpl < cpl || dpl < rpl)
3299 if (!(e2 & DESC_W_MASK)) {
3301 CC_SRC = eflags & ~CC_Z;
3305 CC_SRC = eflags | CC_Z;
3308 /* x87 FPU helpers */
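/* record an FPU exception: an exception left unmasked in the control word
   also raises the error summary (ES) and busy flags in the status word */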
3310 static void fpu_set_exception(int mask)
3313 if (env->fpus & (~env->fpuc & FPUC_EM))
3314 env->fpus |= FPUS_SE | FPUS_B;
3317 static inline CPU86_LDouble helper_fdiv(CPU86_LDouble a, CPU86_LDouble b)
3320 fpu_set_exception(FPUS_ZE);
3324 void fpu_raise_exception(void)
3326 if (env->cr[0] & CR0_NE_MASK) {
3327 raise_exception(EXCP10_COPR);
3329 #if !defined(CONFIG_USER_ONLY)
3336 void helper_flds_FT0(uint32_t val)
3343 FT0 = float32_to_floatx(u.f, &env->fp_status);
3346 void helper_fldl_FT0(uint64_t val)
3353 FT0 = float64_to_floatx(u.f, &env->fp_status);
3356 void helper_fildl_FT0(int32_t val)
3358 FT0 = int32_to_floatx(val, &env->fp_status);
3361 void helper_flds_ST0(uint32_t val)
3368 new_fpstt = (env->fpstt - 1) & 7;
3370 env->fpregs[new_fpstt].d = float32_to_floatx(u.f, &env->fp_status);
3371 env->fpstt = new_fpstt;
3372 env->fptags[new_fpstt] = 0; /* validate stack entry */
3375 void helper_fldl_ST0(uint64_t val)
3382 new_fpstt = (env->fpstt - 1) & 7;
3384 env->fpregs[new_fpstt].d = float64_to_floatx(u.f, &env->fp_status);
3385 env->fpstt = new_fpstt;
3386 env->fptags[new_fpstt] = 0; /* validate stack entry */
3389 void helper_fildl_ST0(int32_t val)
3392 new_fpstt = (env->fpstt - 1) & 7;
3393 env->fpregs[new_fpstt].d = int32_to_floatx(val, &env->fp_status);
3394 env->fpstt = new_fpstt;
3395 env->fptags[new_fpstt] = 0; /* validate stack entry */
3398 void helper_fildll_ST0(int64_t val)
3401 new_fpstt = (env->fpstt - 1) & 7;
3402 env->fpregs[new_fpstt].d = int64_to_floatx(val, &env->fp_status);
3403 env->fpstt = new_fpstt;
3404 env->fptags[new_fpstt] = 0; /* validate stack entry */
3407 uint32_t helper_fsts_ST0(void)
3413 u.f = floatx_to_float32(ST0, &env->fp_status);
3417 uint64_t helper_fstl_ST0(void)
3423 u.f = floatx_to_float64(ST0, &env->fp_status);
3427 int32_t helper_fist_ST0(void)
3430 val = floatx_to_int32(ST0, &env->fp_status);
3431 if (val != (int16_t)val)
3436 int32_t helper_fistl_ST0(void)
3439 val = floatx_to_int32(ST0, &env->fp_status);
3443 int64_t helper_fistll_ST0(void)
3446 val = floatx_to_int64(ST0, &env->fp_status);
3450 int32_t helper_fistt_ST0(void)
3453 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3454 if (val != (int16_t)val)
3459 int32_t helper_fisttl_ST0(void)
3462 val = floatx_to_int32_round_to_zero(ST0, &env->fp_status);
3466 int64_t helper_fisttll_ST0(void)
3469 val = floatx_to_int64_round_to_zero(ST0, &env->fp_status);
3473 void helper_fldt_ST0(target_ulong ptr)
3476 new_fpstt = (env->fpstt - 1) & 7;
3477 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3478 env->fpstt = new_fpstt;
3479 env->fptags[new_fpstt] = 0; /* validate stack entry */
3482 void helper_fstt_ST0(target_ulong ptr)
3484 helper_fstt(ST0, ptr);
3487 void helper_fpush(void)
3492 void helper_fpop(void)
3497 void helper_fdecstp(void)
3499 env->fpstt = (env->fpstt - 1) & 7;
3500 env->fpus &= (~0x4700);
3503 void helper_fincstp(void)
3505 env->fpstt = (env->fpstt + 1) & 7;
3506 env->fpus &= (~0x4700);
3511 void helper_ffree_STN(int st_index)
3513 env->fptags[(env->fpstt + st_index) & 7] = 1;
3516 void helper_fmov_ST0_FT0(void)
3521 void helper_fmov_FT0_STN(int st_index)
3526 void helper_fmov_ST0_STN(int st_index)
3531 void helper_fmov_STN_ST0(int st_index)
3536 void helper_fxchg_ST0_STN(int st_index)
3544 /* FPU operations */
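/* map floatx_compare() results (indexed as ret + 1: less, equal, greater,
   unordered) onto the C0/C2/C3 condition bits of the FPU status word */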
3546 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3548 void helper_fcom_ST0_FT0(void)
3552 ret = floatx_compare(ST0, FT0, &env->fp_status);
3553 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3557 void helper_fucom_ST0_FT0(void)
3561 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3562 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3566 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
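/* same mapping for FCOMI/FUCOMI, expressed as the CF/ZF/PF bits of EFLAGS
   instead of the FPU condition codes */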
3568 void helper_fcomi_ST0_FT0(void)
3573 ret = floatx_compare(ST0, FT0, &env->fp_status);
3574 eflags = cc_table[CC_OP].compute_all();
3575 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3580 void helper_fucomi_ST0_FT0(void)
3585 ret = floatx_compare_quiet(ST0, FT0, &env->fp_status);
3586 eflags = cc_table[CC_OP].compute_all();
3587 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3592 void helper_fadd_ST0_FT0(void)
3597 void helper_fmul_ST0_FT0(void)
3602 void helper_fsub_ST0_FT0(void)
3607 void helper_fsubr_ST0_FT0(void)
3612 void helper_fdiv_ST0_FT0(void)
3614 ST0 = helper_fdiv(ST0, FT0);
3617 void helper_fdivr_ST0_FT0(void)
3619 ST0 = helper_fdiv(FT0, ST0);
3622 /* fp operations between STN and ST0 */
3624 void helper_fadd_STN_ST0(int st_index)
3626 ST(st_index) += ST0;
3629 void helper_fmul_STN_ST0(int st_index)
3631 ST(st_index) *= ST0;
3634 void helper_fsub_STN_ST0(int st_index)
3636 ST(st_index) -= ST0;
3639 void helper_fsubr_STN_ST0(int st_index)
3646 void helper_fdiv_STN_ST0(int st_index)
3650 *p = helper_fdiv(*p, ST0);
3653 void helper_fdivr_STN_ST0(int st_index)
3657 *p = helper_fdiv(ST0, *p);
3660 /* misc FPU operations */
3661 void helper_fchs_ST0(void)
3663 ST0 = floatx_chs(ST0);
3666 void helper_fabs_ST0(void)
3668 ST0 = floatx_abs(ST0);
3671 void helper_fld1_ST0(void)
3676 void helper_fldl2t_ST0(void)
3681 void helper_fldl2e_ST0(void)
3686 void helper_fldpi_ST0(void)
3691 void helper_fldlg2_ST0(void)
3696 void helper_fldln2_ST0(void)
3701 void helper_fldz_ST0(void)
3706 void helper_fldz_FT0(void)
3711 uint32_t helper_fnstsw(void)
3713 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; /* insert TOP into bits 11-13 of the status word */
3716 uint32_t helper_fnstcw(void)
3721 static void update_fp_status(void)
3725 /* set rounding mode */
3726 switch(env->fpuc & RC_MASK) {
3729 rnd_type = float_round_nearest_even;
3732 rnd_type = float_round_down;
3735 rnd_type = float_round_up;
3738 rnd_type = float_round_to_zero;
3741 set_float_rounding_mode(rnd_type, &env->fp_status);
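    /* bits 8-9 of the control word select the precision (single, double
       or extended) that softfloat rounds results to */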
3743 switch((env->fpuc >> 8) & 3) {
3755 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
3759 void helper_fldcw(uint32_t val)
3765 void helper_fclex(void)
3767 env->fpus &= 0x7f00;
3770 void helper_fwait(void)
3772 if (env->fpus & FPUS_SE)
3773 fpu_raise_exception();
3777 void helper_fninit(void)
3794 void helper_fbld_ST0(target_ulong ptr)
3802 for(i = 8; i >= 0; i--) {
3804 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
3807 if (ldub(ptr + 9) & 0x80)
3813 void helper_fbst_ST0(target_ulong ptr)
3816 target_ulong mem_ref, mem_end;
3819 val = floatx_to_int64(ST0, &env->fp_status);
3821 mem_end = mem_ref + 9;
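    /* FBSTP layout: up to 18 packed BCD digits, two per byte starting
       with the least significant pair, sign in bit 7 of byte 9 */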
3828 while (mem_ref < mem_end) {
3833 v = ((v / 10) << 4) | (v % 10);
3836 while (mem_ref < mem_end) {
3841 void helper_f2xm1(void)
3843 ST0 = pow(2.0,ST0) - 1.0;
3846 void helper_fyl2x(void)
3848 CPU86_LDouble fptemp;
3852 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
3856 env->fpus &= (~0x4700);
3861 void helper_fptan(void)
3863 CPU86_LDouble fptemp;
3866 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
3872 env->fpus &= (~0x400); /* C2 <-- 0 */
3873 /* the above code is for |arg| < 2**52 only */
3877 void helper_fpatan(void)
3879 CPU86_LDouble fptemp, fpsrcop;
3883 ST1 = atan2(fpsrcop,fptemp);
3887 void helper_fxtract(void)
3889 CPU86_LDoubleU temp;
3890 unsigned int expdif;
3893 expdif = EXPD(temp) - EXPBIAS;
3894 /*DP exponent bias*/
3901 void helper_fprem1(void)
3903 CPU86_LDouble dblq, fpsrcop, fptemp;
3904 CPU86_LDoubleU fpsrcop1, fptemp1;
3906 signed long long int q;
3908 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3909 ST0 = 0.0 / 0.0; /* NaN */
3910 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3916 fpsrcop1.d = fpsrcop;
3918 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3921 /* optimisation? taken from the AMD docs */
3922 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3923 /* ST0 is unchanged */
3928 dblq = fpsrcop / fptemp;
3929 /* round dblq towards nearest integer */
3931 ST0 = fpsrcop - fptemp * dblq;
3933 /* convert dblq to q by truncating towards zero */
3935 q = (signed long long int)(-dblq);
3937 q = (signed long long int)dblq;
3939 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3940 /* (C0,C3,C1) <-- (q2,q1,q0) */
3941 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3942 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3943 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3945 env->fpus |= 0x400; /* C2 <-- 1 */
3946 fptemp = pow(2.0, expdif - 50);
3947 fpsrcop = (ST0 / ST1) / fptemp;
3948 /* fpsrcop = integer obtained by chopping */
3949 fpsrcop = (fpsrcop < 0.0) ?
3950 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
3951 ST0 -= (ST1 * fpsrcop * fptemp);
3955 void helper_fprem(void)
3957 CPU86_LDouble dblq, fpsrcop, fptemp;
3958 CPU86_LDoubleU fpsrcop1, fptemp1;
3960 signed long long int q;
3962 if (isinf(ST0) || isnan(ST0) || isnan(ST1) || (ST1 == 0.0)) {
3963 ST0 = 0.0 / 0.0; /* NaN */
3964 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3968 fpsrcop = (CPU86_LDouble)ST0;
3969 fptemp = (CPU86_LDouble)ST1;
3970 fpsrcop1.d = fpsrcop;
3972 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
3975 /* optimisation? taken from the AMD docs */
3976 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3977 /* ST0 is unchanged */
3981 if ( expdif < 53 ) {
3982 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
3983 /* round dblq towards zero */
3984 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
3985 ST0 = fpsrcop/*ST0*/ - fptemp * dblq;
3987 /* convert dblq to q by truncating towards zero */
3989 q = (signed long long int)(-dblq);
3991 q = (signed long long int)dblq;
3993 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
3994 /* (C0,C3,C1) <-- (q2,q1,q0) */
3995 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
3996 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
3997 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
3999 int N = 32 + (expdif % 32); /* as per AMD docs */
4000 env->fpus |= 0x400; /* C2 <-- 1 */
4001 fptemp = pow(2.0, (double)(expdif - N));
4002 fpsrcop = (ST0 / ST1) / fptemp;
4003 /* fpsrcop = integer obtained by chopping */
4004 fpsrcop = (fpsrcop < 0.0) ?
4005 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4006 ST0 -= (ST1 * fpsrcop * fptemp);
4010 void helper_fyl2xp1(void)
4012 CPU86_LDouble fptemp;
4015 if ((fptemp+1.0)>0.0) {
4016 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4020 env->fpus &= (~0x4700);
4025 void helper_fsqrt(void)
4027 CPU86_LDouble fptemp;
4031 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4037 void helper_fsincos(void)
4039 CPU86_LDouble fptemp;
4042 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4048 env->fpus &= (~0x400); /* C2 <-- 0 */
4049 /* the above code is for |arg| < 2**63 only */
4053 void helper_frndint(void)
4055 ST0 = floatx_round_to_int(ST0, &env->fp_status);
4058 void helper_fscale(void)
4060 ST0 = ldexp (ST0, (int)(ST1));
4063 void helper_fsin(void)
4065 CPU86_LDouble fptemp;
4068 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4072 env->fpus &= (~0x400); /* C2 <-- 0 */
4073 /* the above code is for |arg| < 2**53 only */
4077 void helper_fcos(void)
4079 CPU86_LDouble fptemp;
4082 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4086 env->fpus &= (~0x400); /* C2 <-- 0 */
4087 /* the above code is for |arg| < 2**63 only */
4091 void helper_fxam_ST0(void)
4093 CPU86_LDoubleU temp;
4098 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4100 env->fpus |= 0x200; /* C1 <-- 1 */
4102 /* XXX: test fptags too */
4103 expdif = EXPD(temp);
4104 if (expdif == MAXEXPD) {
4105 #ifdef USE_X86LDOUBLE
4106 if (MANTD(temp) == 0x8000000000000000ULL)
4108 if (MANTD(temp) == 0)
4110 env->fpus |= 0x500 /*Infinity*/;
4112 env->fpus |= 0x100 /*NaN*/;
4113 } else if (expdif == 0) {
4114 if (MANTD(temp) == 0)
4115 env->fpus |= 0x4000 /*Zero*/;
4117 env->fpus |= 0x4400 /*Denormal*/;
4123 void helper_fstenv(target_ulong ptr, int data32)
4125 int fpus, fptag, exp, i;
4129 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
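    /* rebuild the 16-bit tag word: two bits per register, 00 = valid,
       01 = zero, 10 = special (NaN, infinity, denormal), 11 = empty */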
4131 for (i=7; i>=0; i--) {
4133 if (env->fptags[i]) {
4136 tmp.d = env->fpregs[i].d;
4139 if (exp == 0 && mant == 0) {
4142 } else if (exp == 0 || exp == MAXEXPD
4143 #ifdef USE_X86LDOUBLE
4144 || (mant & (1LL << 63)) == 0
4147 /* NaNs, infinity, denormal */
4154 stl(ptr, env->fpuc);
4156 stl(ptr + 8, fptag);
4157 stl(ptr + 12, 0); /* fpip */
4158 stl(ptr + 16, 0); /* fpcs */
4159 stl(ptr + 20, 0); /* fpoo */
4160 stl(ptr + 24, 0); /* fpos */
4163 stw(ptr, env->fpuc);
4165 stw(ptr + 4, fptag);
4173 void helper_fldenv(target_ulong ptr, int data32)
4178 env->fpuc = lduw(ptr);
4179 fpus = lduw(ptr + 4);
4180 fptag = lduw(ptr + 8);
4183 env->fpuc = lduw(ptr);
4184 fpus = lduw(ptr + 2);
4185 fptag = lduw(ptr + 4);
4187 env->fpstt = (fpus >> 11) & 7;
4188 env->fpus = fpus & ~0x3800;
4189 for(i = 0;i < 8; i++) {
4190 env->fptags[i] = ((fptag & 3) == 3);
4195 void helper_fsave(target_ulong ptr, int data32)
4200 helper_fstenv(ptr, data32);
4202 ptr += (14 << data32);
4203 for(i = 0;i < 8; i++) {
4205 helper_fstt(tmp, ptr);
4223 void helper_frstor(target_ulong ptr, int data32)
4228 helper_fldenv(ptr, data32);
4229 ptr += (14 << data32);
4231 for(i = 0;i < 8; i++) {
4232 tmp = helper_fldt(ptr);
4238 void helper_fxsave(target_ulong ptr, int data64)
4240 int fpus, fptag, i, nb_xmm_regs;
4244 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4246 for(i = 0; i < 8; i++) {
4247 fptag |= (env->fptags[i] << i);
4249 stw(ptr, env->fpuc);
4251 stw(ptr + 4, fptag ^ 0xff); /* FXSAVE stores an abridged tag: 1 = valid, so invert the per-register empty flags */
4254 for(i = 0;i < 8; i++) {
4256 helper_fstt(tmp, addr);
4260 if (env->cr[4] & CR4_OSFXSR_MASK) {
4261 /* XXX: finish it */
4262 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4263 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4264 nb_xmm_regs = 8 << data64;
4266 for(i = 0; i < nb_xmm_regs; i++) {
4267 stq(addr, env->xmm_regs[i].XMM_Q(0));
4268 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4274 void helper_fxrstor(target_ulong ptr, int data64)
4276 int i, fpus, fptag, nb_xmm_regs;
4280 env->fpuc = lduw(ptr);
4281 fpus = lduw(ptr + 2);
4282 fptag = lduw(ptr + 4);
4283 env->fpstt = (fpus >> 11) & 7;
4284 env->fpus = fpus & ~0x3800;
4286 for(i = 0;i < 8; i++) {
4287 env->fptags[i] = ((fptag >> i) & 1);
4291 for(i = 0;i < 8; i++) {
4292 tmp = helper_fldt(addr);
4297 if (env->cr[4] & CR4_OSFXSR_MASK) {
4298 /* XXX: finish it */
4299 env->mxcsr = ldl(ptr + 0x18);
4301 nb_xmm_regs = 8 << data64;
4303 for(i = 0; i < nb_xmm_regs; i++) {
4304 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4305 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4311 #ifndef USE_X86LDOUBLE
4313 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4315 CPU86_LDoubleU temp;
4320 *pmant = (MANTD(temp) << 11) | (1LL << 63);
4321 /* exponent + sign */
4322 e = EXPD(temp) - EXPBIAS + 16383;
4323 e |= SIGND(temp) >> 16;
4327 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4329 CPU86_LDoubleU temp;
4333 /* XXX: handle overflow ? */
4334 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
4335 e |= (upper >> 4) & 0x800; /* sign */
4336 ll = (mant >> 11) & ((1LL << 52) - 1);
4338 temp.l.upper = (e << 20) | (ll >> 32);
4341 temp.ll = ll | ((uint64_t)e << 52);
4348 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, CPU86_LDouble f)
4350 CPU86_LDoubleU temp;
4353 *pmant = temp.l.lower;
4354 *pexp = temp.l.upper;
4357 CPU86_LDouble cpu_set_fp80(uint64_t mant, uint16_t upper)
4359 CPU86_LDoubleU temp;
4361 temp.l.upper = upper;
4362 temp.l.lower = mant;
4367 #ifdef TARGET_X86_64
4369 //#define DEBUG_MULDIV
4371 static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
4380 static void neg128(uint64_t *plow, uint64_t *phigh)
4384 add128(plow, phigh, 1, 0);
4387 /* return TRUE if overflow */
4388 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4390 uint64_t q, r, a1, a0;
4403 /* XXX: use a better algorithm */
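    /* restoring division: shift the 128-bit dividend left one bit at a
       time, subtracting the divisor whenever it fits and collecting the
       quotient bits in a0 */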
4404 for(i = 0; i < 64; i++) {
4406 a1 = (a1 << 1) | (a0 >> 63);
4407 if (ab || a1 >= b) {
4413 a0 = (a0 << 1) | qb;
4415 #if defined(DEBUG_MULDIV)
4416 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4417 *phigh, *plow, b, a0, a1);
4425 /* return TRUE if overflow */
4426 static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
4429 sa = ((int64_t)*phigh < 0);
4431 neg128(plow, phigh);
4435 if (div64(plow, phigh, b) != 0)
4438 if (*plow > (1ULL << 63))
4442 if (*plow >= (1ULL << 63))
4450 void helper_mulq_EAX_T0(target_ulong t0)
4454 mulu64(&r0, &r1, EAX, t0);
4461 void helper_imulq_EAX_T0(target_ulong t0)
4465 muls64(&r0, &r1, EAX, t0);
4469 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63)); /* CF/OF set when the high half is not just the sign extension of the low half */
4472 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4476 muls64(&r0, &r1, t0, t1);
4478 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4482 void helper_divq_EAX(target_ulong t0)
4486 raise_exception(EXCP00_DIVZ);
4490 if (div64(&r0, &r1, t0))
4491 raise_exception(EXCP00_DIVZ);
4496 void helper_idivq_EAX(target_ulong t0)
4500 raise_exception(EXCP00_DIVZ);
4504 if (idiv64(&r0, &r1, t0))
4505 raise_exception(EXCP00_DIVZ);
4511 void helper_hlt(void)
4513 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4514 env->hflags |= HF_HALTED_MASK;
4515 env->exception_index = EXCP_HLT;
4519 void helper_monitor(target_ulong ptr)
4521 if ((uint32_t)ECX != 0)
4522 raise_exception(EXCP0D_GPF);
4523 /* XXX: store address ? */
4526 void helper_mwait(void)
4528 if ((uint32_t)ECX != 0)
4529 raise_exception(EXCP0D_GPF);
4530 /* XXX: not complete but not completely erroneous */
4531 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4532 /* more than one CPU: do not sleep because another CPU may
4539 void helper_debug(void)
4541 env->exception_index = EXCP_DEBUG;
4545 void helper_raise_interrupt(int intno, int next_eip_addend)
4547 raise_interrupt(intno, 1, 0, next_eip_addend);
4550 void helper_raise_exception(int exception_index)
4552 raise_exception(exception_index);
4555 void helper_cli(void)
4557 env->eflags &= ~IF_MASK;
4560 void helper_sti(void)
4562 env->eflags |= IF_MASK;
4566 /* vm86plus instructions */
4567 void helper_cli_vm(void)
4569 env->eflags &= ~VIF_MASK;
4572 void helper_sti_vm(void)
4574 env->eflags |= VIF_MASK;
4575 if (env->eflags & VIP_MASK) {
4576 raise_exception(EXCP0D_GPF);
4581 void helper_set_inhibit_irq(void)
4583 env->hflags |= HF_INHIBIT_IRQ_MASK;
4586 void helper_reset_inhibit_irq(void)
4588 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4591 void helper_boundw(target_ulong a0, int v)
4595 high = ldsw(a0 + 2);
4597 if (v < low || v > high) {
4598 raise_exception(EXCP05_BOUND);
4603 void helper_boundl(target_ulong a0, int v)
4608 if (v < low || v > high) {
4609 raise_exception(EXCP05_BOUND);
4614 static float approx_rsqrt(float a)
4616 return 1.0 / sqrt(a);
4619 static float approx_rcp(float a)
4624 #if !defined(CONFIG_USER_ONLY)
4626 #define MMUSUFFIX _mmu
4628 # define GETPC() ((void*)((unsigned long)__builtin_return_address(0) & 0x7fffffffUL))
4630 # define GETPC() (__builtin_return_address(0))
4634 #include "softmmu_template.h"
4637 #include "softmmu_template.h"
4640 #include "softmmu_template.h"
4643 #include "softmmu_template.h"
4647 /* try to fill the TLB and return an exception if error. If retaddr is
4648 NULL, it means that the function was called in C code (i.e. not
4649 from generated code or from helper.c) */
4650 /* XXX: fix it to restore all registers */
4651 void tlb_fill(target_ulong addr, int is_write, int mmu_idx, void *retaddr)
4653 TranslationBlock *tb;
4656 CPUX86State *saved_env;
4658 /* XXX: hack to restore env in all cases, even if not called from
4661 env = cpu_single_env;
4663 ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
4666 /* now we have a real cpu fault */
4667 pc = (unsigned long)retaddr;
4668 tb = tb_find_pc(pc);
4670 /* the PC is inside the translated code. It means that we have
4671 a virtual CPU fault */
4672 cpu_restore_state(tb, env, pc, NULL);
4676 raise_exception_err(env->exception_index, env->error_code);
4678 raise_exception_err_norestore(env->exception_index, env->error_code);
4684 /* Secure Virtual Machine helpers */
4686 void helper_stgi(void)
4688 env->hflags |= HF_GIF_MASK;
4691 void helper_clgi(void)
4693 env->hflags &= ~HF_GIF_MASK;
4696 #if defined(CONFIG_USER_ONLY)
4698 void helper_vmrun(void)
4701 void helper_vmmcall(void)
4704 void helper_vmload(void)
4707 void helper_vmsave(void)
4710 void helper_skinit(void)
4713 void helper_invlpga(void)
4716 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
4719 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4723 void helper_svm_check_io(uint32_t port, uint32_t param,
4724 uint32_t next_eip_addend)
4729 static inline uint32_t
4730 vmcb2cpu_attrib(uint16_t vmcb_attrib, uint32_t vmcb_base, uint32_t vmcb_limit)
4732 return ((vmcb_attrib & 0x00ff) << 8) /* Type, S, DPL, P */
4733 | ((vmcb_attrib & 0x0f00) << 12) /* AVL, L, DB, G */
4734 | ((vmcb_base >> 16) & 0xff) /* Base 23-16 */
4735 | (vmcb_base & 0xff000000) /* Base 31-24 */
4736 | (vmcb_limit & 0xf0000); /* Limit 19-16 */
4739 static inline uint16_t cpu2vmcb_attrib(uint32_t cpu_attrib)
4741 return ((cpu_attrib >> 8) & 0xff) /* Type, S, DPL, P */
4742 | ((cpu_attrib & 0xf00000) >> 12); /* AVL, L, DB, G */
4745 void helper_vmrun(void)
4752 if (loglevel & CPU_LOG_TB_IN_ASM)
4753 fprintf(logfile,"vmrun! " TARGET_FMT_lx "\n", addr);
4755 env->vm_vmcb = addr;
4757 /* save the current CPU state in the hsave page */
4758 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
4759 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
4761 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
4762 stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
4764 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
4765 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
4766 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
4767 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
4768 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8), env->cr[8]);
4769 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
4770 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);
4772 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
4773 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());
4775 SVM_SAVE_SEG(env->vm_hsave, segs[R_ES], es);
4776 SVM_SAVE_SEG(env->vm_hsave, segs[R_CS], cs);
4777 SVM_SAVE_SEG(env->vm_hsave, segs[R_SS], ss);
4778 SVM_SAVE_SEG(env->vm_hsave, segs[R_DS], ds);
4780 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip), EIP);
4781 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
4782 stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);
4784 /* load the interception bitmaps so we do not need to access the
4786 /* We shift all the intercept bits so we can OR them with the TB
4788 env->intercept = (ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept)) << INTERCEPT_INTR) | INTERCEPT_SVM_MASK;
4789 env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
4790 env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
4791 env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
4792 env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
4793 env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));
4795 env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
4796 env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));
4798 env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
4799 env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));
4801 /* clear exit_info_2 so we behave like the real hardware */
4802 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);
4804 cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
4805 cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
4806 cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
4807 env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
4808 int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
4809 if (int_ctl & V_INTR_MASKING_MASK) {
4810 env->cr[8] = int_ctl & V_TPR_MASK;
4811 cpu_set_apic_tpr(env, env->cr[8]);
4812 if (env->eflags & IF_MASK)
4813 env->hflags |= HF_HIF_MASK;
4816 #ifdef TARGET_X86_64
4817 env->efer = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer));
4818 env->hflags &= ~HF_LMA_MASK;
4819 if (env->efer & MSR_EFER_LMA)
4820 env->hflags |= HF_LMA_MASK;
4823 load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
4824 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
4825 CC_OP = CC_OP_EFLAGS;
4826 CC_DST = 0xffffffff;
4828 SVM_LOAD_SEG(env->vm_vmcb, ES, es);
4829 SVM_LOAD_SEG(env->vm_vmcb, CS, cs);
4830 SVM_LOAD_SEG(env->vm_vmcb, SS, ss);
4831 SVM_LOAD_SEG(env->vm_vmcb, DS, ds);
4833 EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
4835 ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
4836 EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
4837 env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
4838 env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
4839 cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));
4841 /* FIXME: guest state consistency checks */
4843 switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
4844 case TLB_CONTROL_DO_NOTHING:
4846 case TLB_CONTROL_FLUSH_ALL_ASID:
4847 /* FIXME: this is not 100% correct but should work for now */
4854 /* maybe we need to inject an event */
4855 event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
4856 if (event_inj & SVM_EVTINJ_VALID) {
4857 uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
4858 uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
4859 uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));
4860 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
4862 if (loglevel & CPU_LOG_TB_IN_ASM)
4863 fprintf(logfile, "Injecting(%#hx): ", valid_err);
4864 /* FIXME: need to implement valid_err */
4865 switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
4866 case SVM_EVTINJ_TYPE_INTR:
4867 env->exception_index = vector;
4868 env->error_code = event_inj_err;
4869 env->exception_is_int = 0;
4870 env->exception_next_eip = -1;
4871 if (loglevel & CPU_LOG_TB_IN_ASM)
4872 fprintf(logfile, "INTR");
4874 case SVM_EVTINJ_TYPE_NMI:
4875 env->exception_index = vector;
4876 env->error_code = event_inj_err;
4877 env->exception_is_int = 0;
4878 env->exception_next_eip = EIP;
4879 if (loglevel & CPU_LOG_TB_IN_ASM)
4880 fprintf(logfile, "NMI");
4882 case SVM_EVTINJ_TYPE_EXEPT:
4883 env->exception_index = vector;
4884 env->error_code = event_inj_err;
4885 env->exception_is_int = 0;
4886 env->exception_next_eip = -1;
4887 if (loglevel & CPU_LOG_TB_IN_ASM)
4888 fprintf(logfile, "EXEPT");
4890 case SVM_EVTINJ_TYPE_SOFT:
4891 env->exception_index = vector;
4892 env->error_code = event_inj_err;
4893 env->exception_is_int = 1;
4894 env->exception_next_eip = EIP;
4895 if (loglevel & CPU_LOG_TB_IN_ASM)
4896 fprintf(logfile, "SOFT");
4899 if (loglevel & CPU_LOG_TB_IN_ASM)
4900 fprintf(logfile, " %#x %#x\n", env->exception_index, env->error_code);
4902 if ((int_ctl & V_IRQ_MASK) || (env->intercept & INTERCEPT_VINTR)) {
4903 env->interrupt_request |= CPU_INTERRUPT_VIRQ;
4909 void helper_vmmcall(void)
4911 if (loglevel & CPU_LOG_TB_IN_ASM)
4912 fprintf(logfile,"vmmcall!\n");
4915 void helper_vmload(void)
4919 if (loglevel & CPU_LOG_TB_IN_ASM)
4920 fprintf(logfile,"vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4921 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4922 env->segs[R_FS].base);
4924 SVM_LOAD_SEG2(addr, segs[R_FS], fs);
4925 SVM_LOAD_SEG2(addr, segs[R_GS], gs);
4926 SVM_LOAD_SEG2(addr, tr, tr);
4927 SVM_LOAD_SEG2(addr, ldt, ldtr);
4929 #ifdef TARGET_X86_64
4930 env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
4931 env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
4932 env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
4933 env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
4935 env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
4936 env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
4937 env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
4938 env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
4941 void helper_vmsave(void)
4945 if (loglevel & CPU_LOG_TB_IN_ASM)
4946 fprintf(logfile,"vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
4947 addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
4948 env->segs[R_FS].base);
4950 SVM_SAVE_SEG(addr, segs[R_FS], fs);
4951 SVM_SAVE_SEG(addr, segs[R_GS], gs);
4952 SVM_SAVE_SEG(addr, tr, tr);
4953 SVM_SAVE_SEG(addr, ldt, ldtr);
4955 #ifdef TARGET_X86_64
4956 stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
4957 stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
4958 stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
4959 stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
4961 stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
4962 stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
4963 stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
4964 stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
4967 void helper_skinit(void)
4969 if (loglevel & CPU_LOG_TB_IN_ASM)
4970 fprintf(logfile,"skinit!\n");
4973 void helper_invlpga(void)
4978 void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
4981 case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
4982 if (INTERCEPTEDw(_cr_read, (1 << (type - SVM_EXIT_READ_CR0)))) {
4983 helper_vmexit(type, param);
4986 case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 8:
4987 if (INTERCEPTEDw(_dr_read, (1 << (type - SVM_EXIT_READ_DR0)))) {
4988 helper_vmexit(type, param);
4991 case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
4992 if (INTERCEPTEDw(_cr_write, (1 << (type - SVM_EXIT_WRITE_CR0)))) {
4993 helper_vmexit(type, param);
4996 case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 8:
4997 if (INTERCEPTEDw(_dr_write, (1 << (type - SVM_EXIT_WRITE_DR0)))) {
4998 helper_vmexit(type, param);
5001 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 16:
5002 if (INTERCEPTEDl(_exceptions, (1 << (type - SVM_EXIT_EXCP_BASE)))) {
5003 helper_vmexit(type, param);
5010 if (INTERCEPTED(1ULL << INTERCEPT_MSR_PROT)) {
5011 /* FIXME: this should be read in at vmrun (faster this way?) */
5012 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
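        /* the MSR permission map holds two bits per MSR (read and write
           intercept) spread over three separate MSR ranges */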
5014 switch((uint32_t)ECX) {
5019 case 0xc0000000 ... 0xc0001fff:
5020 t0 = (8192 + ECX - 0xc0000000) * 2;
5024 case 0xc0010000 ... 0xc0011fff:
5025 t0 = (16384 + ECX - 0xc0010000) * 2;
5030 helper_vmexit(type, param);
5035 if (ldub_phys(addr + t1) & ((1 << param) << t0))
5036 helper_vmexit(type, param);
5040 if (INTERCEPTED((1ULL << ((type - SVM_EXIT_INTR) + INTERCEPT_INTR)))) {
5041 helper_vmexit(type, param);
5047 void helper_svm_check_io(uint32_t port, uint32_t param,
5048 uint32_t next_eip_addend)
5050 if (INTERCEPTED(1ULL << INTERCEPT_IOIO_PROT)) {
5051 /* FIXME: this should be read in at vmrun (faster this way?) */
5052 uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
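        /* the I/O permission map holds one bit per port; an access of N
           bytes tests the N consecutive bits starting at the port number */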
5053 uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
5054 if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
5056 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
5057 env->eip + next_eip_addend);
5058 helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
5063 /* Note: currently only 32 bits of exit_code are used */
5064 void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
5068 if (loglevel & CPU_LOG_TB_IN_ASM)
5069 fprintf(logfile,"vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
5070 exit_code, exit_info_1,
5071 ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
5074 if(env->hflags & HF_INHIBIT_IRQ_MASK) {
5075 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
5076 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
5078 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
5081 /* Save the VM state in the vmcb */
5082 SVM_SAVE_SEG(env->vm_vmcb, segs[R_ES], es);
5083 SVM_SAVE_SEG(env->vm_vmcb, segs[R_CS], cs);
5084 SVM_SAVE_SEG(env->vm_vmcb, segs[R_SS], ss);
5085 SVM_SAVE_SEG(env->vm_vmcb, segs[R_DS], ds);
5087 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
5088 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);
5090 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
5091 stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);
5093 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
5094 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
5095 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
5096 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
5097 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
5099 if ((int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl))) & V_INTR_MASKING_MASK) {
5100 int_ctl &= ~V_TPR_MASK;
5101 int_ctl |= env->cr[8] & V_TPR_MASK;
5102 stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);
5105 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
5106 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
5107 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
5108 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
5109 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
5110 stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
5111 stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);
5113 /* Reload the host state from vm_hsave */
5114 env->hflags &= ~HF_HIF_MASK;
5116 env->intercept_exceptions = 0;
5117 env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
5119 env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
5120 env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));
5122 env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
5123 env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));
5125 cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
5126 cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
5127 cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
5128 if (int_ctl & V_INTR_MASKING_MASK) {
5129 env->cr[8] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr8));
5130 cpu_set_apic_tpr(env, env->cr[8]);
5132 /* we need to set the efer after the crs so the hidden flags get set properly */
5133 #ifdef TARGET_X86_64
5134 env->efer = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer));
5135 env->hflags &= ~HF_LMA_MASK;
5136 if (env->efer & MSR_EFER_LMA)
5137 env->hflags |= HF_LMA_MASK;
5141 load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
5142 ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
5143 CC_OP = CC_OP_EFLAGS;
5145 SVM_LOAD_SEG(env->vm_hsave, ES, es);
5146 SVM_LOAD_SEG(env->vm_hsave, CS, cs);
5147 SVM_LOAD_SEG(env->vm_hsave, SS, ss);
5148 SVM_LOAD_SEG(env->vm_hsave, DS, ds);
5150 EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
5151 ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
5152 EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));
5154 env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
5155 env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));
5158 cpu_x86_set_cpl(env, 0);
5159 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
5160 stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);
5163 /* FIXME: Resets the current ASID register to zero (host ASID). */
5165 /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
5167 /* Clears the TSC_OFFSET inside the processor. */
5169 /* If the host is in PAE mode, the processor reloads the host's PDPEs
5170 from the page table indicated by the host's CR3. If the PDPEs contain
5171 illegal state, the processor causes a shutdown. */
5173 /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
5174 env->cr[0] |= CR0_PE_MASK;
5175 env->eflags &= ~VM_MASK;
5177 /* Disables all breakpoints in the host DR7 register. */
5179 /* Checks the reloaded host state for consistency. */
5181 /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
5182 host's code segment or non-canonical (in the case of long mode), a
5183 #GP fault is delivered inside the host. */
5185 /* remove any pending exception */
5186 env->exception_index = -1;
5187 env->error_code = 0;
5188 env->old_exception = -1;
5196 /* XXX: optimize by storing fpstt and fptags in the static cpu state */
5197 void helper_enter_mmx(void)
5200 *(uint32_t *)(env->fptags) = 0;
5201 *(uint32_t *)(env->fptags + 4) = 0;
5204 void helper_emms(void)
5206 /* set to empty state */
5207 *(uint32_t *)(env->fptags) = 0x01010101;
5208 *(uint32_t *)(env->fptags + 4) = 0x01010101;
5212 void helper_movq(uint64_t *d, uint64_t *s)
5218 #include "ops_sse.h"
5221 #include "ops_sse.h"
5224 #include "helper_template.h"
5228 #include "helper_template.h"
5232 #include "helper_template.h"
5235 #ifdef TARGET_X86_64
5238 #include "helper_template.h"