2 * i386 micro operations
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 #include "exec-i386.h"
22 /* NOTE: data are not static to force relocation generation by GCC */
/* Lookup table for the x86 parity flag (PF): entry i is CC_P when the
   byte value i contains an even number of set bits, 0 otherwise.
   Used to compute PF from the low byte of an operation's result. */
24 uint8_t parity_table[256] = {
25 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
26 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
27 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
28 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
29 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
30 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
31 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
32 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
33 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
34 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
35 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
36 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
37 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
38 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
39 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
40 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
41 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
42 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
43 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
44 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
45 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
46 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
47 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
48 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
49 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
50 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
51 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
52 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
53 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
54 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
55 CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
56 0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
/* Effective rotate count for 16-bit RCL/RCR: entry i is i mod 17,
   since rotating a 16-bit operand through the carry flag cycles
   through 17 bit positions. Indexed by the masked shift count (0-31). */
60 const uint8_t rclw_table[32] = {
61 0, 1, 2, 3, 4, 5, 6, 7,
62 8, 9,10,11,12,13,14,15,
63 16, 0, 1, 2, 3, 4, 5, 6,
64 7, 8, 9,10,11,12,13,14,
/* Effective rotate count for 8-bit RCL/RCR: entry i is i mod 9,
   since rotating an 8-bit operand through the carry flag cycles
   through 9 bit positions. Indexed by the masked shift count (0-31). */
68 const uint8_t rclb_table[32] = {
69 0, 1, 2, 3, 4, 5, 6, 7,
70 8, 0, 1, 2, 3, 4, 5, 6,
71 7, 8, 0, 1, 2, 3, 4, 5,
72 6, 7, 8, 0, 1, 2, 3, 4,
76 /* an array of Intel 80-bit FP constants, to be loaded via integer ops */
77 typedef unsigned short f15ld[5];
80 /*0*/ {0x0000,0x0000,0x0000,0x0000,0x0000},
81 /*1*/ {0x0000,0x0000,0x0000,0x8000,0x3fff},
82 /*pi*/ {0xc235,0x2168,0xdaa2,0xc90f,0x4000},
83 /*lg2*/ {0xf799,0xfbcf,0x9a84,0x9a20,0x3ffd},
84 /*ln2*/ {0x79ac,0xd1cf,0x17f7,0xb172,0x3ffe},
85 /*l2e*/ {0xf0bc,0x5c17,0x3b29,0xb8aa,0x3fff},
86 /*l2t*/ {0x8afe,0xcd1b,0x784b,0xd49a,0x4000}
89 /* the same, 64-bit version */
90 typedef unsigned short f15ld[4];
93 #ifndef WORDS_BIGENDIAN
94 /*0*/ {0x0000,0x0000,0x0000,0x0000},
95 /*1*/ {0x0000,0x0000,0x0000,0x3ff0},
96 /*pi*/ {0x2d18,0x5444,0x21fb,0x4009},
97 /*lg2*/ {0x79ff,0x509f,0x4413,0x3fd3},
98 /*ln2*/ {0x39ef,0xfefa,0x2e42,0x3fe6},
99 /*l2e*/ {0x82fe,0x652b,0x1547,0x3ff7},
100 /*l2t*/ {0xa371,0x0979,0x934f,0x400a}
102 /*0*/ {0x0000,0x0000,0x0000,0x0000},
103 /*1*/ {0x3ff0,0x0000,0x0000,0x0000},
104 /*pi*/ {0x4009,0x21fb,0x5444,0x2d18},
105 /*lg2*/ {0x3fd3,0x4413,0x509f,0x79ff},
106 /*ln2*/ {0x3fe6,0x2e42,0xfefa,0x39ef},
107 /*l2e*/ {0x3ff7,0x1547,0x652b,0x82fe},
108 /*l2t*/ {0x400a,0x934f,0x0979,0xa371}
113 /* n must be a constant to be efficient */
114 static inline int lshift(int x, int n)
122 /* we define the various pieces of code used by the JIT */
126 #include "opreg_template.h"
132 #include "opreg_template.h"
138 #include "opreg_template.h"
144 #include "opreg_template.h"
150 #include "opreg_template.h"
156 #include "opreg_template.h"
162 #include "opreg_template.h"
168 #include "opreg_template.h"
172 /* operations with flags */
174 void OPPROTO op_addl_T0_T1_cc(void)
181 void OPPROTO op_orl_T0_T1_cc(void)
187 void OPPROTO op_andl_T0_T1_cc(void)
193 void OPPROTO op_subl_T0_T1_cc(void)
200 void OPPROTO op_xorl_T0_T1_cc(void)
206 void OPPROTO op_cmpl_T0_T1_cc(void)
212 void OPPROTO op_negl_T0_cc(void)
219 void OPPROTO op_incl_T0_cc(void)
221 CC_SRC = cc_table[CC_OP].compute_c();
226 void OPPROTO op_decl_T0_cc(void)
228 CC_SRC = cc_table[CC_OP].compute_c();
233 void OPPROTO op_testl_T0_T1_cc(void)
238 /* operations without flags */
240 void OPPROTO op_addl_T0_T1(void)
245 void OPPROTO op_orl_T0_T1(void)
250 void OPPROTO op_andl_T0_T1(void)
255 void OPPROTO op_subl_T0_T1(void)
260 void OPPROTO op_xorl_T0_T1(void)
265 void OPPROTO op_negl_T0(void)
270 void OPPROTO op_incl_T0(void)
275 void OPPROTO op_decl_T0(void)
280 void OPPROTO op_notl_T0(void)
285 void OPPROTO op_bswapl_T0(void)
290 /* multiply/divide */
291 void OPPROTO op_mulb_AL_T0(void)
294 res = (uint8_t)EAX * (uint8_t)T0;
295 EAX = (EAX & 0xffff0000) | res;
296 CC_SRC = (res & 0xff00);
299 void OPPROTO op_imulb_AL_T0(void)
302 res = (int8_t)EAX * (int8_t)T0;
303 EAX = (EAX & 0xffff0000) | (res & 0xffff);
304 CC_SRC = (res != (int8_t)res);
307 void OPPROTO op_mulw_AX_T0(void)
310 res = (uint16_t)EAX * (uint16_t)T0;
311 EAX = (EAX & 0xffff0000) | (res & 0xffff);
312 EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
316 void OPPROTO op_imulw_AX_T0(void)
319 res = (int16_t)EAX * (int16_t)T0;
320 EAX = (EAX & 0xffff0000) | (res & 0xffff);
321 EDX = (EDX & 0xffff0000) | ((res >> 16) & 0xffff);
322 CC_SRC = (res != (int16_t)res);
325 void OPPROTO op_mull_EAX_T0(void)
328 res = (uint64_t)((uint32_t)EAX) * (uint64_t)((uint32_t)T0);
334 void OPPROTO op_imull_EAX_T0(void)
337 res = (int64_t)((int32_t)EAX) * (int64_t)((int32_t)T0);
340 CC_SRC = (res != (int32_t)res);
343 void OPPROTO op_imulw_T0_T1(void)
346 res = (int16_t)T0 * (int16_t)T1;
348 CC_SRC = (res != (int16_t)res);
351 void OPPROTO op_imull_T0_T1(void)
354 res = (int64_t)((int32_t)T0) * (int64_t)((int32_t)T1);
356 CC_SRC = (res != (int32_t)res);
359 /* division, flags are undefined */
360 /* XXX: add exceptions for overflow */
361 void OPPROTO op_divb_AL_T0(void)
363 unsigned int num, den, q, r;
365 num = (EAX & 0xffff);
369 raise_exception(EXCP00_DIVZ);
371 q = (num / den) & 0xff;
372 r = (num % den) & 0xff;
373 EAX = (EAX & 0xffff0000) | (r << 8) | q;
376 void OPPROTO op_idivb_AL_T0(void)
384 raise_exception(EXCP00_DIVZ);
386 q = (num / den) & 0xff;
387 r = (num % den) & 0xff;
388 EAX = (EAX & 0xffff0000) | (r << 8) | q;
391 void OPPROTO op_divw_AX_T0(void)
393 unsigned int num, den, q, r;
395 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
399 raise_exception(EXCP00_DIVZ);
401 q = (num / den) & 0xffff;
402 r = (num % den) & 0xffff;
403 EAX = (EAX & 0xffff0000) | q;
404 EDX = (EDX & 0xffff0000) | r;
407 void OPPROTO op_idivw_AX_T0(void)
411 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
415 raise_exception(EXCP00_DIVZ);
417 q = (num / den) & 0xffff;
418 r = (num % den) & 0xffff;
419 EAX = (EAX & 0xffff0000) | q;
420 EDX = (EDX & 0xffff0000) | r;
423 #ifdef BUGGY_GCC_DIV64
424 /* gcc 2.95.4 on PowerPC does not seem to like using __udivdi3, so we
425 call it from another function */
426 uint32_t div64(uint32_t *q_ptr, uint64_t num, uint32_t den)
432 int32_t idiv64(int32_t *q_ptr, int64_t num, int32_t den)
439 void OPPROTO op_divl_EAX_T0(void)
441 unsigned int den, q, r;
444 num = EAX | ((uint64_t)EDX << 32);
448 raise_exception(EXCP00_DIVZ);
450 #ifdef BUGGY_GCC_DIV64
451 r = div64(&q, num, den);
460 void OPPROTO op_idivl_EAX_T0(void)
465 num = EAX | ((uint64_t)EDX << 32);
469 raise_exception(EXCP00_DIVZ);
471 #ifdef BUGGY_GCC_DIV64
472 r = idiv64(&q, num, den);
481 /* constant load & misc op */
483 void OPPROTO op_movl_T0_im(void)
488 void OPPROTO op_addl_T0_im(void)
493 void OPPROTO op_andl_T0_ffff(void)
498 void OPPROTO op_movl_T0_T1(void)
503 void OPPROTO op_movl_T1_im(void)
508 void OPPROTO op_addl_T1_im(void)
513 void OPPROTO op_movl_T1_A0(void)
518 void OPPROTO op_movl_A0_im(void)
523 void OPPROTO op_addl_A0_im(void)
528 void OPPROTO op_addl_A0_AL(void)
533 void OPPROTO op_andl_A0_ffff(void)
540 void OPPROTO op_ldub_T0_A0(void)
542 T0 = ldub((uint8_t *)A0);
545 void OPPROTO op_ldsb_T0_A0(void)
547 T0 = ldsb((int8_t *)A0);
550 void OPPROTO op_lduw_T0_A0(void)
552 T0 = lduw((uint8_t *)A0);
555 void OPPROTO op_ldsw_T0_A0(void)
557 T0 = ldsw((int8_t *)A0);
560 void OPPROTO op_ldl_T0_A0(void)
562 T0 = ldl((uint8_t *)A0);
565 void OPPROTO op_ldub_T1_A0(void)
567 T1 = ldub((uint8_t *)A0);
570 void OPPROTO op_ldsb_T1_A0(void)
572 T1 = ldsb((int8_t *)A0);
575 void OPPROTO op_lduw_T1_A0(void)
577 T1 = lduw((uint8_t *)A0);
580 void OPPROTO op_ldsw_T1_A0(void)
582 T1 = ldsw((int8_t *)A0);
585 void OPPROTO op_ldl_T1_A0(void)
587 T1 = ldl((uint8_t *)A0);
590 void OPPROTO op_stb_T0_A0(void)
592 stb((uint8_t *)A0, T0);
595 void OPPROTO op_stw_T0_A0(void)
597 stw((uint8_t *)A0, T0);
600 void OPPROTO op_stl_T0_A0(void)
602 stl((uint8_t *)A0, T0);
605 /* used for bit operations */
607 void OPPROTO op_add_bitw_A0_T1(void)
609 A0 += ((int32_t)T1 >> 4) << 1;
612 void OPPROTO op_add_bitl_A0_T1(void)
614 A0 += ((int32_t)T1 >> 5) << 2;
619 void OPPROTO op_jmp_T0(void)
624 void OPPROTO op_jmp_im(void)
630 /* full interrupt support (only useful for real CPU emulation, not
631 finished) - I won't do it any time soon, finish it if you want ! */
632 void raise_interrupt(int intno, int is_int, int error_code,
633 unsigned int next_eip)
635 SegmentDescriptorTable *dt;
641 if (intno * 8 + 7 > dt->limit)
642 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
643 ptr = dt->base + intno * 8;
646 /* check gate type */
647 type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
649 case 5: /* task gate */
650 case 6: /* 286 interrupt gate */
651 case 7: /* 286 trap gate */
652 case 14: /* 386 interrupt gate */
653 case 15: /* 386 trap gate */
656 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
659 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
660 cpl = env->segs[R_CS] & 3;
661 /* check privilege if software int */
662 if (is_int && dpl < cpl)
663 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
664 /* check valid bit */
665 if (!(e2 & DESC_P_MASK))
666 raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
672 * is_int is TRUE if coming from the int instruction. next_eip is the
673 * EIP value AFTER the interrupt instruction. It is only relevant if
676 void raise_interrupt(int intno, int is_int, int error_code,
677 unsigned int next_eip)
679 SegmentDescriptorTable *dt;
685 ptr = dt->base + (intno * 8);
688 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
690 /* check privilege if software int */
691 if (is_int && dpl < cpl)
692 raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
694 /* Since we emulate only user space, we cannot do more than
695 exiting the emulation with the suitable exception and error
699 env->exception_index = intno;
700 env->error_code = error_code;
707 /* shortcuts to generate exceptions */
708 void raise_exception_err(int exception_index, int error_code)
710 raise_interrupt(exception_index, 0, error_code, 0);
713 void raise_exception(int exception_index)
715 raise_interrupt(exception_index, 0, 0, 0);
718 void OPPROTO op_raise_interrupt(void)
721 unsigned int next_eip;
724 raise_interrupt(intno, 1, 0, next_eip);
727 void OPPROTO op_raise_exception(void)
730 exception_index = PARAM1;
731 raise_exception(exception_index);
734 void OPPROTO op_into(void)
737 eflags = cc_table[CC_OP].compute_all();
739 raise_interrupt(EXCP04_INTO, 1, 0, PARAM1);
744 void OPPROTO op_cli(void)
746 env->eflags &= ~IF_MASK;
749 void OPPROTO op_sti(void)
751 env->eflags |= IF_MASK;
755 /* vm86plus instructions */
756 void OPPROTO op_cli_vm(void)
758 env->eflags &= ~VIF_MASK;
761 void OPPROTO op_sti_vm(void)
763 env->eflags |= VIF_MASK;
764 if (env->eflags & VIP_MASK) {
766 raise_exception(EXCP0D_GPF);
772 void OPPROTO op_boundw(void)
775 low = ldsw((uint8_t *)A0);
776 high = ldsw((uint8_t *)A0 + 2);
778 if (v < low || v > high) {
780 raise_exception(EXCP05_BOUND);
785 void OPPROTO op_boundl(void)
788 low = ldl((uint8_t *)A0);
789 high = ldl((uint8_t *)A0 + 4);
791 if (v < low || v > high) {
793 raise_exception(EXCP05_BOUND);
798 void OPPROTO op_cmpxchg8b(void)
803 eflags = cc_table[CC_OP].compute_all();
804 d = ldq((uint8_t *)A0);
805 if (d == (((uint64_t)EDX << 32) | EAX)) {
806 stq((uint8_t *)A0, ((uint64_t)ECX << 32) | EBX);
817 #if defined(__powerpc__)
819 /* on PowerPC we patch the jump instruction directly */
820 #define JUMP_TB(tbparam, n, eip)\
822 static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
823 asm volatile ("b %0" : : "i" (&__op_jmp ## n));\
825 T0 = (long)(tbparam) + (n);\
831 /* jump to next block operations (more portable code, does not need
832 cache flushing, but slower because of indirect jump) */
833 #define JUMP_TB(tbparam, n, eip)\
835 static void __attribute__((unused)) *__op_label ## n = &&label ## n;\
836 goto *((TranslationBlock *)tbparam)->tb_next[n];\
838 T0 = (long)(tbparam) + (n);\
844 void OPPROTO op_jmp_tb_next(void)
846 JUMP_TB(PARAM1, 0, PARAM2);
849 void OPPROTO op_movl_T0_0(void)
854 /* multiple size ops */
859 #include "ops_template.h"
863 #include "ops_template.h"
867 #include "ops_template.h"
872 void OPPROTO op_movsbl_T0_T0(void)
877 void OPPROTO op_movzbl_T0_T0(void)
882 void OPPROTO op_movswl_T0_T0(void)
887 void OPPROTO op_movzwl_T0_T0(void)
892 void OPPROTO op_movswl_EAX_AX(void)
897 void OPPROTO op_movsbw_AX_AL(void)
899 EAX = (EAX & 0xffff0000) | ((int8_t)EAX & 0xffff);
902 void OPPROTO op_movslq_EDX_EAX(void)
904 EDX = (int32_t)EAX >> 31;
907 void OPPROTO op_movswl_DX_AX(void)
909 EDX = (EDX & 0xffff0000) | (((int16_t)EAX >> 15) & 0xffff);
914 void op_pushl_T0(void)
918 stl((void *)offset, T0);
919 /* modify ESP after to handle exceptions correctly */
923 void op_pushw_T0(void)
927 stw((void *)offset, T0);
928 /* modify ESP after to handle exceptions correctly */
932 void op_pushl_ss32_T0(void)
936 stl(env->seg_cache[R_SS].base + offset, T0);
937 /* modify ESP after to handle exceptions correctly */
941 void op_pushw_ss32_T0(void)
945 stw(env->seg_cache[R_SS].base + offset, T0);
946 /* modify ESP after to handle exceptions correctly */
950 void op_pushl_ss16_T0(void)
953 offset = (ESP - 4) & 0xffff;
954 stl(env->seg_cache[R_SS].base + offset, T0);
955 /* modify ESP after to handle exceptions correctly */
956 ESP = (ESP & ~0xffff) | offset;
959 void op_pushw_ss16_T0(void)
962 offset = (ESP - 2) & 0xffff;
963 stw(env->seg_cache[R_SS].base + offset, T0);
964 /* modify ESP after to handle exceptions correctly */
965 ESP = (ESP & ~0xffff) | offset;
968 /* NOTE: ESP update is done after */
969 void op_popl_T0(void)
971 T0 = ldl((void *)ESP);
974 void op_popw_T0(void)
976 T0 = lduw((void *)ESP);
979 void op_popl_ss32_T0(void)
981 T0 = ldl(env->seg_cache[R_SS].base + ESP);
984 void op_popw_ss32_T0(void)
986 T0 = lduw(env->seg_cache[R_SS].base + ESP);
989 void op_popl_ss16_T0(void)
991 T0 = ldl(env->seg_cache[R_SS].base + (ESP & 0xffff));
994 void op_popw_ss16_T0(void)
996 T0 = lduw(env->seg_cache[R_SS].base + (ESP & 0xffff));
999 void op_addl_ESP_4(void)
1004 void op_addl_ESP_2(void)
1009 void op_addw_ESP_4(void)
1011 ESP = (ESP & ~0xffff) | ((ESP + 4) & 0xffff);
1014 void op_addw_ESP_2(void)
1016 ESP = (ESP & ~0xffff) | ((ESP + 2) & 0xffff);
1019 void op_addl_ESP_im(void)
1024 void op_addw_ESP_im(void)
1026 ESP = (ESP & ~0xffff) | ((ESP + PARAM1) & 0xffff);
1034 void OPPROTO op_rdtsc(void)
1038 asm("rdtsc" : "=A" (val));
1040 /* better than nothing: the time increases */
1047 /* We simulate a pre-MMX pentium as in valgrind */
1048 #define CPUID_FP87 (1 << 0)
1049 #define CPUID_VME (1 << 1)
1050 #define CPUID_DE (1 << 2)
1051 #define CPUID_PSE (1 << 3)
1052 #define CPUID_TSC (1 << 4)
1053 #define CPUID_MSR (1 << 5)
1054 #define CPUID_PAE (1 << 6)
1055 #define CPUID_MCE (1 << 7)
1056 #define CPUID_CX8 (1 << 8)
1057 #define CPUID_APIC (1 << 9)
1058 #define CPUID_SEP (1 << 11) /* sysenter/sysexit */
1059 #define CPUID_MTRR (1 << 12)
1060 #define CPUID_PGE (1 << 13)
1061 #define CPUID_MCA (1 << 14)
1062 #define CPUID_CMOV (1 << 15)
1064 #define CPUID_MMX (1 << 23)
1065 #define CPUID_FXSR (1 << 24)
1066 #define CPUID_SSE (1 << 25)
1067 #define CPUID_SSE2 (1 << 26)
1069 void helper_cpuid(void)
1072 EAX = 1; /* max EAX index supported */
1076 } else if (EAX == 1) {
1081 EDX = CPUID_FP87 | CPUID_DE | CPUID_PSE |
1082 CPUID_TSC | CPUID_MSR | CPUID_MCE |
1087 void OPPROTO op_cpuid(void)
1094 /* XXX: exception */
1095 void OPPROTO op_aam(void)
1102 EAX = (EAX & ~0xffff) | al | (ah << 8);
1106 void OPPROTO op_aad(void)
1111 ah = (EAX >> 8) & 0xff;
1112 al = ((ah * base) + al) & 0xff;
1113 EAX = (EAX & ~0xffff) | al;
1117 void OPPROTO op_aaa(void)
1123 eflags = cc_table[CC_OP].compute_all();
1126 ah = (EAX >> 8) & 0xff;
1128 icarry = (al > 0xf9);
1129 if (((al & 0x0f) > 9 ) || af) {
1130 al = (al + 6) & 0x0f;
1131 ah = (ah + 1 + icarry) & 0xff;
1132 eflags |= CC_C | CC_A;
1134 eflags &= ~(CC_C | CC_A);
1137 EAX = (EAX & ~0xffff) | al | (ah << 8);
1141 void OPPROTO op_aas(void)
1147 eflags = cc_table[CC_OP].compute_all();
1150 ah = (EAX >> 8) & 0xff;
1153 if (((al & 0x0f) > 9 ) || af) {
1154 al = (al - 6) & 0x0f;
1155 ah = (ah - 1 - icarry) & 0xff;
1156 eflags |= CC_C | CC_A;
1158 eflags &= ~(CC_C | CC_A);
1161 EAX = (EAX & ~0xffff) | al | (ah << 8);
1165 void OPPROTO op_daa(void)
1170 eflags = cc_table[CC_OP].compute_all();
1176 if (((al & 0x0f) > 9 ) || af) {
1177 al = (al + 6) & 0xff;
1180 if ((al > 0x9f) || cf) {
1181 al = (al + 0x60) & 0xff;
1184 EAX = (EAX & ~0xff) | al;
1185 /* well, speed is not an issue here, so we compute the flags by hand */
1186 eflags |= (al == 0) << 6; /* zf */
1187 eflags |= parity_table[al]; /* pf */
1188 eflags |= (al & 0x80); /* sf */
1192 void OPPROTO op_das(void)
1194 int al, al1, af, cf;
1197 eflags = cc_table[CC_OP].compute_all();
1204 if (((al & 0x0f) > 9 ) || af) {
1208 al = (al - 6) & 0xff;
1210 if ((al1 > 0x99) || cf) {
1211 al = (al - 0x60) & 0xff;
1214 EAX = (EAX & ~0xff) | al;
1215 /* well, speed is not an issue here, so we compute the flags by hand */
1216 eflags |= (al == 0) << 6; /* zf */
1217 eflags |= parity_table[al]; /* pf */
1218 eflags |= (al & 0x80); /* sf */
1222 /* segment handling */
1224 /* only works if protected mode and not VM86 */
1225 void load_seg(int seg_reg, int selector, unsigned cur_eip)
1228 SegmentDescriptorTable *dt;
1233 sc = &env->seg_cache[seg_reg];
1234 if ((selector & 0xfffc) == 0) {
1235 /* null selector case */
1236 if (seg_reg == R_SS) {
1238 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1240 /* XXX: each access should trigger an exception */
1250 index = selector & ~7;
1251 if ((index + 7) > dt->limit) {
1253 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1255 ptr = dt->base + index;
1258 if (!(e2 & DESC_S_MASK) ||
1259 (e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1261 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1264 if (seg_reg == R_SS) {
1265 if ((e2 & (DESC_CS_MASK | DESC_W_MASK)) == 0) {
1267 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1270 if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
1272 raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
1276 if (!(e2 & DESC_P_MASK)) {
1278 if (seg_reg == R_SS)
1279 raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
1281 raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
1284 sc->base = (void *)((e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000));
1285 sc->limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1287 sc->limit = (sc->limit << 12) | 0xfff;
1288 sc->seg_32bit = (e2 >> 22) & 1;
1290 fprintf(logfile, "load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx seg_32bit=%d\n",
1291 selector, (unsigned long)sc->base, sc->limit, sc->seg_32bit);
1294 env->segs[seg_reg] = selector;
1297 void OPPROTO op_movl_seg_T0(void)
1299 load_seg(PARAM1, T0 & 0xffff, PARAM2);
1302 /* faster VM86 version */
1303 void OPPROTO op_movl_seg_T0_vm(void)
1307 selector = T0 & 0xffff;
1308 /* env->segs[] access */
1309 *(uint32_t *)((char *)env + PARAM1) = selector;
1310 /* env->seg_cache[] access */
1311 ((SegmentCache *)((char *)env + PARAM2))->base = (void *)(selector << 4);
1314 void OPPROTO op_movl_T0_seg(void)
1316 T0 = env->segs[PARAM1];
1319 void OPPROTO op_movl_A0_seg(void)
1321 A0 = *(unsigned long *)((char *)env + PARAM1);
1324 void OPPROTO op_addl_A0_seg(void)
1326 A0 += *(unsigned long *)((char *)env + PARAM1);
1329 void helper_lsl(void)
1331 unsigned int selector, limit;
1332 SegmentDescriptorTable *dt;
1337 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1338 selector = T0 & 0xffff;
1343 index = selector & ~7;
1344 if ((index + 7) > dt->limit)
1346 ptr = dt->base + index;
1349 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
1351 limit = (limit << 12) | 0xfff;
1356 void OPPROTO op_lsl(void)
1361 void helper_lar(void)
1363 unsigned int selector;
1364 SegmentDescriptorTable *dt;
1369 CC_SRC = cc_table[CC_OP].compute_all() & ~CC_Z;
1370 selector = T0 & 0xffff;
1375 index = selector & ~7;
1376 if ((index + 7) > dt->limit)
1378 ptr = dt->base + index;
1380 T1 = e2 & 0x00f0ff00;
1384 void OPPROTO op_lar(void)
1389 /* flags handling */
1391 /* slow jumps cases : in order to avoid calling a function with a
1392 pointer (which can generate a stack frame on PowerPC), we use
1393 op_setcc to set T0 and then call op_jcc. */
1394 void OPPROTO op_jcc(void)
1397 JUMP_TB(PARAM1, 0, PARAM2);
1399 JUMP_TB(PARAM1, 1, PARAM3);
1403 /* slow set cases (compute x86 flags) */
1404 void OPPROTO op_seto_T0_cc(void)
1407 eflags = cc_table[CC_OP].compute_all();
1408 T0 = (eflags >> 11) & 1;
1411 void OPPROTO op_setb_T0_cc(void)
1413 T0 = cc_table[CC_OP].compute_c();
1416 void OPPROTO op_setz_T0_cc(void)
1419 eflags = cc_table[CC_OP].compute_all();
1420 T0 = (eflags >> 6) & 1;
1423 void OPPROTO op_setbe_T0_cc(void)
1426 eflags = cc_table[CC_OP].compute_all();
1427 T0 = (eflags & (CC_Z | CC_C)) != 0;
1430 void OPPROTO op_sets_T0_cc(void)
1433 eflags = cc_table[CC_OP].compute_all();
1434 T0 = (eflags >> 7) & 1;
1437 void OPPROTO op_setp_T0_cc(void)
1440 eflags = cc_table[CC_OP].compute_all();
1441 T0 = (eflags >> 2) & 1;
1444 void OPPROTO op_setl_T0_cc(void)
1447 eflags = cc_table[CC_OP].compute_all();
1448 T0 = ((eflags ^ (eflags >> 4)) >> 7) & 1;
1451 void OPPROTO op_setle_T0_cc(void)
1454 eflags = cc_table[CC_OP].compute_all();
1455 T0 = (((eflags ^ (eflags >> 4)) & 0x80) || (eflags & CC_Z)) != 0;
1458 void OPPROTO op_xor_T0_1(void)
1463 void OPPROTO op_set_cc_op(void)
1468 #define FL_UPDATE_MASK32 (TF_MASK | AC_MASK | ID_MASK)
1469 #define FL_UPDATE_MASK16 (TF_MASK)
1471 void OPPROTO op_movl_eflags_T0(void)
1475 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1476 DF = 1 - (2 * ((eflags >> 10) & 1));
1477 /* we also update some system flags as in user mode */
1478 env->eflags = (env->eflags & ~FL_UPDATE_MASK32) | (eflags & FL_UPDATE_MASK32);
1481 void OPPROTO op_movw_eflags_T0(void)
1485 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1486 DF = 1 - (2 * ((eflags >> 10) & 1));
1487 /* we also update some system flags as in user mode */
1488 env->eflags = (env->eflags & ~FL_UPDATE_MASK16) | (eflags & FL_UPDATE_MASK16);
1492 /* vm86plus version */
1493 void OPPROTO op_movw_eflags_T0_vm(void)
1497 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1498 DF = 1 - (2 * ((eflags >> 10) & 1));
1499 /* we also update some system flags as in user mode */
1500 env->eflags = (env->eflags & ~(FL_UPDATE_MASK16 | VIF_MASK)) |
1501 (eflags & FL_UPDATE_MASK16);
1502 if (eflags & IF_MASK) {
1503 env->eflags |= VIF_MASK;
1504 if (env->eflags & VIP_MASK) {
1506 raise_exception(EXCP0D_GPF);
1512 void OPPROTO op_movl_eflags_T0_vm(void)
1516 CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
1517 DF = 1 - (2 * ((eflags >> 10) & 1));
1518 /* we also update some system flags as in user mode */
1519 env->eflags = (env->eflags & ~(FL_UPDATE_MASK32 | VIF_MASK)) |
1520 (eflags & FL_UPDATE_MASK32);
1521 if (eflags & IF_MASK) {
1522 env->eflags |= VIF_MASK;
1523 if (env->eflags & VIP_MASK) {
1525 raise_exception(EXCP0D_GPF);
1532 /* XXX: compute only O flag */
1533 void OPPROTO op_movb_eflags_T0(void)
1536 of = cc_table[CC_OP].compute_all() & CC_O;
1537 CC_SRC = (T0 & (CC_S | CC_Z | CC_A | CC_P | CC_C)) | of;
1540 void OPPROTO op_movl_T0_eflags(void)
1543 eflags = cc_table[CC_OP].compute_all();
1544 eflags |= (DF & DF_MASK);
1545 eflags |= env->eflags & ~(VM_MASK | RF_MASK);
1549 /* vm86plus version */
1551 void OPPROTO op_movl_T0_eflags_vm(void)
1554 eflags = cc_table[CC_OP].compute_all();
1555 eflags |= (DF & DF_MASK);
1556 eflags |= env->eflags & ~(VM_MASK | RF_MASK | IF_MASK);
1557 if (env->eflags & VIF_MASK)
1563 void OPPROTO op_cld(void)
1568 void OPPROTO op_std(void)
1573 void OPPROTO op_clc(void)
1576 eflags = cc_table[CC_OP].compute_all();
1581 void OPPROTO op_stc(void)
1584 eflags = cc_table[CC_OP].compute_all();
1589 void OPPROTO op_cmc(void)
1592 eflags = cc_table[CC_OP].compute_all();
1597 void OPPROTO op_salc(void)
1600 cf = cc_table[CC_OP].compute_c();
1601 EAX = (EAX & ~0xff) | ((-cf) & 0xff);
1604 static int compute_all_eflags(void)
1609 static int compute_c_eflags(void)
1611 return CC_SRC & CC_C;
1614 static int compute_c_mul(void)
1621 static int compute_all_mul(void)
1623 int cf, pf, af, zf, sf, of;
1625 pf = 0; /* undefined */
1626 af = 0; /* undefined */
1627 zf = 0; /* undefined */
1628 sf = 0; /* undefined */
1630 return cf | pf | af | zf | sf | of;
1633 CCTable cc_table[CC_OP_NB] = {
1634 [CC_OP_DYNAMIC] = { /* should never happen */ },
1636 [CC_OP_EFLAGS] = { compute_all_eflags, compute_c_eflags },
1638 [CC_OP_MUL] = { compute_all_mul, compute_c_mul },
1640 [CC_OP_ADDB] = { compute_all_addb, compute_c_addb },
1641 [CC_OP_ADDW] = { compute_all_addw, compute_c_addw },
1642 [CC_OP_ADDL] = { compute_all_addl, compute_c_addl },
1644 [CC_OP_ADCB] = { compute_all_adcb, compute_c_adcb },
1645 [CC_OP_ADCW] = { compute_all_adcw, compute_c_adcw },
1646 [CC_OP_ADCL] = { compute_all_adcl, compute_c_adcl },
1648 [CC_OP_SUBB] = { compute_all_subb, compute_c_subb },
1649 [CC_OP_SUBW] = { compute_all_subw, compute_c_subw },
1650 [CC_OP_SUBL] = { compute_all_subl, compute_c_subl },
1652 [CC_OP_SBBB] = { compute_all_sbbb, compute_c_sbbb },
1653 [CC_OP_SBBW] = { compute_all_sbbw, compute_c_sbbw },
1654 [CC_OP_SBBL] = { compute_all_sbbl, compute_c_sbbl },
1656 [CC_OP_LOGICB] = { compute_all_logicb, compute_c_logicb },
1657 [CC_OP_LOGICW] = { compute_all_logicw, compute_c_logicw },
1658 [CC_OP_LOGICL] = { compute_all_logicl, compute_c_logicl },
1660 [CC_OP_INCB] = { compute_all_incb, compute_c_incl },
1661 [CC_OP_INCW] = { compute_all_incw, compute_c_incl },
1662 [CC_OP_INCL] = { compute_all_incl, compute_c_incl },
1664 [CC_OP_DECB] = { compute_all_decb, compute_c_incl },
1665 [CC_OP_DECW] = { compute_all_decw, compute_c_incl },
1666 [CC_OP_DECL] = { compute_all_decl, compute_c_incl },
1668 [CC_OP_SHLB] = { compute_all_shlb, compute_c_shlb },
1669 [CC_OP_SHLW] = { compute_all_shlw, compute_c_shlw },
1670 [CC_OP_SHLL] = { compute_all_shll, compute_c_shll },
1672 [CC_OP_SARB] = { compute_all_sarb, compute_c_sarl },
1673 [CC_OP_SARW] = { compute_all_sarw, compute_c_sarl },
1674 [CC_OP_SARL] = { compute_all_sarl, compute_c_sarl },
1677 /* floating point support. Some of the code for complicated x87
1678 functions comes from the LGPL'ed x86 emulator found in the Willows
1679 TWIN windows emulator. */
1681 #ifdef USE_X86LDOUBLE
1682 /* use long double functions */
1683 #define lrint lrintl
1684 #define llrint llrintl
1692 #define atan2 atan2l
1693 #define floor floorl
1698 extern int lrint(CPU86_LDouble x);
1699 extern int64_t llrint(CPU86_LDouble x);
1700 extern CPU86_LDouble fabs(CPU86_LDouble x);
1701 extern CPU86_LDouble sin(CPU86_LDouble x);
1702 extern CPU86_LDouble cos(CPU86_LDouble x);
1703 extern CPU86_LDouble sqrt(CPU86_LDouble x);
1704 extern CPU86_LDouble pow(CPU86_LDouble, CPU86_LDouble);
1705 extern CPU86_LDouble log(CPU86_LDouble x);
1706 extern CPU86_LDouble tan(CPU86_LDouble x);
1707 extern CPU86_LDouble atan2(CPU86_LDouble, CPU86_LDouble);
1708 extern CPU86_LDouble floor(CPU86_LDouble x);
1709 extern CPU86_LDouble ceil(CPU86_LDouble x);
1710 extern CPU86_LDouble rint(CPU86_LDouble x);
1712 #if defined(__powerpc__)
1713 extern CPU86_LDouble copysign(CPU86_LDouble, CPU86_LDouble);
1715 /* correct (but slow) PowerPC rint() (glibc version is incorrect) */
1716 double qemu_rint(double x)
1718 double y = 4503599627370496.0;
1729 #define rint qemu_rint
1732 #define RC_MASK 0xc00
1733 #define RC_NEAR 0x000
1734 #define RC_DOWN 0x400
1736 #define RC_CHOP 0xc00
1738 #define MAXTAN 9223372036854775808.0
1740 #ifdef USE_X86LDOUBLE
1746 unsigned long long lower;
1747 unsigned short upper;
1751 /* the following deal with x86 long double-precision numbers */
1752 #define MAXEXPD 0x7fff
1753 #define EXPBIAS 16383
1754 #define EXPD(fp) (fp.l.upper & 0x7fff)
1755 #define SIGND(fp) ((fp.l.upper) & 0x8000)
1756 #define MANTD(fp) (fp.l.lower)
1757 #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
1763 #ifndef WORDS_BIGENDIAN
1765 unsigned long lower;
1771 unsigned long lower;
1777 /* the following deal with IEEE double-precision numbers */
1778 #define MAXEXPD 0x7ff
1779 #define EXPBIAS 1023
1780 #define EXPD(fp) (((fp.l.upper) >> 20) & 0x7FF)
1781 #define SIGND(fp) ((fp.l.upper) & 0x80000000)
1782 #define MANTD(fp) (fp.ll & ((1LL << 52) - 1))
1783 #define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7ff << 20)) | (EXPBIAS << 20)
1788 void OPPROTO op_flds_FT0_A0(void)
1790 #ifdef USE_FP_CONVERT
1791 FP_CONVERT.i32 = ldl((void *)A0);
1794 FT0 = ldfl((void *)A0);
1798 void OPPROTO op_fldl_FT0_A0(void)
1800 #ifdef USE_FP_CONVERT
1801 FP_CONVERT.i64 = ldq((void *)A0);
1804 FT0 = ldfq((void *)A0);
1808 /* helpers are needed to avoid static constant reference. XXX: find a better way */
1809 #ifdef USE_INT_TO_FLOAT_HELPERS
1811 void helper_fild_FT0_A0(void)
1813 FT0 = (CPU86_LDouble)ldsw((void *)A0);
1816 void helper_fildl_FT0_A0(void)
1818 FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1821 void helper_fildll_FT0_A0(void)
1823 FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1826 void OPPROTO op_fild_FT0_A0(void)
1828 helper_fild_FT0_A0();
1831 void OPPROTO op_fildl_FT0_A0(void)
1833 helper_fildl_FT0_A0();
1836 void OPPROTO op_fildll_FT0_A0(void)
1838 helper_fildll_FT0_A0();
1843 void OPPROTO op_fild_FT0_A0(void)
1845 #ifdef USE_FP_CONVERT
1846 FP_CONVERT.i32 = ldsw((void *)A0);
1847 FT0 = (CPU86_LDouble)FP_CONVERT.i32;
1849 FT0 = (CPU86_LDouble)ldsw((void *)A0);
1853 void OPPROTO op_fildl_FT0_A0(void)
1855 #ifdef USE_FP_CONVERT
1856 FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
1857 FT0 = (CPU86_LDouble)FP_CONVERT.i32;
1859 FT0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1863 void OPPROTO op_fildll_FT0_A0(void)
1865 #ifdef USE_FP_CONVERT
1866 FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
1867 FT0 = (CPU86_LDouble)FP_CONVERT.i64;
1869 FT0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
1876 void OPPROTO op_flds_ST0_A0(void)
1878 #ifdef USE_FP_CONVERT
1879 FP_CONVERT.i32 = ldl((void *)A0);
1882 ST0 = ldfl((void *)A0);
1886 void OPPROTO op_fldl_ST0_A0(void)
1888 #ifdef USE_FP_CONVERT
1889 FP_CONVERT.i64 = ldq((void *)A0);
1892 ST0 = ldfq((void *)A0);
1896 #ifdef USE_X86LDOUBLE
1897 void OPPROTO op_fldt_ST0_A0(void)
1899 ST0 = *(long double *)A0;
1902 static inline CPU86_LDouble helper_fldt(uint8_t *ptr)
1904 CPU86_LDoubleU temp;
1907 upper = lduw(ptr + 8);
1908 /* XXX: handle overflow ? */
1909 e = (upper & 0x7fff) - 16383 + EXPBIAS; /* exponent */
1910 e |= (upper >> 4) & 0x800; /* sign */
1911 temp.ll = ((ldq(ptr) >> 11) & ((1LL << 52) - 1)) | ((uint64_t)e << 52);
1915 void helper_fldt_ST0_A0(void)
1917 ST0 = helper_fldt((uint8_t *)A0);
1920 void OPPROTO op_fldt_ST0_A0(void)
1922 helper_fldt_ST0_A0();
1926 /* helpers are needed to avoid static constant reference. XXX: find a better way */
1927 #ifdef USE_INT_TO_FLOAT_HELPERS
/* fild into ST0: helper variants (16/32/64-bit signed integers) */
1929 void helper_fild_ST0_A0(void)
1931 ST0 = (CPU86_LDouble)ldsw((void *)A0);
1934 void helper_fildl_ST0_A0(void)
1936 ST0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1939 void helper_fildll_ST0_A0(void)
1941 ST0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
/* micro-op wrappers for the helpers above */
1944 void OPPROTO op_fild_ST0_A0(void)
1946 helper_fild_ST0_A0();
1949 void OPPROTO op_fildl_ST0_A0(void)
1951 helper_fildl_ST0_A0();
1954 void OPPROTO op_fildll_ST0_A0(void)
1956 helper_fildll_ST0_A0();
/* non-helper variants: convert inline, optionally via FP_CONVERT union */
1961 void OPPROTO op_fild_ST0_A0(void)
1963 #ifdef USE_FP_CONVERT
1964 FP_CONVERT.i32 = ldsw((void *)A0);
1965 ST0 = (CPU86_LDouble)FP_CONVERT.i32;
1967 ST0 = (CPU86_LDouble)ldsw((void *)A0);
1971 void OPPROTO op_fildl_ST0_A0(void)
1973 #ifdef USE_FP_CONVERT
1974 FP_CONVERT.i32 = (int32_t) ldl((void *)A0);
1975 ST0 = (CPU86_LDouble)FP_CONVERT.i32;
1977 ST0 = (CPU86_LDouble)((int32_t)ldl((void *)A0));
1981 void OPPROTO op_fildll_ST0_A0(void)
1983 #ifdef USE_FP_CONVERT
1984 FP_CONVERT.i64 = (int64_t) ldq((void *)A0);
1985 ST0 = (CPU86_LDouble)FP_CONVERT.i64;
1987 ST0 = (CPU86_LDouble)((int64_t)ldq((void *)A0));
/* FP stores from ST0: 32-bit float (fsts), 64-bit double (fstl) and
   80-bit extended (fstt).  helper_fstt hand-builds the 80-bit format
   when the host has no native long double. */
1995 void OPPROTO op_fsts_ST0_A0(void)
1997 #ifdef USE_FP_CONVERT
1999 stfl((void *)A0, FP_CONVERT.f);
2001 stfl((void *)A0, (float)ST0);
2005 void OPPROTO op_fstl_ST0_A0(void)
2007 stfq((void *)A0, (double)ST0);
2010 #ifdef USE_X86LDOUBLE
/* host long double is already the 80-bit x87 format */
2011 void OPPROTO op_fstt_ST0_A0(void)
2013 *(long double *)A0 = ST0;
/* software conversion: host double f -> 80-bit extended at ptr */
2017 static inline void helper_fstt(CPU86_LDouble f, uint8_t *ptr)
2019 CPU86_LDoubleU temp;
/* mantissa shifted to 63 bits, with the explicit integer bit set */
2023 stq(ptr, (MANTD(temp) << 11) | (1LL << 63));
2024 /* exponent + sign */
2025 e = EXPD(temp) - EXPBIAS + 16383;
2026 e |= SIGND(temp) >> 16;
2030 void helper_fstt_ST0_A0(void)
2032 helper_fstt(ST0, (uint8_t *)A0);
2035 void OPPROTO op_fstt_ST0_A0(void)
2037 helper_fstt_ST0_A0();
/* fist ops: store ST0 as a 16/32/64-bit integer at A0.  The sparc v8
   special case pins the value in register %o0 — presumably to work
   around a calling-convention/codegen issue on that host (TODO confirm). */
2041 void OPPROTO op_fist_ST0_A0(void)
2043 #if defined(__sparc__) && !defined(__sparc_v9__)
2044 register CPU86_LDouble d asm("o0");
2052 stw((void *)A0, val);
2055 void OPPROTO op_fistl_ST0_A0(void)
2057 #if defined(__sparc__) && !defined(__sparc_v9__)
2058 register CPU86_LDouble d asm("o0");
2066 stl((void *)A0, val);
2069 void OPPROTO op_fistll_ST0_A0(void)
2071 #if defined(__sparc__) && !defined(__sparc_v9__)
2072 register CPU86_LDouble d asm("o0");
2080 stq((void *)A0, val);
/* Multiply an integer by 10 without a multiply instruction:
 * 10*iv == iv + iv + (iv << 3).
 * Every use of the argument is parenthesized so that operand expressions
 * built from lower-precedence operators (e.g. `a ? b : c`) expand
 * correctly; the original unparenthesized form mis-parsed such arguments.
 * NOTE: the argument is still evaluated three times — pass only
 * side-effect-free expressions (all current callers pass `m32i`). */
#define MUL10(iv) ((iv) + (iv) + ((iv) << 3))
/* FBLD: load an 18-digit packed-BCD integer (two digits per byte, sign
   in bit 7 of byte 9) from A0 into ST0.  Digits are accumulated into an
   int in two 8/10-digit halves (d17..d8 scaled by 1e8, then d7..d0) to
   stay within integer range before converting to FP. */
2087 void helper_fbld_ST0_A0(void)
2090 CPU86_LDouble fpsrcop;
2094 /* in this code, seg/m32i will be used as temporary ptr/int */
2095 seg = (uint8_t *)A0 + 8;
2097 /* XXX: raise exception */
2101 /* XXX: raise exception */
2102 if ((v & 0xf0) != 0)
2104 m32i = v; /* <-- d14 */
2106 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d13 */
2107 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d12 */
2109 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d11 */
2110 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d10 */
2112 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d9 */
2113 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d8 */
/* high half done: scale by 10^8 before folding in the low 8 digits */
2114 fpsrcop = ((CPU86_LDouble)m32i) * 100000000.0;
2117 m32i = (v >> 4); /* <-- d7 */
2118 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d6 */
2120 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d5 */
2121 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d4 */
2123 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d3 */
2124 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d2 */
2126 m32i = MUL10(m32i) + (v >> 4); /* <-- val * 10 + d1 */
2127 m32i = MUL10(m32i) + (v & 0xf); /* <-- val * 10 + d0 */
2128 fpsrcop += ((CPU86_LDouble)m32i);
/* sign byte: bit 7 of byte 9 set means negative */
2129 if ( ldub(seg+9) & 0x80 )
2134 void OPPROTO op_fbld_ST0_A0(void)
2136 helper_fbld_ST0_A0();
/* FBST: store rint(ST0) as packed BCD at A0 — sign word at offset 8
   (0x8000 = negative), then digits extracted two at a time by repeated
   divide-by-10 into bytes 0..7 (low nibble first, high nibble via << 4). */
2139 void helper_fbst_ST0_A0(void)
2141 CPU86_LDouble fptemp;
2142 CPU86_LDouble fpsrcop;
2144 uint8_t *mem_ref, *mem_end;
/* round to integer using the current rounding mode */
2146 fpsrcop = rint(ST0);
2147 mem_ref = (uint8_t *)A0;
2148 mem_end = mem_ref + 8;
2149 if ( fpsrcop < 0.0 ) {
2150 stw(mem_end, 0x8000);
2153 stw(mem_end, 0x0000);
/* emit digit pairs until the value is exhausted */
2155 while (mem_ref < mem_end) {
2158 fptemp = floor(fpsrcop/10.0);
2159 v = ((int)(fpsrcop - fptemp*10.0));
2160 if (fptemp == 0.0) {
2165 fptemp = floor(fpsrcop/10.0);
2166 v |= (((int)(fpsrcop - fptemp*10.0)) << 4);
/* zero-fill the remaining BCD bytes */
2170 while (mem_ref < mem_end) {
2175 void OPPROTO op_fbst_ST0_A0(void)
2177 helper_fbst_ST0_A0();
/* FPU register-stack management.  fpstt is the top-of-stack index
   (3-bit, wraps modulo 8); fptags[i] == 1 marks register i as empty. */
2182 static inline void fpush(void)
2184 env->fpstt = (env->fpstt - 1) & 7;
2185 env->fptags[env->fpstt] = 0; /* validate stack entry */
2188 static inline void fpop(void)
2190 env->fptags[env->fpstt] = 1; /* invalidate stack entry */
2191 env->fpstt = (env->fpstt + 1) & 7;
2194 void OPPROTO op_fpush(void)
2199 void OPPROTO op_fpop(void)
/* FDECSTP/FINCSTP: move TOP without touching tags; clear C0-C3
   (mask 0x4700) in the status word */
2204 void OPPROTO op_fdecstp(void)
2206 env->fpstt = (env->fpstt - 1) & 7;
2207 env->fpus &= (~0x4700);
2210 void OPPROTO op_fincstp(void)
2212 env->fpstt = (env->fpstt + 1) & 7;
2213 env->fpus &= (~0x4700);
/* Register moves between ST0, FT0 and ST(n), plus FXCH. */
2216 void OPPROTO op_fmov_ST0_FT0(void)
2221 void OPPROTO op_fmov_FT0_STN(void)
2226 void OPPROTO op_fmov_ST0_STN(void)
2231 void OPPROTO op_fmov_STN_ST0(void)
2236 void OPPROTO op_fxchg_ST0_STN(void)
2244 /* FPU operations */
/* FCOM/FUCOM: compare ST0 with FT0 and set C3,C2,C0 in the status word
   (C0=0x100 for "less", C3=0x4000 for "equal", all clear for "greater"). */
2246 /* XXX: handle nans */
2247 void OPPROTO op_fcom_ST0_FT0(void)
2249 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
2251 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
2252 else if (ST0 == FT0)
2253 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
2257 /* XXX: handle nans */
2258 void OPPROTO op_fucom_ST0_FT0(void)
2260 env->fpus &= (~0x4500); /* (C3,C2,C0) <-- 000 */
2262 env->fpus |= 0x100; /* (C3,C2,C0) <-- 001 */
2263 else if (ST0 == FT0)
2264 env->fpus |= 0x4000; /* (C3,C2,C0) <-- 100 */
/* FCOMI/FUCOMI: same compare, but results go to EFLAGS ZF/PF/CF
   via the lazy-flags machinery (cc_table). */
2268 /* XXX: handle nans */
2269 void OPPROTO op_fcomi_ST0_FT0(void)
2272 eflags = cc_table[CC_OP].compute_all();
2273 eflags &= ~(CC_Z | CC_P | CC_C);
2276 else if (ST0 == FT0)
2282 /* XXX: handle nans */
2283 void OPPROTO op_fucomi_ST0_FT0(void)
2286 eflags = cc_table[CC_OP].compute_all();
2287 eflags &= ~(CC_Z | CC_P | CC_C);
2290 else if (ST0 == FT0)
/* Arithmetic between ST0 and the FT0 temporary (result in ST0),
   in both plain and reversed (subr/divr) operand orders. */
2296 void OPPROTO op_fadd_ST0_FT0(void)
2301 void OPPROTO op_fmul_ST0_FT0(void)
2306 void OPPROTO op_fsub_ST0_FT0(void)
2311 void OPPROTO op_fsubr_ST0_FT0(void)
2316 void OPPROTO op_fdiv_ST0_FT0(void)
2321 void OPPROTO op_fdivr_ST0_FT0(void)
2326 /* fp operations between STN and ST0 */
2328 void OPPROTO op_fadd_STN_ST0(void)
2333 void OPPROTO op_fmul_STN_ST0(void)
2338 void OPPROTO op_fsub_STN_ST0(void)
2343 void OPPROTO op_fsubr_STN_ST0(void)
2350 void OPPROTO op_fdiv_STN_ST0(void)
2355 void OPPROTO op_fdivr_STN_ST0(void)
2362 /* misc FPU operations */
2363 void OPPROTO op_fchs_ST0(void)
2368 void OPPROTO op_fabs_ST0(void)
/* FXAM: classify ST0 and report the class in C3,C2,C1,C0.
   C1 (0x200) carries the sign; the class encodings below follow the
   x87 FXAM condition-code table (NaN=C0, Infinity=C2|C0, Zero=C3,
   Denormal=C3|C2). */
2373 void helper_fxam_ST0(void)
2375 CPU86_LDoubleU temp;
2380 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2382 env->fpus |= 0x200; /* C1 <-- 1 */
2384 expdif = EXPD(temp);
/* all-ones exponent: infinity or NaN depending on the mantissa */
2385 if (expdif == MAXEXPD) {
2386 if (MANTD(temp) == 0)
2387 env->fpus |= 0x500 /*Infinity*/;
2389 env->fpus |= 0x100 /*NaN*/;
2390 } else if (expdif == 0) {
2391 if (MANTD(temp) == 0)
2392 env->fpus |= 0x4000 /*Zero*/;
2394 env->fpus |= 0x4400 /*Denormal*/;
2400 void OPPROTO op_fxam_ST0(void)
/* Load x87 constants into ST0 from the f15rk table.  The index mapping
   is implied by the op names (fld1 -> [1], fldz -> [0], pi -> [2],
   lg2 -> [3], ln2 -> [4], l2e -> [5], l2t -> [6]); the table itself is
   defined elsewhere — verify ordering against its definition. */
2405 void OPPROTO op_fld1_ST0(void)
2407 ST0 = *(CPU86_LDouble *)&f15rk[1];
2410 void OPPROTO op_fldl2t_ST0(void)
2412 ST0 = *(CPU86_LDouble *)&f15rk[6];
2415 void OPPROTO op_fldl2e_ST0(void)
2417 ST0 = *(CPU86_LDouble *)&f15rk[5];
2420 void OPPROTO op_fldpi_ST0(void)
2422 ST0 = *(CPU86_LDouble *)&f15rk[2];
2425 void OPPROTO op_fldlg2_ST0(void)
2427 ST0 = *(CPU86_LDouble *)&f15rk[3];
2430 void OPPROTO op_fldln2_ST0(void)
2432 ST0 = *(CPU86_LDouble *)&f15rk[4];
2435 void OPPROTO op_fldz_ST0(void)
2437 ST0 = *(CPU86_LDouble *)&f15rk[0];
2440 void OPPROTO op_fldz_FT0(void)
2442 ST0 = *(CPU86_LDouble *)&f15rk[0];
/* Transcendental helpers implemented with the host libm. */
/* F2XM1: ST0 = 2^ST0 - 1 */
2445 void helper_f2xm1(void)
2447 ST0 = pow(2.0,ST0) - 1.0;
/* FYL2X: ST1 = ST1 * log2(ST0), then the stack is popped (pop not
   visible in this fragment — verify against full source) */
2450 void helper_fyl2x(void)
2452 CPU86_LDouble fptemp;
2456 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
2460 env->fpus &= (~0x4700);
/* FPTAN: sets C2 when |arg| exceeds MAXTAN (out of partial-tangent range) */
2465 void helper_fptan(void)
2467 CPU86_LDouble fptemp;
2470 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2476 env->fpus &= (~0x400); /* C2 <-- 0 */
2477 /* the above code is for |arg| < 2**52 only */
/* FPATAN: ST1 = atan2(ST1, ST0) */
2481 void helper_fpatan(void)
2483 CPU86_LDouble fptemp, fpsrcop;
2487 ST1 = atan2(fpsrcop,fptemp);
/* FXTRACT: split ST0 into exponent and significand */
2491 void helper_fxtract(void)
2493 CPU86_LDoubleU temp;
2494 unsigned int expdif;
2497 expdif = EXPD(temp) - EXPBIAS;
2498 /*DP exponent bias*/
2507 CPU86_LDouble dblq, fpsrcop, fptemp;
2508 CPU86_LDoubleU fpsrcop1, fptemp1;
2514 fpsrcop1.d = fpsrcop;
2516 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2518 dblq = fpsrcop / fptemp;
2519 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2520 ST0 = fpsrcop - fptemp*dblq;
2521 q = (int)dblq; /* cutting off top bits is assumed here */
2522 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2523 /* (C0,C1,C3) <-- (q2,q1,q0) */
2524 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2525 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2526 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2528 env->fpus |= 0x400; /* C2 <-- 1 */
2529 fptemp = pow(2.0, expdif-50);
2530 fpsrcop = (ST0 / ST1) / fptemp;
2531 /* fpsrcop = integer obtained by rounding to the nearest */
2532 fpsrcop = (fpsrcop-floor(fpsrcop) < ceil(fpsrcop)-fpsrcop)?
2533 floor(fpsrcop): ceil(fpsrcop);
2534 ST0 -= (ST1 * fpsrcop * fptemp);
2538 void helper_fprem(void)
2540 CPU86_LDouble dblq, fpsrcop, fptemp;
2541 CPU86_LDoubleU fpsrcop1, fptemp1;
2547 fpsrcop1.d = fpsrcop;
2549 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
2550 if ( expdif < 53 ) {
2551 dblq = fpsrcop / fptemp;
2552 dblq = (dblq < 0.0)? ceil(dblq): floor(dblq);
2553 ST0 = fpsrcop - fptemp*dblq;
2554 q = (int)dblq; /* cutting off top bits is assumed here */
2555 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
2556 /* (C0,C1,C3) <-- (q2,q1,q0) */
2557 env->fpus |= (q&0x4) << 6; /* (C0) <-- q2 */
2558 env->fpus |= (q&0x2) << 8; /* (C1) <-- q1 */
2559 env->fpus |= (q&0x1) << 14; /* (C3) <-- q0 */
2561 env->fpus |= 0x400; /* C2 <-- 1 */
2562 fptemp = pow(2.0, expdif-50);
2563 fpsrcop = (ST0 / ST1) / fptemp;
2564 /* fpsrcop = integer obtained by chopping */
2565 fpsrcop = (fpsrcop < 0.0)?
2566 -(floor(fabs(fpsrcop))): floor(fpsrcop);
2567 ST0 -= (ST1 * fpsrcop * fptemp);
/* FYL2XP1: ST1 = ST1 * log2(ST0 + 1), valid only for ST0 + 1 > 0 */
2571 void helper_fyl2xp1(void)
2573 CPU86_LDouble fptemp;
2576 if ((fptemp+1.0)>0.0) {
2577 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
2581 env->fpus &= (~0x4700);
/* FSQRT: ST0 = sqrt(ST0); condition codes cleared */
2586 void helper_fsqrt(void)
2588 CPU86_LDouble fptemp;
2592 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
/* FSINCOS: sets C2 if the argument is outside the reducible range */
2598 void helper_fsincos(void)
2600 CPU86_LDouble fptemp;
2603 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2609 env->fpus &= (~0x400); /* C2 <-- 0 */
2610 /* the above code is for |arg| < 2**63 only */
/* FRNDINT: round ST0 to integer with the current rounding mode */
2614 void helper_frndint(void)
/* FSCALE: scale ST0 by a power of two derived from ST1 */
2619 void helper_fscale(void)
2621 CPU86_LDouble fpsrcop, fptemp;
2624 fptemp = pow(fpsrcop,ST1);
/* FSIN / FCOS: C2 set when |arg| > MAXTAN (argument not reduced) */
2628 void helper_fsin(void)
2630 CPU86_LDouble fptemp;
2633 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2637 env->fpus &= (~0x400); /* C2 <-- 0 */
2638 /* the above code is for |arg| < 2**53 only */
2642 void helper_fcos(void)
2644 CPU86_LDouble fptemp;
2647 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
2651 env->fpus &= (~0x400); /* C2 <-- 0 */
2652 /* the above code is for |arg| < 2**63 only */
2656 /* associated helpers to reduce generated code length and to simplify
2657    relocation (FP constants are usually stored in the .rodata section) */
/* Micro-op wrappers: each simply calls the matching helper_* above. */
2659 void OPPROTO op_f2xm1(void)
2664 void OPPROTO op_fyl2x(void)
2669 void OPPROTO op_fptan(void)
2674 void OPPROTO op_fpatan(void)
2679 void OPPROTO op_fxtract(void)
2684 void OPPROTO op_fprem1(void)
2690 void OPPROTO op_fprem(void)
2695 void OPPROTO op_fyl2xp1(void)
2700 void OPPROTO op_fsqrt(void)
2705 void OPPROTO op_fsincos(void)
2710 void OPPROTO op_frndint(void)
2715 void OPPROTO op_fscale(void)
2720 void OPPROTO op_fsin(void)
2725 void OPPROTO op_fcos(void)
/* Status/control-word ops.  FNSTSW merges the 3-bit TOP (fpstt) into
   bits 13..11 of the status word before storing it. */
2730 void OPPROTO op_fnstsw_A0(void)
2733 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2734 stw((void *)A0, fpus);
2737 void OPPROTO op_fnstsw_EAX(void)
2740 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
2741 EAX = (EAX & 0xffff0000) | fpus;
2744 void OPPROTO op_fnstcw_A0(void)
2746 stw((void *)A0, env->fpuc);
/* FLDCW: reload the control word and propagate the RC (rounding
   control) field to the host FPU via fesetround(). */
2749 void OPPROTO op_fldcw_A0(void)
2752 env->fpuc = lduw((void *)A0);
2753 /* set rounding mode */
2754 switch(env->fpuc & RC_MASK) {
2757 rnd_type = FE_TONEAREST;
2760 rnd_type = FE_DOWNWARD;
2763 rnd_type = FE_UPWARD;
2766 rnd_type = FE_TOWARDZERO;
2769 fesetround(rnd_type);
/* FNCLEX: clear exception flags and busy bit, keep bits 8..14 */
2772 void OPPROTO op_fclex(void)
2774 env->fpus &= 0x7f00;
2777 void OPPROTO op_fninit(void)
/* FSTENV: write the FPU environment (control, status with TOP merged in,
   and a tag word rebuilt by classifying each register) to ptr.
   data32 selects the 32-bit layout (stl, fields at 0/8/...) versus the
   16-bit layout (stw, fields at 0/4/...). */
2792 void helper_fstenv(uint8_t *ptr, int data32)
2794 int fpus, fptag, exp, i;
2798 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
/* rebuild the 2-bits-per-register tag word from fptags + contents */
2800 for (i=7; i>=0; i--) {
2802 if (env->fptags[i]) {
2805 tmp.d = env->fpregs[i];
2808 if (exp == 0 && mant == 0) {
2811 } else if (exp == 0 || exp == MAXEXPD
2812 #ifdef USE_X86LDOUBLE
/* extended format: a clear integer bit also means "special" */
2813 || (mant & (1LL << 63)) == 0
2816 /* NaNs, infinity, denormal */
/* 32-bit protected-mode layout */
2823 stl(ptr, env->fpuc);
2825 stl(ptr + 8, fptag);
/* 16-bit layout */
2832 stw(ptr, env->fpuc);
2834 stw(ptr + 4, fptag);
2842 void helper_fldenv(uint8_t *ptr, int data32)
2847 env->fpuc = lduw(ptr);
2848 fpus = lduw(ptr + 4);
2849 fptag = lduw(ptr + 8);
2852 env->fpuc = lduw(ptr);
2853 fpus = lduw(ptr + 2);
2854 fptag = lduw(ptr + 4);
2856 env->fpstt = (fpus >> 11) & 7;
2857 env->fpus = fpus & ~0x3800;
2858 for(i = 0;i < 7; i++) {
2859 env->fptags[i] = ((fptag & 3) == 3);
/* FSAVE: store the environment via helper_fstenv, then the 8 registers
   in 80-bit format (natively when USE_X86LDOUBLE, else via helper_fstt).
   The env area is 14 bytes in 16-bit mode, 28 in 32-bit — hence
   ptr += (14 << data32). */
2864 void helper_fsave(uint8_t *ptr, int data32)
2869 helper_fstenv(ptr, data32);
2871 ptr += (14 << data32);
2872 for(i = 0;i < 8; i++) {
2874 #ifdef USE_X86LDOUBLE
2875 *(long double *)ptr = tmp;
2877 helper_fstt(tmp, ptr);
/* FRSTOR: the exact inverse — reload env, then the 8 registers */
2896 void helper_frstor(uint8_t *ptr, int data32)
2901 helper_fldenv(ptr, data32);
2902 ptr += (14 << data32);
2904 for(i = 0;i < 8; i++) {
2905 #ifdef USE_X86LDOUBLE
2906 tmp = *(long double *)ptr;
2908 tmp = helper_fldt(ptr);
/* Micro-op wrappers for the env/state save-restore helpers; PARAM1
   carries the data32 (operand-size) flag at translation time. */
2915 void OPPROTO op_fnstenv_A0(void)
2917 helper_fstenv((uint8_t *)A0, PARAM1);
2920 void OPPROTO op_fldenv_A0(void)
2922 helper_fldenv((uint8_t *)A0, PARAM1);
2925 void OPPROTO op_fnsave_A0(void)
2927 helper_fsave((uint8_t *)A0, PARAM1);
2930 void OPPROTO op_frstor_A0(void)
2932 helper_frstor((uint8_t *)A0, PARAM1);
2935 /* threading support */
2936 void OPPROTO op_lock(void)
2941 void OPPROTO op_unlock(void)