5 Copyright (C) 2003 Fabrice Bellard
7 This library is free software; you can redistribute it and/or
8 modify it under the terms of the GNU Lesser General Public
9 License as published by the Free Software Foundation; either
10 version 2 of the License, or (at your option) any later version.
12 This library is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 Lesser General Public License for more details.
17 You should have received a copy of the GNU Lesser General Public
18 License along with this library; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 NPC/PC static optimisations (use JUMP_TB when possible)
27 Privileged instructions
28 Coprocessor-Instructions
29 Optimize synthetic instructions
30 Optional alignment and privileged instruction check
/* Sentinel values stored in dc->pc / dc->npc in place of a real guest
   address.  A real SPARC PC is word-aligned and never 1 or 2, so these
   markers are unambiguous. */
45 #define DYNAMIC_PC 1 /* dynamic pc value */
46 #define JUMP_PC 2 /* dynamic pc value which takes only two values
47 according to jump_pc[T2] */
/* Per-translation-block disassembly state, threaded through all the
   gen_* helpers below.
   NOTE(review): this listing is line-sampled; several struct members
   (e.g. mem_idx, used by supervisor()/gen_op_ldst below) are not visible
   here. */
49 typedef struct DisasContext {
50 target_ulong pc; /* current Program Counter: integer or DYNAMIC_PC */
51 target_ulong npc; /* next PC: integer or DYNAMIC_PC or JUMP_PC */
52 target_ulong jump_pc[2]; /* used when JUMP_PC pc value is used */
55 struct TranslationBlock *tb; /* TB currently being translated */
/* Output cursors into the generated micro-op and parameter buffers;
   reset at the start of each TB in gen_intermediate_code_internal(). */
58 static uint16_t *gen_opc_ptr;
59 static uint32_t *gen_opparam_ptr;
/* Used with an op-definition include to build the INDEX_op_* enum. */
64 #define DEF(s,n,copy_size) INDEX_op_ ## s,
/* Extract bits FROM..TO of a 32-bit instruction word, using SPARC's
   big-endian bit numbering (bit 0 is the MSB).  FROM/TO are evaluated
   more than once — pass constants only. */
72 #define GET_FIELD(X, FROM, TO) \
73 ((X) >> (31 - (TO)) & ((1 << ((TO) - (FROM) + 1)) - 1))
/* Bit 13 of `insn` selects the immediate (simm13) operand form. */
75 #define IS_IMM (insn & (1<<13))
/* Forward declaration: translates one instruction at dc->pc. */
77 static void disas_sparc_insn(DisasContext * dc);
/* Dispatch tables mapping a 5-bit register number to the micro-op that
   moves it to/from the temporaries (T0/T1/T2).  Table bodies are not
   visible in this sampled listing. */
79 static GenOpFunc *gen_op_movl_TN_reg[2][32] = {
150 static GenOpFunc *gen_op_movl_reg_TN[3][32] = {
255 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
/* GEN32(func, NAME) builds a 32-entry table NAME##_table of the
   per-register ops NAME##0 .. NAME##31, plus an inline dispatcher
   func(n) that calls entry n.  Used for the FP register movers below. */
261 #define GEN32(func, NAME) \
262 static GenOpFunc *NAME ## _table [32] = { \
263 NAME ## 0, NAME ## 1, NAME ## 2, NAME ## 3, \
264 NAME ## 4, NAME ## 5, NAME ## 6, NAME ## 7, \
265 NAME ## 8, NAME ## 9, NAME ## 10, NAME ## 11, \
266 NAME ## 12, NAME ## 13, NAME ## 14, NAME ## 15, \
267 NAME ## 16, NAME ## 17, NAME ## 18, NAME ## 19, \
268 NAME ## 20, NAME ## 21, NAME ## 22, NAME ## 23, \
269 NAME ## 24, NAME ## 25, NAME ## 26, NAME ## 27, \
270 NAME ## 28, NAME ## 29, NAME ## 30, NAME ## 31, \
272 static inline void func(int n) \
274 NAME ## _table[n](); \
277 /* floating point registers moves */
/* Single-precision (FT*) movers: one op per %f register. */
278 GEN32(gen_op_load_fpr_FT0, gen_op_load_fpr_FT0_fprf);
279 GEN32(gen_op_load_fpr_FT1, gen_op_load_fpr_FT1_fprf);
280 GEN32(gen_op_load_fpr_FT2, gen_op_load_fpr_FT2_fprf);
281 GEN32(gen_op_store_FT0_fpr, gen_op_store_FT0_fpr_fprf);
282 GEN32(gen_op_store_FT1_fpr, gen_op_store_FT1_fpr_fprf);
283 GEN32(gen_op_store_FT2_fpr, gen_op_store_FT2_fpr_fprf);
/* Double-precision (DT*) movers; presumably the odd entries are unused
   since doubles occupy even/odd register pairs — TODO confirm against
   the generated ops. */
285 GEN32(gen_op_load_fpr_DT0, gen_op_load_fpr_DT0_fprf);
286 GEN32(gen_op_load_fpr_DT1, gen_op_load_fpr_DT1_fprf);
287 GEN32(gen_op_load_fpr_DT2, gen_op_load_fpr_DT2_fprf);
288 GEN32(gen_op_store_DT0_fpr, gen_op_store_DT0_fpr_fprf);
289 GEN32(gen_op_store_DT1_fpr, gen_op_store_DT1_fpr_fprf);
290 GEN32(gen_op_store_DT2_fpr, gen_op_store_DT2_fpr_fprf);
/* Memory-access generation.  User-only builds always use the _raw ops
   and are never in supervisor mode; system builds index per-mode op
   tables by dc->mem_idx (0 = user, 1 = kernel) and decode the ASI for
   the alternate-space instructions. */
292 #if defined(CONFIG_USER_ONLY)
293 #define gen_op_ldst(name) gen_op_##name##_raw()
294 #define OP_LD_TABLE(width)
295 #define supervisor(dc) 0
297 #define gen_op_ldst(name) (*gen_op_##name[dc->mem_idx])()
298 #define OP_LD_TABLE(width) \
299 static GenOpFunc *gen_op_##width[] = { \
300 &gen_op_##width##_user, \
301 &gen_op_##width##_kernel, \
/* Alternate-space (lda/sta-style) variant: dispatch on the 8-bit ASI
   field of the instruction. */
304 static void gen_op_##width##a(int insn, int is_ld, int size, int sign) \
308 asi = GET_FIELD(insn, 19, 26); \
310 case 10: /* User data access */ \
311 gen_op_##width##_user(); \
313 case 11: /* Supervisor data access */ \
314 gen_op_##width##_kernel(); \
316 case 0x20 ... 0x2f: /* MMU passthrough */ \
/* Unknown ASIs fall back to the generic asi load/store ops. */
318 gen_op_ld_asi(asi, size, sign); \
320 gen_op_st_asi(asi, size, sign); \
324 gen_op_ld_asi(asi, size, sign); \
326 gen_op_st_asi(asi, size, sign); \
/* mem_idx == 1 means the TB was translated in supervisor mode. */
331 #define supervisor(dc) (dc->mem_idx == 1)
/* Load immediate `imm` into temporary T<reg>. */
351 static inline void gen_movl_imm_TN(int reg, int imm)
353 gen_op_movl_TN_im[reg] (imm);
356 static inline void gen_movl_imm_T1(int val)
358 gen_movl_imm_TN(1, val);
361 static inline void gen_movl_imm_T0(int val)
363 gen_movl_imm_TN(0, val);
/* Load global/window register `reg` into temporary T<t>.
   %g0 reads as constant zero, hence the immediate-0 fallback branch. */
366 static inline void gen_movl_reg_TN(int reg, int t)
369 gen_op_movl_reg_TN[t][reg] ();
371 gen_movl_imm_TN(t, 0);
374 static inline void gen_movl_reg_T0(int reg)
376 gen_movl_reg_TN(reg, 0);
379 static inline void gen_movl_reg_T1(int reg)
381 gen_movl_reg_TN(reg, 1);
384 static inline void gen_movl_reg_T2(int reg)
386 gen_movl_reg_TN(reg, 2);
/* Store temporary T<t> back into register `reg`; a guard for the
   %g0 destination (writes discarded) is presumably in the elided
   lines — TODO confirm. */
389 static inline void gen_movl_TN_reg(int reg, int t)
392 gen_op_movl_TN_reg[t][reg] ();
395 static inline void gen_movl_T0_reg(int reg)
397 gen_movl_TN_reg(reg, 0);
400 static inline void gen_movl_T1_reg(int reg)
402 gen_movl_TN_reg(reg, 1);
405 /* call this function before using T2 as it may have been set for a jump */
/* If npc is still the two-valued JUMP_PC marker, materialize the branch
   (selecting between jump_pc[0]/jump_pc[1] via T2) and downgrade npc to
   fully dynamic. */
406 static inline void flush_T2(DisasContext * dc)
408 if (dc->npc == JUMP_PC) {
409 gen_op_generic_branch(dc->jump_pc[0], dc->jump_pc[1]);
410 dc->npc = DYNAMIC_PC;
/* Emit code so the CPU's npc field is up to date: resolve a pending
   JUMP_PC, or store a static npc as an immediate; fully dynamic npc
   needs nothing. */
414 static inline void save_npc(DisasContext * dc)
416 if (dc->npc == JUMP_PC) {
417 gen_op_generic_branch(dc->jump_pc[0], dc->jump_pc[1]);
418 dc->npc = DYNAMIC_PC;
419 } else if (dc->npc != DYNAMIC_PC) {
420 gen_op_movl_npc_im(dc->npc);
/* Make both pc and npc architecturally visible (used before anything
   that can trap or inspect CPU state). */
424 static inline void save_state(DisasContext * dc)
426 gen_op_jmp_im(dc->pc);
/* Emit pc = npc, handling the JUMP_PC and DYNAMIC_PC marker cases;
   the static-npc branch is in the elided lines. */
430 static inline void gen_mov_pc_npc(DisasContext * dc)
432 if (dc->npc == JUMP_PC) {
433 gen_op_generic_branch(dc->jump_pc[0], dc->jump_pc[1]);
436 } else if (dc->npc == DYNAMIC_PC) {
/* Emit the integer-condition-code test for branch condition `cond`
   (body elided in this sampled listing — presumably a switch over the
   16 SPARC icc conditions leaving the result in T2; TODO confirm). */
444 static void gen_cond(int cond)
/* Same for the floating-point condition codes (fcc). */
493 static void gen_fcond(int cond)
542 /* XXX: potentially incorrect if dynamic npc */
/* Translate a Bicc/BA/BN instruction.  `offset` is the sign-extended,
   word-scaled displacement; `a` is the annul bit (bit 29).  Special
   cases: cond 0x0 = never (BN), cond 0x8 = always (BA).  A conditional
   branch leaves npc two-valued (JUMP_PC) with the candidates recorded
   in jump_pc[]. */
543 static void do_branch(DisasContext * dc, int32_t offset, uint32_t insn)
545 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
546 target_ulong target = dc->pc + offset;
549 /* unconditional not taken */
/* Annulled BN skips the delay slot entirely. */
551 dc->pc = dc->npc + 4;
552 dc->npc = dc->pc + 4;
555 dc->npc = dc->pc + 4;
557 } else if (cond == 0x8) {
558 /* unconditional taken */
561 dc->npc = dc->pc + 4;
/* Annulled conditional: emit a TB-chaining conditional branch now. */
570 gen_op_branch_a((long)dc->tb, target, dc->npc);
/* Non-annulled conditional: defer; jump_pc[0]=taken, [1]=fallthrough. */
574 dc->jump_pc[0] = target;
575 dc->jump_pc[1] = dc->npc + 4;
581 /* XXX: potentially incorrect if dynamic npc */
/* Translate an FBfcc instruction.  Mirrors do_branch() above but the
   condition was computed from the FP condition codes (gen_fcond). */
582 static void do_fbranch(DisasContext * dc, int32_t offset, uint32_t insn)
584 unsigned int cond = GET_FIELD(insn, 3, 6), a = (insn & (1 << 29));
585 target_ulong target = dc->pc + offset;
588 /* unconditional not taken */
590 dc->pc = dc->npc + 4;
591 dc->npc = dc->pc + 4;
594 dc->npc = dc->pc + 4;
596 } else if (cond == 0x8) {
597 /* unconditional taken */
600 dc->npc = dc->pc + 4;
/* Annulled conditional: emit a TB-chaining conditional branch now. */
609 gen_op_branch_a((long)dc->tb, target, dc->npc);
/* Non-annulled conditional: defer via the JUMP_PC mechanism. */
613 dc->jump_pc[0] = target;
614 dc->jump_pc[1] = dc->npc + 4;
/* Like GET_FIELD but sign-extends the extracted field. */
620 #define GET_FIELDs(x,a,b) sign_extend (GET_FIELD(x,a,b), (b) - (a) + 1)
/* Sign-extend the low `len` bits of x.  Relies on arithmetic right
   shift of signed int (implementation-defined in ISO C but true on the
   compilers QEMU targets); an elided line presumably rewrites `len`
   before the shift — TODO confirm. */
622 static int sign_extend(int x, int len)
625 return (x << len) >> len;
628 /* before an instruction, dc->pc must be static */
/* Decode and translate the single SPARC V8 instruction at dc->pc into
   micro-ops, updating dc->pc/dc->npc for the next instruction.  The
   top-level switch is on the 2-bit op field: 0 = branches/sethi,
   1 = call, 2 = arithmetic/FPU/control, 3 = load/store.  V9-only
   opcodes fall through to the illegal-instruction exception.
   NOTE(review): this listing is line-sampled; many case bodies, braces
   and gotos are elided. */
629 static void disas_sparc_insn(DisasContext * dc)
631 unsigned int insn, opc, rs1, rs2, rd;
633 insn = ldl_code(dc->pc);
634 opc = GET_FIELD(insn, 0, 1);
636 rd = GET_FIELD(insn, 2, 6);
638 case 0: /* branches/sethi */
640 unsigned int xop = GET_FIELD(insn, 7, 9);
642 target = GET_FIELD(insn, 10, 31);
644 case 0x0: /* UNIMPL */
645 case 0x1: /* V9 BPcc */
646 case 0x3: /* V9 BPr */
647 case 0x5: /* V9 FBPcc */
/* Bicc: 22-bit displacement, sign-extended then word-scaled. */
653 target = sign_extend(target, 22);
654 do_branch(dc, target, insn);
657 case 0x6: /* FBN+x */
659 #if !defined(CONFIG_USER_ONLY)
660 gen_op_trap_ifnofpu();
663 target = sign_extend(target, 22);
664 do_fbranch(dc, target, insn);
667 case 0x4: /* SETHI */
672 gen_movl_imm_T0(target << 10);
/* op 1: CALL — 30-bit word displacement; %o7 gets the call address. */
684 target_long target = GET_FIELDs(insn, 2, 31) << 2;
686 gen_op_movl_T0_im(dc->pc);
693 case 2: /* FPU & Logical Operations */
695 unsigned int xop = GET_FIELD(insn, 7, 12);
696 if (xop == 0x3a) { /* generate trap */
698 rs1 = GET_FIELD(insn, 13, 17);
699 gen_movl_reg_T0(rs1);
701 rs2 = GET_FIELD(insn, 25, 31);
705 gen_movl_imm_T1(rs2);
711 rs2 = GET_FIELD(insn, 27, 31);
715 gen_movl_reg_T1(rs2);
723 cond = GET_FIELD(insn, 3, 6);
728 } else if (cond != 0) {
/* xop 0x28: rd state register (rdy and friends). */
732 } else if (xop == 0x28) {
733 rs1 = GET_FIELD(insn, 13, 17);
739 case 15: /* stbar / V9 membar */
740 break; /* no effect? */
742 case 0x2: /* V9 rdccr */
743 case 0x3: /* V9 rdasi */
744 case 0x4: /* V9 rdtick */
745 case 0x5: /* V9 rdpc */
746 case 0x6: /* V9 rdfprs */
/* Privileged state-register reads exist only in system builds. */
749 #if !defined(CONFIG_USER_ONLY)
750 } else if (xop == 0x29) {
756 } else if (xop == 0x2a) {
762 } else if (xop == 0x2b) {
769 } else if (xop == 0x34) { /* FPU Operations */
770 #if !defined(CONFIG_USER_ONLY)
771 gen_op_trap_ifnofpu();
773 rs1 = GET_FIELD(insn, 13, 17);
774 rs2 = GET_FIELD(insn, 27, 31);
775 xop = GET_FIELD(insn, 18, 26);
777 case 0x1: /* fmovs */
778 gen_op_load_fpr_FT0(rs2);
779 gen_op_store_FT0_fpr(rd);
781 case 0x5: /* fnegs */
782 gen_op_load_fpr_FT1(rs2);
784 gen_op_store_FT0_fpr(rd);
786 case 0x9: /* fabss */
787 gen_op_load_fpr_FT1(rs2);
789 gen_op_store_FT0_fpr(rd);
791 case 0x29: /* fsqrts */
792 gen_op_load_fpr_FT1(rs2);
794 gen_op_store_FT0_fpr(rd);
796 case 0x2a: /* fsqrtd */
797 gen_op_load_fpr_DT1(rs2);
799 gen_op_store_DT0_fpr(rd);
/* Quad-precision ops are not implemented (fall to nfpu_insn). */
801 case 0x2b: /* fsqrtq */
/* Two-operand FP arithmetic: load rs1/rs2, op, store rd. */
804 gen_op_load_fpr_FT0(rs1);
805 gen_op_load_fpr_FT1(rs2);
807 gen_op_store_FT0_fpr(rd);
810 gen_op_load_fpr_DT0(rs1);
811 gen_op_load_fpr_DT1(rs2);
813 gen_op_store_DT0_fpr(rd);
815 case 0x43: /* faddq */
818 gen_op_load_fpr_FT0(rs1);
819 gen_op_load_fpr_FT1(rs2);
821 gen_op_store_FT0_fpr(rd);
824 gen_op_load_fpr_DT0(rs1);
825 gen_op_load_fpr_DT1(rs2);
827 gen_op_store_DT0_fpr(rd);
829 case 0x47: /* fsubq */
832 gen_op_load_fpr_FT0(rs1);
833 gen_op_load_fpr_FT1(rs2);
835 gen_op_store_FT0_fpr(rd);
838 gen_op_load_fpr_DT0(rs1);
839 gen_op_load_fpr_DT1(rs2);
841 gen_op_store_DT0_fpr(rd);
843 case 0x4b: /* fmulq */
846 gen_op_load_fpr_FT0(rs1);
847 gen_op_load_fpr_FT1(rs2);
849 gen_op_store_FT0_fpr(rd);
852 gen_op_load_fpr_DT0(rs1);
853 gen_op_load_fpr_DT1(rs2);
855 gen_op_store_DT0_fpr(rd);
857 case 0x4f: /* fdivq */
860 gen_op_load_fpr_FT0(rs1);
861 gen_op_load_fpr_FT1(rs2);
863 gen_op_store_DT0_fpr(rd);
865 case 0x6e: /* fdmulq */
/* Conversions (fito*/f*to*): operand in FT1/DT1, result varies. */
868 gen_op_load_fpr_FT1(rs2);
870 gen_op_store_FT0_fpr(rd);
873 gen_op_load_fpr_DT1(rs2);
875 gen_op_store_FT0_fpr(rd);
877 case 0xc7: /* fqtos */
880 gen_op_load_fpr_FT1(rs2);
882 gen_op_store_DT0_fpr(rd);
885 gen_op_load_fpr_FT1(rs2);
887 gen_op_store_DT0_fpr(rd);
889 case 0xcb: /* fqtod */
891 case 0xcc: /* fitoq */
893 case 0xcd: /* fstoq */
895 case 0xce: /* fdtoq */
898 gen_op_load_fpr_FT1(rs2);
900 gen_op_store_FT0_fpr(rd);
903 gen_op_load_fpr_DT1(rs2);
905 gen_op_store_FT0_fpr(rd);
907 case 0xd3: /* fqtoi */
910 case 0x2: /* V9 fmovd */
911 case 0x6: /* V9 fnegd */
912 case 0xa: /* V9 fabsd */
913 case 0x81: /* V9 fstox */
914 case 0x82: /* V9 fdtox */
915 case 0x84: /* V9 fxtos */
916 case 0x88: /* V9 fxtod */
918 case 0x3: /* V9 fmovq */
919 case 0x7: /* V9 fnegq */
920 case 0xb: /* V9 fabsq */
921 case 0x83: /* V9 fqtox */
922 case 0x8c: /* V9 fxtoq */
925 } else if (xop == 0x35) { /* FPU Operations */
926 #if !defined(CONFIG_USER_ONLY)
927 gen_op_trap_ifnofpu();
929 rs1 = GET_FIELD(insn, 13, 17);
930 rs2 = GET_FIELD(insn, 27, 31);
931 xop = GET_FIELD(insn, 18, 26);
932 /* V9 fmovscc: x5, cond = x >> 1 */
933 /* V9 fmovdcc: x6, cond = x >> 1 */
935 /* V9 fmovqcc: x7, cond = x >> 1 */
/* FP compares: result goes to the fcc field of the FSR. */
938 gen_op_load_fpr_FT0(rs1);
939 gen_op_load_fpr_FT1(rs2);
943 gen_op_load_fpr_DT0(rs1);
944 gen_op_load_fpr_DT1(rs2);
947 case 0x53: /* fcmpq */
949 case 0x55: /* fcmpes */
950 gen_op_load_fpr_FT0(rs1);
951 gen_op_load_fpr_FT1(rs2);
952 gen_op_fcmps(); /* XXX should trap if qNaN or sNaN */
954 case 0x56: /* fcmped */
955 gen_op_load_fpr_DT0(rs1);
956 gen_op_load_fpr_DT1(rs2);
957 gen_op_fcmpd(); /* XXX should trap if qNaN or sNaN */
959 case 0x57: /* fcmpeq */
/* xop 0x2: synthetic mov via `or` with %g0 — handled specially so the
   common cases avoid a full ALU op. */
965 } else if (xop == 0x2) {
968 rs1 = GET_FIELD(insn, 13, 17);
970 // or %g0, x, y -> mov T1, x; mov y, T1
971 if (IS_IMM) { /* immediate */
972 rs2 = GET_FIELDs(insn, 19, 31);
973 gen_movl_imm_T1(rs2);
974 } else { /* register */
975 rs2 = GET_FIELD(insn, 27, 31);
976 gen_movl_reg_T1(rs2);
980 gen_movl_reg_T0(rs1);
981 if (IS_IMM) { /* immediate */
982 // or x, #0, y -> mov T1, x; mov y, T1
983 rs2 = GET_FIELDs(insn, 19, 31);
985 gen_movl_imm_T1(rs2);
988 } else { /* register */
989 // or x, %g0, y -> mov T1, x; mov y, T1
990 rs2 = GET_FIELD(insn, 27, 31);
992 gen_movl_reg_T1(rs2);
/* xop < 0x38: generic integer ALU.  Operands into T0/T1, op keyed on
   xop & ~0x10 (bit 4 selects the cc-setting variant), result to rd. */
999 } else if (xop < 0x38) {
1000 rs1 = GET_FIELD(insn, 13, 17);
1001 gen_movl_reg_T0(rs1);
1002 if (IS_IMM) { /* immediate */
1003 rs2 = GET_FIELDs(insn, 19, 31);
1004 gen_movl_imm_T1(rs2);
1005 } else { /* register */
1006 rs2 = GET_FIELD(insn, 27, 31);
1007 gen_movl_reg_T1(rs2);
1010 switch (xop & ~0x10) {
1013 gen_op_add_T1_T0_cc();
1020 gen_op_logic_T0_cc();
1025 gen_op_logic_T0_cc();
1030 gen_op_logic_T0_cc();
1034 gen_op_sub_T1_T0_cc();
1039 gen_op_andn_T1_T0();
1041 gen_op_logic_T0_cc();
1046 gen_op_logic_T0_cc();
1049 gen_op_xnor_T1_T0();
1051 gen_op_logic_T0_cc();
1055 gen_op_addx_T1_T0_cc();
1057 gen_op_addx_T1_T0();
1060 gen_op_umul_T1_T0();
1062 gen_op_logic_T0_cc();
1065 gen_op_smul_T1_T0();
1067 gen_op_logic_T0_cc();
1071 gen_op_subx_T1_T0_cc();
1073 gen_op_subx_T1_T0();
1076 gen_op_udiv_T1_T0();
1081 gen_op_sdiv_T1_T0();
1086 case 0x9: /* V9 mulx */
1087 case 0xd: /* V9 udivx */
1090 gen_movl_T0_reg(rd);
1093 case 0x20: /* taddcc */
1094 case 0x21: /* tsubcc */
1095 case 0x22: /* taddcctv */
1096 case 0x23: /* tsubcctv */
1098 case 0x24: /* mulscc */
1099 gen_op_mulscc_T1_T0();
1100 gen_movl_T0_reg(rd);
1102 case 0x25: /* sll, V9 sllx */
1104 gen_movl_T0_reg(rd);
1106 case 0x26: /* srl, V9 srlx */
1108 gen_movl_T0_reg(rd);
1110 case 0x27: /* sra, V9 srax */
1112 gen_movl_T0_reg(rd);
/* wr state registers (wry etc.); several are V9-only. */
1122 case 0x2: /* V9 wrccr */
1123 case 0x3: /* V9 wrasi */
1124 case 0x6: /* V9 wrfprs */
1125 case 0xf: /* V9 sir */
1130 #if !defined(CONFIG_USER_ONLY)
1131 case 0x31: /* wrpsr, V9 saved, restored */
/* Privileged writes trap to priv_insn when not in supervisor mode. */
1133 if (!supervisor(dc))
1139 case 0x32: /* wrwim, V9 wrpr */
1141 if (!supervisor(dc))
1149 if (!supervisor(dc))
1157 case 0x2a: /* V9 rdpr */
1158 case 0x2b: /* V9 flushw */
1159 case 0x2c: /* V9 movcc */
1160 case 0x2d: /* V9 sdivx */
1161 case 0x2e: /* V9 popc */
1162 case 0x2f: /* V9 movr */
/* Effective-address computation shared by jmpl/rett/flush/save/restore:
   T0 = rs1 (+ simm13 or + rs2). */
1167 rs1 = GET_FIELD(insn, 13, 17);
1168 gen_movl_reg_T0(rs1);
1169 if (IS_IMM) { /* immediate */
1170 rs2 = GET_FIELDs(insn, 19, 31);
1174 gen_movl_imm_T1(rs2);
1179 } else { /* register */
1180 rs2 = GET_FIELD(insn, 27, 31);
1184 gen_movl_reg_T1(rs2);
1191 case 0x38: /* jmpl */
/* rd receives the address of the jmpl itself (link). */
1194 gen_op_movl_T1_im(dc->pc);
1195 gen_movl_T1_reg(rd);
1198 gen_op_movl_npc_T0();
1199 dc->npc = DYNAMIC_PC;
1202 #if !defined(CONFIG_USER_ONLY)
1203 case 0x39: /* rett, V9 return */
1205 if (!supervisor(dc))
1208 gen_op_movl_npc_T0();
1209 dc->npc = DYNAMIC_PC;
1214 case 0x3b: /* flush */
1217 case 0x3c: /* save */
1220 gen_movl_T0_reg(rd);
1222 case 0x3d: /* restore */
1225 gen_movl_T0_reg(rd);
1228 case 0x3e: /* V9 done/retry */
1235 case 3: /* load/store instructions */
1237 unsigned int xop = GET_FIELD(insn, 7, 12);
1238 rs1 = GET_FIELD(insn, 13, 17);
1239 gen_movl_reg_T0(rs1);
1240 if (IS_IMM) { /* immediate */
1241 rs2 = GET_FIELDs(insn, 19, 31);
1245 gen_movl_imm_T1(rs2);
1250 } else { /* register */
1251 rs2 = GET_FIELD(insn, 27, 31);
1255 gen_movl_reg_T1(rs2);
/* Integer loads (incl. alternate-space forms). */
1261 if (xop < 4 || (xop > 7 && xop < 0x14) || \
1262 (xop > 0x17 && xop < 0x20)) {
1264 case 0x0: /* load word */
1267 case 0x1: /* load unsigned byte */
1270 case 0x2: /* load unsigned halfword */
1273 case 0x3: /* load double word */
1275 gen_movl_T0_reg(rd + 1);
1277 case 0x9: /* load signed byte */
1280 case 0xa: /* load signed halfword */
1283 case 0xd: /* ldstub -- XXX: should be atomically */
1284 gen_op_ldst(ldstub);
1286 case 0x0f: /* swap register with memory. Also atomically */
1287 gen_movl_reg_T1(rd);
1290 #if !defined(CONFIG_USER_ONLY)
1291 case 0x10: /* load word alternate */
1292 if (!supervisor(dc))
1294 gen_op_lda(insn, 1, 4, 0);
1296 case 0x11: /* load unsigned byte alternate */
1297 if (!supervisor(dc))
1299 gen_op_lduba(insn, 1, 1, 0);
1301 case 0x12: /* load unsigned halfword alternate */
1302 if (!supervisor(dc))
1304 gen_op_lduha(insn, 1, 2, 0);
1306 case 0x13: /* load double word alternate */
1307 if (!supervisor(dc))
1309 gen_op_ldda(insn, 1, 8, 0);
1310 gen_movl_T0_reg(rd + 1);
1312 case 0x19: /* load signed byte alternate */
1313 if (!supervisor(dc))
1315 gen_op_ldsba(insn, 1, 1, 1);
1317 case 0x1a: /* load signed halfword alternate */
1318 if (!supervisor(dc))
1320 gen_op_ldsha(insn, 1, 2 ,1);
1322 case 0x1d: /* ldstuba -- XXX: should be atomically */
1323 if (!supervisor(dc))
1325 gen_op_ldstuba(insn, 1, 1, 0);
1327 case 0x1f: /* swap reg with alt. memory. Also atomically */
1328 if (!supervisor(dc))
1330 gen_movl_reg_T1(rd);
1331 gen_op_swapa(insn, 1, 4, 0);
1334 /* avoid warnings */
1335 (void) &gen_op_stfa;
1336 (void) &gen_op_stdfa;
1337 (void) &gen_op_ldfa;
1338 (void) &gen_op_lddfa;
1341 case 0x08: /* V9 ldsw */
1342 case 0x0b: /* V9 ldx */
1343 case 0x18: /* V9 ldswa */
1344 case 0x1b: /* V9 ldxa */
1345 case 0x2d: /* V9 prefetch */
1346 case 0x30: /* V9 ldfa */
1347 case 0x33: /* V9 lddfa */
1348 case 0x3d: /* V9 prefetcha */
1350 case 0x32: /* V9 ldqfa */
1353 gen_movl_T1_reg(rd);
/* FP loads (0x20..0x23). */
1354 } else if (xop >= 0x20 && xop < 0x24) {
1355 #if !defined(CONFIG_USER_ONLY)
1356 gen_op_trap_ifnofpu();
1359 case 0x20: /* load fpreg */
1361 gen_op_store_FT0_fpr(rd);
1363 case 0x21: /* load fsr */
1365 gen_op_store_FT0_fpr(rd);
1367 case 0x22: /* load quad fpreg */
1369 case 0x23: /* load double fpreg */
1371 gen_op_store_DT0_fpr(rd);
/* Integer stores. */
1376 } else if (xop < 8 || (xop >= 0x14 && xop < 0x18)) {
1377 gen_movl_reg_T1(rd)
1390 gen_movl_reg_T2(rd + 1);
1393 #if !defined(CONFIG_USER_ONLY)
1395 if (!supervisor(dc))
1397 gen_op_sta(insn, 0, 4, 0);
1400 if (!supervisor(dc))
1402 gen_op_stba(insn, 0, 1, 0);
1405 if (!supervisor(dc))
1407 gen_op_stha(insn, 0, 2, 0);
1410 if (!supervisor(dc))
1413 gen_movl_reg_T2(rd + 1);
1414 gen_op_stda(insn, 0, 8, 0);
1418 case 0x0e: /* V9 stx */
1419 case 0x1e: /* V9 stxa */
/* FP stores (0x24..0x27). */
1422 } else if (xop > 0x23 && xop < 0x28) {
1423 #if !defined(CONFIG_USER_ONLY)
1424 gen_op_trap_ifnofpu();
1428 gen_op_load_fpr_FT0(rd);
1431 case 0x25: /* stfsr, V9 stxfsr */
1432 gen_op_load_fpr_FT0(rd);
1435 case 0x26: /* stdfq */
1438 gen_op_load_fpr_DT0(rd);
1442 case 0x34: /* V9 stfa */
1443 case 0x37: /* V9 stdfa */
1444 case 0x3c: /* V9 casa */
1445 case 0x3e: /* V9 casxa */
1447 case 0x36: /* V9 stqfa */
1450 } else if (xop > 0x33 && xop < 0x38) {
1459 /* default case for non jump instructions */
/* Advance the translation-time pc/npc pair, resolving JUMP_PC with a
   static two-way branch when possible. */
1460 if (dc->npc == DYNAMIC_PC) {
1461 dc->pc = DYNAMIC_PC;
1463 } else if (dc->npc == JUMP_PC) {
1464 /* we can do a static jump */
1465 gen_op_branch2((long)dc->tb, dc->jump_pc[0], dc->jump_pc[1]);
1469 dc->npc = dc->npc + 4;
/* Exception exits (targets of the elided gotos above). */
1475 gen_op_exception(TT_ILL_INSN);
1478 #if !defined(CONFIG_USER_ONLY)
1481 gen_op_exception(TT_PRIV_INSN);
1487 gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
/* Translate one TranslationBlock starting at tb->pc.  `spc` selects
   search-pc mode (record pc/npc per generated op so a host pc can be
   mapped back to a guest pc).  Loops disas_sparc_insn() until the pc
   stream goes non-sequential, the op buffer nears capacity, the page
   boundary is approached, a breakpoint hits, or single-step is on. */
1491 static inline int gen_intermediate_code_internal(TranslationBlock * tb,
1492 int spc, CPUSPARCState *env)
1494 target_ulong pc_start, last_pc;
1495 uint16_t *gen_opc_end;
1496 DisasContext dc1, *dc = &dc1;
1499 memset(dc, 0, sizeof(DisasContext));
/* cs_base carries the npc of the block entry (SPARC delay slots). */
1504 dc->npc = (target_ulong) tb->cs_base;
1505 #if defined(CONFIG_USER_ONLY)
/* System build: memory index follows the supervisor bit. */
1508 dc->mem_idx = ((env->psrs) != 0);
1510 gen_opc_ptr = gen_opc_buf;
1511 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
1512 gen_opparam_ptr = gen_opparam_buf;
/* Stop translation at a debugger breakpoint (unless at block start). */
1515 if (env->nb_breakpoints > 0) {
1516 for(j = 0; j < env->nb_breakpoints; j++) {
1517 if (env->breakpoints[j] == dc->pc) {
1518 if (dc->pc != pc_start)
1530 fprintf(logfile, "Search PC...\n");
/* Search-pc mode: record guest pc/npc for each op index. */
1531 j = gen_opc_ptr - gen_opc_buf;
1535 gen_opc_instr_start[lj++] = 0;
1536 gen_opc_pc[lj] = dc->pc;
1537 gen_opc_npc[lj] = dc->npc;
1538 gen_opc_instr_start[lj] = 1;
1542 disas_sparc_insn(dc);
1545 /* if the next PC is different, we abort now */
1546 if (dc->pc != (last_pc + 4))
1548 /* if single step mode, we generate only one instruction and
1549 generate an exception */
1550 if (env->singlestep_enabled) {
1551 gen_op_jmp_im(dc->pc);
/* Guard both op-buffer overflow and crossing the page boundary. */
1556 } while ((gen_opc_ptr < gen_opc_end) &&
1557 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
1561 if (dc->pc != DYNAMIC_PC &&
1562 (dc->npc != DYNAMIC_PC && dc->npc != JUMP_PC)) {
1563 /* static PC and NPC: we can use direct chaining */
1564 gen_op_branch((long)tb, dc->pc, dc->npc);
1566 if (dc->pc != DYNAMIC_PC)
1567 gen_op_jmp_im(dc->pc);
1573 *gen_opc_ptr = INDEX_op_end;
1575 j = gen_opc_ptr - gen_opc_buf;
1578 gen_opc_instr_start[lj++] = 0;
1586 tb->size = last_pc + 4 - pc_start;
/* Optional disassembly / op-dump logging. */
1589 if (loglevel & CPU_LOG_TB_IN_ASM) {
1590 fprintf(logfile, "--------------\n");
1591 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
1592 target_disas(logfile, pc_start, last_pc + 4 - pc_start, 0);
1593 fprintf(logfile, "\n");
1594 if (loglevel & CPU_LOG_TB_OP) {
1595 fprintf(logfile, "OP:\n");
1596 dump_ops(gen_opc_buf, gen_opparam_buf);
1597 fprintf(logfile, "\n");
/* Public entry points: normal translation vs search-pc translation. */
1604 int gen_intermediate_code(CPUSPARCState * env, TranslationBlock * tb)
1606 return gen_intermediate_code_internal(tb, 0, env);
1609 int gen_intermediate_code_pc(CPUSPARCState * env, TranslationBlock * tb)
1611 return gen_intermediate_code_internal(tb, 1, env);
1614 extern int ram_size;
/* Reset CPU to power-on state: zero everything, then set the window
   pointer, reset vector and MMU defaults for the system-mode case. */
1616 void cpu_reset(CPUSPARCState *env)
1618 memset(env, 0, sizeof(*env));
/* Each register window is 16 registers (8 locals + 8 ins). */
1622 env->regwptr = env->regbase + (env->cwp * 16);
1623 #if defined(CONFIG_USER_ONLY)
1624 env->user_mode_only = 1;
/* System mode: boot PROM entry point and RAM size in %g1 — presumably
   a convention of the firmware this emulation targets; TODO confirm. */
1628 env->pc = 0xffd00000;
1629 env->gregs[1] = ram_size;
1630 env->mmuregs[0] = (0x04 << 24); /* Impl 0, ver 4, MMU disabled */
1631 env->npc = env->pc + 4;
/* Allocate and initialize a CPU state; returns NULL on allocation
   failure (the failure path is elided in this listing).
   NOTE(review): malloc() result is unzeroed here — presumably
   cpu_reset() or elided code initializes it; verify. */
1635 CPUSPARCState *cpu_sparc_init(void)
1641 if (!(env = malloc(sizeof(CPUSPARCState))))
1643 cpu_single_env = env;
/* Print flag letter b if PSR bit a is set, '-' otherwise. */
1648 #define GET_FLAG(a,b) ((env->psr & a)?b:'-')
/* Dump pc/npc, globals, the current register window, FP registers and
   PSR/WIM/FSR to stream f using the supplied fprintf-like callback. */
1650 void cpu_dump_state(CPUState *env, FILE *f,
1651 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
1656 cpu_fprintf(f, "pc: " TARGET_FMT_lx " npc: " TARGET_FMT_lx "\n", env->pc, env->npc);
1657 cpu_fprintf(f, "General Registers:\n");
1658 for (i = 0; i < 4; i++)
1659 cpu_fprintf(f, "%%g%c: " TARGET_FMT_lx "\t", i + '0', env->gregs[i]);
1660 cpu_fprintf(f, "\n");
1662 cpu_fprintf(f, "%%g%c: " TARGET_FMT_lx "\t", i + '0', env->gregs[i]);
1663 cpu_fprintf(f, "\nCurrent Register Window:\n");
/* x indexes the window thirds: 0 = outs, 1 = locals, 2 = ins. */
1664 for (x = 0; x < 3; x++) {
1665 for (i = 0; i < 4; i++)
1666 cpu_fprintf(f, "%%%c%d: " TARGET_FMT_lx "\t",
1667 (x == 0 ? 'o' : (x == 1 ? 'l' : 'i')), i,
1668 env->regwptr[i + x * 8]);
1669 cpu_fprintf(f, "\n");
1671 cpu_fprintf(f, "%%%c%d: " TARGET_FMT_lx "\t",
1672 (x == 0 ? 'o' : x == 1 ? 'l' : 'i'), i,
1673 env->regwptr[i + x * 8]);
1674 cpu_fprintf(f, "\n");
1676 cpu_fprintf(f, "\nFloating Point Registers:\n");
1677 for (i = 0; i < 32; i++) {
1679 cpu_fprintf(f, "%%f%02d:", i);
/* NOTE(review): "%016lf" — the 'l' length modifier is meaningless for
   %f; later QEMU uses "%016f" here.  Harmless on common libcs but worth
   cleaning up when the full file is edited. */
1680 cpu_fprintf(f, " %016lf", env->fpr[i]);
1682 cpu_fprintf(f, "\n");
1684 cpu_fprintf(f, "psr: 0x%08x -> %c%c%c%c %c%c%c wim: 0x%08x\n", GET_PSR(env),
1685 GET_FLAG(PSR_ZERO, 'Z'), GET_FLAG(PSR_OVF, 'V'),
1686 GET_FLAG(PSR_NEG, 'N'), GET_FLAG(PSR_CARRY, 'C'),
1687 env->psrs?'S':'-', env->psrps?'P':'-',
1688 env->psret?'E':'-', env->wim);
1689 cpu_fprintf(f, "fsr: 0x%08x\n", env->fsr);
/* Debug (gdbstub) guest-virtual to guest-physical translation.
   User-only: addresses are identity-mapped (body elided).
   System mode: walk the MMU via get_physical_address() with rw=2
   (debug access); the failure return value is in elided lines. */
1692 #if defined(CONFIG_USER_ONLY)
1693 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1699 extern int get_physical_address (CPUState *env, target_phys_addr_t *physical, int *prot,
1700 int *access_index, target_ulong address, int rw,
1703 target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
1705 target_phys_addr_t phys_addr;
1706 int prot, access_index;
1708 if (get_physical_address(env, &phys_addr, &prot, &access_index, addr, 2, 0) != 0)
/* FLUSH instruction helper: invalidate any translated code covering
   the flushed doubleword so self-modifying code is retranslated. */
1714 void helper_flush(target_ulong addr)
1717 tb_invalidate_page_range(addr, addr + 8);