4 * Copyright (c) 2003 Fabrice Bellard
5 * Copyright (c) 2005 CodeSourcery, LLC
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
/* NOTE(review): this excerpt is heavily truncated -- the DisasContext struct
   body, most DISAS_* state defines, and the DEF opcode enum wrapper are
   missing lines. Compare against the full source before editing. */
/* Per-translation-block disassembly state: translation pointer plus the
   owning TranslationBlock; is_jmp (not visible here) records how the block
   ends (DISAS_JUMP_NEXT = conditional fall-through to next insn). */
31 /* internal defines */
32 typedef struct DisasContext {
35     struct TranslationBlock *tb;
38 #define DISAS_JUMP_NEXT 4
40 /* XXX: move that elsewhere */
/* Global cursors into the micro-op and parameter buffers being emitted. */
41 static uint16_t *gen_opc_ptr;
42 static uint32_t *gen_opparam_ptr;
/* Expands opcode definitions into INDEX_op_* enum members -- presumably
   wrapped around an #include of the op definition list (not visible here). */
47 #define DEF(s, n, copy_size) INDEX_op_ ## s,
/* NOTE(review): dispatch tables of generated micro-op helpers; most
   initializer entries are missing from this excerpt. Tables are indexed by
   condition code, opcode field, or shift type as noted below. */
/* Conditional-jump emitters, one per ARM condition code (EQ..LE). */
55 static GenOpFunc2 *gen_test_cc[14] = {
/* For each data-processing opcode: 1 if it is a logical op whose S-bit
   variant sets flags via gen_op_logic_*_cc rather than arithmetic flags. */
72 const uint8_t table_logic_cc[16] = {
/* Shift-by-immediate / shift-by-zero emitters, indexed by shift type
   (0=LSL, 1=LSR, 2=ASR, 3=ROR); _cc variants also update the flags. */
91 static GenOpFunc1 *gen_shift_T1_im[4] = {
98 static GenOpFunc *gen_shift_T1_0[4] = {
105 static GenOpFunc1 *gen_shift_T2_im[4] = {
112 static GenOpFunc *gen_shift_T2_0[4] = {
119 static GenOpFunc1 *gen_shift_T1_im_cc[4] = {
120     gen_op_shll_T1_im_cc,
121     gen_op_shrl_T1_im_cc,
122     gen_op_sarl_T1_im_cc,
123     gen_op_rorl_T1_im_cc,
126 static GenOpFunc *gen_shift_T1_0_cc[4] = {
/* Register-specified shifts (amount in T0). */
133 static GenOpFunc *gen_shift_T1_T0[4] = {
140 static GenOpFunc *gen_shift_T1_T0_cc[4] = {
141     gen_op_shll_T1_T0_cc,
142     gen_op_shrl_T1_T0_cc,
143     gen_op_sarl_T1_T0_cc,
144     gen_op_rorl_T1_T0_cc,
/* Register <-> temporary movers, indexed [temporary Tn][ARM register]. */
147 static GenOpFunc *gen_op_movl_TN_reg[3][16] = {
204 static GenOpFunc *gen_op_movl_reg_TN[2][16] = {
/* Load-immediate into T0/T1/T2. */
243 static GenOpFunc1 *gen_op_movl_TN_im[3] = {
/* Thumb shift-by-immediate on T0 (LSL/LSR/ASR only -- no ROR form). */
249 static GenOpFunc1 *gen_shift_T0_im_thumb[3] = {
250     gen_op_shll_T0_im_thumb,
251     gen_op_shrl_T0_im_thumb,
252     gen_op_sarl_T0_im_thumb,
/* Emit a branch-exchange: ends the current TB so the new PC (and possibly
   new Thumb state) is picked up. Body is truncated in this excerpt. */
255 static inline void gen_bx(DisasContext *s)
257     s->is_jmp = DISAS_UPDATE;
/* Load ARM register 'reg' into temporary T<t>. Reads of r15 (PC) must
   account for the pipeline, hence the +4 special case; the surrounding
   if/else for reg == 15 is truncated in this excerpt. */
261 static inline void gen_movl_TN_reg(DisasContext *s, int reg, int t)
266 /* normally, since we updated PC, we need only to add 4 */
267 val = (long)s->pc + 4;
268 gen_op_movl_TN_im[t](val);
270 gen_op_movl_TN_reg[t][reg]();
/* Convenience wrapper: T0 = reg. */
274 static inline void gen_movl_T0_reg(DisasContext *s, int reg)
276     gen_movl_TN_reg(s, reg, 0);
/* Convenience wrapper: T1 = reg. */
279 static inline void gen_movl_T1_reg(DisasContext *s, int reg)
281     gen_movl_TN_reg(s, reg, 1);
/* Convenience wrapper: T2 = reg. */
284 static inline void gen_movl_T2_reg(DisasContext *s, int reg)
286     gen_movl_TN_reg(s, reg, 2);
/* Store temporary T<t> back into ARM register 'reg'. A write to r15 is a
   jump, so the TB must end (the reg == 15 guard is truncated here). */
289 static inline void gen_movl_reg_TN(DisasContext *s, int reg, int t)
291     gen_op_movl_reg_TN[t][reg]();
293     s->is_jmp = DISAS_JUMP;
/* Convenience wrapper: reg = T0. */
297 static inline void gen_movl_reg_T0(DisasContext *s, int reg)
299     gen_movl_reg_TN(s, reg, 0);
/* Convenience wrapper: reg = T1. */
302 static inline void gen_movl_reg_T1(DisasContext *s, int reg)
304     gen_movl_reg_TN(s, reg, 1);
/* Add the addressing-mode-2 (word/byte load-store) offset to T1.
   Bit 25 selects immediate vs shifted-register offset; bit 23 is the U
   (up/down) bit -- the negation path is truncated in this excerpt. */
307 static inline void gen_add_data_offset(DisasContext *s, unsigned int insn)
309     int val, rm, shift, shiftop;
311     if (!(insn & (1 << 25))) {
/* Immediate offset: negate when U is clear (negation line missing here). */
314         if (!(insn & (1 << 23)))
317         gen_op_addl_T1_im(val);
/* Register offset, optionally shifted by a 5-bit immediate. */
321         shift = (insn >> 7) & 0x1f;
322         gen_movl_T2_reg(s, rm);
323         shiftop = (insn >> 5) & 3;
325             gen_shift_T2_im[shiftop](shift);
/* shift == 0 with a non-LSL type encodes the special shift-by-32/RRX forms. */
326         } else if (shiftop != 0) {
327             gen_shift_T2_0[shiftop]();
329         if (!(insn & (1 << 23)))
/* Add the addressing-mode-3 (halfword/signed load-store) offset to T1.
   Bit 22 selects split-immediate (nibbles in bits 0-3 and 8-11) vs register
   offset; bit 23 is the U bit (negation paths truncated in this excerpt). */
336 static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn)
340     if (insn & (1 << 22)) {
342         val = (insn & 0xf) | ((insn >> 4) & 0xf0);
343         if (!(insn & (1 << 23)))
346         gen_op_addl_T1_im(val);
350         gen_movl_T2_reg(s, rm);
351         if (!(insn & (1 << 23)))
/* Generate a gen_vfp_<name>(dp) helper that dispatches to the double- or
   single-precision micro-op based on the dp flag. The if/else between the
   two calls is truncated in this excerpt. */
358 #define VFP_OP(name) \
359 static inline void gen_vfp_##name(int dp) \
362         gen_op_vfp_##name##d(); \
364         gen_op_vfp_##name##s(); \
/* Load VFP register 'reg' into scratch F0 (double if dp, else single). */
388 static inline void gen_mov_F0_vreg(int dp, int reg)
391         gen_op_vfp_getreg_F0d(offsetof(CPUARMState, vfp.regs.d[reg]));
393         gen_op_vfp_getreg_F0s(offsetof(CPUARMState, vfp.regs.s[reg]));
/* Load VFP register 'reg' into scratch F1 (double if dp, else single). */
396 static inline void gen_mov_F1_vreg(int dp, int reg)
399         gen_op_vfp_getreg_F1d(offsetof(CPUARMState, vfp.regs.d[reg]));
401         gen_op_vfp_getreg_F1s(offsetof(CPUARMState, vfp.regs.s[reg]));
/* Store scratch F0 into VFP register 'reg' (double if dp, else single). */
404 static inline void gen_mov_vreg_F0(int dp, int reg)
407         gen_op_vfp_setreg_F0d(offsetof(CPUARMState, vfp.regs.d[reg]));
409         gen_op_vfp_setreg_F0s(offsetof(CPUARMState, vfp.regs.s[reg]));
/* NOTE(review): large portions of this function (case labels, else arms,
   closing braces) are missing from this excerpt -- do not restyle without
   the full source. */
412 /* Disassemble a VFP instruction. Returns nonzero if an error occurred
413 (ie. an undefined instruction). */
414 static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
416 uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
/* dp = 1 for double-precision encodings (coproc 11), 0 for single. */
419 dp = ((insn & 0xf00) == 0xb00);
420 switch ((insn >> 24) & 0xf) {
/* ---- single register transfer between ARM core and VFP ---- */
422 if (insn & (1 << 4)) {
423 /* single register transfer */
424 if ((insn & 0x6f) != 0x00)
426 rd = (insn >> 12) & 0xf;
430 rn = (insn >> 16) & 0xf;
431 /* Get the existing value even for arm->vfp moves because
432 we only set half the register. */
433 gen_mov_F0_vreg(1, rn);
435 if (insn & (1 << 20)) {
437 if (insn & (1 << 21))
438 gen_movl_reg_T1(s, rd);
440 gen_movl_reg_T0(s, rd);
443 if (insn & (1 << 21))
444 gen_movl_T1_reg(s, rd);
446 gen_movl_T0_reg(s, rd);
448 gen_mov_vreg_F0(dp, rn);
/* Single-precision register number: Vn:N bit concatenation. */
451 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
452 if (insn & (1 << 20)) {
454 if (insn & (1 << 21)) {
455 /* system register */
462 gen_op_vfp_movl_T0_fpscr_flags();
464 gen_op_vfp_movl_T0_fpscr();
470 gen_mov_F0_vreg(0, rn);
/* rd == 15 here: FMSTAT copies FPSCR flags into the CPSR. */
474 /* This will only set the 4 flag bits */
475 gen_op_movl_psr_T0();
477 gen_movl_reg_T0(s, rd);
480 gen_movl_T0_reg(s, rd);
481 if (insn & (1 << 21)) {
482 /* system register */
485 /* Writes are ignored. */
488 gen_op_vfp_movl_fpscr_T0();
489 /* This could change vector settings, so jump to
490 the next instruction. */
491 gen_op_movl_T0_im(s->pc);
492 gen_movl_reg_T0(s, 15);
493 s->is_jmp = DISAS_UPDATE;
500 gen_mov_vreg_F0(0, rn);
/* ---- VFP data processing, with short-vector (veclen) iteration ---- */
505 /* data processing */
506 /* The opcode is in bits 23, 21, 20 and 6. */
507 op = ((insn >> 20) & 8) | ((insn >> 19) & 6) | ((insn >> 6) & 1);
511 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
513 /* rn is register number */
516 rn = (insn >> 16) & 0xf;
/* op 15 subcodes: rn encodes the unary operation; 15..17 involve
   precision/integer conversions whose operand sizes differ. */
519 if (op == 15 && (rn == 15 || rn > 17)) {
520 /* Integer or single precision destination. */
521 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
523 if (insn & (1 << 22))
525 rd = (insn >> 12) & 0xf;
528 if (op == 15 && (rn == 16 || rn == 17)) {
529 /* Integer source. */
530 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
537 rn = ((insn >> 15) & 0x1e) | ((insn >> 7) & 1);
538 if (op == 15 && rn == 15) {
539 /* Double precision destination. */
540 if (insn & (1 << 22))
542 rd = (insn >> 12) & 0xf;
544 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
545 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
548 veclen = env->vfp.vec_len;
549 if (op == 15 && rn > 3)
552 /* Shut up compiler warnings. */
563 /* Figure out what type of vector operation this is. */
564 if ((rd & bank_mask) == 0) {
/* delta_d/delta_m are the per-iteration register strides for
   short-vector mode; stride 0 means scalar operand. */
569 delta_d = (env->vfp.vec_stride >> 1) + 1;
571 delta_d = env->vfp.vec_stride + 1;
573 if ((rm & bank_mask) == 0) {
574 /* mixed scalar/vector */
583 /* Load the initial operands. */
589 gen_mov_F0_vreg(0, rm);
594 gen_mov_F0_vreg(dp, rd);
595 gen_mov_F1_vreg(dp, rm);
599 /* Compare with zero */
600 gen_mov_F0_vreg(dp, rd);
604 /* One source operand. */
605 gen_mov_F0_vreg(dp, rm);
608 /* Two source operands. */
609 gen_mov_F0_vreg(dp, rn);
610 gen_mov_F1_vreg(dp, rm);
614 /* Perform the calculation. */
616 case 0: /* mac: fd + (fn * fm) */
618 gen_mov_F1_vreg(dp, rd);
621 case 1: /* nmac: fd - (fn * fm) */
624 gen_mov_F1_vreg(dp, rd);
627 case 2: /* msc: -fd + (fn * fm) */
629 gen_mov_F1_vreg(dp, rd);
632 case 3: /* nmsc: -fd - (fn * fm) */
634 gen_mov_F1_vreg(dp, rd);
638 case 4: /* mul: fn * fm */
641 case 5: /* nmul: -(fn * fm) */
645 case 6: /* add: fn + fm */
648 case 7: /* sub: fn - fm */
651 case 8: /* div: fn / fm */
654 case 15: /* extension space */
681 case 15: /* single<->double conversion */
696 case 25: /* ftouiz */
702 case 27: /* ftosiz */
705 default: /* undefined */
706 printf ("rn:%d\n", rn);
710 default: /* undefined */
711 printf ("op:%d\n", op);
715 /* Write back the result. */
716 if (op == 15 && (rn >= 8 && rn <= 11))
717 ; /* Comparison, do nothing. */
718 else if (op == 15 && rn > 17)
719 /* Integer result. */
720 gen_mov_vreg_F0(0, rd);
721 else if (op == 15 && rn == 15)
/* Precision conversion: destination has the opposite size of dp. */
723 gen_mov_vreg_F0(!dp, rd);
725 gen_mov_vreg_F0(dp, rd);
727 /* break out of the loop if we have finished */
731 if (op == 15 && delta_m == 0) {
732 /* single source one-many */
734 rd = ((rd + delta_d) & (bank_mask - 1))
736 gen_mov_vreg_F0(dp, rd);
740 /* Setup the next operands. */
742 rd = ((rd + delta_d) & (bank_mask - 1))
746 /* One source operand. */
747 rm = ((rm + delta_m) & (bank_mask - 1))
749 gen_mov_F0_vreg(dp, rm);
751 /* Two source operands. */
752 rn = ((rn + delta_d) & (bank_mask - 1))
754 gen_mov_F0_vreg(dp, rn);
756 rm = ((rm + delta_m) & (bank_mask - 1))
758 gen_mov_F1_vreg(dp, rm);
/* ---- FMDRR/FMRRD style two-register transfers ---- */
766 if (dp && (insn & (1 << 22))) {
767 /* two-register transfer */
768 rn = (insn >> 16) & 0xf;
769 rd = (insn >> 12) & 0xf;
775 rm = ((insn << 1) & 0x1e) | ((insn >> 5) & 1);
777 if (insn & (1 << 20)) {
780 gen_mov_F0_vreg(1, rm);
782 gen_movl_reg_T0(s, rd);
783 gen_movl_reg_T1(s, rn);
785 gen_mov_F0_vreg(0, rm);
787 gen_movl_reg_T0(s, rn);
788 gen_mov_F0_vreg(0, rm + 1);
790 gen_movl_reg_T0(s, rd);
795 gen_movl_T0_reg(s, rd);
796 gen_movl_T1_reg(s, rn);
798 gen_mov_vreg_F0(1, rm);
800 gen_movl_T0_reg(s, rn);
802 gen_mov_vreg_F0(0, rm);
803 gen_movl_T0_reg(s, rd);
805 gen_mov_vreg_F0(0, rm + 1);
/* ---- VFP load/store (single and multiple) ---- */
810 rn = (insn >> 16) & 0xf;
812 rd = (insn >> 12) & 0xf;
814 rd = ((insn >> 11) & 0x1e) | ((insn >> 22) & 1);
815 gen_movl_T1_reg(s, rn);
816 if ((insn & 0x01200000) == 0x01000000) {
817 /* Single load/store */
818 offset = (insn & 0xff) << 2;
819 if ((insn & (1 << 23)) == 0)
821 gen_op_addl_T1_im(offset);
822 if (insn & (1 << 20)) {
824 gen_mov_vreg_F0(dp, rd);
826 gen_mov_F0_vreg(dp, rd);
830 /* load/store multiple */
832 n = (insn >> 1) & 0x7f;
836 if (insn & (1 << 24)) /* pre-decrement */
837 gen_op_addl_T1_im(-((insn & 0xff) << 2));
843 for (i = 0; i < n; i++) {
844 if (insn & (1 << 20)) {
847 gen_mov_vreg_F0(dp, rd + i);
850 gen_mov_F0_vreg(dp, rd + i);
853 gen_op_addl_T1_im(offset);
855 if (insn & (1 << 21)) {
/* Writeback: recompute the base adjustment for W-bit forms. */
857 if (insn & (1 << 24))
858 offset = -offset * n;
859 else if (dp && (insn & 1))
865 gen_op_addl_T1_im(offset);
866 gen_movl_reg_T1(s, rn);
872 /* Should never happen. */
/* Decode and translate one 32-bit ARM instruction at s->pc into micro-ops.
   NOTE(review): many lines (case labels, else arms, braces, the insn fetch)
   are missing from this excerpt -- do not restyle without the full source. */
878 static void disas_arm_insn(CPUState * env, DisasContext *s)
880 unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
/* cond == 0xf: the unconditional instruction space. */
887 /* Unconditional instructions. */
888 if ((insn & 0x0d70f000) == 0x0550f000)
890 else if ((insn & 0x0e000000) == 0x0a000000) {
891 /* branch link and change to thumb (blx <offset>) */
894 val = (uint32_t)s->pc;
895 gen_op_movl_T0_im(val);
896 gen_movl_reg_T0(s, 14);
897 /* Sign-extend the 24-bit offset */
898 offset = (((int32_t)insn) << 8) >> 8;
899 /* offset * 4 + bit24 * 2 + (thumb bit) */
900 val += (offset << 2) | ((insn >> 23) & 2) | 1;
901 /* pipeline offset */
903 gen_op_movl_T0_im(val);
906 } else if ((insn & 0x0fe00000) == 0x0c400000) {
907 /* Coprocessor double register transfer. */
908 } else if ((insn & 0x0f000010) == 0x0e000010) {
909 /* Additional coprocessor register transfer. */
/* Conditional execution: emit a jump over the insn when cond fails. */
914 /* if not always execute, we generate a conditional jump to
916 gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
917 s->is_jmp = DISAS_JUMP_NEXT;
/* MSR with immediate operand (CPSR flags only in this path). */
919 if ((insn & 0x0f900000) == 0x03000000) {
920 if ((insn & 0x0ff0f000) != 0x0360f000)
922 /* CPSR = immediate */
924 shift = ((insn >> 8) & 0xf) * 2;
/* Rotate-right the 8-bit immediate by 2*rotate field. */
926 val = (val >> shift) | (val << (32 - shift));
927 gen_op_movl_T0_im(val);
928 if (insn & (1 << 19))
929 gen_op_movl_psr_T0();
930 } else if ((insn & 0x0f900000) == 0x01000000
931 && (insn & 0x00000090) != 0x00000090) {
932 /* miscellaneous instructions */
933 op1 = (insn >> 21) & 3;
934 sh = (insn >> 4) & 0xf;
937 case 0x0: /* move program status register */
939 /* SPSR not accessible in user mode */
944 gen_movl_T0_reg(s, rm);
945 if (insn & (1 << 19))
946 gen_op_movl_psr_T0();
949 rd = (insn >> 12) & 0xf;
950 gen_op_movl_T0_psr();
951 gen_movl_reg_T0(s, rd);
956 /* branch/exchange thumb (bx). */
957 gen_movl_T0_reg(s, rm);
959 } else if (op1 == 3) {
/* clz */
961 rd = (insn >> 12) & 0xf;
962 gen_movl_T0_reg(s, rm);
964 gen_movl_reg_T0(s, rd);
973 /* branch link/exchange thumb (blx) */
974 val = (uint32_t)s->pc;
975 gen_op_movl_T0_im(val);
976 gen_movl_reg_T0(s, 14);
977 gen_movl_T0_reg(s, rm);
980 case 0x5: /* saturating add/subtract */
981 rd = (insn >> 12) & 0xf;
982 rn = (insn >> 16) & 0xf;
983 gen_movl_T0_reg(s, rn);
/* QD* variants double the accumulator first (rn vs rm paths below). */
985 gen_movl_T1_reg(s, rn);
987 gen_op_subl_T0_T1_saturate();
989 gen_op_addl_T0_T1_saturate();
991 gen_movl_T1_reg(s, rm);
993 gen_op_subl_T0_T1_saturate();
995 gen_op_addl_T0_T1_saturate();
996 gen_movl_reg_T0(s, rn);
998 case 0x8: /* signed multiply */
1002 rs = (insn >> 8) & 0xf;
1003 rn = (insn >> 12) & 0xf;
1004 rd = (insn >> 16) & 0xf;
1006 /* (32 * 16) >> 16 */
1007 gen_movl_T0_reg(s, rm);
1008 gen_movl_T1_reg(s, rs);
/* Select top or bottom half of the 16-bit operand. */
1010 gen_op_sarl_T1_im(16);
1013 gen_op_imulw_T0_T1();
1014 if ((sh & 2) == 0) {
1015 gen_movl_T1_reg(s, rn);
/* setq variants record saturation in the Q flag. */
1016 gen_op_addl_T0_T1_setq();
1018 gen_movl_reg_T0(s, rd);
/* 16 * 16 multiplies (SMUL/SMLA/SMLAL halves). */
1021 gen_movl_T0_reg(s, rm);
1023 gen_op_sarl_T0_im(16);
1026 gen_movl_T1_reg(s, rs);
1028 gen_op_sarl_T1_im(16);
1032 gen_op_imull_T0_T1();
1033 gen_op_addq_T0_T1(rn, rd);
1034 gen_movl_reg_T0(s, rn);
1035 gen_movl_reg_T1(s, rd);
1039 gen_movl_T1_reg(s, rn);
1040 gen_op_addl_T0_T1_setq();
1042 gen_movl_reg_T0(s, rd);
/* ---- data processing (register and immediate forms) ---- */
1049 } else if (((insn & 0x0e000000) == 0 &&
1050 (insn & 0x00000090) != 0x90) ||
1051 ((insn & 0x0e000000) == (1 << 25))) {
1052 int set_cc, logic_cc, shiftop;
1054 op1 = (insn >> 21) & 0xf;
1055 set_cc = (insn >> 20) & 1;
1056 logic_cc = table_logic_cc[op1] & set_cc;
1058 /* data processing instruction */
1059 if (insn & (1 << 25)) {
1060 /* immediate operand */
1062 shift = ((insn >> 8) & 0xf) * 2;
1064 val = (val >> shift) | (val << (32 - shift));
1065 gen_op_movl_T1_im(val);
/* A rotated immediate updates the carry flag for logical ops. */
1066 if (logic_cc && shift)
/* register operand, optionally shifted */
1071 gen_movl_T1_reg(s, rm);
1072 shiftop = (insn >> 5) & 3;
1073 if (!(insn & (1 << 4))) {
1074 shift = (insn >> 7) & 0x1f;
1077 gen_shift_T1_im_cc[shiftop](shift);
1079 gen_shift_T1_im[shiftop](shift);
1081 } else if (shiftop != 0) {
1083 gen_shift_T1_0_cc[shiftop]();
1085 gen_shift_T1_0[shiftop]();
/* shift amount in a register (rs) */
1089 rs = (insn >> 8) & 0xf;
1090 gen_movl_T0_reg(s, rs);
1092 gen_shift_T1_T0_cc[shiftop]();
1094 gen_shift_T1_T0[shiftop]();
/* MOV (0xd) and MVN (0xf) have no first operand. */
1098 if (op1 != 0x0f && op1 != 0x0d) {
1099 rn = (insn >> 16) & 0xf;
1100 gen_movl_T0_reg(s, rn);
1102 rd = (insn >> 12) & 0xf;
/* opcode dispatch: AND/EOR/SUB/RSB/ADD/ADC/SBC/RSC/TST/TEQ/
   CMP/CMN/ORR/MOV/BIC/MVN (case labels truncated). */
1105 gen_op_andl_T0_T1();
1106 gen_movl_reg_T0(s, rd);
1108 gen_op_logic_T0_cc();
1111 gen_op_xorl_T0_T1();
1112 gen_movl_reg_T0(s, rd);
1114 gen_op_logic_T0_cc();
1118 gen_op_subl_T0_T1_cc();
1120 gen_op_subl_T0_T1();
1121 gen_movl_reg_T0(s, rd);
1125 gen_op_rsbl_T0_T1_cc();
1127 gen_op_rsbl_T0_T1();
1128 gen_movl_reg_T0(s, rd);
1132 gen_op_addl_T0_T1_cc();
1134 gen_op_addl_T0_T1();
1135 gen_movl_reg_T0(s, rd);
1139 gen_op_adcl_T0_T1_cc();
1141 gen_op_adcl_T0_T1();
1142 gen_movl_reg_T0(s, rd);
1146 gen_op_sbcl_T0_T1_cc();
1148 gen_op_sbcl_T0_T1();
1149 gen_movl_reg_T0(s, rd);
1153 gen_op_rscl_T0_T1_cc();
1155 gen_op_rscl_T0_T1();
1156 gen_movl_reg_T0(s, rd);
/* TST / TEQ / CMP / CMN: flags only, no destination write. */
1160 gen_op_andl_T0_T1();
1161 gen_op_logic_T0_cc();
1166 gen_op_xorl_T0_T1();
1167 gen_op_logic_T0_cc();
1172 gen_op_subl_T0_T1_cc();
1177 gen_op_addl_T0_T1_cc();
1182 gen_movl_reg_T0(s, rd);
1184 gen_op_logic_T0_cc();
1187 gen_movl_reg_T1(s, rd);
1189 gen_op_logic_T1_cc();
1192 gen_op_bicl_T0_T1();
1193 gen_movl_reg_T0(s, rd);
1195 gen_op_logic_T0_cc();
1200 gen_movl_reg_T1(s, rd);
1202 gen_op_logic_T1_cc();
1206 /* other instructions */
1207 op1 = (insn >> 24) & 0xf;
1211 /* multiplies, extra load/stores */
1212 sh = (insn >> 5) & 3;
1215 rd = (insn >> 16) & 0xf;
1216 rn = (insn >> 12) & 0xf;
1217 rs = (insn >> 8) & 0xf;
1219 if (((insn >> 22) & 3) == 0) {
/* 32-bit multiply (MUL/MLA) */
1221 gen_movl_T0_reg(s, rs);
1222 gen_movl_T1_reg(s, rm);
1224 if (insn & (1 << 21)) {
1225 gen_movl_T1_reg(s, rn);
1226 gen_op_addl_T0_T1();
1228 if (insn & (1 << 20))
1229 gen_op_logic_T0_cc();
1230 gen_movl_reg_T0(s, rd);
/* 64-bit multiply (UMULL/SMULL/UMLAL/SMLAL) */
1233 gen_movl_T0_reg(s, rs);
1234 gen_movl_T1_reg(s, rm);
1235 if (insn & (1 << 22))
1236 gen_op_imull_T0_T1();
1238 gen_op_mull_T0_T1();
1239 if (insn & (1 << 21)) /* mult accumulate */
1240 gen_op_addq_T0_T1(rn, rd);
1241 if (!(insn & (1 << 23))) { /* double accumulate */
1242 gen_op_addq_lo_T0_T1(rn);
1243 gen_op_addq_lo_T0_T1(rd);
1245 if (insn & (1 << 20))
1247 gen_movl_reg_T0(s, rn);
1248 gen_movl_reg_T1(s, rd);
1251 rn = (insn >> 16) & 0xf;
1252 rd = (insn >> 12) & 0xf;
1253 if (insn & (1 << 23)) {
1254 /* load/store exclusive */
1257 /* SWP instruction */
1260 gen_movl_T0_reg(s, rm);
1261 gen_movl_T1_reg(s, rn);
1262 if (insn & (1 << 22)) {
1263 gen_op_swpb_T0_T1();
1265 gen_op_swpl_T0_T1();
1267 gen_movl_reg_T0(s, rd);
1271 /* Misc load/store */
1272 rn = (insn >> 16) & 0xf;
1273 rd = (insn >> 12) & 0xf;
1274 gen_movl_T1_reg(s, rn);
1275 if (insn & (1 << 24))
1276 gen_add_datah_offset(s, insn);
1277 if (insn & (1 << 20)) {
/* load: sh selects unsigned halfword / signed byte / signed halfword */
1281 gen_op_lduw_T0_T1();
1284 gen_op_ldsb_T0_T1();
1288 gen_op_ldsw_T0_T1();
1291 gen_movl_reg_T0(s, rd);
1292 } else if (sh & 2) {
/* doubleword (LDRD/STRD): transfers rd and rd+1 */
1296 gen_movl_T0_reg(s, rd);
1298 gen_op_addl_T1_im(4);
1299 gen_movl_T0_reg(s, rd + 1);
1301 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1302 gen_op_addl_T1_im(-4);
1306 gen_movl_reg_T0(s, rd);
1307 gen_op_addl_T1_im(4);
1309 gen_movl_reg_T0(s, rd + 1);
1310 if ((insn & (1 << 24)) || (insn & (1 << 20)))
1311 gen_op_addl_T1_im(-4);
/* store halfword */
1315 gen_movl_T0_reg(s, rd);
/* post-indexed / writeback handling */
1318 if (!(insn & (1 << 24))) {
1319 gen_add_datah_offset(s, insn);
1320 gen_movl_reg_T1(s, rn);
1321 } else if (insn & (1 << 21)) {
1322 gen_movl_reg_T1(s, rn);
1330 /* load/store byte/word */
1331 rn = (insn >> 16) & 0xf;
1332 rd = (insn >> 12) & 0xf;
1333 gen_movl_T1_reg(s, rn);
1334 if (insn & (1 << 24))
1335 gen_add_data_offset(s, insn);
1336 if (insn & (1 << 20)) {
1338 if (insn & (1 << 22))
1339 gen_op_ldub_T0_T1();
1345 gen_movl_reg_T0(s, rd);
1348 gen_movl_T0_reg(s, rd);
1349 if (insn & (1 << 22))
1354 if (!(insn & (1 << 24))) {
1355 gen_add_data_offset(s, insn);
1356 gen_movl_reg_T1(s, rn);
1357 } else if (insn & (1 << 21))
1358 gen_movl_reg_T1(s, rn); {
1365 /* load/store multiple words */
1366 /* XXX: store correct base if write back */
1367 if (insn & (1 << 22))
1368 goto illegal_op; /* only usable in supervisor mode */
1369 rn = (insn >> 16) & 0xf;
1370 gen_movl_T1_reg(s, rn);
1372 /* compute total size */
1375 if (insn & (1 << i))
1378 /* XXX: test invalid n == 0 case ? */
1379 if (insn & (1 << 23)) {
1380 if (insn & (1 << 24)) {
/* pre increment */
1382 gen_op_addl_T1_im(4);
1384 /* post increment */
1387 if (insn & (1 << 24)) {
/* pre decrement */
1389 gen_op_addl_T1_im(-(n * 4));
1391 /* post decrement */
1393 gen_op_addl_T1_im(-((n - 1) * 4));
1398 if (insn & (1 << i)) {
1399 if (insn & (1 << 20)) {
1405 gen_movl_reg_T0(s, i);
/* store */
1409 /* special case: r15 = PC + 12 */
1410 val = (long)s->pc + 8;
1411 gen_op_movl_TN_im[0](val);
1413 gen_movl_T0_reg(s, i);
1418 /* no need to add after the last transfer */
1420 gen_op_addl_T1_im(4);
1423 if (insn & (1 << 21)) {
/* base writeback: undo/redo the address adjustment */
1425 if (insn & (1 << 23)) {
1426 if (insn & (1 << 24)) {
1429 /* post increment */
1430 gen_op_addl_T1_im(4);
1433 if (insn & (1 << 24)) {
1436 gen_op_addl_T1_im(-((n - 1) * 4));
1438 /* post decrement */
1439 gen_op_addl_T1_im(-(n * 4));
1442 gen_movl_reg_T1(s, rn);
1451 /* branch (and link) */
1452 val = (int32_t)s->pc;
1453 if (insn & (1 << 24)) {
1454 gen_op_movl_T0_im(val);
1455 gen_op_movl_reg_TN[0][14]();
1457 offset = (((int32_t)insn << 8) >> 8);
1458 val += (offset << 2) + 4;
1459 gen_op_jmp((long)s->tb, val);
1460 s->is_jmp = DISAS_TB_JUMP;
/* coprocessor dispatch; VFP is cp10/cp11 */
1467 op1 = (insn >> 8) & 0xf;
1471 if (disas_vfp_insn (env, s, insn))
1475 /* unknown coprocessor. */
/* swi: save return PC and raise the exception at run time */
1481 gen_op_movl_T0_im((long)s->pc);
1482 gen_op_movl_reg_TN[0][15]();
1484 s->is_jmp = DISAS_JUMP;
/* illegal_op: point PC at the faulting insn and emit undef trap */
1488 gen_op_movl_T0_im((long)s->pc - 4);
1489 gen_op_movl_reg_TN[0][15]();
1490 gen_op_undef_insn();
1491 s->is_jmp = DISAS_JUMP;
/* Decode and translate one 16-bit Thumb instruction at s->pc.
   NOTE(review): many lines (case labels, else arms, braces, the insn fetch)
   are missing from this excerpt -- do not restyle without the full source. */
1497 static void disas_thumb_insn(DisasContext *s)
1499 uint32_t val, insn, op, rm, rn, rd, shift, cond;
1506 switch (insn >> 12) {
/* format 1/2: shift by immediate, add/subtract */
1509 op = (insn >> 11) & 3;
1512 rn = (insn >> 3) & 7;
1513 gen_movl_T0_reg(s, rn);
1514 if (insn & (1 << 10)) {
/* 3-bit immediate operand */
1516 gen_op_movl_T1_im((insn >> 6) & 7);
1519 rm = (insn >> 6) & 7;
1520 gen_movl_T1_reg(s, rm);
1522 if (insn & (1 << 9))
/* NOTE(review): bit 9 set encodes SUB in this format, yet both arms
   emit gen_op_addl_T0_T1_cc(); the taken branch should presumably be
   gen_op_subl_T0_T1_cc() -- verify against the full source. */
1523 gen_op_addl_T0_T1_cc();
1525 gen_op_addl_T0_T1_cc();
1526 gen_movl_reg_T0(s, rd);
1528 /* shift immediate */
1529 rm = (insn >> 3) & 7;
1530 shift = (insn >> 6) & 0x1f;
1531 gen_movl_T0_reg(s, rm);
1532 gen_shift_T0_im_thumb[op](shift);
1533 gen_movl_reg_T0(s, rd);
1537 /* arithmetic large immediate */
1538 op = (insn >> 11) & 3;
1539 rd = (insn >> 8) & 0x7;
1541 gen_op_movl_T0_im(insn & 0xff);
1543 gen_movl_T0_reg(s, rd);
1544 gen_op_movl_T1_im(insn & 0xff);
/* op: 0=mov, 1=cmp, 2=add, 3=sub (labels truncated) */
1548 gen_op_logic_T0_cc();
1551 gen_op_subl_T0_T1_cc();
1554 gen_op_addl_T0_T1_cc();
1557 gen_op_subl_T0_T1_cc();
/* cmp does not write back rd */
1561 gen_movl_reg_T0(s, rd);
1564 if (insn & (1 << 11)) {
1565 rd = (insn >> 8) & 7;
1566 /* load pc-relative */
1567 val = (insn & 0xff) * 4;
1568 gen_op_movl_T1_im(val);
1569 gen_movl_T2_reg(s, 15);
1570 gen_op_addl_T1_T2();
1572 gen_movl_reg_T0(s, rd);
1575 if (insn & (1 << 10)) {
1576 /* data processing extended or blx */
/* rd may be a high register: H1 bit contributes bit 3 */
1577 rd = (insn & 7) | ((insn >> 4) & 8);
1578 rm = (insn >> 3) & 0xf;
1579 op = (insn >> 8) & 3;
1582 gen_movl_T0_reg(s, rd);
1583 gen_movl_T1_reg(s, rm);
1584 gen_op_addl_T0_T1();
1585 gen_movl_reg_T0(s, rd);
/* cmp high registers */
1588 gen_movl_T0_reg(s, rd);
1589 gen_movl_T1_reg(s, rm);
1590 gen_op_subl_T0_T1_cc();
1592 case 2: /* mov/cpy */
1593 gen_movl_T0_reg(s, rm);
1594 gen_movl_reg_T0(s, rd);
1596 case 3:/* branch [and link] exchange thumb register */
1597 if (insn & (1 << 7)) {
/* blx: save return address with thumb bit set */
1598 val = (uint32_t)s->pc | 1;
1599 gen_op_movl_T1_im(val);
1600 gen_movl_reg_T1(s, 14);
1602 gen_movl_T0_reg(s, rm);
1609 /* data processing register */
1611 rm = (insn >> 3) & 7;
1612 op = (insn >> 6) & 0xf;
1613 if (op == 2 || op == 3 || op == 4 || op == 7) {
1614 /* the shift/rotate ops want the operands backwards */
1623 if (op == 9) /* neg */
1624 gen_op_movl_T0_im(0);
1625 else if (op != 0xf) /* mvn doesn't read its first operand */
1626 gen_movl_T0_reg(s, rd);
1628 gen_movl_T1_reg(s, rm);
1629 switch (insn >> 6) {
/* and / eor / lsl / lsr / asr / adc / sbc / ror / tst / neg /
   cmp / cmn / orr / mul / bic / mvn (case labels truncated) */
1631 gen_op_andl_T0_T1();
1632 gen_op_logic_T0_cc();
1635 gen_op_xorl_T0_T1();
1636 gen_op_logic_T0_cc();
1639 gen_op_shll_T1_T0_cc();
1642 gen_op_shrl_T1_T0_cc();
1645 gen_op_sarl_T1_T0_cc();
1648 gen_op_adcl_T0_T1_cc();
1651 gen_op_sbcl_T0_T1_cc();
1654 gen_op_rorl_T1_T0_cc();
1657 gen_op_andl_T0_T1();
1658 gen_op_logic_T0_cc();
1661 gen_op_rsbl_T0_T1_cc();
1664 gen_op_subl_T0_T1_cc();
1668 gen_op_addl_T0_T1_cc();
1673 gen_op_logic_T0_cc();
1676 gen_op_mull_T0_T1();
1677 gen_op_logic_T0_cc();
1680 gen_op_bicl_T0_T1();
1681 gen_op_logic_T0_cc();
1685 gen_op_logic_T1_cc();
/* result writeback: shifted ops leave it in T1, others in T0 */
1691 gen_movl_reg_T1(s, rd);
1693 gen_movl_reg_T0(s, rd);
1698 /* load/store register offset. */
1700 rn = (insn >> 3) & 7;
1701 rm = (insn >> 6) & 7;
1702 op = (insn >> 9) & 7;
1703 gen_movl_T1_reg(s, rn);
1704 gen_movl_T2_reg(s, rm);
1705 gen_op_addl_T1_T2();
1707 if (op < 3) /* store */
1708 gen_movl_T0_reg(s, rd);
/* op-dependent access size/signedness (labels truncated) */
1721 gen_op_ldsb_T0_T1();
1727 gen_op_ldsw_T0_T1();
1730 gen_op_ldub_T0_T1();
1733 gen_op_ldsw_T0_T1();
1736 if (op >= 3) /* load */
1737 gen_movl_reg_T0(s, rd);
1741 /* load/store word immediate offset */
1743 rn = (insn >> 3) & 7;
1744 gen_movl_T1_reg(s, rn);
/* imm5 scaled by 4 */
1745 val = (insn >> 4) & 0x7c;
1746 gen_op_movl_T2_im(val);
1747 gen_op_addl_T1_T2();
1749 if (insn & (1 << 11)) {
1752 gen_movl_reg_T0(s, rd);
1755 gen_movl_T0_reg(s, rd);
1761 /* load/store byte immediate offset */
1763 rn = (insn >> 3) & 7;
1764 gen_movl_T1_reg(s, rn);
1765 val = (insn >> 6) & 0x1f;
1766 gen_op_movl_T2_im(val);
1767 gen_op_addl_T1_T2();
1769 if (insn & (1 << 11)) {
1771 gen_op_ldub_T0_T1();
1772 gen_movl_reg_T0(s, rd);
1775 gen_movl_T0_reg(s, rd);
1781 /* load/store halfword immediate offset */
1783 rn = (insn >> 3) & 7;
1784 gen_movl_T1_reg(s, rn);
/* imm5 scaled by 2 */
1785 val = (insn >> 5) & 0x3e;
1786 gen_op_movl_T2_im(val);
1787 gen_op_addl_T1_T2();
1789 if (insn & (1 << 11)) {
1791 gen_op_lduw_T0_T1();
1792 gen_movl_reg_T0(s, rd);
1795 gen_movl_T0_reg(s, rd);
1801 /* load/store from stack */
1802 rd = (insn >> 8) & 7;
1803 gen_movl_T1_reg(s, 13);
1804 val = (insn & 0xff) * 4;
1805 gen_op_movl_T2_im(val);
1806 gen_op_addl_T1_T2();
1808 if (insn & (1 << 11)) {
1811 gen_movl_reg_T0(s, rd);
1814 gen_movl_T0_reg(s, rd);
1820 /* add to high reg */
1821 rd = (insn >> 8) & 7;
/* bit 11 selects SP vs PC as the base register */
1822 if (insn & (1 << 11))
1826 gen_movl_T0_reg(s, rm);
1827 val = (insn & 0xff) * 4;
1828 gen_op_movl_T1_im(val);
1829 gen_op_addl_T0_T1();
1830 gen_movl_reg_T0(s, rd);
/* misc (format 13/14): sp adjust, push/pop */
1835 op = (insn >> 8) & 0xf;
1838 /* adjust stack pointer */
1839 gen_movl_T1_reg(s, 13);
1840 val = (insn & 0x7f) * 4;
1841 if (insn & (1 << 7))
1842 val = -(int32_t)val;
1843 gen_op_movl_T2_im(val);
1844 gen_op_addl_T1_T2();
1845 gen_movl_reg_T1(s, 13);
1848 case 4: case 5: case 0xc: case 0xd:
/* push/pop */
1850 gen_movl_T1_reg(s, 13);
1851 if (insn & (1 << 11))
1855 gen_op_movl_T2_im(val);
1856 for (i = 0; i < 8; i++) {
1857 if (insn & (1 << i)) {
1858 if (insn & (1 << 11)) {
/* pop */
1861 gen_movl_reg_T0(s, i);
/* push */
1864 gen_movl_T0_reg(s, i);
1867 /* move to the next address */
1868 gen_op_addl_T1_T2();
/* bit 8: also transfer lr (push) / pc (pop) */
1871 if (insn & (1 << 8)) {
1872 if (insn & (1 << 11)) {
1875 /* don't set the pc until the rest of the instruction
1879 gen_movl_T0_reg(s, 14);
1882 gen_op_addl_T1_T2();
1885 /* write back the new stack pointer */
1886 gen_movl_reg_T1(s, 13);
1887 /* set the new PC value */
1888 if ((insn & 0x0900) == 0x0900)
1898 /* load/store multiple */
1899 rn = (insn >> 8) & 0x7;
1900 gen_movl_T1_reg(s, rn);
1901 gen_op_movl_T2_im(4);
1903 for (i = 0; i < 8; i++) {
1904 if (insn & (1 << i)) {
1905 /* advance to the next address */
1907 gen_op_addl_T1_T2();
1910 if (insn & (1 << 11)) {
/* load */
1913 gen_movl_reg_T0(s, i);
/* store */
1916 gen_movl_T0_reg(s, i);
1924 /* conditional branch or swi */
1925 cond = (insn >> 8) & 0xf;
/* cond == 0xf is swi: save return address (thumb bit set) */
1931 gen_op_movl_T0_im((long)s->pc | 1);
1932 /* Don't set r15. */
1933 gen_op_movl_reg_TN[0][15]();
1935 s->is_jmp = DISAS_JUMP;
1938 /* generate a conditional jump to next instruction */
1939 gen_test_cc[cond ^ 1]((long)s->tb, (long)s->pc);
1940 s->is_jmp = DISAS_JUMP_NEXT;
1941 gen_movl_T1_reg(s, 15);
1943 /* jump to the offset */
1944 val = (uint32_t)s->pc;
/* sign-extend 8-bit branch offset */
1945 offset = ((int32_t)insn << 24) >> 24;
1946 val += (offset << 1) + 2;
1947 gen_op_jmp((long)s->tb, val);
1948 s->is_jmp = DISAS_TB_JUMP;
1952 /* unconditional branch */
1953 if (insn & (1 << 11))
1954 goto undef; /* Second half of a blx */
1955 val = (uint32_t)s->pc;
/* sign-extend 11-bit branch offset */
1956 offset = ((int32_t)insn << 21) >> 21;
1957 val += (offset << 1) + 2;
1958 gen_op_jmp((long)s->tb, val);
1959 s->is_jmp = DISAS_TB_JUMP;
1963 /* branch and link [and switch to arm] */
/* first half: high part of the 22-bit offset, sign-extended */
1964 offset = ((int32_t)insn << 21) >> 10;
/* second half: low 11 bits; bit 11 distinguishes bl from blx */
1966 offset |= insn & 0x7ff;
1968 val = (uint32_t)s->pc + 2;
1969 gen_op_movl_T1_im(val | 1);
1970 gen_movl_reg_T1(s, 14);
1973 if (insn & (1 << 11)) {
1975 gen_op_jmp((long)s->tb, val);
1976 s->is_jmp = DISAS_TB_JUMP;
1979 gen_op_movl_T0_im(val);
/* undef: point PC at the faulting insn and emit undef trap */
1985 gen_op_movl_T0_im((long)s->pc - 4);
1986 gen_op_movl_reg_TN[0][15]();
1987 gen_op_undef_insn();
1988 s->is_jmp = DISAS_JUMP;
/* NOTE(review): the translation loop head, Thumb-mode test, and the other
   is_jmp cases are missing from this excerpt. */
1991 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
1992 basic block 'tb'. If search_pc is TRUE, also generate PC
1993 information for each intermediate instruction. */
1994 static inline int gen_intermediate_code_internal(CPUState *env,
1995 TranslationBlock *tb,
1998 DisasContext dc1, *dc = &dc1;
1999 uint16_t *gen_opc_end;
2001 target_ulong pc_start;
2003 /* generate intermediate code */
/* Reset the global op/param emission cursors for this TB. */
2008 gen_opc_ptr = gen_opc_buf;
2009 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
2010 gen_opparam_ptr = gen_opparam_buf;
2012 dc->is_jmp = DISAS_NEXT;
/* search_pc bookkeeping: map each emitted op back to a guest PC. */
2017 j = gen_opc_ptr - gen_opc_buf;
2021 gen_opc_instr_start[lj++] = 0;
2023 gen_opc_pc[lj] = dc->pc;
2024 gen_opc_instr_start[lj] = 1;
/* Dispatch on CPU Thumb state (the env test is truncated here). */
2027 disas_thumb_insn(dc);
2029 disas_arm_insn(env, dc);
/* Stop at a jump, a full op buffer, or near the end of the guest page. */
2030 } while (!dc->is_jmp && gen_opc_ptr < gen_opc_end &&
2031 (dc->pc - pc_start) < (TARGET_PAGE_SIZE - 32));
2032 switch(dc->is_jmp) {
2033 case DISAS_JUMP_NEXT:
2035 gen_op_jmp((long)dc->tb, (long)dc->pc);
2040 /* indicate that the hash table must be used to find the next TB */
2045 /* nothing more to generate */
2048 *gen_opc_ptr = INDEX_op_end;
/* Optional debug dump of the guest assembly and emitted micro-ops. */
2051 if (loglevel & CPU_LOG_TB_IN_ASM) {
2052 fprintf(logfile, "----------------\n");
2053 fprintf(logfile, "IN: %s\n", lookup_symbol(pc_start));
2054 target_disas(logfile, pc_start, dc->pc - pc_start, 0);
2055 fprintf(logfile, "\n");
2056 if (loglevel & (CPU_LOG_TB_OP)) {
2057 fprintf(logfile, "OP:\n");
2058 dump_ops(gen_opc_buf, gen_opparam_buf);
2059 fprintf(logfile, "\n");
2064 tb->size = dc->pc - pc_start;
/* Public entry point: translate a TB without per-op PC tracking. */
2068 int gen_intermediate_code(CPUState *env, TranslationBlock *tb)
2070     return gen_intermediate_code_internal(env, tb, 0);
/* Public entry point: translate a TB with per-op PC tracking (search_pc),
   used to recover the guest PC after an exception mid-TB. */
2073 int gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
2075     return gen_intermediate_code_internal(env, tb, 1);
/* Allocate and zero a fresh CPU state and make it the current CPU.
   NOTE(review): the malloc NULL-check (if any) is truncated from this
   excerpt -- confirm the failure path returns NULL rather than crashing. */
2078 CPUARMState *cpu_arm_init(void)
2084     env = malloc(sizeof(CPUARMState));
2087     memset(env, 0, sizeof(CPUARMState));
2088     cpu_single_env = env;
/* Release a CPU state created by cpu_arm_init (body truncated here). */
2092 void cpu_arm_close(CPUARMState *env)
/* Pretty-print the core registers, PSR flags, and VFP registers to 'f'
   via the caller-supplied fprintf-like callback.
   NOTE(review): loop heads and closing braces are truncated here. */
2097 void cpu_dump_state(CPUState *env, FILE *f,
2098 int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
2109 cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
2111 cpu_fprintf(f, "\n");
2113 cpu_fprintf(f, " ");
2115 cpu_fprintf(f, "PSR=%08x %c%c%c%c\n",
2117 env->cpsr & (1 << 31) ? 'N' : '-',
2118 env->cpsr & (1 << 30) ? 'Z' : '-',
2119 env->cpsr & (1 << 29) ? 'C' : '-',
2120 env->cpsr & (1 << 28) ? 'V' : '-');
/* Each double overlaps two singles; print all three views per line. */
2122 for (i = 0; i < 16; i++) {
2123 s0.s = env->vfp.regs.s[i * 2];
2124 s1.s = env->vfp.regs.s[i * 2 + 1];
2125 d.d = env->vfp.regs.d[i];
2126 cpu_fprintf(f, "s%02d=%08x(%8f) s%02d=%08x(%8f) d%02d=%08x%08x(%8f)\n",
2127 i * 2, (int)s0.i, s0.s,
/* NOTE(review): this prints s0 again for the odd-numbered single;
   it should presumably be (int)s1.i, s1.s -- verify and fix upstream. */
2128 i * 2 + 1, (int)s0.i, s0.s,
2129 i, (int)(uint32_t)d.l.upper, (int)(uint32_t)d.l.lower,
2131 cpu_fprintf(f, "FPSCR: %08x\n", (int)env->vfp.fpscr);
/* Debug helper: translate a guest virtual address to a physical page
   address (body truncated here; user-mode builds have no MMU mapping). */
2135 target_ulong cpu_get_phys_page_debug(CPUState *env, target_ulong addr)
/* User-mode-only fault handler: records the faulting address and raises a
   prefetch or data abort depending on the access type. The system-emulation
   variant is deliberately unimplemented (#error). Lines truncated here. */
2140 #if defined(CONFIG_USER_ONLY)
2142 int cpu_arm_handle_mmu_fault (CPUState *env, target_ulong address, int rw,
2143                               int is_user, int is_softmmu)
/* cp15 fault-address register mirror for the aborted access. */
2145     env->cp15_6 = address;
2147         env->exception_index = EXCP_PREFETCH_ABORT;
2149         env->exception_index = EXCP_DATA_ABORT;
2156 #error not implemented