#include <inttypes.h>
#include "cpu.h"
-#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"
#include "qemu-log.h"
struct TranslationBlock *tb;
int singlestep_enabled;
int thumb;
+ int bswap_code; /* nonzero if insn fetches need byteswapping (BE8 code) */
#if !defined(CONFIG_USER_ONLY)
int user;
#endif
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, regs[i]),
+ offsetof(CPUARMState, regs[i]),
regnames[i]);
}
cpu_exclusive_addr = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_addr), "exclusive_addr");
+ offsetof(CPUARMState, exclusive_addr), "exclusive_addr");
cpu_exclusive_val = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_val), "exclusive_val");
+ offsetof(CPUARMState, exclusive_val), "exclusive_val");
cpu_exclusive_high = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_high), "exclusive_high");
+ offsetof(CPUARMState, exclusive_high), "exclusive_high");
#ifdef CONFIG_USER_ONLY
cpu_exclusive_test = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_test), "exclusive_test");
+ offsetof(CPUARMState, exclusive_test), "exclusive_test");
cpu_exclusive_info = tcg_global_mem_new_i32(TCG_AREG0,
- offsetof(CPUState, exclusive_info), "exclusive_info");
+ offsetof(CPUARMState, exclusive_info), "exclusive_info");
#endif
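+ /* The exclusive_* globals record the address and data of the most
+ * recent load-exclusive so the following store-exclusive can be
+ * checked against them; exclusive_test and exclusive_info only
+ * support the user-mode-only emulation of exclusive accesses.
+ */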
#define GEN_HELPER 1
return tmp;
}
-#define load_cpu_field(name) load_cpu_offset(offsetof(CPUState, name))
+#define load_cpu_field(name) load_cpu_offset(offsetof(CPUARMState, name))
static inline void store_cpu_offset(TCGv var, int offset)
{
}
#define store_cpu_field(var, name) \
- store_cpu_offset(var, offsetof(CPUState, name))
+ store_cpu_offset(var, offsetof(CPUARMState, name))
/* Set a variable to the value of a CPU register. */
static void load_reg_var(DisasContext *s, TCGv var, int reg)
tcg_temp_free_i32(t1);
}
-#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, CF))
+#define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, CF))
/* Set CF to the top bit of var. */
static void gen_set_CF_bit31(TCGv var)
/* Set N and Z flags from var. */
static inline void gen_logic_CC(TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, NF));
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, ZF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, NF));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, ZF));
}
/* T0 += T1 + CF. */
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 1:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 5:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
#define gen_pas_helper(name) glue(gen_helper_,name)(a, a, b, tmp)
case 0:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(s)
tcg_temp_free_ptr(tmp);
break;
case 4:
tmp = tcg_temp_new_ptr();
- tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_addi_ptr(tmp, cpu_env, offsetof(CPUARMState, GE));
PAS_OP(u)
tcg_temp_free_ptr(tmp);
break;
if (s->thumb != (addr & 1)) {
tmp = tcg_temp_new_i32();
tcg_gen_movi_i32(tmp, addr & 1);
- tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUState, thumb));
+ tcg_gen_st_i32(tmp, cpu_env, offsetof(CPUARMState, thumb));
tcg_temp_free_i32(tmp);
}
tcg_gen_movi_i32(cpu_R[15], addr & ~1);
/* Variant of store_reg which uses branch&exchange logic when storing
to r15 in ARM architecture v7 and above. The source must be a temporary
and will be marked as dead. */
-static inline void store_reg_bx(CPUState *env, DisasContext *s,
+static inline void store_reg_bx(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_7) {
* to r15 in ARM architecture v5T and above. This is used for storing
* the results of a LDR/LDM/POP into r15, and corresponds to the cases
* in the ARM ARM which use the LoadWritePC() pseudocode function. */
-static inline void store_reg_from_load(CPUState *env, DisasContext *s,
+static inline void store_reg_from_load(CPUARMState *env, DisasContext *s,
int reg, TCGv var)
{
if (reg == 15 && ENABLE_ARCH_5) {
}
}
+static TCGv_ptr get_fpstatus_ptr(int neon)
+{
+ TCGv_ptr statusptr = tcg_temp_new_ptr();
+ int offset;
+ if (neon) {
+ offset = offsetof(CPUARMState, vfp.standard_fp_status);
+ } else {
+ offset = offsetof(CPUARMState, vfp.fp_status);
+ }
+ tcg_gen_addi_ptr(statusptr, cpu_env, offset);
+ return statusptr;
+}
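+
+/* Neon operations always use the "standard FPSCR" value (flush-to-zero,
+ * default-NaN), kept in vfp.standard_fp_status, while plain VFP operations
+ * honour the runtime FPSCR in vfp.fp_status; the neon flag picks between
+ * the two.
+ */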
+
#define VFP_OP2(name) \
static inline void gen_vfp_##name(int dp) \
{ \
- if (dp) \
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, cpu_env); \
- else \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, cpu_env); \
+ TCGv_ptr fpst = get_fpstatus_ptr(0); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, cpu_F1d, fpst); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, cpu_F1s, fpst); \
+ } \
+ tcg_temp_free_ptr(fpst); \
}
VFP_OP2(add)
#undef VFP_OP2
+static inline void gen_vfp_F1_mul(int dp)
+{
+ /* Like gen_vfp_mul() but puts result in F1 */
+ TCGv_ptr fpst = get_fpstatus_ptr(0);
+ if (dp) {
+ gen_helper_vfp_muld(cpu_F1d, cpu_F0d, cpu_F1d, fpst);
+ } else {
+ gen_helper_vfp_muls(cpu_F1s, cpu_F0s, cpu_F1s, fpst);
+ }
+ tcg_temp_free_ptr(fpst);
+}
+
+static inline void gen_vfp_F1_neg(int dp)
+{
+ /* Like gen_vfp_neg() but puts result in F1 */
+ if (dp) {
+ gen_helper_vfp_negd(cpu_F1d, cpu_F0d);
+ } else {
+ gen_helper_vfp_negs(cpu_F1s, cpu_F0s);
+ }
+}
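+
+/* The F1 variants above let VMLA and friends compute the product into F1
+ * and then load the addend into F0, so the operand order of the final
+ * addition matches the architectural pseudocode (which is visible when
+ * NaN propagation has to pick one of the inputs).
+ */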
+
static inline void gen_vfp_abs(int dp)
{
if (dp)
tcg_gen_movi_i32(cpu_F1s, 0);
}
-static inline void gen_vfp_uito(int dp)
-{
- if (dp)
- gen_helper_vfp_uitod(cpu_F0d, cpu_F0s, cpu_env);
- else
- gen_helper_vfp_uitos(cpu_F0s, cpu_F0s, cpu_env);
-}
-
-static inline void gen_vfp_sito(int dp)
-{
- if (dp)
- gen_helper_vfp_sitod(cpu_F0d, cpu_F0s, cpu_env);
- else
- gen_helper_vfp_sitos(cpu_F0s, cpu_F0s, cpu_env);
-}
-
-static inline void gen_vfp_toui(int dp)
-{
- if (dp)
- gen_helper_vfp_touid(cpu_F0s, cpu_F0d, cpu_env);
- else
- gen_helper_vfp_touis(cpu_F0s, cpu_F0s, cpu_env);
+#define VFP_GEN_ITOF(name) \
+static inline void gen_vfp_##name(int dp, int neon) \
+{ \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0s, statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+ } \
+ tcg_temp_free_ptr(statusptr); \
}
-static inline void gen_vfp_touiz(int dp)
-{
- if (dp)
- gen_helper_vfp_touizd(cpu_F0s, cpu_F0d, cpu_env);
- else
- gen_helper_vfp_touizs(cpu_F0s, cpu_F0s, cpu_env);
-}
+VFP_GEN_ITOF(uito)
+VFP_GEN_ITOF(sito)
+#undef VFP_GEN_ITOF
-static inline void gen_vfp_tosi(int dp)
-{
- if (dp)
- gen_helper_vfp_tosid(cpu_F0s, cpu_F0d, cpu_env);
- else
- gen_helper_vfp_tosis(cpu_F0s, cpu_F0s, cpu_env);
+#define VFP_GEN_FTOI(name) \
+static inline void gen_vfp_##name(int dp, int neon) \
+{ \
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0s, cpu_F0d, statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, statusptr); \
+ } \
+ tcg_temp_free_ptr(statusptr); \
}
-static inline void gen_vfp_tosiz(int dp)
-{
- if (dp)
- gen_helper_vfp_tosizd(cpu_F0s, cpu_F0d, cpu_env);
- else
- gen_helper_vfp_tosizs(cpu_F0s, cpu_F0s, cpu_env);
-}
+VFP_GEN_FTOI(toui)
+VFP_GEN_FTOI(touiz)
+VFP_GEN_FTOI(tosi)
+VFP_GEN_FTOI(tosiz)
+#undef VFP_GEN_FTOI
#define VFP_GEN_FIX(name) \
-static inline void gen_vfp_##name(int dp, int shift) \
+static inline void gen_vfp_##name(int dp, int shift, int neon) \
{ \
TCGv tmp_shift = tcg_const_i32(shift); \
- if (dp) \
- gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, cpu_env);\
- else \
- gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, cpu_env);\
+ TCGv_ptr statusptr = get_fpstatus_ptr(neon); \
+ if (dp) { \
+ gen_helper_vfp_##name##d(cpu_F0d, cpu_F0d, tmp_shift, statusptr); \
+ } else { \
+ gen_helper_vfp_##name##s(cpu_F0s, cpu_F0s, tmp_shift, statusptr); \
+ } \
tcg_temp_free_i32(tmp_shift); \
+ tcg_temp_free_ptr(statusptr); \
}
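+
+/* The 'neon' argument on the conversion generators above merely selects
+ * the standard FP status for the Neon encodings of these operations,
+ * via get_fpstatus_ptr().
+ */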
VFP_GEN_FIX(tosh)
VFP_GEN_FIX(tosl)
static inline void iwmmxt_load_reg(TCGv_i64 var, int reg)
{
- tcg_gen_ld_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_ld_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline void iwmmxt_store_reg(TCGv_i64 var, int reg)
{
- tcg_gen_st_i64(var, cpu_env, offsetof(CPUState, iwmmxt.regs[reg]));
+ tcg_gen_st_i64(var, cpu_env, offsetof(CPUARMState, iwmmxt.regs[reg]));
}
static inline TCGv iwmmxt_load_creg(int reg)
{
TCGv var = tcg_temp_new_i32();
- tcg_gen_ld_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_ld_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
return var;
}
static inline void iwmmxt_store_creg(int reg, TCGv var)
{
- tcg_gen_st_i32(var, cpu_env, offsetof(CPUState, iwmmxt.cregs[reg]));
+ tcg_gen_st_i32(var, cpu_env, offsetof(CPUARMState, iwmmxt.cregs[reg]));
tcg_temp_free_i32(var);
}
gen_helper_iwmmxt_##name(cpu_M0, cpu_M0, cpu_V1); \
}
-#define IWMMXT_OP_SIZE(name) \
-IWMMXT_OP(name##b) \
-IWMMXT_OP(name##w) \
-IWMMXT_OP(name##l)
+#define IWMMXT_OP_ENV(name) \
+static inline void gen_op_iwmmxt_##name##_M0_wRn(int rn) \
+{ \
+ iwmmxt_load_reg(cpu_V1, rn); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0, cpu_V1); \
+}
+
+#define IWMMXT_OP_ENV_SIZE(name) \
+IWMMXT_OP_ENV(name##b) \
+IWMMXT_OP_ENV(name##w) \
+IWMMXT_OP_ENV(name##l)
-#define IWMMXT_OP_1(name) \
+#define IWMMXT_OP_ENV1(name) \
static inline void gen_op_iwmmxt_##name##_M0(void) \
{ \
- gen_helper_iwmmxt_##name(cpu_M0, cpu_M0); \
+ gen_helper_iwmmxt_##name(cpu_M0, cpu_env, cpu_M0); \
}
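+
+/* The _ENV wrappers pass cpu_env explicitly because these iwMMXt helpers
+ * update the SIMD flag and saturation state kept in CPUARMState, rather
+ * than relying on a global env pointer.
+ */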
IWMMXT_OP(maddsq)
IWMMXT_OP(macsw)
IWMMXT_OP(macuw)
-IWMMXT_OP_SIZE(unpackl)
-IWMMXT_OP_SIZE(unpackh)
-
-IWMMXT_OP_1(unpacklub)
-IWMMXT_OP_1(unpackluw)
-IWMMXT_OP_1(unpacklul)
-IWMMXT_OP_1(unpackhub)
-IWMMXT_OP_1(unpackhuw)
-IWMMXT_OP_1(unpackhul)
-IWMMXT_OP_1(unpacklsb)
-IWMMXT_OP_1(unpacklsw)
-IWMMXT_OP_1(unpacklsl)
-IWMMXT_OP_1(unpackhsb)
-IWMMXT_OP_1(unpackhsw)
-IWMMXT_OP_1(unpackhsl)
-
-IWMMXT_OP_SIZE(cmpeq)
-IWMMXT_OP_SIZE(cmpgtu)
-IWMMXT_OP_SIZE(cmpgts)
-
-IWMMXT_OP_SIZE(mins)
-IWMMXT_OP_SIZE(minu)
-IWMMXT_OP_SIZE(maxs)
-IWMMXT_OP_SIZE(maxu)
-
-IWMMXT_OP_SIZE(subn)
-IWMMXT_OP_SIZE(addn)
-IWMMXT_OP_SIZE(subu)
-IWMMXT_OP_SIZE(addu)
-IWMMXT_OP_SIZE(subs)
-IWMMXT_OP_SIZE(adds)
-
-IWMMXT_OP(avgb0)
-IWMMXT_OP(avgb1)
-IWMMXT_OP(avgw0)
-IWMMXT_OP(avgw1)
+IWMMXT_OP_ENV_SIZE(unpackl)
+IWMMXT_OP_ENV_SIZE(unpackh)
+
+IWMMXT_OP_ENV1(unpacklub)
+IWMMXT_OP_ENV1(unpackluw)
+IWMMXT_OP_ENV1(unpacklul)
+IWMMXT_OP_ENV1(unpackhub)
+IWMMXT_OP_ENV1(unpackhuw)
+IWMMXT_OP_ENV1(unpackhul)
+IWMMXT_OP_ENV1(unpacklsb)
+IWMMXT_OP_ENV1(unpacklsw)
+IWMMXT_OP_ENV1(unpacklsl)
+IWMMXT_OP_ENV1(unpackhsb)
+IWMMXT_OP_ENV1(unpackhsw)
+IWMMXT_OP_ENV1(unpackhsl)
+
+IWMMXT_OP_ENV_SIZE(cmpeq)
+IWMMXT_OP_ENV_SIZE(cmpgtu)
+IWMMXT_OP_ENV_SIZE(cmpgts)
+
+IWMMXT_OP_ENV_SIZE(mins)
+IWMMXT_OP_ENV_SIZE(minu)
+IWMMXT_OP_ENV_SIZE(maxs)
+IWMMXT_OP_ENV_SIZE(maxu)
+
+IWMMXT_OP_ENV_SIZE(subn)
+IWMMXT_OP_ENV_SIZE(addn)
+IWMMXT_OP_ENV_SIZE(subu)
+IWMMXT_OP_ENV_SIZE(addu)
+IWMMXT_OP_ENV_SIZE(subs)
+IWMMXT_OP_ENV_SIZE(adds)
+
+IWMMXT_OP_ENV(avgb0)
+IWMMXT_OP_ENV(avgb1)
+IWMMXT_OP_ENV(avgw0)
+IWMMXT_OP_ENV(avgw1)
IWMMXT_OP(msadb)
-IWMMXT_OP(packuw)
-IWMMXT_OP(packul)
-IWMMXT_OP(packuq)
-IWMMXT_OP(packsw)
-IWMMXT_OP(packsl)
-IWMMXT_OP(packsq)
+IWMMXT_OP_ENV(packuw)
+IWMMXT_OP_ENV(packul)
+IWMMXT_OP_ENV(packuq)
+IWMMXT_OP_ENV(packsw)
+IWMMXT_OP_ENV(packsl)
+IWMMXT_OP_ENV(packsq)
static void gen_op_iwmmxt_set_mup(void)
{
return 0;
}
-/* Disassemble an iwMMXt instruction. Returns nonzero if an error occured
+/* Disassemble an iwMMXt instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_iwmmxt_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_iwmmxt_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int rd, wrd;
int rdhi, rdlo, rd0, rd1, i;
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_srlw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_srll(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srll(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_srlq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_srlq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sraw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_sral(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sral(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sraq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sraq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
}
switch ((insn >> 22) & 3) {
case 1:
- gen_helper_iwmmxt_sllw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
- gen_helper_iwmmxt_slll(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_slll(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
- gen_helper_iwmmxt_sllq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_sllq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorw(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorw(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 2:
if (gen_iwmmxt_shift(insn, 0x1f, tmp)) {
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorl(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorl(cpu_M0, cpu_env, cpu_M0, tmp);
break;
case 3:
if (gen_iwmmxt_shift(insn, 0x3f, tmp)) {
tcg_temp_free_i32(tmp);
return 1;
}
- gen_helper_iwmmxt_rorq(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_rorq(cpu_M0, cpu_env, cpu_M0, tmp);
break;
}
tcg_temp_free_i32(tmp);
rd0 = (insn >> 16) & 0xf;
gen_op_iwmmxt_movq_M0_wRn(rd0);
tmp = tcg_const_i32(((insn >> 16) & 0xf0) | (insn & 0x0f));
- gen_helper_iwmmxt_shufh(cpu_M0, cpu_M0, tmp);
+ gen_helper_iwmmxt_shufh(cpu_M0, cpu_env, cpu_M0, tmp);
tcg_temp_free(tmp);
gen_op_iwmmxt_movq_wRn_M0(wrd);
gen_op_iwmmxt_set_mup();
return 0;
}
-/* Disassemble an XScale DSP instruction. Returns nonzero if an error occured
+/* Disassemble an XScale DSP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_dsp_insn(CPUState *env, DisasContext *s, uint32_t insn)
+static int disas_dsp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int acc, rd0, rd1, rdhi, rdlo;
TCGv tmp, tmp2;
return 1;
}
-/* Disassemble system coprocessor instruction. Return nonzero if
- instruction is not defined. */
-static int disas_cp_insn(CPUState *env, DisasContext *s, uint32_t insn)
-{
- TCGv tmp, tmp2;
- uint32_t rd = (insn >> 12) & 0xf;
- uint32_t cp = (insn >> 8) & 0xf;
- if (IS_USER(s)) {
- return 1;
- }
-
- if (insn & ARM_CP_RW_BIT) {
- if (!env->cp[cp].cp_read)
- return 1;
- gen_set_pc_im(s->pc);
- tmp = tcg_temp_new_i32();
- tmp2 = tcg_const_i32(insn);
- gen_helper_get_cp(tmp, cpu_env, tmp2);
- tcg_temp_free(tmp2);
- store_reg(s, rd, tmp);
- } else {
- if (!env->cp[cp].cp_write)
- return 1;
- gen_set_pc_im(s->pc);
- tmp = load_reg(s, rd);
- tmp2 = tcg_const_i32(insn);
- gen_helper_set_cp(cpu_env, tmp2, tmp);
- tcg_temp_free(tmp2);
- tcg_temp_free_i32(tmp);
- }
- return 0;
-}
-
-static int cp15_user_ok(uint32_t insn)
-{
- int cpn = (insn >> 16) & 0xf;
- int cpm = insn & 0xf;
- int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
-
- if (cpn == 13 && cpm == 0) {
- /* TLS register. */
- if (op == 2 || (op == 3 && (insn & ARM_CP_RW_BIT)))
- return 1;
- }
- if (cpn == 7) {
- /* ISB, DSB, DMB. */
- if ((cpm == 5 && op == 4)
- || (cpm == 10 && (op == 4 || op == 5)))
- return 1;
- }
- return 0;
-}
-
-static int cp15_tls_load_store(CPUState *env, DisasContext *s, uint32_t insn, uint32_t rd)
-{
- TCGv tmp;
- int cpn = (insn >> 16) & 0xf;
- int cpm = insn & 0xf;
- int op = ((insn >> 5) & 7) | ((insn >> 18) & 0x38);
-
- if (!arm_feature(env, ARM_FEATURE_V6K))
- return 0;
-
- if (!(cpn == 13 && cpm == 0))
- return 0;
-
- if (insn & ARM_CP_RW_BIT) {
- switch (op) {
- case 2:
- tmp = load_cpu_field(cp15.c13_tls1);
- break;
- case 3:
- tmp = load_cpu_field(cp15.c13_tls2);
- break;
- case 4:
- tmp = load_cpu_field(cp15.c13_tls3);
- break;
- default:
- return 0;
- }
- store_reg(s, rd, tmp);
-
- } else {
- tmp = load_reg(s, rd);
- switch (op) {
- case 2:
- store_cpu_field(tmp, cp15.c13_tls1);
- break;
- case 3:
- store_cpu_field(tmp, cp15.c13_tls2);
- break;
- case 4:
- store_cpu_field(tmp, cp15.c13_tls3);
- break;
- default:
- tcg_temp_free_i32(tmp);
- return 0;
- }
- }
- return 1;
-}
-
-/* Disassemble system coprocessor (cp15) instruction. Return nonzero if
- instruction is not defined. */
-static int disas_cp15_insn(CPUState *env, DisasContext *s, uint32_t insn)
-{
- uint32_t rd;
- TCGv tmp, tmp2;
-
- /* M profile cores use memory mapped registers instead of cp15. */
- if (arm_feature(env, ARM_FEATURE_M))
- return 1;
-
- if ((insn & (1 << 25)) == 0) {
- if (insn & (1 << 20)) {
- /* mrrc */
- return 1;
- }
- /* mcrr. Used for block cache operations, so implement as no-op. */
- return 0;
- }
- if ((insn & (1 << 4)) == 0) {
- /* cdp */
- return 1;
- }
- if (IS_USER(s) && !cp15_user_ok(insn)) {
- return 1;
- }
-
- /* Pre-v7 versions of the architecture implemented WFI via coprocessor
- * instructions rather than a separate instruction.
- */
- if ((insn & 0x0fff0fff) == 0x0e070f90) {
- /* 0,c7,c0,4: Standard v6 WFI (also used in some pre-v6 cores).
- * In v7, this must NOP.
- */
- if (!arm_feature(env, ARM_FEATURE_V7)) {
- /* Wait for interrupt. */
- gen_set_pc_im(s->pc);
- s->is_jmp = DISAS_WFI;
- }
- return 0;
- }
-
- if ((insn & 0x0fff0fff) == 0x0e070f58) {
- /* 0,c7,c8,2: Not all pre-v6 cores implemented this WFI,
- * so this is slightly over-broad.
- */
- if (!arm_feature(env, ARM_FEATURE_V6)) {
- /* Wait for interrupt. */
- gen_set_pc_im(s->pc);
- s->is_jmp = DISAS_WFI;
- return 0;
- }
- /* Otherwise fall through to handle via helper function.
- * In particular, on v7 and some v6 cores this is one of
- * the VA-PA registers.
- */
- }
-
- rd = (insn >> 12) & 0xf;
-
- if (cp15_tls_load_store(env, s, insn, rd))
- return 0;
-
- tmp2 = tcg_const_i32(insn);
- if (insn & ARM_CP_RW_BIT) {
- tmp = tcg_temp_new_i32();
- gen_helper_get_cp15(tmp, cpu_env, tmp2);
- /* If the destination register is r15 then sets condition codes. */
- if (rd != 15)
- store_reg(s, rd, tmp);
- else
- tcg_temp_free_i32(tmp);
- } else {
- tmp = load_reg(s, rd);
- gen_helper_set_cp15(cpu_env, tmp2, tmp);
- tcg_temp_free_i32(tmp);
- /* Normally we would always end the TB here, but Linux
- * arch/arm/mach-pxa/sleep.S expects two instructions following
- * an MMU enable to execute from cache. Imitate this behaviour. */
- if (!arm_feature(env, ARM_FEATURE_XSCALE) ||
- (insn & 0x0fff0fff) != 0x0e010f10)
- gen_lookup_tb(s);
- }
- tcg_temp_free_i32(tmp2);
- return 0;
-}
-
#define VFP_REG_SHR(x, n) (((n) > 0) ? (x) >> (n) : (x) << -(n))
#define VFP_SREG(insn, bigbit, smallbit) \
((VFP_REG_SHR(insn, bigbit - 1) & 0x1e) | (((insn) >> (smallbit)) & 1))
return tmp;
}
-/* Disassemble a VFP instruction. Returns nonzero if an error occured
+/* Disassemble a VFP instruction. Returns nonzero if an error occurred
(ie. an undefined instruction). */
-static int disas_vfp_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_vfp_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
int dp, veclen;
case ARM_VFP_MVFR0:
case ARM_VFP_MVFR1:
if (IS_USER(s)
- || !arm_feature(env, ARM_FEATURE_VFP3))
+ || !arm_feature(env, ARM_FEATURE_MVFR))
return 1;
tmp = load_cpu_field(vfp.xregs[rn]);
break;
/* Source and destination the same. */
gen_mov_F0_vreg(dp, rd);
break;
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ /* VCVTB, VCVTT: only present with the halfprec extension,
+ * UNPREDICTABLE if bit 8 is set (we choose to UNDEF)
+ */
+ if (dp || !arm_feature(env, ARM_FEATURE_VFP_FP16)) {
+ return 1;
+ }
+ /* Otherwise fall through */
default:
/* One source operand. */
gen_mov_F0_vreg(dp, rm);
for (;;) {
/* Perform the calculation. */
switch (op) {
- case 0: /* mac: fd + (fn * fm) */
- gen_vfp_mul(dp);
- gen_mov_F1_vreg(dp, rd);
+ case 0: /* VMLA: fd + (fn * fm) */
+ /* Note that order of inputs to the add matters for NaNs */
+ gen_vfp_F1_mul(dp);
+ gen_mov_F0_vreg(dp, rd);
gen_vfp_add(dp);
break;
- case 1: /* nmac: fd - (fn * fm) */
+ case 1: /* VMLS: fd + -(fn * fm) */
gen_vfp_mul(dp);
- gen_vfp_neg(dp);
- gen_mov_F1_vreg(dp, rd);
+ gen_vfp_F1_neg(dp);
+ gen_mov_F0_vreg(dp, rd);
gen_vfp_add(dp);
break;
- case 2: /* msc: -fd + (fn * fm) */
- gen_vfp_mul(dp);
- gen_mov_F1_vreg(dp, rd);
- gen_vfp_sub(dp);
+ case 2: /* VNMLS: -fd + (fn * fm) */
+ /* Note that it isn't valid to replace (-A + B) with (B - A)
+ * or similar plausible looking simplifications
+ * because this will give wrong results for NaNs.
+ * (If A is a NaN, negating it merely flips its sign bit, so
+ * (-A + B) propagates the sign-flipped NaN, whereas (B - A)
+ * would propagate A unchanged.)
+ */
+ gen_vfp_F1_mul(dp);
+ gen_mov_F0_vreg(dp, rd);
+ gen_vfp_neg(dp);
+ gen_vfp_add(dp);
break;
- case 3: /* nmsc: -fd - (fn * fm) */
+ case 3: /* VNMLA: -fd + -(fn * fm) */
gen_vfp_mul(dp);
+ gen_vfp_F1_neg(dp);
+ gen_mov_F0_vreg(dp, rd);
gen_vfp_neg(dp);
- gen_mov_F1_vreg(dp, rd);
- gen_vfp_sub(dp);
+ gen_vfp_add(dp);
break;
case 4: /* mul: fn * fm */
gen_vfp_mul(dp);
case 8: /* div: fn / fm */
gen_vfp_div(dp);
break;
+ case 10: /* VFNMS : fd = muladd(-fd, fn, fm) */
+ case 11: /* VFNMA : fd = muladd(-fd, -fn, fm) */
+ case 12: /* VFMA : fd = muladd( fd, fn, fm) */
+ case 13: /* VFMS : fd = muladd( fd, -fn, fm) */
+ /* These are fused multiply-add, and must be done as one
+ * floating point operation with no rounding between the
+ * multiplication and addition steps.
+ * NB that doing the negations here as separate steps is
+ * correct: an input NaN should come out with its sign bit
+ * flipped if it is a negated-input.
+ */
+ if (!arm_feature(env, ARM_FEATURE_VFP4)) {
+ return 1;
+ }
+ if (dp) {
+ TCGv_ptr fpst;
+ TCGv_i64 frd;
+ if (op & 1) {
+ /* VFNMA, VFMS */
+ gen_helper_vfp_negd(cpu_F0d, cpu_F0d);
+ }
+ frd = tcg_temp_new_i64();
+ tcg_gen_ld_f64(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ /* VFNMA, VFNMS */
+ gen_helper_vfp_negd(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladdd(cpu_F0d, cpu_F0d,
+ cpu_F1d, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i64(frd);
+ } else {
+ TCGv_ptr fpst;
+ TCGv_i32 frd;
+ if (op & 1) {
+ /* VFNMA, VFMS */
+ gen_helper_vfp_negs(cpu_F0s, cpu_F0s);
+ }
+ frd = tcg_temp_new_i32();
+ tcg_gen_ld_f32(frd, cpu_env, vfp_reg_offset(dp, rd));
+ if (op & 2) {
+ gen_helper_vfp_negs(frd, frd);
+ }
+ fpst = get_fpstatus_ptr(0);
+ gen_helper_vfp_muladds(cpu_F0s, cpu_F0s,
+ cpu_F1s, frd, fpst);
+ tcg_temp_free_ptr(fpst);
+ tcg_temp_free_i32(frd);
+ }
+ break;
case 14: /* fconst */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
gen_vfp_sqrt(dp);
break;
case 4: /* vcvtb.f32.f16 */
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
- return 1;
tmp = gen_vfp_mrs();
tcg_gen_ext16u_i32(tmp, tmp);
gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
tcg_temp_free_i32(tmp);
break;
case 5: /* vcvtt.f32.f16 */
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
- return 1;
tmp = gen_vfp_mrs();
tcg_gen_shri_i32(tmp, tmp, 16);
gen_helper_vfp_fcvt_f16_to_f32(cpu_F0s, tmp, cpu_env);
tcg_temp_free_i32(tmp);
break;
case 6: /* vcvtb.f16.f32 */
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
- return 1;
tmp = tcg_temp_new_i32();
gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
gen_mov_F0_vreg(0, rd);
gen_vfp_msr(tmp);
break;
case 7: /* vcvtt.f16.f32 */
- if (!arm_feature(env, ARM_FEATURE_VFP_FP16))
- return 1;
tmp = tcg_temp_new_i32();
gen_helper_vfp_fcvt_f32_to_f16(tmp, cpu_F0s, cpu_env);
tcg_gen_shli_i32(tmp, tmp, 16);
gen_helper_vfp_fcvtds(cpu_F0d, cpu_F0s, cpu_env);
break;
case 16: /* fuito */
- gen_vfp_uito(dp);
+ gen_vfp_uito(dp, 0);
break;
case 17: /* fsito */
- gen_vfp_sito(dp);
+ gen_vfp_sito(dp, 0);
break;
case 20: /* fshto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_shto(dp, 16 - rm);
+ gen_vfp_shto(dp, 16 - rm, 0);
break;
case 21: /* fslto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_slto(dp, 32 - rm);
+ gen_vfp_slto(dp, 32 - rm, 0);
break;
case 22: /* fuhto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_uhto(dp, 16 - rm);
+ gen_vfp_uhto(dp, 16 - rm, 0);
break;
case 23: /* fulto */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_ulto(dp, 32 - rm);
+ gen_vfp_ulto(dp, 32 - rm, 0);
break;
case 24: /* ftoui */
- gen_vfp_toui(dp);
+ gen_vfp_toui(dp, 0);
break;
case 25: /* ftouiz */
- gen_vfp_touiz(dp);
+ gen_vfp_touiz(dp, 0);
break;
case 26: /* ftosi */
- gen_vfp_tosi(dp);
+ gen_vfp_tosi(dp, 0);
break;
case 27: /* ftosiz */
- gen_vfp_tosiz(dp);
+ gen_vfp_tosiz(dp, 0);
break;
case 28: /* ftosh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosh(dp, 16 - rm);
+ gen_vfp_tosh(dp, 16 - rm, 0);
break;
case 29: /* ftosl */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_tosl(dp, 32 - rm);
+ gen_vfp_tosl(dp, 32 - rm, 0);
break;
case 30: /* ftouh */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_touh(dp, 16 - rm);
+ gen_vfp_touh(dp, 16 - rm, 0);
break;
case 31: /* ftoul */
if (!arm_feature(env, ARM_FEATURE_VFP3))
return 1;
- gen_vfp_toul(dp, 32 - rm);
+ gen_vfp_toul(dp, 32 - rm, 0);
break;
default: /* undefined */
- printf ("rn:%d\n", rn);
return 1;
}
break;
default: /* undefined */
- printf ("op:%d\n", op);
return 1;
}
VFP_DREG_D(rd, insn);
else
rd = VFP_SREG_D(insn);
- if (s->thumb && rn == 15) {
- addr = tcg_temp_new_i32();
- tcg_gen_movi_i32(addr, s->pc & ~2);
- } else {
- addr = load_reg(s, rn);
- }
if ((insn & 0x01200000) == 0x01000000) {
/* Single load/store */
offset = (insn & 0xff) << 2;
if ((insn & (1 << 23)) == 0)
offset = -offset;
+ if (s->thumb && rn == 15) {
+ /* This is actually UNPREDICTABLE */
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc & ~2);
+ } else {
+ addr = load_reg(s, rn);
+ }
tcg_gen_addi_i32(addr, addr, offset);
if (insn & (1 << 20)) {
gen_vfp_ld(s, dp, addr);
tcg_temp_free_i32(addr);
} else {
/* load/store multiple */
+ int w = insn & (1 << 21);
if (dp)
n = (insn >> 1) & 0x7f;
else
n = insn & 0xff;
+ if (w && !(((insn >> 23) ^ (insn >> 24)) & 1)) {
+ /* P == U, W == 1 => UNDEF */
+ return 1;
+ }
+ if (n == 0 || (rd + n) > 32 || (dp && n > 16)) {
+ /* UNPREDICTABLE cases for bad immediates: we choose to
+ * UNDEF to avoid generating huge numbers of TCG ops
+ */
+ return 1;
+ }
+ if (rn == 15 && w) {
+ /* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
+ return 1;
+ }
+
+ if (s->thumb && rn == 15) {
+ /* This is actually UNPREDICTABLE */
+ addr = tcg_temp_new_i32();
+ tcg_gen_movi_i32(addr, s->pc & ~2);
+ } else {
+ addr = load_reg(s, rn);
+ }
if (insn & (1 << 24)) /* pre-decrement */
tcg_gen_addi_i32(addr, addr, -((insn & 0xff) << 2));
}
tcg_gen_addi_i32(addr, addr, offset);
}
- if (insn & (1 << 21)) {
+ if (w) {
/* writeback */
if (insn & (1 << 24))
offset = -offset * n;
}
/* Return the mask of PSR bits set by a MSR instruction. */
-static uint32_t msr_mask(CPUState *env, DisasContext *s, int flags, int spsr) {
+static uint32_t msr_mask(CPUARMState *env, DisasContext *s, int flags, int spsr) {
uint32_t mask;
mask = 0;
if (q) {
switch (size) {
case 0:
- gen_helper_neon_qunzip8(tmp, tmp2);
+ gen_helper_neon_qunzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_qunzip16(tmp, tmp2);
+ gen_helper_neon_qunzip16(cpu_env, tmp, tmp2);
break;
case 2:
- gen_helper_neon_qunzip32(tmp, tmp2);
+ gen_helper_neon_qunzip32(cpu_env, tmp, tmp2);
break;
default:
abort();
} else {
switch (size) {
case 0:
- gen_helper_neon_unzip8(tmp, tmp2);
+ gen_helper_neon_unzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_unzip16(tmp, tmp2);
+ gen_helper_neon_unzip16(cpu_env, tmp, tmp2);
break;
default:
abort();
if (q) {
switch (size) {
case 0:
- gen_helper_neon_qzip8(tmp, tmp2);
+ gen_helper_neon_qzip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_qzip16(tmp, tmp2);
+ gen_helper_neon_qzip16(cpu_env, tmp, tmp2);
break;
case 2:
- gen_helper_neon_qzip32(tmp, tmp2);
+ gen_helper_neon_qzip32(cpu_env, tmp, tmp2);
break;
default:
abort();
} else {
switch (size) {
case 0:
- gen_helper_neon_zip8(tmp, tmp2);
+ gen_helper_neon_zip8(cpu_env, tmp, tmp2);
break;
case 1:
- gen_helper_neon_zip16(tmp, tmp2);
+ gen_helper_neon_zip16(cpu_env, tmp, tmp2);
break;
default:
abort();
/* Translate a NEON load/store element instruction. Return nonzero if the
instruction is invalid. */
-static int disas_neon_ls_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_ls_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int rd, rn, rm;
int op;
static inline void gen_neon_narrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_narrow_sat_s8(dest, src); break;
- case 1: gen_helper_neon_narrow_sat_s16(dest, src); break;
- case 2: gen_helper_neon_narrow_sat_s32(dest, src); break;
+ case 0: gen_helper_neon_narrow_sat_s8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_s16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_s32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_narrow_satu(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_narrow_sat_u8(dest, src); break;
- case 1: gen_helper_neon_narrow_sat_u16(dest, src); break;
- case 2: gen_helper_neon_narrow_sat_u32(dest, src); break;
+ case 0: gen_helper_neon_narrow_sat_u8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_narrow_sat_u16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_narrow_sat_u32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_unarrow_sats(int size, TCGv dest, TCGv_i64 src)
{
switch (size) {
- case 0: gen_helper_neon_unarrow_sat8(dest, src); break;
- case 1: gen_helper_neon_unarrow_sat16(dest, src); break;
- case 2: gen_helper_neon_unarrow_sat32(dest, src); break;
+ case 0: gen_helper_neon_unarrow_sat8(dest, cpu_env, src); break;
+ case 1: gen_helper_neon_unarrow_sat16(dest, cpu_env, src); break;
+ case 2: gen_helper_neon_unarrow_sat32(dest, cpu_env, src); break;
default: abort();
}
}
static inline void gen_neon_addl_saturate(TCGv_i64 op0, TCGv_i64 op1, int size)
{
switch (size) {
- case 1: gen_helper_neon_addl_saturate_s32(op0, op0, op1); break;
- case 2: gen_helper_neon_addl_saturate_s64(op0, op0, op1); break;
+ case 1: gen_helper_neon_addl_saturate_s32(op0, cpu_env, op0, op1); break;
+ case 2: gen_helper_neon_addl_saturate_s64(op0, cpu_env, op0, op1); break;
default: abort();
}
}
#define NEON_3R_VPMIN 21
#define NEON_3R_VQDMULH_VQRDMULH 22
#define NEON_3R_VPADD 23
+#define NEON_3R_VFM 25 /* VFMA, VFMS: float fused multiply-add */
#define NEON_3R_FLOAT_ARITH 26 /* float VADD, VSUB, VPADD, VABD */
#define NEON_3R_FLOAT_MULTIPLY 27 /* float VMLA, VMLS, VMUL */
#define NEON_3R_FLOAT_CMP 28 /* float VCEQ, VCGE, VCGT */
[NEON_3R_VPMIN] = 0x7,
[NEON_3R_VQDMULH_VQRDMULH] = 0x6,
[NEON_3R_VPADD] = 0x7,
+ [NEON_3R_VFM] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_ARITH] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_MULTIPLY] = 0x5, /* size bit 1 encodes op */
[NEON_3R_FLOAT_CMP] = 0x5, /* size bit 1 encodes op */
We process data in a mixture of 32-bit and 64-bit chunks.
Mostly we use 32-bit chunks so we can use normal scalar instructions. */
-static int disas_neon_data_insn(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_neon_data_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
int op;
int q;
switch (op) {
case NEON_3R_VQADD:
if (u) {
- gen_helper_neon_qadd_u64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qadd_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qadd_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qadd_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
}
break;
case NEON_3R_VQSUB:
if (u) {
- gen_helper_neon_qsub_u64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qsub_u64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qsub_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qsub_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
}
break;
case NEON_3R_VSHL:
break;
case NEON_3R_VQSHL:
if (u) {
- gen_helper_neon_qshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
} else {
- gen_helper_neon_qshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
}
break;
case NEON_3R_VRSHL:
break;
case NEON_3R_VQRSHL:
if (u) {
- gen_helper_neon_qrshl_u64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qrshl_u64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
} else {
- gen_helper_neon_qrshl_s64(cpu_V0, cpu_V1, cpu_V0);
+ gen_helper_neon_qrshl_s64(cpu_V0, cpu_env,
+ cpu_V1, cpu_V0);
}
break;
case NEON_3R_VADD_VSUB:
return 1;
}
break;
+ case NEON_3R_VFM:
+ if (!arm_feature(env, ARM_FEATURE_VFP4) || u) {
+ return 1;
+ }
+ break;
default:
break;
}
GEN_NEON_INTEGER_OP(hadd);
break;
case NEON_3R_VQADD:
- GEN_NEON_INTEGER_OP(qadd);
+ GEN_NEON_INTEGER_OP_ENV(qadd);
break;
case NEON_3R_VRHADD:
GEN_NEON_INTEGER_OP(rhadd);
GEN_NEON_INTEGER_OP(hsub);
break;
case NEON_3R_VQSUB:
- GEN_NEON_INTEGER_OP(qsub);
+ GEN_NEON_INTEGER_OP_ENV(qsub);
break;
case NEON_3R_VCGT:
GEN_NEON_INTEGER_OP(cgt);
GEN_NEON_INTEGER_OP(shl);
break;
case NEON_3R_VQSHL:
- GEN_NEON_INTEGER_OP(qshl);
+ GEN_NEON_INTEGER_OP_ENV(qshl);
break;
case NEON_3R_VRSHL:
GEN_NEON_INTEGER_OP(rshl);
break;
case NEON_3R_VQRSHL:
- GEN_NEON_INTEGER_OP(qrshl);
+ GEN_NEON_INTEGER_OP_ENV(qrshl);
break;
case NEON_3R_VMAX:
GEN_NEON_INTEGER_OP(max);
case NEON_3R_VQDMULH_VQRDMULH: /* Multiply high. */
if (!u) { /* VQDMULH */
switch (size) {
- case 1: gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2); break;
+ case 1:
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
default: abort();
}
} else { /* VQRDMULH */
switch (size) {
- case 1: gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2); break;
- case 2: gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2); break;
+ case 1:
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
+ break;
+ case 2:
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
+ break;
default: abort();
}
}
}
break;
case NEON_3R_FLOAT_ARITH: /* Floating point arithmetic. */
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
switch ((u << 2) | size) {
case 0: /* VADD */
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ case 4: /* VPADD */
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
break;
case 2: /* VSUB */
- gen_helper_neon_sub_f32(tmp, tmp, tmp2);
- break;
- case 4: /* VPADD */
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ gen_helper_vfp_subs(tmp, tmp, tmp2, fpstatus);
break;
case 6: /* VABD */
- gen_helper_neon_abd_f32(tmp, tmp, tmp2);
+ gen_helper_neon_abd_f32(tmp, tmp, tmp2, fpstatus);
break;
default:
abort();
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_MULTIPLY:
- gen_helper_neon_mul_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
if (!u) {
tcg_temp_free_i32(tmp2);
tmp2 = neon_load_reg(rd, pass);
if (size == 0) {
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
} else {
- gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
}
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_CMP:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
if (!u) {
- gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
} else {
- if (size == 0)
- gen_helper_neon_cge_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+ if (size == 0) {
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
}
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_ACMP:
- if (size == 0)
- gen_helper_neon_acge_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_acgt_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_neon_acge_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_acgt_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_FLOAT_MINMAX:
- if (size == 0)
- gen_helper_neon_max_f32(tmp, tmp, tmp2);
- else
- gen_helper_neon_min_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ if (size == 0) {
+ gen_helper_neon_max_f32(tmp, tmp, tmp2, fpstatus);
+ } else {
+ gen_helper_neon_min_f32(tmp, tmp, tmp2, fpstatus);
+ }
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_3R_VRECPS_VRSQRTS:
if (size == 0)
gen_helper_recps_f32(tmp, tmp, tmp2, cpu_env);
else
gen_helper_rsqrts_f32(tmp, tmp, tmp2, cpu_env);
break;
+ case NEON_3R_VFM:
+ {
+ /* VFMA, VFMS: fused multiply-add */
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ TCGv_i32 tmp3 = neon_load_reg(rd, pass);
+ if (size) {
+ /* VFMS */
+ gen_helper_vfp_negs(tmp, tmp);
+ }
+ gen_helper_vfp_muladds(tmp, tmp, tmp2, tmp3, fpstatus);
+ tcg_temp_free_i32(tmp3);
+ tcg_temp_free_ptr(fpstatus);
+ break;
+ }
default:
abort();
}
gen_helper_neon_shl_u64(cpu_V0, cpu_V0, cpu_V1);
break;
case 6: /* VQSHLU */
- gen_helper_neon_qshlu_s64(cpu_V0, cpu_V0, cpu_V1);
+ gen_helper_neon_qshlu_s64(cpu_V0, cpu_env,
+ cpu_V0, cpu_V1);
break;
case 7: /* VQSHL */
if (u) {
- gen_helper_neon_qshl_u64(cpu_V0,
+ gen_helper_neon_qshl_u64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
} else {
- gen_helper_neon_qshl_s64(cpu_V0,
+ gen_helper_neon_qshl_s64(cpu_V0, cpu_env,
cpu_V0, cpu_V1);
}
break;
case 6: /* VQSHLU */
switch (size) {
case 0:
- gen_helper_neon_qshlu_s8(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s8(tmp, cpu_env,
+ tmp, tmp2);
break;
case 1:
- gen_helper_neon_qshlu_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s16(tmp, cpu_env,
+ tmp, tmp2);
break;
case 2:
- gen_helper_neon_qshlu_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qshlu_s32(tmp, cpu_env,
+ tmp, tmp2);
break;
default:
abort();
}
break;
case 7: /* VQSHL */
- GEN_NEON_INTEGER_OP(qshl);
+ GEN_NEON_INTEGER_OP_ENV(qshl);
break;
}
tcg_temp_free_i32(tmp2);
tcg_gen_ld_f32(cpu_F0s, cpu_env, neon_reg_offset(rm, pass));
if (!(op & 1)) {
if (u)
- gen_vfp_ulto(0, shift);
+ gen_vfp_ulto(0, shift, 1);
else
- gen_vfp_slto(0, shift);
+ gen_vfp_slto(0, shift, 1);
} else {
if (u)
- gen_vfp_toul(0, shift);
+ gen_vfp_toul(0, shift, 1);
else
- gen_vfp_tosl(0, shift);
+ gen_vfp_tosl(0, shift, 1);
}
tcg_gen_st_f32(cpu_F0s, cpu_env, neon_reg_offset(rd, pass));
}
tmp2 = neon_load_reg(rn, pass);
if (op == 12) {
if (size == 1) {
- gen_helper_neon_qdmulh_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qdmulh_s16(tmp, cpu_env, tmp, tmp2);
} else {
- gen_helper_neon_qdmulh_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qdmulh_s32(tmp, cpu_env, tmp, tmp2);
}
} else if (op == 13) {
if (size == 1) {
- gen_helper_neon_qrdmulh_s16(tmp, tmp, tmp2);
+ gen_helper_neon_qrdmulh_s16(tmp, cpu_env, tmp, tmp2);
} else {
- gen_helper_neon_qrdmulh_s32(tmp, tmp, tmp2);
+ gen_helper_neon_qrdmulh_s32(tmp, cpu_env, tmp, tmp2);
}
} else if (op & 1) {
- gen_helper_neon_mul_f32(tmp, tmp, tmp2);
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_muls(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
} else {
switch (size) {
case 0: gen_helper_neon_mul_u8(tmp, tmp, tmp2); break;
gen_neon_add(size, tmp, tmp2);
break;
case 1:
- gen_helper_neon_add_f32(tmp, tmp, tmp2);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_adds(tmp, tmp, tmp2, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case 4:
gen_neon_rsb(size, tmp, tmp2);
break;
case 5:
- gen_helper_neon_sub_f32(tmp, tmp2, tmp);
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
+ gen_helper_vfp_subs(tmp, tmp2, tmp, fpstatus);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
default:
abort();
}
break;
case NEON_2RM_VQABS:
switch (size) {
- case 0: gen_helper_neon_qabs_s8(tmp, tmp); break;
- case 1: gen_helper_neon_qabs_s16(tmp, tmp); break;
- case 2: gen_helper_neon_qabs_s32(tmp, tmp); break;
+ case 0:
+ gen_helper_neon_qabs_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qabs_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qabs_s32(tmp, cpu_env, tmp);
+ break;
default: abort();
}
break;
case NEON_2RM_VQNEG:
switch (size) {
- case 0: gen_helper_neon_qneg_s8(tmp, tmp); break;
- case 1: gen_helper_neon_qneg_s16(tmp, tmp); break;
- case 2: gen_helper_neon_qneg_s32(tmp, tmp); break;
+ case 0:
+ gen_helper_neon_qneg_s8(tmp, cpu_env, tmp);
+ break;
+ case 1:
+ gen_helper_neon_qneg_s16(tmp, cpu_env, tmp);
+ break;
+ case 2:
+ gen_helper_neon_qneg_s32(tmp, cpu_env, tmp);
+ break;
default: abort();
}
break;
tcg_temp_free(tmp2);
break;
case NEON_2RM_VCGT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cgt_f32(tmp, tmp, tmp2);
+ gen_helper_neon_cgt_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCGE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cge_f32(tmp, tmp, tmp2);
+ gen_helper_neon_cge_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCEQ0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_ceq_f32(tmp, tmp, tmp2);
+ gen_helper_neon_ceq_f32(tmp, tmp, tmp2, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCLE0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cge_f32(tmp, tmp2, tmp);
+ gen_helper_neon_cge_f32(tmp, tmp2, tmp, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VCLT0_F:
+ {
+ TCGv_ptr fpstatus = get_fpstatus_ptr(1);
tmp2 = tcg_const_i32(0);
- gen_helper_neon_cgt_f32(tmp, tmp2, tmp);
+ gen_helper_neon_cgt_f32(tmp, tmp2, tmp, fpstatus);
tcg_temp_free(tmp2);
+ tcg_temp_free_ptr(fpstatus);
break;
+ }
case NEON_2RM_VABS_F:
gen_vfp_abs(0);
break;
gen_helper_rsqrte_f32(cpu_F0s, cpu_F0s, cpu_env);
break;
case NEON_2RM_VCVT_FS: /* VCVT.F32.S32 */
- gen_vfp_sito(0);
+ gen_vfp_sito(0, 1);
break;
case NEON_2RM_VCVT_FU: /* VCVT.F32.U32 */
- gen_vfp_uito(0);
+ gen_vfp_uito(0, 1);
break;
case NEON_2RM_VCVT_SF: /* VCVT.S32.F32 */
- gen_vfp_tosiz(0);
+ gen_vfp_tosiz(0, 1);
break;
case NEON_2RM_VCVT_UF: /* VCVT.U32.F32 */
- gen_vfp_touiz(0);
+ gen_vfp_touiz(0, 1);
break;
default:
/* Reserved op values were caught by the
return 0;
}
-static int disas_cp14_read(CPUState * env, DisasContext *s, uint32_t insn)
+static int disas_coproc_insn(CPUARMState *env, DisasContext *s, uint32_t insn)
{
- int crn = (insn >> 16) & 0xf;
- int crm = insn & 0xf;
- int op1 = (insn >> 21) & 7;
- int op2 = (insn >> 5) & 7;
- int rt = (insn >> 12) & 0xf;
- TCGv tmp;
-
- /* Minimal set of debug registers, since we don't support debug */
- if (op1 == 0 && crn == 0 && op2 == 0) {
- switch (crm) {
- case 0:
- /* DBGDIDR: just RAZ. In particular this means the
- * "debug architecture version" bits will read as
- * a reserved value, which should cause Linux to
- * not try to use the debug hardware.
- */
- tmp = tcg_const_i32(0);
- store_reg(s, rt, tmp);
- return 0;
- case 1:
- case 2:
- /* DBGDRAR and DBGDSAR: v7 only. Always RAZ since we
- * don't implement memory mapped debug components
- */
- if (ENABLE_ARCH_7) {
- tmp = tcg_const_i32(0);
- store_reg(s, rt, tmp);
- return 0;
- }
- break;
- default:
- break;
- }
- }
-
- if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
- if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
- /* TEECR */
- if (IS_USER(s))
- return 1;
- tmp = load_cpu_field(teecr);
- store_reg(s, rt, tmp);
- return 0;
- }
- if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
- /* TEEHBR */
- if (IS_USER(s) && (env->teecr & 1))
- return 1;
- tmp = load_cpu_field(teehbr);
- store_reg(s, rt, tmp);
- return 0;
- }
- }
- fprintf(stderr, "Unknown cp14 read op1:%d crn:%d crm:%d op2:%d\n",
- op1, crn, crm, op2);
- return 1;
-}
-
-static int disas_cp14_write(CPUState * env, DisasContext *s, uint32_t insn)
-{
- int crn = (insn >> 16) & 0xf;
- int crm = insn & 0xf;
- int op1 = (insn >> 21) & 7;
- int op2 = (insn >> 5) & 7;
- int rt = (insn >> 12) & 0xf;
- TCGv tmp;
-
- if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
- if (op1 == 6 && crn == 0 && crm == 0 && op2 == 0) {
- /* TEECR */
- if (IS_USER(s))
- return 1;
- tmp = load_reg(s, rt);
- gen_helper_set_teecr(cpu_env, tmp);
- tcg_temp_free_i32(tmp);
- return 0;
- }
- if (op1 == 6 && crn == 1 && crm == 0 && op2 == 0) {
- /* TEEHBR */
- if (IS_USER(s) && (env->teecr & 1))
- return 1;
- tmp = load_reg(s, rt);
- store_cpu_field(tmp, teehbr);
- return 0;
- }
- }
- fprintf(stderr, "Unknown cp14 write op1:%d crn:%d crm:%d op2:%d\n",
- op1, crn, crm, op2);
- return 1;
-}
-
-static int disas_coproc_insn(CPUState * env, DisasContext *s, uint32_t insn)
-{
- int cpnum;
+ int cpnum, is64, crn, crm, opc1, opc2, isread, rt, rt2;
+ const ARMCPRegInfo *ri;
+ ARMCPU *cpu = arm_env_get_cpu(env);
cpnum = (insn >> 8) & 0xf;
if (arm_feature(env, ARM_FEATURE_XSCALE)
&& ((env->cp15.c15_cpar ^ 0x3fff) & (1 << cpnum)))
return 1;
+ /* First check for coprocessor space used for actual instructions */
switch (cpnum) {
case 0:
case 1:
case 10:
case 11:
return disas_vfp_insn (env, s, insn);
- case 14:
- /* Coprocessors 7-15 are architecturally reserved by ARM.
- Unfortunately Intel decided to ignore this. */
- if (arm_feature(env, ARM_FEATURE_XSCALE))
- goto board;
- if (insn & (1 << 20))
- return disas_cp14_read(env, s, insn);
- else
- return disas_cp14_write(env, s, insn);
- case 15:
- return disas_cp15_insn (env, s, insn);
default:
- board:
- /* Unknown coprocessor. See if the board has hooked it. */
- return disas_cp_insn (env, s, insn);
+ break;
+ }
+
+ /* Otherwise treat as a generic register access */
+ is64 = (insn & (1 << 25)) == 0;
+ if (!is64 && ((insn & (1 << 4)) == 0)) {
+ /* cdp */
+ return 1;
+ }
+
+ crm = insn & 0xf;
+ if (is64) {
+ crn = 0;
+ opc1 = (insn >> 4) & 0xf;
+ opc2 = 0;
+ rt2 = (insn >> 16) & 0xf;
+ } else {
+ crn = (insn >> 16) & 0xf;
+ opc1 = (insn >> 21) & 7;
+ opc2 = (insn >> 5) & 7;
+ rt2 = 0;
+ }
+ isread = (insn >> 20) & 1;
+ rt = (insn >> 12) & 0xf;
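+
+ /* For MRRC/MCRR (is64) the transferred register pair is rt/rt2 and
+ * opc1 sits in bits [7:4]; crn and opc2 are unused. The decoded
+ * coordinates are packed by ENCODE_CP_REG into the key used by
+ * get_arm_cp_reginfo() below.
+ */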
+
+ ri = get_arm_cp_reginfo(cpu,
+ ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ if (ri) {
+ /* Check access permissions */
+ if (!cp_access_ok(env, ri, isread)) {
+ return 1;
+ }
+
+ /* Handle special cases first */
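+ /* The mask expression below strips the ordinary flag bits from
+ * ri->type but preserves ARM_CP_SPECIAL, so special values such as
+ * ARM_CP_NOP and ARM_CP_WFI match their case labels exactly.
+ */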
+ switch (ri->type & ~(ARM_CP_FLAG_MASK & ~ARM_CP_SPECIAL)) {
+ case ARM_CP_NOP:
+ return 0;
+ case ARM_CP_WFI:
+ if (isread) {
+ return 1;
+ }
+ gen_set_pc_im(s->pc);
+ s->is_jmp = DISAS_WFI;
+ /* WFI is fully handled here; don't fall through to the
+ * generic register read/write code below.
+ */
+ return 0;
+ default:
+ break;
+ }
+
+ if (isread) {
+ /* Read */
+ if (is64) {
+ TCGv_i64 tmp64;
+ TCGv_i32 tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp64 = tcg_const_i64(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp64 = tcg_temp_new_i64();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg64(tmp64, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp64 = tcg_temp_new_i64();
+ tcg_gen_ld_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tmp = tcg_temp_new_i32();
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ store_reg(s, rt, tmp);
+ /* store_reg() frees its input, so a fresh temp is needed for
+ * the high half, and tmp64 must be freed once both halves are out.
+ */
+ tmp = tcg_temp_new_i32();
+ tcg_gen_shri_i64(tmp64, tmp64, 32);
+ tcg_gen_trunc_i64_i32(tmp, tmp64);
+ tcg_temp_free_i64(tmp64);
+ store_reg(s, rt2, tmp);
+ } else {
+ TCGv tmp;
+ if (ri->type & ARM_CP_CONST) {
+ tmp = tcg_const_i32(ri->resetvalue);
+ } else if (ri->readfn) {
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp = tcg_temp_new_i32();
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_get_cp_reg(tmp, cpu_env, tmpptr);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tmp = load_cpu_offset(ri->fieldoffset);
+ }
+ if (rt == 15) {
+ /* Destination register of r15 for 32 bit loads sets
+ * the condition codes from the high 4 bits of the value
+ */
+ gen_set_nzcv(tmp);
+ tcg_temp_free_i32(tmp);
+ } else {
+ store_reg(s, rt, tmp);
+ }
+ }
+ } else {
+ /* Write */
+ if (ri->type & ARM_CP_CONST) {
+ /* If not forbidden by access permissions, treat as WI */
+ return 0;
+ }
+
+ if (is64) {
+ TCGv tmplo, tmphi;
+ TCGv_i64 tmp64 = tcg_temp_new_i64();
+ tmplo = load_reg(s, rt);
+ tmphi = load_reg(s, rt2);
+ tcg_gen_concat_i32_i64(tmp64, tmplo, tmphi);
+ tcg_temp_free_i32(tmplo);
+ tcg_temp_free_i32(tmphi);
+ if (ri->writefn) {
+ TCGv_ptr tmpptr = tcg_const_ptr(ri);
+ gen_set_pc_im(s->pc);
+ gen_helper_set_cp_reg64(cpu_env, tmpptr, tmp64);
+ tcg_temp_free_ptr(tmpptr);
+ } else {
+ tcg_gen_st_i64(tmp64, cpu_env, ri->fieldoffset);
+ }
+ tcg_temp_free_i64(tmp64);
+ } else {
+ if (ri->writefn) {
+ TCGv tmp;
+ TCGv_ptr tmpptr;
+ gen_set_pc_im(s->pc);
+ tmp = load_reg(s, rt);
+ tmpptr = tcg_const_ptr(ri);
+ gen_helper_set_cp_reg(cpu_env, tmpptr, tmp);
+ tcg_temp_free_ptr(tmpptr);
+ tcg_temp_free_i32(tmp);
+ } else {
+ TCGv tmp = load_reg(s, rt);
+ store_cpu_offset(tmp, ri->fieldoffset);
+ }
+ }
+ /* We default to ending the TB on a coprocessor register write,
+ * but allow this to be suppressed by the register definition
+ * (usually only necessary to work around guest bugs).
+ */
+ if (!(ri->type & ARM_CP_SUPPRESS_TB_END)) {
+ gen_lookup_tb(s);
+ }
+ }
+ return 0;
}
+
+ return 1;
}
}
#endif
-static void disas_arm_insn(CPUState * env, DisasContext *s)
+static void disas_arm_insn(CPUARMState *env, DisasContext *s)
{
unsigned int cond, insn, val, op1, i, shift, rm, rs, rn, rd, sh;
TCGv tmp;
TCGv addr;
TCGv_i64 tmp64;
- insn = ldl_code(s->pc);
+ insn = arm_ldl_code(s->pc, s->bswap_code);
s->pc += 4;
/* M variants do not implement ARM mode. */
if ((insn & 0x0ffffdff) == 0x01010000) {
ARCH(6);
/* setend */
- if (insn & (1 << 9)) {
- /* BE8 mode not implemented. */
+ if (((insn >> 9) & 1) != s->bswap_code) {
+ /* Dynamic endianness switching not implemented. */
goto illegal_op;
}
return;
tmp = load_reg(s, rn);
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
}
break;
case 2: /* Multiplies (Type 3). */
- tmp = load_reg(s, rm);
- tmp2 = load_reg(s, rs);
- if (insn & (1 << 20)) {
+ switch ((insn >> 20) & 0x7) {
+ case 5:
+ if (((insn >> 6) ^ (insn >> 7)) & 1) {
+ /* op2 not 00x or 11x: UNDEF */
+ goto illegal_op;
+ }
/* Signed multiply most significant [accumulate].
(SMMUL, SMMLA, SMMLS) */
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
tmp64 = gen_muls_i64_i32(tmp, tmp2);
if (rd != 15) {
tcg_gen_trunc_i64_i32(tmp, tmp64);
tcg_temp_free_i64(tmp64);
store_reg(s, rn, tmp);
- } else {
+ break;
+ case 0:
+ case 4:
+ /* SMLAD, SMUAD, SMLSD, SMUSD, SMLALD, SMLSLD */
+ if (insn & (1 << 7)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
if (insn & (1 << 5))
gen_swap_half(tmp2);
gen_smul_dual(tmp, tmp2);
}
store_reg(s, rn, tmp);
}
+ break;
+ case 1:
+ case 3:
+ /* SDIV, UDIV */
+ if (!arm_feature(env, ARM_FEATURE_ARM_DIV)) {
+ goto illegal_op;
+ }
+ if (((insn >> 5) & 7) || (rd != 15)) {
+ goto illegal_op;
+ }
+ tmp = load_reg(s, rm);
+ tmp2 = load_reg(s, rs);
+ if (insn & (1 << 21)) {
+ gen_helper_udiv(tmp, tmp, tmp2);
+ } else {
+ gen_helper_sdiv(tmp, tmp, tmp2);
+ }
+ tcg_temp_free_i32(tmp2);
+ store_reg(s, rn, tmp);
+ break;
+ default:
+ goto illegal_op;
}
break;
case 3:
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
is not legal. */
-static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
+static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
{
uint32_t insn, imm, shift, offset;
uint32_t rd, rn, rm, rs;
/* Fall through to 32-bit decode. */
}
- insn = lduw_code(s->pc);
+ insn = arm_lduw_code(s->pc, s->bswap_code);
s->pc += 2;
insn |= (uint32_t)insn_hw1 << 16;
case 0x10: /* sel */
tmp2 = load_reg(s, rm);
tmp3 = tcg_temp_new_i32();
- tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUState, GE));
+ tcg_gen_ld_i32(tmp3, cpu_env, offsetof(CPUARMState, GE));
gen_helper_sel_flags(tmp, tmp3, tmp, tmp2);
tcg_temp_free_i32(tmp3);
tcg_temp_free_i32(tmp2);
tmp2 = load_reg(s, rm);
if ((op & 0x50) == 0x10) {
/* sdiv, udiv */
- if (!arm_feature(env, ARM_FEATURE_DIV))
+ if (!arm_feature(env, ARM_FEATURE_THUMB_DIV)) {
goto illegal_op;
+ }
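+ /* ARM_FEATURE_THUMB_DIV (checked above) covers cores which
+ * implement only the Thumb encoding of SDIV/UDIV;
+ * ARM_FEATURE_ARM_DIV, tested in the ARM decoder, implies the
+ * Thumb encoding too.
+ */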
if (op & 0x20)
gen_helper_udiv(tmp, tmp, tmp2);
else
goto illegal_op;
}
if (rn == 15) {
- /* UNPREDICTABLE or unallocated hint */
+ /* UNPREDICTABLE, unallocated hint or
+ * PLD/PLDW/PLI (literal)
+ */
return 0;
}
if (op1 & 1) {
- return 0; /* PLD* or unallocated hint */
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
}
if ((op2 == 0) || ((op2 & 0x3c) == 0x30)) {
- return 0; /* PLD* or unallocated hint */
+ return 0; /* PLD/PLDW/PLI or unallocated hint */
}
/* UNDEF space, or an UNPREDICTABLE */
return 1;
return 1;
}
-static void disas_thumb_insn(CPUState *env, DisasContext *s)
+static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
{
uint32_t val, insn, op, rm, rn, rd, shift, cond;
int32_t offset;
}
}
- insn = lduw_code(s->pc);
+ insn = arm_lduw_code(s->pc, s->bswap_code);
s->pc += 2;
switch (insn >> 12) {
store_reg(s, rd, tmp);
break;
- case 6: /* cps */
- ARCH(6);
- if (IS_USER(s))
+ case 6:
+ switch ((insn >> 5) & 7) {
+ case 2:
+ /* setend */
+ ARCH(6);
+ if (((insn >> 3) & 1) != s->bswap_code) {
+ /* Dynamic endianness switching not implemented. */
+ goto illegal_op;
+ }
break;
- if (IS_M(env)) {
- tmp = tcg_const_i32((insn & (1 << 4)) != 0);
- /* PRIMASK */
- if (insn & 1) {
- addr = tcg_const_i32(16);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
- tcg_temp_free_i32(addr);
+ case 3:
+ /* cps */
+ ARCH(6);
+ if (IS_USER(s)) {
+ break;
}
- /* FAULTMASK */
- if (insn & 2) {
- addr = tcg_const_i32(17);
- gen_helper_v7m_msr(cpu_env, addr, tmp);
- tcg_temp_free_i32(addr);
+ if (IS_M(env)) {
+ tmp = tcg_const_i32((insn & (1 << 4)) != 0);
+ /* FAULTMASK */
+ if (insn & 1) {
+ addr = tcg_const_i32(19);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
+ /* PRIMASK */
+ if (insn & 2) {
+ addr = tcg_const_i32(16);
+ gen_helper_v7m_msr(cpu_env, addr, tmp);
+ tcg_temp_free_i32(addr);
+ }
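+ /* 16 and 19 are the v7M SYSm numbers for PRIMASK and FAULTMASK
+ * respectively, as expected by the v7m_msr helper.
+ */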
+ tcg_temp_free_i32(tmp);
+ gen_lookup_tb(s);
+ } else {
+ if (insn & (1 << 4)) {
+ shift = CPSR_A | CPSR_I | CPSR_F;
+ } else {
+ shift = 0;
+ }
+ gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
}
- tcg_temp_free_i32(tmp);
- gen_lookup_tb(s);
- } else {
- if (insn & (1 << 4))
- shift = CPSR_A | CPSR_I | CPSR_F;
- else
- shift = 0;
- gen_set_psr_im(s, ((insn & 7) << 6), 0, shift);
+ break;
+ default:
+ goto undef;
}
break;
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
basic block 'tb'. If search_pc is TRUE, also generate PC
information for each intermediate instruction. */
-static inline void gen_intermediate_code_internal(CPUState *env,
+static inline void gen_intermediate_code_internal(CPUARMState *env,
TranslationBlock *tb,
int search_pc)
{
dc->singlestep_enabled = env->singlestep_enabled;
dc->condjmp = 0;
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
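+ /* bswap_code is set for BE8-style images, where instructions are
+ * stored in the opposite byte order from data; arm_ldl_code() and
+ * arm_lduw_code() byteswap each fetched insn when it is set.
+ */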
+ dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
#if !defined(CONFIG_USER_ONLY)
/* A note on handling of the condexec (IT) bits:
*
* We want to avoid the overhead of having to write the updated condexec
- * bits back to the CPUState for every instruction in an IT block. So:
+ * bits back to the CPUARMState for every instruction in an IT block. So:
* (1) if the condexec bits are not already zero then we write
- * zero back into the CPUState now. This avoids complications trying
+ * zero back into the CPUARMState now. This avoids complications trying
* to do it at the end of the block. (For example if we don't do this
* it's hard to identify whether we can safely skip writing condexec
* at the end of the TB, which we definitely want to do for the case
* where a TB doesn't do anything with the IT state at all.)
* (2) if we are going to leave the TB then we call gen_set_condexec()
- * which will write the correct value into CPUState if zero is wrong.
+ * which will write the correct value into CPUARMState if zero is wrong.
* This is done both for leaving the TB at the end, and for leaving
* it because of an exception we know will happen, which is done in
* gen_exception_insn(). The latter is necessary because we need to
* leave the TB with the PC/IT state just prior to execution of the
* instruction which caused the exception.
* (3) if we leave the TB unexpectedly (eg a data abort on a load)
- * then the CPUState will be wrong and we need to reset it.
+ * then the CPUARMState will be wrong and we need to reset it.
* This is handled in the same way as restoration of the
* PC in these situations: we will be called again with search_pc=1
* and generate a mapping of the condexec bits for each PC in
*
* Note that there are no instructions which can read the condexec
* bits, and none which can write non-static values to them, so
- * we don't need to care about whether CPUState is correct in the
+ * we don't need to care about whether CPUARMState is correct in the
* middle of a TB.
*/
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
qemu_log("----------------\n");
qemu_log("IN: %s\n", lookup_symbol(pc_start));
- log_target_disas(pc_start, dc->pc - pc_start, dc->thumb);
+ log_target_disas(pc_start, dc->pc - pc_start,
+ dc->thumb | (dc->bswap_code << 1));
qemu_log("\n");
}
#endif
}
}
-void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 0);
}
-void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
+void gen_intermediate_code_pc(CPUARMState *env, TranslationBlock *tb)
{
gen_intermediate_code_internal(env, tb, 1);
}
"???", "???", "???", "und", "???", "???", "???", "sys"
};
-void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
+void cpu_dump_state(CPUARMState *env, FILE *f, fprintf_function cpu_fprintf,
int flags)
{
int i;
#endif
}
-void restore_state_to_opc(CPUState *env, TranslationBlock *tb, int pc_pos)
+void restore_state_to_opc(CPUARMState *env, TranslationBlock *tb, int pc_pos)
{
env->regs[15] = gen_opc_pc[pc_pos];
env->condexec_bits = gen_opc_condexec_bits[pc_pos];