#define IS_USER(s) (s->user)
#endif
-TCGv_env cpu_env;
/* We reuse the same 64-bit temporaries for efficiency. */
static TCGv_i64 cpu_V0, cpu_V1, cpu_M0;
static TCGv_i32 cpu_R[16];
{
int i;
- cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
- tcg_ctx.tcg_env = cpu_env;
-
for (i = 0; i < 16; i++) {
cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
offsetof(CPUARMState, regs[i]),
return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0);
case ARMMMUIdx_MUser:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MUserNegPri);
case ARMMMUIdx_MSUser:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
+ case ARMMMUIdx_MSUserNegPri:
+ case ARMMMUIdx_MSPrivNegPri:
+ return arm_to_core_mmu_idx(ARMMMUIdx_MSUserNegPri);
case ARMMMUIdx_S2NS:
default:
g_assert_not_reached();
static void gen_nop_hint(DisasContext *s, int val)
{
switch (val) {
+ /* When running in MTTCG we don't generate jumps to the yield and
+ * WFE helpers, as doing so won't affect the scheduling of other vCPUs.
+ * If we wanted to model WFE/SEV more completely, so that we don't
+ * busy-spin unnecessarily, we would need to do something more involved.
+ */
case 1: /* yield */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_set_pc_im(s, s->pc);
s->base.is_jmp = DISAS_YIELD;
}
break;
case 3: /* wfi */
gen_set_pc_im(s, s->pc);
s->base.is_jmp = DISAS_WFI;
break;
case 2: /* wfe */
- if (!parallel_cpus) {
+ if (!(tb_cflags(s->base.tb) & CF_PARALLEL)) {
gen_set_pc_im(s, s->pc);
s->base.is_jmp = DISAS_WFE;
}
break;
}
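As context for the parallel_cpus to CF_PARALLEL change above: the new test reads the flag from the TB itself, so the decision is baked into the translated block rather than depending on a mutable global. A minimal sketch of the same check, assuming QEMU's existing tb_cflags() helper and CF_PARALLEL flag (sketch_should_emit_yield() is a made-up name, not part of the patch):

static inline bool sketch_should_emit_yield(const DisasContext *s)
{
    /* CF_PARALLEL is set in the TB's cflags when the block was translated
     * for parallel (MTTCG) execution; only emit the yield/WFE slow path
     * when it is clear. */
    return !(tb_cflags(s->base.tb) & CF_PARALLEL);
}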
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
}
}
- if ((s->base.tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
+ if ((tb_cflags(s->base.tb) & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
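For readers unfamiliar with the icount bracketing being touched here, the overall shape is roughly the sketch below. sketch_gen_io_sysreg_access() is a hypothetical wrapper written for this note; tb_cflags(), CF_USE_ICOUNT, ARM_CP_IO, gen_io_start(), gen_io_end() and gen_lookup_tb() are the existing QEMU helpers and flags already used by the patch:

static void sketch_gen_io_sysreg_access(DisasContext *s, const ARMCPRegInfo *ri)
{
    bool icount_io = (tb_cflags(s->base.tb) & CF_USE_ICOUNT)
                     && (ri->type & ARM_CP_IO);

    if (icount_io) {
        gen_io_start();      /* let icount account for the upcoming I/O access */
    }
    /* ... emit the actual register read or write here ... */
    if (icount_io) {
        gen_io_end();        /* I/O must end the TB, whether read or write ... */
        gen_lookup_tb(s);    /* ... so finish with a fresh TB lookup */
    }
}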
TCGv_i32 tmp2 = tcg_temp_new_i32();
TCGv_i64 t64 = tcg_temp_new_i64();
- gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+ /* For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. That means we don't want to do a
+ * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
+ * for an architecturally 64-bit access, but instead do a
+ * 64-bit access using MO_BE if appropriate and then split
+ * the two halves.
+ * This only makes a difference for BE32 user-mode, where
+ * frob64() must not flip the two halves of the 64-bit data,
+ * but this code must treat BE32 user-mode like BE32 system mode.
+ */
+ TCGv taddr = gen_aa32_addr(s, addr, opc);
+
+ tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
+ tcg_temp_free(taddr);
tcg_gen_mov_i64(cpu_exclusive_val, t64);
- tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ if (s->be_data == MO_BE) {
+ tcg_gen_extr_i64_i32(tmp2, tmp, t64);
+ } else {
+ tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+ }
tcg_temp_free_i64(t64);
store_reg(s, rt2, tmp2);
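The extraction-order swap above is easier to see with concrete values. A small, self-contained illustration (not part of the patch; split_exclusive_pair() and the constants are invented for this note) of how the single 64-bit load splits into Rt/Rt2:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* After a 64-bit load done with MO_BE, the word at the lower address sits
 * in the high half of the result, so Rt comes from the high half and Rt2
 * from the low half; with MO_LE the order is reversed. */
static void split_exclusive_pair(uint64_t t64, bool big_endian,
                                 uint32_t *rt, uint32_t *rt2)
{
    if (big_endian) {
        *rt  = (uint32_t)(t64 >> 32);   /* word at addr     */
        *rt2 = (uint32_t)t64;           /* word at addr + 4 */
    } else {
        *rt  = (uint32_t)t64;           /* word at addr     */
        *rt2 = (uint32_t)(t64 >> 32);   /* word at addr + 4 */
    }
}

int main(void)
{
    uint32_t rt, rt2;

    split_exclusive_pair(0x1122334455667788ULL, true, &rt, &rt2);
    printf("BE: Rt=%08x Rt2=%08x\n", rt, rt2);  /* Rt=11223344 Rt2=55667788 */
    split_exclusive_pair(0x1122334455667788ULL, false, &rt, &rt2);
    printf("LE: Rt=%08x Rt2=%08x\n", rt, rt2);  /* Rt=55667788 Rt2=11223344 */
    return 0;
}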
TCGv_i64 n64 = tcg_temp_new_i64();
t2 = load_reg(s, rt2);
- tcg_gen_concat_i32_i64(n64, t1, t2);
+ /* For AArch32, architecturally the 32-bit word at the lowest
+ * address is always Rt and the one at addr+4 is Rt2, even if
+ * the CPU is big-endian. Since we're going to treat this as a
+ * single 64-bit BE store, we need to put the two halves in the
+ * opposite order from the LE case, so that they end up in the
+ * right places.
+ * We don't want gen_aa32_frob64() because that does the wrong
+ * thing for BE32 user-mode.
+ */
+ if (s->be_data == MO_BE) {
+ tcg_gen_concat_i32_i64(n64, t2, t1);
+ } else {
+ tcg_gen_concat_i32_i64(n64, t1, t2);
+ }
tcg_temp_free_i32(t2);
- gen_aa32_frob64(s, n64);
tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
get_mem_index(s), opc);
tcg_temp_free_i64(n64);
- gen_aa32_frob64(s, o64);
tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
tcg_gen_extrl_i64_i32(t0, o64);
}
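The store side is the mirror image: to write the pair as one 64-bit big-endian access, Rt has to be placed in the high half so it lands at the lower address. A self-contained sketch (concat_exclusive_pair() is invented for this note) of what the two tcg_gen_concat_i32_i64() orderings compute:

#include <stdbool.h>
#include <stdint.h>

/* For a 64-bit BE store, the high half is written to the lower address,
 * so Rt (which architecturally belongs at the lower address) must be the
 * high half; for an LE store it must be the low half. */
static uint64_t concat_exclusive_pair(uint32_t rt, uint32_t rt2, bool big_endian)
{
    if (big_endian) {
        return ((uint64_t)rt << 32) | rt2;
    } else {
        return ((uint64_t)rt2 << 32) | rt;
    }
}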
insn = arm_ldl_code(env, dc->pc, dc->sctlr_b);
+ dc->insn = insn;
dc->pc += 4;
disas_arm_insn(dc, insn);
insn = insn << 16 | insn2;
dc->pc += 2;
}
+ dc->insn = insn;
if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
uint32_t cond = dc->condexec_cond;
if (is_16bit) {
disas_thumb_insn(dc, insn);
} else {
- disas_thumb2_insn(dc, insn);
+ if (disas_thumb2_insn(dc, insn)) {
+ gen_exception_insn(dc, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(dc));
+ }
}
/* Advance the Thumb condexec condition. */
{
DisasContext *dc = container_of(dcbase, DisasContext, base);
- if (dc->base.tb->cflags & CF_LAST_IO && dc->condjmp) {
+ if (tb_cflags(dc->base.tb) & CF_LAST_IO && dc->condjmp) {
/* FIXME: This can theoretically happen with self-modifying code. */
cpu_abort(cpu, "IO on conditional branch instruction");
}
/* nothing more to generate */
break;
case DISAS_WFI:
- gen_helper_wfi(cpu_env);
+ {
+ TCGv_i32 tmp = tcg_const_i32((dc->thumb &&
+ !(dc->insn & (1U << 31))) ? 2 : 4);
+
+ gen_helper_wfi(cpu_env, tmp);
+ tcg_temp_free_i32(tmp);
/* The helper doesn't necessarily throw an exception, but we
* must go back to the main loop to check for interrupts anyway.
*/
tcg_gen_exit_tb(0);
break;
+ }
case DISAS_WFE:
gen_helper_wfe(cpu_env);
break;
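A note on the new gen_helper_wfi() argument: the ternary computes the byte length of the instruction just translated (2 for a 16-bit Thumb encoding, 4 otherwise), presumably so the helper can compute the correct return address if the WFI is trapped. A standalone restatement of that test (insn_length() is invented for this note); it relies on dc->insn holding the first halfword of a 32-bit Thumb encoding in bits [31:16], as arranged earlier in this patch:

#include <stdbool.h>
#include <stdint.h>

/* A 32-bit Thumb encoding always has bit 15 of its first halfword set
 * (its [15:11] field is 0b11101, 0b11110 or 0b11111), and that halfword
 * is stored in bits [31:16] of dc->insn; a 16-bit Thumb insn leaves those
 * bits clear, and ARM-mode instructions are always 4 bytes. */
static int insn_length(bool thumb, uint32_t insn)
{
    return (thumb && !(insn & (1U << 31))) ? 2 : 4;
}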
DisasContext *dc = container_of(dcbase, DisasContext, base);
qemu_log("IN: %s\n", lookup_symbol(dc->base.pc_first));
- log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size,
- dc->thumb | (dc->sctlr_b << 1));
+ log_target_disas(cpu, dc->base.pc_first, dc->base.tb->size);
}
static const TranslatorOps arm_translator_ops = {