a64_translate_init();
}
+static inline ARMMMUIdx get_a32_user_mem_index(DisasContext *s)
+{
+ /* Return the mmu_idx to use for A32/T32 "unprivileged load/store"
+ * insns:
+ * if PL2, UNPREDICTABLE (we choose to implement as if PL0)
+ * otherwise, access as if at PL0.
+ */
+ switch (s->mmu_idx) {
+ case ARMMMUIdx_S1E2: /* this one is UNPREDICTABLE */
+ case ARMMMUIdx_S12NSE0:
+ case ARMMMUIdx_S12NSE1:
+ return ARMMMUIdx_S12NSE0;
+ case ARMMMUIdx_S1E3:
+ case ARMMMUIdx_S1SE0:
+ case ARMMMUIdx_S1SE1:
+ return ARMMMUIdx_S1SE0;
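+ /* ARMMMUIdx_S2NS is only ever used for stage 2 page table
+ * walks, so it can never be the index we are executing at;
+ * hence the assert below.
+ */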
+ case ARMMMUIdx_S2NS:
+ default:
+ g_assert_not_reached();
+ }
+}
+
static inline TCGv_i32 load_cpu_offset(int offset)
{
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_ld_i32(tmp, cpu_env, offset);
return tmp;
}
tcg_temp_free_i32(tcg_excp);
}
-static void gen_exception(int excp, uint32_t syndrome)
+static void gen_exception(int excp, uint32_t syndrome, uint32_t target_el)
{
TCGv_i32 tcg_excp = tcg_const_i32(excp);
TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
+ TCGv_i32 tcg_el = tcg_const_i32(target_el);
+
- gen_helper_exception_with_syndrome(cpu_env, tcg_excp, tcg_syn);
+ gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
+ tcg_syn, tcg_el);
+ tcg_temp_free_i32(tcg_el);
tcg_temp_free_i32(tcg_syn);
tcg_temp_free_i32(tcg_excp);
}
* of the exception, and our syndrome information is always correct.
*/
gen_ss_advance(s);
- gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex));
+ gen_exception(EXCP_UDEF, syn_swstep(s->ss_same_el, 1, s->is_ldex),
+ default_exception_el(s));
s->is_jmp = DISAS_EXC;
}
* generate a conditional branch based on ARM condition code cc.
* This is common between ARM and AArch64 targets.
*/
-void arm_gen_test_cc(int cc, int label)
+void arm_gen_test_cc(int cc, TCGLabel *label)
{
TCGv_i32 tmp;
- int inv;
+ TCGLabel *inv;
switch (cc) {
case 0: /* eq: Z */
s->is_jmp = DISAS_JUMP;
}
-static void gen_exception_insn(DisasContext *s, int offset, int excp, int syn)
+static void gen_exception_insn(DisasContext *s, int offset, int excp,
+ int syn, uint32_t target_el)
{
gen_set_condexec(s);
gen_set_pc_im(s, s->pc - offset);
- gen_exception(excp, syn);
+ gen_exception(excp, syn, target_el);
s->is_jmp = DISAS_JUMP;
}
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
* for invalid encodings; we will generate incorrect syndrome information
* for attempts to execute invalid vfp/neon encodings with FP disabled.
*/
- if (!s->cpacr_fpen) {
+ if (s->fp_excp_el) {
gen_exception_insn(s, 4, EXCP_UDEF,
- syn_fp_access_trap(1, 0xe, s->thumb));
+ syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
return 0;
}
rt = (insn >> 12) & 0xf;
ri = get_arm_cp_reginfo(s->cp_regs,
- ENCODE_CP_REG(cpnum, is64, crn, crm, opc1, opc2));
+ ENCODE_CP_REG(cpnum, is64, s->ns, crn, crm, opc1, opc2));
if (ri) {
/* Check access permissions */
if (!cp_access_ok(s->current_el, ri, isread)) {
break;
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
gen_io_start();
}
}
}
- if (use_icount && (ri->type & ARM_CP_IO)) {
+ if ((s->tb->cflags & CF_USE_ICOUNT) && (ri->type & ARM_CP_IO)) {
/* I/O operations must end the TB here (whether read or write) */
gen_io_end();
gen_lookup_tb(s);
*/
if (is64) {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "64 bit system register cp:%d opc1: %d crm:%d\n",
- isread ? "read" : "write", cpnum, opc1, crm);
+ "64 bit system register cp:%d opc1: %d crm:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crm,
+ s->ns ? "non-secure" : "secure");
} else {
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch32 "
- "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d\n",
- isread ? "read" : "write", cpnum, opc1, crn, crm, opc2);
+ "system register cp:%d opc1:%d crn:%d crm:%d opc2:%d "
+ "(%s)\n",
+ isread ? "read" : "write", cpnum, opc1, crn, crm, opc2,
+ s->ns ? "non-secure" : "secure");
}
return 1;
{
TCGv_i32 tmp;
TCGv_i64 val64, extaddr;
- int done_label;
- int fail_label;
+ TCGLabel *done_label;
+ TCGLabel *fail_label;
/* if (env->exclusive_addr == addr && env->exclusive_val == [addr]) {
[addr] = {Rt};
/* bkpt */
ARCH(5);
gen_exception_insn(s, 4, EXCP_BKPT,
- syn_aa32_bkpt(imm16, false));
+ syn_aa32_bkpt(imm16, false),
+ default_exception_el(s));
break;
case 2:
/* Hypervisor call (v7) */
ARCH(6T2);
shift = (insn >> 7) & 0x1f;
i = (insn >> 16) & 0x1f;
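+ /* BFI/BFC: insn[20:16] is the MSB and insn[11:7] the LSB of the
+ * destination bitfield; the architecture makes msb < lsb
+ * UNPREDICTABLE.
+ */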
+ if (i < shift) {
+ /* UNPREDICTABLE; we choose to UNDEF */
+ goto illegal_op;
+ }
i = i + 1 - shift;
if (rm == 15) {
tmp = tcg_temp_new_i32();
tmp2 = load_reg(s, rn);
if ((insn & 0x01200000) == 0x00200000) {
/* ldrt/strt */
- i = MMU_USER_IDX;
+ i = get_a32_user_mem_index(s);
} else {
i = get_mem_index(s);
}
case 0x08:
case 0x09:
{
- int j, n, user, loaded_base;
+ int j, n, loaded_base;
+ bool exc_return = false;
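+ /* insn bit 20 is the L bit: set for LDM, clear for STM */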
+ bool is_load = extract32(insn, 20, 1);
+ bool user = false;
TCGv_i32 loaded_var;
/* load/store multiple words */
/* XXX: store correct base if write back */
- user = 0;
if (insn & (1 << 22)) {
+ /* LDM (user), LDM (exception return) and STM (user) */
if (IS_USER(s))
goto illegal_op; /* only usable in supervisor mode */
- if ((insn & (1 << 15)) == 0)
- user = 1;
+ if (is_load && extract32(insn, 15, 1)) {
+ exc_return = true;
+ } else {
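+ /* STM (user), or LDM (user) without PC in the register list */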
+ user = true;
+ }
}
rn = (insn >> 16) & 0xf;
addr = load_reg(s, rn);
j = 0;
for(i=0;i<16;i++) {
if (insn & (1 << i)) {
- if (insn & (1 << 20)) {
+ if (is_load) {
/* load */
tmp = tcg_temp_new_i32();
gen_aa32_ld32u(tmp, addr, get_mem_index(s));
if (loaded_base) {
store_reg(s, rn, loaded_var);
}
- if ((insn & (1 << 22)) && !user) {
+ if (exc_return) {
/* Restore CPSR from SPSR. */
tmp = load_cpu_field(spsr);
gen_set_cpsr(tmp, CPSR_ERET_MASK);
break;
default:
illegal_op:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
break;
}
}
break;
case 0xe: /* User privilege. */
tcg_gen_addi_i32(addr, addr, imm);
- memidx = MMU_USER_IDX;
+ memidx = get_a32_user_mem_index(s);
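+ /* the T-suffix forms always access memory as if at PL0 */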
break;
case 0x9: /* Post-decrement. */
imm = -imm;
{
int imm8 = extract32(insn, 0, 8);
ARCH(5);
- gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true));
+ gen_exception_insn(s, 2, EXCP_BKPT, syn_aa32_bkpt(imm8, true),
+ default_exception_el(s));
break;
}
}
return;
undef32:
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
return;
illegal_op:
undef:
- gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized());
+ gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
+ default_exception_el(s));
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
CPUARMState *env = &cpu->env;
DisasContext dc1, *dc = &dc1;
CPUBreakpoint *bp;
- uint16_t *gen_opc_end;
int j, lj;
target_ulong pc_start;
target_ulong next_page_start;
dc->tb = tb;
- gen_opc_end = tcg_ctx.gen_opc_buf + OPC_MAX_SIZE;
-
dc->is_jmp = DISAS_NEXT;
dc->pc = pc_start;
dc->singlestep_enabled = cs->singlestep_enabled;
dc->condjmp = 0;
dc->aarch64 = 0;
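+ /* With a 32-bit EL3 there is no secure EL1, so exceptions from
+ * secure EL0 are routed to EL3; default_exception_el() uses this.
+ */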
+ dc->el3_is_aa64 = arm_el_is_aa64(env, 3);
dc->thumb = ARM_TBFLAG_THUMB(tb->flags);
dc->bswap_code = ARM_TBFLAG_BSWAP_CODE(tb->flags);
dc->condexec_mask = (ARM_TBFLAG_CONDEXEC(tb->flags) & 0xf) << 1;
dc->condexec_cond = ARM_TBFLAG_CONDEXEC(tb->flags) >> 4;
+ dc->mmu_idx = ARM_TBFLAG_MMUIDX(tb->flags);
+ dc->current_el = arm_mmu_idx_to_el(dc->mmu_idx);
#if !defined(CONFIG_USER_ONLY)
- dc->user = (ARM_TBFLAG_PRIV(tb->flags) == 0);
+ dc->user = (dc->current_el == 0);
#endif
dc->ns = ARM_TBFLAG_NS(tb->flags);
- dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
+ dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
dc->c15_cpar = ARM_TBFLAG_XSCALE_CPAR(tb->flags);
dc->cp_regs = cpu->cp_regs;
- dc->current_el = arm_current_el(env);
dc->features = env->features;
/* Single step state. The code-generation logic here is:
if (max_insns == 0)
max_insns = CF_COUNT_MASK;
- gen_tb_start();
+ gen_tb_start(tb);
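+ /* gen_tb_start() now takes the TB so it can test CF_USE_ICOUNT */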
tcg_clear_temp_count();
}
}
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
if (lj < j) {
lj++;
while (lj < j)
* bits should be zero.
*/
assert(num_insns == 0);
- gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0));
+ gen_exception(EXCP_UDEF, syn_swstep(dc->ss_same_el, 0, 0),
+ default_exception_el(dc));
goto done_generating;
}
* Also stop translation when a page boundary is reached. This
* ensures prefetch aborts occur at the right place. */
num_insns ++;
- } while (!dc->is_jmp && tcg_ctx.gen_opc_ptr < gen_opc_end &&
+ } while (!dc->is_jmp && !tcg_op_buf_full() &&
!cs->singlestep_enabled &&
!singlestep &&
!dc->ss_active &&
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI) {
gen_ss_advance(dc);
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
} else if (dc->is_jmp == DISAS_HVC) {
gen_ss_advance(dc);
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
} else if (dc->is_jmp == DISAS_SMC) {
gen_ss_advance(dc);
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
} else if (dc->ss_active) {
gen_step_complete_exception(dc);
} else {
gen_set_condexec(dc);
if (dc->is_jmp == DISAS_SWI && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
} else if (dc->is_jmp == DISAS_HVC && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
} else if (dc->is_jmp == DISAS_SMC && !dc->condjmp) {
gen_ss_advance(dc);
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
} else if (dc->ss_active) {
gen_step_complete_exception(dc);
} else {
gen_helper_wfe(cpu_env);
break;
case DISAS_SWI:
- gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb));
+ gen_exception(EXCP_SWI, syn_aa32_svc(dc->svc_imm, dc->thumb),
+ default_exception_el(dc));
break;
case DISAS_HVC:
- gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm));
+ gen_exception(EXCP_HVC, syn_aa32_hvc(dc->svc_imm), 2);
break;
case DISAS_SMC:
- gen_exception(EXCP_SMC, syn_aa32_smc());
+ gen_exception(EXCP_SMC, syn_aa32_smc(), 3);
break;
}
if (dc->condjmp) {
done_generating:
gen_tb_end(tb, num_insns);
- *tcg_ctx.gen_opc_ptr = INDEX_op_end;
#ifdef DEBUG_DISAS
if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
}
#endif
if (search_pc) {
- j = tcg_ctx.gen_opc_ptr - tcg_ctx.gen_opc_buf;
+ j = tcg_op_buf_count();
lj++;
while (lj <= j)
tcg_ctx.gen_opc_instr_start[lj++] = 0;