ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
&prot, &page_size, &fi, &cacheattrs);
+ if (ret) {
+ /*
+ * Some kinds of translation fault must cause exceptions rather
+ * than being reported in the PAR.
+ */
+ int current_el = arm_current_el(env);
+ int target_el;
+ uint32_t syn, fsr, fsc;
+ bool take_exc = false;
+
+ if (fi.s1ptw && current_el == 1 && !arm_is_secure(env)
+ && (mmu_idx == ARMMMUIdx_S1NSE1 || mmu_idx == ARMMMUIdx_S1NSE0)) {
+ /*
+ * Synchronous stage 2 fault on an access made as part of the
+ * translation table walk for AT S1E0* or AT S1E1* insn
+ * executed from NS EL1. If this is a synchronous external abort
+ * and SCR_EL3.EA == 1, then we take a synchronous external abort
+ * to EL3. Otherwise the fault is taken as an exception to EL2,
+ * and HPFAR_EL2 holds the faulting IPA.
+ */
+ if (fi.type == ARMFault_SyncExternalOnWalk &&
+ (env->cp15.scr_el3 & SCR_EA)) {
+ target_el = 3;
+ } else {
+ env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
+ target_el = 2;
+ }
+ take_exc = true;
+ } else if (fi.type == ARMFault_SyncExternalOnWalk) {
+ /*
+ * Synchronous external aborts during a translation table walk
+ * are taken as Data Abort exceptions.
+ */
+ if (fi.stage2) {
+ if (current_el == 3) {
+ target_el = 3;
+ } else {
+ target_el = 2;
+ }
+ } else {
+ target_el = exception_target_el(env);
+ }
+ take_exc = true;
+ }
+
+ if (take_exc) {
+ /* Construct FSR and FSC using same logic as arm_deliver_fault() */
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, mmu_idx)) {
+ fsr = arm_fi_to_lfsc(&fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
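+ /*
+ * Short-format FSR: mirror arm_deliver_fault() and report the
+ * reserved FSC value 0x3f in the syndrome.
+ */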
+ fsr = arm_fi_to_sfsc(&fi);
+ fsc = 0x3f;
+ }
+ /*
+ * Report exception with ESR indicating a fault due to a
+ * translation table walk for an address translation or cache
+ * maintenance instruction (i.e. with the CM bit set).
+ */
+ syn = syn_data_abort_no_iss(current_el == target_el,
+ fi.ea, 1, fi.s1ptw, 1, fsc);
+ env->exception.vaddress = value;
+ env->exception.fsr = fsr;
+ raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+ }
+ }
+
if (is_a64(env)) {
format64 = true;
} else if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* This underdecoding is safe because the reginfo is NO_RAW. */
{ .name = "ATS", .cp = 15, .crn = 7, .crm = 8, .opc1 = 0, .opc2 = CP_ANY,
.access = PL1_W, .accessfn = ats_access,
- .writefn = ats_write, .type = ARM_CP_NO_RAW },
+ .writefn = ats_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
#endif
REGINFO_SENTINEL
};
/* 64 bit address translation operations */
{ .name = "AT_S1E1R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S1E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S1E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 2,
- .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S1E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 0, .crn = 7, .crm = 8, .opc2 = 3,
- .access = PL1_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL1_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S12E1R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 4,
- .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S12E1W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 5,
- .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S12E0R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 6,
- .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S12E0W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 7,
- .access = PL2_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL2_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
/* AT S1E2* are elsewhere as they UNDEF from EL3 if EL2 is not present */
{ .name = "AT_S1E3R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 0,
- .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "AT_S1E3W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 6, .crn = 7, .crm = 8, .opc2 = 1,
- .access = PL3_W, .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .access = PL3_W, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC,
+ .writefn = ats_write64 },
{ .name = "PAR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 0, .crn = 7, .crm = 4, .opc2 = 0,
{ .name = "AT_S1E2R", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
.access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
{ .name = "AT_S1E2W", .state = ARM_CP_STATE_AA64,
.opc0 = 1, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
.access = PL2_W, .accessfn = at_s1e2_access,
- .type = ARM_CP_NO_RAW, .writefn = ats_write64 },
+ .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC, .writefn = ats_write64 },
/* The AArch32 ATS1H* operations are CONSTRAINED UNPREDICTABLE
* if EL2 is not implemented; we choose to UNDEF. Behaviour at EL3
* with SCR.NS == 0 outside Monitor mode is UNPREDICTABLE; we choose
 * to behave as if SCR.NS was 1.
 */
{ .name = "ATS1HR", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 0,
.access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
{ .name = "ATS1HW", .cp = 15, .opc1 = 4, .crn = 7, .crm = 8, .opc2 = 1,
.access = PL2_W,
- .writefn = ats1h_write, .type = ARM_CP_NO_RAW },
+ .writefn = ats1h_write, .type = ARM_CP_NO_RAW | ARM_CP_RAISES_EXC },
{ .name = "CNTHCTL_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 14, .crm = 1, .opc2 = 0,
/* ARMv7 requires bit 0 and 1 to reset to 1. ARMv8 defines the
int new_len;
/* Bits other than [3:0] are RAZ/WI. */
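+ /*
+ * The 4-bit LEN field can only encode vector lengths up to 16
+ * quadwords, so masking to 4 bits must not lose any valid length.
+ */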
+ QEMU_BUILD_BUG_ON(ARM_MAX_VQ > 16);
raw_write(env, ri, value & 0xf);
/*
}
if (arm_feature(env, ARM_FEATURE_CBAR)) {
+ /*
+ * CBAR is IMPDEF, but common on Arm Cortex-A implementations.
+ * There are two flavours:
+ * (1) older 32-bit only cores have a simple 32-bit CBAR
+ * (2) 64-bit cores have a 64-bit CBAR visible to AArch64, plus a
+ * 32-bit register visible to AArch32 at a different encoding
+ * to the "flavour 1" register and with the bits rearranged to
+ * be able to squash a 64-bit address into the 32-bit view.
+ * We distinguish the two via the ARM_FEATURE_AARCH64 flag, but
+ * in future if we support AArch32-only configs of some of the
+ * AArch64 cores we might need to add a specific feature flag
+ * to indicate cores with "flavour 2" CBAR.
+ */
if (arm_feature(env, ARM_FEATURE_AARCH64)) {
/* 32 bit view is [31:18] 0...0 [43:32]. */
uint32_t cbar32 = (extract64(cpu->reset_cbar, 18, 14) << 18)
ARMCPRegInfo cbar_reginfo[] = {
{ .name = "CBAR",
.type = ARM_CP_CONST,
- .cp = 15, .crn = 15, .crm = 0, .opc1 = 4, .opc2 = 0,
- .access = PL1_R, .resetvalue = cpu->reset_cbar },
+ .cp = 15, .crn = 15, .crm = 3, .opc1 = 1, .opc2 = 0,
+ .access = PL1_R, .resetvalue = cbar32 },
{ .name = "CBAR_EL1", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_CONST,
.opc0 = 3, .opc1 = 1, .crn = 15, .crm = 3, .opc2 = 0,
- .access = PL1_R, .resetvalue = cbar32 },
+ .access = PL1_R, .resetvalue = cpu->reset_cbar },
REGINFO_SENTINEL
};
/* We don't implement a r/w 64 bit CBAR currently */
new_el, env->pc, pstate_read(env));
}
-static inline bool check_for_semihosting(CPUState *cs)
-{
+/*
+ * Do semihosting call and set the appropriate return value. All the
+ * permission and validity checks have been done at translate time.
+ *
+ * We only see semihosting exceptions in TCG, as they are not
+ * trapped to the hypervisor in KVM.
+ */
#ifdef CONFIG_TCG
- /* Check whether this exception is a semihosting call; if so
- * then handle it and return true; otherwise return false.
- */
+static void handle_semihosting(CPUState *cs)
+{
ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
if (is_a64(env)) {
- if (cs->exception_index == EXCP_SEMIHOST) {
- /* This is always the 64-bit semihosting exception.
- * The "is this usermode" and "is semihosting enabled"
- * checks have been done at translate time.
- */
- qemu_log_mask(CPU_LOG_INT,
- "...handling as semihosting call 0x%" PRIx64 "\n",
- env->xregs[0]);
- env->xregs[0] = do_arm_semihosting(env);
- return true;
- }
- return false;
+ qemu_log_mask(CPU_LOG_INT,
+ "...handling as semihosting call 0x%" PRIx64 "\n",
+ env->xregs[0]);
+ env->xregs[0] = do_arm_semihosting(env);
} else {
- uint32_t imm;
-
- /* Only intercept calls from privileged modes, to provide some
- * semblance of security.
- */
- if (cs->exception_index != EXCP_SEMIHOST &&
- (!semihosting_enabled() ||
- ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_USR))) {
- return false;
- }
-
- switch (cs->exception_index) {
- case EXCP_SEMIHOST:
- /* This is always a semihosting call; the "is this usermode"
- * and "is semihosting enabled" checks have been done at
- * translate time.
- */
- break;
- case EXCP_SWI:
- /* Check for semihosting interrupt. */
- if (env->thumb) {
- imm = arm_lduw_code(env, env->regs[15] - 2, arm_sctlr_b(env))
- & 0xff;
- if (imm == 0xab) {
- break;
- }
- } else {
- imm = arm_ldl_code(env, env->regs[15] - 4, arm_sctlr_b(env))
- & 0xffffff;
- if (imm == 0x123456) {
- break;
- }
- }
- return false;
- case EXCP_BKPT:
- /* See if this is a semihosting syscall. */
- if (env->thumb) {
- imm = arm_lduw_code(env, env->regs[15], arm_sctlr_b(env))
- & 0xff;
- if (imm == 0xab) {
- env->regs[15] += 2;
- break;
- }
- }
- return false;
- default:
- return false;
- }
-
qemu_log_mask(CPU_LOG_INT,
"...handling as semihosting call 0x%x\n",
env->regs[0]);
env->regs[0] = do_arm_semihosting(env);
- return true;
}
-#else
- return false;
-#endif
}
+#endif
/* Handle a CPU exception for A and R profile CPUs.
* Do any appropriate logging, handle PSCI calls, and then hand off
return;
}
- /* Semihosting semantics depend on the register width of the
- * code that caused the exception, not the target exception level,
- * so must be handled here.
+ /*
+ * Semihosting semantics depend on the register width of the code
+ * that caused the exception, not the target exception level, so
+ * must be handled here.
*/
- if (check_for_semihosting(cs)) {
+#ifdef CONFIG_TCG
+ if (cs->exception_index == EXCP_SEMIHOST) {
+ handle_semihosting(cs);
return;
}
+#endif
/* Hooks may change global state so BQL should be held, also the
* BQL needs to be held for any modification of
}
#endif
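+/*
+ * Flag bits that are computed the same way in every CPU state: the
+ * FP exception EL, the core MMU index, and whether single-step is
+ * active.
+ */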
+static uint32_t rebuild_hflags_common(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx, uint32_t flags)
+{
+ flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
+ flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX,
+ arm_to_core_mmu_idx(mmu_idx));
+
+ if (arm_singlestep_active(env)) {
+ flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
+ }
+ return flags;
+}
+
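+/*
+ * Flag bits shared by all AArch32 execution states (A-profile and
+ * M-profile): SCTLR.B, big-endian data, and whether the non-secure
+ * register bank is in use.
+ */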
+static uint32_t rebuild_hflags_common_32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx, uint32_t flags)
+{
+ bool sctlr_b = arm_sctlr_b(env);
+
+ if (sctlr_b) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, 1);
+ }
+ if (arm_cpu_data_is_big_endian_a32(env, sctlr_b)) {
+ flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
+ }
+ flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
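+/*
+ * M-profile specific flag bits: handler mode and whether the v8M
+ * stack-limit check applies at the current execution priority.
+ */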
+static uint32_t rebuild_hflags_m32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ uint32_t flags = 0;
+
+ if (arm_v7m_is_handler_mode(env)) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
+ }
+
+ /*
+ * v8M always applies stack limit checks unless CCR.STKOFHFNMIGN
+ * is suppressing them because the requested execution priority
+ * is less than 0.
+ */
+ if (arm_feature(env, ARM_FEATURE_V8) &&
+ !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
+ (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
+ }
+
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
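+/*
+ * Flag bits common to A-profile AArch32 and AArch64: currently just
+ * the debug exception target EL.
+ */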
+static uint32_t rebuild_hflags_aprofile(CPUARMState *env)
+{
+ int flags = 0;
+
+ flags = FIELD_DP32(flags, TBFLAG_ANY, DEBUG_TARGET_EL,
+ arm_debug_target_el(env));
+ return flags;
+}
+
+static uint32_t rebuild_hflags_a32(CPUARMState *env, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ uint32_t flags = rebuild_hflags_aprofile(env);
+ return rebuild_hflags_common_32(env, fp_el, mmu_idx, flags);
+}
+
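+/*
+ * Flag bits specific to AArch64 execution: the TBI/TBID address
+ * tagging controls, the SVE exception EL and vector length, data
+ * endianness, and the pointer-authentication and BTI enables.
+ */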
+static uint32_t rebuild_hflags_a64(CPUARMState *env, int el, int fp_el,
+ ARMMMUIdx mmu_idx)
+{
+ uint32_t flags = rebuild_hflags_aprofile(env);
+ ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
+ ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
+ uint64_t sctlr;
+ int tbii, tbid;
+
+ flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
+
+ /* FIXME: ARMv8.1-VHE S2 translation regime. */
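+ /*
+ * Bit 0 of tbid/tbii covers the TTBR0-controlled (low) VA range;
+ * for the EL1&0 regime bit 1 covers the TTBR1-controlled (high)
+ * range. "tbid" is where the top byte is ignored for data
+ * accesses; "tbii" is where it is also ignored for instruction
+ * fetches (TBI set and TCR.TBIDx clear).
+ */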
+ if (regime_el(env, stage1) < 2) {
+ ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
+ tbid = (p1.tbi << 1) | p0.tbi;
+ tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
+ } else {
+ tbid = p0.tbi;
+ tbii = tbid & !p0.tbid;
+ }
+
+ flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
+ flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
+
+ if (cpu_isar_feature(aa64_sve, env_archcpu(env))) {
+ int sve_el = sve_exception_el(env, el);
+ uint32_t zcr_len;
+
+ /*
+ * If SVE is disabled, but FP is enabled,
+ * then the effective len is 0.
+ */
+ if (sve_el != 0 && fp_el == 0) {
+ zcr_len = 0;
+ } else {
+ zcr_len = sve_zcr_len_for_el(env, el);
+ }
+ flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
+ flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
+ }
+
+ sctlr = arm_sctlr(env, el);
+
+ if (arm_cpu_data_is_big_endian_a64(el, sctlr)) {
+ flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
+ }
+
+ if (cpu_isar_feature(aa64_pauth, env_archcpu(env))) {
+ /*
+ * In order to save space in flags, we record only whether
+ * pauth is "inactive", meaning all insns are implemented as
+ * a nop, or "active" when some action must be performed.
+ * The decision of which action to take is left to a helper.
+ */
+ if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
+ }
+ }
+
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
+ if (sctlr & (el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
+ }
+ }
+
+ return rebuild_hflags_common(env, fp_el, mmu_idx, flags);
+}
+
void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *pflags)
{
ARMMMUIdx mmu_idx = arm_mmu_idx(env);
int current_el = arm_current_el(env);
int fp_el = fp_exception_el(env, current_el);
- uint32_t flags = 0;
+ uint32_t flags;
if (is_a64(env)) {
- ARMCPU *cpu = env_archcpu(env);
- uint64_t sctlr;
-
*pc = env->pc;
- flags = FIELD_DP32(flags, TBFLAG_ANY, AARCH64_STATE, 1);
-
- /* Get control bits for tagged addresses. */
- {
- ARMMMUIdx stage1 = stage_1_mmu_idx(mmu_idx);
- ARMVAParameters p0 = aa64_va_parameters_both(env, 0, stage1);
- int tbii, tbid;
-
- /* FIXME: ARMv8.1-VHE S2 translation regime. */
- if (regime_el(env, stage1) < 2) {
- ARMVAParameters p1 = aa64_va_parameters_both(env, -1, stage1);
- tbid = (p1.tbi << 1) | p0.tbi;
- tbii = tbid & ~((p1.tbid << 1) | p0.tbid);
- } else {
- tbid = p0.tbi;
- tbii = tbid & !p0.tbid;
- }
-
- flags = FIELD_DP32(flags, TBFLAG_A64, TBII, tbii);
- flags = FIELD_DP32(flags, TBFLAG_A64, TBID, tbid);
+ flags = rebuild_hflags_a64(env, current_el, fp_el, mmu_idx);
+ if (cpu_isar_feature(aa64_bti, env_archcpu(env))) {
+ flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
}
+ } else {
+ *pc = env->regs[15];
- if (cpu_isar_feature(aa64_sve, cpu)) {
- int sve_el = sve_exception_el(env, current_el);
- uint32_t zcr_len;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ flags = rebuild_hflags_m32(env, fp_el, mmu_idx);
- /* If SVE is disabled, but FP is enabled,
- * then the effective len is 0.
- */
- if (sve_el != 0 && fp_el == 0) {
- zcr_len = 0;
- } else {
- zcr_len = sve_zcr_len_for_el(env, current_el);
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S)
+ != env->v7m.secure) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
}
- flags = FIELD_DP32(flags, TBFLAG_A64, SVEEXC_EL, sve_el);
- flags = FIELD_DP32(flags, TBFLAG_A64, ZCR_LEN, zcr_len);
- }
-
- sctlr = arm_sctlr(env, current_el);
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- /*
- * In order to save space in flags, we record only whether
- * pauth is "inactive", meaning all insns are implemented as
- * a nop, or "active" when some action must be performed.
- * The decision of which action to take is left to a helper.
- */
- if (sctlr & (SCTLR_EnIA | SCTLR_EnIB | SCTLR_EnDA | SCTLR_EnDB)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, PAUTH_ACTIVE, 1);
+ if ((env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
+ (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
+ (env->v7m.secure &&
+ !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
+ /*
+ * ASPEN is set, but FPCA/SFPA indicate that there is no
+ * active FP context; we must create a new FP context before
+ * executing any FP insn.
+ */
+ flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
}
- }
- if (cpu_isar_feature(aa64_bti, cpu)) {
- /* Note that SCTLR_EL[23].BT == SCTLR_BT1. */
- if (sctlr & (current_el == 0 ? SCTLR_BT0 : SCTLR_BT1)) {
- flags = FIELD_DP32(flags, TBFLAG_A64, BT, 1);
+ bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
+ if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
+ flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
}
- flags = FIELD_DP32(flags, TBFLAG_A64, BTYPE, env->btype);
+ } else {
+ flags = rebuild_hflags_a32(env, fp_el, mmu_idx);
}
- } else {
- *pc = env->regs[15];
+
flags = FIELD_DP32(flags, TBFLAG_A32, THUMB, env->thumb);
flags = FIELD_DP32(flags, TBFLAG_A32, VECLEN, env->vfp.vec_len);
flags = FIELD_DP32(flags, TBFLAG_A32, VECSTRIDE, env->vfp.vec_stride);
flags = FIELD_DP32(flags, TBFLAG_A32, CONDEXEC, env->condexec_bits);
- flags = FIELD_DP32(flags, TBFLAG_A32, SCTLR_B, arm_sctlr_b(env));
- flags = FIELD_DP32(flags, TBFLAG_A32, NS, !access_secure_reg(env));
if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
|| arm_el_is_aa64(env, 1) || arm_feature(env, ARM_FEATURE_M)) {
flags = FIELD_DP32(flags, TBFLAG_A32, VFPEN, 1);
}
}
- flags = FIELD_DP32(flags, TBFLAG_ANY, MMUIDX, arm_to_core_mmu_idx(mmu_idx));
-
/* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
* states defined in the ARM ARM for software singlestep:
* SS_ACTIVE PSTATE.SS State
* 0 x Inactive (the TB flag for SS is always 0)
* 1 0 Active-pending
* 1 1 Active-not-pending
+ * SS_ACTIVE is set in hflags; PSTATE_SS is computed every TB.
*/
- if (arm_singlestep_active(env)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, SS_ACTIVE, 1);
+ if (FIELD_EX32(flags, TBFLAG_ANY, SS_ACTIVE)) {
if (is_a64(env)) {
if (env->pstate & PSTATE_SS) {
flags = FIELD_DP32(flags, TBFLAG_ANY, PSTATE_SS, 1);
}
}
}
- if (arm_cpu_data_is_big_endian(env)) {
- flags = FIELD_DP32(flags, TBFLAG_ANY, BE_DATA, 1);
- }
- flags = FIELD_DP32(flags, TBFLAG_ANY, FPEXC_EL, fp_el);
-
- if (arm_v7m_is_handler_mode(env)) {
- flags = FIELD_DP32(flags, TBFLAG_A32, HANDLER, 1);
- }
-
- /* v8M always applies stack limit checks unless CCR.STKOFHFNMIGN is
- * suppressing them because the requested execution priority is less than 0.
- */
- if (arm_feature(env, ARM_FEATURE_V8) &&
- arm_feature(env, ARM_FEATURE_M) &&
- !((mmu_idx & ARM_MMU_IDX_M_NEGPRI) &&
- (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKOFHFNMIGN_MASK))) {
- flags = FIELD_DP32(flags, TBFLAG_A32, STACKCHECK, 1);
- }
-
- if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
- FIELD_EX32(env->v7m.fpccr[M_REG_S], V7M_FPCCR, S) != env->v7m.secure) {
- flags = FIELD_DP32(flags, TBFLAG_A32, FPCCR_S_WRONG, 1);
- }
-
- if (arm_feature(env, ARM_FEATURE_M) &&
- (env->v7m.fpccr[env->v7m.secure] & R_V7M_FPCCR_ASPEN_MASK) &&
- (!(env->v7m.control[M_REG_S] & R_V7M_CONTROL_FPCA_MASK) ||
- (env->v7m.secure &&
- !(env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK)))) {
- /*
- * ASPEN is set, but FPCA/SFPA indicate that there is no active
- * FP context; we must create a new FP context before executing
- * any FP insn.
- */
- flags = FIELD_DP32(flags, TBFLAG_A32, NEW_FP_CTXT_NEEDED, 1);
- }
-
- if (arm_feature(env, ARM_FEATURE_M)) {
- bool is_secure = env->v7m.fpccr[M_REG_S] & R_V7M_FPCCR_S_MASK;
-
- if (env->v7m.fpccr[is_secure] & R_V7M_FPCCR_LSPACT_MASK) {
- flags = FIELD_DP32(flags, TBFLAG_A32, LSPACT, 1);
- }
- }
*pflags = flags;
*cs_base = 0;