#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
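The attrs byte reuses the MAIR encoding: the high nibble gives the outer cacheability, the low nibble the inner, and a high nibble of 0 denotes Device memory. A minimal standalone sketch of decoding such a byte (the helper below is illustrative only; its policy test mirrors the combine_cacheattr_nibble() logic added later in this patch):

#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder for the MAIR-style byte held in ARMCacheAttrs.attrs.
 * High nibble: outer attributes; low nibble: inner attributes.
 */
static const char *cacheattr_nibble_str(uint8_t n)
{
    if (n == 4) {
        return "non-cacheable";
    } else if (((n >> 2) & 3) == 0 || ((n >> 2) & 3) == 2) {
        return "write-through";
    } else {
        return "write-back";
    }
}

int main(void)
{
    uint8_t attrs = 0xff; /* Normal memory, write-back RW-allocate */

    if ((attrs >> 4) == 0) {
        printf("device memory\n");
    } else {
        printf("outer %s, inner %s\n",
               cacheattr_nibble_str(attrs >> 4),
               cacheattr_nibble_str(attrs & 0xf));
    }
    return 0;
}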
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD 0x8
uint64_t par64;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
- ret = get_phys_addr(env, value, access_type, mmu_idx,
- &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
- if (extended_addresses_enabled(env)) {
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
+ &prot, &page_size, &fsr, &fi, &cacheattrs);
+ if (arm_s1_regime_using_lpae_format(env, mmu_idx)) {
/* fsr is a DFSR/IFSR value for the long descriptor
* translation table format, but with WnR always clear.
* Convert it to a 64-bit PAR.
if (!attrs.secure) {
par64 |= (1 << 9); /* NS */
}
- /* We don't set the ATTR or SH fields in the PAR. */
+ par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
+ par64 |= cacheattrs.shareability << 7; /* SH */
} else {
par64 |= 1; /* F */
par64 |= (fsr & 0x3f) << 1; /* FS */
return 0;
}
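For reference, the long-descriptor PAR fields set above sit at NS = bit 9, SH = bits [8:7] and ATTR = bits [63:56]. A self-contained sketch of the packing (pack_par64 is a hypothetical helper for illustration, not QEMU API):

#include <stdint.h>
#include <assert.h>

/* Pack the long-descriptor PAR fields touched in the hunk above. */
static uint64_t pack_par64(uint64_t phys, int ns, unsigned sh, uint8_t attrs)
{
    uint64_t par64 = phys & ~0xfffull;   /* PA, 4K-aligned */

    par64 |= (uint64_t)ns << 9;          /* NS */
    par64 |= (uint64_t)(sh & 3) << 7;    /* SH */
    par64 |= (uint64_t)attrs << 56;      /* ATTR */
    return par64;
}

int main(void)
{
    assert(pack_par64(0x40001000, 1, 3, 0xff) ==
           (0x40001000ull | (1ull << 9) | (3ull << 7) | (0xffull << 56)));
    return 0;
}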
- u32p += env->pmsav7.rnr;
+ u32p += env->pmsav7.rnr[M_REG_NS];
return *u32p;
}
return;
}
- u32p += env->pmsav7.rnr;
+ u32p += env->pmsav7.rnr[M_REG_NS];
tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
*u32p = value;
}
.resetfn = arm_cp_reset_ignore },
{ .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, pmsav7.rnr),
+ .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
.writefn = pmsav7_rgnr_write,
.resetfn = arm_cp_reset_ignore },
REGINFO_SENTINEL
if (arm_feature(env, ARM_FEATURE_EL3)) {
valid_mask &= ~HCR_HCD;
- } else {
+ } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
+ * However, if we're using the SMC PSCI conduit then QEMU is
+ * effectively acting like EL3 firmware and so the guest at
+ * EL2 should retain the ability to prevent EL1 from being
+ * able to make SMC calls into the ersatz firmware, so in
+ * that case HCR.TSC should be read/write.
+ */
valid_mask &= ~HCR_TSC;
}
}
}
+/* We don't know until after realize whether there's a GICv3
+ * attached, and that is what registers the gicv3 sysregs.
+ * So we have to fill in the GIC fields in ID_PFR1 and ID_AA64PFR0_EL1
+ * at runtime.
+ */
+static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t pfr1 = cpu->id_pfr1;
+
+ if (env->gicv3state) {
+ pfr1 |= 1 << 28;
+ }
+ return pfr1;
+}
+
+static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t pfr0 = cpu->id_aa64pfr0;
+
+ if (env->gicv3state) {
+ pfr0 |= 1 << 24;
+ }
+ return pfr0;
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_pfr0 },
+ /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
+ * the value of the GIC field until after we define these regs.
+ */
{ .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
- .access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_pfr1 },
+ .access = PL1_R, .type = ARM_CP_NO_RAW,
+ .readfn = id_pfr1_read,
+ .writefn = arm_cp_write_ignore },
{ .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
* define new registers here.
*/
ARMCPRegInfo v8_idregs[] = {
+ /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
+ * know the right value for the GIC field until after we
+ * define these regs.
+ */
{ .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
- .access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64pfr0 },
+ .access = PL1_R, .type = ARM_CP_NO_RAW,
+ .readfn = id_aa64pfr0_read,
+ .writefn = arm_cp_write_ignore },
{ .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
return 0;
}
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* The TT instructions can be used by unprivileged code, but in
+ * user-only emulation we don't have the MPU.
+ * Luckily since we know we are NonSecure unprivileged (and that in
+ * turn means that the A flag wasn't specified), all the bits in the
+ * register must be zero:
+ * IREGION: 0 because IRVALID is 0
+ * IRVALID: 0 because NS
+ * S: 0 because NS
+ * NSRW: 0 because NS
+ * NSR: 0 because NS
+ * RW: 0 because unpriv and A flag not set
+ * R: 0 because unpriv and A flag not set
+ * SRVALID: 0 because NS
+ * MRVALID: 0 because unpriv and A flag not set
+ * SREGION: 0 because SRVALID is 0
+ * MREGION: 0 because MRVALID is 0
+ */
+ return 0;
+}
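For orientation, the bit positions behind the field list in the comment above are, per the v8M architecture (the macro names below are illustrative, not from this file):

#include <stdint.h>

/* Illustrative layout of the v8M TT response word walked through above. */
#define TT_RESP_MREGION(x)  ((uint32_t)(x) & 0xff)         /* bits [7:0] */
#define TT_RESP_SREGION(x)  (((uint32_t)(x) >> 8) & 0xff)  /* bits [15:8] */
#define TT_RESP_MRVALID     (1u << 16)
#define TT_RESP_SRVALID     (1u << 17)
#define TT_RESP_R           (1u << 18)
#define TT_RESP_RW          (1u << 19)
#define TT_RESP_NSR         (1u << 20)
#define TT_RESP_NSRW        (1u << 21)
#define TT_RESP_S           (1u << 22)
#define TT_RESP_IRVALID     (1u << 23)
#define TT_RESP_IREGION(x)  (((uint32_t)(x) >> 24) & 0xff) /* bits [31:24] */

int main(void)
{
    uint32_t resp = TT_RESP_SRVALID | TT_RESP_S | (1u << 8); /* SREGION = 1 */

    return TT_RESP_SREGION(resp) == 1 ? 0 : 1;
}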
+
void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
stl_phys(cs->as, env->regs[13], val);
}
-static uint32_t v7m_pop(CPUARMState *env)
+/* Return true if we're using the process stack pointer (not the MSP) */
+static bool v7m_using_psp(CPUARMState *env)
{
- CPUState *cs = CPU(arm_env_get_cpu(env));
- uint32_t val;
+ /* Handler mode always uses the main stack; for thread mode
+ * the CONTROL.SPSEL bit determines the answer.
+ * Note that in v7M it is not possible to be in Handler mode with
+ * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+ */
+ return !arm_v7m_is_handler_mode(env) &&
+ env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
+/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+ bool new_spsel,
+ bool secstate)
+{
+ bool old_is_psp = v7m_using_psp(env);
+
+ env->v7m.control[secstate] =
+ deposit32(env->v7m.control[secstate],
+ R_V7M_CONTROL_SPSEL_SHIFT,
+ R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+ if (secstate == env->v7m.secure) {
+ bool new_is_psp = v7m_using_psp(env);
+ uint32_t tmp;
- val = ldl_phys(cs->as, env->regs[13]);
- env->regs[13] += 4;
- return val;
+ if (old_is_psp != new_is_psp) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ }
+ }
}
-/* Switch to V7M main or process stack pointer. */
-static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
+/* Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
+ write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+ /* Write a new value to v7m.exception, thus transitioning into or out
+ * of Handler mode; this may result in a change of active stack pointer.
+ */
+ bool new_is_psp, old_is_psp = v7m_using_psp(env);
uint32_t tmp;
- bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK;
- if (old_spsel != new_spsel) {
+ env->v7m.exception = new_exc;
+
+ new_is_psp = v7m_using_psp(env);
+
+ if (old_is_psp != new_is_psp) {
tmp = env->v7m.other_sp;
env->v7m.other_sp = env->regs[13];
env->regs[13] = tmp;
+ }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+ uint32_t new_ss_msp, new_ss_psp;
+
+ if (env->v7m.secure == new_secstate) {
+ return;
+ }
+
+ /* All the banked state is accessed by looking at env->v7m.secure
+ * except for the stack pointer; rearrange the SP appropriately.
+ */
+ new_ss_msp = env->v7m.other_ss_msp;
+ new_ss_psp = env->v7m.other_ss_psp;
+
+ if (v7m_using_psp(env)) {
+ env->v7m.other_ss_psp = env->regs[13];
+ env->v7m.other_ss_msp = env->v7m.other_sp;
+ } else {
+ env->v7m.other_ss_msp = env->regs[13];
+ env->v7m.other_ss_psp = env->v7m.other_sp;
+ }
+
+ env->v7m.secure = new_secstate;
+
+ if (v7m_using_psp(env)) {
+ env->regs[13] = new_ss_psp;
+ env->v7m.other_sp = new_ss_msp;
+ } else {
+ env->regs[13] = new_ss_msp;
+ env->v7m.other_sp = new_ss_psp;
+ }
+}
+
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /* Handle v7M BXNS:
+ * - if the return value is a magic value, do exception return (like BX)
+ * - otherwise bit 0 of the return value is the target security state
+ */
+ uint32_t min_magic;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* Covers FNC_RETURN and EXC_RETURN magic */
+ min_magic = FNC_RETURN_MIN_MAGIC;
+ } else {
+ /* EXC_RETURN magic only */
+ min_magic = EXC_RETURN_MIN_MAGIC;
+ }
+
+ if (dest >= min_magic) {
+ /* This is an exception return magic value; put it where
+ * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+ * Note that if we ever add gen_ss_advance() singlestep support to
+ * M profile this should count as an "instruction execution complete"
+ * event (compare gen_bx_excret_final_code()).
+ */
+ env->regs[15] = dest & ~1;
+ env->thumb = dest & 1;
+ HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+ /* notreached */
+ }
+
+ /* translate.c should have made BXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ switch_v7m_security_state(env, dest & 1);
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+}
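The single dest >= min_magic comparison works because the two magic ranges sit back to back at the top of the address space: FNC_RETURN values from 0xfefffffe and EXC_RETURN values from 0xff000000 (the values internals.h is assumed to define for these constants). A standalone sketch of the classification, matching the range checks used here and in arm_v7m_cpu_do_interrupt() below:

#include <stdint.h>
#include <stdio.h>

#define FNC_RETURN_MIN_MAGIC 0xfefffffeu /* assumed value, see internals.h */
#define EXC_RETURN_MIN_MAGIC 0xff000000u /* assumed value, see internals.h */

/* Classify a BX/BXNS destination the way the helpers do. */
static const char *classify_dest(uint32_t dest, int have_security_ext)
{
    uint32_t min_magic = have_security_ext ? FNC_RETURN_MIN_MAGIC
                                           : EXC_RETURN_MIN_MAGIC;

    if (dest >= min_magic) {
        return dest >= EXC_RETURN_MIN_MAGIC ? "exception return"
                                            : "function return";
    }
    return "ordinary branch";
}

int main(void)
{
    printf("%s\n", classify_dest(0xfffffffd, 1)); /* exception return */
    printf("%s\n", classify_dest(0xfeffffff, 1)); /* function return */
    printf("%s\n", classify_dest(0x08000400, 1)); /* ordinary branch */
    return 0;
}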
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* Handle v7M BLXNS:
+ * - bit 0 of the destination address is the target security state
+ */
+
+ /* At this point regs[15] is the address just after the BLXNS */
+ uint32_t nextinst = env->regs[15] | 1;
+ uint32_t sp = env->regs[13] - 8;
+ uint32_t saved_psr;
+
+ /* translate.c will have made BLXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ if (dest & 1) {
+ /* target is Secure, so this is just a normal BLX,
+ * except that the low bit doesn't indicate Thumb/not.
+ */
+ env->regs[14] = nextinst;
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+ return;
+ }
+
+ /* Target is non-secure: first push a stack frame */
+ if (!QEMU_IS_ALIGNED(sp, 8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "BLXNS with misaligned SP is UNPREDICTABLE\n");
+ }
- env->v7m.control = deposit32(env->v7m.control,
- R_V7M_CONTROL_SPSEL_SHIFT,
- R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+ saved_psr = env->v7m.exception;
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+ saved_psr |= XPSR_SFPA;
+ }
+
+ /* Note that these stores can throw exceptions on MPU faults */
+ cpu_stl_data(env, sp, nextinst);
+ cpu_stl_data(env, sp + 4, saved_psr);
+
+ env->regs[13] = sp;
+ env->regs[14] = 0xfeffffff;
+ if (arm_v7m_is_handler_mode(env)) {
+ /* Write a dummy value to IPSR, to avoid leaking the current secure
+ * exception number to non-secure code. This is guaranteed not
+ * to cause write_v7m_exception() to actually change stacks.
+ */
+ write_v7m_exception(env, 1);
+ }
+ switch_v7m_security_state(env, 0);
+ env->thumb = 1;
+ env->regs[15] = dest;
+}
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+ bool spsel)
+{
+ /* Return a pointer to the location where we currently store the
+ * stack pointer for the requested security state and thread mode.
+ * This pointer will become invalid if the CPU state is updated
+ * such that the stack pointers are switched around (eg changing
+ * the SPSEL control bit).
+ * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+ * Unlike that pseudocode, we require the caller to pass us in the
+ * SPSEL control bit value; this is because we also use this
+ * function in handling of pushing of the callee-saves registers
+ * part of the v8M stack frame (pseudocode PushCalleeStack()),
+ * and in the tailchain codepath the SPSEL bit comes from the exception
+ * return magic LR value from the previous exception. The pseudocode
+ * opencodes the stack-selection in PushCalleeStack(), but we prefer
+ * to make this utility function generic enough to do the job.
+ */
+ bool want_psp = threadmode && spsel;
+
+ if (secure == env->v7m.secure) {
+ if (want_psp == v7m_using_psp(env)) {
+ return &env->regs[13];
+ } else {
+ return &env->v7m.other_sp;
+ }
+ } else {
+ if (want_psp) {
+ return &env->v7m.other_ss_psp;
+ } else {
+ return &env->v7m.other_ss_msp;
+ }
}
}
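A worked example of the mapping this helper implements: suppose the CPU is currently Secure, in Thread mode, with CONTROL_S.SPSEL set, so regs[13] is the Secure PSP. Then:

/* Illustrative only -- where each of the four banked SPs lives in that state
 * (spsel is a don't-care when threadmode is false):
 *
 *   get_v7m_sp_ptr(env, true,  true,  true)  == &env->regs[13]          S PSP
 *   get_v7m_sp_ptr(env, true,  false, false) == &env->v7m.other_sp      S MSP
 *   get_v7m_sp_ptr(env, false, true,  true)  == &env->v7m.other_ss_psp  NS PSP
 *   get_v7m_sp_ptr(env, false, false, false) == &env->v7m.other_ss_msp  NS MSP
 */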
-static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
+static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
MemTxResult result;
- hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
+ hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
uint32_t addr;
addr = address_space_ldl(cs->as, vec,
* Since we don't model Lockup, we just report this guest error
* via cpu_abort().
*/
- cpu_abort(cs, "Failed to read from exception vector table "
- "entry %08x\n", (unsigned)vec);
+ cpu_abort(cs, "Failed to read from %s exception vector table "
+ "entry %08x\n", targets_secure ? "secure" : "nonsecure",
+ (unsigned)vec);
}
return addr;
}
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
+static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain)
+{
+ /* For v8M, push the callee-saves register part of the stack frame.
+ * Compare the v8M pseudocode PushCalleeStack().
+ * In the tailchaining case this may not be the current stack.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ if (dotailchain) {
+ frame_sp_p = get_v7m_sp_ptr(env, true,
+ lr & R_V7M_EXCRET_MODE_MASK,
+ lr & R_V7M_EXCRET_SPSEL_MASK);
+ } else {
+ frame_sp_p = &env->regs[13];
+ }
+
+ frameptr = *frame_sp_p - 0x28;
+
+ stl_phys(cs->as, frameptr, 0xfefa125b);
+ stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
+ stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
+ stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
+ stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
+ stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
+ stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
+ stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
+ stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
+
+ *frame_sp_p = frameptr;
+}
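The stores above lay down a 0x28-byte frame: the integrity signature at offset 0, a reserved word at offset 4 (not written here), then r4-r11. A standalone sketch of that layout (the struct is illustrative, not a QEMU type):

#include <stdint.h>
#include <stddef.h>
#include <assert.h>

/* Illustrative view of the v8M callee-saves stack frame pushed above. */
struct v8m_callee_frame {
    uint32_t signature;     /* 0x00: integrity signature 0xfefa125b */
    uint32_t reserved;      /* 0x04: not written by v7m_push_callee_stack() */
    uint32_t r4_to_r11[8];  /* 0x08..0x24 */
};

int main(void)
{
    assert(offsetof(struct v8m_callee_frame, r4_to_r11) == 0x8);
    assert(sizeof(struct v8m_callee_frame) == 0x28);
    return 0;
}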
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
{
/* Do the "take the exception" parts of exception entry,
* but not the pushing of state to the stack. This is
*/
CPUARMState *env = &cpu->env;
uint32_t addr;
+ bool targets_secure;
+
+ targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
- armv7m_nvic_acknowledge_irq(env->nvic);
- switch_v7m_sp(env, 0);
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (lr & R_V7M_EXCRET_S_MASK)) {
+ /* The background code (the owner of the registers in the
+ * exception frame) is Secure. This means it may either already
+ * have or now needs to push callee-saves registers.
+ */
+ if (targets_secure) {
+ if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+ /* We took an exception from Secure to NonSecure
+ * (which means the callee-saved registers got stacked)
+ * and are now tailchaining to a Secure exception.
+ * Clear DCRS so eventual return from this Secure
+ * exception unstacks the callee-saved registers.
+ */
+ lr &= ~R_V7M_EXCRET_DCRS_MASK;
+ }
+ } else {
+ /* We're going to a non-secure exception; push the
+ * callee-saves registers to the stack now, if they're
+ * not already saved.
+ */
+ if (lr & R_V7M_EXCRET_DCRS_MASK &&
+ !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
+ v7m_push_callee_stack(cpu, lr, dotailchain);
+ }
+ lr |= R_V7M_EXCRET_DCRS_MASK;
+ }
+ }
+
+ lr &= ~R_V7M_EXCRET_ES_MASK;
+ if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ lr |= R_V7M_EXCRET_ES_MASK;
+ }
+ lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+ if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+
+ /* Clear registers if necessary to prevent non-secure exception
+ * code being able to see register values from secure code.
+ * Where register values become architecturally UNKNOWN we leave
+ * them with their previous values.
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (!targets_secure) {
+ /* Always clear the caller-saved registers (they have been
+ * pushed to the stack earlier in v7m_push_stack()).
+ * Clear callee-saved registers if the background code is
+ * Secure (in which case these regs were saved in
+ * v7m_push_callee_stack()).
+ */
+ int i;
+
+ for (i = 0; i < 13; i++) {
+ /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+ if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+ env->regs[i] = 0;
+ }
+ }
+ /* Clear EAPSR */
+ xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+ }
+ }
+ }
+
+ /* Switch to target security state -- must do this before writing SPSEL */
+ switch_v7m_security_state(env, targets_secure);
+ write_v7m_control_spsel(env, 0);
+ arm_clear_exclusive(env);
/* Clear IT bits */
env->condexec_bits = 0;
env->regs[14] = lr;
- addr = arm_v7m_load_vector(cpu);
+ addr = arm_v7m_load_vector(cpu, targets_secure);
env->regs[15] = addr & 0xfffffffe;
env->thumb = addr & 1;
}
uint32_t xpsr = xpsr_read(env);
/* Align stack pointer if the guest wants that */
- if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
+ if ((env->regs[13] & 4) &&
+ (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
env->regs[13] -= 4;
xpsr |= XPSR_SPREALIGN;
}
static void do_v7m_exception_exit(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
- uint32_t type;
+ CPUState *cs = CPU(cpu);
+ uint32_t excret;
uint32_t xpsr;
bool ufault = false;
- bool return_to_sp_process = false;
- bool return_to_handler = false;
+ bool sfault = false;
+ bool return_to_sp_process;
+ bool return_to_handler;
bool rettobase = false;
-
- /* We can only get here from an EXCP_EXCEPTION_EXIT, and
- * gen_bx_excret() enforces the architectural rule
- * that jumps to magic addresses don't have magic behaviour unless
- * we're in Handler mode (compare pseudocode BXWritePC()).
+ bool exc_secure = false;
+ bool return_to_secure;
+
+ /* If we're not in Handler mode then jumps to magic exception-exit
+ * addresses don't have magic behaviour. However for the v8M
+ * security extensions the magic secure-function-return has to
+ * work in thread mode too, so to avoid doing an extra check in
+ * the generated code we allow exception-exit magic to also cause the
+ * internal exception and bring us here in thread mode. Correct code
+ * will never try to do this (the following insn fetch will always
+ * fault) so the overhead of having taken an unnecessary exception
+ * doesn't matter.
*/
- assert(arm_v7m_is_handler_mode(env));
+ if (!arm_v7m_is_handler_mode(env)) {
+ return;
+ }
/* In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
* bit zero. But QEMU's callers split the target value up
* between env->regs[15] and env->thumb in
* gen_bx(). Reconstitute it.
*/
- type = env->regs[15];
+ excret = env->regs[15];
if (env->thumb) {
- type |= 1;
+ excret |= 1;
}
qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
" previous exception %d\n",
- type, env->v7m.exception);
+ excret, env->v7m.exception);
- if (extract32(type, 5, 23) != extract32(-1, 5, 23)) {
+ if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
- "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type);
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
+ excret);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ * we pick which FAULTMASK to clear.
+ */
+ if (!env->v7m.secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
+ sfault = 1;
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
+ excret &= ~R_V7M_EXCRET_ES_MASK;
+ }
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
- /* Auto-clear FAULTMASK on return from other than NMI */
- env->v7m.faultmask = 0;
+ /* Auto-clear FAULTMASK on return from other than NMI.
+ * If the security extension is implemented then this only
+ * happens if the raw execution priority is >= 0; the
+ * value of the ES bit in the exception return value indicates
+ * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
+ if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
+ env->v7m.faultmask[exc_secure] = 0;
+ }
+ } else {
+ env->v7m.faultmask[M_REG_NS] = 0;
+ }
}
- switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
+ exc_secure)) {
case -1:
/* attempt to exit an exception that isn't active */
ufault = true;
g_assert_not_reached();
}
- switch (type & 0xf) {
- case 1: /* Return to Handler */
- return_to_handler = true;
- break;
- case 13: /* Return to Thread using Process stack */
- return_to_sp_process = true;
- /* fall through */
- case 9: /* Return to Thread using Main stack */
- if (!rettobase &&
- !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
+ return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (excret & R_V7M_EXCRET_S_MASK);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ * we choose to take the UsageFault.
+ */
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
+ (excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
+ ufault = true;
+ }
+ }
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
+ ufault = true;
+ }
+ } else {
+ /* For v7M we only recognize certain combinations of the low bits */
+ switch (excret & 0xf) {
+ case 1: /* Return to Handler */
+ break;
+ case 13: /* Return to Thread using Process stack */
+ case 9: /* Return to Thread using Main stack */
+ /* We only need to check NONBASETHRDENA for v7M, because in
+ * v8M this bit does not exist (it is RES1).
+ */
+ if (!rettobase &&
+ !(env->v7m.ccr[env->v7m.secure] &
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ ufault = true;
+ }
+ break;
+ default:
ufault = true;
}
- break;
- default:
- ufault = true;
+ }
+
+ if (sfault) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed EXC_RETURN.ES validity check\n");
+ return;
}
if (ufault) {
/* Bad exception return: instead of popping the exception
* stack, directly take a usage fault on the current stack.
*/
- env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- v7m_exception_taken(cpu, type | 0xf0000000);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ v7m_exception_taken(cpu, excret, true);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
return;
}
- /* Switch to the target stack. */
- switch_v7m_sp(env, return_to_sp_process);
- /* Pop registers. */
- env->regs[0] = v7m_pop(env);
- env->regs[1] = v7m_pop(env);
- env->regs[2] = v7m_pop(env);
- env->regs[3] = v7m_pop(env);
- env->regs[12] = v7m_pop(env);
- env->regs[14] = v7m_pop(env);
- env->regs[15] = v7m_pop(env);
- if (env->regs[15] & 1) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "M profile return from interrupt with misaligned "
- "PC is UNPREDICTABLE\n");
- /* Actual hardware seems to ignore the lsbit, and there are several
- * RTOSes out there which incorrectly assume the r15 in the stack
- * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+ /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+ * Handler mode (and will be until we write the new XPSR.Interrupt
+ * field) this does not switch around the current stack pointer.
+ */
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
+ switch_v7m_security_state(env, return_to_secure);
+
+ {
+ /* The stack pointer we should be reading the exception frame from
+ * depends on bits in the magic exception return type value (and
+ * for v8M isn't necessarily the stack pointer we will eventually
+ * end up resuming execution with). Get a pointer to the location
+ * in the CPU state struct where the SP we need is currently being
+ * stored; we will use and modify it in place.
+ * We use this limited C variable scope so we don't accidentally
+ * use 'frame_sp_p' after we do something that makes it invalid.
+ */
+ uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
+ return_to_secure,
+ !return_to_handler,
+ return_to_sp_process);
+ uint32_t frameptr = *frame_sp_p;
+
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile exception return with non-8-aligned SP "
+ "for destination state is UNPREDICTABLE\n");
+ }
+
+ /* Do we need to pop callee-saved registers? */
+ if (return_to_secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
+ uint32_t expected_sig = 0xfefa125b;
+ uint32_t actual_sig = ldl_phys(cs->as, frameptr);
+
+ if (expected_sig != actual_sig) {
+ /* Take a SecureFault on the current stack */
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed exception return integrity "
+ "signature check\n");
+ return;
+ }
+
+ env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
+ env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
+ env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
+ env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
+ env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
+ env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
+ env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
+ env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
+
+ frameptr += 0x28;
+ }
+
+ /* Pop registers. TODO: make these accesses use the correct
+ * attributes and address space (S/NS, priv/unpriv) and handle
+ * memory transaction failures.
*/
- env->regs[15] &= ~1U;
+ env->regs[0] = ldl_phys(cs->as, frameptr);
+ env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
+ env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
+ env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
+ env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
+ env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
+ env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+
+ /* Returning from an exception with a PC with bit 0 set is defined
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+ * the lsbit, and there are several RTOSes out there which incorrectly
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+ * complain about the badly behaved guest.
+ */
+ if (env->regs[15] & 1) {
+ env->regs[15] &= ~1U;
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE on v7M\n");
+ }
+ }
+
+ xpsr = ldl_phys(cs->as, frameptr + 0x1c);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* For v8M we have to check whether the xPSR exception field
+ * matches the EXCRET value for return to handler/thread
+ * before we commit to changing the SP and xPSR.
+ */
+ bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
+ if (return_to_handler != will_be_handler) {
+ /* Take an INVPC UsageFault on the current stack.
+ * By this point we will have switched to the security state
+ * for the background state, so this UsageFault will target
+ * that state.
+ */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: failed exception return integrity "
+ "check\n");
+ return;
+ }
+ }
+
+ /* Commit to consuming the stack frame */
+ frameptr += 0x20;
+ /* Undo stack alignment (the SPREALIGN bit indicates that the original
+ * pre-exception SP was not 8-aligned and we added a padding word to
+ * align it, so we undo this by ORing in the bit that increases it
+ * from the current 8-aligned value to the 8-unaligned value. (Adding 4
+ * would work too but a logical OR is how the pseudocode specifies it.)
+ */
+ if (xpsr & XPSR_SPREALIGN) {
+ frameptr |= 4;
+ }
+ *frame_sp_p = frameptr;
}
- xpsr = v7m_pop(env);
+ /* This xpsr_write() will invalidate frame_sp_p as it may switch stack */
xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
- /* Undo stack alignment. */
- if (xpsr & XPSR_SPREALIGN) {
- env->regs[13] |= 4;
- }
/* The restored xPSR exception field will be zero if we're
* resuming in Thread mode. If that doesn't match what the
- * exception return type specified then this is a UsageFault.
+ * exception return excret specified then this is a UsageFault.
+ * v7M requires we make this check here; v8M did it earlier.
*/
if (return_to_handler != arm_v7m_is_handler_mode(env)) {
- /* Take an INVPC UsageFault by pushing the stack again. */
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+ /* Take an INVPC UsageFault by pushing the stack again;
+ * we know we're v7M so this is never a Secure UsageFault.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_V8));
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, type | 0xf0000000);
+ v7m_exception_taken(cpu, excret, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
return;
}
/* Otherwise, we have a successful exception exit. */
+ arm_clear_exclusive(env);
qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+ /* v8M security extensions magic function return.
+ * We may either:
+ * (1) throw an exception (longjump)
+ * (2) return true if we successfully handled the function return
+ * (3) return false if we failed a consistency check and have
+ * pended a UsageFault that needs to be taken now
+ *
+ * At this point the magic return value is split between env->regs[15]
+ * and env->thumb. We don't bother to reconstitute it because we don't
+ * need it (all values are handled the same way).
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t newpc, newpsr, newpsr_exc;
+
+ qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+ {
+ bool threadmode, spsel;
+ TCGMemOpIdx oi;
+ ARMMMUIdx mmu_idx;
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ /* Pull the return address and IPSR from the Secure stack */
+ threadmode = !arm_v7m_is_handler_mode(env);
+ spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+ frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+ frameptr = *frame_sp_p;
+
+ /* These loads may throw an exception (for MPU faults). We want to
+ * do them as secure, so work out what MMU index that is.
+ */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+ oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
+ newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
+ newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+
+ /* Consistency checks on new IPSR */
+ newpsr_exc = newpsr & XPSR_EXCP;
+ if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+ (env->v7m.exception == 1 && newpsr_exc != 0))) {
+ /* Pend the fault and tell our caller to take it */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking INVPC UsageFault: "
+ "IPSR consistency check failed\n");
+ return false;
+ }
+
+ *frame_sp_p = frameptr + 8;
+ }
+
+ /* This invalidates frame_sp_p */
+ switch_v7m_security_state(env, true);
+ env->v7m.exception = newpsr_exc;
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ if (newpsr & XPSR_SFPA) {
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+ }
+ xpsr_write(env, 0, XPSR_IT);
+ env->thumb = newpc & 1;
+ env->regs[15] = newpc & ~1;
+
+ qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+ return true;
+}
+
static void arm_log_exception(int idx)
{
if (qemu_loglevel_mask(CPU_LOG_INT)) {
}
}
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint16_t *insn)
+{
+ /* Load a 16-bit portion of a v7M instruction, returning true on success,
+ * or false on failure (in which case we will have pended the appropriate
+ * exception).
+ * We need to do the instruction fetch's MPU and SAU checks
+ * like this because there is no MMU index that would allow
+ * doing the load with a single function call. Instead we must
+ * first check that the security attributes permit the load
+ * and that they don't mismatch on the two halves of the instruction,
+ * and then we do the load as a secure load (ie using the security
+ * attributes of the address, not the CPU, as architecturally required).
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ V8M_SAttributes sattrs = {};
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ uint32_t fsr;
+
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+ if (!sattrs.nsc || sattrs.ns) {
+ /* This must be the second half of the insn, and it straddles a
+ * region boundary with the second half not being S&NSC.
+ */
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+ }
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+ &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
+ /* the MPU lookup failed */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+ return false;
+ }
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+ return false;
+ }
+ return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+ /* Check whether this attempt to execute code in a Secure & NS-Callable
+ * memory region is for an SG instruction; if so, then emulate the
+ * effect of the SG instruction and return true. Otherwise pend
+ * the correct kind of exception and return false.
+ */
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx;
+ uint16_t insn;
+
+ /* We should never get here unless get_phys_addr_pmsav8() caused
+ * an exception for NS executing in S&NSC memory.
+ */
+ assert(!env->v7m.secure);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+ return false;
+ }
+
+ if (!env->thumb) {
+ goto gen_invep;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction first half (we choose the IMPDEF
+ * early-SG-check option).
+ */
+ goto gen_invep;
+ }
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+ return false;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction second half (yes, both halves of the SG
+ * insn have the same hex value)
+ */
+ goto gen_invep;
+ }
+
+ /* OK, we have confirmed that we really have an SG instruction.
+ * We know we're NS in S memory so don't need to repeat those checks.
+ */
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+ ", executing it\n", env->regs[15]);
+ env->regs[14] &= ~1;
+ switch_v7m_security_state(env, true);
+ xpsr_write(env, 0, XPSR_IT);
+ env->regs[15] += 4;
+ return true;
+
+gen_invep:
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+}
+
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
handle it. */
switch (cs->exception_index) {
case EXCP_UDEF:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
break;
case EXCP_NOCP:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
break;
case EXCP_INVSTATE:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
break;
case EXCP_SWI:
/* The PC already points to the next instruction. */
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
* raises the fault, in the A profile short-descriptor format.
*/
switch (env->exception.fsr & 0xf) {
+ case M_FAKE_FSR_NSC_EXEC:
+ /* Exception generated when we try to execute code at an address
+ * which is marked as Secure & Non-Secure Callable and the CPU
+ * is in the Non-Secure state. The only instruction which can
+ * be executed like this is SG (and that only if both halves of
+ * the SG instruction have the same security attributes.)
+ * Everything else must generate an INVEP SecureFault, so we
+ * emulate the SG instruction here.
+ */
+ if (v7m_handle_execute_nsc(cpu)) {
+ return;
+ }
+ break;
+ case M_FAKE_FSR_SFAULT:
+ /* Various flavours of SecureFault for attempts to execute or
+ * access data in the wrong security state.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ if (env->v7m.secure) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVTRAN\n");
+ } else {
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ }
+ break;
+ case EXCP_DATA_ABORT:
+ /* This must be an NS access to S memory */
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.AUVIOL\n");
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ break;
case 0x8: /* External Abort */
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
- env->v7m.cfsr |= R_V7M_CFSR_PRECISERR_MASK;
- qemu_log_mask(CPU_LOG_INT, "...with CFSR.PRECISERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
break;
case EXCP_DATA_ABORT:
- env->v7m.cfsr |=
- (R_V7M_CFSR_IBUSERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
env->v7m.bfar = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT,
- "...with CFSR.IBUSERR and BFAR 0x%x\n",
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
env->v7m.bfar);
break;
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
default:
/* All other FSR values are either MPU faults or "can't happen
*/
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
- env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
break;
case EXCP_DATA_ABORT:
- env->v7m.cfsr |=
+ env->v7m.cfsr[env->v7m.secure] |=
(R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
- env->v7m.mmfar = env->exception.vaddress;
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT,
"...with CFSR.DACCVIOL and MMFAR 0x%x\n",
- env->v7m.mmfar);
+ env->v7m.mmfar[env->v7m.secure]);
break;
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
+ env->v7m.secure);
break;
}
break;
return;
}
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
break;
case EXCP_IRQ:
break;
case EXCP_EXCEPTION_EXIT:
- do_v7m_exception_exit(cpu);
- return;
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
+ /* Must be v8M security extension function return */
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+ if (do_v7m_function_return(cpu)) {
+ return;
+ }
+ } else {
+ do_v7m_exception_exit(cpu);
+ return;
+ }
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
- lr = 0xfffffff1;
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
- lr |= 4;
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK;
+ /* The S bit indicates whether we should return to Secure
+ * or NonSecure (ie our current state).
+ * The ES bit indicates whether we're taking this exception
+ * to Secure or NonSecure (ie our target state). We set it
+ * later, in v7m_exception_taken().
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
+ * This corresponds to the ARM ARM pseudocode for v8M setting
+ * some LR bits in PushStack() and some in ExceptionTaken();
+ * the distinction matters for the tailchain cases where we
+ * can take an exception without pushing the stack.
+ */
+ if (env->v7m.secure) {
+ lr |= R_V7M_EXCRET_S_MASK;
+ }
+ } else {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_S_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK |
+ R_V7M_EXCRET_ES_MASK;
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
}
if (!arm_v7m_is_handler_mode(env)) {
- lr |= 8;
+ lr |= R_V7M_EXCRET_MODE_MASK;
}
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, lr);
+ v7m_exception_taken(cpu, lr, false);
qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
case ARMMMUIdx_S1SE1:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
case ARMMMUIdx_MSUser:
return 1;
default:
}
}
-/* Return true if this address translation regime is secure */
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_S12NSE0:
- case ARMMMUIdx_S12NSE1:
- case ARMMMUIdx_S1NSE0:
- case ARMMMUIdx_S1NSE1:
- case ARMMMUIdx_S1E2:
- case ARMMMUIdx_S2NS:
- case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
- case ARMMMUIdx_MUser:
- return false;
- case ARMMMUIdx_S1E3:
- case ARMMMUIdx_S1SE0:
- case ARMMMUIdx_S1SE1:
- case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
- case ARMMMUIdx_MSUser:
- return true;
- default:
- g_assert_not_reached();
- }
-}
-
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
ARMMMUIdx mmu_idx)
{
if (arm_feature(env, ARM_FEATURE_M)) {
- switch (env->v7m.mpu_ctrl &
+ switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
/* Enabled, but not for HardFault and NMI */
- return mmu_idx == ARMMMUIdx_MNegPri ||
- mmu_idx == ARMMMUIdx_MSNegPri;
+ return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
/* Enabled for all cases */
return false;
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
return true;
default:
return false;
int ret;
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fsr, fi);
+ &txattrs, &s2prot, &s2size, fsr, fi, NULL);
if (ret) {
fi->s2addr = addr;
fi->stage2 = true;
return true;
}
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ * .../S2ConvertAttrsHints()
+ */
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
+ uint8_t loattr = extract32(s2attrs, 0, 2);
+ uint8_t hihint = 0, lohint = 0;
+
+ if (hiattr != 0) { /* normal memory */
+ if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
+ hiattr = loattr = 1; /* non-cacheable */
+ } else {
+ if (hiattr != 1) { /* Write-through or write-back */
+ hihint = 3; /* RW allocate */
+ }
+ if (loattr != 1) { /* Write-through or write-back */
+ lohint = 3; /* RW allocate */
+ }
+ }
+ }
+
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
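A few hand-worked cases of this conversion, restated as a standalone check with the HCR_EL2.CD test passed in explicitly (illustrative code, not QEMU API):

#include <stdint.h>
#include <assert.h>

/* Standalone restatement of convert_stage2_attrs() above. */
static uint8_t s2_to_s1_attrs(int cache_disabled, uint8_t s2attrs)
{
    uint8_t hiattr = (s2attrs >> 2) & 3;
    uint8_t loattr = s2attrs & 3;
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) {                   /* normal memory */
        if (cache_disabled) {
            hiattr = loattr = 1;         /* non-cacheable */
        } else {
            if (hiattr != 1) {           /* write-through or write-back */
                hihint = 3;              /* RW allocate */
            }
            if (loattr != 1) {
                lohint = 3;
            }
        }
    }
    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

int main(void)
{
    assert(s2_to_s1_attrs(0, 0xf) == 0xff); /* WB/WB -> WB RW-allocate */
    assert(s2_to_s1_attrs(0, 0x5) == 0x44); /* NC/NC -> NC */
    assert(s2_to_s1_attrs(0, 0x0) == 0x00); /* Device -> Device nGnRnE */
    return 0;
}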
+
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
*/
txattrs->secure = false;
}
+
+ if (cacheattrs != NULL) {
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ cacheattrs->attrs = convert_stage2_attrs(env,
+ extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ }
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+ }
+
*phys_ptr = descaddr;
*page_size_ptr = page_size;
return false;
}
if (arm_feature(env, ARM_FEATURE_M)) {
- return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
+ & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
} else {
return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
return !(*prot & (1 << access_type));
}
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+static bool v8m_is_sau_exempt(CPUARMState *env,
+ uint32_t address, MMUAccessType access_type)
+{
+ /* The architecture specifies that certain address ranges are
+ * exempt from v8M SAU/IDAU checks.
+ */
+ return
+ (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+ (address >= 0xe0000000 && address <= 0xe0002fff) ||
+ (address >= 0xe000e000 && address <= 0xe000efff) ||
+ (address >= 0xe002e000 && address <= 0xe002efff) ||
+ (address >= 0xe0040000 && address <= 0xe0041fff) ||
+ (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs)
+{
+ /* Look up the security attributes for this address. Compare the
+ * pseudocode SecurityCheck() function.
+ * We assume the caller has zero-initialized *sattrs.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int r;
+
+ /* TODO: implement IDAU */
+
+ if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+ /* 0xf0000000..0xffffffff is always S for insn fetches */
+ return;
+ }
+
+ if (v8m_is_sau_exempt(env, address, access_type)) {
+ sattrs->ns = !regime_is_secure(env, mmu_idx);
+ return;
+ }
+
+ switch (env->sau.ctrl & 3) {
+ case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+ break;
+ case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+ sattrs->ns = true;
+ break;
+ default: /* SAU.ENABLE == 1 */
+ for (r = 0; r < cpu->sau_sregion; r++) {
+ if (env->sau.rlar[r] & 1) {
+ uint32_t base = env->sau.rbar[r] & ~0x1f;
+ uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+ if (base <= address && limit >= address) {
+ if (sattrs->srvalid) {
+ /* If we hit in more than one region then we must report
+ * as Secure, not NS-Callable, with no valid region
+ * number info.
+ */
+ sattrs->ns = false;
+ sattrs->nsc = false;
+ sattrs->sregion = 0;
+ sattrs->srvalid = false;
+ break;
+ } else {
+ if (env->sau.rlar[r] & 2) {
+ sattrs->nsc = true;
+ } else {
+ sattrs->ns = true;
+ }
+ sattrs->srvalid = true;
+ sattrs->sregion = r;
+ }
+ }
+ }
+ }
+
+ /* TODO when we support the IDAU then it may override the result here */
+ break;
+ }
+}
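The base/limit arithmetic above gives SAU regions a 32-byte granularity: RBAR holds base[31:5], RLAR holds limit[31:5] plus the ENABLE (bit 0) and NSC (bit 1) flags. A standalone sketch of the region match (illustrative helper, not QEMU API):

#include <stdint.h>
#include <stdbool.h>
#include <assert.h>

/* Does an enabled SAU region cover this address? Matches the arithmetic
 * in v8m_security_lookup() above; rlar bit 1 (NSC) is not checked here.
 */
static bool sau_region_matches(uint32_t rbar, uint32_t rlar, uint32_t address)
{
    uint32_t base = rbar & ~0x1fu;
    uint32_t limit = rlar | 0x1fu;

    return (rlar & 1) && base <= address && address <= limit;
}

int main(void)
{
    /* region 0x20000000..0x2000ffff, enabled, NSC clear */
    assert(sau_region_matches(0x20000000, 0x2000ffe1, 0x20008000));
    assert(!sau_region_matches(0x20000000, 0x2000ffe1, 0x20010000));
    return 0;
}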
+
+static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, uint32_t *fsr, uint32_t *mregion)
{
+ /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full phys-to-virt translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
bool is_user = regime_is_user(env, mmu_idx);
+ uint32_t secure = regime_is_secure(env, mmu_idx);
int n;
int matchregion = -1;
bool hit = false;
*phys_ptr = address;
*prot = 0;
+ if (mregion) {
+ *mregion = -1;
+ }
/* Unlike the ARM ARM pseudocode, we don't need to check whether this
* was an exception vector read from the vector table (which is always
* with bits [4:0] all zeroes, but the limit address is bits
* [31:5] from the register with bits [4:0] all ones.
*/
- uint32_t base = env->pmsav8.rbar[n] & ~0x1f;
- uint32_t limit = env->pmsav8.rlar[n] | 0x1f;
+ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+ uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
- if (!(env->pmsav8.rlar[n] & 0x1)) {
+ if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
/* Region disabled */
continue;
}
/* hit using the background region */
get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
} else {
- uint32_t ap = extract32(env->pmsav8.rbar[matchregion], 1, 2);
- uint32_t xn = extract32(env->pmsav8.rbar[matchregion], 0, 1);
+ uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+ uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
if (m_is_system_region(env, address)) {
/* System space is always execute never */
/* We don't need to look the attribute up in the MAIR0/MAIR1
* registers because that only tells us about cacheability.
*/
+ if (mregion) {
+ *mregion = matchregion;
+ }
}
*fsr = 0x00d; /* Permission fault */
return !(*prot & (1 << access_type));
}
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, uint32_t *fsr)
+{
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ V8M_SAttributes sattrs = {};
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ *fsr = M_FAKE_FSR_SFAULT;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ }
+ }
+
+ return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+ txattrs, prot, fsr, NULL);
+}
+
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
return false;
}
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+ if (s1 == 4 || s2 == 4) {
+ /* non-cacheable has precedence */
+ return 4;
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+ /* stage 1 write-through takes precedence */
+ return s1;
+ } else if (extract32(s2, 2, 2) == 2) {
+ /* stage 2 write-through takes precedence, but the allocation hint
+ * is still taken from stage 1
+ */
+ return (2 << 2) | extract32(s1, 0, 2);
+ } else { /* write-back */
+ return s1;
+ }
+}
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
+ uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
+ ARMCacheAttrs ret;
+
+ /* Combine shareability attributes (table D4-43) */
+ if (s1.shareability == 2 || s2.shareability == 2) {
+ /* if either are outer-shareable, the result is outer-shareable */
+ ret.shareability = 2;
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
+ /* if either is inner-shareable, the result is inner-shareable */
+ ret.shareability = 3;
+ } else {
+ /* both non-shareable */
+ ret.shareability = 0;
+ }
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret.attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret.attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret.attrs = 8; /* nGRE */
+ } else {
+ ret.attrs = 0xc; /* GRE */
+ }
+
+ /* Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ */
+ ret.shareability = 2;
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+
+ if (ret.attrs == 0x44) {
+ /* Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ */
+ ret.shareability = 2;
+ }
+ }
+
+ return ret;
+}
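
End to end, combining a stage 1 Normal mapping with a stage 2 Device mapping exercises the Device branch above; a worked example with illustrative inputs:

/* combine_cacheattrs() example (illustrative):
 *   s1 = { .attrs = 0xff, .shareability = 3 }  Normal WB RW-alloc, Inner Shareable
 *   s2 = { .attrs = 0x04, .shareability = 0 }  Device-nGnRE
 *   -> { .attrs = 0x04, .shareability = 2 }    Device-nGnRE, Outer Shareable
 */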
+
/* get_phys_addr - get the physical address for this virtual address
*
* Find the physical address corresponding to the given virtual address,
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
* @fsr: set to the DFSR/IFSR value on failure
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
/* Call ourselves recursively to do the stage 1 and then stage 2
hwaddr ipa;
int s2_prot;
int ret;
+ ARMCacheAttrs cacheattrs2 = {};
ret = get_phys_addr(env, address, access_type,
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
- prot, page_size, fsr, fi);
+ prot, page_size, fsr, fi, cacheattrs);
/* If S1 fails or S2 is disabled, return early. */
if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
phys_ptr, attrs, &s2_prot,
- page_size, fsr, fi);
+ page_size, fsr, fi,
+ cacheattrs != NULL ? &cacheattrs2 : NULL);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
*prot &= s2_prot;
+
+ /* Combine the S1 and S2 cache attributes, if needed */
+ if (!ret && cacheattrs != NULL) {
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ }
+
return ret;
} else {
/*
if (arm_feature(env, ARM_FEATURE_V8)) {
/* PMSAv8 */
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ phys_ptr, attrs, prot, fsr);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
/* PMSAv7 */
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr, fi);
+ attrs, prot, page_size, fsr, fi, cacheattrs);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
attrs, prot, page_size, fsr, fi);
ret = get_phys_addr(env, address, access_type,
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fsr, fi);
+ &attrs, &prot, &page_size, fsr, fi, NULL);
if (!ret) {
/* Map a single [sub]page. */
phys_addr &= TARGET_PAGE_MASK;
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fsr, &fi);
+ attrs, &prot, &page_size, &fsr, &fi, NULL);
if (ret) {
return -1;
return xpsr_read(env) & mask;
break;
case 20: /* CONTROL */
- return env->v7m.control;
+ return env->v7m.control[env->v7m.secure];
+ case 0x94: /* CONTROL_NS */
+ /* We have to handle this here because unprivileged Secure code
+ * can read the NS CONTROL register.
+ */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.control[M_REG_NS];
}
if (el == 0) {
return 0; /* unprivileged reads others as zero */
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_msp;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_psp;
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.primask[M_REG_NS];
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.basepri[M_REG_NS];
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.faultmask[M_REG_NS];
+ case 0x98: /* SP_NS */
+ {
+ /* This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
+ return env->v7m.other_ss_psp;
+ } else {
+ return env->v7m.other_ss_msp;
+ }
+ }
+ default:
+ break;
+ }
+ }
+
switch (reg) {
case 8: /* MSP */
- return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
- env->v7m.other_sp : env->regs[13];
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
case 9: /* PSP */
- return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
- env->regs[13] : env->v7m.other_sp;
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
- return env->v7m.primask;
+ return env->v7m.primask[env->v7m.secure];
case 17: /* BASEPRI */
case 18: /* BASEPRI_MAX */
- return env->v7m.basepri;
+ return env->v7m.basepri[env->v7m.secure];
case 19: /* FAULTMASK */
- return env->v7m.faultmask;
+ return env->v7m.faultmask[env->v7m.secure];
default:
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
" register %d\n", reg);
return;
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_msp = val;
+ return;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_psp = val;
+ return;
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.primask[M_REG_NS] = val & 1;
+ return;
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
+ return;
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.faultmask[M_REG_NS] = val & 1;
+ return;
+ case 0x98: /* SP_NS */
+ {
+ /* This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+ if (!env->v7m.secure) {
+ return;
+ }
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
+ env->v7m.other_ss_psp = val;
+ } else {
+ env->v7m.other_ss_msp = val;
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
switch (reg) {
case 0 ... 7: /* xPSR sub-fields */
/* only APSR is actually writable */
}
break;
case 8: /* MSP */
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->v7m.other_sp = val;
} else {
env->regs[13] = val;
}
break;
case 9: /* PSP */
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->regs[13] = val;
} else {
env->v7m.other_sp = val;
}
break;
case 16: /* PRIMASK */
- env->v7m.primask = val & 1;
+ env->v7m.primask[env->v7m.secure] = val & 1;
break;
case 17: /* BASEPRI */
- env->v7m.basepri = val & 0xff;
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
break;
case 18: /* BASEPRI_MAX */
val &= 0xff;
- if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
- env->v7m.basepri = val;
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
+ env->v7m.basepri[env->v7m.secure] = val;
+ }
break;
case 19: /* FAULTMASK */
- env->v7m.faultmask = val & 1;
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
break;
case 20: /* CONTROL */
/* Writing to the SPSEL bit only has an effect if we are in
* thread mode; other bits can be updated by any privileged code.
- * switch_v7m_sp() deals with updating the SPSEL bit in
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
* env->v7m.control, so we only need to update the others.
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
+ * mode; for v8M the write is permitted but will have no effect.
*/
- if (!arm_v7m_is_handler_mode(env)) {
- switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
+ if (arm_feature(env, ARM_FEATURE_V8) ||
+ !arm_v7m_is_handler_mode(env)) {
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
- env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"
}
}
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
+ bool forceunpriv = op & 1;
+ bool alt = op & 2;
+ V8M_SAttributes sattrs = {};
+ uint32_t tt_resp;
+ bool r, rw, nsr, nsrw, mrvalid;
+ int prot;
+ MemTxAttrs attrs = {};
+ hwaddr phys_addr;
+ uint32_t fsr;
+ ARMMMUIdx mmu_idx;
+ uint32_t mregion;
+ bool targetpriv;
+ bool targetsec = env->v7m.secure;
+
+ /* Work out which security state and privilege level we're
+ * interested in...
+ */
+ if (alt) {
+ targetsec = !targetsec;
+ }
+
+ if (forceunpriv) {
+ targetpriv = false;
+ } else {
+ targetpriv = arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
+ }
+
+ /* ...and then figure out which MMU index this is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
+
+ /* We know that the MPU and SAU don't care about the access type
+ * for our purposes beyond that we don't want to claim to be
+ * an insn fetch, so we arbitrarily call this a read.
+ */
+
+ /* MPU region info only available for privileged or if
+ * inspecting the other MPU state.
+ */
+ if (arm_current_el(env) != 0 || alt) {
+ /* We can ignore the return value as prot is always set */
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
+ &phys_addr, &attrs, &prot, &fsr, &mregion);
+ if (mregion == -1) {
+ mrvalid = false;
+ mregion = 0;
+ } else {
+ mrvalid = true;
+ }
+ r = prot & PAGE_READ;
+ rw = prot & PAGE_WRITE;
+ } else {
+ r = false;
+ rw = false;
+ mrvalid = false;
+ mregion = 0;
+ }
+
+ if (env->v7m.secure) {
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+ nsr = sattrs.ns && r;
+ nsrw = sattrs.ns && rw;
+ } else {
+ sattrs.ns = true;
+ nsr = false;
+ nsrw = false;
+ }
+
+ tt_resp = (sattrs.iregion << 24) |
+ (sattrs.irvalid << 23) |
+ ((!sattrs.ns) << 22) |
+ (nsrw << 21) |
+ (nsr << 20) |
+ (rw << 19) |
+ (r << 18) |
+ (sattrs.srvalid << 17) |
+ (mrvalid << 16) |
+ (sattrs.sregion << 8) |
+ mregion;
+
+ return tt_resp;
+}
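
The layout assembled into tt_resp above is what guest software sees in the TT destination register. A hypothetical guest-side decode is sketched below; the helper names are made up, with bit positions taken from the tt_resp construction above:

/* Hypothetical decode of a TT/TTT/TTA/TTAT response word: */
static inline uint32_t tt_mregion(uint32_t resp) { return resp & 0xff; }
static inline uint32_t tt_sregion(uint32_t resp) { return (resp >> 8) & 0xff; }
static inline bool tt_mrvalid(uint32_t resp) { return resp & (1u << 16); }
static inline bool tt_srvalid(uint32_t resp) { return resp & (1u << 17); }
static inline bool tt_r(uint32_t resp)       { return resp & (1u << 18); }
static inline bool tt_rw(uint32_t resp)      { return resp & (1u << 19); }
static inline bool tt_nsr(uint32_t resp)     { return resp & (1u << 20); }
static inline bool tt_nsrw(uint32_t resp)    { return resp & (1u << 21); }
static inline bool tt_s(uint32_t resp)       { return resp & (1u << 22); }
static inline bool tt_irvalid(uint32_t resp) { return resp & (1u << 23); }
static inline uint32_t tt_iregion(uint32_t resp) { return resp >> 24; }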
+
#endif
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)