#ifndef CONFIG_USER_ONLY
static bool get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
ARMMMUFaultInfo *fi);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
ARMMMUFaultInfo *fi);
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
+
/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD 0x8
#define PMCRC 0x4
}
static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
- int access_type, ARMMMUIdx mmu_idx)
+ MMUAccessType access_type, ARMMMUIdx mmu_idx)
{
hwaddr phys_addr;
target_ulong page_size;
static void ats_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
- int access_type = ri->opc2 & 1;
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t par64;
ARMMMUIdx mmu_idx;
int el = arm_current_el(env);
static void ats1h_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- int access_type = ri->opc2 & 1;
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
uint64_t par64;
par64 = do_ats_write(env, value, access_type, ARMMMUIdx_S2NS);
static void ats_write64(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
- int access_type = ri->opc2 & 1;
+ MMUAccessType access_type = ri->opc2 & 1 ? MMU_DATA_STORE : MMU_DATA_LOAD;
ARMMMUIdx mmu_idx;
int secure = arm_is_secure_below_el3(env);
return 0;
}
- u32p += env->cp15.c6_rgnr;
+ u32p += env->pmsav7.rnr[M_REG_NS];
return *u32p;
}
return;
}
- u32p += env->cp15.c6_rgnr;
+ u32p += env->pmsav7.rnr[M_REG_NS];
tlb_flush(CPU(cpu)); /* Mappings may have changed - purge! */
*u32p = value;
}
-static void pmsav7_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- ARMCPU *cpu = arm_env_get_cpu(env);
- uint32_t *u32p = *(uint32_t **)raw_ptr(env, ri);
-
- if (!u32p) {
- return;
- }
-
- memset(u32p, 0, sizeof(*u32p) * cpu->pmsav7_dregion);
-}
-
static void pmsav7_rgnr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
}
static const ARMCPRegInfo pmsav7_cp_reginfo[] = {
+ /* Reset for all these registers is handled in arm_cpu_reset(),
+ * because the PMSAv7 is also used by M-profile CPUs, which do
+ * not register cpregs but still need the state to be reset.
+ */
{ .name = "DRBAR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_NO_RAW,
.fieldoffset = offsetof(CPUARMState, pmsav7.drbar),
- .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
{ .name = "DRSR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 2,
.access = PL1_RW, .type = ARM_CP_NO_RAW,
.fieldoffset = offsetof(CPUARMState, pmsav7.drsr),
- .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
{ .name = "DRACR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 1, .opc2 = 4,
.access = PL1_RW, .type = ARM_CP_NO_RAW,
.fieldoffset = offsetof(CPUARMState, pmsav7.dracr),
- .readfn = pmsav7_read, .writefn = pmsav7_write, .resetfn = pmsav7_reset },
+ .readfn = pmsav7_read, .writefn = pmsav7_write,
+ .resetfn = arm_cp_reset_ignore },
{ .name = "RGNR", .cp = 15, .crn = 6, .opc1 = 0, .crm = 2, .opc2 = 0,
.access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c6_rgnr),
- .writefn = pmsav7_rgnr_write },
+ .fieldoffset = offsetof(CPUARMState, pmsav7.rnr[M_REG_NS]),
+ .writefn = pmsav7_rgnr_write,
+ .resetfn = arm_cp_reset_ignore },
REGINFO_SENTINEL
};
if (arm_feature(env, ARM_FEATURE_EL3)) {
valid_mask &= ~HCR_HCD;
- } else {
+ } else if (cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ /* Architecturally HCR.TSC is RES0 if EL3 is not implemented.
+ * However, if we're using the SMC PSCI conduit then QEMU is
+     * effectively acting like EL3 firmware, so the guest at EL2
+     * should retain the ability to prevent EL1 from making SMC
+     * calls into the ersatz firmware; in that case HCR.TSC should
+     * be read/write.
+ */
valid_mask &= ~HCR_TSC;
}
}
}
-ARMCPU *cpu_arm_init(const char *cpu_model)
-{
- return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
-}
-
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
{
CPUState *cs = CPU(cpu);
return 0;
}
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
stl_phys(cs->as, env->regs[13], val);
}
-static uint32_t v7m_pop(CPUARMState *env)
+/* Return true if we're using the process stack pointer (not the MSP) */
+static bool v7m_using_psp(CPUARMState *env)
{
- CPUState *cs = CPU(arm_env_get_cpu(env));
- uint32_t val;
+ /* Handler mode always uses the main stack; for thread mode
+ * the CONTROL.SPSEL bit determines the answer.
+ * Note that in v7M it is not possible to be in Handler mode with
+ * CONTROL.SPSEL non-zero, but in v8M it is, so we must check both.
+ */
+ return !arm_v7m_is_handler_mode(env) &&
+ env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK;
+}
+
+/* Write to v7M CONTROL.SPSEL bit for the specified security bank.
+ * This may change the current stack pointer between Main and Process
+ * stack pointers if it is done for the CONTROL register for the current
+ * security state.
+ */
+static void write_v7m_control_spsel_for_secstate(CPUARMState *env,
+ bool new_spsel,
+ bool secstate)
+{
+ bool old_is_psp = v7m_using_psp(env);
- val = ldl_phys(cs->as, env->regs[13]);
- env->regs[13] += 4;
- return val;
+ env->v7m.control[secstate] =
+ deposit32(env->v7m.control[secstate],
+ R_V7M_CONTROL_SPSEL_SHIFT,
+ R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+
+ if (secstate == env->v7m.secure) {
+ bool new_is_psp = v7m_using_psp(env);
+ uint32_t tmp;
+
+ if (old_is_psp != new_is_psp) {
+ tmp = env->v7m.other_sp;
+ env->v7m.other_sp = env->regs[13];
+ env->regs[13] = tmp;
+ }
+ }
}
-/* Switch to V7M main or process stack pointer. */
-static void switch_v7m_sp(CPUARMState *env, bool new_spsel)
+/* Write to v7M CONTROL.SPSEL bit. This may change the current
+ * stack pointer between Main and Process stack pointers.
+ */
+static void write_v7m_control_spsel(CPUARMState *env, bool new_spsel)
{
+ write_v7m_control_spsel_for_secstate(env, new_spsel, env->v7m.secure);
+}
+
+void write_v7m_exception(CPUARMState *env, uint32_t new_exc)
+{
+ /* Write a new value to v7m.exception, thus transitioning into or out
+ * of Handler mode; this may result in a change of active stack pointer.
+ */
+ bool new_is_psp, old_is_psp = v7m_using_psp(env);
uint32_t tmp;
- bool old_spsel = env->v7m.control & R_V7M_CONTROL_SPSEL_MASK;
- if (old_spsel != new_spsel) {
+ env->v7m.exception = new_exc;
+
+ new_is_psp = v7m_using_psp(env);
+
+ if (old_is_psp != new_is_psp) {
tmp = env->v7m.other_sp;
env->v7m.other_sp = env->regs[13];
env->regs[13] = tmp;
+ }
+}
+
+/* Switch M profile security state between NS and S */
+static void switch_v7m_security_state(CPUARMState *env, bool new_secstate)
+{
+ uint32_t new_ss_msp, new_ss_psp;
+
+ if (env->v7m.secure == new_secstate) {
+ return;
+ }
- env->v7m.control = deposit32(env->v7m.control,
- R_V7M_CONTROL_SPSEL_SHIFT,
- R_V7M_CONTROL_SPSEL_LENGTH, new_spsel);
+ /* All the banked state is accessed by looking at env->v7m.secure
+ * except for the stack pointer; rearrange the SP appropriately.
+ */
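+    /* Note that env->v7m.other_sp holds whichever of MSP/PSP is not
+     * currently live in r13 for the active security state, while
+     * other_ss_msp/other_ss_psp hold both stack pointers for the
+     * inactive security state.
+     */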
+ new_ss_msp = env->v7m.other_ss_msp;
+ new_ss_psp = env->v7m.other_ss_psp;
+
+ if (v7m_using_psp(env)) {
+ env->v7m.other_ss_psp = env->regs[13];
+ env->v7m.other_ss_msp = env->v7m.other_sp;
+ } else {
+ env->v7m.other_ss_msp = env->regs[13];
+ env->v7m.other_ss_psp = env->v7m.other_sp;
+ }
+
+ env->v7m.secure = new_secstate;
+
+ if (v7m_using_psp(env)) {
+ env->regs[13] = new_ss_psp;
+ env->v7m.other_sp = new_ss_msp;
+ } else {
+ env->regs[13] = new_ss_msp;
+ env->v7m.other_sp = new_ss_psp;
}
}
-static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
+void HELPER(v7m_bxns)(CPUARMState *env, uint32_t dest)
+{
+ /* Handle v7M BXNS:
+ * - if the return value is a magic value, do exception return (like BX)
+ * - otherwise bit 0 of the return value is the target security state
+ */
+ if (dest >= 0xff000000) {
+ /* This is an exception return magic value; put it where
+ * do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
+ * Note that if we ever add gen_ss_advance() singlestep support to
+ * M profile this should count as an "instruction execution complete"
+ * event (compare gen_bx_excret_final_code()).
+ */
+ env->regs[15] = dest & ~1;
+ env->thumb = dest & 1;
+ HELPER(exception_internal)(env, EXCP_EXCEPTION_EXIT);
+ /* notreached */
+ }
+
+ /* translate.c should have made BXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ switch_v7m_security_state(env, dest & 1);
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+}
+
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* Handle v7M BLXNS:
+ * - bit 0 of the destination address is the target security state
+ */
+
+ /* At this point regs[15] is the address just after the BLXNS */
+ uint32_t nextinst = env->regs[15] | 1;
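+    /* Reserve room for the two-word frame pushed below: the return
+     * address and the partial saved PSR.
+     */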
+ uint32_t sp = env->regs[13] - 8;
+ uint32_t saved_psr;
+
+ /* translate.c will have made BLXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ if (dest & 1) {
+ /* target is Secure, so this is just a normal BLX,
+ * except that the low bit doesn't indicate Thumb/not.
+ */
+ env->regs[14] = nextinst;
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+ return;
+ }
+
+ /* Target is non-secure: first push a stack frame */
+ if (!QEMU_IS_ALIGNED(sp, 8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "BLXNS with misaligned SP is UNPREDICTABLE\n");
+ }
+
+ saved_psr = env->v7m.exception;
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+ saved_psr |= XPSR_SFPA;
+ }
+
+ /* Note that these stores can throw exceptions on MPU faults */
+ cpu_stl_data(env, sp, nextinst);
+ cpu_stl_data(env, sp + 4, saved_psr);
+
+ env->regs[13] = sp;
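+    /* Set LR to the FNC_RETURN magic value; a later BX to this address
+     * will pop the frame pushed above and return to Secure code.
+     */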
+ env->regs[14] = 0xfeffffff;
+ if (arm_v7m_is_handler_mode(env)) {
+ /* Write a dummy value to IPSR, to avoid leaking the current secure
+ * exception number to non-secure code. This is guaranteed not
+ * to cause write_v7m_exception() to actually change stacks.
+ */
+ write_v7m_exception(env, 1);
+ }
+ switch_v7m_security_state(env, 0);
+ env->thumb = 1;
+ env->regs[15] = dest;
+}
+
+static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
+ bool spsel)
+{
+ /* Return a pointer to the location where we currently store the
+ * stack pointer for the requested security state and thread mode.
+ * This pointer will become invalid if the CPU state is updated
+ * such that the stack pointers are switched around (eg changing
+ * the SPSEL control bit).
+ * Compare the v8M ARM ARM pseudocode LookUpSP_with_security_mode().
+ * Unlike that pseudocode, we require the caller to pass in the
+ * SPSEL control bit value; this is because we also use this
+ * function when handling the push of the callee-saves registers
+ * part of the v8M stack frame (pseudocode PushCalleeStack()),
+ * and in the tailchain codepath the SPSEL bit comes from the exception
+ * return magic LR value from the previous exception. The pseudocode
+ * opencodes the stack-selection in PushCalleeStack(), but we prefer
+ * to make this utility function generic enough to do the job.
+ */
+ bool want_psp = threadmode && spsel;
+
+ if (secure == env->v7m.secure) {
+ if (want_psp == v7m_using_psp(env)) {
+ return &env->regs[13];
+ } else {
+ return &env->v7m.other_sp;
+ }
+ } else {
+ if (want_psp) {
+ return &env->v7m.other_ss_psp;
+ } else {
+ return &env->v7m.other_ss_msp;
+ }
+ }
+}
+
+static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
MemTxResult result;
- hwaddr vec = env->v7m.vecbase + env->v7m.exception * 4;
+ hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
uint32_t addr;
addr = address_space_ldl(cs->as, vec,
* Since we don't model Lockup, we just report this guest error
* via cpu_abort().
*/
- cpu_abort(cs, "Failed to read from exception vector table "
- "entry %08x\n", (unsigned)vec);
+ cpu_abort(cs, "Failed to read from %s exception vector table "
+ "entry %08x\n", targets_secure ? "secure" : "nonsecure",
+ (unsigned)vec);
}
return addr;
}
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
+static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain)
+{
+ /* For v8M, push the callee-saves register part of the stack frame.
+ * Compare the v8M pseudocode PushCalleeStack().
+ * In the tailchaining case this may not be the current stack.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ if (dotailchain) {
+ frame_sp_p = get_v7m_sp_ptr(env, true,
+ lr & R_V7M_EXCRET_MODE_MASK,
+ lr & R_V7M_EXCRET_SPSEL_MASK);
+ } else {
+ frame_sp_p = &env->regs[13];
+ }
+
+ frameptr = *frame_sp_p - 0x28;
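+    /* The frame is 0x28 bytes: the integrity signature, a reserved
+     * word, then r4-r11 at offsets 0x8..0x24. The same signature value
+     * is checked when the frame is unstacked on exception return.
+     */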
+
+ stl_phys(cs->as, frameptr, 0xfefa125b);
+ stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
+ stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
+ stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
+ stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
+ stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
+ stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
+ stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
+ stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
+
+ *frame_sp_p = frameptr;
+}
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
{
/* Do the "take the exception" parts of exception entry,
* but not the pushing of state to the stack. This is
*/
CPUARMState *env = &cpu->env;
uint32_t addr;
+ bool targets_secure;
- armv7m_nvic_acknowledge_irq(env->nvic);
- switch_v7m_sp(env, 0);
+ targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (lr & R_V7M_EXCRET_S_MASK)) {
+ /* The background code (the owner of the registers in the
+             * exception frame) is Secure. This means it either has
+             * already pushed the callee-saves registers or now needs
+             * to push them.
+ */
+ if (targets_secure) {
+ if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+ /* We took an exception from Secure to NonSecure
+ * (which means the callee-saved registers got stacked)
+ * and are now tailchaining to a Secure exception.
+ * Clear DCRS so eventual return from this Secure
+ * exception unstacks the callee-saved registers.
+ */
+ lr &= ~R_V7M_EXCRET_DCRS_MASK;
+ }
+ } else {
+ /* We're going to a non-secure exception; push the
+ * callee-saves registers to the stack now, if they're
+ * not already saved.
+ */
+ if (lr & R_V7M_EXCRET_DCRS_MASK &&
+ !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
+ v7m_push_callee_stack(cpu, lr, dotailchain);
+ }
+ lr |= R_V7M_EXCRET_DCRS_MASK;
+ }
+ }
+
+ lr &= ~R_V7M_EXCRET_ES_MASK;
+ if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ lr |= R_V7M_EXCRET_ES_MASK;
+ }
+ lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+ if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+
+ /* Clear registers if necessary to prevent non-secure exception
+     * code from seeing register values left behind by secure code.
+ * Where register values become architecturally UNKNOWN we leave
+ * them with their previous values.
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (!targets_secure) {
+ /* Always clear the caller-saved registers (they have been
+ * pushed to the stack earlier in v7m_push_stack()).
+ * Clear callee-saved registers if the background code is
+ * Secure (in which case these regs were saved in
+ * v7m_push_callee_stack()).
+ */
+ int i;
+
+ for (i = 0; i < 13; i++) {
+ /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+ if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+ env->regs[i] = 0;
+ }
+ }
+ /* Clear EAPSR */
+ xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+ }
+ }
+ }
+
+ /* Switch to target security state -- must do this before writing SPSEL */
+ switch_v7m_security_state(env, targets_secure);
+ write_v7m_control_spsel(env, 0);
+ arm_clear_exclusive(env);
/* Clear IT bits */
env->condexec_bits = 0;
env->regs[14] = lr;
- addr = arm_v7m_load_vector(cpu);
+ addr = arm_v7m_load_vector(cpu, targets_secure);
env->regs[15] = addr & 0xfffffffe;
env->thumb = addr & 1;
}
uint32_t xpsr = xpsr_read(env);
/* Align stack pointer if the guest wants that */
- if ((env->regs[13] & 4) && (env->v7m.ccr & R_V7M_CCR_STKALIGN_MASK)) {
+ if ((env->regs[13] & 4) &&
+ (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_STKALIGN_MASK)) {
env->regs[13] -= 4;
- xpsr |= 0x200;
+ xpsr |= XPSR_SPREALIGN;
}
/* Switch to the handler mode. */
v7m_push(env, xpsr);
static void do_v7m_exception_exit(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
- uint32_t type;
+ CPUState *cs = CPU(cpu);
+ uint32_t excret;
uint32_t xpsr;
bool ufault = false;
- bool return_to_sp_process = false;
- bool return_to_handler = false;
+ bool sfault = false;
+ bool return_to_sp_process;
+ bool return_to_handler;
bool rettobase = false;
+ bool exc_secure = false;
+ bool return_to_secure;
/* We can only get here from an EXCP_EXCEPTION_EXIT, and
- * arm_v7m_do_unassigned_access() enforces the architectural rule
+ * gen_bx_excret() enforces the architectural rule
* that jumps to magic addresses don't have magic behaviour unless
* we're in Handler mode (compare pseudocode BXWritePC()).
*/
- assert(env->v7m.exception != 0);
+ assert(arm_v7m_is_handler_mode(env));
/* In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
* the target value up between env->regs[15] and env->thumb in
* gen_bx(). Reconstitute it.
*/
- type = env->regs[15];
+ excret = env->regs[15];
if (env->thumb) {
- type |= 1;
+ excret |= 1;
}
qemu_log_mask(CPU_LOG_INT, "Exception return: magic PC %" PRIx32
" previous exception %d\n",
- type, env->v7m.exception);
+ excret, env->v7m.exception);
- if (extract32(type, 5, 23) != extract32(-1, 5, 23)) {
+ if ((excret & R_V7M_EXCRET_RES1_MASK) != R_V7M_EXCRET_RES1_MASK) {
qemu_log_mask(LOG_GUEST_ERROR, "M profile: zero high bits in exception "
- "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n", type);
+ "exit PC value 0x%" PRIx32 " are UNPREDICTABLE\n",
+ excret);
+ }
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ * we pick which FAULTMASK to clear.
+ */
+ if (!env->v7m.secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
+            sfault = true;
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
+ excret &= ~R_V7M_EXCRET_ES_MASK;
+ }
}
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
- /* Auto-clear FAULTMASK on return from other than NMI */
- env->daif &= ~PSTATE_F;
+ /* Auto-clear FAULTMASK on return from other than NMI.
+ * If the security extension is implemented then this only
+ * happens if the raw execution priority is >= 0; the
+ * value of the ES bit in the exception return value indicates
+ * which security state's faultmask to clear. (v8M ARM ARM R_KBNF.)
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ exc_secure = excret & R_V7M_EXCRET_ES_MASK;
+ if (armv7m_nvic_raw_execution_priority(env->nvic) >= 0) {
+ env->v7m.faultmask[exc_secure] = 0;
+ }
+ } else {
+ env->v7m.faultmask[M_REG_NS] = 0;
+ }
}
- switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception)) {
+ switch (armv7m_nvic_complete_irq(env->nvic, env->v7m.exception,
+ exc_secure)) {
case -1:
/* attempt to exit an exception that isn't active */
ufault = true;
g_assert_not_reached();
}
- switch (type & 0xf) {
- case 1: /* Return to Handler */
- return_to_handler = true;
- break;
- case 13: /* Return to Thread using Process stack */
- return_to_sp_process = true;
- /* fall through */
- case 9: /* Return to Thread using Main stack */
- if (!rettobase &&
- !(env->v7m.ccr & R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
+ return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (excret & R_V7M_EXCRET_S_MASK);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ * we choose to take the UsageFault.
+ */
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
+ (excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
+ ufault = true;
+ }
+ }
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
+ ufault = true;
+ }
+ } else {
+ /* For v7M we only recognize certain combinations of the low bits */
+ switch (excret & 0xf) {
+ case 1: /* Return to Handler */
+ break;
+ case 13: /* Return to Thread using Process stack */
+ case 9: /* Return to Thread using Main stack */
+ /* We only need to check NONBASETHRDENA for v7M, because in
+ * v8M this bit does not exist (it is RES1).
+ */
+ if (!rettobase &&
+ !(env->v7m.ccr[env->v7m.secure] &
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ ufault = true;
+ }
+ break;
+ default:
ufault = true;
}
- break;
- default:
- ufault = true;
+ }
+
+ if (sfault) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed EXC_RETURN.ES validity check\n");
+ return;
}
if (ufault) {
/* Bad exception return: instead of popping the exception
* stack, directly take a usage fault on the current stack.
*/
- env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- v7m_exception_taken(cpu, type | 0xf0000000);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ v7m_exception_taken(cpu, excret, true);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
return;
}
- /* Switch to the target stack. */
- switch_v7m_sp(env, return_to_sp_process);
- /* Pop registers. */
- env->regs[0] = v7m_pop(env);
- env->regs[1] = v7m_pop(env);
- env->regs[2] = v7m_pop(env);
- env->regs[3] = v7m_pop(env);
- env->regs[12] = v7m_pop(env);
- env->regs[14] = v7m_pop(env);
- env->regs[15] = v7m_pop(env);
- if (env->regs[15] & 1) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "M profile return from interrupt with misaligned "
- "PC is UNPREDICTABLE\n");
- /* Actual hardware seems to ignore the lsbit, and there are several
- * RTOSes out there which incorrectly assume the r15 in the stack
- * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
+ /* Set CONTROL.SPSEL from excret.SPSEL. Since we're still in
+ * Handler mode (and will be until we write the new XPSR.Interrupt
+ * field) this does not switch around the current stack pointer.
+ */
+ write_v7m_control_spsel_for_secstate(env, return_to_sp_process, exc_secure);
+
+ switch_v7m_security_state(env, return_to_secure);
+
+ {
+ /* The stack pointer we should be reading the exception frame from
+ * depends on bits in the magic exception return type value (and
+ * for v8M isn't necessarily the stack pointer we will eventually
+ * end up resuming execution with). Get a pointer to the location
+ * in the CPU state struct where the SP we need is currently being
+ * stored; we will use and modify it in place.
+ * We use this limited C variable scope so we don't accidentally
+ * use 'frame_sp_p' after we do something that makes it invalid.
+ */
+ uint32_t *frame_sp_p = get_v7m_sp_ptr(env,
+ return_to_secure,
+ !return_to_handler,
+ return_to_sp_process);
+ uint32_t frameptr = *frame_sp_p;
+
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile exception return with non-8-aligned SP "
+ "for destination state is UNPREDICTABLE\n");
+ }
+
+ /* Do we need to pop callee-saved registers? */
+ if (return_to_secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
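+            /* This must match the integrity signature that
+             * v7m_push_callee_stack() wrote on exception entry.
+             */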
+ uint32_t expected_sig = 0xfefa125b;
+ uint32_t actual_sig = ldl_phys(cs->as, frameptr);
+
+ if (expected_sig != actual_sig) {
+ /* Take a SecureFault on the current stack */
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed exception return integrity "
+ "signature check\n");
+ return;
+ }
+
+ env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
+ env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
+ env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
+ env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
+ env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
+ env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
+ env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
+ env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
+
+ frameptr += 0x28;
+ }
+
+ /* Pop registers. TODO: make these accesses use the correct
+ * attributes and address space (S/NS, priv/unpriv) and handle
+ * memory transaction failures.
*/
- env->regs[15] &= ~1U;
+ env->regs[0] = ldl_phys(cs->as, frameptr);
+ env->regs[1] = ldl_phys(cs->as, frameptr + 0x4);
+ env->regs[2] = ldl_phys(cs->as, frameptr + 0x8);
+ env->regs[3] = ldl_phys(cs->as, frameptr + 0xc);
+ env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
+ env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
+ env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+
+ /* Returning from an exception with a PC with bit 0 set is defined
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+ * the lsbit, and there are several RTOSes out there which incorrectly
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+ * complain about the badly behaved guest.
+ */
+ if (env->regs[15] & 1) {
+ env->regs[15] &= ~1U;
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE on v7M\n");
+ }
+ }
+
+ xpsr = ldl_phys(cs->as, frameptr + 0x1c);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* For v8M we have to check whether the xPSR exception field
+ * matches the EXCRET value for return to handler/thread
+ * before we commit to changing the SP and xPSR.
+ */
+ bool will_be_handler = (xpsr & XPSR_EXCP) != 0;
+ if (return_to_handler != will_be_handler) {
+ /* Take an INVPC UsageFault on the current stack.
+             * By this point we will have switched to the background
+             * code's security state, so this UsageFault will target
+             * that state.
+ */
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
+ "stackframe: failed exception return integrity "
+ "check\n");
+ return;
+ }
+ }
+
+ /* Commit to consuming the stack frame */
+ frameptr += 0x20;
+ /* Undo stack alignment (the SPREALIGN bit indicates that the original
+ * pre-exception SP was not 8-aligned and we added a padding word to
+ * align it, so we undo this by ORing in the bit that increases it
+ * from the current 8-aligned value to the 8-unaligned value. (Adding 4
+ * would work too but a logical OR is how the pseudocode specifies it.)
+ */
+ if (xpsr & XPSR_SPREALIGN) {
+ frameptr |= 4;
+ }
+ *frame_sp_p = frameptr;
}
- xpsr = v7m_pop(env);
- xpsr_write(env, xpsr, 0xfffffdff);
- /* Undo stack alignment. */
- if (xpsr & 0x200)
- env->regs[13] |= 4;
+    /* This xpsr_write() may change v7m.exception and hence switch the
+     * active stack pointer; that is why frame_sp_p's scope ended above,
+     * as the pointer would now be stale.
+     */
+ xpsr_write(env, xpsr, ~XPSR_SPREALIGN);
/* The restored xPSR exception field will be zero if we're
* resuming in Thread mode. If that doesn't match what the
- * exception return type specified then this is a UsageFault.
+ * exception return excret specified then this is a UsageFault.
+ * v7M requires we make this check here; v8M did it earlier.
*/
- if (return_to_handler == (env->v7m.exception == 0)) {
- /* Take an INVPC UsageFault by pushing the stack again. */
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_INVPC_MASK;
+ if (return_to_handler != arm_v7m_is_handler_mode(env)) {
+ /* Take an INVPC UsageFault by pushing the stack again;
+ * we know we're v7M so this is never a Secure UsageFault.
+ */
+ assert(!arm_feature(env, ARM_FEATURE_V8));
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, type | 0xf0000000);
+ v7m_exception_taken(cpu, excret, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
return;
}
/* Otherwise, we have a successful exception exit. */
+ arm_clear_exclusive(env);
qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
}
}
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint16_t *insn)
+{
+ /* Load a 16-bit portion of a v7M instruction, returning true on success,
+ * or false on failure (in which case we will have pended the appropriate
+ * exception).
+ * We need to do the instruction fetch's MPU and SAU checks
+ * like this because there is no MMU index that would allow
+ * doing the load with a single function call. Instead we must
+ * first check that the security attributes permit the load
+ * and that they don't mismatch on the two halves of the instruction,
+ * and then we do the load as a secure load (ie using the security
+ * attributes of the address, not the CPU, as architecturally required).
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ V8M_SAttributes sattrs = {};
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ uint32_t fsr;
+
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+ if (!sattrs.nsc || sattrs.ns) {
+ /* This must be the second half of the insn, and it straddles a
+ * region boundary with the second half not being S&NSC.
+ */
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+ }
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+ &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
+ /* the MPU lookup failed */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+ return false;
+ }
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+ return false;
+ }
+ return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+ /* Check whether this attempt to execute code in a Secure & NS-Callable
+ * memory region is for an SG instruction; if so, then emulate the
+ * effect of the SG instruction and return true. Otherwise pend
+ * the correct kind of exception and return false.
+ */
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx;
+ uint16_t insn;
+
+ /* We should never get here unless get_phys_addr_pmsav8() caused
+ * an exception for NS executing in S&NSC memory.
+ */
+ assert(!env->v7m.secure);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+ return false;
+ }
+
+ if (!env->thumb) {
+ goto gen_invep;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction first half (we choose the IMPDEF
+ * early-SG-check option).
+ */
+ goto gen_invep;
+ }
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+ return false;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction second half (yes, both halves of the SG
+ * insn have the same hex value)
+ */
+ goto gen_invep;
+ }
+
+ /* OK, we have confirmed that we really have an SG instruction.
+ * We know we're NS in S memory so don't need to repeat those checks.
+ */
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+ ", executing it\n", env->regs[15]);
+ env->regs[14] &= ~1;
+ switch_v7m_security_state(env, true);
+ xpsr_write(env, 0, XPSR_IT);
+ env->regs[15] += 4;
+ return true;
+
+gen_invep:
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+}
+
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
arm_log_exception(cs->exception_index);
- lr = 0xfffffff1;
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
- lr |= 4;
- }
- if (env->v7m.exception == 0)
- lr |= 8;
-
/* For exceptions we just mark as pending on the NVIC, and let that
handle it. */
switch (cs->exception_index) {
case EXCP_UDEF:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_UNDEFINSTR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_UNDEFINSTR_MASK;
break;
case EXCP_NOCP:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_NOCP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_NOCP_MASK;
break;
case EXCP_INVSTATE:
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
- env->v7m.cfsr |= R_V7M_CFSR_INVSTATE_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVSTATE_MASK;
break;
case EXCP_SWI:
/* The PC already points to the next instruction. */
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SVC, env->v7m.secure);
break;
case EXCP_PREFETCH_ABORT:
case EXCP_DATA_ABORT:
* raises the fault, in the A profile short-descriptor format.
*/
switch (env->exception.fsr & 0xf) {
+ case M_FAKE_FSR_NSC_EXEC:
+ /* Exception generated when we try to execute code at an address
+ * which is marked as Secure & Non-Secure Callable and the CPU
+ * is in the Non-Secure state. The only instruction which can
+ * be executed like this is SG (and that only if both halves of
+ * the SG instruction have the same security attributes.)
+ * Everything else must generate an INVEP SecureFault, so we
+ * emulate the SG instruction here.
+ */
+ if (v7m_handle_execute_nsc(cpu)) {
+ return;
+ }
+ break;
+ case M_FAKE_FSR_SFAULT:
+ /* Various flavours of SecureFault for attempts to execute or
+ * access data in the wrong security state.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ if (env->v7m.secure) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVTRAN\n");
+ } else {
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ }
+ break;
+ case EXCP_DATA_ABORT:
+ /* This must be an NS access to S memory */
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.AUVIOL\n");
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ break;
case 0x8: /* External Abort */
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
- env->v7m.cfsr |= R_V7M_CFSR_PRECISERR_MASK;
- qemu_log_mask(CPU_LOG_INT, "...with CFSR.PRECISERR\n");
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ qemu_log_mask(CPU_LOG_INT, "...with CFSR.IBUSERR\n");
break;
case EXCP_DATA_ABORT:
- env->v7m.cfsr |=
- (R_V7M_CFSR_IBUSERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
+ env->v7m.cfsr[M_REG_NS] |=
+ (R_V7M_CFSR_PRECISERR_MASK | R_V7M_CFSR_BFARVALID_MASK);
env->v7m.bfar = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT,
- "...with CFSR.IBUSERR and BFAR 0x%x\n",
+ "...with CFSR.PRECISERR and BFAR 0x%x\n",
env->v7m.bfar);
break;
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
break;
default:
/* All other FSR values are either MPU faults or "can't happen
*/
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
- env->v7m.cfsr |= R_V7M_CFSR_IACCVIOL_MASK;
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
qemu_log_mask(CPU_LOG_INT, "...with CFSR.IACCVIOL\n");
break;
case EXCP_DATA_ABORT:
- env->v7m.cfsr |=
+ env->v7m.cfsr[env->v7m.secure] |=
(R_V7M_CFSR_DACCVIOL_MASK | R_V7M_CFSR_MMARVALID_MASK);
- env->v7m.mmfar = env->exception.vaddress;
+ env->v7m.mmfar[env->v7m.secure] = env->exception.vaddress;
qemu_log_mask(CPU_LOG_INT,
"...with CFSR.DACCVIOL and MMFAR 0x%x\n",
- env->v7m.mmfar);
+ env->v7m.mmfar[env->v7m.secure]);
break;
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM,
+ env->v7m.secure);
break;
}
break;
return;
}
}
- armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG);
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_DEBUG, false);
break;
case EXCP_IRQ:
break;
return; /* Never happens. Keep compiler happy. */
}
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK;
+ /* The S bit indicates whether we should return to Secure
+ * or NonSecure (ie our current state).
+ * The ES bit indicates whether we're taking this exception
+ * to Secure or NonSecure (ie our target state). We set it
+ * later, in v7m_exception_taken().
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
+ * This corresponds to the ARM ARM pseudocode for v8M setting
+ * some LR bits in PushStack() and some in ExceptionTaken();
+ * the distinction matters for the tailchain cases where we
+ * can take an exception without pushing the stack.
+ */
+ if (env->v7m.secure) {
+ lr |= R_V7M_EXCRET_S_MASK;
+ }
+ } else {
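+        /* This builds the classic v7M EXC_RETURN values: 0xfffffff1,
+         * becoming 0xfffffff9 or 0xfffffffd once the MODE and SPSEL
+         * bits are ORed in below.
+         */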
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_S_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK |
+ R_V7M_EXCRET_ES_MASK;
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+ }
+ if (!arm_v7m_is_handler_mode(env)) {
+ lr |= R_V7M_EXCRET_MODE_MASK;
+ }
+
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, lr);
+ v7m_exception_taken(cpu, lr, false);
qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
case ARMMMUIdx_MPriv:
case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSPriv:
+ case ARMMMUIdx_MSNegPri:
+ case ARMMMUIdx_MSUser:
return 1;
default:
g_assert_not_reached();
}
}
-/* Return true if this address translation regime is secure */
-static inline bool regime_is_secure(CPUARMState *env, ARMMMUIdx mmu_idx)
-{
- switch (mmu_idx) {
- case ARMMMUIdx_S12NSE0:
- case ARMMMUIdx_S12NSE1:
- case ARMMMUIdx_S1NSE0:
- case ARMMMUIdx_S1NSE1:
- case ARMMMUIdx_S1E2:
- case ARMMMUIdx_S2NS:
- case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
- case ARMMMUIdx_MUser:
- return false;
- case ARMMMUIdx_S1E3:
- case ARMMMUIdx_S1SE0:
- case ARMMMUIdx_S1SE1:
- return true;
- default:
- g_assert_not_reached();
- }
-}
-
/* Return the SCTLR value which controls this address translation regime */
static inline uint32_t regime_sctlr(CPUARMState *env, ARMMMUIdx mmu_idx)
{
ARMMMUIdx mmu_idx)
{
if (arm_feature(env, ARM_FEATURE_M)) {
- switch (env->v7m.mpu_ctrl &
+ switch (env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)] &
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
/* Enabled, but not for HardFault and NMI */
- return mmu_idx == ARMMMUIdx_MNegPri;
+ return mmu_idx == ARMMMUIdx_MNegPri ||
+ mmu_idx == ARMMMUIdx_MSNegPri;
case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
/* Enabled for all cases */
return false;
}
static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size, uint32_t *fsr,
ARMMMUFaultInfo *fi)
}
static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
ARMMMUFaultInfo *fi)
if (pxn && !regime_is_user(env, mmu_idx)) {
xn = 1;
}
- if (xn && access_type == 2)
+ if (xn && access_type == MMU_INST_FETCH)
goto do_fault;
if (arm_feature(env, ARM_FEATURE_V6K) &&
}
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
ARMMMUFaultInfo *fi)
}
if (arm_feature(env, ARM_FEATURE_M)) {
- return env->v7m.mpu_ctrl & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+ return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
+ & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
} else {
return regime_sctlr(env, mmu_idx) & SCTLR_BR;
}
}
+static inline bool m_is_ppb_region(CPUARMState *env, uint32_t address)
+{
+ /* True if address is in the M profile PPB region 0xe0000000 - 0xe00fffff */
+ return arm_feature(env, ARM_FEATURE_M) &&
+ extract32(address, 20, 12) == 0xe00;
+}
+
+static inline bool m_is_system_region(CPUARMState *env, uint32_t address)
+{
+ /* True if address is in the M profile system region
+ * 0xe0000000 - 0xffffffff
+ */
+ return arm_feature(env, ARM_FEATURE_M) && extract32(address, 29, 3) == 0x7;
+}
+
static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
ARMCPU *cpu = arm_env_get_cpu(env);
*phys_ptr = address;
*prot = 0;
- if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ if (regime_translation_disabled(env, mmu_idx) ||
+ m_is_ppb_region(env, address)) {
+ /* MPU disabled or M profile PPB access: use default memory map.
+ * The other case which uses the default memory map in the
+ * v7M ARM ARM pseudocode is exception vector reads from the vector
+ * table. In QEMU those accesses are done in arm_v7m_load_vector(),
+ * which always does a direct read using address_space_ldl(), rather
+ * than going via this function, so we don't need to check that here.
+ */
get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
} else { /* MPU enabled */
for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
} else { /* a MPU hit! */
uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
+ uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
if (is_user) { /* User mode AP bit decoding */
switch (ap) {
}
/* execute never */
- if (env->pmsav7.dracr[n] & (1 << 12)) {
+ if (xn) {
*prot &= ~PAGE_EXEC;
}
}
return !(*prot & (1 << access_type));
}
+static bool v8m_is_sau_exempt(CPUARMState *env,
+ uint32_t address, MMUAccessType access_type)
+{
+ /* The architecture specifies that certain address ranges are
+ * exempt from v8M SAU/IDAU checks.
+ */
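+    /* Broadly, these ranges cover the PPB debug components (ITM, DWT,
+     * FPB), the System Control Space and its non-secure alias, the
+     * TPIU/ETM area and the ROM table.
+     */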
+ return
+ (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+ (address >= 0xe0000000 && address <= 0xe0002fff) ||
+ (address >= 0xe000e000 && address <= 0xe000efff) ||
+ (address >= 0xe002e000 && address <= 0xe002efff) ||
+ (address >= 0xe0040000 && address <= 0xe0041fff) ||
+ (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs)
+{
+ /* Look up the security attributes for this address. Compare the
+ * pseudocode SecurityCheck() function.
+ * We assume the caller has zero-initialized *sattrs.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int r;
+
+ /* TODO: implement IDAU */
+
+ if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+ /* 0xf0000000..0xffffffff is always S for insn fetches */
+ return;
+ }
+
+ if (v8m_is_sau_exempt(env, address, access_type)) {
+ sattrs->ns = !regime_is_secure(env, mmu_idx);
+ return;
+ }
+
+ switch (env->sau.ctrl & 3) {
+ case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+ break;
+ case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+ sattrs->ns = true;
+ break;
+ default: /* SAU.ENABLE == 1 */
+ for (r = 0; r < cpu->sau_sregion; r++) {
+ if (env->sau.rlar[r] & 1) {
+ uint32_t base = env->sau.rbar[r] & ~0x1f;
+ uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+ if (base <= address && limit >= address) {
+ if (sattrs->srvalid) {
+ /* If we hit in more than one region then we must report
+ * as Secure, not NS-Callable, with no valid region
+ * number info.
+ */
+ sattrs->ns = false;
+ sattrs->nsc = false;
+ sattrs->sregion = 0;
+ sattrs->srvalid = false;
+ break;
+ } else {
+ if (env->sau.rlar[r] & 2) {
+ sattrs->nsc = true;
+ } else {
+ sattrs->ns = true;
+ }
+ sattrs->srvalid = true;
+ sattrs->sregion = r;
+ }
+ }
+ }
+ }
+
+ /* TODO when we support the IDAU then it may override the result here */
+ break;
+ }
+}
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, uint32_t *fsr)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ bool is_user = regime_is_user(env, mmu_idx);
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ int n;
+ int matchregion = -1;
+ bool hit = false;
+ V8M_SAttributes sattrs = {};
+
+ *phys_ptr = address;
+ *prot = 0;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ *fsr = M_FAKE_FSR_SFAULT;
+ return true;
+ }
+ }
+ }
+
+ /* Unlike the ARM ARM pseudocode, we don't need to check whether this
+ * was an exception vector read from the vector table (which is always
+ * done using the default system address map), because those accesses
+ * are done in arm_v7m_load_vector(), which always does a direct
+ * read using address_space_ldl(), rather than going via this function.
+ */
+ if (regime_translation_disabled(env, mmu_idx)) { /* MPU disabled */
+ hit = true;
+ } else if (m_is_ppb_region(env, address)) {
+ hit = true;
+ } else if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+ hit = true;
+ } else {
+ for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
+ /* region search */
+ /* Note that the base address is bits [31:5] from the register
+ * with bits [4:0] all zeroes, but the limit address is bits
+ * [31:5] from the register with bits [4:0] all ones.
+ */
+ uint32_t base = env->pmsav8.rbar[secure][n] & ~0x1f;
+ uint32_t limit = env->pmsav8.rlar[secure][n] | 0x1f;
+
+ if (!(env->pmsav8.rlar[secure][n] & 0x1)) {
+ /* Region disabled */
+ continue;
+ }
+
+ if (address < base || address > limit) {
+ continue;
+ }
+
+ if (hit) {
+ /* Multiple regions match -- always a failure (unlike
+ * PMSAv7 where highest-numbered-region wins)
+ */
+ *fsr = 0x00d; /* permission fault */
+ return true;
+ }
+
+ matchregion = n;
+ hit = true;
+
+ if (base & ~TARGET_PAGE_MASK) {
+ qemu_log_mask(LOG_UNIMP,
+ "MPU_RBAR[%d]: No support for MPU region base"
+ "address of 0x%" PRIx32 ". Minimum alignment is "
+ "%d\n",
+ n, base, TARGET_PAGE_BITS);
+ continue;
+ }
+ if ((limit + 1) & ~TARGET_PAGE_MASK) {
+ qemu_log_mask(LOG_UNIMP,
+ "MPU_RBAR[%d]: No support for MPU region limit"
+ "address of 0x%" PRIx32 ". Minimum alignment is "
+ "%d\n",
+ n, limit, TARGET_PAGE_BITS);
+ continue;
+ }
+ }
+ }
+
+ if (!hit) {
+ /* background fault */
+ *fsr = 0;
+ return true;
+ }
+
+ if (matchregion == -1) {
+ /* hit using the background region */
+ get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+ } else {
+ uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
+ uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
+
+ if (m_is_system_region(env, address)) {
+ /* System space is always execute never */
+ xn = 1;
+ }
+
+ *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+ if (*prot && !xn) {
+ *prot |= PAGE_EXEC;
+ }
+ /* We don't need to look the attribute up in the MAIR0/MAIR1
+ * registers because that only tells us about cacheability.
+ */
+ }
+
+ *fsr = 0x00d; /* Permission fault */
+ return !(*prot & (1 << access_type));
+}
+
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
{
int n;
uint32_t base;
bool is_user = regime_is_user(env, mmu_idx);
+ if (regime_translation_disabled(env, mmu_idx)) {
+ /* MPU disabled. */
+ *phys_ptr = address;
+ *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+ return false;
+ }
+
*phys_ptr = address;
for (n = 7; n >= 0; n--) {
base = env->cp15.c6_region[n];
return true;
}
- if (access_type == 2) {
+ if (access_type == MMU_INST_FETCH) {
mask = env->cp15.pmsav5_insn_ap;
} else {
mask = env->cp15.pmsav5_data_ap;
* @fsr: set to the DFSR/IFSR value on failure
*/
static bool get_phys_addr(CPUARMState *env, target_ulong address,
- int access_type, ARMMMUIdx mmu_idx,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
ARMMMUFaultInfo *fi)
}
}
- /* pmsav7 has special handling for when MPU is disabled so call it before
- * the common MMU/MPU disabled check below.
- */
- if (arm_feature(env, ARM_FEATURE_PMSA) &&
- arm_feature(env, ARM_FEATURE_V7)) {
+ if (arm_feature(env, ARM_FEATURE_PMSA)) {
bool ret;
*page_size = TARGET_PAGE_SIZE;
- ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
- qemu_log_mask(CPU_LOG_MMU, "PMSAv7 MPU lookup for %s at 0x%08" PRIx32
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* PMSAv8 */
+ ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
+ phys_ptr, attrs, prot, fsr);
+ } else if (arm_feature(env, ARM_FEATURE_V7)) {
+ /* PMSAv7 */
+ ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ } else {
+ /* Pre-v7 MPU */
+ ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+ phys_ptr, prot, fsr);
+ }
+ qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
" mmu_idx %u -> %s (prot %c%c%c)\n",
access_type == MMU_DATA_LOAD ? "reading" :
(access_type == MMU_DATA_STORE ? "writing" : "execute"),
return ret;
}
+ /* Definitely a real MMU, not an MPU */
+
if (regime_translation_disabled(env, mmu_idx)) {
- /* MMU/MPU disabled. */
+ /* MMU disabled. */
*phys_ptr = address;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
*page_size = TARGET_PAGE_SIZE;
return 0;
}
- if (arm_feature(env, ARM_FEATURE_PMSA)) {
- /* Pre-v7 MPU */
- *page_size = TARGET_PAGE_SIZE;
- return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
- }
-
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
attrs, prot, page_size, fsr, fi);
* fsr with ARM DFSR/IFSR fault register format value on failure.
*/
bool arm_tlb_fill(CPUState *cs, vaddr address,
- int access_type, int mmu_idx, uint32_t *fsr,
+ MMUAccessType access_type, int mmu_idx, uint32_t *fsr,
ARMMMUFaultInfo *fi)
{
ARMCPU *cpu = ARM_CPU(cs);
case 0 ... 7: /* xPSR sub-fields */
mask = 0;
if ((reg & 1) && el) {
- mask |= 0x000001ff; /* IPSR (unpriv. reads as zero) */
+ mask |= XPSR_EXCP; /* IPSR (unpriv. reads as zero) */
}
if (!(reg & 4)) {
- mask |= 0xf8000000; /* APSR */
+ mask |= XPSR_NZCV | XPSR_Q; /* APSR */
}
/* EPSR reads as zero */
return xpsr_read(env) & mask;
break;
case 20: /* CONTROL */
- return env->v7m.control;
+ return env->v7m.control[env->v7m.secure];
+ case 0x94: /* CONTROL_NS */
+ /* We have to handle this here because unprivileged Secure code
+ * can read the NS CONTROL register.
+ */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.control[M_REG_NS];
}
if (el == 0) {
return 0; /* unprivileged reads others as zero */
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_msp;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.other_ss_psp;
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.primask[M_REG_NS];
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.basepri[M_REG_NS];
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ return env->v7m.faultmask[M_REG_NS];
+ case 0x98: /* SP_NS */
+ {
+ /* This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+ if (!env->v7m.secure) {
+ return 0;
+ }
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
+ return env->v7m.other_ss_psp;
+ } else {
+ return env->v7m.other_ss_msp;
+ }
+ }
+ default:
+ break;
+ }
+ }
+
switch (reg) {
case 8: /* MSP */
- return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
+ return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
env->v7m.other_sp : env->regs[13];
case 9: /* PSP */
- return (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) ?
+ return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
- return (env->daif & PSTATE_I) != 0;
+ return env->v7m.primask[env->v7m.secure];
case 17: /* BASEPRI */
case 18: /* BASEPRI_MAX */
- return env->v7m.basepri;
+ return env->v7m.basepri[env->v7m.secure];
case 19: /* FAULTMASK */
- return (env->daif & PSTATE_F) != 0;
+ return env->v7m.faultmask[env->v7m.secure];
default:
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to read unknown special"
" register %d\n", reg);
return;
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ switch (reg) {
+ case 0x88: /* MSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_msp = val;
+ return;
+ case 0x89: /* PSP_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.other_ss_psp = val;
+ return;
+ case 0x90: /* PRIMASK_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.primask[M_REG_NS] = val & 1;
+ return;
+ case 0x91: /* BASEPRI_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.basepri[M_REG_NS] = val & 0xff;
+ return;
+ case 0x93: /* FAULTMASK_NS */
+ if (!env->v7m.secure) {
+ return;
+ }
+ env->v7m.faultmask[M_REG_NS] = val & 1;
+ return;
+ case 0x98: /* SP_NS */
+ {
+ /* This gives the non-secure SP selected based on whether we're
+ * currently in handler mode or not, using the NS CONTROL.SPSEL.
+ */
+ bool spsel = env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK;
+
+ if (!env->v7m.secure) {
+ return;
+ }
+ if (!arm_v7m_is_handler_mode(env) && spsel) {
+ env->v7m.other_ss_psp = val;
+ } else {
+ env->v7m.other_ss_msp = val;
+ }
+ return;
+ }
+ default:
+ break;
+ }
+ }
+
switch (reg) {
case 0 ... 7: /* xPSR sub-fields */
/* only APSR is actually writable */
uint32_t apsrmask = 0;
if (mask & 8) {
- apsrmask |= 0xf8000000; /* APSR NZCVQ */
+ apsrmask |= XPSR_NZCV | XPSR_Q;
}
if ((mask & 4) && arm_feature(env, ARM_FEATURE_THUMB_DSP)) {
- apsrmask |= 0x000f0000; /* APSR GE[3:0] */
+ apsrmask |= XPSR_GE;
}
xpsr_write(env, val, apsrmask);
}
break;
case 8: /* MSP */
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+ if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
env->v7m.other_sp = val;
} else {
env->regs[13] = val;
}
break;
case 9: /* PSP */
- if (env->v7m.control & R_V7M_CONTROL_SPSEL_MASK) {
+ if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
env->regs[13] = val;
} else {
env->v7m.other_sp = val;
}
break;
case 16: /* PRIMASK */
- if (val & 1) {
- env->daif |= PSTATE_I;
- } else {
- env->daif &= ~PSTATE_I;
- }
+ env->v7m.primask[env->v7m.secure] = val & 1;
break;
case 17: /* BASEPRI */
- env->v7m.basepri = val & 0xff;
+ env->v7m.basepri[env->v7m.secure] = val & 0xff;
break;
case 18: /* BASEPRI_MAX */
val &= 0xff;
- if (val != 0 && (val < env->v7m.basepri || env->v7m.basepri == 0))
- env->v7m.basepri = val;
+ if (val != 0 && (val < env->v7m.basepri[env->v7m.secure]
+ || env->v7m.basepri[env->v7m.secure] == 0)) {
+ env->v7m.basepri[env->v7m.secure] = val;
+ }
break;
case 19: /* FAULTMASK */
- if (val & 1) {
- env->daif |= PSTATE_F;
- } else {
- env->daif &= ~PSTATE_F;
- }
+ env->v7m.faultmask[env->v7m.secure] = val & 1;
break;
case 20: /* CONTROL */
/* Writing to the SPSEL bit only has an effect if we are in
* thread mode; other bits can be updated by any privileged code.
- * switch_v7m_sp() deals with updating the SPSEL bit in
+ * write_v7m_control_spsel() deals with updating the SPSEL bit in
* env->v7m.control, so we only need update the others.
*/
- if (env->v7m.exception == 0) {
- switch_v7m_sp(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
+ if (!arm_v7m_is_handler_mode(env)) {
+ write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
- env->v7m.control &= ~R_V7M_CONTROL_NPRIV_MASK;
- env->v7m.control |= val & R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
+ env->v7m.control[env->v7m.secure] |= val & R_V7M_CONTROL_NPRIV_MASK;
break;
default:
qemu_log_mask(LOG_GUEST_ERROR, "Attempt to write unknown special"