#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
#ifndef CONFIG_USER_ONLY
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+ unsigned int attrs:8; /* as in the MAIR register encoding */
+ unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
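+/* Illustrative encodings (examples only, not added by this patch): with the
+ * usual MAIR byte format, attrs == 0xff is Normal Write-Back RW-allocate
+ * memory, 0x44 is Normal Non-cacheable and 0x00 is Device-nGnRnE, while
+ * shareability follows the PTE SH encoding: 0 == Non-shareable,
+ * 2 == Outer Shareable, 3 == Inner Shareable.
+ */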
+
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi);
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
+
+/* Security attributes for an address, as returned by v8m_security_lookup. */
+typedef struct V8M_SAttributes {
+ bool ns;
+ bool nsc;
+ uint8_t sregion;
+ bool srvalid;
+ uint8_t iregion;
+ bool irvalid;
+} V8M_SAttributes;
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs);
/* Definitions for the PMCCNTR and PMCR registers */
#define PMCRD 0x8
uint64_t par64;
MemTxAttrs attrs = {};
ARMMMUFaultInfo fi = {};
+ ARMCacheAttrs cacheattrs = {};
- ret = get_phys_addr(env, value, access_type, mmu_idx,
- &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
- if (extended_addresses_enabled(env)) {
+ ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
+ &prot, &page_size, &fsr, &fi, &cacheattrs);
+ if (arm_s1_regime_using_lpae_format(env, mmu_idx)) {
/* fsr is a DFSR/IFSR value for the long descriptor
* translation table format, but with WnR always clear.
* Convert it to a 64-bit PAR.
if (!attrs.secure) {
par64 |= (1 << 9); /* NS */
}
- /* We don't set the ATTR or SH fields in the PAR. */
+ par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
+ par64 |= cacheattrs.shareability << 7; /* SH */
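+ /* Worked example (illustrative only): Normal Write-Back memory
+ * (MAIR attr 0xff), Inner Shareable (SH == 3), Non-secure, would give
+ * par64 == (0xffULL << 56) | (1 << 9) | (3 << 7) | (phys & ~0xfffULL)
+ */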
} else {
par64 |= 1; /* F */
par64 |= (fsr & 0x3f) << 1; /* FS */
}
}
+/* We don't know until after realize whether there's a GICv3
+ * attached, and that is what registers the gicv3 sysregs.
+ * So we have to fill in the GIC fields in ID_PFR1 and ID_AA64PFR0_EL1
+ * at runtime.
+ */
+static uint64_t id_pfr1_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t pfr1 = cpu->id_pfr1;
+
+ if (env->gicv3state) {
+ pfr1 |= 1 << 28;
+ }
+ return pfr1;
+}
+
+static uint64_t id_aa64pfr0_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ uint64_t pfr0 = cpu->id_aa64pfr0;
+
+ if (env->gicv3state) {
+ pfr0 |= 1 << 24;
+ }
+ return pfr0;
+}
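+/* For reference (not part of this patch): bit 28 above is the bottom bit of
+ * ID_PFR1.GIC (bits [31:28]) and bit 24 the bottom bit of
+ * ID_AA64PFR0_EL1.GIC (bits [27:24]); a field value of 1 indicates a GIC
+ * system register CPU interface (GICv3 or later) is present.
+ */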
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 0,
.access = PL1_R, .type = ARM_CP_CONST,
.resetvalue = cpu->id_pfr0 },
+ /* ID_PFR1 is not a plain ARM_CP_CONST because we don't know
+ * the value of the GIC field until after we define these regs.
+ */
{ .name = "ID_PFR1", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 1,
- .access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_pfr1 },
+ .access = PL1_R, .type = ARM_CP_NO_RAW,
+ .readfn = id_pfr1_read,
+ .writefn = arm_cp_write_ignore },
{ .name = "ID_DFR0", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 1, .opc2 = 2,
.access = PL1_R, .type = ARM_CP_CONST,
* define new registers here.
*/
ARMCPRegInfo v8_idregs[] = {
+ /* ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST because we don't
+ * know the right value for the GIC field until after we
+ * define these regs.
+ */
{ .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
- .access = PL1_R, .type = ARM_CP_CONST,
- .resetvalue = cpu->id_aa64pfr0 },
+ .access = PL1_R, .type = ARM_CP_NO_RAW,
+ .readfn = id_aa64pfr0_read,
+ .writefn = arm_cp_write_ignore },
{ .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST,
g_assert_not_reached();
}
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* translate.c should never generate calls here in user-only mode */
+ g_assert_not_reached();
+}
+
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* The TT instructions can be used by unprivileged code, but in
+ * user-only emulation we don't have the MPU.
+ * Luckily since we know we are NonSecure unprivileged (and that in
+ * turn means that the A flag wasn't specified), all the bits in the
+ * register must be zero:
+ * IREGION: 0 because IRVALID is 0
+ * IRVALID: 0 because NS
+ * S: 0 because NS
+ * NSRW: 0 because NS
+ * NSR: 0 because NS
+ * RW: 0 because unpriv and A flag not set
+ * R: 0 because unpriv and A flag not set
+ * SRVALID: 0 because NS
+ * MRVALID: 0 because unpriv and A flag not set
+ * SREGION: 0 because SRVALID is 0
+ * MREGION: 0 because MRVALID is 0
+ */
+ return 0;
+}
+
void switch_mode(CPUARMState *env, int mode)
{
ARMCPU *cpu = arm_env_get_cpu(env);
* - if the return value is a magic value, do exception return (like BX)
* - otherwise bit 0 of the return value is the target security state
*/
- if (dest >= 0xff000000) {
+ uint32_t min_magic;
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* Covers FNC_RETURN and EXC_RETURN magic */
+ min_magic = FNC_RETURN_MIN_MAGIC;
+ } else {
+ /* EXC_RETURN magic only */
+ min_magic = EXC_RETURN_MIN_MAGIC;
+ }
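+ /* For orientation (values assumed from QEMU's internals.h): EXC_RETURN
+ * magic addresses are 0xff000000 and up, while FNC_RETURN uses
+ * 0xfefffffe/0xfeffffff, so with the Security Extension min_magic is the
+ * lower FNC_RETURN bound and without it only 0xffxxxxxx values match.
+ */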
+
+ if (dest >= min_magic) {
/* This is an exception return magic value; put it where
* do_v7m_exception_exit() expects and raise EXCEPTION_EXIT.
* Note that if we ever add gen_ss_advance() singlestep support to
env->regs[15] = dest & ~1;
}
+void HELPER(v7m_blxns)(CPUARMState *env, uint32_t dest)
+{
+ /* Handle v7M BLXNS:
+ * - bit 0 of the destination address is the target security state
+ */
+
+ /* At this point regs[15] is the address just after the BLXNS */
+ uint32_t nextinst = env->regs[15] | 1;
+ uint32_t sp = env->regs[13] - 8;
+ uint32_t saved_psr;
+
+ /* translate.c will have made BLXNS UNDEF unless we're secure */
+ assert(env->v7m.secure);
+
+ if (dest & 1) {
+ /* target is Secure, so this is just a normal BLX,
+ * except that the low bit doesn't indicate Thumb/not.
+ */
+ env->regs[14] = nextinst;
+ env->thumb = 1;
+ env->regs[15] = dest & ~1;
+ return;
+ }
+
+ /* Target is non-secure: first push a stack frame */
+ if (!QEMU_IS_ALIGNED(sp, 8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "BLXNS with misaligned SP is UNPREDICTABLE\n");
+ }
+
+ saved_psr = env->v7m.exception;
+ if (env->v7m.control[M_REG_S] & R_V7M_CONTROL_SFPA_MASK) {
+ saved_psr |= XPSR_SFPA;
+ }
+
+ /* Note that these stores can throw exceptions on MPU faults */
+ cpu_stl_data(env, sp, nextinst);
+ cpu_stl_data(env, sp + 4, saved_psr);
+
+ env->regs[13] = sp;
+ env->regs[14] = 0xfeffffff;
+ if (arm_v7m_is_handler_mode(env)) {
+ /* Write a dummy value to IPSR, to avoid leaking the current secure
+ * exception number to non-secure code. This is guaranteed not
+ * to cause write_v7m_exception() to actually change stacks.
+ */
+ write_v7m_exception(env, 1);
+ }
+ switch_v7m_security_state(env, 0);
+ env->thumb = 1;
+ env->regs[15] = dest;
+}
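+/* Sketch of the intended round trip (illustrative, no new behaviour): a
+ * Secure caller issues BLXNS with bit 0 of the target clear; we push the
+ * return address and partial xPSR at sp and sp + 4, set LR to the
+ * FNC_RETURN value 0xfeffffff and branch Non-secure. When the callee
+ * later does BX LR, the FNC_RETURN magic raises EXCP_EXCEPTION_EXIT and
+ * do_v7m_function_return() pops those two words again.
+ */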
+
static uint32_t *get_v7m_sp_ptr(CPUARMState *env, bool secure, bool threadmode,
bool spsel)
{
}
}
-static uint32_t arm_v7m_load_vector(ARMCPU *cpu)
+static uint32_t arm_v7m_load_vector(ARMCPU *cpu, bool targets_secure)
{
CPUState *cs = CPU(cpu);
CPUARMState *env = &cpu->env;
MemTxResult result;
- hwaddr vec = env->v7m.vecbase[env->v7m.secure] + env->v7m.exception * 4;
+ hwaddr vec = env->v7m.vecbase[targets_secure] + env->v7m.exception * 4;
uint32_t addr;
addr = address_space_ldl(cs->as, vec,
* Since we don't model Lockup, we just report this guest error
* via cpu_abort().
*/
- cpu_abort(cs, "Failed to read from exception vector table "
- "entry %08x\n", (unsigned)vec);
+ cpu_abort(cs, "Failed to read from %s exception vector table "
+ "entry %08x\n", targets_secure ? "secure" : "nonsecure",
+ (unsigned)vec);
}
return addr;
}
-static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr)
+static void v7m_push_callee_stack(ARMCPU *cpu, uint32_t lr, bool dotailchain)
+{
+ /* For v8M, push the callee-saves register part of the stack frame.
+ * Compare the v8M pseudocode PushCalleeStack().
+ * In the tailchaining case this may not be the current stack.
+ */
+ CPUARMState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ if (dotailchain) {
+ frame_sp_p = get_v7m_sp_ptr(env, true,
+ lr & R_V7M_EXCRET_MODE_MASK,
+ lr & R_V7M_EXCRET_SPSEL_MASK);
+ } else {
+ frame_sp_p = &env->regs[13];
+ }
+
+ frameptr = *frame_sp_p - 0x28;
+
+ stl_phys(cs->as, frameptr, 0xfefa125b);
+ stl_phys(cs->as, frameptr + 0x8, env->regs[4]);
+ stl_phys(cs->as, frameptr + 0xc, env->regs[5]);
+ stl_phys(cs->as, frameptr + 0x10, env->regs[6]);
+ stl_phys(cs->as, frameptr + 0x14, env->regs[7]);
+ stl_phys(cs->as, frameptr + 0x18, env->regs[8]);
+ stl_phys(cs->as, frameptr + 0x1c, env->regs[9]);
+ stl_phys(cs->as, frameptr + 0x20, env->regs[10]);
+ stl_phys(cs->as, frameptr + 0x24, env->regs[11]);
+
+ *frame_sp_p = frameptr;
+}
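+/* Resulting 0x28-byte callee-save frame (for reference, matching the stores
+ * above):
+ * [frameptr + 0x00] integrity signature 0xfefa125b
+ * [frameptr + 0x04] reserved (not written here)
+ * [frameptr + 0x08 .. 0x24] r4 - r11
+ * do_v7m_exception_exit() checks the signature and reloads r4-r11 from the
+ * same offsets when it unstacks this frame.
+ */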
+
+static void v7m_exception_taken(ARMCPU *cpu, uint32_t lr, bool dotailchain)
{
/* Do the "take the exception" parts of exception entry,
* but not the pushing of state to the stack. This is
*/
CPUARMState *env = &cpu->env;
uint32_t addr;
+ bool targets_secure;
- armv7m_nvic_acknowledge_irq(env->nvic);
+ targets_secure = armv7m_nvic_acknowledge_irq(env->nvic);
+
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY) &&
+ (lr & R_V7M_EXCRET_S_MASK)) {
+ /* The background code (the owner of the registers in the
+ * exception frame) is Secure. This means it may either already
+ * have or now needs to push callee-saves registers.
+ */
+ if (targets_secure) {
+ if (dotailchain && !(lr & R_V7M_EXCRET_ES_MASK)) {
+ /* We took an exception from Secure to NonSecure
+ * (which means the callee-saved registers got stacked)
+ * and are now tailchaining to a Secure exception.
+ * Clear DCRS so eventual return from this Secure
+ * exception unstacks the callee-saved registers.
+ */
+ lr &= ~R_V7M_EXCRET_DCRS_MASK;
+ }
+ } else {
+ /* We're going to a non-secure exception; push the
+ * callee-saves registers to the stack now, if they're
+ * not already saved.
+ */
+ if (lr & R_V7M_EXCRET_DCRS_MASK &&
+ !(dotailchain && (lr & R_V7M_EXCRET_ES_MASK))) {
+ v7m_push_callee_stack(cpu, lr, dotailchain);
+ }
+ lr |= R_V7M_EXCRET_DCRS_MASK;
+ }
+ }
+
+ lr &= ~R_V7M_EXCRET_ES_MASK;
+ if (targets_secure || !arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ lr |= R_V7M_EXCRET_ES_MASK;
+ }
+ lr &= ~R_V7M_EXCRET_SPSEL_MASK;
+ if (env->v7m.control[targets_secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
+
+ /* Clear registers if necessary to prevent non-secure exception
+ * code being able to see register values from secure code.
+ * Where register values become architecturally UNKNOWN we leave
+ * them with their previous values.
+ */
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ if (!targets_secure) {
+ /* Always clear the caller-saved registers (they have been
+ * pushed to the stack earlier in v7m_push_stack()).
+ * Clear callee-saved registers if the background code is
+ * Secure (in which case these regs were saved in
+ * v7m_push_callee_stack()).
+ */
+ int i;
+
+ for (i = 0; i < 13; i++) {
+ /* r4..r11 are callee-saves, zero only if EXCRET.S == 1 */
+ if (i < 4 || i > 11 || (lr & R_V7M_EXCRET_S_MASK)) {
+ env->regs[i] = 0;
+ }
+ }
+ /* Clear EAPSR */
+ xpsr_write(env, 0, XPSR_NZCV | XPSR_Q | XPSR_GE | XPSR_IT);
+ }
+ }
+ }
+
+ /* Switch to target security state -- must do this before writing SPSEL */
+ switch_v7m_security_state(env, targets_secure);
write_v7m_control_spsel(env, 0);
arm_clear_exclusive(env);
/* Clear IT bits */
env->condexec_bits = 0;
env->regs[14] = lr;
- addr = arm_v7m_load_vector(cpu);
+ addr = arm_v7m_load_vector(cpu, targets_secure);
env->regs[15] = addr & 0xfffffffe;
env->thumb = addr & 1;
}
uint32_t excret;
uint32_t xpsr;
bool ufault = false;
- bool return_to_sp_process = false;
- bool return_to_handler = false;
+ bool sfault = false;
+ bool return_to_sp_process;
+ bool return_to_handler;
bool rettobase = false;
bool exc_secure = false;
bool return_to_secure;
- /* We can only get here from an EXCP_EXCEPTION_EXIT, and
- * gen_bx_excret() enforces the architectural rule
- * that jumps to magic addresses don't have magic behaviour unless
- * we're in Handler mode (compare pseudocode BXWritePC()).
+ /* If we're not in Handler mode then jumps to magic exception-exit
+ * addresses don't have magic behaviour. However for the v8M
+ * security extensions the magic secure-function-return has to
+ * work in thread mode too, so to avoid doing an extra check in
+ * the generated code we allow exception-exit magic to also cause the
+ * internal exception and bring us here in thread mode. Correct code
+ * will never try to do this (the following insn fetch will always
+ * fault) so the overhead of having taken an unnecessary exception
+ * doesn't matter.
*/
- assert(arm_v7m_is_handler_mode(env));
+ if (!arm_v7m_is_handler_mode(env)) {
+ return;
+ }
/* In the spec pseudocode ExceptionReturn() is called directly
* from BXWritePC() and gets the full target PC value including
excret);
}
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* EXC_RETURN.ES validation check (R_SMFL). We must do this before
+ * we pick which FAULTMASK to clear.
+ */
+ if (!env->v7m.secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK))) {
+ sfault = true;
+ /* For all other purposes, treat ES as 0 (R_HXSR) */
+ excret &= ~R_V7M_EXCRET_ES_MASK;
+ }
+ }
+
if (env->v7m.exception != ARMV7M_EXCP_NMI) {
/* Auto-clear FAULTMASK on return from other than NMI.
* If the security extension is implemented then this only
g_assert_not_reached();
}
+ return_to_handler = !(excret & R_V7M_EXCRET_MODE_MASK);
+ return_to_sp_process = excret & R_V7M_EXCRET_SPSEL_MASK;
return_to_secure = arm_feature(env, ARM_FEATURE_M_SECURITY) &&
(excret & R_V7M_EXCRET_S_MASK);
- switch (excret & 0xf) {
- case 1: /* Return to Handler */
- return_to_handler = true;
- break;
- case 13: /* Return to Thread using Process stack */
- return_to_sp_process = true;
- /* fall through */
- case 9: /* Return to Thread using Main stack */
- if (!rettobase &&
- !(env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ if (!arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ /* UNPREDICTABLE if S == 1 or DCRS == 0 or ES == 1 (R_XLCP);
+ * we choose to take the UsageFault.
+ */
+ if ((excret & R_V7M_EXCRET_S_MASK) ||
+ (excret & R_V7M_EXCRET_ES_MASK) ||
+ !(excret & R_V7M_EXCRET_DCRS_MASK)) {
+ ufault = true;
+ }
+ }
+ if (excret & R_V7M_EXCRET_RES0_MASK) {
+ ufault = true;
+ }
+ } else {
+ /* For v7M we only recognize certain combinations of the low bits */
+ switch (excret & 0xf) {
+ case 1: /* Return to Handler */
+ break;
+ case 13: /* Return to Thread using Process stack */
+ case 9: /* Return to Thread using Main stack */
+ /* We only need to check NONBASETHRDENA for v7M, because in
+ * v8M this bit does not exist (it is RES1).
+ */
+ if (!rettobase &&
+ !(env->v7m.ccr[env->v7m.secure] &
+ R_V7M_CCR_NONBASETHRDENA_MASK)) {
+ ufault = true;
+ }
+ break;
+ default:
ufault = true;
}
- break;
- default:
- ufault = true;
+ }
+
+ if (sfault) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVER_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed EXC_RETURN.ES validity check\n");
+ return;
}
if (ufault) {
*/
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, env->v7m.secure);
- v7m_exception_taken(cpu, excret);
+ v7m_exception_taken(cpu, excret, true);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity check\n");
return;
return_to_sp_process);
uint32_t frameptr = *frame_sp_p;
+ if (!QEMU_IS_ALIGNED(frameptr, 8) &&
+ arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile exception return with non-8-aligned SP "
+ "for destination state is UNPREDICTABLE\n");
+ }
+
+ /* Do we need to pop callee-saved registers? */
+ if (return_to_secure &&
+ ((excret & R_V7M_EXCRET_ES_MASK) == 0 ||
+ (excret & R_V7M_EXCRET_DCRS_MASK) == 0)) {
+ uint32_t expected_sig = 0xfefa125b;
+ uint32_t actual_sig = ldl_phys(cs->as, frameptr);
+
+ if (expected_sig != actual_sig) {
+ /* Take a SecureFault on the current stack */
+ env->v7m.sfsr |= R_V7M_SFSR_INVIS_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ v7m_exception_taken(cpu, excret, true);
+ qemu_log_mask(CPU_LOG_INT, "...taking SecureFault on existing "
+ "stackframe: failed exception return integrity "
+ "signature check\n");
+ return;
+ }
+
+ env->regs[4] = ldl_phys(cs->as, frameptr + 0x8);
+ env->regs[5] = ldl_phys(cs->as, frameptr + 0xc);
+ env->regs[6] = ldl_phys(cs->as, frameptr + 0x10);
+ env->regs[7] = ldl_phys(cs->as, frameptr + 0x14);
+ env->regs[8] = ldl_phys(cs->as, frameptr + 0x18);
+ env->regs[9] = ldl_phys(cs->as, frameptr + 0x1c);
+ env->regs[10] = ldl_phys(cs->as, frameptr + 0x20);
+ env->regs[11] = ldl_phys(cs->as, frameptr + 0x24);
+
+ frameptr += 0x28;
+ }
+
/* Pop registers. TODO: make these accesses use the correct
* attributes and address space (S/NS, priv/unpriv) and handle
* memory transaction failures.
env->regs[12] = ldl_phys(cs->as, frameptr + 0x10);
env->regs[14] = ldl_phys(cs->as, frameptr + 0x14);
env->regs[15] = ldl_phys(cs->as, frameptr + 0x18);
+
+ /* Returning from an exception with a PC with bit 0 set is defined
+ * behaviour on v8M (bit 0 is ignored), but for v7M it was specified
+ * to be UNPREDICTABLE. In practice actual v7M hardware seems to ignore
+ * the lsbit, and there are several RTOSes out there which incorrectly
+ * assume the r15 in the stack frame should be a Thumb-style "lsbit
+ * indicates ARM/Thumb" value, so ignore the bit on v7M as well, but
+ * complain about the badly behaved guest.
+ */
if (env->regs[15] & 1) {
- qemu_log_mask(LOG_GUEST_ERROR,
- "M profile return from interrupt with misaligned "
- "PC is UNPREDICTABLE\n");
- /* Actual hardware seems to ignore the lsbit, and there are several
- * RTOSes out there which incorrectly assume the r15 in the stack
- * frame should be a Thumb-style "lsbit indicates ARM/Thumb" value.
- */
env->regs[15] &= ~1U;
+ if (!arm_feature(env, ARM_FEATURE_V8)) {
+ qemu_log_mask(LOG_GUEST_ERROR,
+ "M profile return from interrupt with misaligned "
+ "PC is UNPREDICTABLE on v7M\n");
+ }
}
+
xpsr = ldl_phys(cs->as, frameptr + 0x1c);
if (arm_feature(env, ARM_FEATURE_V8)) {
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
env->v7m.secure);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
- v7m_exception_taken(cpu, excret);
+ v7m_exception_taken(cpu, excret, true);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on existing "
"stackframe: failed exception return integrity "
"check\n");
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE, false);
env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, excret);
+ v7m_exception_taken(cpu, excret, false);
qemu_log_mask(CPU_LOG_INT, "...taking UsageFault on new stackframe: "
"failed exception return integrity check\n");
return;
qemu_log_mask(CPU_LOG_INT, "...successful exception return\n");
}
+static bool do_v7m_function_return(ARMCPU *cpu)
+{
+ /* v8M security extensions magic function return.
+ * We may either:
+ * (1) throw an exception (longjump)
+ * (2) return true if we successfully handled the function return
+ * (3) return false if we failed a consistency check and have
+ * pended a UsageFault that needs to be taken now
+ *
+ * At this point the magic return value is split between env->regs[15]
+ * and env->thumb. We don't bother to reconstitute it because we don't
+ * need it (all values are handled the same way).
+ */
+ CPUARMState *env = &cpu->env;
+ uint32_t newpc, newpsr, newpsr_exc;
+
+ qemu_log_mask(CPU_LOG_INT, "...really v7M secure function return\n");
+
+ {
+ bool threadmode, spsel;
+ TCGMemOpIdx oi;
+ ARMMMUIdx mmu_idx;
+ uint32_t *frame_sp_p;
+ uint32_t frameptr;
+
+ /* Pull the return address and IPSR from the Secure stack */
+ threadmode = !arm_v7m_is_handler_mode(env);
+ spsel = env->v7m.control[M_REG_S] & R_V7M_CONTROL_SPSEL_MASK;
+
+ frame_sp_p = get_v7m_sp_ptr(env, true, threadmode, spsel);
+ frameptr = *frame_sp_p;
+
+ /* These loads may throw an exception (for MPU faults). We want to
+ * do them as secure, so work out what MMU index that is.
+ */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+ oi = make_memop_idx(MO_LE, arm_to_core_mmu_idx(mmu_idx));
+ newpc = helper_le_ldul_mmu(env, frameptr, oi, 0);
+ newpsr = helper_le_ldul_mmu(env, frameptr + 4, oi, 0);
+
+ /* Consistency checks on new IPSR */
+ newpsr_exc = newpsr & XPSR_EXCP;
+ if (!((env->v7m.exception == 0 && newpsr_exc == 0) ||
+ (env->v7m.exception == 1 && newpsr_exc != 0))) {
+ /* Pend the fault and tell our caller to take it */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_INVPC_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE,
+ env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT,
+ "...taking INVPC UsageFault: "
+ "IPSR consistency check failed\n");
+ return false;
+ }
+
+ *frame_sp_p = frameptr + 8;
+ }
+
+ /* This invalidates frame_sp_p */
+ switch_v7m_security_state(env, true);
+ env->v7m.exception = newpsr_exc;
+ env->v7m.control[M_REG_S] &= ~R_V7M_CONTROL_SFPA_MASK;
+ if (newpsr & XPSR_SFPA) {
+ env->v7m.control[M_REG_S] |= R_V7M_CONTROL_SFPA_MASK;
+ }
+ xpsr_write(env, 0, XPSR_IT);
+ env->thumb = newpc & 1;
+ env->regs[15] = newpc & ~1;
+
+ qemu_log_mask(CPU_LOG_INT, "...function return successful\n");
+ return true;
+}
+
static void arm_log_exception(int idx)
{
if (qemu_loglevel_mask(CPU_LOG_INT)) {
}
}
+static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
+ uint32_t addr, uint16_t *insn)
+{
+ /* Load a 16-bit portion of a v7M instruction, returning true on success,
+ * or false on failure (in which case we will have pended the appropriate
+ * exception).
+ * We need to do the instruction fetch's MPU and SAU checks
+ * like this because there is no MMU index that would allow
+ * doing the load with a single function call. Instead we must
+ * first check that the security attributes permit the load
+ * and that they don't mismatch on the two halves of the instruction,
+ * and then we do the load as a secure load (ie using the security
+ * attributes of the address, not the CPU, as architecturally required).
+ */
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+ V8M_SAttributes sattrs = {};
+ MemTxAttrs attrs = {};
+ ARMMMUFaultInfo fi = {};
+ MemTxResult txres;
+ target_ulong page_size;
+ hwaddr physaddr;
+ int prot;
+ uint32_t fsr;
+
+ v8m_security_lookup(env, addr, MMU_INST_FETCH, mmu_idx, &sattrs);
+ if (!sattrs.nsc || sattrs.ns) {
+ /* This must be the second half of the insn, and it straddles a
+ * region boundary with the second half not being S&NSC.
+ */
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+ }
+ if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
+ &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
+ /* the MPU lookup failed */
+ env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
+ qemu_log_mask(CPU_LOG_INT, "...really MemManage with CFSR.IACCVIOL\n");
+ return false;
+ }
+ *insn = address_space_lduw_le(arm_addressspace(cs, attrs), physaddr,
+ attrs, &txres);
+ if (txres != MEMTX_OK) {
+ env->v7m.cfsr[M_REG_NS] |= R_V7M_CFSR_IBUSERR_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_BUS, false);
+ qemu_log_mask(CPU_LOG_INT, "...really BusFault with CFSR.IBUSERR\n");
+ return false;
+ }
+ return true;
+}
+
+static bool v7m_handle_execute_nsc(ARMCPU *cpu)
+{
+ /* Check whether this attempt to execute code in a Secure & NS-Callable
+ * memory region is for an SG instruction; if so, then emulate the
+ * effect of the SG instruction and return true. Otherwise pend
+ * the correct kind of exception and return false.
+ */
+ CPUARMState *env = &cpu->env;
+ ARMMMUIdx mmu_idx;
+ uint16_t insn;
+
+ /* We should never get here unless get_phys_addr_pmsav8() caused
+ * an exception for NS executing in S&NSC memory.
+ */
+ assert(!env->v7m.secure);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+
+ /* We want to do the MPU lookup as secure; work out what mmu_idx that is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate(env, true);
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15], &insn)) {
+ return false;
+ }
+
+ if (!env->thumb) {
+ goto gen_invep;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction first half (we choose the IMPDEF
+ * early-SG-check option).
+ */
+ goto gen_invep;
+ }
+
+ if (!v7m_read_half_insn(cpu, mmu_idx, env->regs[15] + 2, &insn)) {
+ return false;
+ }
+
+ if (insn != 0xe97f) {
+ /* Not an SG instruction second half (yes, both halves of the SG
+ * insn have the same hex value)
+ */
+ goto gen_invep;
+ }
+
+ /* OK, we have confirmed that we really have an SG instruction.
+ * We know we're NS in S memory so don't need to repeat those checks.
+ */
+ qemu_log_mask(CPU_LOG_INT, "...really an SG instruction at 0x%08" PRIx32
+ ", executing it\n", env->regs[15]);
+ env->regs[14] &= ~1;
+ switch_v7m_security_state(env, true);
+ xpsr_write(env, 0, XPSR_IT);
+ env->regs[15] += 4;
+ return true;
+
+gen_invep:
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ return false;
+}
+
void arm_v7m_cpu_do_interrupt(CPUState *cs)
{
ARMCPU *cpu = ARM_CPU(cs);
* raises the fault, in the A profile short-descriptor format.
*/
switch (env->exception.fsr & 0xf) {
+ case M_FAKE_FSR_NSC_EXEC:
+ /* Exception generated when we try to execute code at an address
+ * which is marked as Secure & Non-Secure Callable and the CPU
+ * is in the Non-Secure state. The only instruction which can
+ * be executed like this is SG (and that only if both halves of
+ * the SG instruction have the same security attributes.)
+ * Everything else must generate an INVEP SecureFault, so we
+ * emulate the SG instruction here.
+ */
+ if (v7m_handle_execute_nsc(cpu)) {
+ return;
+ }
+ break;
+ case M_FAKE_FSR_SFAULT:
+ /* Various flavours of SecureFault for attempts to execute or
+ * access data in the wrong security state.
+ */
+ switch (cs->exception_index) {
+ case EXCP_PREFETCH_ABORT:
+ if (env->v7m.secure) {
+ env->v7m.sfsr |= R_V7M_SFSR_INVTRAN_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVTRAN\n");
+ } else {
+ env->v7m.sfsr |= R_V7M_SFSR_INVEP_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.INVEP\n");
+ }
+ break;
+ case EXCP_DATA_ABORT:
+ /* This must be an NS access to S memory */
+ env->v7m.sfsr |= R_V7M_SFSR_AUVIOL_MASK;
+ qemu_log_mask(CPU_LOG_INT,
+ "...really SecureFault with SFSR.AUVIOL\n");
+ break;
+ }
+ armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_SECURE, false);
+ break;
case 0x8: /* External Abort */
switch (cs->exception_index) {
case EXCP_PREFETCH_ABORT:
case EXCP_IRQ:
break;
case EXCP_EXCEPTION_EXIT:
- do_v7m_exception_exit(cpu);
- return;
+ if (env->regs[15] < EXC_RETURN_MIN_MAGIC) {
+ /* Must be v8M security extension function return */
+ assert(env->regs[15] >= FNC_RETURN_MIN_MAGIC);
+ assert(arm_feature(env, ARM_FEATURE_M_SECURITY));
+ if (do_v7m_function_return(cpu)) {
+ return;
+ }
+ } else {
+ do_v7m_exception_exit(cpu);
+ return;
+ }
+ break;
default:
cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
- lr = R_V7M_EXCRET_RES1_MASK |
- R_V7M_EXCRET_S_MASK |
- R_V7M_EXCRET_DCRS_MASK |
- R_V7M_EXCRET_FTYPE_MASK |
- R_V7M_EXCRET_ES_MASK;
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
- lr |= R_V7M_EXCRET_SPSEL_MASK;
+ if (arm_feature(env, ARM_FEATURE_V8)) {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK;
+ /* The S bit indicates whether we should return to Secure
+ * or NonSecure (ie our current state).
+ * The ES bit indicates whether we're taking this exception
+ * to Secure or NonSecure (ie our target state). We set it
+ * later, in v7m_exception_taken().
+ * The SPSEL bit is also set in v7m_exception_taken() for v8M.
+ * This corresponds to the ARM ARM pseudocode for v8M setting
+ * some LR bits in PushStack() and some in ExceptionTaken();
+ * the distinction matters for the tailchain cases where we
+ * can take an exception without pushing the stack.
+ */
+ if (env->v7m.secure) {
+ lr |= R_V7M_EXCRET_S_MASK;
+ }
+ } else {
+ lr = R_V7M_EXCRET_RES1_MASK |
+ R_V7M_EXCRET_S_MASK |
+ R_V7M_EXCRET_DCRS_MASK |
+ R_V7M_EXCRET_FTYPE_MASK |
+ R_V7M_EXCRET_ES_MASK;
+ if (env->v7m.control[M_REG_NS] & R_V7M_CONTROL_SPSEL_MASK) {
+ lr |= R_V7M_EXCRET_SPSEL_MASK;
+ }
}
if (!arm_v7m_is_handler_mode(env)) {
lr |= R_V7M_EXCRET_MODE_MASK;
}
v7m_push_stack(cpu);
- v7m_exception_taken(cpu, lr);
+ v7m_exception_taken(cpu, lr, false);
qemu_log_mask(CPU_LOG_INT, "... as %d\n", env->v7m.exception);
}
case ARMMMUIdx_S1SE1:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_S1NSE1:
+ case ARMMMUIdx_MPrivNegPri:
+ case ARMMMUIdx_MUserNegPri:
case ARMMMUIdx_MPriv:
- case ARMMMUIdx_MNegPri:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSPrivNegPri:
+ case ARMMMUIdx_MSUserNegPri:
case ARMMMUIdx_MSPriv:
- case ARMMMUIdx_MSNegPri:
case ARMMMUIdx_MSUser:
return 1;
default:
(R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
case R_V7M_MPU_CTRL_ENABLE_MASK:
/* Enabled, but not for HardFault and NMI */
- return mmu_idx == ARMMMUIdx_MNegPri ||
- mmu_idx == ARMMMUIdx_MSNegPri;
+ return mmu_idx & ARM_MMU_IDX_M_NEGPRI;
case R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK:
/* Enabled for all cases */
return false;
case ARMMMUIdx_S1SE0:
case ARMMMUIdx_S1NSE0:
case ARMMMUIdx_MUser:
+ case ARMMMUIdx_MSUser:
+ case ARMMMUIdx_MUserNegPri:
+ case ARMMMUIdx_MSUserNegPri:
return true;
default:
return false;
int ret;
ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
- &txattrs, &s2prot, &s2size, fsr, fi);
+ &txattrs, &s2prot, &s2size, fsr, fi, NULL);
if (ret) {
fi->s2addr = addr;
fi->stage2 = true;
return true;
}
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ * .../S2ConvertAttrsHints()
+ */
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+ uint8_t hiattr = extract32(s2attrs, 2, 2);
+ uint8_t loattr = extract32(s2attrs, 0, 2);
+ uint8_t hihint = 0, lohint = 0;
+
+ if (hiattr != 0) { /* normal memory */
+ if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
+ hiattr = loattr = 1; /* non-cacheable */
+ } else {
+ if (hiattr != 1) { /* Write-through or write-back */
+ hihint = 3; /* RW allocate */
+ }
+ if (loattr != 1) { /* Write-through or write-back */
+ lohint = 3; /* RW allocate */
+ }
+ }
+ }
+
+ return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
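+/* Worked examples (illustrative, assuming HCR_EL2.CD is clear):
+ * convert_stage2_attrs(env, 0xf) (Outer/Inner Write-Back) returns 0xff,
+ * i.e. Write-Back with RW-allocate hints at both levels, while
+ * convert_stage2_attrs(env, 0x5) (Outer/Inner Non-cacheable) returns 0x44.
+ */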
+
static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
target_ulong *page_size_ptr, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
ARMCPU *cpu = arm_env_get_cpu(env);
CPUState *cs = CPU(cpu);
*/
txattrs->secure = false;
}
+
+ if (cacheattrs != NULL) {
+ if (mmu_idx == ARMMMUIdx_S2NS) {
+ cacheattrs->attrs = convert_stage2_attrs(env,
+ extract32(attrs, 0, 4));
+ } else {
+ /* Index into MAIR registers for cache attributes */
+ uint8_t attrindx = extract32(attrs, 0, 3);
+ uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+ assert(attrindx <= 7);
+ cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+ }
+ cacheattrs->shareability = extract32(attrs, 6, 2);
+ }
+
*phys_ptr = descaddr;
*page_size_ptr = page_size;
return false;
return !(*prot & (1 << access_type));
}
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
- MMUAccessType access_type, ARMMMUIdx mmu_idx,
- hwaddr *phys_ptr, int *prot, uint32_t *fsr)
+static bool v8m_is_sau_exempt(CPUARMState *env,
+ uint32_t address, MMUAccessType access_type)
+{
+ /* The architecture specifies that certain address ranges are
+ * exempt from v8M SAU/IDAU checks.
+ */
+ return
+ (access_type == MMU_INST_FETCH && m_is_system_region(env, address)) ||
+ (address >= 0xe0000000 && address <= 0xe0002fff) ||
+ (address >= 0xe000e000 && address <= 0xe000efff) ||
+ (address >= 0xe002e000 && address <= 0xe002efff) ||
+ (address >= 0xe0040000 && address <= 0xe0041fff) ||
+ (address >= 0xe00ff000 && address <= 0xe00fffff);
+}
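+/* The ranges above are (for orientation; the mapping is an assumption) the
+ * v8M PPB areas: ITM/DWT/FPB at 0xe0000000, the SCS at 0xe000e000, its
+ * Non-secure alias at 0xe002e000, debug components at 0xe0040000 and the
+ * ROM table at 0xe00ff000.
+ */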
+
+static void v8m_security_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ V8M_SAttributes *sattrs)
+{
+ /* Look up the security attributes for this address. Compare the
+ * pseudocode SecurityCheck() function.
+ * We assume the caller has zero-initialized *sattrs.
+ */
+ ARMCPU *cpu = arm_env_get_cpu(env);
+ int r;
+
+ /* TODO: implement IDAU */
+
+ if (access_type == MMU_INST_FETCH && extract32(address, 28, 4) == 0xf) {
+ /* 0xf0000000..0xffffffff is always S for insn fetches */
+ return;
+ }
+
+ if (v8m_is_sau_exempt(env, address, access_type)) {
+ sattrs->ns = !regime_is_secure(env, mmu_idx);
+ return;
+ }
+
+ switch (env->sau.ctrl & 3) {
+ case 0: /* SAU.ENABLE == 0, SAU.ALLNS == 0 */
+ break;
+ case 2: /* SAU.ENABLE == 0, SAU.ALLNS == 1 */
+ sattrs->ns = true;
+ break;
+ default: /* SAU.ENABLE == 1 */
+ for (r = 0; r < cpu->sau_sregion; r++) {
+ if (env->sau.rlar[r] & 1) {
+ uint32_t base = env->sau.rbar[r] & ~0x1f;
+ uint32_t limit = env->sau.rlar[r] | 0x1f;
+
+ if (base <= address && limit >= address) {
+ if (sattrs->srvalid) {
+ /* If we hit in more than one region then we must report
+ * as Secure, not NS-Callable, with no valid region
+ * number info.
+ */
+ sattrs->ns = false;
+ sattrs->nsc = false;
+ sattrs->sregion = 0;
+ sattrs->srvalid = false;
+ break;
+ } else {
+ if (env->sau.rlar[r] & 2) {
+ sattrs->nsc = true;
+ } else {
+ sattrs->ns = true;
+ }
+ sattrs->srvalid = true;
+ sattrs->sregion = r;
+ }
+ }
+ }
+ }
+
+ /* TODO when we support the IDAU then it may override the result here */
+ break;
+ }
+}
+
+static bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, uint32_t *fsr, uint32_t *mregion)
{
+ /* Perform a PMSAv8 MPU lookup (without also doing the SAU check
+ * that a full virt-to-phys translation does).
+ * mregion is (if not NULL) set to the region number which matched,
+ * or -1 if no region number is returned (MPU off, address did not
+ * hit a region, address hit in multiple regions).
+ */
ARMCPU *cpu = arm_env_get_cpu(env);
bool is_user = regime_is_user(env, mmu_idx);
uint32_t secure = regime_is_secure(env, mmu_idx);
*phys_ptr = address;
*prot = 0;
+ if (mregion) {
+ *mregion = -1;
+ }
/* Unlike the ARM ARM pseudocode, we don't need to check whether this
* was an exception vector read from the vector table (which is always
/* We don't need to look the attribute up in the MAIR0/MAIR1
* registers because that only tells us about cacheability.
*/
+ if (mregion) {
+ *mregion = matchregion;
+ }
}
*fsr = 0x00d; /* Permission fault */
return !(*prot & (1 << access_type));
}
+
+static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
+ MMUAccessType access_type, ARMMMUIdx mmu_idx,
+ hwaddr *phys_ptr, MemTxAttrs *txattrs,
+ int *prot, uint32_t *fsr)
+{
+ uint32_t secure = regime_is_secure(env, mmu_idx);
+ V8M_SAttributes sattrs = {};
+
+ if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
+ v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+ if (access_type == MMU_INST_FETCH) {
+ /* Instruction fetches always use the MMU bank and the
+ * transaction attribute determined by the fetch address,
+ * regardless of CPU state. This is painful for QEMU
+ * to handle, because it would mean we need to encode
+ * into the mmu_idx not just the (user, negpri) information
+ * for the current security state but also that for the
+ * other security state, which would balloon the number
+ * of mmu_idx values needed alarmingly.
+ * Fortunately we can avoid this because it's not actually
+ * possible to arbitrarily execute code from memory with
+ * the wrong security attribute: it will always generate
+ * an exception of some kind or another, apart from the
+ * special case of an NS CPU executing an SG instruction
+ * in S&NSC memory. So we always just fail the translation
+ * here and sort things out in the exception handler
+ * (including possibly emulating an SG instruction).
+ */
+ if (sattrs.ns != !secure) {
+ *fsr = sattrs.nsc ? M_FAKE_FSR_NSC_EXEC : M_FAKE_FSR_SFAULT;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ } else {
+ /* For data accesses we always use the MMU bank indicated
+ * by the current CPU state, but the security attributes
+ * might downgrade a secure access to nonsecure.
+ */
+ if (sattrs.ns) {
+ txattrs->secure = false;
+ } else if (!secure) {
+ /* NS access to S memory must fault.
+ * Architecturally we should first check whether the
+ * MPU information for this address indicates that we
+ * are doing an unaligned access to Device memory, which
+ * should generate a UsageFault instead. QEMU does not
+ * currently check for that kind of unaligned access though.
+ * If we added it we would need to do so as a special case
+ * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
+ */
+ *fsr = M_FAKE_FSR_SFAULT;
+ *phys_ptr = address;
+ *prot = 0;
+ return true;
+ }
+ }
+ }
+
+ return pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
+ txattrs, prot, fsr, NULL);
+}
+
static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, int *prot, uint32_t *fsr)
return false;
}
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+ if (s1 == 4 || s2 == 4) {
+ /* non-cacheable has precedence */
+ return 4;
+ } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+ /* stage 1 write-through takes precedence */
+ return s1;
+ } else if (extract32(s2, 2, 2) == 2) {
+ /* stage 2 write-through takes precedence, but the allocation hint
+ * is still taken from stage 1
+ */
+ return (2 << 2) | extract32(s1, 0, 2);
+ } else { /* write-back */
+ return s1;
+ }
+}
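+/* Examples (illustrative): combine_cacheattr_nibble(0xf, 0x4) == 0x4
+ * because Non-cacheable wins, while combine_cacheattr_nibble(0xf, 0xc)
+ * == 0xf since both are Write-Back and the stage 1 allocation hints are
+ * kept.
+ */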
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+ uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
+ uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
+ ARMCacheAttrs ret;
+
+ /* Combine shareability attributes (table D4-43) */
+ if (s1.shareability == 2 || s2.shareability == 2) {
+ /* if either are outer-shareable, the result is outer-shareable */
+ ret.shareability = 2;
+ } else if (s1.shareability == 3 || s2.shareability == 3) {
+ /* if either are inner-shareable, the result is inner-shareable */
+ ret.shareability = 3;
+ } else {
+ /* both non-shareable */
+ ret.shareability = 0;
+ }
+
+ /* Combine memory type and cacheability attributes */
+ if (s1hi == 0 || s2hi == 0) {
+ /* Device has precedence over normal */
+ if (s1lo == 0 || s2lo == 0) {
+ /* nGnRnE has precedence over anything */
+ ret.attrs = 0;
+ } else if (s1lo == 4 || s2lo == 4) {
+ /* non-Reordering has precedence over Reordering */
+ ret.attrs = 4; /* nGnRE */
+ } else if (s1lo == 8 || s2lo == 8) {
+ /* non-Gathering has precedence over Gathering */
+ ret.attrs = 8; /* nGRE */
+ } else {
+ ret.attrs = 0xc; /* GRE */
+ }
+
+ /* Any location for which the resultant memory type is any
+ * type of Device memory is always treated as Outer Shareable.
+ */
+ ret.shareability = 2;
+ } else { /* Normal memory */
+ /* Outer/inner cacheability combine independently */
+ ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+ | combine_cacheattr_nibble(s1lo, s2lo);
+
+ if (ret.attrs == 0x44) {
+ /* Any location for which the resultant memory type is Normal
+ * Inner Non-cacheable, Outer Non-cacheable is always treated
+ * as Outer Shareable.
+ */
+ ret.shareability = 2;
+ }
+ }
+
+ return ret;
+}
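+/* End-to-end example (illustrative): stage 1 Normal Write-Back (0xff),
+ * Inner Shareable, combined with stage 2 Normal Non-cacheable (0x44)
+ * gives attrs == 0x44, and the shareability is then forced to 2
+ * (Outer Shareable) by the final rule above.
+ */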
+
+
/* get_phys_addr - get the physical address for this virtual address
*
* Find the physical address corresponding to the given virtual address,
* @prot: set to the permissions for the page containing phys_ptr
* @page_size: set to the size of the page containing phys_ptr
* @fsr: set to the DFSR/IFSR value on failure
+ * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
*/
static bool get_phys_addr(CPUARMState *env, target_ulong address,
MMUAccessType access_type, ARMMMUIdx mmu_idx,
hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
target_ulong *page_size, uint32_t *fsr,
- ARMMMUFaultInfo *fi)
+ ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
{
if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
/* Call ourselves recursively to do the stage 1 and then stage 2
hwaddr ipa;
int s2_prot;
int ret;
+ ARMCacheAttrs cacheattrs2 = {};
ret = get_phys_addr(env, address, access_type,
stage_1_mmu_idx(mmu_idx), &ipa, attrs,
- prot, page_size, fsr, fi);
+ prot, page_size, fsr, fi, cacheattrs);
/* If S1 fails or S2 is disabled, return early. */
if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
/* S1 is done. Now do S2 translation. */
ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
phys_ptr, attrs, &s2_prot,
- page_size, fsr, fi);
+ page_size, fsr, fi,
+ cacheattrs != NULL ? &cacheattrs2 : NULL);
fi->s2addr = ipa;
/* Combine the S1 and S2 perms. */
*prot &= s2_prot;
+
+ /* Combine the S1 and S2 cache attributes, if needed */
+ if (!ret && cacheattrs != NULL) {
+ *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+ }
+
return ret;
} else {
/*
if (arm_feature(env, ARM_FEATURE_V8)) {
/* PMSAv8 */
ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
- phys_ptr, prot, fsr);
+ phys_ptr, attrs, prot, fsr);
} else if (arm_feature(env, ARM_FEATURE_V7)) {
/* PMSAv7 */
ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
if (regime_using_lpae_format(env, mmu_idx)) {
return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
- attrs, prot, page_size, fsr, fi);
+ attrs, prot, page_size, fsr, fi, cacheattrs);
} else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
attrs, prot, page_size, fsr, fi);
ret = get_phys_addr(env, address, access_type,
core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
- &attrs, &prot, &page_size, fsr, fi);
+ &attrs, &prot, &page_size, fsr, fi, NULL);
if (!ret) {
/* Map a single [sub]page. */
phys_addr &= TARGET_PAGE_MASK;
*attrs = (MemTxAttrs) {};
ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
- attrs, &prot, &page_size, &fsr, &fi);
+ attrs, &prot, &page_size, &fsr, &fi, NULL);
if (ret) {
return -1;
switch (reg) {
case 8: /* MSP */
- return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
- env->v7m.other_sp : env->regs[13];
+ return v7m_using_psp(env) ? env->v7m.other_sp : env->regs[13];
case 9: /* PSP */
- return (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) ?
- env->regs[13] : env->v7m.other_sp;
+ return v7m_using_psp(env) ? env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
return env->v7m.primask[env->v7m.secure];
case 17: /* BASEPRI */
}
break;
case 8: /* MSP */
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->v7m.other_sp = val;
} else {
env->regs[13] = val;
}
break;
case 9: /* PSP */
- if (env->v7m.control[env->v7m.secure] & R_V7M_CONTROL_SPSEL_MASK) {
+ if (v7m_using_psp(env)) {
env->regs[13] = val;
} else {
env->v7m.other_sp = val;
* thread mode; other bits can be updated by any privileged code.
* write_v7m_control_spsel() deals with updating the SPSEL bit in
* env->v7m.control, so we only need update the others.
+ * For v7M, we must just ignore explicit writes to SPSEL in handler
+ * mode; for v8M the write is permitted but will have no effect.
*/
- if (!arm_v7m_is_handler_mode(env)) {
+ if (arm_feature(env, ARM_FEATURE_V8) ||
+ !arm_v7m_is_handler_mode(env)) {
write_v7m_control_spsel(env, (val & R_V7M_CONTROL_SPSEL_MASK) != 0);
}
env->v7m.control[env->v7m.secure] &= ~R_V7M_CONTROL_NPRIV_MASK;
}
}
+uint32_t HELPER(v7m_tt)(CPUARMState *env, uint32_t addr, uint32_t op)
+{
+ /* Implement the TT instruction. op is bits [7:6] of the insn. */
+ bool forceunpriv = op & 1;
+ bool alt = op & 2;
+ V8M_SAttributes sattrs = {};
+ uint32_t tt_resp;
+ bool r, rw, nsr, nsrw, mrvalid;
+ int prot;
+ MemTxAttrs attrs = {};
+ hwaddr phys_addr;
+ uint32_t fsr;
+ ARMMMUIdx mmu_idx;
+ uint32_t mregion;
+ bool targetpriv;
+ bool targetsec = env->v7m.secure;
+
+ /* Work out which security state and privilege level we're
+ * interested in...
+ */
+ if (alt) {
+ targetsec = !targetsec;
+ }
+
+ if (forceunpriv) {
+ targetpriv = false;
+ } else {
+ targetpriv = arm_v7m_is_handler_mode(env) ||
+ !(env->v7m.control[targetsec] & R_V7M_CONTROL_NPRIV_MASK);
+ }
+
+ /* ...and then figure out which MMU index this is */
+ mmu_idx = arm_v7m_mmu_idx_for_secstate_and_priv(env, targetsec, targetpriv);
+
+ /* We know that the MPU and SAU don't care about the access type
+ * for our purposes beyond that we don't want to claim to be
+ * an insn fetch, so we arbitrarily call this a read.
+ */
+
+ /* MPU region info only available for privileged or if
+ * inspecting the other MPU state.
+ */
+ if (arm_current_el(env) != 0 || alt) {
+ /* We can ignore the return value as prot is always set */
+ pmsav8_mpu_lookup(env, addr, MMU_DATA_LOAD, mmu_idx,
+ &phys_addr, &attrs, &prot, &fsr, &mregion);
+ if (mregion == -1) {
+ mrvalid = false;
+ mregion = 0;
+ } else {
+ mrvalid = true;
+ }
+ r = prot & PAGE_READ;
+ rw = prot & PAGE_WRITE;
+ } else {
+ r = false;
+ rw = false;
+ mrvalid = false;
+ mregion = 0;
+ }
+
+ if (env->v7m.secure) {
+ v8m_security_lookup(env, addr, MMU_DATA_LOAD, mmu_idx, &sattrs);
+ nsr = sattrs.ns && r;
+ nsrw = sattrs.ns && rw;
+ } else {
+ sattrs.ns = true;
+ nsr = false;
+ nsrw = false;
+ }
+
+ tt_resp = (sattrs.iregion << 24) |
+ (sattrs.irvalid << 23) |
+ ((!sattrs.ns) << 22) |
+ (nsrw << 21) |
+ (nsr << 20) |
+ (rw << 19) |
+ (r << 18) |
+ (sattrs.srvalid << 17) |
+ (mrvalid << 16) |
+ (sattrs.sregion << 8) |
+ mregion;
+
+ return tt_resp;
+}
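+/* Example response (illustrative): a privileged Secure TT on an address
+ * that hits MPU region 3 with read/write permission and is marked
+ * Non-secure by the SAU returns MREGION == 3, MRVALID/R/RW == 1, S == 0
+ * and NSR/NSRW == 1, with SREGION/SRVALID describing the SAU region hit.
+ */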
+
#endif
void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)