#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
-static void raise_exception(CPUARMState *env, uint32_t excp,
- uint32_t syndrome, uint32_t target_el)
+void raise_exception(CPUARMState *env, uint32_t excp,
+ uint32_t syndrome, uint32_t target_el)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
+ if ((env->cp15.hcr_el2 & HCR_TGE) &&
+ target_el == 1 && !arm_is_secure(env)) {
+ /*
+ * Redirect NS EL1 exceptions to NS EL2. These are reported with
+ * their original syndrome register value, with the exception of
+ * SIMD/FP access traps, which are reported as uncategorized
+ * (see DDI0487C.a D1.10.4)
+ */
+ target_el = 2;
+ if (syndrome >> ARM_EL_EC_SHIFT == EC_ADVSIMDFPACCESSTRAP) {
+ syndrome = syn_uncategorized();
+ }
+ }
+
assert(!excp_is_internal(excp));
cs->exception_index = excp;
env->exception.syndrome = syndrome;
env->exception.target_el = target_el;
cpu_loop_exit(cs);
}
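
For context on the EC check above: the exception class field occupies the
top six bits of the syndrome (ESR_ELx.EC, bits [31:26]), which is what
ARM_EL_EC_SHIFT selects, and an "uncategorized" syndrome is EC 0 with only
the IL bit set. A minimal standalone sketch of that encoding (the constant
names here are illustrative, not QEMU's):

    #include <stdbool.h>
    #include <stdint.h>

    #define EC_SHIFT          26          /* ESR_ELx.EC lives in bits [31:26] */
    #define IL_BIT            (1u << 25)  /* instruction length: 32-bit */
    #define EC_FP_ACCESS_TRAP 0x07        /* trapped SIMD/FP access */

    /* EC 0 with IL set: the "uncategorized" syndrome substituted above. */
    static uint32_t uncategorized_syndrome(void)
    {
        return (0u << EC_SHIFT) | IL_BIT;
    }

    static bool is_fp_access_trap(uint32_t syndrome)
    {
        return (syndrome >> EC_SHIFT) == EC_FP_ACCESS_TRAP;
    }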
-uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
- uint32_t rn, uint32_t maxindex)
+uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
+ uint32_t maxindex)
{
- uint32_t val;
- uint32_t tmp;
- int index;
- int shift;
- uint64_t *table;
- table = (uint64_t *)&env->vfp.regs[rn];
+ uint32_t val, shift;
+ uint64_t *table = vn;
+
val = 0;
for (shift = 0; shift < 32; shift += 8) {
- index = (ireg >> shift) & 0xff;
+ uint32_t index = (ireg >> shift) & 0xff;
if (index < maxindex) {
- tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
+ uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
val |= tmp << shift;
} else {
val |= def & (0xff << shift);
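
The loop above implements the NEON VTBL/VTBX semantics: each byte of ireg
selects a byte from the table (packed into uint64_t lanes), falling back to
the corresponding byte of def when the index is out of range. A standalone
harness exercising the same logic (illustrative, not part of the patch):

    #include <assert.h>
    #include <stdint.h>

    static uint32_t tbl_lookup(uint32_t ireg, uint32_t def,
                               const uint64_t *table, uint32_t maxindex)
    {
        uint32_t val = 0;
        for (uint32_t shift = 0; shift < 32; shift += 8) {
            uint32_t index = (ireg >> shift) & 0xff;
            if (index < maxindex) {
                /* Byte 'index' of the packed table: 64-bit word
                 * index >> 3, byte (index & 7) within that word.
                 */
                uint32_t tmp =
                    (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
                val |= tmp << shift;
            } else {
                val |= def & (0xffu << shift);
            }
        }
        return val;
    }

    int main(void)
    {
        uint64_t table[2] = { 0x0706050403020100ull, 0x0f0e0d0c0b0a0908ull };
        /* Indexes 0x00, 0x05, 0x0a select table bytes; 0xff is out of
         * range for maxindex 16, so that byte comes from def (0xdd).
         */
        assert(tbl_lookup(0xff0a0500, 0xdd000000, table, 16) == 0xdd0a0500);
        return 0;
    }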
static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
unsigned int target_el,
- bool same_el,
+ bool same_el, bool ea,
bool s1ptw, bool is_write,
int fsc)
{
*/
if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
syn = syn_data_abort_no_iss(same_el,
- 0, 0, s1ptw, is_write, fsc);
+ ea, 0, s1ptw, is_write, fsc);
} else {
/* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
* syndrome created at translation time.
*/
syn = syn_data_abort_with_iss(same_el,
0, 0, 0, 0, 0,
- 0, 0, s1ptw, is_write, fsc,
+ ea, 0, s1ptw, is_write, fsc,
false);
/* Merge the runtime syndrome with the template syndrome. */
syn |= template_syn;
return syn;
}
+static void deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
+ int mmu_idx, ARMMMUFaultInfo *fi)
+{
+ CPUARMState *env = &cpu->env;
+ int target_el;
+ bool same_el;
+ uint32_t syn, exc, fsr, fsc;
+ ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);
+
+ target_el = exception_target_el(env);
+ if (fi->stage2) {
+ target_el = 2;
+ env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
+ }
+ same_el = (arm_current_el(env) == target_el);
+
+ if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
+ arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
+ /* LPAE format fault status register : bottom 6 bits are
+ * status code in the same form as needed for syndrome
+ */
+ fsr = arm_fi_to_lfsc(fi);
+ fsc = extract32(fsr, 0, 6);
+ } else {
+ fsr = arm_fi_to_sfsc(fi);
+ /* Short format FSR : this fault will never actually be reported
+ * to an EL that uses a syndrome register. Use a (currently)
+ * reserved FSR code in case the constructed syndrome does leak
+ * into the guest somehow.
+ */
+ fsc = 0x3f;
+ }
+
+ if (access_type == MMU_INST_FETCH) {
+ syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
+ exc = EXCP_PREFETCH_ABORT;
+ } else {
+ syn = merge_syn_data_abort(env->exception.syndrome, target_el,
+ same_el, fi->ea, fi->s1ptw,
+ access_type == MMU_DATA_STORE,
+ fsc);
+ if (access_type == MMU_DATA_STORE
+ && arm_feature(env, ARM_FEATURE_V6)) {
+ fsr |= (1 << 11);
+ }
+ exc = EXCP_DATA_ABORT;
+ }
+
+ env->exception.vaddress = addr;
+ env->exception.fsr = fsr;
+ raise_exception(env, exc, syn, target_el);
+}
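
deliver_fault() picks between the two architectural FSR formats: for LPAE
long-descriptor regimes (and any AArch64 or EL2 target) the bottom six bits
of the FSR are the same fault status code that goes into the syndrome,
while the short descriptor format encodes its status differently and never
feeds a syndrome register. An illustrative subset of the long-format codes
(architectural values; a sketch, not QEMU's arm_fi_to_lfsc()):

    #include <stdint.h>

    enum fault_type {
        FAULT_TRANSLATION,    /* no valid descriptor at some level */
        FAULT_PERMISSION,     /* descriptor forbids the access */
        FAULT_SYNC_EXTERNAL,  /* bus error on the access itself */
        FAULT_ALIGNMENT,
    };

    /* level is the translation table level (0..3) where applicable */
    static uint32_t long_format_fsc(enum fault_type type, int level)
    {
        switch (type) {
        case FAULT_TRANSLATION:   return 0x04 | level; /* 0b0001LL */
        case FAULT_PERMISSION:    return 0x0c | level; /* 0b0011LL */
        case FAULT_SYNC_EXTERNAL: return 0x10;         /* 0b010000 */
        case FAULT_ALIGNMENT:     return 0x21;         /* 0b100001 */
        }
        return 0x3f; /* reserved, mirroring the fallback above */
    }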
+
/* Try to fill the TLB and deliver the fault to the guest as an
 * exception if that fails. retaddr is the host return address into
 * the generated code, which cpu_restore_state() uses to reconstruct
 * the guest CPU state.
 */
-void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
- int mmu_idx, uintptr_t retaddr)
+void tlb_fill(CPUState *cs, target_ulong addr, int size,
+ MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
bool ret;
- uint32_t fsr = 0;
ARMMMUFaultInfo fi = {};
- ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
+ ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fi);
if (unlikely(ret)) {
ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint32_t syn, exc;
- unsigned int target_el;
- bool same_el;
-
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
-
- target_el = exception_target_el(env);
- if (fi.stage2) {
- target_el = 2;
- env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
- }
- same_el = arm_current_el(env) == target_el;
- /* AArch64 syndrome does not have an LPAE bit */
- syn = fsr & ~(1 << 9);
- /* For insn and data aborts we assume there is no instruction syndrome
- * information; this is always true for exceptions reported to EL1.
- */
- if (access_type == MMU_INST_FETCH) {
- syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
- exc = EXCP_PREFETCH_ABORT;
- } else {
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
- same_el, fi.s1ptw,
- access_type == MMU_DATA_STORE, syn);
- if (access_type == MMU_DATA_STORE
- && arm_feature(env, ARM_FEATURE_V6)) {
- fsr |= (1 << 11);
- }
- exc = EXCP_DATA_ABORT;
- }
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
- env->exception.vaddress = addr;
- env->exception.fsr = fsr;
- raise_exception(env, exc, syn, target_el);
+ deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
}
int mmu_idx, uintptr_t retaddr)
{
ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int target_el;
- bool same_el;
- uint32_t syn;
-
- if (retaddr) {
- /* now we have a real cpu fault */
- cpu_restore_state(cs, retaddr);
- }
+ ARMMMUFaultInfo fi = {};
- target_el = exception_target_el(env);
- same_el = (arm_current_el(env) == target_el);
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
- env->exception.vaddress = vaddr;
+ fi.type = ARMFault_Alignment;
+ deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
+}
- /* the DFSR for an alignment fault depends on whether we're using
- * the LPAE long descriptor format, or the short descriptor format
- */
- if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
- env->exception.fsr = (1 << 9) | 0x21;
- } else {
- env->exception.fsr = 0x1;
- }
+/* arm_cpu_do_transaction_failed: handle a memory system error response
+ * (eg "no device/memory present at address") by raising an external abort
+ * exception
+ */
+void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
+ vaddr addr, unsigned size,
+ MMUAccessType access_type,
+ int mmu_idx, MemTxAttrs attrs,
+ MemTxResult response, uintptr_t retaddr)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ ARMMMUFaultInfo fi = {};
- if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
- env->exception.fsr |= (1 << 11);
- }
+ /* now we have a real cpu fault */
+ cpu_restore_state(cs, retaddr, true);
- syn = merge_syn_data_abort(env->exception.syndrome, target_el,
- same_el, 0, access_type == MMU_DATA_STORE,
- 0x21);
- raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
+ fi.ea = arm_extabort_type(response);
+ fi.type = ARMFault_SyncExternal;
+ deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}
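
The EA bit filled in here is an IMPDEF classification of external aborts.
A sketch of the idea behind arm_extabort_type() (assumed mapping: decode
errors report EA == 0, slave/other bus errors EA == 1, following the usual
AXI convention):

    #include <stdint.h>

    typedef enum { TX_OK, TX_ERROR, TX_DECODE_ERROR } tx_result;

    static uint32_t extabort_ea(tx_result response)
    {
        /* EA == 0: bus decode error; EA == 1: slave error */
        return response != TX_DECODE_ERROR;
    }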
#endif /* !defined(CONFIG_USER_ONLY) */
+void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
+{
+ /*
+ * Perform the v8M stack limit check for SP updates from translated code,
+ * raising an exception if the limit is breached.
+ */
+ if (newvalue < v7m_sp_limit(env)) {
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
+ /*
+ * Stack limit exceptions are a rare case, so rather than syncing
+ * PC/condbits before the call, we use cpu_restore_state() to
+ * get them right before raising the exception.
+ */
+ cpu_restore_state(cs, GETPC(), true);
+ raise_exception(env, EXCP_STKOF, 0, 1);
+ }
+}
+
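
The limit the helper compares against comes from the v8M stack limit
registers, banked by security state and selected by which stack pointer is
in use. Roughly what v7m_sp_limit() consults (a sketch under those
assumptions, not QEMU's code):

    #include <stdbool.h>
    #include <stdint.h>

    struct v8m_sp_state {
        uint32_t msplim[2]; /* [0] Non-secure, [1] Secure */
        uint32_t psplim[2];
        bool using_psp;     /* process stack selected (CONTROL.SPSEL) */
        bool secure;
    };

    static uint32_t sp_limit(const struct v8m_sp_state *s)
    {
        return s->using_psp ? s->psplim[s->secure] : s->msplim[s->secure];
    }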
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
int cur_el = arm_current_el(env);
uint64_t mask;
+ if (arm_feature(env, ARM_FEATURE_M)) {
+ /* M profile cores can never trap WFI/WFE. */
+ return 0;
+ }
+
/* If we are currently in EL0 then we need to check if SCTLR is set up for
* WFx instructions being trapped to EL1. These trap bits don't exist in v7.
*/
return 0;
}
-void HELPER(wfi)(CPUARMState *env)
+void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
CPUState *cs = CPU(arm_env_get_cpu(env));
int target_el = check_wfx_trap(env, false);
}
if (target_el) {
- env->pc -= 4;
- raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
+ env->pc -= insn_len;
+ raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
+ target_el);
}
cs->exception_index = EXCP_HLT;
raise_exception(env, excp, syndrome, target_el);
}
+/* Raise an EXCP_BKPT with the specified syndrome register value,
+ * targeting the correct exception level for debug exceptions.
+ */
+void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
+{
+ /* FSR will only be used if the debug target EL is AArch32. */
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ * values to the guest that it shouldn't be able to see at its
+ * exception/security level.
+ */
+ env->exception.vaddress = 0;
+ raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
+}
+
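
exception_bkpt_insn routes via arm_debug_target_el(). A sketch of that
routing rule (simplified from the real helper, which also covers AArch32
EL3; assume an AArch64 or EL3-less configuration here): non-secure debug
exceptions go to EL2 when MDCR_EL2.TDE or HCR_EL2.TGE is set, otherwise to
EL1.

    #include <stdbool.h>

    static int debug_target_el(bool have_el2, bool secure,
                               bool mdcr_tde, bool hcr_tge)
    {
        if (have_el2 && !secure && (mdcr_tde || hcr_tge)) {
            return 2;
        }
        return 1;
    }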
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
+ qemu_mutex_lock_iothread();
+ arm_call_pre_el_change_hook(arm_env_get_cpu(env));
+ qemu_mutex_unlock_iothread();
+
cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
/* Generated code has already stored the new PC value, but
*/
int curmode = env->uncached_cpsr & CPSR_M;
+ if (regno == 17) {
+ /* ELR_Hyp: a special case because access from tgtmode is OK */
+ if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
+ goto undef;
+ }
+ return;
+ }
+
if (curmode == tgtmode) {
goto undef;
}
}
if (tgtmode == ARM_CPU_MODE_HYP) {
- switch (regno) {
- case 17: /* ELR_Hyp */
- if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
- goto undef;
- }
- break;
- default:
- if (curmode != ARM_CPU_MODE_MON) {
- goto undef;
- }
- break;
+ /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
+ if (curmode != ARM_CPU_MODE_MON) {
+ goto undef;
}
}
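
The rule the two hunks above implement, expressed as a standalone predicate
(illustrative; the mode numbers are the architectural CPSR.M encodings): v8
AArch32 allows banked MSR/MRS access to ELR_hyp from Hyp mode itself, while
SPSR_hyp and r13_hyp stay accessible from Monitor mode only.

    #include <stdbool.h>

    #define MODE_MON 0x16
    #define MODE_HYP 0x1a

    static bool hyp_banked_access_ok(int regno, int curmode)
    {
        if (regno == 17) { /* ELR_hyp */
            return curmode == MODE_HYP || curmode == MODE_MON;
        }
        return curmode == MODE_MON; /* SPSR_hyp, r13_hyp */
    }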
*/
bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
- if (arm_is_psci_call(cpu, EXCP_SMC)) {
- /* If PSCI is enabled and this looks like a valid PSCI call then
- * that overrides the architecturally mandated SMC behaviour.
+ if (!arm_feature(env, ARM_FEATURE_EL3) &&
+ cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
+ /* If we have no EL3 then SMC always UNDEFs and can't be
+ * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
+ * firmware within QEMU, and we want an EL2 guest to be able
+ * to forbid its EL1 from making PSCI calls into QEMU's
+ * "firmware" via HCR.TSC, so for these purposes treat
+ * PSCI-via-SMC as implying an EL3.
*/
- return;
- }
-
- if (!arm_feature(env, ARM_FEATURE_EL3)) {
- /* If we have no EL3 then SMC always UNDEFs */
undef = true;
} else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
- /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
+ /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
+ * We also want an EL2 guest to be able to forbid its EL1 from
+ * making PSCI calls into QEMU's "firmware" via HCR.TSC.
+ */
raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
}
- if (undef) {
+ /* If PSCI is enabled and this looks like a valid PSCI call then
+ * suppress the UNDEF -- we'll catch the SMC exception and
+ * implement the PSCI call behaviour there.
+ */
+ if (undef && !arm_is_psci_call(cpu, EXCP_SMC)) {
raise_exception(env, EXCP_UDEF, syn_uncategorized(),
exception_target_el(env));
}
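
Putting the rewritten checks together, the SMC disposition reads as a
three-way decision. A sketch of that order (simplified; psci_via_smc stands
in for the QEMU_PSCI_CONDUIT_SMC test, and smd_undef for the SMD value
already adjusted for AArch32 non-secure state):

    #include <stdbool.h>

    typedef enum { SMC_UNDEF, SMC_TRAP_EL2, SMC_OK } smc_action;

    static smc_action classify_smc(bool have_el3, bool psci_via_smc,
                                   bool secure, int cur_el, bool hcr_tsc,
                                   bool smd_undef, bool is_psci_call)
    {
        /* PSCI-via-SMC acts as an ersatz EL3 for these purposes */
        if (!have_el3 && !psci_via_smc) {
            return SMC_UNDEF;
        }
        /* HCR.TSC routing from NS EL1 to EL2 has priority over SMD */
        if (!secure && cur_el == 1 && hcr_tsc) {
            return SMC_TRAP_EL2;
        }
        /* a valid PSCI call suppresses the SMD-mandated UNDEF */
        if (smd_undef && !is_psci_call) {
            return SMC_UNDEF;
        }
        return SMC_OK;
    }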
aarch64_save_sp(env, cur_el);
- env->exclusive_addr = -1;
+ arm_clear_exclusive(env);
/* We must squash the PSTATE.SS bit to zero unless both of the
* following hold:
goto illegal_return;
}
+ qemu_mutex_lock_iothread();
+ arm_call_pre_el_change_hook(arm_env_get_cpu(env));
+ qemu_mutex_unlock_iothread();
+
if (!return_to_aa64) {
env->aarch64 = 0;
/* We do a raw CPSR write because aarch64_sync_64_to_32()
"AArch64 EL%d PC 0x%" PRIx64 "\n",
cur_el, new_el, env->pc);
}
+ aarch64_sve_change_el(env, cur_el, new_el);
qemu_mutex_lock_iothread();
arm_call_el_change_hook(arm_env_get_cpu(env));
cs->watchpoint_hit = NULL;
- if (extended_addresses_enabled(env)) {
- env->exception.fsr = (1 << 9) | 0x22;
- } else {
- env->exception.fsr = 0x2;
- }
+ env->exception.fsr = arm_debug_exception_fsr(env);
env->exception.vaddress = wp_hit->hitaddr;
raise_exception(env, EXCP_DATA_ABORT,
syn_watchpoint(same_el, 0, wnr),
return;
}
- if (extended_addresses_enabled(env)) {
- env->exception.fsr = (1 << 9) | 0x22;
- } else {
- env->exception.fsr = 0x2;
- }
- /* FAR is UNKNOWN, so doesn't need setting */
+ env->exception.fsr = arm_debug_exception_fsr(env);
+ /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
+ * values to the guest that it shouldn't be able to see at its
+ * exception/security level.
+ */
+ env->exception.vaddress = 0;
raise_exception(env, EXCP_PREFETCH_ABORT,
syn_breakpoint(same_el),
arm_debug_target_el(env));
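
Both hunks above replace the open-coded FSR values with
arm_debug_exception_fsr(), which chooses the format for the translation
regime of the EL the debug exception is taken to, rather than the current
one. A sketch of the values involved (the fault status code for a debug
exception; simplified here to a single boolean for the regime choice):

    #include <stdbool.h>
    #include <stdint.h>

    static uint32_t debug_exception_fsr(bool target_uses_lpae)
    {
        if (target_uses_lpae) {
            return (1u << 9) | 0x22; /* LPAE bit + long-format FSC 0b100010 */
        }
        return 0x2;                  /* short-format FS 0b00010 */
    }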