* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
+#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
        raise_exception(env, exc, syn, target_el);
    }
}
-#endif
+
+/* Raise a data fault alignment exception for the specified virtual address */
+void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr, int is_write,
+                                 int is_user, uintptr_t retaddr)
+{
+    ARMCPU *cpu = ARM_CPU(cs);
+    CPUARMState *env = &cpu->env;
+    int target_el;
+    bool same_el;
+
+    if (retaddr) {
+        /* Now we have a real CPU fault: unwind the guest state to the
+         * faulting instruction before raising the exception.
+         */
+        cpu_restore_state(cs, retaddr);
+    }
+
+    target_el = exception_target_el(env);
+    same_el = (arm_current_el(env) == target_el);
+
+    env->exception.vaddress = vaddr;
+
+    /* The DFSR for an alignment fault depends on whether we're using
+     * the LPAE long descriptor format, or the short descriptor format:
+     * 0x21 is the long-descriptor fault status code for an alignment
+     * fault, and 0x1 the short-descriptor encoding of the same fault.
+     */
+    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
+        env->exception.fsr = 0x21;
+    } else {
+        env->exception.fsr = 0x1;
+    }
+
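+    /* From ARMv6 the DFSR also carries a WnR bit [11], set for writes */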
+    if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
+        env->exception.fsr |= (1 << 11);
+    }
+
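+    /* The fault status code in the syndrome is likewise 0x21 (alignment) */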
+    raise_exception(env, EXCP_DATA_ABORT,
+                    syn_data_abort(same_el, 0, 0, 0, is_write == 1, 0x21),
+                    target_el);
+}
+
+#endif /* !defined(CONFIG_USER_ONLY) */
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
-    cpsr_write(env, val, mask);
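+    /* CPSRWriteByInstr: the write comes from a guest instruction, so
+     * cpsr_write() applies the architectural banking and mode-change
+     * rules rather than doing a raw field update.
+     */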
+    cpsr_write(env, val, mask, CPSRWriteByInstr);
+}
+
+/* Write the CPSR for a 32-bit exception return */
+void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
+{
+    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);
}
/* Access to user mode registers from privileged modes. */
    }
}
-void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
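+/* Write the banked r13 (SP) for the given mode: the live register if that
+ * mode is the current one, the banked copy otherwise.
+ */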
+void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
+{
+    if ((env->uncached_cpsr & CPSR_M) == mode) {
+        env->regs[13] = val;
+    } else {
+        env->banked_r13[bank_number(mode)] = val;
+    }
+}
+
+uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
+{
+    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
+        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
+         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
+         */
+        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
+                        exception_target_el(env));
+    }
+
+    if ((env->uncached_cpsr & CPSR_M) == mode) {
+        return env->regs[13];
+    } else {
+        return env->banked_r13[bank_number(mode)];
+    }
+}
+
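+/* Perform the run-time access check for a coprocessor/system register;
+ * 'isread' tells the accessfn whether the access is a read or a write.
+ */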
+void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
+                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;
        return;
    }
-    switch (ri->accessfn(env, ri)) {
+    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
+    case CP_ACCESS_TRAP_FP_EL2:
+        target_el = 2;
+        /* Since we are an implementation that takes exceptions on a trapped
+         * conditional insn only if the insn has passed its condition code
+         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
+         * (which is also the required value for AArch64 traps).
+         */
+        syndrome = syn_fp_access_trap(1, 0xe, false);
+        break;
+    case CP_ACCESS_TRAP_FP_EL3:
+        target_el = 3;
+        syndrome = syn_fp_access_trap(1, 0xe, false);
+        break;
    default:
        g_assert_not_reached();
    }
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
-    /* On ARMv8 AArch32, SMD only applies to NS state.
-     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
-     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
-     * the EL2 condition here.
+    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
+     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
+     * extensions, SMD only applies to NS state.
+     * On ARMv7 without the Virtualization extensions, the SMD bit
+     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
+     * so we need not special case this here.
      */
-    bool undef = is_a64(env) ? smd : (!secure && smd);
+    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;
    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
    }
}
+static int el_from_spsr(uint32_t spsr)
+{
+    /* Return the exception level that this SPSR is requesting a return to,
+     * or -1 if it is invalid (an illegal return)
+     */
+    if (spsr & PSTATE_nRW) {
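+        /* AArch32 state (nRW set): deduce the target EL from the mode field */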
+        switch (spsr & CPSR_M) {
+        case ARM_CPU_MODE_USR:
+            return 0;
+        case ARM_CPU_MODE_HYP:
+            return 2;
+        case ARM_CPU_MODE_FIQ:
+        case ARM_CPU_MODE_IRQ:
+        case ARM_CPU_MODE_SVC:
+        case ARM_CPU_MODE_ABT:
+        case ARM_CPU_MODE_UND:
+        case ARM_CPU_MODE_SYS:
+            return 1;
+        case ARM_CPU_MODE_MON:
+            /* Returning to Mon from AArch64 is never possible,
+             * so this is an illegal return.
+             */
+        default:
+            return -1;
+        }
+    } else {
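+        /* AArch64 state: M[3:2] encodes the EL and M[0] the SP selection */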
+        if (extract32(spsr, 1, 1)) {
+            /* Return with reserved M[1] bit set */
+            return -1;
+        }
+        if (extract32(spsr, 0, 4) == 1) {
+            /* Return to EL0 with M[0] bit set */
+            return -1;
+        }
+        return extract32(spsr, 2, 2);
+    }
+}
+
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
+    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;
    aarch64_save_sp(env, cur_el);
        spsr &= ~PSTATE_SS;
    }
-    if (spsr & PSTATE_nRW) {
-        /* TODO: We currently assume EL1/2/3 are running in AArch64. */
+    new_el = el_from_spsr(spsr);
+    if (new_el == -1) {
+        goto illegal_return;
+    }
+    if (new_el > cur_el
+        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
+        /* Disallow return to an EL which is unimplemented or higher
+         * than the current one.
+         */
+        goto illegal_return;
+    }
+
+    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
+        /* Return to an EL which is configured for a different register width */
+        goto illegal_return;
+    }
+
+    if (new_el == 2 && arm_is_secure_below_el3(env)) {
+        /* Return to the non-existent secure-EL2 */
+        goto illegal_return;
+    }
+
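+    /* When HCR_EL2.TGE is set, Non-secure EL1 execution is not permitted,
+     * so a return there is illegal.
+     */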
+    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
+        && !arm_is_secure_below_el3(env)) {
+        goto illegal_return;
+    }
+
+    if (!return_to_aa64) {
        env->aarch64 = 0;
-        new_el = 0;
-        env->uncached_cpsr = 0x10;
-        cpsr_write(env, spsr, ~0);
+        /* We do a raw CPSR write because aarch64_sync_64_to_32()
+         * will sort the register banks out for us, and we've already
+         * caught all the bad-mode cases in el_from_spsr().
+         */
+        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);
-        env->regs[15] = env->elr_el[1] & ~0x1;
-    } else {
-        new_el = extract32(spsr, 2, 2);
-        if (new_el > cur_el
-            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
-            /* Disallow return to an EL which is unimplemented or higher
-             * than the current one.
-             */
-            goto illegal_return;
-        }
-        if (extract32(spsr, 1, 1)) {
-            /* Return with reserved M[1] bit set */
-            goto illegal_return;
-        }
-        if (new_el == 0 && (spsr & PSTATE_SP)) {
-            /* Return to EL0 with M[0] bit set */
-            goto illegal_return;
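+        /* Mask the return address: clear bit 0 for a Thumb return,
+         * bits [1:0] for an ARM return.
+         */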
+        if (spsr & CPSR_T) {
+            env->regs[15] = env->elr_el[cur_el] & ~0x1;
+        } else {
+            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
+    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
    }
}
+bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
+{
+    /* Called by core code when a CPU watchpoint fires; need to check if this
+     * is also an architectural watchpoint match.
+     */
+    ARMCPU *cpu = ARM_CPU(cs);
+
+    return check_watchpoints(cpu);
+}
+
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
+            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
+            bool same_el = arm_debug_target_el(env) == arm_current_el(env);
+
            cs->watchpoint_hit = NULL;
-            if (check_watchpoints(cpu)) {
-                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
-                bool same_el = arm_debug_target_el(env) == arm_current_el(env);
-
-                if (extended_addresses_enabled(env)) {
-                    env->exception.fsr = (1 << 9) | 0x22;
-                } else {
-                    env->exception.fsr = 0x2;
-                }
-                env->exception.vaddress = wp_hit->hitaddr;
-                raise_exception(env, EXCP_DATA_ABORT,
-                                syn_watchpoint(same_el, 0, wnr),
-                                arm_debug_target_el(env));
+
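+            /* Long-descriptor format: fault status 0x22 (debug exception)
+             * plus the LPAE bit [9]; short-descriptor format: FS 0x2.
+             */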
+            if (extended_addresses_enabled(env)) {
+                env->exception.fsr = (1 << 9) | 0x22;
            } else {
-                cpu_resume_from_signal(cs, NULL);
+                env->exception.fsr = 0x2;
            }
+            env->exception.vaddress = wp_hit->hitaddr;
+            raise_exception(env, EXCP_DATA_ABORT,
+                            syn_watchpoint(same_el, 0, wnr),
+                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));
-        if (cpu_breakpoint_test(cs, pc, BP_GDB)) {
+        /* (1) GDB breakpoints should be handled first.
+         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
+         * since singlestep is also done by generating a debug internal
+         * exception.
+         */
+        if (cpu_breakpoint_test(cs, pc, BP_GDB)
+            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }