* In a V8 implementation, it is permitted for privileged software to
* change the CPSR A/F bits regardless of the SCR.AW/FW bits.
*/
- if (!arm_feature(env, ARM_FEATURE_V8) &&
+ if (write_type != CPSRWriteRaw && !arm_feature(env, ARM_FEATURE_V8) &&
arm_feature(env, ARM_FEATURE_EL3) &&
!arm_feature(env, ARM_FEATURE_EL2) &&
!arm_is_secure(env)) {
env->daif &= ~(CPSR_AIF & mask);
env->daif |= val & CPSR_AIF & mask;
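
For reference, the CPSRWriteType values that cpsr_write() distinguishes look like the following; an earlier patch in this series added the write_type argument itself. This is paraphrased from QEMU's target-arm/cpu.h of that era, and the comments are my gloss rather than the upstream text:

    typedef enum CPSRWriteType {
        CPSRWriteByInstr = 0,         /* guest MSR or CPS instruction */
        CPSRWriteExceptionReturn = 1, /* guest exception return */
        CPSRWriteRaw = 2,             /* trust the value: no checks, no
                                       * register bank switching */
        CPSRWriteByGDBStub = 3,       /* write from the GDB stub */
    } CPSRWriteType;

Raw writes come from code that restores known-good state wholesale (inbound migration, KVM register sync), which is why the SCR.AW/FW-style filtering above must not apply to them.
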
- if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
+ if (write_type != CPSRWriteRaw &&
+ ((env->uncached_cpsr ^ val) & mask & CPSR_M)) {
if (bad_mode_switch(env, val & CPSR_M)) {
/* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
* We choose to ignore the attempt and leave the CPSR M field
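
Taken together, the two helper.c hunks gate every architectural side effect of a CPSR write, both the A/F permission filtering and the mode-switch vetting, on write_type != CPSRWriteRaw. Below is a minimal standalone sketch of that pattern; the names fake_env, fake_cpsr_write and nonsecure_no_el2 are invented stand-ins for the real structures and checks:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum write_type { WRITE_BY_INSTR, WRITE_RAW };

    #define PSR_A   (1u << 8)
    #define PSR_F   (1u << 6)
    #define PSR_AIF 0x1c0u  /* A, I and F together, like CPSR_AIF */
    #define PSR_M   0x1fu   /* mode field, bits [4:0], like CPSR_M */

    struct fake_env {
        uint32_t daif;
        uint32_t mode;
        bool nonsecure_no_el2;  /* stands in for the EL3/EL2/secure tests */
    };

    static void fake_cpsr_write(struct fake_env *env, uint32_t val,
                                uint32_t mask, enum write_type type)
    {
        /* Gate 1: architectural writes may have A/F filtered by policy. */
        if (type != WRITE_RAW && env->nonsecure_no_el2) {
            mask &= ~(PSR_A | PSR_F);  /* crude stand-in for SCR.AW/FW */
        }
        env->daif = (env->daif & ~(PSR_AIF & mask)) | (val & PSR_AIF & mask);

        /* Gate 2: only architectural writes vet a mode-field change. */
        if (type != WRITE_RAW && ((env->mode ^ val) & mask & PSR_M)) {
            printf("would run bad_mode_switch() and bank-switch here\n");
        }
        env->mode = val & mask & PSR_M;
    }

    int main(void)
    {
        struct fake_env env = { .daif = 0, .mode = 0x10,
                                .nonsecure_no_el2 = true };

        fake_cpsr_write(&env, PSR_AIF | 0x13, ~0u, WRITE_BY_INSTR);
        printf("by instr: daif=%#x mode=%#x\n", env.daif, env.mode);

        fake_cpsr_write(&env, PSR_AIF | 0x10, ~0u, WRITE_RAW);
        printf("raw:      daif=%#x mode=%#x\n", env.daif, env.mode);
        return 0;
    }

With WRITE_BY_INSTR the A and F requests are filtered out and the mode change is vetted; with WRITE_RAW both gates stand open and the value lands verbatim, which is the behaviour the restore paths below rely on.
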
if (is_a64(env)) {
pstate_write(env, val);
} else {
- env->uncached_cpsr = val & CPSR_M;
cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
}
return 0;
}
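
This hunk is one of the two "raw" callers (the KVM register-sync path); the next hunk is its migration counterpart. The deleted line was a workaround: by pre-loading env->uncached_cpsr's mode field with the incoming mode, the (old ^ new) & CPSR_M test shown above evaluated to zero, so cpsr_write() never took the bank-switch path. A small assertion sketch of that trick, using invented example values:

    #include <assert.h>
    #include <stdint.h>

    #define PSR_M 0x1fu  /* mode field, as in CPSR_M */

    /* The change test cpsr_write() applies (see the helper.c hunk). */
    static int mode_changes(uint32_t stored, uint32_t incoming)
    {
        return ((stored ^ incoming) & PSR_M) != 0;
    }

    int main(void)
    {
        uint32_t incoming = 0x600001d3;  /* hypothetical synced CPSR, SVC */
        uint32_t stored = 0x10;          /* stale pre-sync state, USR */

        assert(mode_changes(stored, incoming));

        /* The deleted trick: pre-load the mode bits so the test stays
         * quiet and no register-bank switch is attempted. */
        stored = incoming & PSR_M;
        assert(!mode_changes(stored, incoming));
        return 0;
    }

With CPSRWriteRaw short-circuiting that test inside cpsr_write() itself, the pre-assignment becomes dead code and can simply go.
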
- /* Avoid mode switch when restoring CPSR */
- env->uncached_cpsr = val & CPSR_M;
cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
return 0;
}
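
The migration caller even named the intent in its deleted comment ("Avoid mode switch when restoring CPSR"). The mode switch being avoided is a register-bank switch, and the reason it must not run during restore, as I read it, is that it spills the live registers into the bank for whatever stale mode the CPU model happened to be in, clobbering freshly restored state. A toy model of that failure follows; every name in it is hypothetical:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    enum { MODE_USR, MODE_SVC, NUM_MODES };

    struct toy_cpu {
        uint32_t sp;                    /* active SP, like env->regs[13] */
        uint32_t banked_sp[NUM_MODES];  /* like env->banked_r13[] */
        int mode;
    };

    /* What a mode switch does: spill to the OLD bank, fill from NEW. */
    static void switch_banks(struct toy_cpu *cpu, int new_mode)
    {
        cpu->banked_sp[cpu->mode] = cpu->sp;
        cpu->sp = cpu->banked_sp[new_mode];
        cpu->mode = new_mode;
    }

    int main(void)
    {
        struct toy_cpu cpu;
        memset(&cpu, 0, sizeof(cpu));

        /* The restorer has already written correct values into every
         * bank and into the active registers... */
        cpu.banked_sp[MODE_USR] = 0x1000;
        cpu.banked_sp[MODE_SVC] = 0x8000;
        cpu.sp = 0x8000;             /* source CPU was in SVC mode */
        cpu.mode = MODE_USR;         /* ...but the mode field is stale. */

        /* A non-raw CPSR write would now bank-switch on the stale mode,
         * overwriting the freshly restored USR slot. */
        switch_banks(&cpu, MODE_SVC);
        printf("usr slot: %#x (wanted 0x1000)\n", cpu.banked_sp[MODE_USR]);
        return 0;
    }
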
if (!return_to_aa64) {
env->aarch64 = 0;
- env->uncached_cpsr = spsr & CPSR_M;
+ /* We do a raw CPSR write because aarch64_sync_64_to_32()
+ * will sort the register banks out for us, and we've already
+ * caught all the bad-mode cases in el_from_spsr().
+ */
cpsr_write(env, spsr, ~0, CPSRWriteRaw);
if (!arm_singlestep_active(env)) {
env->uncached_cpsr &= ~PSTATE_SS;
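
The comment added in this last hunk carries the safety argument for the exception-return path: the target mode was validated before the write (el_from_spsr() rejects the bad cases) and the banked registers are sorted out after it (aarch64_sync_64_to_32() runs a little below the lines shown, per that comment), so cpsr_write() has nothing left to check or switch. A sketch of that ordering; the stub bodies are placeholders, and only cpsr_write(), el_from_spsr() and aarch64_sync_64_to_32() named in the comments correspond to real QEMU functions:

    #include <stdbool.h>
    #include <stdio.h>

    /* Placeholder stubs standing in for the real target-arm helpers. */
    static bool spsr_mode_ok(unsigned spsr) { (void)spsr; return true; }
    static void write_cpsr_raw(unsigned spsr) { printf("1) raw write %#x\n", spsr); }
    static void sync_banks(void) { printf("2) banks populated\n"); }
    static void resume_guest(void) { printf("3) resume at return address\n"); }

    static void exception_return_sketch(unsigned spsr)
    {
        if (!spsr_mode_ok(spsr)) {   /* el_from_spsr(): vetted up front */
            return;                  /* illegal-return path instead */
        }
        write_cpsr_raw(spsr);        /* may trust spsr: already checked */
        sync_banks();                /* aarch64_sync_64_to_32(): fixup after */
        resume_guest();
    }

    int main(void)
    {
        exception_return_sketch(0x1d3);
        return 0;
    }
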