/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

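/* Sign-bit masks used by the saturating arithmetic helpers below to
 * detect signed overflow of 32-bit and 64-bit operations.
 */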
#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}

static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* No such thing as secure EL1 if EL3 is aarch32, so update the target EL
     * to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}

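/* Byte-wise table lookup for the NEON VTBL/VTBX instructions: each byte of
 * ireg indexes into the table registers starting at rn; out-of-range indexes
 * select the corresponding byte of def instead.
 */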
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val = 0;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;

    table = (uint64_t *)&env->vfp.regs[rn];
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    0, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      0, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome. */
        syn |= template_syn;
    }
    return syn;
}

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;
    ARMMMUFaultInfo fi = {};

    ret = arm_tlb_fill(cs, addr, access_type, mmu_idx, &fsr, &fi);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        unsigned int target_el;
        bool same_el;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        target_el = exception_target_el(env);
        if (fi.stage2) {
            target_el = 2;
            env->cp15.hpfar_el2 = extract64(fi.s2addr, 12, 47) << 4;
        }
        same_el = arm_current_el(env) == target_el;
        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (access_type == MMU_INST_FETCH) {
            syn = syn_insn_abort(same_el, 0, fi.s1ptw, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                       same_el, fi.s1ptw,
                                       access_type == MMU_DATA_STORE, syn);
            if (access_type == MMU_DATA_STORE
                && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, target_el);
    }
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn;

    if (retaddr) {
        /* now we have a real cpu fault */
        cpu_restore_state(cs, retaddr);
    }

    target_el = exception_target_el(env);
    same_el = (arm_current_el(env) == target_el);

    env->exception.vaddress = vaddr;

    /* the DFSR for an alignment fault depends on whether we're using
     * the LPAE long descriptor format, or the short descriptor format
     */
    if (arm_s1_regime_using_lpae_format(env, cpu_mmu_index(env, false))) {
        env->exception.fsr = 0x21;
    } else {
        env->exception.fsr = 0x1;
    }

    if (access_type == MMU_DATA_STORE && arm_feature(env, ARM_FEATURE_V6)) {
        env->exception.fsr |= (1 << 11);
    }

    syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                               same_el, 0, access_type == MMU_DATA_STORE,
                               0x21);
    raise_exception(env, EXCP_DATA_ABORT, syn, target_el);
}

#endif /* !defined(CONFIG_USER_ONLY) */

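/* Saturating arithmetic helpers. Each sets the sticky Q flag (env->QF) on
 * overflow; add_setq lets the result wrap, while the *_saturate helpers
 * also clamp it.
 */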
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = (is_wfe) ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        /* The WFI did not execute: rewind the PC before taking the trap. */
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

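/* Read the CPSR for MRS: the execution-state and reserved bits read as zero. */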
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    arm_call_el_change_hook(arm_env_get_cpu(env));
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        switch (regno) {
        case 17: /* ELR_Hyp */
            if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        default:
            if (curmode != ARM_CPU_MODE_MON) {
                goto undef;
            }
            break;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

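/* Write value to register regno of the banked CPU mode tgtmode
 * (MSR (banked)); UNPREDICTABLE accesses UNDEF via
 * msr_mrs_banked_exc_checks().
 */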
void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

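/* Read register regno of the banked CPU mode tgtmode (MRS (banked)). */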
uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

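/* Check that a system register access by the guest is permitted; if it is
 * not, raise an exception routed to the appropriate target EL.
 */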
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

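/* Thin wrappers that let generated code invoke a coprocessor register's
 * writefn/readfn.
 */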
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

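/* UNDEF checks for the HVC instruction, performed before taking the trap. */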
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool undef = arm_feature(env, ARM_FEATURE_AARCH64) ? smd : smd && !secure;

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

static int el_from_spsr(uint32_t spsr)
{
    /* Return the exception level that this SPSR is requesting a return to,
     * or -1 if it is invalid (an illegal return)
     */
    if (spsr & PSTATE_nRW) {
        switch (spsr & CPSR_M) {
        case ARM_CPU_MODE_USR:
            return 0;
        case ARM_CPU_MODE_HYP:
            return 2;
        case ARM_CPU_MODE_FIQ:
        case ARM_CPU_MODE_IRQ:
        case ARM_CPU_MODE_SVC:
        case ARM_CPU_MODE_ABT:
        case ARM_CPU_MODE_UND:
        case ARM_CPU_MODE_SYS:
            return 1;
        case ARM_CPU_MODE_MON:
            /* Returning to Mon from AArch64 is never possible,
             * so this is an illegal return.
             */
        default:
            return -1;
        }
    } else {
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            return -1;
        }
        if (extract32(spsr, 0, 4) == 1) {
            /* return to EL0 with M[0] bit set */
            return -1;
        }
        return extract32(spsr, 2, 2);
    }
}

void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;
    bool return_to_aa64 = (spsr & PSTATE_nRW) == 0;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    new_el = el_from_spsr(spsr);
    if (new_el == -1) {
        goto illegal_return;
    }
    if (new_el > cur_el
        || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
        /* Disallow return to an EL which is unimplemented or higher
         * than the current one.
         */
        goto illegal_return;
    }

    if (new_el != 0 && arm_el_is_aa64(env, new_el) != return_to_aa64) {
        /* Return to an EL which is configured for a different register width */
        goto illegal_return;
    }

    if (new_el == 2 && arm_is_secure_below_el3(env)) {
        /* Return to the non-existent secure-EL2 */
        goto illegal_return;
    }

    if (new_el == 1 && (env->cp15.hcr_el2 & HCR_TGE)
        && !arm_is_secure_below_el3(env)) {
        goto illegal_return;
    }

    if (!return_to_aa64) {
        env->aarch64 = 0;
        /* We do a raw CPSR write because aarch64_sync_64_to_32()
         * will sort the register banks out for us, and we've already
         * caught all the bad-mode cases in el_from_spsr().
         */
        cpsr_write(env, spsr, ~0, CPSRWriteRaw);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        if (spsr & CPSR_T) {
            env->regs[15] = env->elr_el[cur_el] & ~0x1;
        } else {
            env->regs[15] = env->elr_el[cur_el] & ~0x3;
        }
    } else {
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    arm_call_el_change_hook(arm_env_get_cpu(env));

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}

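/* Return true if breakpoint/watchpoint n (selected by is_wp) fires, taking
 * security state, privilege level and any linked breakpoint into account.
 */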
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

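/* Called from generated code; raises the internal EXCP_DEBUG exception if
 * an architectural breakpoint matches at the current PC.
 */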
void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal)(env, EXCP_DEBUG);
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        if (extended_addresses_enabled(env)) {
            env->exception.fsr = (1 << 9) | 0x22;
        } else {
            env->exception.fsr = 0x2;
        }
        /* FAR is UNKNOWN, so doesn't need setting */
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}