/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

static CPUState *do_raise_exception(CPUARMState *env, uint32_t excp,
                                    uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    if (target_el == 1 && (arm_hcr_el2_eff(env) & HCR_TGE)) {
        /*
         * Redirect NS EL1 exceptions to NS EL2. These are reported with
         * their original syndrome register value, with the exception of
         * SIMD/FP access traps, which are reported as uncategorized
         * (see DDI0478C.a D1.10.4)
         */
        target_el = 2;
        if (syn_get_ec(syndrome) == EC_ADVSIMDFPACCESSTRAP) {
            syndrome = syn_uncategorized();
        }
    }

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;

    return cs;
}

void raise_exception(CPUARMState *env, uint32_t excp,
                     uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit(cs);
}

void raise_exception_ra(CPUARMState *env, uint32_t excp, uint32_t syndrome,
                        uint32_t target_el, uintptr_t ra)
{
    CPUState *cs = do_raise_exception(env, excp, syndrome, target_el);
    cpu_loop_exit_restore(cs, ra);
}

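/* Table lookup helper for the Neon VTBL/VTBX operations: each byte of ireg
 * indexes into the byte table at vn (maxindex bytes long); bytes whose index
 * is out of range take the corresponding byte of def instead.
 */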
uint32_t HELPER(neon_tbl)(uint32_t ireg, uint32_t def, void *vn,
                          uint32_t maxindex)
{
    uint32_t val, shift;
    uint64_t *table = vn;

    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        uint32_t index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            uint32_t tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

static inline uint32_t merge_syn_data_abort(uint32_t template_syn,
                                            unsigned int target_el,
                                            bool same_el, bool ea,
                                            bool s1ptw, bool is_write,
                                            int fsc)
{
    uint32_t syn;

    /* ISV is only set for data aborts routed to EL2 and
     * never for stage-1 page table walks faulting on stage 2.
     *
     * Furthermore, ISV is only set for certain kinds of load/stores.
     * If the template syndrome does not have ISV set, we should leave
     * it cleared.
     *
     * See ARMv8 specs, D7-1974:
     * ISS encoding for an exception from a Data Abort, the
     * ISV field.
     */
    if (!(template_syn & ARM_EL_ISV) || target_el != 2 || s1ptw) {
        syn = syn_data_abort_no_iss(same_el,
                                    ea, 0, s1ptw, is_write, fsc);
    } else {
        /* Fields: IL, ISV, SAS, SSE, SRT, SF and AR come from the template
         * syndrome created at translation time.
         * Now we create the runtime syndrome with the remaining fields.
         */
        syn = syn_data_abort_with_iss(same_el,
                                      0, 0, 0, 0, 0,
                                      ea, 0, s1ptw, is_write, fsc,
                                      false);
        /* Merge the runtime syndrome with the template syndrome.  */
        syn |= template_syn;
    }
    return syn;
}

void arm_deliver_fault(ARMCPU *cpu, vaddr addr, MMUAccessType access_type,
                       int mmu_idx, ARMMMUFaultInfo *fi)
{
    CPUARMState *env = &cpu->env;
    int target_el;
    bool same_el;
    uint32_t syn, exc, fsr, fsc;
    ARMMMUIdx arm_mmu_idx = core_to_arm_mmu_idx(env, mmu_idx);

    target_el = exception_target_el(env);
    if (fi->stage2) {
        target_el = 2;
        env->cp15.hpfar_el2 = extract64(fi->s2addr, 12, 47) << 4;
    }
    same_el = (arm_current_el(env) == target_el);

    if (target_el == 2 || arm_el_is_aa64(env, target_el) ||
        arm_s1_regime_using_lpae_format(env, arm_mmu_idx)) {
        /* LPAE format fault status register : bottom 6 bits are
         * status code in the same form as needed for syndrome
         */
        fsr = arm_fi_to_lfsc(fi);
        fsc = extract32(fsr, 0, 6);
    } else {
        fsr = arm_fi_to_sfsc(fi);
        /* Short format FSR : this fault will never actually be reported
         * to an EL that uses a syndrome register. Use a (currently)
         * reserved FSR code in case the constructed syndrome does leak
         * into the guest somehow.
         */
        fsc = 0x3f;
    }

    if (access_type == MMU_INST_FETCH) {
        syn = syn_insn_abort(same_el, fi->ea, fi->s1ptw, fsc);
        exc = EXCP_PREFETCH_ABORT;
    } else {
        syn = merge_syn_data_abort(env->exception.syndrome, target_el,
                                   same_el, fi->ea, fi->s1ptw,
                                   access_type == MMU_DATA_STORE,
                                   fsc);
        if (access_type == MMU_DATA_STORE
            && arm_feature(env, ARM_FEATURE_V6)) {
            fsr |= (1 << 11);
        }
        exc = EXCP_DATA_ABORT;
    }

    env->exception.vaddress = addr;
    env->exception.fsr = fsr;
    raise_exception(env, exc, syn, target_el);
}

/* Raise a data fault alignment exception for the specified virtual address */
void arm_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
                                 MMUAccessType access_type,
                                 int mmu_idx, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.type = ARMFault_Alignment;
    arm_deliver_fault(cpu, vaddr, access_type, mmu_idx, &fi);
}

/* arm_cpu_do_transaction_failed: handle a memory system error response
 * (eg "no device/memory present at address") by raising an external abort
 * exception
 */
void arm_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                   vaddr addr, unsigned size,
                                   MMUAccessType access_type,
                                   int mmu_idx, MemTxAttrs attrs,
                                   MemTxResult response, uintptr_t retaddr)
{
    ARMCPU *cpu = ARM_CPU(cs);
    ARMMMUFaultInfo fi = {};

    /* now we have a real cpu fault */
    cpu_restore_state(cs, retaddr, true);

    fi.ea = arm_extabort_type(response);
    fi.type = ARMFault_SyncExternal;
    arm_deliver_fault(cpu, addr, access_type, mmu_idx, &fi);
}

#endif /* !defined(CONFIG_USER_ONLY) */

void HELPER(v8m_stackcheck)(CPUARMState *env, uint32_t newvalue)
{
    /*
     * Perform the v8M stack limit check for SP updates from translated code,
     * raising an exception if the limit is breached.
     */
    if (newvalue < v7m_sp_limit(env)) {
        CPUState *cs = CPU(arm_env_get_cpu(env));

        /*
         * Stack limit exceptions are a rare case, so rather than syncing
         * PC/condbits before the call, we use cpu_restore_state() to
         * get them right before raising the exception.
         */
        cpu_restore_state(cs, GETPC(), true);
        raise_exception(env, EXCP_STKOF, 0, 1);
    }
}

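/* Saturating arithmetic helpers for QADD/QSUB and friends: on overflow they
 * set the sticky Q flag (env->QF). add_setq only sets Q and returns the
 * wrapped result; the *_saturate and *_usaturate helpers also clamp the
 * result to the nearest representable value.
 */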
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT))
        env->QF = 1;
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    int32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

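/* For example, do_ssat(env, v, 7) clamps v to the signed 8-bit range
 * [-0x80, 0x7f], while do_usat(env, v, 8) clamps it to the unsigned 8-bit
 * range [0, 0xff]; both set QF when clamping occurs.
 */
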
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    int32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    int32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

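/* SETEND: toggle CPSR.E, the bit that controls the data endianness of
 * loads and stores.
 */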
void HELPER(setend)(CPUARMState *env)
{
    env->uncached_cpsr ^= CPSR_E;
}

/* Function checks whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be trapped;
 * otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    if (arm_feature(env, ARM_FEATURE_M)) {
        /* M profile cores can never trap WFI/WFE. */
        return 0;
    }

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it
     * No need for ARM_FEATURE check as if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (arm_hcr_el2_eff(env) & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = (is_wfe) ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}

void HELPER(wfi)(CPUARMState *env, uint32_t insn_len)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= insn_len;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0, insn_len == 2),
                        target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}

void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}

/* Raise an EXCP_BKPT with the specified syndrome register value,
 * targeting the correct exception level for debug exceptions.
 */
void HELPER(exception_bkpt_insn)(CPUARMState *env, uint32_t syndrome)
{
    /* FSR will only be used if the debug target EL is AArch32. */
    env->exception.fsr = arm_debug_exception_fsr(env);
    /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
     * values to the guest that it shouldn't be able to see at its
     * exception/security level.
     */
    env->exception.vaddress = 0;
    raise_exception(env, EXCP_BKPT, syndrome, arm_debug_target_el(env));
}

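/* Read the CPSR for an MRS instruction, masking out the execution state
 * bits (IT, J, T) and the reserved bits, which are not visible to MRS.
 */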
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask, CPSRWriteByInstr);
}

/* Write the CPSR for a 32-bit exception return */
void HELPER(cpsr_write_eret)(CPUARMState *env, uint32_t val)
{
    qemu_mutex_lock_iothread();
    arm_call_pre_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();

    cpsr_write(env, val, CPSR_ERET_MASK, CPSRWriteExceptionReturn);

    /* Generated code has already stored the new PC value, but
     * without masking out its low bits, because which bits need
     * masking depends on whether we're returning to Thumb or ARM
     * state. Do the masking now.
     */
    env->regs[15] &= (env->thumb ? ~1 : ~3);

    qemu_mutex_lock_iothread();
    arm_call_el_change_hook(arm_env_get_cpu(env));
    qemu_mutex_unlock_iothread();
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[BANK_USRSYS];
    } else if (regno == 14) {
        val = env->banked_r14[BANK_USRSYS];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[BANK_USRSYS] = val;
    } else if (regno == 14) {
        env->banked_r14[BANK_USRSYS] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
    if ((env->uncached_cpsr & CPSR_M) == mode) {
        env->regs[13] = val;
    } else {
        env->banked_r13[bank_number(mode)] = val;
    }
}

uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
    if ((env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_SYS) {
        /* SRS instruction is UNPREDICTABLE from System mode; we UNDEF.
         * Other UNPREDICTABLE and UNDEF cases were caught at translate time.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if ((env->uncached_cpsr & CPSR_M) == mode) {
        return env->regs[13];
    } else {
        return env->banked_r13[bank_number(mode)];
    }
}

static void msr_mrs_banked_exc_checks(CPUARMState *env, uint32_t tgtmode,
                                      uint32_t regno)
{
    /* Raise an exception if the requested access is one of the UNPREDICTABLE
     * cases; otherwise return. This broadly corresponds to the pseudocode
     * BankedRegisterAccessValid() and SPSRAccessValid(),
     * except that we have already handled some cases at translate time.
     */
    int curmode = env->uncached_cpsr & CPSR_M;

    if (regno == 17) {
        /* ELR_Hyp: a special case because access from tgtmode is OK */
        if (curmode != ARM_CPU_MODE_HYP && curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
        return;
    }

    if (curmode == tgtmode) {
        goto undef;
    }

    if (tgtmode == ARM_CPU_MODE_USR) {
        switch (regno) {
        case 8 ... 12:
            if (curmode != ARM_CPU_MODE_FIQ) {
                goto undef;
            }
            break;
        case 13:
            if (curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        case 14:
            if (curmode == ARM_CPU_MODE_HYP || curmode == ARM_CPU_MODE_SYS) {
                goto undef;
            }
            break;
        default:
            break;
        }
    }

    if (tgtmode == ARM_CPU_MODE_HYP) {
        /* SPSR_Hyp, r13_hyp: accessible from Monitor mode only */
        if (curmode != ARM_CPU_MODE_MON) {
            goto undef;
        }
    }

    return;

undef:
    raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                    exception_target_el(env));
}

void HELPER(msr_banked)(CPUARMState *env, uint32_t value, uint32_t tgtmode,
                        uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        env->banked_spsr[bank_number(tgtmode)] = value;
        break;
    case 17: /* ELR_Hyp */
        env->elr_el[2] = value;
        break;
    case 13:
        env->banked_r13[bank_number(tgtmode)] = value;
        break;
    case 14:
        env->banked_r14[r14_bank_number(tgtmode)] = value;
        break;
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            env->usr_regs[regno - 8] = value;
            break;
        case ARM_CPU_MODE_FIQ:
            env->fiq_regs[regno - 8] = value;
            break;
        default:
            g_assert_not_reached();
        }
        break;
    default:
        g_assert_not_reached();
    }
}

uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
{
    msr_mrs_banked_exc_checks(env, tgtmode, regno);

    switch (regno) {
    case 16: /* SPSRs */
        return env->banked_spsr[bank_number(tgtmode)];
    case 17: /* ELR_Hyp */
        return env->elr_el[2];
    case 13:
        return env->banked_r13[bank_number(tgtmode)];
    case 14:
        return env->banked_r14[r14_bank_number(tgtmode)];
    case 8 ... 12:
        switch (tgtmode) {
        case ARM_CPU_MODE_USR:
            return env->usr_regs[regno - 8];
        case ARM_CPU_MODE_FIQ:
            return env->fiq_regs[regno - 8];
        default:
            g_assert_not_reached();
        }
    default:
        g_assert_not_reached();
    }
}

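/* Runtime permission check for a coprocessor/system register access: apply
 * the XScale CPAR check and the register's accessfn, and raise an exception
 * with the appropriate target EL and syndrome if the access is denied.
 */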
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
                                 uint32_t isread)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri, isread)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL2:
        target_el = 2;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED_EL3:
        target_el = 3;
        syndrome = syn_uncategorized();
        break;
    case CP_ACCESS_TRAP_FP_EL2:
        target_el = 2;
        /* Since we are an implementation that takes exceptions on a trapped
         * conditional insn only if the insn has passed its condition code
         * check, we take the IMPDEF choice to always report CV=1 COND=0xe
         * (which is also the required value for AArch64 traps).
         */
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    case CP_ACCESS_TRAP_FP_EL3:
        target_el = 3;
        syndrome = syn_fp_access_trap(1, 0xe, false);
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}

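/* Read/write a coprocessor/system register via its readfn/writefn.
 * Registers marked ARM_CP_IO can have I/O side effects, so those accesses
 * are performed with the iothread lock held.
 */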
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint32_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        ri->writefn(env, ri, value);
        qemu_mutex_unlock_iothread();
    } else {
        ri->writefn(env, ri, value);
    }
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;
    uint64_t res;

    if (ri->type & ARM_CP_IO) {
        qemu_mutex_lock_iothread();
        res = ri->readfn(env, ri);
        qemu_mutex_unlock_iothread();
    } else {
        res = ri->readfn(env, ri);
    }

    return res;
}

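/* pre_hvc: runtime checks for the HVC instruction. A valid PSCI call
 * overrides the architectural behaviour; otherwise HVC UNDEFs if EL2 is
 * absent, if it is disabled by SCR_EL3.HCE / HCR_EL2.HCD, or if executed
 * in Secure state where it is not permitted.
 */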
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd_flag = env->cp15.scr_el3 & SCR_SMD;

    /*
     * SMC behaviour is summarized in the following table.
     * This helper handles the "Trap to EL2" and "Undef insn" cases.
     * The "Trap to EL3" and "PSCI call" cases are handled in the exception
     * helper.
     *
     *  -> ARM_FEATURE_EL3 and !SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Trap to EL3
     *  Conduit not SMC          Trap to EL2         Trap to EL3
     *
     *
     *  -> ARM_FEATURE_EL3 and SMD
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Trap to EL2         Undef insn
     *
     *
     *  -> !ARM_FEATURE_EL3
     *                           HCR_TSC && NS EL1   !HCR_TSC || !NS EL1
     *
     *  Conduit SMC, valid call  Trap to EL2         PSCI Call
     *  Conduit SMC, inval call  Trap to EL2         Undef insn
     *  Conduit not SMC          Undef insn          Undef insn
     */

    /* On ARMv8 with EL3 AArch64, SMD applies to both S and NS state.
     * On ARMv8 with EL3 AArch32, or ARMv7 with the Virtualization
     * extensions, SMD only applies to NS state.
     * On ARMv7 without the Virtualization extensions, the SMD bit
     * doesn't exist, but we forbid the guest to set it to 1 in scr_write(),
     * so we need not special case this here.
     */
    bool smd = arm_feature(env, ARM_FEATURE_AARCH64) ? smd_flag
                                                     : smd_flag && !secure;

    if (!arm_feature(env, ARM_FEATURE_EL3) &&
        cpu->psci_conduit != QEMU_PSCI_CONDUIT_SMC) {
        /* If we have no EL3 then SMC always UNDEFs and can't be
         * trapped to EL2. PSCI-via-SMC is a sort of ersatz EL3
         * firmware within QEMU, and we want an EL2 guest to be able
         * to forbid its EL1 from making PSCI calls into QEMU's
         * "firmware" via HCR.TSC, so for these purposes treat
         * PSCI-via-SMC as implying an EL3.
         * This handles the very last line of the previous table.
         */
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }

    if (cur_el == 1 && (arm_hcr_el2_eff(env) & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD.
         * We also want an EL2 guest to be able to forbid its EL1 from
         * making PSCI calls into QEMU's "firmware" via HCR.TSC.
         * This handles all the "Trap to EL2" cases of the previous table.
         */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    /* Catch the two remaining "Undef insn" cases of the previous table:
     * - PSCI conduit is SMC but we don't have a valid PSCI call,
     * - We don't have EL3 or SMD is set.
     */
    if (!arm_is_psci_call(cpu, EXCP_SMC) &&
        (smd || !arm_feature(env, ARM_FEATURE_EL3))) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            break;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        break;
    }

    return false;
}

static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

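/* check_watchpoints/check_breakpoints: return true if MDSCR_EL1.MDE is set,
 * debug exceptions can be taken in the current state, and at least one
 * architectural watchpoint or breakpoint matches.
 */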
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void HELPER(check_breakpoints)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);

    if (check_breakpoints(cpu)) {
        HELPER(exception_internal(env, EXCP_DEBUG));
    }
}

bool arm_debug_check_watchpoint(CPUState *cs, CPUWatchpoint *wp)
{
    /* Called by core code when a CPU watchpoint fires; need to check if this
     * is also an architectural watchpoint match.
     */
    ARMCPU *cpu = ARM_CPU(cs);

    return check_watchpoints(cpu);
}

vaddr arm_adjust_watchpoint_address(CPUState *cs, vaddr addr, int len)
{
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;

    /* In BE32 system mode, target memory is stored byteswapped (on a
     * little-endian host system), and by the time we reach here (via an
     * opcode helper) the addresses of subword accesses have been adjusted
     * to account for that, which means that watchpoints will not match.
     * Undo the adjustment here.
     */
    if (arm_sctlr_b(env)) {
        if (len == 1) {
            addr ^= 3;
        } else if (len == 2) {
            addr ^= 2;
        }
    }

    return addr;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
            bool same_el = arm_debug_target_el(env) == arm_current_el(env);

            cs->watchpoint_hit = NULL;

            env->exception.fsr = arm_debug_exception_fsr(env);
            env->exception.vaddress = wp_hit->hitaddr;
            raise_exception(env, EXCP_DATA_ABORT,
                            syn_watchpoint(same_el, 0, wnr),
                            arm_debug_target_el(env));
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];
        bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

        /* (1) GDB breakpoints should be handled first.
         * (2) Do not raise a CPU exception if no CPU breakpoint has fired,
         * since singlestep is also done by generating a debug internal
         * exception.
         */
        if (cpu_breakpoint_test(cs, pc, BP_GDB)
            || !cpu_breakpoint_test(cs, pc, BP_CPU)) {
            return;
        }

        env->exception.fsr = arm_debug_exception_fsr(env);
        /* FAR is UNKNOWN: clear vaddress to avoid potentially exposing
         * values to the guest that it shouldn't be able to see at its
         * exception/security level.
         */
        env->exception.vaddress = 0;
        raise_exception(env, EXCP_PREFETCH_ABORT,
                        syn_breakpoint(same_el),
                        arm_debug_target_el(env));
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

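/* Each *_cc helper below shifts or rotates x by the bottom byte of i
 * (matching a register-specified shift amount) and, for non-zero shift
 * amounts, updates env->CF to the last bit shifted out, as the ARM
 * flag-setting shift semantics require.
 */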
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = x & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32)
            env->CF = (x >> 31) & 1;
        else
            env->CF = 0;
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0)
            env->CF = (x >> 31) & 1;
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}