/*
 * Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)

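/* Raise an exception: record the exception number in the CPUState and
 * longjmp back to the main CPU execution loop via cpu_loop_exit().
 */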
static void raise_exception(CPUARMState *env, int tt)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = tt;
    cpu_loop_exit(cs);
}

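/* Neon VTBL/VTBX table lookup: each byte of ireg indexes into the table
 * held in the Neon register file starting at register rn; bytes with
 * out-of-range indexes (>= maxindex) come from def instead.
 */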
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table;

    table = (uint64_t *)&env->vfp.regs[rn];
    val = 0;
    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}

#if !defined(CONFIG_USER_ONLY)

/* try to fill the TLB and return an exception if error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c)
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = arm_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception(env, cs->exception_index);
    }
}
#endif

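/* Saturating arithmetic helpers. On signed overflow the sticky Q
 * (saturation) flag is set; the saturating variants additionally clamp
 * the result to INT32_MAX/INT32_MIN (or 0/UINT32_MAX for the unsigned
 * ones). For example, add_saturate(env, 0x7fffffff, 1) returns
 * 0x7fffffff with env->QF set.
 */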
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}

uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}

uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;
    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}

uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;
    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}

uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;
    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}

/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}

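/* For example, with shift == 7 (saturate to 8 signed bits),
 * do_ssat(env, 0x1234, 7) returns the mask 0x7f and sets env->QF.
 */
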
/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}

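/* For example, do_usat(env, -5, 8) clamps to 0 and sets env->QF, while
 * do_usat(env, 300, 8) clamps to the 8-bit maximum 0xff.
 */
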
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}

/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}

/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}

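/* WFI: halt this CPU until an interrupt arrives, by marking it halted
 * and raising EXCP_HLT to leave the execution loop.
 */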
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}

void HELPER(wfe)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    /* Don't actually halt the CPU, just yield back to top
     * level loop
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}

/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}

/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    cpu_loop_exit(cs);
}

uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}

/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}

void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}

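/* Check the permissions registered for a coprocessor register before a
 * read or write is performed: raise UNDEF (with the supplied syndrome)
 * if access is denied by the XScale CPAR bits or by the register's
 * access function.
 */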
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_UDEF);
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        env->exception.syndrome = syndrome;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        env->exception.syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }
    raise_exception(env, EXCP_UDEF);
}

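/* Thin wrappers used by generated code to invoke a coprocessor
 * register's read/write functions, in 32-bit and 64-bit variants.
 */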
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        raise_exception(env, EXCP_UDEF);
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}

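/* Clear the PSTATE software-step bit; called from generated code as part
 * of the single-step state machine.
 */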
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}

void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is undef in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 non EL2, we force SMD to zero so we don't need to re-check
     * the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR controlled routing to EL2 has priority over SMD. */
        env->exception.syndrome = syndrome;
        raise_exception(env, EXCP_HYP_TRAP);
    }

    if (undef) {
        env->exception.syndrome = syn_uncategorized();
        raise_exception(env, EXCP_UDEF);
    }
}

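/* Perform an exception return (AArch64 ERET): restore PSTATE/CPSR from
 * the banked SPSR and the PC from ELR_ELx; illegal return events set
 * PSTATE.IL as architecturally mandated.
 */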
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     * restore NZCV and DAIF from SPSR_ELx
     * set PSTATE.IL
     * restore PC from ELR_ELx
     * no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}

/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    bcr = env->cp15.dbgbcr[lbn];

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            break;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        break;
    }

    return false;
}

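/* Return true if breakpoint/watchpoint n fully matches, checking the
 * control-register fields (SSC, HMC, PAC and linking) that the generic
 * hit detection does not cover.
 */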
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}

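/* Scan all watchpoints (and, below, all breakpoints) and report whether
 * any of them fires under the current debug configuration.
 */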
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}

static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}

void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                env->exception.syndrome = syn_watchpoint(same_el, 0, wnr);
                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT);
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

            env->exception.syndrome = syn_breakpoint(same_el);
            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT);
        }
    }
}

/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

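/* For example, shl_cc(env, 0x80000001, 1) returns 2 and leaves
 * env->CF = 1 (the last bit shifted out), matching the ARM LSL
 * carry-out rule.
 */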
uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}

uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}

uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;
    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}

uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;
    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}