/*
 *  ARM helper routines
 *
 *  Copyright (c) 2005-2007 CodeSourcery, LLC
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
#include "internals.h"
#include "exec/cpu_ldst.h"

#define SIGNBIT (uint32_t)0x80000000
#define SIGNBIT64 ((uint64_t)1 << 63)
static void raise_exception(CPUARMState *env, uint32_t excp,
                            uint32_t syndrome, uint32_t target_el)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(!excp_is_internal(excp));
    cs->exception_index = excp;
    env->exception.syndrome = syndrome;
    env->exception.target_el = target_el;
    cpu_loop_exit(cs);
}
static int exception_target_el(CPUARMState *env)
{
    int target_el = MAX(1, arm_current_el(env));

    /* There is no such thing as Secure EL1 if EL3 is AArch32, so update
     * the target EL to EL3 in this case.
     */
    if (arm_is_secure(env) && !arm_el_is_aa64(env, 3) && target_el == 1) {
        target_el = 3;
    }

    return target_el;
}
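/* Example (illustrative, not from the source): an exception raised at
 * NS EL0 targets EL1, while one raised at Secure EL0 when EL3 is
 * AArch32 targets EL3, because that configuration has no Secure EL1.
 */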
uint32_t HELPER(neon_tbl)(CPUARMState *env, uint32_t ireg, uint32_t def,
                          uint32_t rn, uint32_t maxindex)
{
    uint32_t val = 0;
    uint32_t tmp;
    int index;
    int shift;
    uint64_t *table = (uint64_t *)&env->vfp.regs[rn];

    for (shift = 0; shift < 32; shift += 8) {
        index = (ireg >> shift) & 0xff;
        if (index < maxindex) {
            tmp = (table[index >> 3] >> ((index & 7) << 3)) & 0xff;
            val |= tmp << shift;
        } else {
            val |= def & (0xff << shift);
        }
    }
    return val;
}
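/* Worked example (illustrative, not from the source): with maxindex = 8
 * (a one-register table) and ireg = 0x00010203, selector byte 0 is 0x03,
 * so result byte 0 is table byte 3, and so on up the lanes; any selector
 * byte >= 8 would instead take the corresponding byte of 'def'.
 */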
#if !defined(CONFIG_USER_ONLY)

/* Try to fill the TLB and raise an exception on error. If retaddr is
 * NULL, it means that the function was called in C code (i.e. not
 * from generated code or from helper.c).
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    bool ret;
    uint32_t fsr = 0;

    ret = arm_tlb_fill(cs, addr, is_write, mmu_idx, &fsr);
    if (unlikely(ret)) {
        ARMCPU *cpu = ARM_CPU(cs);
        CPUARMState *env = &cpu->env;
        uint32_t syn, exc;
        bool same_el = (arm_current_el(env) != 0);

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }

        /* AArch64 syndrome does not have an LPAE bit */
        syn = fsr & ~(1 << 9);

        /* For insn and data aborts we assume there is no instruction syndrome
         * information; this is always true for exceptions reported to EL1.
         */
        if (is_write == 2) {
            syn = syn_insn_abort(same_el, 0, 0, syn);
            exc = EXCP_PREFETCH_ABORT;
        } else {
            syn = syn_data_abort(same_el, 0, 0, 0, is_write == 1, syn);
            if (is_write == 1 && arm_feature(env, ARM_FEATURE_V6)) {
                fsr |= (1 << 11);
            }
            exc = EXCP_DATA_ABORT;
        }

        env->exception.vaddress = addr;
        env->exception.fsr = fsr;
        raise_exception(env, exc, syn, exception_target_el(env));
    }
}
#endif
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;

    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
    }
    return res;
}
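/* The condition above is the standard two's-complement overflow test:
 * addition overflows iff the operands have the same sign but the result
 * does not. For example (illustrative), a = 0x7fffffff, b = 1 gives
 * res = 0x80000000: (res ^ a) has SIGNBIT set while (a ^ b) does not,
 * so QF is set.
 */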
uint32_t HELPER(add_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;

    if (((res ^ a) & SIGNBIT) && !((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
uint32_t HELPER(sub_saturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;

    if (((res ^ a) & SIGNBIT) && ((a ^ b) & SIGNBIT)) {
        env->QF = 1;
        res = ~(((int32_t)a >> 31) ^ SIGNBIT);
    }
    return res;
}
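/* On overflow the saturated result takes the sign of 'a':
 * (int32_t)a >> 31 is 0 for non-negative a and -1 for negative a, so
 * ~(that ^ SIGNBIT) yields 0x7fffffff or 0x80000000 respectively.
 */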
uint32_t HELPER(double_saturate)(CPUARMState *env, int32_t val)
{
    uint32_t res;

    if (val >= 0x40000000) {
        res = ~SIGNBIT;
        env->QF = 1;
    } else if (val <= (int32_t)0xc0000000) {
        res = SIGNBIT;
        env->QF = 1;
    } else {
        res = val << 1;
    }
    return res;
}
uint32_t HELPER(add_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a + b;

    if (res < a) {
        env->QF = 1;
        res = ~0;
    }
    return res;
}
uint32_t HELPER(sub_usaturate)(CPUARMState *env, uint32_t a, uint32_t b)
{
    uint32_t res = a - b;

    if (res > a) {
        env->QF = 1;
        res = 0;
    }
    return res;
}
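/* Unsigned overflow shows up as wraparound: after an addition the result
 * is smaller than either operand iff there was a carry out, and after a
 * subtraction it is larger than the minuend iff there was a borrow.
 * E.g. (illustrative) 0xffffffff + 2 wraps to 1, so add_usaturate
 * returns ~0 and sets QF.
 */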
/* Signed saturation.  */
static inline uint32_t do_ssat(CPUARMState *env, int32_t val, int shift)
{
    int32_t top;
    uint32_t mask;

    top = val >> shift;
    mask = (1u << shift) - 1;
    if (top > 0) {
        env->QF = 1;
        return mask;
    } else if (top < -1) {
        env->QF = 1;
        return ~mask;
    }
    return val;
}
/* Unsigned saturation.  */
static inline uint32_t do_usat(CPUARMState *env, int32_t val, int shift)
{
    uint32_t max;

    max = (1u << shift) - 1;
    if (val < 0) {
        env->QF = 1;
        return 0;
    } else if (val > max) {
        env->QF = 1;
        return max;
    }
    return val;
}
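/* Worked example (illustrative): saturating to an 8-bit signed range
 * uses shift = 7 (-128..127), to an 8-bit unsigned range shift = 8
 * (0..255). do_ssat(env, 300, 7): top = 300 >> 7 = 2 > 0, so it returns
 * mask = 0x7f and sets QF; do_usat(env, -5, 8) returns 0 and sets QF.
 */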
/* Signed saturate.  */
uint32_t HELPER(ssat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_ssat(env, x, shift);
}
/* Dual halfword signed saturate.  */
uint32_t HELPER(ssat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_ssat(env, (int16_t)x, shift);
    res |= do_ssat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
/* Unsigned saturate.  */
uint32_t HELPER(usat)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    return do_usat(env, x, shift);
}
/* Dual halfword unsigned saturate.  */
uint32_t HELPER(usat16)(CPUARMState *env, uint32_t x, uint32_t shift)
{
    uint32_t res;

    res = (uint16_t)do_usat(env, (int16_t)x, shift);
    res |= do_usat(env, ((int32_t)x) >> 16, shift) << 16;
    return res;
}
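/* The 16-bit variants above saturate each halfword independently: the
 * low halfword is sign-extended and saturated, the high halfword is
 * taken via an arithmetic shift right by 16, and the two 16-bit results
 * are repacked side by side.
 */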
/* Check whether WFx (WFI/WFE) instructions are set up to be trapped.
 * The function returns the target EL (1-3) if the instruction is to be
 * trapped; otherwise it returns 0 indicating it is not trapped.
 */
static inline int check_wfx_trap(CPUARMState *env, bool is_wfe)
{
    int cur_el = arm_current_el(env);
    uint64_t mask;

    /* If we are currently in EL0 then we need to check if SCTLR is set up for
     * WFx instructions being trapped to EL1. These trap bits don't exist in v7.
     */
    if (cur_el < 1 && arm_feature(env, ARM_FEATURE_V8)) {
        int target_el;

        mask = is_wfe ? SCTLR_nTWE : SCTLR_nTWI;
        if (arm_is_secure_below_el3(env) && !arm_el_is_aa64(env, 3)) {
            /* Secure EL0 and Secure PL1 is at EL3 */
            target_el = 3;
        } else {
            target_el = 1;
        }

        if (!(env->cp15.sctlr_el[target_el] & mask)) {
            return target_el;
        }
    }

    /* We are not trapping to EL1; trap to EL2 if HCR_EL2 requires it.
     * No need for an ARM_FEATURE check: if HCR_EL2 doesn't exist the
     * bits will be zero indicating no trap.
     */
    if (cur_el < 2 && !arm_is_secure(env)) {
        mask = is_wfe ? HCR_TWE : HCR_TWI;
        if (env->cp15.hcr_el2 & mask) {
            return 2;
        }
    }

    /* We are not trapping to EL1 or EL2; trap to EL3 if SCR_EL3 requires it */
    if (cur_el < 3) {
        mask = is_wfe ? SCR_TWE : SCR_TWI;
        if (env->cp15.scr_el3 & mask) {
            return 3;
        }
    }

    return 0;
}
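/* Priority example (illustrative): the checks above run in order, so a
 * WFI executed at NS EL0 with SCTLR.nTWI clear traps to EL1 even if
 * HCR_EL2.TWI is also set; HCR_EL2 is only consulted once the EL1 check
 * has passed, and SCR_EL3 only after that.
 */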
void HELPER(wfi)(CPUARMState *env)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));
    int target_el = check_wfx_trap(env, false);

    if (cpu_has_work(cs)) {
        /* Don't bother to go into our "low power state" if
         * we would just wake up immediately.
         */
        return;
    }

    if (target_el) {
        env->pc -= 4;
        raise_exception(env, EXCP_UDEF, syn_wfx(1, 0xe, 0), target_el);
    }

    cs->exception_index = EXCP_HLT;
    cs->halted = 1;
    cpu_loop_exit(cs);
}
void HELPER(wfe)(CPUARMState *env)
{
    /* This is a hint instruction that is semantically different
     * from YIELD even though we currently implement it identically.
     * Don't actually halt the CPU, just yield back to the top
     * level loop. This is not going into a "low power state"
     * (ie halting until some event occurs), so we never take
     * a configurable trap to a different exception level.
     */
    HELPER(yield)(env);
}
void HELPER(yield)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    /* This is a non-trappable hint instruction that generally indicates
     * that the guest is currently busy-looping. Yield control back to the
     * top level loop so that a more deserving VCPU has a chance to run.
     */
    cs->exception_index = EXCP_YIELD;
    cpu_loop_exit(cs);
}
/* Raise an internal-to-QEMU exception. This is limited to only
 * those EXCP values which are special cases for QEMU to interrupt
 * execution and not to be used for exceptions which are passed to
 * the guest (those must all have syndrome information and thus should
 * use exception_with_syndrome).
 */
void HELPER(exception_internal)(CPUARMState *env, uint32_t excp)
{
    CPUState *cs = CPU(arm_env_get_cpu(env));

    assert(excp_is_internal(excp));
    cs->exception_index = excp;
    cpu_loop_exit(cs);
}
/* Raise an exception with the specified syndrome register value */
void HELPER(exception_with_syndrome)(CPUARMState *env, uint32_t excp,
                                     uint32_t syndrome, uint32_t target_el)
{
    raise_exception(env, excp, syndrome, target_el);
}
uint32_t HELPER(cpsr_read)(CPUARMState *env)
{
    return cpsr_read(env) & ~(CPSR_EXEC | CPSR_RESERVED);
}

void HELPER(cpsr_write)(CPUARMState *env, uint32_t val, uint32_t mask)
{
    cpsr_write(env, val, mask);
}
/* Access to user mode registers from privileged modes.  */
uint32_t HELPER(get_user_reg)(CPUARMState *env, uint32_t regno)
{
    uint32_t val;

    if (regno == 13) {
        val = env->banked_r13[0];
    } else if (regno == 14) {
        val = env->banked_r14[0];
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        val = env->usr_regs[regno - 8];
    } else {
        val = env->regs[regno];
    }
    return val;
}
void HELPER(set_user_reg)(CPUARMState *env, uint32_t regno, uint32_t val)
{
    if (regno == 13) {
        env->banked_r13[0] = val;
    } else if (regno == 14) {
        env->banked_r14[0] = val;
    } else if (regno >= 8
               && (env->uncached_cpsr & 0x1f) == ARM_CPU_MODE_FIQ) {
        env->usr_regs[regno - 8] = val;
    } else {
        env->regs[regno] = val;
    }
}
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome)
{
    const ARMCPRegInfo *ri = rip;
    int target_el;

    if (arm_feature(env, ARM_FEATURE_XSCALE) && ri->cp < 14
        && extract32(env->cp15.c15_cpar, ri->cp, 1) == 0) {
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    if (!ri->accessfn) {
        return;
    }

    switch (ri->accessfn(env, ri)) {
    case CP_ACCESS_OK:
        return;
    case CP_ACCESS_TRAP:
        target_el = exception_target_el(env);
        break;
    case CP_ACCESS_TRAP_EL2:
        /* Requesting a trap to EL2 when we're in EL3 or S-EL0/1 is
         * a bug in the access function.
         */
        assert(!arm_is_secure(env) && arm_current_el(env) != 3);
        target_el = 2;
        break;
    case CP_ACCESS_TRAP_EL3:
        target_el = 3;
        break;
    case CP_ACCESS_TRAP_UNCATEGORIZED:
        target_el = exception_target_el(env);
        syndrome = syn_uncategorized();
        break;
    default:
        g_assert_not_reached();
    }

    raise_exception(env, EXCP_UDEF, syndrome, target_el);
}
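/* A note on usage (summarizing, not from this file): when a system or
 * coprocessor register access cannot be fully checked at translate time,
 * the generated code calls access_check_cp_reg first; if that returns
 * instead of raising an exception, one of the get/set helpers below
 * performs the actual access through ri->readfn or ri->writefn.
 */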
void HELPER(set_cp_reg)(CPUARMState *env, void *rip, uint32_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint32_t HELPER(get_cp_reg)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}

void HELPER(set_cp_reg64)(CPUARMState *env, void *rip, uint64_t value)
{
    const ARMCPRegInfo *ri = rip;

    ri->writefn(env, ri, value);
}

uint64_t HELPER(get_cp_reg64)(CPUARMState *env, void *rip)
{
    const ARMCPRegInfo *ri = rip;

    return ri->readfn(env, ri);
}
void HELPER(msr_i_pstate)(CPUARMState *env, uint32_t op, uint32_t imm)
{
    /* MSR_i to update PSTATE. This is OK from EL0 only if UMA is set.
     * Note that SPSel is never OK from EL0; we rely on handle_msr_i()
     * to catch that case at translate time.
     */
    if (arm_current_el(env) == 0 && !(env->cp15.sctlr_el[1] & SCTLR_UMA)) {
        uint32_t syndrome = syn_aa64_sysregtrap(0, extract32(op, 0, 3),
                                                extract32(op, 3, 3), 4,
                                                imm, 0x1f, 0);
        raise_exception(env, EXCP_UDEF, syndrome, exception_target_el(env));
    }

    switch (op) {
    case 0x05: /* SPSel */
        update_spsel(env, imm);
        break;
    case 0x1e: /* DAIFSet */
        env->daif |= (imm << 6) & PSTATE_DAIF;
        break;
    case 0x1f: /* DAIFClear */
        env->daif &= ~((imm << 6) & PSTATE_DAIF);
        break;
    default:
        g_assert_not_reached();
    }
}
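/* DAIF example (illustrative): the 4-bit immediate maps onto
 * PSTATE.{D,A,I,F} at bits [9:6], hence the '<< 6' above. So
 * MSR DAIFSet, #3 (imm = 0b0011) sets PSTATE.I and PSTATE.F,
 * masking IRQs and FIQs.
 */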
void HELPER(clear_pstate_ss)(CPUARMState *env)
{
    env->pstate &= ~PSTATE_SS;
}
void HELPER(pre_hvc)(CPUARMState *env)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    /* FIXME: Use actual secure state.  */
    bool secure = false;
    bool undef;

    if (arm_is_psci_call(cpu, EXCP_HVC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated HVC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL2)) {
        /* If EL2 doesn't exist, HVC always UNDEFs */
        undef = true;
    } else if (arm_feature(env, ARM_FEATURE_EL3)) {
        /* EL3.HCE has priority over EL2.HCD. */
        undef = !(env->cp15.scr_el3 & SCR_HCE);
    } else {
        undef = env->cp15.hcr_el2 & HCR_HCD;
    }

    /* In ARMv7 and ARMv8/AArch32, HVC is UNDEF in secure state.
     * For ARMv8/AArch64, HVC is allowed in EL3.
     * Note that we've already trapped HVC from EL0 at translation
     * time.
     */
    if (secure && (!is_a64(env) || cur_el == 1)) {
        undef = true;
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(pre_smc)(CPUARMState *env, uint32_t syndrome)
{
    ARMCPU *cpu = arm_env_get_cpu(env);
    int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool smd = env->cp15.scr_el3 & SCR_SMD;
    /* On ARMv8 AArch32, SMD only applies to NS state.
     * On ARMv7 SMD only applies to NS state and only if EL2 is available.
     * For ARMv7 without EL2, we force SMD to zero so we don't need to
     * re-check the EL2 condition here.
     */
    bool undef = is_a64(env) ? smd : (!secure && smd);

    if (arm_is_psci_call(cpu, EXCP_SMC)) {
        /* If PSCI is enabled and this looks like a valid PSCI call then
         * that overrides the architecturally mandated SMC behaviour.
         */
        return;
    }

    if (!arm_feature(env, ARM_FEATURE_EL3)) {
        /* If we have no EL3 then SMC always UNDEFs */
        undef = true;
    } else if (!secure && cur_el == 1 && (env->cp15.hcr_el2 & HCR_TSC)) {
        /* In NS EL1, HCR-controlled routing to EL2 has priority over SMD. */
        raise_exception(env, EXCP_HYP_TRAP, syndrome, 2);
    }

    if (undef) {
        raise_exception(env, EXCP_UDEF, syn_uncategorized(),
                        exception_target_el(env));
    }
}
void HELPER(exception_return)(CPUARMState *env)
{
    int cur_el = arm_current_el(env);
    unsigned int spsr_idx = aarch64_banked_spsr_index(cur_el);
    uint32_t spsr = env->banked_spsr[spsr_idx];
    int new_el;

    aarch64_save_sp(env, cur_el);

    env->exclusive_addr = -1;

    /* We must squash the PSTATE.SS bit to zero unless both of the
     * following hold:
     *  1. debug exceptions are currently disabled
     *  2. singlestep will be active in the EL we return to
     * We check 1 here and 2 after we've done the pstate/cpsr write() to
     * transition to the EL we're going to.
     */
    if (arm_generate_debug_exceptions(env)) {
        spsr &= ~PSTATE_SS;
    }

    if (spsr & PSTATE_nRW) {
        /* TODO: We currently assume EL1/2/3 are running in AArch64.  */
        env->aarch64 = 0;
        new_el = 0;
        env->uncached_cpsr = 0x10;
        cpsr_write(env, spsr, ~0);
        if (!arm_singlestep_active(env)) {
            env->uncached_cpsr &= ~PSTATE_SS;
        }
        aarch64_sync_64_to_32(env);

        env->regs[15] = env->elr_el[1] & ~0x1;
    } else {
        new_el = extract32(spsr, 2, 2);
        if (new_el > cur_el
            || (new_el == 2 && !arm_feature(env, ARM_FEATURE_EL2))) {
            /* Disallow return to an EL which is unimplemented or higher
             * than the current one.
             */
            goto illegal_return;
        }
        if (extract32(spsr, 1, 1)) {
            /* Return with reserved M[1] bit set */
            goto illegal_return;
        }
        if (new_el == 0 && (spsr & PSTATE_SP)) {
            /* Return to EL0 with M[0] bit set */
            goto illegal_return;
        }
        env->aarch64 = 1;
        pstate_write(env, spsr);
        if (!arm_singlestep_active(env)) {
            env->pstate &= ~PSTATE_SS;
        }
        aarch64_restore_sp(env, new_el);
        env->pc = env->elr_el[cur_el];
    }

    return;

illegal_return:
    /* Illegal return events of various kinds have architecturally
     * mandated behaviour:
     *  restore NZCV and DAIF from SPSR_ELx
     *  set PSTATE.IL
     *  restore PC from ELR_ELx
     *  no change to exception level, execution state or stack pointer
     */
    env->pstate |= PSTATE_IL;
    env->pc = env->elr_el[cur_el];
    spsr &= PSTATE_NZCV | PSTATE_DAIF;
    spsr |= pstate_read(env) & ~(PSTATE_NZCV | PSTATE_DAIF);
    pstate_write(env, spsr);
    if (!arm_singlestep_active(env)) {
        env->pstate &= ~PSTATE_SS;
    }
}
/* Return true if the linked breakpoint entry lbn passes its checks */
static bool linked_bp_matches(ARMCPU *cpu, int lbn)
{
    CPUARMState *env = &cpu->env;
    uint64_t bcr = env->cp15.dbgbcr[lbn];
    int brps = extract32(cpu->dbgdidr, 24, 4);
    int ctx_cmps = extract32(cpu->dbgdidr, 20, 4);
    int bt;
    uint32_t contextidr;

    /* Links to unimplemented or non-context-aware breakpoints are
     * CONSTRAINED UNPREDICTABLE: either behave as if disabled, or
     * as if linked to an UNKNOWN context-aware breakpoint (in which
     * case DBGWCR<n>_EL1.LBN must indicate that breakpoint).
     * We choose the former.
     */
    if (lbn > brps || lbn < (brps - ctx_cmps)) {
        return false;
    }

    if (extract64(bcr, 0, 1) == 0) {
        /* Linked breakpoint disabled : generate no events */
        return false;
    }

    bt = extract64(bcr, 20, 4);

    /* We match the whole register even if this is AArch32 using the
     * short descriptor format (in which case it holds both PROCID and ASID),
     * since we don't implement the optional v7 context ID masking.
     */
    contextidr = extract64(env->cp15.contextidr_el[1], 0, 32);

    switch (bt) {
    case 3: /* linked context ID match */
        if (arm_current_el(env) > 1) {
            /* Context matches never fire in EL2 or (AArch64) EL3 */
            return false;
        }
        return (contextidr == extract64(env->cp15.dbgbvr[lbn], 0, 32));
    case 5: /* linked address mismatch (reserved in AArch64) */
    case 9: /* linked VMID match (reserved if no EL2) */
    case 11: /* linked context ID and VMID match (reserved if no EL2) */
    default:
        /* Links to Unlinked context breakpoints must generate no
         * events; we choose to do the same for reserved values too.
         */
        return false;
    }
}
static bool bp_wp_matches(ARMCPU *cpu, int n, bool is_wp)
{
    CPUARMState *env = &cpu->env;
    uint64_t cr;
    int pac, hmc, ssc, wt, lbn;
    /* Note that for watchpoints the check is against the CPU security
     * state, not the S/NS attribute on the offending data access.
     */
    bool is_secure = arm_is_secure(env);
    int access_el = arm_current_el(env);

    if (is_wp) {
        CPUWatchpoint *wp = env->cpu_watchpoint[n];

        if (!wp || !(wp->flags & BP_WATCHPOINT_HIT)) {
            return false;
        }
        cr = env->cp15.dbgwcr[n];
        if (wp->hitattrs.user) {
            /* The LDRT/STRT/LDT/STT "unprivileged access" instructions should
             * match watchpoints as if they were accesses done at EL0, even if
             * the CPU is at EL1 or higher.
             */
            access_el = 0;
        }
    } else {
        uint64_t pc = is_a64(env) ? env->pc : env->regs[15];

        if (!env->cpu_breakpoint[n] || env->cpu_breakpoint[n]->pc != pc) {
            return false;
        }
        cr = env->cp15.dbgbcr[n];
    }
    /* The WATCHPOINT_HIT flag guarantees us that the watchpoint is
     * enabled and that the address and access type match; for breakpoints
     * we know the address matched; check the remaining fields, including
     * linked breakpoints. We rely on WCR and BCR having the same layout
     * for the LBN, SSC, HMC, PAC/PMC and is-linked fields.
     * Note that some combinations of {PAC, HMC, SSC} are reserved and
     * must act either like some valid combination or as if the watchpoint
     * were disabled. We choose the former, and use this together with
     * the fact that EL3 must always be Secure and EL2 must always be
     * Non-Secure to simplify the code slightly compared to the full
     * table in the ARM ARM.
     */
    pac = extract64(cr, 1, 2);
    hmc = extract64(cr, 13, 1);
    ssc = extract64(cr, 14, 2);

    switch (ssc) {
    case 0:
        break;
    case 1:
    case 3:
        if (is_secure) {
            return false;
        }
        break;
    case 2:
        if (!is_secure) {
            return false;
        }
        break;
    }

    switch (access_el) {
    case 3:
    case 2:
        if (!hmc) {
            return false;
        }
        break;
    case 1:
        if (extract32(pac, 0, 1) == 0) {
            return false;
        }
        break;
    case 0:
        if (extract32(pac, 1, 1) == 0) {
            return false;
        }
        break;
    default:
        g_assert_not_reached();
    }

    wt = extract64(cr, 20, 1);
    lbn = extract64(cr, 16, 4);

    if (wt && !linked_bp_matches(cpu, lbn)) {
        return false;
    }

    return true;
}
static bool check_watchpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If watchpoints are disabled globally or we can't take debug
     * exceptions here then watchpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_watchpoint); n++) {
        if (bp_wp_matches(cpu, n, true)) {
            return true;
        }
    }
    return false;
}
static bool check_breakpoints(ARMCPU *cpu)
{
    CPUARMState *env = &cpu->env;
    int n;

    /* If breakpoints are disabled globally or we can't take debug
     * exceptions here then breakpoint firings are ignored.
     */
    if (extract32(env->cp15.mdscr_el1, 15, 1) == 0
        || !arm_generate_debug_exceptions(env)) {
        return false;
    }

    for (n = 0; n < ARRAY_SIZE(env->cpu_breakpoint); n++) {
        if (bp_wp_matches(cpu, n, false)) {
            return true;
        }
    }
    return false;
}
void arm_debug_excp_handler(CPUState *cs)
{
    /* Called by core code when a watchpoint or breakpoint fires;
     * need to check which one and raise the appropriate exception.
     */
    ARMCPU *cpu = ARM_CPU(cs);
    CPUARMState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit) {
        if (wp_hit->flags & BP_CPU) {
            cs->watchpoint_hit = NULL;
            if (check_watchpoints(cpu)) {
                bool wnr = (wp_hit->flags & BP_WATCHPOINT_HIT_WRITE) != 0;
                bool same_el = arm_debug_target_el(env) == arm_current_el(env);

                if (extended_addresses_enabled(env)) {
                    env->exception.fsr = (1 << 9) | 0x22;
                } else {
                    env->exception.fsr = 0x2;
                }
                env->exception.vaddress = wp_hit->hitaddr;
                raise_exception(env, EXCP_DATA_ABORT,
                                syn_watchpoint(same_el, 0, wnr),
                                arm_debug_target_el(env));
            } else {
                cpu_resume_from_signal(cs, NULL);
            }
        }
    } else {
        if (check_breakpoints(cpu)) {
            bool same_el = (arm_debug_target_el(env) == arm_current_el(env));

            if (extended_addresses_enabled(env)) {
                env->exception.fsr = (1 << 9) | 0x22;
            } else {
                env->exception.fsr = 0x2;
            }
            /* FAR is UNKNOWN, so doesn't need setting */
            raise_exception(env, EXCP_PREFETCH_ABORT,
                            syn_breakpoint(same_el),
                            arm_debug_target_el(env));
        }
    }
}
/* ??? Flag setting arithmetic is awkward because we need to do comparisons.
   The only way to do that in TCG is a conditional branch, which clobbers
   all our temporaries.  For now implement these as helper functions.  */

/* Similarly for variable shift instructions.  */

uint32_t HELPER(shl_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;

    if (shift >= 32) {
        if (shift == 32) {
            env->CF = x & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (32 - shift)) & 1;
        return x << shift;
    }
    return x;
}
uint32_t HELPER(shr_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;

    if (shift >= 32) {
        if (shift == 32) {
            env->CF = (x >> 31) & 1;
        } else {
            env->CF = 0;
        }
        return 0;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return x >> shift;
    }
    return x;
}
uint32_t HELPER(sar_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift = i & 0xff;

    if (shift >= 32) {
        env->CF = (x >> 31) & 1;
        return (int32_t)x >> 31;
    } else if (shift != 0) {
        env->CF = (x >> (shift - 1)) & 1;
        return (int32_t)x >> shift;
    }
    return x;
}
uint32_t HELPER(ror_cc)(CPUARMState *env, uint32_t x, uint32_t i)
{
    int shift1, shift;

    shift1 = i & 0xff;
    shift = shift1 & 0x1f;
    if (shift == 0) {
        if (shift1 != 0) {
            env->CF = (x >> 31) & 1;
        }
        return x;
    } else {
        env->CF = (x >> (shift - 1)) & 1;
        return ((uint32_t)x >> shift) | (x << (32 - shift));
    }
}
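/* Carry-out examples (illustrative): for LSL the carry is the last bit
 * shifted out of the top, so shl_cc(env, 0x80000001, 1) returns 2 with
 * CF = 1. For ROR only the low five bits of the amount actually rotate;
 * a nonzero multiple of 32 (shift1 != 0, shift == 0) returns x unchanged
 * but still sets CF from bit 31.
 */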