/*
 * ARM virtual CPU header
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "kvm-consts.h"
#include "hw/registerfields.h"

#if defined(TARGET_AARCH64)
/* AArch64 definitions */
# define TARGET_LONG_BITS 64
#else
# define TARGET_LONG_BITS 32
#endif
/* ARM processors have a weak memory model */
#define TCG_GUEST_DEFAULT_MO (0)

#define CPUArchState struct CPUARMState

#include "qemu-common.h"
#include "exec/cpu-defs.h"

#include "fpu/softfloat.h"
#define EXCP_UDEF            1   /* undefined instruction */
#define EXCP_SWI             2   /* software interrupt */
#define EXCP_PREFETCH_ABORT  3
#define EXCP_DATA_ABORT      4
#define EXCP_EXCEPTION_EXIT  8   /* Return from v7M exception.  */
#define EXCP_KERNEL_TRAP     9   /* Jumped to kernel code page.  */
#define EXCP_HVC            11   /* HyperVisor Call */
#define EXCP_HYP_TRAP       12
#define EXCP_SMC            13   /* Secure Monitor Call */
#define EXCP_SEMIHOST       16   /* semihosting call */
#define EXCP_NOCP           17   /* v7M NOCP UsageFault */
#define EXCP_INVSTATE       18   /* v7M INVSTATE UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET   1
#define ARMV7M_EXCP_NMI     2
#define ARMV7M_EXCP_HARD    3
#define ARMV7M_EXCP_MEM     4
#define ARMV7M_EXCP_BUS     5
#define ARMV7M_EXCP_USAGE   6
#define ARMV7M_EXCP_SECURE  7
#define ARMV7M_EXCP_SVC     11
#define ARMV7M_EXCP_DEBUG   12
#define ARMV7M_EXCP_PENDSV  14
#define ARMV7M_EXCP_SYSTICK 15
/* For M profile, some registers are banked secure vs non-secure;
 * these are represented as a 2-element array where the first element
 * is the non-secure copy and the second is the secure copy.
 * When the CPU does not implement the security extension then
 * only the first element is used.
 * This means that the copy for the current security state can be
 * accessed via env->registerfield[env->v7m.secure] (whether the security
 * extension is implemented or not).
 */
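/* A minimal illustrative sketch (not part of the QEMU API): reading the
 * banked CCR for whichever security state the CPU is currently in, where
 * 'env' is assumed to be a valid CPUARMState pointer:
 *
 *     uint32_t ccr = env->v7m.ccr[env->v7m.secure];
 *
 * env->v7m.secure is 0 or 1, so the same expression works whether or not
 * the security extension is implemented.
 */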
/* ARM-specific interrupt pending bits.  */
#define CPU_INTERRUPT_FIQ   CPU_INTERRUPT_TGT_EXT_1
#define CPU_INTERRUPT_VIRQ  CPU_INTERRUPT_TGT_EXT_2
#define CPU_INTERRUPT_VFIQ  CPU_INTERRUPT_TGT_EXT_3
/* The usual mapping for an AArch64 system register to its AArch32
 * counterpart is for the 32 bit world to have access to the lower
 * half only (with writes leaving the upper half untouched). It's
 * therefore useful to be able to pass TCG the offset of the least
 * significant half of a uint64_t struct member.
 */
#ifdef HOST_WORDS_BIGENDIAN
#define offsetoflow32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#define offsetofhigh32(S, M) offsetof(S, M)
#else
#define offsetoflow32(S, M) offsetof(S, M)
#define offsetofhigh32(S, M) (offsetof(S, M) + sizeof(uint32_t))
#endif
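/* Illustrative usage sketch ('DemoState' and 'examplefield' are made-up
 * names for the example, not QEMU identifiers):
 *
 *     struct DemoState { uint64_t examplefield; };
 *     size_t off = offsetoflow32(struct DemoState, examplefield);
 *
 * On a little-endian host this is offsetof(...) + 0; on a big-endian host
 * it is offsetof(...) + 4, so 'off' always locates the 32 least
 * significant bits of the 64 bit member.
 */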
/* Meanings of the ARMCPU object's four inbound GPIO lines */
#define ARM_CPU_IRQ 0
#define ARM_CPU_FIQ 1
#define ARM_CPU_VIRQ 2
#define ARM_CPU_VFIQ 3
#define NB_MMU_MODES 7
/* ARM-specific extra insn start words:
 * 1: Conditional execution bits
 * 2: Partial exception syndrome for data aborts
 */
#define TARGET_INSN_START_EXTRA_WORDS 2

/* The 2nd extra word holding syndrome info for data aborts does not use
 * the upper 6 bits nor the lower 14 bits. We mask and shift it down to
 * help the sleb128 encoder do a better job.
 * When restoring the CPU state, we shift it back up.
 */
#define ARM_INSN_START_WORD2_MASK  ((1 << 26) - 1)
#define ARM_INSN_START_WORD2_SHIFT 14
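/* Sketch of the round trip the comment above describes (an inference from
 * that comment, not a quote from the translator code):
 *
 *     store:    word2 = (syndrome & ARM_INSN_START_WORD2_MASK)
 *                       >> ARM_INSN_START_WORD2_SHIFT;
 *     restore:  syndrome = word2 << ARM_INSN_START_WORD2_SHIFT;
 *
 * Masking first drops the unused upper 6 bits and the shift drops the
 * unused lower 14, leaving a small value that sleb128 encodes compactly.
 */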
/* We currently assume float and double are IEEE single and double
   precision respectively.
   Doing runtime conversions is tricky because VFP registers may contain
   integer values (eg. as the result of an FTOSI instruction).
   s<2n> maps to the least significant half of d<n>
   s<2n+1> maps to the most significant half of d<n>
 */
/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
    uint64_t cval; /* Timer CompareValue register */
    uint64_t ctl; /* Timer Control register */
} ARMGenericTimer;

#define GTIMER_PHYS 0
#define GTIMER_VIRT 1
#define NUM_GTIMERS 4
typedef struct CPUARMState {
    /* Regs for current mode.  */
    /* 32/64 switch only happens when taking and returning from
     * exceptions so the overlap semantics are taken care of then
     * instead of having a complicated union.
     */
    /* Regs for A64 mode.  */
    /* PSTATE isn't an architectural register for ARMv8. However, it is
     * convenient for us to assemble the underlying state into a 32 bit format
     * identical to the architectural format used for the SPSR. (This is also
     * what the Linux kernel's 'pstate' field in signal handlers and KVM's
     * 'pstate' register are.) Of the PSTATE bits:
     *  NZCV are kept in the split out env->CF/VF/NF/ZF, (which have the same
     *    semantics as for AArch32, as described in the comments on each field)
     *  nRW (also known as M[4]) is kept, inverted, in env->aarch64
     *  DAIF (exception masks) are kept in env->daif
     *  all other bits are stored in their correct places in env->pstate
     */
    uint32_t pstate;
    uint32_t aarch64; /* 1 if CPU is in aarch64 state; inverse of PSTATE.nRW */
    /* Frequently accessed CPSR bits are stored separately for efficiency.
       This contains all the other bits. Use cpsr_{read,write} to access
       the whole CPSR.  */
    uint32_t uncached_cpsr;

    /* Banked registers.  */
    uint64_t banked_spsr[8];
    uint32_t banked_r13[8];
    uint32_t banked_r14[8];

    /* These hold r8-r12.  */
    uint32_t usr_regs[5];
    uint32_t fiq_regs[5];

    /* cpsr flag cache for faster execution */
    uint32_t CF; /* 0 or 1 */
    uint32_t VF; /* V is the bit 31. All other bits are undefined */
    uint32_t NF; /* N is bit 31. All other bits are undefined.  */
    uint32_t ZF; /* Z set if zero.  */
    uint32_t QF; /* 0 or 1 */
    uint32_t GE; /* cpsr[19:16] */
    uint32_t thumb; /* cpsr[5]. 0 = arm mode, 1 = thumb mode. */
    uint32_t condexec_bits; /* IT bits.  cpsr[15:10,26:25].  */
    uint64_t daif; /* exception masks, in the bits they are in PSTATE */

    uint64_t elr_el[4]; /* AArch64 exception link regs  */
    uint64_t sp_el[4]; /* AArch64 banked stack pointers */
    /* System control coprocessor (cp15) */
    struct {
        union { /* Cache size selection */
            uint64_t _unused_csselr0;
            uint64_t _unused_csselr1;
            uint64_t csselr_el[4];
        };
        union { /* System control register. */
            uint64_t _unused_sctlr;
            uint64_t sctlr_el[4];
        };
        uint64_t cpacr_el1; /* Architectural feature access control register */
        uint64_t cptr_el[4]; /* ARMv8 feature trap registers */
        uint32_t c1_xscaleauxcr; /* XScale auxiliary control register.  */
        uint64_t sder; /* Secure debug enable register. */
        uint32_t nsacr; /* Non-secure access control register. */
        union { /* MMU translation table base 0. */
            uint64_t _unused_ttbr0_0;
            uint64_t _unused_ttbr0_1;
            uint64_t ttbr0_el[4];
        };
        union { /* MMU translation table base 1. */
            uint64_t _unused_ttbr1_0;
            uint64_t _unused_ttbr1_1;
            uint64_t ttbr1_el[4];
        };
        uint64_t vttbr_el2; /* Virtualization Translation Table Base.  */
        /* MMU translation table base control. */
        TCR tcr_el[4];
        TCR vtcr_el2; /* Virtualization Translation Control.  */
        uint32_t c2_data; /* MPU data cacheable bits.  */
        uint32_t c2_insn; /* MPU instruction cacheable bits.  */
        union { /* MMU domain access control register
                 * MPU write buffer control.
                 */
        };
        uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
        uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
        uint64_t hcr_el2; /* Hypervisor configuration register */
        uint64_t scr_el3; /* Secure configuration register.  */
        union { /* Fault status registers.  */
            uint64_t _unused_dfsr;
        };
        uint32_t c6_region[8]; /* MPU base/size registers.  */
        union { /* Fault address registers. */
            uint64_t _unused_far0;
#ifdef HOST_WORDS_BIGENDIAN
#endif
            uint64_t _unused_far3;
        };
        union { /* Translation result. */
            uint64_t _unused_par_0;
            uint64_t _unused_par_1;
        };
        uint32_t c9_insn; /* Cache lockdown registers.  */
        uint64_t c9_pmcr; /* performance monitor control register */
        uint64_t c9_pmcnten; /* perf monitor counter enables */
        uint32_t c9_pmovsr; /* perf monitor overflow status */
        uint32_t c9_pmuserenr; /* perf monitor user enable */
        uint64_t c9_pmselr; /* perf monitor counter selection register */
        uint64_t c9_pminten; /* perf monitor interrupt enables */
        union { /* Memory attribute redirection */
#ifdef HOST_WORDS_BIGENDIAN
            uint64_t _unused_mair_0;
            uint64_t _unused_mair_1;
#else
            uint64_t _unused_mair_0;
            uint64_t _unused_mair_1;
#endif
        };
        union { /* vector base address register */
            uint64_t _unused_vbar;
        };
        uint32_t mvbar; /* (monitor) vector base address register */
        struct { /* FCSE PID. */
        };
        union { /* Context ID. */
            uint64_t _unused_contextidr_0;
            uint64_t contextidr_ns;
            uint64_t _unused_contextidr_1;
            uint64_t contextidr_s;
            uint64_t contextidr_el[4];
        };
        union { /* User RW Thread register. */
            uint64_t tpidrurw_ns;
            uint64_t tpidrprw_ns;
            uint64_t tpidr_el[4];
        };
        /* The secure banks of these registers don't map anywhere */
        union { /* User RO Thread register. */
            uint64_t tpidruro_ns;
            uint64_t tpidrro_el[1];
        };
        uint64_t c14_cntfrq; /* Counter Frequency register */
        uint64_t c14_cntkctl; /* Timer Control register */
        uint32_t cnthctl_el2; /* Counter/Timer Hyp Control register */
        uint64_t cntvoff_el2; /* Counter Virtual Offset register */
        ARMGenericTimer c14_timer[NUM_GTIMERS];
        uint32_t c15_cpar; /* XScale Coprocessor Access Register */
        uint32_t c15_ticonfig; /* TI925T configuration byte.  */
        uint32_t c15_i_max; /* Maximum D-cache dirty line index.  */
        uint32_t c15_i_min; /* Minimum D-cache dirty line index.  */
        uint32_t c15_threadid; /* TI debugger thread-ID.  */
        uint32_t c15_config_base_address; /* SCU base address.  */
        uint32_t c15_diagnostic; /* diagnostic register */
        uint32_t c15_power_diagnostic;
        uint32_t c15_power_control; /* power control */
        uint64_t dbgbvr[16]; /* breakpoint value registers */
        uint64_t dbgbcr[16]; /* breakpoint control registers */
        uint64_t dbgwvr[16]; /* watchpoint value registers */
        uint64_t dbgwcr[16]; /* watchpoint control registers */
        uint64_t oslsr_el1; /* OS Lock Status */
        /* If the counter is enabled, this stores the last time the counter
         * was reset. Otherwise it stores the counter value.
         */
        uint64_t pmccfiltr_el0; /* Performance Monitor Filter Register */
        uint64_t vpidr_el2; /* Virtualization Processor ID Register */
        uint64_t vmpidr_el2; /* Virtualization Multiprocessor ID Register */
    } cp15;
    /* M profile has up to 4 stack pointers:
     * a Main Stack Pointer and a Process Stack Pointer for each
     * of the Secure and Non-Secure states. (If the CPU doesn't support
     * the security extension then it has only two SPs.)
     * In QEMU we always store the currently active SP in regs[13],
     * and the non-active SP for the current security state in
     * v7m.other_sp. The stack pointers for the inactive security state
     * are stored in other_ss_msp and other_ss_psp.
     * switch_v7m_security_state() is responsible for rearranging them
     * when we change security state.
     */
    struct {
        uint32_t other_ss_msp;
        uint32_t other_ss_psp;
        uint32_t vecbase[M_REG_NUM_BANKS];
        uint32_t basepri[M_REG_NUM_BANKS];
        uint32_t control[M_REG_NUM_BANKS];
        uint32_t ccr[M_REG_NUM_BANKS]; /* Configuration and Control */
        uint32_t cfsr[M_REG_NUM_BANKS]; /* Configurable Fault Status */
        uint32_t hfsr; /* HardFault Status */
        uint32_t dfsr; /* Debug Fault Status Register */
        uint32_t mmfar[M_REG_NUM_BANKS]; /* MemManage Fault Address */
        uint32_t bfar; /* BusFault Address */
        unsigned mpu_ctrl[M_REG_NUM_BANKS]; /* MPU_CTRL */
        uint32_t primask[M_REG_NUM_BANKS];
        uint32_t faultmask[M_REG_NUM_BANKS];
        uint32_t aircr; /* only holds r/w state if security extn implemented */
        uint32_t secure; /* Is CPU in Secure state? (not guest visible) */
    } v7m;
    /* Information associated with an exception about to be taken:
     * code which raises an exception must set cs->exception_index and
     * the relevant parts of this structure; the cpu_do_interrupt function
     * will then set the guest-visible registers as part of the exception
     * entry process.
     */
    struct {
        uint32_t syndrome; /* AArch64 format syndrome register */
        uint32_t fsr; /* AArch32 format fault status register info */
        uint64_t vaddress; /* virtual addr associated with exception, if any */
        uint32_t target_el; /* EL the exception should be targeted for */
        /* If we implement EL2 we will also need to store information
         * about the intermediate physical address for stage 2 faults.
         */
    } exception;
    /* Thumb-2 EE state.  */

    /* VFP coprocessor state.  */
    /* VFP/Neon register state. Note that the mapping between S, D and Q
     * views of the register bank differs between AArch64 and AArch32:
     * In AArch32:
     *  Qn = regs[2n+1]:regs[2n]
     *  Sn = regs[n/2] bits 31..0 for even n, and bits 63..32 for odd n
     *  (and regs[32] to regs[63] are inaccessible)
     * In AArch64:
     *  Qn = regs[2n+1]:regs[2n]
     *  Sn = regs[2n] bits 31..0
     * This corresponds to the architecturally defined mapping between
     * the two execution states, and means we do not need to explicitly
     * map these registers when changing states.
     */
    /* We store these fpcsr fields separately for convenience.  */
    /* scratch space when Tn are not sufficient.  */
    /* fp_status is the "normal" fp status. standard_fp_status retains
     * values corresponding to the ARM "Standard FPSCR Value", ie
     * default-NaN, flush-to-zero, round-to-nearest and is used by
     * any operations (generally Neon) which the architecture defines
     * as controlled by the standard FPSCR value rather than the FPSCR.
     *
     * To avoid having to transfer exception bits around, we simply
     * say that the FPSCR cumulative exception flags are the logical
     * OR of the flags in the two fp statuses. This relies on the
     * only thing which needs to read the exception flags being
     * an explicit FPSCR read.
     */
    float_status fp_status;
    float_status standard_fp_status;

    uint64_t exclusive_addr;
    uint64_t exclusive_val;
    uint64_t exclusive_high;
    /* iwMMXt coprocessor state.  */
#if defined(CONFIG_USER_ONLY)
    /* For usermode syscall translation.  */
#endif

    struct CPUBreakpoint *cpu_breakpoint[16];
    struct CPUWatchpoint *cpu_watchpoint[16];

    /* Fields up to this point are cleared by a CPU reset */
    struct {} end_reset_fields;
    /* Fields after CPU_COMMON are preserved across CPU reset. */

    /* Internal CPU feature flags.  */
    uint64_t features;

    uint32_t rnr[M_REG_NUM_BANKS];

    /* The PMSAv8 implementation also shares some PMSAv7 config
     * and state:
     *  pmsav7.rnr (region number register)
     *  pmsav7_dregion (number of configured regions)
     */
    uint32_t *rbar[M_REG_NUM_BANKS];
    uint32_t *rlar[M_REG_NUM_BANKS];
    uint32_t mair0[M_REG_NUM_BANKS];
    uint32_t mair1[M_REG_NUM_BANKS];
    const struct arm_boot_info *boot_info;
    /* Store GICv3CPUState to access from this struct */
    void *gicv3state;
} CPUARMState;

/* type of a function which can be registered via arm_register_el_change_hook()
 * to get callbacks when the CPU changes its exception level or mode.
 */
typedef void ARMELChangeHook(ARMCPU *cpu, void *opaque);
/* These values map onto the return values for
 * QEMU_PSCI_0_2_FN_AFFINITY_INFO */
typedef enum ARMPSCIState {
    PSCI_ON = 0,
    PSCI_OFF = 1,
    PSCI_ON_PENDING = 2
} ARMPSCIState;
struct ARMCPU {
    /*< private >*/
    CPUState parent_obj;
    /*< public >*/

    CPUARMState env;

    /* Coprocessor information */
    GHashTable *cp_regs;
    /* For marshalling (mostly coprocessor) register state between the
     * kernel and QEMU (for KVM) and between two QEMUs (for migration),
     * we use these arrays.
     */
    /* List of register indexes managed via these arrays; (full KVM style
     * 64 bit indexes, not CPRegInfo 32 bit indexes)
     */
    uint64_t *cpreg_indexes;
    /* Values of the registers (cpreg_indexes[i]'s value is cpreg_values[i]) */
    uint64_t *cpreg_values;
    /* Length of the indexes, values, reset_values arrays */
    int32_t cpreg_array_len;
    /* These are used only for migration: incoming data arrives in
     * these fields and is sanity checked in post_load before copying
     * to the working data structures above.
     */
    uint64_t *cpreg_vmstate_indexes;
    uint64_t *cpreg_vmstate_values;
    int32_t cpreg_vmstate_array_len;
    /* Timers used by the generic (architected) timer */
    QEMUTimer *gt_timer[NUM_GTIMERS];
    /* GPIO outputs for generic timer */
    qemu_irq gt_timer_outputs[NUM_GTIMERS];
    /* GPIO output for GICv3 maintenance interrupt signal */
    qemu_irq gicv3_maintenance_interrupt;
    /* GPIO output for the PMU interrupt */
    qemu_irq pmu_interrupt;

    /* MemoryRegion to use for secure physical accesses */
    MemoryRegion *secure_memory;

    /* 'compatible' string for this CPU for Linux device trees */
    const char *dtb_compatible;

    /* PSCI version for this CPU
     * Bits[31:16] = Major Version
     * Bits[15:0] = Minor Version
     */
    uint32_t psci_version;
    /* Should CPU start in PSCI powered-off state? */
    bool start_powered_off;

    /* Current power state, access guarded by BQL */
    ARMPSCIState power_state;

    /* CPU has virtualization extension */
    bool has_el2;
    /* CPU has security extension */
    bool has_el3;
    /* CPU has PMU (Performance Monitor Unit) */
    bool has_pmu;

    /* CPU has memory protection unit */
    bool has_mpu;
    /* PMSAv7 MPU number of supported regions */
    uint32_t pmsav7_dregion;

    /* PSCI conduit used to invoke PSCI methods
     * 0 - disabled, 1 - smc, 2 - hvc
     */
    uint32_t psci_conduit;

    /* [QEMU_]KVM_ARM_TARGET_* constant for this CPU, or
     * QEMU_KVM_ARM_TARGET_NONE if the kernel doesn't support this CPU type.
     */
    uint32_t kvm_target;

    /* KVM init features for this CPU */
    uint32_t kvm_init_features[7];

    /* Uniprocessor system with MP extensions */
    bool mp_is_up;
    /* The instance init functions for implementation-specific subclasses
     * set these fields to specify the implementation-dependent values of
     * various constant registers and reset values of non-constant
     * registers.
     * Some of these might become QOM properties eventually.
     * Field names match the official register names as defined in the
     * ARMv7AR ARM Architecture Reference Manual. A reset_ prefix
     * is used for reset values of non-constant registers; no reset_
     * prefix means a constant register.
     */
    uint32_t reset_fpsid;
    uint32_t reset_sctlr;
    uint64_t id_aa64pfr0;
    uint64_t id_aa64pfr1;
    uint64_t id_aa64dfr0;
    uint64_t id_aa64dfr1;
    uint64_t id_aa64afr0;
    uint64_t id_aa64afr1;
    uint64_t id_aa64isar0;
    uint64_t id_aa64isar1;
    uint64_t id_aa64mmfr0;
    uint64_t id_aa64mmfr1;
    uint64_t mp_affinity; /* MP ID without feature bits */
    /* The elements of this array are the CCSIDR values for each cache,
     * in the order L1DCache, L1ICache, L2DCache, L2ICache, etc.
     */
    uint32_t ccsidr[16];
    uint32_t reset_auxcr;

    /* DCZ blocksize, in log_2(words), ie low 4 bits of DCZID_EL0 */
    uint32_t dcz_blocksize;
    /* Configurable aspects of GIC cpu interface (which is part of the CPU) */
    int gic_num_lrs; /* number of list registers */
    int gic_vpribits; /* number of virtual priority bits */
    int gic_vprebits; /* number of virtual preemption bits */

    /* Whether the cfgend input is high (i.e. this CPU should reset into
     * big-endian mode). This setting isn't used directly: instead it modifies
     * the reset_sctlr value to have SCTLR_B or SCTLR_EE set, depending on the
     * architecture version.
     */
    bool cfgend;

    ARMELChangeHook *el_change_hook;
    void *el_change_hook_opaque;

    int32_t node_id; /* NUMA node this CPU belongs to */

    /* Used to synchronize KVM and QEMU in-kernel device levels */
    uint8_t device_irq_level;
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
{
    return container_of(env, ARMCPU, env);
}

uint64_t arm_cpu_mp_affinity(int idx, uint8_t clustersz);

#define ENV_GET_CPU(e) CPU(arm_env_get_cpu(e))

#define ENV_OFFSET offsetof(ARMCPU, env)
#ifndef CONFIG_USER_ONLY
extern const struct VMStateDescription vmstate_arm_cpu;
#endif

void arm_cpu_do_interrupt(CPUState *cpu);
void arm_v7m_cpu_do_interrupt(CPUState *cpu);
bool arm_cpu_exec_interrupt(CPUState *cpu, int int_req);

void arm_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                        int flags);

hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cpu, vaddr addr,
                                         MemTxAttrs *attrs);

int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);

int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
                             int cpuid, void *opaque);
#ifdef TARGET_AARCH64
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
#endif

target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
static inline bool is_a64(CPUARMState *env)
{
    return env->aarch64;
}
/* you can call this signal handler from your SIGBUS and SIGSEGV
   signal handlers to inform the virtual CPU of exceptions. non zero
   is returned if the signal was handled by the virtual CPU. */
int cpu_arm_signal_handler(int host_signum, void *pinfo,
                           void *puc);
/* Synchronises the counter in the PMCCNTR. This must always be called twice,
 * once before any action that might affect the timer and again afterwards.
 * The function is used to swap the state of the register if required.
 * This only happens when not in user mode (!CONFIG_USER_ONLY).
 */
void pmccntr_sync(CPUARMState *env);
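/* Illustrative call pattern for the contract described above (a sketch,
 * not code quoted from QEMU):
 *
 *     pmccntr_sync(env);     // swap counter state out
 *     ... modify state that affects the counter, e.g. PMCR bits ...
 *     pmccntr_sync(env);     // swap counter state back in
 */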
/* SCTLR bit meanings. Several bits have been reused in newer
 * versions of the architecture; in that case we define constants
 * for both old and new bit meanings. Code which tests against those
 * bits should probably check or otherwise arrange that the CPU
 * is the architectural version it expects.
 */
#define SCTLR_M (1U << 0)
#define SCTLR_A (1U << 1)
#define SCTLR_C (1U << 2)
#define SCTLR_W (1U << 3) /* up to v6; RAO in v7 */
#define SCTLR_SA (1U << 3)
#define SCTLR_P (1U << 4) /* up to v5; RAO in v6 and v7 */
#define SCTLR_SA0 (1U << 4) /* v8 onward, AArch64 only */
#define SCTLR_D (1U << 5) /* up to v5; RAO in v6 */
#define SCTLR_CP15BEN (1U << 5) /* v7 onward */
#define SCTLR_L (1U << 6) /* up to v5; RAO in v6 and v7; RAZ in v8 */
#define SCTLR_B (1U << 7) /* up to v6; RAZ in v7 */
#define SCTLR_ITD (1U << 7) /* v8 onward */
#define SCTLR_S (1U << 8) /* up to v6; RAZ in v7 */
#define SCTLR_SED (1U << 8) /* v8 onward */
#define SCTLR_R (1U << 9) /* up to v6; RAZ in v7 */
#define SCTLR_UMA (1U << 9) /* v8 onward, AArch64 only */
#define SCTLR_F (1U << 10) /* up to v6 */
#define SCTLR_SW (1U << 10) /* v7 onward */
#define SCTLR_Z (1U << 11)
#define SCTLR_I (1U << 12)
#define SCTLR_V (1U << 13)
#define SCTLR_RR (1U << 14) /* up to v7 */
#define SCTLR_DZE (1U << 14) /* v8 onward, AArch64 only */
#define SCTLR_L4 (1U << 15) /* up to v6; RAZ in v7 */
#define SCTLR_UCT (1U << 15) /* v8 onward, AArch64 only */
#define SCTLR_DT (1U << 16) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWI (1U << 16) /* v8 onward */
#define SCTLR_HA (1U << 17)
#define SCTLR_BR (1U << 17) /* PMSA only */
#define SCTLR_IT (1U << 18) /* up to ??, RAO in v6 and v7 */
#define SCTLR_nTWE (1U << 18) /* v8 onward */
#define SCTLR_WXN (1U << 19)
#define SCTLR_ST (1U << 20) /* up to ??, RAZ in v6 */
#define SCTLR_UWXN (1U << 20) /* v7 onward */
#define SCTLR_FI (1U << 21)
#define SCTLR_U (1U << 22)
#define SCTLR_XP (1U << 23) /* up to v6; v7 onward RAO */
#define SCTLR_VE (1U << 24) /* up to v7 */
#define SCTLR_E0E (1U << 24) /* v8 onward, AArch64 only */
#define SCTLR_EE (1U << 25)
#define SCTLR_L2 (1U << 26) /* up to v6, RAZ in v7 */
#define SCTLR_UCI (1U << 26) /* v8 onward, AArch64 only */
#define SCTLR_NMFI (1U << 27)
#define SCTLR_TRE (1U << 28)
#define SCTLR_AFE (1U << 29)
#define SCTLR_TE (1U << 30)
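/* Example of the caveat above (an illustrative sketch, not code from
 * QEMU; ARM_FEATURE_V7 is assumed, as the feature enum in this excerpt
 * is abridged): bit 7 means SCTLR_B only before v7, so a test should be
 * qualified by architecture version:
 *
 *     if (!arm_feature(env, ARM_FEATURE_V7)
 *         && (env->cp15.sctlr_el[1] & SCTLR_B)) {
 *         ... legacy big-endian behaviour ...
 *     }
 */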
#define CPTR_TCPAC (1U << 31)
#define CPTR_TTA (1U << 20)
#define CPTR_TFP (1U << 10)

#define MDCR_EPMAD (1U << 21)
#define MDCR_EDAD (1U << 20)
#define MDCR_SPME (1U << 17)
#define MDCR_SDD (1U << 16)
#define MDCR_SPD (3U << 14)
#define MDCR_TDRA (1U << 11)
#define MDCR_TDOSA (1U << 10)
#define MDCR_TDA (1U << 9)
#define MDCR_TDE (1U << 8)
#define MDCR_HPME (1U << 7)
#define MDCR_TPM (1U << 6)
#define MDCR_TPMCR (1U << 5)

/* Not all of the MDCR_EL3 bits are present in the 32-bit SDCR */
#define SDCR_VALID_MASK (MDCR_EPMAD | MDCR_EDAD | MDCR_SPME | MDCR_SPD)
#define CPSR_M (0x1fU)
#define CPSR_T (1U << 5)
#define CPSR_F (1U << 6)
#define CPSR_I (1U << 7)
#define CPSR_A (1U << 8)
#define CPSR_E (1U << 9)
#define CPSR_IT_2_7 (0xfc00U)
#define CPSR_GE (0xfU << 16)
#define CPSR_IL (1U << 20)
/* Note that the RESERVED bits include bit 21, which is PSTATE_SS in
 * an AArch64 SPSR but RES0 in AArch32 SPSR and CPSR. In QEMU we use
 * env->uncached_cpsr bit 21 to store PSTATE.SS when executing in AArch32,
 * where it is live state but not accessible to the AArch32 code.
 */
#define CPSR_RESERVED (0x7U << 21)
#define CPSR_J (1U << 24)
#define CPSR_IT_0_1 (3U << 25)
#define CPSR_Q (1U << 27)
#define CPSR_V (1U << 28)
#define CPSR_C (1U << 29)
#define CPSR_Z (1U << 30)
#define CPSR_N (1U << 31)
#define CPSR_NZCV (CPSR_N | CPSR_Z | CPSR_C | CPSR_V)
#define CPSR_AIF (CPSR_A | CPSR_I | CPSR_F)

#define CPSR_IT (CPSR_IT_0_1 | CPSR_IT_2_7)
#define CACHED_CPSR_BITS (CPSR_T | CPSR_AIF | CPSR_GE | CPSR_IT | CPSR_Q \
                          | CPSR_NZCV)
/* Bits writable in user mode.  */
#define CPSR_USER (CPSR_NZCV | CPSR_Q | CPSR_GE)
/* Execution state bits. MRS read as zero, MSR writes ignored.  */
#define CPSR_EXEC (CPSR_T | CPSR_IT | CPSR_J | CPSR_IL)
/* Mask of bits which may be set by exception return copying them from SPSR */
#define CPSR_ERET_MASK (~CPSR_RESERVED)
/* Bit definitions for M profile XPSR. Most are the same as CPSR. */
#define XPSR_EXCP 0x1ffU
#define XPSR_SPREALIGN (1U << 9) /* Only set in exception stack frames */
#define XPSR_IT_2_7 CPSR_IT_2_7
#define XPSR_GE CPSR_GE
#define XPSR_SFPA (1U << 20) /* Only set in exception stack frames */
#define XPSR_T (1U << 24) /* Not the same as CPSR_T ! */
#define XPSR_IT_0_1 CPSR_IT_0_1
#define XPSR_Q CPSR_Q
#define XPSR_V CPSR_V
#define XPSR_C CPSR_C
#define XPSR_Z CPSR_Z
#define XPSR_N CPSR_N
#define XPSR_NZCV CPSR_NZCV
#define XPSR_IT CPSR_IT
#define TTBCR_N      (7U << 0) /* TTBCR.EAE==0 */
#define TTBCR_T0SZ   (7U << 0) /* TTBCR.EAE==1 */
#define TTBCR_PD0    (1U << 4)
#define TTBCR_PD1    (1U << 5)
#define TTBCR_EPD0   (1U << 7)
#define TTBCR_IRGN0  (3U << 8)
#define TTBCR_ORGN0  (3U << 10)
#define TTBCR_SH0    (3U << 12)
#define TTBCR_T1SZ   (3U << 16)
#define TTBCR_A1     (1U << 22)
#define TTBCR_EPD1   (1U << 23)
#define TTBCR_IRGN1  (3U << 24)
#define TTBCR_ORGN1  (3U << 26)
#define TTBCR_SH1    (1U << 28)
#define TTBCR_EAE    (1U << 31)
/* Bit definitions for ARMv8 SPSR (PSTATE) format.
 * Only these are valid when in AArch64 mode; in
 * AArch32 mode SPSRs are basically CPSR-format.
 */
#define PSTATE_SP (1U)
#define PSTATE_M (0xFU)
#define PSTATE_nRW (1U << 4)
#define PSTATE_F (1U << 6)
#define PSTATE_I (1U << 7)
#define PSTATE_A (1U << 8)
#define PSTATE_D (1U << 9)
#define PSTATE_IL (1U << 20)
#define PSTATE_SS (1U << 21)
#define PSTATE_V (1U << 28)
#define PSTATE_C (1U << 29)
#define PSTATE_Z (1U << 30)
#define PSTATE_N (1U << 31)
#define PSTATE_NZCV (PSTATE_N | PSTATE_Z | PSTATE_C | PSTATE_V)
#define PSTATE_DAIF (PSTATE_D | PSTATE_A | PSTATE_I | PSTATE_F)
#define CACHED_PSTATE_BITS (PSTATE_NZCV | PSTATE_DAIF)
/* Mode values for AArch64 */
#define PSTATE_MODE_EL3h 13
#define PSTATE_MODE_EL3t 12
#define PSTATE_MODE_EL2h 9
#define PSTATE_MODE_EL2t 8
#define PSTATE_MODE_EL1h 5
#define PSTATE_MODE_EL1t 4
#define PSTATE_MODE_EL0t 0
/* Map EL and handler into a PSTATE_MODE.  */
static inline unsigned int aarch64_pstate_mode(unsigned int el, bool handler)
{
    return (el << 2) | handler;
}
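/* For example, aarch64_pstate_mode(1, true) yields (1 << 2) | 1 == 5,
 * i.e. PSTATE_MODE_EL1h, and aarch64_pstate_mode(2, false) yields 8,
 * i.e. PSTATE_MODE_EL2t.
 */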
/* Return the current PSTATE value. For the moment we don't support 32<->64 bit
 * interprocessing, so we don't attempt to sync with the cpsr state used by
 * the 32 bit decoder.
 */
static inline uint32_t pstate_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3)
        | env->pstate | env->daif;
}
static inline void pstate_write(CPUARMState *env, uint32_t val)
{
    env->ZF = (~val) & PSTATE_Z;
    env->NF = val;
    env->CF = (val >> 29) & 1;
    env->VF = (val << 3) & 0x80000000;
    env->daif = val & PSTATE_DAIF;
    env->pstate = val & ~CACHED_PSTATE_BITS;
}
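/* Illustrative invariant implied by the two helpers above: pstate_read()
 * reassembles exactly the bits that pstate_write() scatters (NZCV into
 * the flag cache, DAIF into env->daif, everything else into env->pstate),
 * so pstate_write(env, v) followed by pstate_read(env) yields v again.
 */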
/* Return the current CPSR value.  */
uint32_t cpsr_read(CPUARMState *env);

typedef enum CPSRWriteType {
    CPSRWriteByInstr = 0,         /* from guest MSR or CPS */
    CPSRWriteExceptionReturn = 1, /* from guest exception return insn */
    CPSRWriteRaw = 2,             /* trust values, do not switch reg banks */
    CPSRWriteByGDBStub = 3,       /* from the GDB stub */
} CPSRWriteType;

/* Set the CPSR. Note that some bits of mask must be all-set or all-clear. */
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask,
                CPSRWriteType write_type);
/* Return the current xPSR value.  */
static inline uint32_t xpsr_read(CPUARMState *env)
{
    int ZF;

    ZF = (env->ZF == 0);
    return (env->NF & 0x80000000) | (ZF << 30)
        | (env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
        | (env->thumb << 24) | ((env->condexec_bits & 3) << 25)
        | ((env->condexec_bits & 0xfc) << 8)
        | env->v7m.exception;
}
/* Set the xPSR. Note that some bits of mask must be all-set or all-clear. */
static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
{
    if (mask & XPSR_NZCV) {
        env->ZF = (~val) & XPSR_Z;
        env->NF = val;
        env->CF = (val >> 29) & 1;
        env->VF = (val << 3) & 0x80000000;
    }
    if (mask & XPSR_Q) {
        env->QF = ((val & XPSR_Q) != 0);
    }
    if (mask & XPSR_T) {
        env->thumb = ((val & XPSR_T) != 0);
    }
    if (mask & XPSR_IT_0_1) {
        env->condexec_bits &= ~3;
        env->condexec_bits |= (val >> 25) & 3;
    }
    if (mask & XPSR_IT_2_7) {
        env->condexec_bits &= 3;
        env->condexec_bits |= (val >> 8) & 0xfc;
    }
    if (mask & XPSR_EXCP) {
        env->v7m.exception = val & XPSR_EXCP;
    }
}
#define HCR_VM        (1ULL << 0)
#define HCR_SWIO      (1ULL << 1)
#define HCR_PTW       (1ULL << 2)
#define HCR_FMO       (1ULL << 3)
#define HCR_IMO       (1ULL << 4)
#define HCR_AMO       (1ULL << 5)
#define HCR_VF        (1ULL << 6)
#define HCR_VI        (1ULL << 7)
#define HCR_VSE       (1ULL << 8)
#define HCR_FB        (1ULL << 9)
#define HCR_BSU_MASK  (3ULL << 10)
#define HCR_DC        (1ULL << 12)
#define HCR_TWI       (1ULL << 13)
#define HCR_TWE       (1ULL << 14)
#define HCR_TID0      (1ULL << 15)
#define HCR_TID1      (1ULL << 16)
#define HCR_TID2      (1ULL << 17)
#define HCR_TID3      (1ULL << 18)
#define HCR_TSC       (1ULL << 19)
#define HCR_TIDCP     (1ULL << 20)
#define HCR_TACR      (1ULL << 21)
#define HCR_TSW       (1ULL << 22)
#define HCR_TPC       (1ULL << 23)
#define HCR_TPU       (1ULL << 24)
#define HCR_TTLB      (1ULL << 25)
#define HCR_TVM       (1ULL << 26)
#define HCR_TGE       (1ULL << 27)
#define HCR_TDZ       (1ULL << 28)
#define HCR_HCD       (1ULL << 29)
#define HCR_TRVM      (1ULL << 30)
#define HCR_RW        (1ULL << 31)
#define HCR_CD        (1ULL << 32)
#define HCR_ID        (1ULL << 33)
#define HCR_MASK      ((1ULL << 34) - 1)
#define SCR_NS                (1U << 0)
#define SCR_IRQ               (1U << 1)
#define SCR_FIQ               (1U << 2)
#define SCR_EA                (1U << 3)
#define SCR_FW                (1U << 4)
#define SCR_AW                (1U << 5)
#define SCR_NET               (1U << 6)
#define SCR_SMD               (1U << 7)
#define SCR_HCE               (1U << 8)
#define SCR_SIF               (1U << 9)
#define SCR_RW                (1U << 10)
#define SCR_ST                (1U << 11)
#define SCR_TWI               (1U << 12)
#define SCR_TWE               (1U << 13)
#define SCR_AARCH32_MASK      (0x3fff & ~(SCR_RW | SCR_ST))
#define SCR_AARCH64_MASK      (0x3fff & ~SCR_NET)
/* Return the current FPSCR value.  */
uint32_t vfp_get_fpscr(CPUARMState *env);
void vfp_set_fpscr(CPUARMState *env, uint32_t val);

/* For A64 the FPSCR is split into two logically distinct registers,
 * FPCR and FPSR. However since they still use non-overlapping bits
 * we store the underlying state in fpscr and just mask on read/write.
 */
#define FPSR_MASK 0xf800009f
#define FPCR_MASK 0x07f79f00
static inline uint32_t vfp_get_fpsr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPSR_MASK;
}

static inline void vfp_set_fpsr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPSR_MASK) | (val & FPSR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}

static inline uint32_t vfp_get_fpcr(CPUARMState *env)
{
    return vfp_get_fpscr(env) & FPCR_MASK;
}

static inline void vfp_set_fpcr(CPUARMState *env, uint32_t val)
{
    uint32_t new_fpscr = (vfp_get_fpscr(env) & ~FPCR_MASK) | (val & FPCR_MASK);
    vfp_set_fpscr(env, new_fpscr);
}
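/* Note on the masks above (an observation, not new API): FPSR_MASK
 * (0xf800009f) covers the NZCV and QC flag bits plus the cumulative
 * exception flags, while FPCR_MASK (0x07f79f00) covers control bits such
 * as the rounding mode. The two masks are disjoint, so vfp_set_fpsr()
 * can never disturb control state and vfp_set_fpcr() can never disturb
 * the status flags.
 */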
enum arm_cpu_mode {
    ARM_CPU_MODE_USR = 0x10,
    ARM_CPU_MODE_FIQ = 0x11,
    ARM_CPU_MODE_IRQ = 0x12,
    ARM_CPU_MODE_SVC = 0x13,
    ARM_CPU_MODE_MON = 0x16,
    ARM_CPU_MODE_ABT = 0x17,
    ARM_CPU_MODE_HYP = 0x1a,
    ARM_CPU_MODE_UND = 0x1b,
    ARM_CPU_MODE_SYS = 0x1f
};
/* VFP system registers.  */
#define ARM_VFP_FPSID   0
#define ARM_VFP_FPSCR   1
#define ARM_VFP_MVFR2   5
#define ARM_VFP_MVFR1   6
#define ARM_VFP_MVFR0   7
#define ARM_VFP_FPEXC   8
#define ARM_VFP_FPINST  9
#define ARM_VFP_FPINST2 10

/* iwMMXt coprocessor control registers.  */
#define ARM_IWMMXT_wCID  0
#define ARM_IWMMXT_wCon  1
#define ARM_IWMMXT_wCSSF 2
#define ARM_IWMMXT_wCASF 3
#define ARM_IWMMXT_wCGR0 8
#define ARM_IWMMXT_wCGR1 9
#define ARM_IWMMXT_wCGR2 10
#define ARM_IWMMXT_wCGR3 11
/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, USERSETMPEND, 1, 1)
FIELD(V7M_CCR, UNALIGN_TRP, 3, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
/* V7M AIRCR bits */
FIELD(V7M_AIRCR, VECTRESET, 0, 1)
FIELD(V7M_AIRCR, VECTCLRACTIVE, 1, 1)
FIELD(V7M_AIRCR, SYSRESETREQ, 2, 1)
FIELD(V7M_AIRCR, SYSRESETREQS, 3, 1)
FIELD(V7M_AIRCR, PRIGROUP, 8, 3)
FIELD(V7M_AIRCR, BFHFNMINS, 13, 1)
FIELD(V7M_AIRCR, PRIS, 14, 1)
FIELD(V7M_AIRCR, ENDIANNESS, 15, 1)
FIELD(V7M_AIRCR, VECTKEY, 16, 16)

/* V7M CFSR bits for MMFSR */
FIELD(V7M_CFSR, IACCVIOL, 0, 1)
FIELD(V7M_CFSR, DACCVIOL, 1, 1)
FIELD(V7M_CFSR, MUNSTKERR, 3, 1)
FIELD(V7M_CFSR, MSTKERR, 4, 1)
FIELD(V7M_CFSR, MLSPERR, 5, 1)
FIELD(V7M_CFSR, MMARVALID, 7, 1)

/* V7M CFSR bits for BFSR */
FIELD(V7M_CFSR, IBUSERR, 8 + 0, 1)
FIELD(V7M_CFSR, PRECISERR, 8 + 1, 1)
FIELD(V7M_CFSR, IMPRECISERR, 8 + 2, 1)
FIELD(V7M_CFSR, UNSTKERR, 8 + 3, 1)
FIELD(V7M_CFSR, STKERR, 8 + 4, 1)
FIELD(V7M_CFSR, LSPERR, 8 + 5, 1)
FIELD(V7M_CFSR, BFARVALID, 8 + 7, 1)

/* V7M CFSR bits for UFSR */
FIELD(V7M_CFSR, UNDEFINSTR, 16 + 0, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)

/* V7M CFSR bit masks covering all of the subregister bits */
FIELD(V7M_CFSR, MMFSR, 0, 8)
FIELD(V7M_CFSR, BFSR, 8, 8)
FIELD(V7M_CFSR, UFSR, 16, 16)

/* V7M HFSR bits */
FIELD(V7M_HFSR, VECTTBL, 1, 1)
FIELD(V7M_HFSR, FORCED, 30, 1)
FIELD(V7M_HFSR, DEBUGEVT, 31, 1)

/* V7M DFSR bits */
FIELD(V7M_DFSR, HALTED, 0, 1)
FIELD(V7M_DFSR, BKPT, 1, 1)
FIELD(V7M_DFSR, DWTTRAP, 2, 1)
FIELD(V7M_DFSR, VCATCH, 3, 1)
FIELD(V7M_DFSR, EXTERNAL, 4, 1)

/* v7M MPU_CTRL bits */
FIELD(V7M_MPU_CTRL, ENABLE, 0, 1)
FIELD(V7M_MPU_CTRL, HFNMIENA, 1, 1)
FIELD(V7M_MPU_CTRL, PRIVDEFENA, 2, 1)
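/* Illustrative sketch of how these definitions get used, assuming the
 * FIELD()/FIELD_EX32() helpers from "hw/registerfields.h":
 *
 *     uint32_t cfsr = env->v7m.cfsr[env->v7m.secure];
 *     if (FIELD_EX32(cfsr, V7M_CFSR, UNDEFINSTR)) {
 *         ... an UNDEFINSTR UsageFault is recorded ...
 *     }
 */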
/* If adding a feature bit which corresponds to a Linux ELF
 * HWCAP bit, remember to update the feature-bit-to-hwcap
 * mapping in linux-user/elfload.c:get_elf_hwcap().
 */
enum arm_features {
    ARM_FEATURE_AUXCR,  /* ARM1026 Auxiliary control register.  */
    ARM_FEATURE_XSCALE, /* Intel XScale extensions.  */
    ARM_FEATURE_IWMMXT, /* Intel iwMMXt extension.  */
    ARM_FEATURE_PMSA,   /* no MMU; may have Memory Protection Unit */
    ARM_FEATURE_VFP_FP16,
    ARM_FEATURE_THUMB_DIV, /* divide supported in Thumb encoding */
    ARM_FEATURE_M, /* Microcontroller profile.  */
    ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling.  */
    ARM_FEATURE_THUMB2EE,
    ARM_FEATURE_V7MP,    /* v7 Multiprocessing Extensions */
    ARM_FEATURE_STRONGARM,
    ARM_FEATURE_VAPA, /* cp15 VA to PA lookups */
    ARM_FEATURE_ARM_DIV, /* divide supported in ARM encoding */
    ARM_FEATURE_VFP4, /* VFPv4 (implies that NEON is v2) */
    ARM_FEATURE_GENERIC_TIMER,
    ARM_FEATURE_MVFR, /* Media and VFP Feature Registers 0 and 1 */
    ARM_FEATURE_DUMMY_C15_REGS, /* RAZ/WI all of cp15 crn=15 */
    ARM_FEATURE_CACHE_TEST_CLEAN, /* 926/1026 style test-and-clean ops */
    ARM_FEATURE_CACHE_DIRTY_REG, /* 1136/1176 cache dirty status register */
    ARM_FEATURE_CACHE_BLOCK_OPS, /* v6 optional cache block operations */
    ARM_FEATURE_MPIDR, /* has cp15 MPIDR */
    ARM_FEATURE_PXN, /* has Privileged Execute Never bit */
    ARM_FEATURE_LPAE, /* has Large Physical Address Extension */
    ARM_FEATURE_AARCH64, /* supports 64 bit mode */
    ARM_FEATURE_V8_AES, /* implements AES part of v8 Crypto Extensions */
    ARM_FEATURE_CBAR, /* has cp15 CBAR */
    ARM_FEATURE_CRC, /* ARMv8 CRC instructions */
    ARM_FEATURE_CBAR_RO, /* has cp15 CBAR and it is read-only */
    ARM_FEATURE_EL2, /* has EL2 Virtualization support */
    ARM_FEATURE_EL3, /* has EL3 Secure monitor support */
    ARM_FEATURE_V8_SHA1, /* implements SHA1 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_SHA256, /* implements SHA256 part of v8 Crypto Extensions */
    ARM_FEATURE_V8_PMULL, /* implements PMULL part of v8 Crypto Extensions */
    ARM_FEATURE_THUMB_DSP, /* DSP insns supported in the Thumb encodings */
    ARM_FEATURE_PMU, /* has PMU support */
    ARM_FEATURE_VBAR, /* has cp15 VBAR */
    ARM_FEATURE_M_SECURITY, /* M profile Security Extension */
    ARM_FEATURE_JAZELLE, /* has (trivial) Jazelle implementation */
};
static inline int arm_feature(CPUARMState *env, int feature)
{
    return (env->features & (1ULL << feature)) != 0;
}
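/* For example (illustrative only), a caller can gate banked M profile
 * register handling on the Security Extension feature bit:
 *
 *     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
 *         ... both elements of the banked register arrays are live ...
 *     }
 */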
#if !defined(CONFIG_USER_ONLY)
/* Return true if exception levels below EL3 are in secure state,
 * or would be following an exception return to that level.
 * Unlike arm_is_secure() (which is always a question about the
 * _current_ state of the CPU) this doesn't care about the current
 * EL or mode.
 */
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return !(env->cp15.scr_el3 & SCR_NS);
    } else {
        /* If EL3 is not supported then the secure state is implementation
         * defined, in which case QEMU defaults to non-secure.
         */
        return false;
    }
}
/* Return true if the CPU is AArch64 EL3 or AArch32 Mon */
static inline bool arm_is_el3_or_mon(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        if (is_a64(env) && extract32(env->pstate, 2, 2) == 3) {
            /* CPU currently in AArch64 state and EL3 */
            return true;
        } else if (!is_a64(env) &&
                   (env->uncached_cpsr & CPSR_M) == ARM_CPU_MODE_MON) {
            /* CPU currently in AArch32 state and monitor mode */
            return true;
        }
    }
    return false;
}

/* Return true if the processor is in secure state */
static inline bool arm_is_secure(CPUARMState *env)
{
    if (arm_is_el3_or_mon(env)) {
        return true;
    }
    return arm_is_secure_below_el3(env);
}
#else
static inline bool arm_is_secure_below_el3(CPUARMState *env)
{
    return false;
}

static inline bool arm_is_secure(CPUARMState *env)
{
    return false;
}
#endif
/* Return true if the specified exception level is running in AArch64 state. */
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
{
    /* This isn't valid for EL0 (if we're in EL0, is_a64() is what you want,
     * and if we're not in EL0 then the state of EL0 isn't well defined.)
     */
    assert(el >= 1 && el <= 3);
    bool aa64 = arm_feature(env, ARM_FEATURE_AARCH64);

    /* The highest exception level is always at the maximum supported
     * register width, and then lower levels have a register width controlled
     * by bits in the SCR or HCR registers.
     */
    if (el == arm_highest_el(env)) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL3)) {
        aa64 = aa64 && (env->cp15.scr_el3 & SCR_RW);
    }

    if (el == 2) {
        return aa64;
    }

    if (arm_feature(env, ARM_FEATURE_EL2) && !arm_is_secure_below_el3(env)) {
        aa64 = aa64 && (env->cp15.hcr_el2 & HCR_RW);
    }

    return aa64;
}
/* Function for determining whether guest cp register reads and writes should
 * access the secure or non-secure bank of a cp register. When EL3 is
 * operating in AArch32 state, the NS-bit determines whether the secure
 * instance of a cp register should be used. When EL3 is AArch64 (or if
 * it doesn't exist at all) then there is no register banking, and all
 * accesses are to the non-secure version.
 */
static inline bool access_secure_reg(CPUARMState *env)
{
    bool ret = (arm_feature(env, ARM_FEATURE_EL3) &&
                !arm_el_is_aa64(env, 3) &&
                !(env->cp15.scr_el3 & SCR_NS));

    return ret;
}
/* Macros for accessing a specified CP register bank */
#define A32_BANKED_REG_GET(_env, _regname, _secure)    \
    ((_secure) ? (_env)->cp15._regname##_s : (_env)->cp15._regname##_ns)

#define A32_BANKED_REG_SET(_env, _regname, _secure, _val)   \
    do {                                                    \
        if (_secure) {                                      \
            (_env)->cp15._regname##_s = (_val);             \
        } else {                                            \
            (_env)->cp15._regname##_ns = (_val);            \
        }                                                   \
    } while (0)
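/* Illustrative usage sketch: reading and updating a banked cp15 register
 * pair, here 'contextidr' (whose _s/_ns fields appear in the cp15 struct
 * above); the _secure argument selects which bank is touched:
 *
 *     uint64_t v = A32_BANKED_REG_GET(env, contextidr, arm_is_secure(env));
 *     A32_BANKED_REG_SET(env, contextidr, arm_is_secure(env), v);
 */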
/* Macros for automatically accessing a specific CP register bank depending on
 * the current secure state of the system. These macros are not intended for
 * supporting instruction translation reads/writes as these are dependent
 * solely on the SCR.NS bit and not the mode.
 */
#define A32_BANKED_CURRENT_REG_GET(_env, _regname)                  \
    A32_BANKED_REG_GET((_env), _regname,                            \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)))

#define A32_BANKED_CURRENT_REG_SET(_env, _regname, _val)                    \
    A32_BANKED_REG_SET((_env), _regname,                                    \
                       (arm_is_secure(_env) && !arm_el_is_aa64((_env), 3)), \
                       (_val))
void arm_cpu_list(FILE *f, fprintf_function cpu_fprintf);
uint32_t arm_phys_excp_target_el(CPUState *cs, uint32_t excp_idx,
                                 uint32_t cur_el, bool secure);
/* Interface between CPU and Interrupt controller.  */
#ifndef CONFIG_USER_ONLY
bool armv7m_nvic_can_take_pending_exception(void *opaque);
#else
static inline bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    return true;
}
#endif
void armv7m_nvic_set_pending(void *opaque, int irq);
void armv7m_nvic_acknowledge_irq(void *opaque);
/**
 * armv7m_nvic_complete_irq: complete specified interrupt or exception
 * @opaque: the NVIC
 * @irq: the exception number to complete
 *
 * Returns: -1 if the irq was not active
 *           1 if completing this irq brought us back to base (no active irqs)
 *           0 if there is still an irq active after this one was completed
 * (Ignoring -1, this is the same as the RETTOBASE value before completion.)
 */
int armv7m_nvic_complete_irq(void *opaque, int irq);
/**
 * armv7m_nvic_raw_execution_priority: return the raw execution priority
 * @opaque: the NVIC
 *
 * Returns: the raw execution priority as defined by the v8M architecture.
 * This is the execution priority minus the effects of AIRCR.PRIS,
 * and minus any PRIMASK/FAULTMASK/BASEPRI priority boosting.
 * (v8M ARM ARM I_PKLD.)
 */
int armv7m_nvic_raw_execution_priority(void *opaque);
/* Interface for defining coprocessor registers.
 * Registers are defined in tables of arm_cp_reginfo structs
 * which are passed to define_arm_cp_regs().
 */

/* When looking up a coprocessor register we look for it
 * via an integer which encodes all of:
 *  coprocessor number
 *  Crn, Crm, opc1, opc2 fields
 *  32 or 64 bit register (ie is it accessed via MRC/MCR
 *    or via MRRC/MCRR?)
 *  non-secure/secure bank (AArch32 only)
 * We allow 4 bits for opc1 because MRRC/MCRR have a 4 bit field.
 * (In this case crn and opc2 should be zero.)
 * For AArch64, there is no 32/64 bit size distinction;
 * instead all registers have a 2 bit op0, 3 bit op1 and op2,
 * and 4 bit CRn and CRm. The encoding patterns are chosen
 * to be easy to convert to and from the KVM encodings, and also
 * so that the hashtable can contain both AArch32 and AArch64
 * registers (to allow for interprocessing where we might run
 * 32 bit code on a 64 bit core).
 */
/* This bit is private to our hashtable cpreg; in KVM register
 * IDs the AArch64/32 distinction is the KVM_REG_ARM/ARM64
 * in the upper bits of the 64 bit ID.
 */
#define CP_REG_AA64_SHIFT 28
#define CP_REG_AA64_MASK (1 << CP_REG_AA64_SHIFT)

/* To enable banking of coprocessor registers depending on ns-bit we
 * add a bit to distinguish between secure and non-secure cpregs in the
 * hashtable.
 */
#define CP_REG_NS_SHIFT 29
#define CP_REG_NS_MASK (1 << CP_REG_NS_SHIFT)
#define ENCODE_CP_REG(cp, is64, ns, crn, crm, opc1, opc2)   \
    ((ns) << CP_REG_NS_SHIFT | ((cp) << 16) | ((is64) << 15) |   \
     ((crn) << 11) | ((crm) << 7) | ((opc1) << 3) | (opc2))

#define ENCODE_AA64_CP_REG(cp, crn, crm, op0, op1, op2) \
    (CP_REG_AA64_MASK |                                 \
     ((cp) << CP_REG_ARM_COPROC_SHIFT) |                \
     ((op0) << CP_REG_ARM64_SYSREG_OP0_SHIFT) |         \
     ((op1) << CP_REG_ARM64_SYSREG_OP1_SHIFT) |         \
     ((crn) << CP_REG_ARM64_SYSREG_CRN_SHIFT) |         \
     ((crm) << CP_REG_ARM64_SYSREG_CRM_SHIFT) |         \
     ((op2) << CP_REG_ARM64_SYSREG_OP2_SHIFT))
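/* Worked example (illustrative): the AArch32 SCTLR is cp15, 32 bit,
 * crn=1, crm=0, opc1=0, opc2=0, so the non-secure bank's key is
 *
 *     uint32_t key = ENCODE_CP_REG(15, 0, 1, 1, 0, 0, 0);
 *
 * i.e. 0x200f0800: the NS bit (bit 29), 15 << 16 for the coprocessor,
 * is64 clear at bit 15, and crn=1 at bit 11.
 */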
/* Convert a full 64 bit KVM register ID to the truncated 32 bit
 * version used as a key for the coprocessor register hashtable
 */
static inline uint32_t kvm_to_cpreg_id(uint64_t kvmid)
{
    uint32_t cpregid = kvmid;
    if ((kvmid & CP_REG_ARCH_MASK) == CP_REG_ARM64) {
        cpregid |= CP_REG_AA64_MASK;
    } else {
        if ((kvmid & CP_REG_SIZE_MASK) == CP_REG_SIZE_U64) {
            cpregid |= (1 << 15);
        }

        /* KVM is always non-secure so add the NS flag on AArch32 register
         * entries.
         */
        cpregid |= 1 << CP_REG_NS_SHIFT;
    }
    return cpregid;
}

/* Convert a truncated 32 bit hashtable key into the full
 * 64 bit KVM register ID.
 */
static inline uint64_t cpreg_to_kvm_id(uint32_t cpregid)
{
    uint64_t kvmid;

    if (cpregid & CP_REG_AA64_MASK) {
        kvmid = cpregid & ~CP_REG_AA64_MASK;
        kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM64;
    } else {
        kvmid = cpregid & ~(1 << 15);
        if (cpregid & (1 << 15)) {
            kvmid |= CP_REG_SIZE_U64 | CP_REG_ARM;
        } else {
            kvmid |= CP_REG_SIZE_U32 | CP_REG_ARM;
        }
    }
    return kvmid;
}
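/* Sketch of the key layout these helpers produce (an observation, not new
 * API): bit 28 marks an AArch64 register and bit 29 the non-secure bank,
 * while for AArch32 registers bit 15 records whether the register is
 * 64 bit (MRRC/MCRR), since the KVM size field itself lives above bit 31
 * and is lost in the truncation to 32 bits.
 */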
/* ARMCPRegInfo type field bits. If the SPECIAL bit is set this is a
 * special-behaviour cp reg and bits [15..8] indicate what behaviour
 * it has. Otherwise it is a simple cp reg, where CONST indicates that
 * TCG can assume the value to be constant (ie load at translate time)
 * and 64BIT indicates a 64 bit wide coprocessor register. SUPPRESS_TB_END
 * indicates that the TB should not be ended after a write to this register
 * (the default is that the TB ends after cp writes). OVERRIDE permits
 * a register definition to override a previous definition for the
 * same (cp, is64, crn, crm, opc1, opc2) tuple: either the new or the
 * old must have the OVERRIDE bit set.
 * ALIAS indicates that this register is an alias view of some underlying
 * state which is also visible via another register, and that the other
 * register is handling migration and reset; registers marked ALIAS will not be
 * migrated but may have their state set by syncing of register state from KVM.
 * NO_RAW indicates that this register has no underlying state and does not
 * support raw access for state saving/loading; it will not be used for either
 * migration or KVM state synchronization. (Typically this is for "registers"
 * which are actually used as instructions for cache maintenance and so on.)
 * IO indicates that this register does I/O and therefore its accesses
 * need to be surrounded by gen_io_start()/gen_io_end(). In particular,
 * registers which implement clocks or timers require this.
 */
#define ARM_CP_SPECIAL 1
#define ARM_CP_CONST 2
#define ARM_CP_64BIT 4
#define ARM_CP_SUPPRESS_TB_END 8
#define ARM_CP_OVERRIDE 16
#define ARM_CP_ALIAS 32
#define ARM_CP_IO 64
#define ARM_CP_NO_RAW 128
#define ARM_CP_NOP (ARM_CP_SPECIAL | (1 << 8))
#define ARM_CP_WFI (ARM_CP_SPECIAL | (2 << 8))
#define ARM_CP_NZCV (ARM_CP_SPECIAL | (3 << 8))
#define ARM_CP_CURRENTEL (ARM_CP_SPECIAL | (4 << 8))
#define ARM_CP_DC_ZVA (ARM_CP_SPECIAL | (5 << 8))
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
#define ARM_CP_FLAG_MASK 0xff
/* Valid values for ARMCPRegInfo state field, indicating which of
 * the AArch32 and AArch64 execution states this register is visible in.
 * If the reginfo doesn't explicitly specify then it is AArch32 only.
 * If the reginfo is declared to be visible in both states then a second
 * reginfo is synthesised for the AArch32 view of the AArch64 register,
 * such that the AArch32 view is the lower 32 bits of the AArch64 one.
 * Note that we rely on the values of these enums as we iterate through
 * the various states in some places.
 */
enum {
    ARM_CP_STATE_AA32 = 0,
    ARM_CP_STATE_AA64 = 1,
    ARM_CP_STATE_BOTH = 2,
};
/* ARM CP register secure state flags. These flags identify security state
 * attributes for a given CP register entry.
 * The existence of both or neither secure and non-secure flags indicates that
 * the register has both a secure and non-secure hash entry. A single one of
 * these flags causes the register to only be hashed for the specified
 * security state.
 * Although definitions may have any combination of the S/NS bits, each
 * registered entry will only have one to identify whether the entry is secure
 * or non-secure.
 */
enum {
    ARM_CP_SECSTATE_S =  (1 << 0), /* bit[0]: Secure state register */
    ARM_CP_SECSTATE_NS = (1 << 1), /* bit[1]: Non-secure state register */
};
/* Return true if cptype is a valid type field. This is used to try to
 * catch errors where the sentinel has been accidentally left off the end
 * of a list of registers.
 */
static inline bool cptype_valid(int cptype)
{
    return ((cptype & ~ARM_CP_FLAG_MASK) == 0)
        || ((cptype & ARM_CP_SPECIAL) &&
            ((cptype & ~ARM_CP_FLAG_MASK) <= ARM_LAST_SPECIAL));
}
/* Access rights:
 * We define bits for Read and Write access for what rev C of the v7-AR ARM ARM
 * defines as PL0 (user), PL1 (fiq/irq/svc/abt/und/sys, ie privileged), and
 * PL2 (hyp). The other level which has Read and Write bits is Secure PL1
 * (ie any of the privileged modes in Secure state, or Monitor mode).
 * If a register is accessible in one privilege level it's always accessible
 * in higher privilege levels too. Since "Secure PL1" also follows this rule
 * (ie anything visible in PL2 is visible in S-PL1, some things are only
 * visible in S-PL1) but "Secure PL1" is a bit of a mouthful, we bend the
 * terminology a little and call this PL3.
 * In AArch64 things are somewhat simpler as the PLx bits line up exactly
 * with the ELx exception levels.
 *
 * If access permissions for a register are more complex than can be
 * described with these bits, then use a laxer set of restrictions, and
 * do the more restrictive/complex check inside a helper function.
 */
#define PL3_R 0x80
#define PL3_W 0x40
#define PL2_R (0x20 | PL3_R)
#define PL2_W (0x10 | PL3_W)
#define PL1_R (0x08 | PL2_R)
#define PL1_W (0x04 | PL2_W)
#define PL0_R (0x02 | PL1_R)
#define PL0_W (0x01 | PL1_W)

#define PL3_RW (PL3_R | PL3_W)
#define PL2_RW (PL2_R | PL2_W)
#define PL1_RW (PL1_R | PL1_W)
#define PL0_RW (PL0_R | PL0_W)
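/* For example, PL0_R expands to 0x02 | 0x08 | 0x20 | 0x80 == 0xaa: a
 * register readable at PL0 is, by construction, also readable at PL1,
 * PL2 and PL3, matching the rule described above.
 */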
/* Return the highest implemented Exception Level */
static inline int arm_highest_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_EL3)) {
        return 3;
    }
    if (arm_feature(env, ARM_FEATURE_EL2)) {
        return 2;
    }
    return 1;
}
/* Return true if a v7M CPU is in Handler mode */
static inline bool arm_v7m_is_handler_mode(CPUARMState *env)
{
    return env->v7m.exception != 0;
}
/* Return the current Exception Level (as per ARMv8; note that this differs
 * from the ARMv7 Privilege Level).
 */
static inline int arm_current_el(CPUARMState *env)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return arm_v7m_is_handler_mode(env) ||
            !(env->v7m.control[env->v7m.secure] & 1);
    }

    if (is_a64(env)) {
        return extract32(env->pstate, 2, 2);
    }

    switch (env->uncached_cpsr & 0x1f) {
    case ARM_CPU_MODE_USR:
        return 0;
    case ARM_CPU_MODE_HYP:
        return 2;
    case ARM_CPU_MODE_MON:
        return 3;
    default:
        if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
            /* If EL3 is 32-bit then all secure privileged modes run in
             * EL3
             */
            return 3;
        }

        return 1;
    }
}
typedef struct ARMCPRegInfo ARMCPRegInfo;

typedef enum CPAccessResult {
    /* Access is permitted */
    CP_ACCESS_OK = 0,
    /* Access fails due to a configurable trap or enable which would
     * result in a categorized exception syndrome giving information about
     * the failing instruction (ie syndrome category 0x3, 0x4, 0x5, 0x6,
     * 0xc or 0x18). The exception is taken to the usual target EL (EL1 or
     * PL1 if in EL0, otherwise to the current EL).
     */
    CP_ACCESS_TRAP = 1,
    /* Access fails and results in an exception syndrome 0x0 ("uncategorized").
     * Note that this is not a catch-all case -- the set of cases which may
     * result in this failure is specifically defined by the architecture.
     */
    CP_ACCESS_TRAP_UNCATEGORIZED = 2,
    /* As CP_ACCESS_TRAP, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_EL2 = 3,
    CP_ACCESS_TRAP_EL3 = 4,
    /* As CP_ACCESS_UNCATEGORIZED, but for traps directly to EL2 or EL3 */
    CP_ACCESS_TRAP_UNCATEGORIZED_EL2 = 5,
    CP_ACCESS_TRAP_UNCATEGORIZED_EL3 = 6,
    /* Access fails and results in an exception syndrome for an FP access,
     * trapped directly to EL2 or EL3
     */
    CP_ACCESS_TRAP_FP_EL2 = 7,
    CP_ACCESS_TRAP_FP_EL3 = 8,
} CPAccessResult;
/* Access functions for coprocessor registers. These cannot fail and
 * may not raise exceptions.
 */
typedef uint64_t CPReadFn(CPUARMState *env, const ARMCPRegInfo *opaque);
typedef void CPWriteFn(CPUARMState *env, const ARMCPRegInfo *opaque,
                       uint64_t value);
/* Access permission check functions for coprocessor registers. */
typedef CPAccessResult CPAccessFn(CPUARMState *env,
                                  const ARMCPRegInfo *opaque,
                                  bool isread);
/* Hook function for register reset */
typedef void CPResetFn(CPUARMState *env, const ARMCPRegInfo *opaque);
#define CP_ANY 0xff

/* Definition of an ARM coprocessor register */
struct ARMCPRegInfo {
    /* Name of register (useful mainly for debugging, need not be unique) */
    const char *name;
    /* Location of register: coprocessor number and (crn,crm,opc1,opc2)
     * tuple. Any of crm, opc1 and opc2 may be CP_ANY to indicate a
     * 'wildcard' field -- any value of that field in the MRC/MCR insn
     * will be decoded to this register. The register read and write
     * callbacks will be passed an ARMCPRegInfo with the crn/crm/opc1/opc2
     * used by the program, so it is possible to register a wildcard and
     * then behave differently on read/write if necessary.
     * For 64 bit registers, only crm and opc1 are relevant; crn and opc2
     * must both be zero.
     * For AArch64-visible registers, opc0 is also used.
     * Since there are no "coprocessors" in AArch64, cp is purely used as a
     * way to distinguish (for KVM's benefit) guest-visible system registers
     * from demuxed ones provided to preserve the "no side effects on
     * KVM register read/write from QEMU" semantics. cp==0x13 is guest
     * visible (to match KVM's encoding); cp==0 will be converted to
     * cp==0x13 when the ARMCPRegInfo is registered, for convenience.
     */
    uint8_t cp;
    uint8_t crn;
    uint8_t crm;
    uint8_t opc0;
    uint8_t opc1;
    uint8_t opc2;
1817 /* Execution state in which this register is visible: ARM_CP_STATE_* */
1819 /* Register type: ARM_CP_* bits/values */
1821 /* Access rights: PL*_[RW] */
1823 /* Security state: ARM_CP_SECSTATE_* bits/values */
1825 /* The opaque pointer passed to define_arm_cp_regs_with_opaque() when
1826 * this register was defined: can be used to hand data through to the
1827 * register read/write functions, since they are passed the ARMCPRegInfo*.
1830 /* Value of this register, if it is ARM_CP_CONST. Otherwise, if
1831 * fieldoffset is non-zero, the reset value of the register.
1833 uint64_t resetvalue;
1834 /* Offset of the field in CPUARMState for this register.
1836 * This is not needed if either:
1837 * 1. type is ARM_CP_CONST or one of the ARM_CP_SPECIALs
1838 * 2. both readfn and writefn are specified
1840 ptrdiff_t fieldoffset; /* offsetof(CPUARMState, field) */
1842 /* Offsets of the secure and non-secure fields in CPUARMState for the
1843 * register if it is banked. These fields are only used during the static
1844 * registration of a register. During hashing the bank associated
1845 * with a given security state is copied to fieldoffset which is used from
1848 * It is expected that register definitions use either fieldoffset or
1849 * bank_fieldoffsets in the definition but not both. It is also expected
1850 * that both bank offsets are set when defining a banked register. This
1851 * use indicates that a register is banked.
1853 ptrdiff_t bank_fieldoffsets[2];
1855 /* Function for making any access checks for this register in addition to
1856 * those specified by the 'access' permissions bits. If NULL, no extra
1857 * checks required. The access check is performed at runtime, not at
1860 CPAccessFn *accessfn;
1861 /* Function for handling reads of this register. If NULL, then reads
1862 * will be done by loading from the offset into CPUARMState specified
1866 /* Function for handling writes of this register. If NULL, then writes
1867 * will be done by writing to the offset into CPUARMState specified
1871 /* Function for doing a "raw" read; used when we need to copy
1872 * coprocessor state to the kernel for KVM or out for
1873 * migration. This only needs to be provided if there is also a
1874 * readfn and it has side effects (for instance clear-on-read bits).
1876 CPReadFn *raw_readfn;
1877 /* Function for doing a "raw" write; used when we need to copy KVM
1878 * kernel coprocessor state into userspace, or for inbound
1879 * migration. This only needs to be provided if there is also a
1880 * writefn and it masks out "unwritable" bits or has write-one-to-clear
1881 * or similar behaviour.
1883 CPWriteFn *raw_writefn;
1884 /* Function for resetting the register. If NULL, then reset will be done
1885 * by writing resetvalue to the field specified in fieldoffset. If
1886 * fieldoffset is 0 then no reset will be done.
/* Macros which are lvalues for the field in CPUARMState for the
 * ARMCPRegInfo *ri.
 */
#define CPREG_FIELD32(env, ri) \
    (*(uint32_t *)((char *)(env) + (ri)->fieldoffset))
#define CPREG_FIELD64(env, ri) \
    (*(uint64_t *)((char *)(env) + (ri)->fieldoffset))
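/* Illustrative sketch: CPREG_FIELD64 expands to an lvalue, so a trivial
 * generic write handler (invented name, mirroring what QEMU's raw
 * accessors do) can assign straight into the CPUARMState field that
 * ri->fieldoffset names:
 */
static inline void cpreg_field_write_sketch(CPUARMState *env,
                                            const ARMCPRegInfo *ri,
                                            uint64_t value)
{
    CPREG_FIELD64(env, ri) = value;
}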

#define REGINFO_SENTINEL { .type = ARM_CP_SENTINEL }

void define_arm_cp_regs_with_opaque(ARMCPU *cpu,
                                    const ARMCPRegInfo *regs, void *opaque);
void define_one_arm_cp_reg_with_opaque(ARMCPU *cpu,
                                       const ARMCPRegInfo *regs, void *opaque);
static inline void define_arm_cp_regs(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_arm_cp_regs_with_opaque(cpu, regs, 0);
}
static inline void define_one_arm_cp_reg(ARMCPU *cpu, const ARMCPRegInfo *regs)
{
    define_one_arm_cp_reg_with_opaque(cpu, regs, 0);
}
const ARMCPRegInfo *get_arm_cp_reginfo(GHashTable *cpregs, uint32_t encoded_cp);
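/* Illustrative sketch (the register, its encoding and the function name
 * are invented; PL1_R and ARM_CP_CONST are assumed to be the access/type
 * macros defined earlier in this header): a CPU init function could
 * describe a constant read-only ID register and hand the list to
 * define_arm_cp_regs(). The REGINFO_SENTINEL entry terminates the list.
 */
static inline void define_demo_id_reg_sketch(ARMCPU *cpu)
{
    static const ARMCPRegInfo demo_regs[] = {
        { .name = "DEMO_ID", .cp = 15, .crn = 0, .crm = 3,
          .opc1 = 0, .opc2 = 7, .access = PL1_R,
          .type = ARM_CP_CONST, .resetvalue = 0x12345678 },
        REGINFO_SENTINEL
    };

    define_arm_cp_regs(cpu, demo_regs);
}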

/* CPWriteFn that can be used to implement writes-ignored behaviour */
void arm_cp_write_ignore(CPUARMState *env, const ARMCPRegInfo *ri,
                         uint64_t value);
/* CPReadFn that can be used for read-as-zero behaviour */
uint64_t arm_cp_read_zero(CPUARMState *env, const ARMCPRegInfo *ri);

/* CPResetFn that does nothing, for use if no reset is required even
 * if fieldoffset is non zero.
 */
void arm_cp_reset_ignore(CPUARMState *env, const ARMCPRegInfo *opaque);

/* Return true if this reginfo struct's field in the cpu state struct
 * is 64 bits wide.
 */
static inline bool cpreg_field_is_64bit(const ARMCPRegInfo *ri)
{
    return (ri->state == ARM_CP_STATE_AA64) || (ri->type & ARM_CP_64BIT);
}

static inline bool cp_access_ok(int current_el,
                                const ARMCPRegInfo *ri, int isread)
{
    return (ri->access >> ((current_el * 2) + isread)) & 1;
}
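/* Worked example of the encoding checked above: each EL owns a
 * (write, read) bit pair in ri->access, with the write bit at bit
 * (el * 2) and the read bit at bit (el * 2) + 1. A register declared
 * with .access = PL1_RW therefore passes cp_access_ok() in either
 * direction at EL1 and above, but fails both checks at EL0.
 */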

/* Raw read of a coprocessor register (as needed for migration, etc) */
uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);

/**
 * write_list_to_cpustate
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the cpreg_values list into the ARMCPUState structure.
 * This updates TCG's working data structures from KVM data or
 * from incoming migration state.
 *
 * Returns: true if all register values were updated correctly,
 * false if some register was unknown or could not be written.
 * Note that we do not stop early on failure -- we will attempt
 * writing all registers in the list.
 */
bool write_list_to_cpustate(ARMCPU *cpu);

/**
 * write_cpustate_to_list:
 * @cpu: ARMCPU
 *
 * For each register listed in the ARMCPU cpreg_indexes list, write
 * its value from the ARMCPUState structure into the cpreg_values list.
 * This is used to copy info from TCG's working data structures into
 * KVM or for outbound migration.
 *
 * Returns: true if all register values were read correctly,
 * false if some register was unknown or could not be read.
 * Note that we do not stop early on failure -- we will attempt
 * reading all registers in the list.
 */
bool write_cpustate_to_list(ARMCPU *cpu);
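/* Illustrative sketch (invented helper; kvm_arch_put_registers() and the
 * migration code are the real users of this pair): a full sync round-trip
 * copies TCG state out to the value list and then back in, and both legs
 * report failure without stopping early.
 */
static inline bool cpreg_list_roundtrip_sketch(ARMCPU *cpu)
{
    return write_cpustate_to_list(cpu) && write_list_to_cpustate(cpu);
}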

#define ARM_CPUID_TI915T      0x54029152
#define ARM_CPUID_TI925T      0x54029252

#if defined(CONFIG_USER_ONLY)
#define TARGET_PAGE_BITS 12
#else
/* ARMv7 and later CPUs have 4K pages minimum, but ARMv5 and v6
 * have to support 1K tiny pages.
 */
#define TARGET_PAGE_BITS_VARY
#define TARGET_PAGE_BITS_MIN 10
#endif

#if defined(TARGET_AARCH64)
# define TARGET_PHYS_ADDR_SPACE_BITS 48
# define TARGET_VIRT_ADDR_SPACE_BITS 64
#else
# define TARGET_PHYS_ADDR_SPACE_BITS 40
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif

static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
                                     unsigned int target_el)
{
    CPUARMState *env = cs->env_ptr;
    unsigned int cur_el = arm_current_el(env);
    bool secure = arm_is_secure(env);
    bool pstate_unmasked;
    int8_t unmasked = 0;

    /* Don't take exceptions if they target a lower EL.
     * This check should catch any exceptions that would not be taken
     * but left pending.
     */
    if (cur_el > target_el) {
        return false;
    }

    switch (excp_idx) {
    case EXCP_FIQ:
        pstate_unmasked = !(env->daif & PSTATE_F);
        break;
    case EXCP_IRQ:
        pstate_unmasked = !(env->daif & PSTATE_I);
        break;
    case EXCP_VFIQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
            /* VFIQs are only taken when virtualized (EL2 in use) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_F);
    case EXCP_VIRQ:
        if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
            /* VIRQs are only taken when virtualized (EL2 in use) and
             * non-secure.
             */
            return false;
        }
        return !(env->daif & PSTATE_I);
    default:
        g_assert_not_reached();
    }

    /* Use the target EL, current execution state and SCR/HCR settings to
     * determine whether the corresponding CPSR bit is used to mask the
     * interrupt.
     */
    if ((target_el > cur_el) && (target_el != 1)) {
        /* Exceptions targeting a higher EL may not be maskable */
        if (arm_feature(env, ARM_FEATURE_AARCH64)) {
            /* 64-bit masking rules are simple: exceptions to EL3
             * can't be masked, and exceptions to EL2 can only be
             * masked from Secure state. The HCR and SCR settings
             * don't affect the masking logic, only the interrupt routing.
             */
            if (target_el == 3 || !secure) {
                unmasked = 1;
            }
        } else {
            /* The old 32-bit-only environment has a more complicated
             * masking setup. HCR and SCR bits not only affect interrupt
             * routing but also change the behaviour of masking.
             */
            bool hcr, scr;

            switch (excp_idx) {
            case EXCP_FIQ:
                /* If FIQs are routed to EL3 or EL2 then there are cases
                 * where we override the CPSR.F in determining if the
                 * exception is masked or not. If neither of these are set
                 * then we fall back to the CPSR.F setting; otherwise we
                 * further assess the state below.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_FMO);
                scr = (env->cp15.scr_el3 & SCR_FIQ);

                /* When EL3 is 32-bit, the SCR.FW bit controls whether the
                 * CPSR.F bit masks FIQ interrupts when taken in non-secure
                 * state. If SCR.FW is set then FIQs can be masked by CPSR.F
                 * when non-secure but only when FIQs are only routed to EL3.
                 */
                scr = scr && !((env->cp15.scr_el3 & SCR_FW) && !hcr);
                break;
            case EXCP_IRQ:
                /* When EL3 execution state is 32-bit, if HCR.IMO is set then
                 * we may override the CPSR.I masking when in non-secure
                 * state. The SCR.IRQ setting has already been taken into
                 * consideration when setting the target EL, so it does not
                 * have a further effect here.
                 */
                hcr = (env->cp15.hcr_el2 & HCR_IMO);
                scr = false;
                break;
            default:
                g_assert_not_reached();
            }

            if ((scr || hcr) && !secure) {
                unmasked = 1;
            }
        }
    }

    /* The PSTATE bits only mask the interrupt if we have not overridden
     * the ability above.
     */
    return unmasked || pstate_unmasked;
}

#define cpu_init(cpu_model) cpu_generic_init(TYPE_ARM_CPU, cpu_model)

#define ARM_CPU_TYPE_SUFFIX "-" TYPE_ARM_CPU
#define ARM_CPU_TYPE_NAME(name) (name ARM_CPU_TYPE_SUFFIX)

#define cpu_signal_handler cpu_arm_signal_handler
#define cpu_list arm_cpu_list

/* ARM has the following "translation regimes" (as the ARM ARM calls them):
 *
 * If EL3 is 64-bit:
 *  + NonSecure EL1 & 0 stage 1
 *  + NonSecure EL1 & 0 stage 2
 *  + NonSecure EL2
 *  + Secure EL1 & EL0
 *  + Secure EL3
 * If EL3 is 32-bit:
 *  + NonSecure PL1 & 0 stage 1
 *  + NonSecure PL1 & 0 stage 2
 *  + NonSecure PL2
 *  + Secure PL0 & PL1
 * (reminder: for 32 bit EL3, Secure PL1 is *EL3*, not EL1.)
 *
 * For QEMU, an mmu_idx is not quite the same as a translation regime because:
 *  1. we need to split the "EL1 & 0" regimes into two mmu_idxes, because they
 *     may differ in access permissions even if the VA->PA map is the same
 *  2. we want to cache in our TLB the full VA->IPA->PA lookup for a stage 1+2
 *     translation, which means that we have one mmu_idx that deals with two
 *     concatenated translation regimes [this sort of combined s1+2 TLB is
 *     architecturally permitted]
 *  3. we don't need to allocate an mmu_idx to translations that we won't be
 *     handling via the TLB. The only way to do a stage 1 translation without
 *     the immediate stage 2 translation is via the ATS or AT system insns,
 *     which can be slow-pathed and always do a page table walk.
 *  4. we can also safely fold together the "32 bit EL3" and "64 bit EL3"
 *     translation regimes, because they map reasonably well to each other
 *     and they can't both be active at the same time.
 * This gives us the following list of mmu_idx values:
 *
 * NS EL0 (aka NS PL0) stage 1+2
 * NS EL1 (aka NS PL1) stage 1+2
 * NS EL2 (aka NS PL2)
 * S EL3 (aka S PL1)
 * S EL0 (aka S PL0)
 * S EL1 (not used if EL3 is 32 bit)
 * NS EL0+1 stage 2
 *
 * (The last of these is an mmu_idx because we want to be able to use the TLB
 * for the accesses done as part of a stage 1 page table walk, rather than
 * having to walk the stage 2 page table over and over.)
 *
 * R profile CPUs have an MPU, but can use the same set of MMU indexes
 * as A profile. They only need to distinguish NS EL0 and NS EL1 (and
 * NS EL2 if we ever model a Cortex-R52).
 *
 * M profile CPUs are rather different as they do not have a true MMU.
 * They have the following different MMU indexes:
 *  User
 *  Privileged
 *  Execution priority negative (this is like privileged, but the
 *  MPU HFNMIENA bit means that it may have different access permission
 *  check results to normal privileged code, so can't share a TLB).
 * If the CPU supports the v8M Security Extension then there are also:
 *  Secure User
 *  Secure Privileged
 *  Secure, execution priority negative
 *
 * The ARMMMUIdx and the mmu index value used by the core QEMU TLB code
 * are not quite the same -- different CPU types (most notably M profile
 * vs A/R profile) would like to use MMU indexes with different semantics,
 * but since we don't ever need to use all of those in a single CPU we
 * can avoid setting NB_MMU_MODES to more than 8. The lower bits of
 * ARMMMUIdx are the core TLB mmu index, and the higher bits are always
 * the same for any particular CPU.
 * Variables of type ARMMMUIdx are always full values, and the core
 * index values are in variables of type 'int'.
 *
 * Our enumeration includes at the end some entries which are not "true"
 * mmu_idx values in that they don't have corresponding TLBs and are only
 * valid for doing slow path page table walks.
 *
 * The constant names here are patterned after the general style of the names
 * of the AT/ATS operations.
 * The values used are carefully arranged to make mmu_idx => EL lookup easy.
 */
#define ARM_MMU_IDX_A 0x10 /* A profile */
#define ARM_MMU_IDX_NOTLB 0x20 /* does not have a TLB */
#define ARM_MMU_IDX_M 0x40 /* M profile */

#define ARM_MMU_IDX_TYPE_MASK (~0x7)
#define ARM_MMU_IDX_COREIDX_MASK 0x7

typedef enum ARMMMUIdx {
    ARMMMUIdx_S12NSE0 = 0 | ARM_MMU_IDX_A,
    ARMMMUIdx_S12NSE1 = 1 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E2 = 2 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1E3 = 3 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE0 = 4 | ARM_MMU_IDX_A,
    ARMMMUIdx_S1SE1 = 5 | ARM_MMU_IDX_A,
    ARMMMUIdx_S2NS = 6 | ARM_MMU_IDX_A,
    ARMMMUIdx_MUser = 0 | ARM_MMU_IDX_M,
    ARMMMUIdx_MPriv = 1 | ARM_MMU_IDX_M,
    ARMMMUIdx_MNegPri = 2 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSUser = 3 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSPriv = 4 | ARM_MMU_IDX_M,
    ARMMMUIdx_MSNegPri = 5 | ARM_MMU_IDX_M,
    /* Indexes below here don't have TLBs and are used only for AT system
     * instructions or for the first stage of an S12 page table walk.
     */
    ARMMMUIdx_S1NSE0 = 0 | ARM_MMU_IDX_NOTLB,
    ARMMMUIdx_S1NSE1 = 1 | ARM_MMU_IDX_NOTLB,
} ARMMMUIdx;

/* Bit macros for the core-mmu-index values for each index,
 * for use when calling tlb_flush_by_mmuidx() and friends.
 */
typedef enum ARMMMUIdxBit {
    ARMMMUIdxBit_S12NSE0 = 1 << 0,
    ARMMMUIdxBit_S12NSE1 = 1 << 1,
    ARMMMUIdxBit_S1E2 = 1 << 2,
    ARMMMUIdxBit_S1E3 = 1 << 3,
    ARMMMUIdxBit_S1SE0 = 1 << 4,
    ARMMMUIdxBit_S1SE1 = 1 << 5,
    ARMMMUIdxBit_S2NS = 1 << 6,
    ARMMMUIdxBit_MUser = 1 << 0,
    ARMMMUIdxBit_MPriv = 1 << 1,
    ARMMMUIdxBit_MNegPri = 1 << 2,
    ARMMMUIdxBit_MSUser = 1 << 3,
    ARMMMUIdxBit_MSPriv = 1 << 4,
    ARMMMUIdxBit_MSNegPri = 1 << 5,
} ARMMMUIdxBit;
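/* Illustrative usage sketch: TLB maintenance code passes an OR of these
 * bit macros to the core TLB API. Assuming a CPUState *cs and the
 * tlb_flush_by_mmuidx() declaration from "exec/exec-all.h", flushing the
 * cached NS EL1&0 stage 1+2 translations would look like:
 *
 *     tlb_flush_by_mmuidx(cs, ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0);
 */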

#define MMU_USER_IDX 0

static inline int arm_to_core_mmu_idx(ARMMMUIdx mmu_idx)
{
    return mmu_idx & ARM_MMU_IDX_COREIDX_MASK;
}

static inline ARMMMUIdx core_to_arm_mmu_idx(CPUARMState *env, int mmu_idx)
{
    if (arm_feature(env, ARM_FEATURE_M)) {
        return mmu_idx | ARM_MMU_IDX_M;
    }
    return mmu_idx | ARM_MMU_IDX_A;
}
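/* Illustrative sketch (invented helper): the two conversions above are a
 * lossless mask/or pair, so for any TLB-backed index valid on the current
 * CPU's profile the round trip through the core index is the identity.
 */
static inline bool arm_mmu_idx_roundtrip_ok_sketch(CPUARMState *env,
                                                   ARMMMUIdx mmu_idx)
{
    return core_to_arm_mmu_idx(env, arm_to_core_mmu_idx(mmu_idx)) == mmu_idx;
}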

/* Return the exception level we're running at if this is our mmu_idx */
static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
{
    switch (mmu_idx & ARM_MMU_IDX_TYPE_MASK) {
    case ARM_MMU_IDX_A:
        return mmu_idx & 3;
    case ARM_MMU_IDX_M:
        return (mmu_idx == ARMMMUIdx_MUser || mmu_idx == ARMMMUIdx_MSUser)
            ? 0 : 1;
    default:
        g_assert_not_reached();
    }
}

/* Determine the current mmu_idx to use for normal loads/stores */
static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
{
    int el = arm_current_el(env);

    if (arm_feature(env, ARM_FEATURE_M)) {
        ARMMMUIdx mmu_idx = el == 0 ? ARMMMUIdx_MUser : ARMMMUIdx_MPriv;

        /* Execution priority is negative if FAULTMASK is set or
         * we're in a HardFault or NMI handler.
         */
        if ((env->v7m.exception > 0 && env->v7m.exception <= 3)
            || env->v7m.faultmask[env->v7m.secure]) {
            mmu_idx = ARMMMUIdx_MNegPri;
        }

        if (env->v7m.secure) {
            mmu_idx += ARMMMUIdx_MSUser - ARMMMUIdx_MUser;
        }

        return arm_to_core_mmu_idx(mmu_idx);
    }

    if (el < 2 && arm_is_secure_below_el3(env)) {
        return arm_to_core_mmu_idx(ARMMMUIdx_S1SE0 + el);
    }
    return el;
}
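/* Worked example of the Secure adjustment above: the secure M-profile
 * indexes sit exactly ARMMMUIdx_MSUser - ARMMMUIdx_MUser (== 3) above
 * their non-secure counterparts in the enum, so adding that delta maps
 * MUser -> MSUser, MPriv -> MSPriv and MNegPri -> MSNegPri without
 * disturbing the ARM_MMU_IDX_M type bits.
 */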

/* Indexes used when registering address spaces with cpu_address_space_init */
typedef enum ARMASIdx {
    ARMASIdx_NS = 0,
    ARMASIdx_S = 1,
} ARMASIdx;

/* Return the Exception Level targeted by debug exceptions. */
static inline int arm_debug_target_el(CPUARMState *env)
{
    bool secure = arm_is_secure(env);
    bool route_to_el2 = false;

    if (arm_feature(env, ARM_FEATURE_EL2) && !secure) {
        route_to_el2 = env->cp15.hcr_el2 & HCR_TGE ||
                       env->cp15.mdcr_el2 & (1 << 8);
    }

    if (route_to_el2) {
        return 2;
    } else if (arm_feature(env, ARM_FEATURE_EL3) &&
               !arm_el_is_aa64(env, 3) && secure) {
        return 3;
    } else {
        return 1;
    }
}

static inline bool aa64_generate_debug_exceptions(CPUARMState *env)
{
    if (arm_is_secure(env)) {
        /* MDCR_EL3.SDD disables debug events from Secure state */
        if (extract32(env->cp15.mdcr_el3, 16, 1) != 0
            || arm_current_el(env) == 3) {
            return false;
        }
    }

    if (arm_current_el(env) == arm_debug_target_el(env)) {
        if ((extract32(env->cp15.mdscr_el1, 13, 1) == 0)
            || (env->daif & PSTATE_D)) {
            return false;
        }
    }
    return true;
}

static inline bool aa32_generate_debug_exceptions(CPUARMState *env)
{
    int el = arm_current_el(env);

    if (el == 0 && arm_el_is_aa64(env, 1)) {
        return aa64_generate_debug_exceptions(env);
    }

    if (arm_is_secure(env)) {
        int spd;

        if (el == 0 && (env->cp15.sder & 1)) {
            /* SDER.SUIDEN means debug exceptions from Secure EL0
             * are always enabled. Otherwise they are controlled by
             * SDCR.SPD like those from other Secure ELs.
             */
            return true;
        }

        spd = extract32(env->cp15.mdcr_el3, 14, 2);
        switch (spd) {
        case 1:
            /* SPD == 0b01 is reserved, but behaves as 0b00. */
        case 0:
            /* For 0b00 we return true if external secure invasive debug
             * is enabled. On real hardware this is controlled by external
             * signals to the core. QEMU always permits debug, and behaves
             * as if DBGEN, SPIDEN, NIDEN and SPNIDEN are all tied high.
             */
            return true;
        case 2:
            return false;
        case 3:
            return true;
        }
    }

    return el != 2;
}

/* Return true if debugging exceptions are currently enabled.
 * This corresponds to what in ARM ARM pseudocode would be
 *    if UsingAArch32() then
 *        return AArch32.GenerateDebugExceptions()
 *    else
 *        return AArch64.GenerateDebugExceptions()
 * We choose to push the if() down into this function for clarity,
 * since the pseudocode has it at all callsites except for the one in
 * CheckSoftwareStep(), where it is elided because both branches would
 * always return the same value.
 *
 * Parts of the pseudocode relating to EL2 and EL3 are omitted because we
 * don't yet implement those exception levels or their associated trap bits.
 */
static inline bool arm_generate_debug_exceptions(CPUARMState *env)
{
    if (is_a64(env)) {
        return aa64_generate_debug_exceptions(env);
    } else {
        return aa32_generate_debug_exceptions(env);
    }
}

/* Is single-stepping active? (Note that the "is EL_D AArch64?" check
 * implicitly means this always returns false in pre-v8 CPUs.)
 */
static inline bool arm_singlestep_active(CPUARMState *env)
{
    return extract32(env->cp15.mdscr_el1, 0, 1)
        && arm_el_is_aa64(env, arm_debug_target_el(env))
        && arm_generate_debug_exceptions(env);
}

static inline bool arm_sctlr_b(CPUARMState *env)
{
    return
        /* We need not implement SCTLR.ITD in user-mode emulation, so
         * let linux-user ignore the fact that it conflicts with SCTLR_B.
         * This lets people run BE32 binaries with "-cpu any".
         */
#ifndef CONFIG_USER_ONLY
        !arm_feature(env, ARM_FEATURE_V7) &&
#endif
        (env->cp15.sctlr_el[1] & SCTLR_B) != 0;
}

/* Return true if the processor is in big-endian mode. */
static inline bool arm_cpu_data_is_big_endian(CPUARMState *env)
{
    int cur_el;

    /* In 32bit mode, endianness is determined by looking at CPSR's E bit */
    if (!is_a64(env)) {
        return
#ifdef CONFIG_USER_ONLY
            /* In system mode, BE32 is modelled in line with the
             * architecture (as word-invariant big-endianness), where loads
             * and stores are done little endian but from addresses which
             * are adjusted by XORing with the appropriate constant. So the
             * endianness to use for the raw data access is not affected by
             * SCTLR.B.
             * In user mode, however, we model BE32 as byte-invariant
             * big-endianness (because user-only code cannot tell the
             * difference), and so we need to use a data access endianness
             * that depends on SCTLR.B.
             */
            arm_sctlr_b(env) ||
#endif
                ((env->uncached_cpsr & CPSR_E) ? 1 : 0);
    }

    cur_el = arm_current_el(env);

    if (cur_el == 0) {
        return (env->cp15.sctlr_el[1] & SCTLR_E0E) != 0;
    }

    return (env->cp15.sctlr_el[cur_el] & SCTLR_EE) != 0;
}

#include "exec/cpu-all.h"

/* Bit usage in the TB flags field: bit 31 indicates whether we are
 * in 32 or 64 bit mode. The meaning of the other bits depends on that.
 * We put flags which are shared between 32 and 64 bit mode at the top
 * of the word, and flags which apply to only one mode at the bottom.
 */
#define ARM_TBFLAG_AARCH64_STATE_SHIFT 31
#define ARM_TBFLAG_AARCH64_STATE_MASK (1U << ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX_SHIFT 28
#define ARM_TBFLAG_MMUIDX_MASK (0x7 << ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE_SHIFT 27
#define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS_SHIFT 26
#define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
/* Target EL if we take a floating-point-disabled exception */
#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)

/* Bit usage when in AArch32 state: */
#define ARM_TBFLAG_THUMB_SHIFT 0
#define ARM_TBFLAG_THUMB_MASK (1 << ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN_SHIFT 1
#define ARM_TBFLAG_VECLEN_MASK (0x7 << ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE_SHIFT 4
#define ARM_TBFLAG_VECSTRIDE_MASK (0x3 << ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN_SHIFT 7
#define ARM_TBFLAG_VFPEN_MASK (1 << ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC_SHIFT 8
#define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B_SHIFT 16
#define ARM_TBFLAG_SCTLR_B_MASK (1 << ARM_TBFLAG_SCTLR_B_SHIFT)
/* We store the bottom two bits of the CPAR as TB flags and handle
 * checks on the other bits at runtime.
 */
#define ARM_TBFLAG_XSCALE_CPAR_SHIFT 17
#define ARM_TBFLAG_XSCALE_CPAR_MASK (3 << ARM_TBFLAG_XSCALE_CPAR_SHIFT)
/* Indicates whether cp register reads and writes by guest code should access
 * the secure or nonsecure bank of banked registers; note that this is not
 * the same thing as the current security state of the processor!
 */
#define ARM_TBFLAG_NS_SHIFT 19
#define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA_SHIFT 20
#define ARM_TBFLAG_BE_DATA_MASK (1 << ARM_TBFLAG_BE_DATA_SHIFT)
/* For M profile only, Handler (ie not Thread) mode */
#define ARM_TBFLAG_HANDLER_SHIFT 21
#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)

/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_TBI0_SHIFT 0        /* TBI0 for EL0/1 or TBI for EL2/3 */
#define ARM_TBFLAG_TBI0_MASK (0x1ull << ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1_SHIFT 1        /* TBI1 for EL0/1 */
#define ARM_TBFLAG_TBI1_MASK (0x1ull << ARM_TBFLAG_TBI1_SHIFT)

/* some convenience accessor macros */
#define ARM_TBFLAG_AARCH64_STATE(F) \
    (((F) & ARM_TBFLAG_AARCH64_STATE_MASK) >> ARM_TBFLAG_AARCH64_STATE_SHIFT)
#define ARM_TBFLAG_MMUIDX(F) \
    (((F) & ARM_TBFLAG_MMUIDX_MASK) >> ARM_TBFLAG_MMUIDX_SHIFT)
#define ARM_TBFLAG_SS_ACTIVE(F) \
    (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
#define ARM_TBFLAG_PSTATE_SS(F) \
    (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
#define ARM_TBFLAG_FPEXC_EL(F) \
    (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
#define ARM_TBFLAG_THUMB(F) \
    (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
#define ARM_TBFLAG_VECLEN(F) \
    (((F) & ARM_TBFLAG_VECLEN_MASK) >> ARM_TBFLAG_VECLEN_SHIFT)
#define ARM_TBFLAG_VECSTRIDE(F) \
    (((F) & ARM_TBFLAG_VECSTRIDE_MASK) >> ARM_TBFLAG_VECSTRIDE_SHIFT)
#define ARM_TBFLAG_VFPEN(F) \
    (((F) & ARM_TBFLAG_VFPEN_MASK) >> ARM_TBFLAG_VFPEN_SHIFT)
#define ARM_TBFLAG_CONDEXEC(F) \
    (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
#define ARM_TBFLAG_SCTLR_B(F) \
    (((F) & ARM_TBFLAG_SCTLR_B_MASK) >> ARM_TBFLAG_SCTLR_B_SHIFT)
#define ARM_TBFLAG_XSCALE_CPAR(F) \
    (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
#define ARM_TBFLAG_NS(F) \
    (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
#define ARM_TBFLAG_BE_DATA(F) \
    (((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
#define ARM_TBFLAG_HANDLER(F) \
    (((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
#define ARM_TBFLAG_TBI0(F) \
    (((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1(F) \
    (((F) & ARM_TBFLAG_TBI1_MASK) >> ARM_TBFLAG_TBI1_SHIFT)
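/* Illustrative sketch (invented helper): the accessors above just mask
 * and shift the flags word produced by cpu_get_tb_cpu_state() below, so
 * a translator-side check for "AArch32 Thumb code" looks like this:
 */
static inline bool arm_tbflags_is_thumb_sketch(uint32_t flags)
{
    return !ARM_TBFLAG_AARCH64_STATE(flags) && ARM_TBFLAG_THUMB(flags);
}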

static inline bool bswap_code(bool sctlr_b)
{
#ifdef CONFIG_USER_ONLY
    /* BE8 (SCTLR.B = 0, TARGET_WORDS_BIGENDIAN = 1) is mixed endian.
     * The invalid combination SCTLR.B=1/CPSR.E=1/TARGET_WORDS_BIGENDIAN=0
     * would also end up as a mixed-endian mode with BE code, LE data.
     */
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        sctlr_b;
#else
    /* All code access in ARM is little endian, and there are no loaders
     * doing swaps that need to be reversed
     */
    return 0;
#endif
}

/* Return the exception level to which FP-disabled exceptions should
 * be taken, or 0 if FP is enabled.
 */
static inline int fp_exception_el(CPUARMState *env)
{
    int fpen;
    int cur_el = arm_current_el(env);

    /* CPACR and the CPTR registers don't exist before v6, so FP is
     * always accessible
     */
    if (!arm_feature(env, ARM_FEATURE_V6)) {
        return 0;
    }

    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
     * 0, 2 : trap EL0 and EL1/PL1 accesses
     * 1    : trap only EL0 accesses
     * 3    : trap no accesses
     */
    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
    switch (fpen) {
    case 0:
    case 2:
        if (cur_el == 0 || cur_el == 1) {
            /* Trap to PL1, which might be EL1 or EL3 */
            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
                return 3;
            }
            return 1;
        }
        if (cur_el == 3 && !is_a64(env)) {
            /* Secure PL1 running at EL3 */
            return 3;
        }
        break;
    case 1:
        if (cur_el == 0) {
            return 1;
        }
        break;
    case 3:
        break;
    }

    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
     * check because zero bits in the registers mean "don't trap".
     */

    /* CPTR_EL2 : present in v7VE or v8 */
    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
        && !arm_is_secure_below_el3(env)) {
        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
        return 2;
    }

    /* CPTR_EL3 : present in v8 */
    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
        /* Trap all FP ops to EL3 */
        return 3;
    }

    return 0;
}

#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
    return
#ifdef TARGET_WORDS_BIGENDIAN
        1 ^
#endif
        arm_cpu_data_is_big_endian(env);
}
#endif

#ifndef CONFIG_USER_ONLY
/**
 * arm_regime_tbi0:
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 *
 * Extracts the TBI0 value from the appropriate TCR for the current EL
 *
 * Returns: the TBI0 value.
 */
uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx);

/**
 * arm_regime_tbi1:
 * @env: CPUARMState
 * @mmu_idx: MMU index indicating required translation regime
 *
 * Extracts the TBI1 value from the appropriate TCR for the current EL
 *
 * Returns: the TBI1 value.
 */
uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx);
#else
/* We can't handle tagged addresses properly in user-only mode */
static inline uint32_t arm_regime_tbi0(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return 0;
}

static inline uint32_t arm_regime_tbi1(CPUARMState *env, ARMMMUIdx mmu_idx)
{
    return 0;
}
#endif

static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                                        target_ulong *cs_base, uint32_t *flags)
{
    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));

    if (is_a64(env)) {
        *pc = env->pc;
        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
        /* Get control bits for tagged addresses */
        *flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
        *flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
    } else {
        *pc = env->regs[15];
        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
        if (!(access_secure_reg(env))) {
            *flags |= ARM_TBFLAG_NS_MASK;
        }
        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
            || arm_el_is_aa64(env, 1)) {
            *flags |= ARM_TBFLAG_VFPEN_MASK;
        }
        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
    }

    *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);

    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
     * states defined in the ARM ARM for software singlestep:
     *  SS_ACTIVE   PSTATE.SS   State
     *     0            x       Inactive (the TB flag for SS is always 0)
     *     1            0       Active-pending
     *     1            1       Active-not-pending
     */
    if (arm_singlestep_active(env)) {
        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
        if (is_a64(env)) {
            if (env->pstate & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        } else {
            if (env->uncached_cpsr & PSTATE_SS) {
                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
            }
        }
    }
    if (arm_cpu_data_is_big_endian(env)) {
        *flags |= ARM_TBFLAG_BE_DATA_MASK;
    }
    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;

    if (arm_v7m_is_handler_mode(env)) {
        *flags |= ARM_TBFLAG_HANDLER_MASK;
    }

    *cs_base = 0;
}

enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    QEMU_PSCI_CONDUIT_SMC = 1,
    QEMU_PSCI_CONDUIT_HVC = 2,
};

#ifndef CONFIG_USER_ONLY
/* Return the address space index to use for a memory access */
static inline int arm_asidx_from_attrs(CPUState *cs, MemTxAttrs attrs)
{
    return attrs.secure ? ARMASIdx_S : ARMASIdx_NS;
}

/* Return the AddressSpace to use for a memory access
 * (which depends on whether the access is S or NS, and whether
 * the board gave us a separate AddressSpace for S accesses).
 */
static inline AddressSpace *arm_addressspace(CPUState *cs, MemTxAttrs attrs)
{
    return cpu_get_address_space(cs, arm_asidx_from_attrs(cs, attrs));
}
#endif

/**
 * arm_register_el_change_hook:
 * Register a hook function which will be called back whenever this
 * CPU changes exception level or mode. The hook function will be
 * passed a pointer to the ARMCPU and the opaque data pointer passed
 * to this function when the hook was registered.
 *
 * Note that we currently only support registering a single hook function,
 * and will assert if this function is called twice.
 * This facility is intended for the use of the GICv3 emulation.
 */
void arm_register_el_change_hook(ARMCPU *cpu, ARMELChangeHook *hook,
                                 void *opaque);

/**
 * arm_get_el_change_hook_opaque:
 * Return the opaque data that will be used by the el_change_hook
 * for this CPU.
 */
static inline void *arm_get_el_change_hook_opaque(ARMCPU *cpu)
{
    return cpu->el_change_hook_opaque;
}