#define EXCP_SEMIHOST 16 /* semihosting call */
#define EXCP_NOCP 17 /* v7M NOCP UsageFault */
#define EXCP_INVSTATE 18 /* v7M INVSTATE UsageFault */
+#define EXCP_STKOF 19 /* v8M STKOF UsageFault */
/* NB: add new EXCP_ defines to the array in arm_log_exception() too */
#define ARMV7M_EXCP_RESET 1
s<2n+1> maps to the most significant half of d<n>
*/
+/**
+ * DynamicGDBXMLInfo:
+ * @desc: Contains the XML description.
+ * @num_cpregs: Number of coprocessor registers visible to GDB.
+ * @cpregs_keys: Array of the keys of the coprocessor registers,
+ * in the same order as the cpregs appear in the XML description.
+ */
+typedef struct DynamicGDBXMLInfo {
+ char *desc;
+ int num_cpregs;
+ uint32_t *cpregs_keys;
+} DynamicGDBXMLInfo;
+
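+/*
+ * For illustration only (a sketch, not part of this patch): once
+ * arm_gen_dynamic_xml() has filled in a DynamicGDBXMLInfo "info",
+ * a consumer could map each <reg> element in @desc back to its
+ * cpreg like so:
+ *
+ *     for (int i = 0; i < info->num_cpregs; i++) {
+ *         uint32_t key = info->cpregs_keys[i];
+ *         // key identifies the cpreg described by the i-th
+ *         // <reg> element in info->desc
+ *     }
+ */
+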
/* CPU state for each instance of a generic timer (in cp15 c14) */
typedef struct ARMGenericTimer {
uint64_t cval; /* Timer CompareValue register */
#ifdef TARGET_AARCH64
/* Store FFR as pregs[16] to make it easier to treat like any other. */
+#define FFR_PRED_NUM 16
ARMPredicateReg pregs[17];
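+ /* e.g. the FFR contents are then simply env->vfp.pregs[FFR_PRED_NUM] */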
+ /* Scratch space for aa64 sve predicate temporary. */
+ ARMPredicateReg preg_tmp;
#endif
uint32_t xregs[16];
int vec_len;
int vec_stride;
- /* scratch space when Tn are not sufficient. */
+ /* Scratch space for aa32 neon expansion. */
uint32_t scratch[8];
/* There are a number of distinct float control structures:
uint64_t *cpreg_vmstate_values;
int32_t cpreg_vmstate_array_len;
+ DynamicGDBXMLInfo dyn_xml;
+
/* Timers used by the generic (architected) timer */
QEMUTimer *gt_timer[NUM_GTIMERS];
/* GPIO outputs for generic timer */
uint32_t id_isar3;
uint32_t id_isar4;
uint32_t id_isar5;
+ uint32_t id_isar6;
uint64_t id_aa64pfr0;
uint64_t id_aa64pfr1;
uint64_t id_aa64dfr0;
/* Used to synchronize KVM and QEMU in-kernel device levels */
uint8_t device_irq_level;
+
+ /* Used to set the maximum SVE vector length the cpu will support,
+ * in quadwords (128-bit units).
+ */
+ uint32_t sve_max_vq;
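+ /* e.g. sve_max_vq = 16 would permit vectors of up to 16 quadwords
+ * (2048 bits), the architectural maximum.
+ */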
};
static inline ARMCPU *arm_env_get_cpu(CPUARMState *env)
int arm_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int arm_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
+/* Dynamically generate, for the gdb stub, an XML description of the
+ * system registers from the cp_regs hashtable. Returns the number of
+ * registered sysregs.
+ */
+int arm_gen_dynamic_xml(CPUState *cpu);
+
+/* Return the dynamically generated XML for the gdb stub: a pointer to
+ * the XML contents for the specified XML file, or NULL if the XML
+ * name doesn't match the predefined one.
+ */
+const char *arm_gdb_get_dynamic_xml(CPUState *cpu, const char *xmlname);
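+/*
+ * Usage sketch (hypothetical caller; the XML file name here is an
+ * assumption, not defined by this patch):
+ *
+ *     const char *xml = arm_gdb_get_dynamic_xml(cs, "system-registers.xml");
+ *     if (xml) {
+ *         // hand the contents to the gdb stub
+ *     }
+ */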
+
int arm_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
int cpuid, void *opaque);
int arm_cpu_write_elf32_note(WriteCoreDumpFunction f, CPUState *cs,
int aarch64_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int aarch64_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq);
+void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64);
+#else
+static inline void aarch64_sve_narrow_vq(CPUARMState *env, unsigned vq) { }
+static inline void aarch64_sve_change_el(CPUARMState *env, int old_el,
+ int new_el, bool el0_a64)
+{ }
#endif
target_ulong do_arm_semihosting(CPUARMState *env);
void aarch64_sync_32_to_64(CPUARMState *env);
void aarch64_sync_64_to_32(CPUARMState *env);
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+int fp_exception_el(CPUARMState *env, int cur_el);
+/* Likewise for SVE-disabled exceptions. */
+int sve_exception_el(CPUARMState *env, int cur_el);
+/* Return the effective SVE vector length for @el, in ZCR_ELx.LEN
+ * form (i.e. vector quadwords minus one).
+ */
+uint32_t sve_zcr_len_for_el(CPUARMState *env, int el);
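+/*
+ * e.g. (sketch) converting the LEN-form result of sve_zcr_len_for_el()
+ * back to a vector length in quadwords:
+ *
+ *     int vq = sve_zcr_len_for_el(env, el) + 1;
+ */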
+
static inline bool is_a64(CPUARMState *env)
{
return env->aarch64;
#define HCR_RW (1ULL << 31)
#define HCR_CD (1ULL << 32)
#define HCR_ID (1ULL << 33)
+#define HCR_E2H (1ULL << 34)
+/*
+ * When we actually implement ARMv8.1-VHE we should add HCR_E2H to
+ * HCR_MASK, and have hcr_write() clear it again when the feature
+ * bit is not set.
+ */
#define HCR_MASK ((1ULL << 34) - 1)
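+/*
+ * A rough sketch of the scheme described above (ARM_FEATURE_V8_1_VHE
+ * is a hypothetical feature bit; nothing here is implemented yet):
+ *
+ *     uint64_t valid_mask = HCR_MASK | HCR_E2H;
+ *     if (!arm_feature(env, ARM_FEATURE_V8_1_VHE)) {
+ *         valid_mask &= ~HCR_E2H;
+ *     }
+ *     value &= valid_mask;
+ */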
#define SCR_NS (1U << 0)
* we store the underlying state in fpscr and just mask on read/write.
*/
#define FPSR_MASK 0xf800009f
-#define FPCR_MASK 0x07f79f00
+#define FPCR_MASK 0x07ff9f00
#define FPCR_FZ16 (1 << 19) /* ARMv8.2+, FP16 flush-to-zero */
#define FPCR_FZ (1 << 24) /* Flush-to-zero enable bit */
#define ARM_VFP_FPINST2 10
/* iwMMXt coprocessor control registers. */
-#define ARM_IWMMXT_wCID 0
-#define ARM_IWMMXT_wCon 1
-#define ARM_IWMMXT_wCSSF 2
-#define ARM_IWMMXT_wCASF 3
-#define ARM_IWMMXT_wCGR0 8
-#define ARM_IWMMXT_wCGR1 9
-#define ARM_IWMMXT_wCGR2 10
-#define ARM_IWMMXT_wCGR3 11
+#define ARM_IWMMXT_wCID 0
+#define ARM_IWMMXT_wCon 1
+#define ARM_IWMMXT_wCSSF 2
+#define ARM_IWMMXT_wCASF 3
+#define ARM_IWMMXT_wCGR0 8
+#define ARM_IWMMXT_wCGR1 9
+#define ARM_IWMMXT_wCGR2 10
+#define ARM_IWMMXT_wCGR3 11
/* V7M CCR bits */
FIELD(V7M_CCR, NONBASETHRDENA, 0, 1)
FIELD(V7M_CCR, DIV_0_TRP, 4, 1)
FIELD(V7M_CCR, BFHFNMIGN, 8, 1)
FIELD(V7M_CCR, STKALIGN, 9, 1)
+FIELD(V7M_CCR, STKOFHFNMIGN, 10, 1)
FIELD(V7M_CCR, DC, 16, 1)
FIELD(V7M_CCR, IC, 17, 1)
+FIELD(V7M_CCR, BP, 18, 1)
/* V7M SCR bits */
FIELD(V7M_SCR, SLEEPONEXIT, 1, 1)
FIELD(V7M_CFSR, INVSTATE, 16 + 1, 1)
FIELD(V7M_CFSR, INVPC, 16 + 2, 1)
FIELD(V7M_CFSR, NOCP, 16 + 3, 1)
+FIELD(V7M_CFSR, STKOF, 16 + 4, 1)
FIELD(V7M_CFSR, UNALIGNED, 16 + 8, 1)
FIELD(V7M_CFSR, DIVBYZERO, 16 + 9, 1)
*/
FIELD(V7M_CSSELR, INDEX, 0, 4)
+/*
+ * System register ID fields.
+ */
+FIELD(ID_ISAR0, SWAP, 0, 4)
+FIELD(ID_ISAR0, BITCOUNT, 4, 4)
+FIELD(ID_ISAR0, BITFIELD, 8, 4)
+FIELD(ID_ISAR0, CMPBRANCH, 12, 4)
+FIELD(ID_ISAR0, COPROC, 16, 4)
+FIELD(ID_ISAR0, DEBUG, 20, 4)
+FIELD(ID_ISAR0, DIVIDE, 24, 4)
+
+FIELD(ID_ISAR1, ENDIAN, 0, 4)
+FIELD(ID_ISAR1, EXCEPT, 4, 4)
+FIELD(ID_ISAR1, EXCEPT_AR, 8, 4)
+FIELD(ID_ISAR1, EXTEND, 12, 4)
+FIELD(ID_ISAR1, IFTHEN, 16, 4)
+FIELD(ID_ISAR1, IMMEDIATE, 20, 4)
+FIELD(ID_ISAR1, INTERWORK, 24, 4)
+FIELD(ID_ISAR1, JAZELLE, 28, 4)
+
+FIELD(ID_ISAR2, LOADSTORE, 0, 4)
+FIELD(ID_ISAR2, MEMHINT, 4, 4)
+FIELD(ID_ISAR2, MULTIACCESSINT, 8, 4)
+FIELD(ID_ISAR2, MULT, 12, 4)
+FIELD(ID_ISAR2, MULTS, 16, 4)
+FIELD(ID_ISAR2, MULTU, 20, 4)
+FIELD(ID_ISAR2, PSR_AR, 24, 4)
+FIELD(ID_ISAR2, REVERSAL, 28, 4)
+
+FIELD(ID_ISAR3, SATURATE, 0, 4)
+FIELD(ID_ISAR3, SIMD, 4, 4)
+FIELD(ID_ISAR3, SVC, 8, 4)
+FIELD(ID_ISAR3, SYNCHPRIM, 12, 4)
+FIELD(ID_ISAR3, TABBRANCH, 16, 4)
+FIELD(ID_ISAR3, T32COPY, 20, 4)
+FIELD(ID_ISAR3, TRUENOP, 24, 4)
+FIELD(ID_ISAR3, T32EE, 28, 4)
+
+FIELD(ID_ISAR4, UNPRIV, 0, 4)
+FIELD(ID_ISAR4, WITHSHIFTS, 4, 4)
+FIELD(ID_ISAR4, WRITEBACK, 8, 4)
+FIELD(ID_ISAR4, SMC, 12, 4)
+FIELD(ID_ISAR4, BARRIER, 16, 4)
+FIELD(ID_ISAR4, SYNCHPRIM_FRAC, 20, 4)
+FIELD(ID_ISAR4, PSR_M, 24, 4)
+FIELD(ID_ISAR4, SWP_FRAC, 28, 4)
+
+FIELD(ID_ISAR5, SEVL, 0, 4)
+FIELD(ID_ISAR5, AES, 4, 4)
+FIELD(ID_ISAR5, SHA1, 8, 4)
+FIELD(ID_ISAR5, SHA2, 12, 4)
+FIELD(ID_ISAR5, CRC32, 16, 4)
+FIELD(ID_ISAR5, RDM, 24, 4)
+FIELD(ID_ISAR5, VCMA, 28, 4)
+
+FIELD(ID_ISAR6, JSCVT, 0, 4)
+FIELD(ID_ISAR6, DP, 4, 4)
+FIELD(ID_ISAR6, FHM, 8, 4)
+FIELD(ID_ISAR6, SB, 12, 4)
+FIELD(ID_ISAR6, SPECRES, 16, 4)
+
+FIELD(ID_AA64ISAR0, AES, 4, 4)
+FIELD(ID_AA64ISAR0, SHA1, 8, 4)
+FIELD(ID_AA64ISAR0, SHA2, 12, 4)
+FIELD(ID_AA64ISAR0, CRC32, 16, 4)
+FIELD(ID_AA64ISAR0, ATOMIC, 20, 4)
+FIELD(ID_AA64ISAR0, RDM, 28, 4)
+FIELD(ID_AA64ISAR0, SHA3, 32, 4)
+FIELD(ID_AA64ISAR0, SM3, 36, 4)
+FIELD(ID_AA64ISAR0, SM4, 40, 4)
+FIELD(ID_AA64ISAR0, DP, 44, 4)
+FIELD(ID_AA64ISAR0, FHM, 48, 4)
+FIELD(ID_AA64ISAR0, TS, 52, 4)
+FIELD(ID_AA64ISAR0, TLB, 56, 4)
+FIELD(ID_AA64ISAR0, RNDR, 60, 4)
+
+FIELD(ID_AA64ISAR1, DPB, 0, 4)
+FIELD(ID_AA64ISAR1, APA, 4, 4)
+FIELD(ID_AA64ISAR1, API, 8, 4)
+FIELD(ID_AA64ISAR1, JSCVT, 12, 4)
+FIELD(ID_AA64ISAR1, FCMA, 16, 4)
+FIELD(ID_AA64ISAR1, LRCPC, 20, 4)
+FIELD(ID_AA64ISAR1, GPA, 24, 4)
+FIELD(ID_AA64ISAR1, GPI, 28, 4)
+FIELD(ID_AA64ISAR1, FRINTTS, 32, 4)
+FIELD(ID_AA64ISAR1, SB, 36, 4)
+FIELD(ID_AA64ISAR1, SPECRES, 40, 4)
+
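+/*
+ * These FIELD() definitions are meant for use with the FIELD_EX32/
+ * FIELD_EX64 helpers from hw/registerfields.h. For example (sketch):
+ *
+ *     if (FIELD_EX32(cpu->id_isar6, ID_ISAR6, DP) != 0) {
+ *         // the v8.2 SIMD dot product instructions are implemented
+ *     }
+ */
+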
QEMU_BUILD_BUG_ON(ARRAY_SIZE(((ARMCPU *)0)->ccsidr) <= R_V7M_CSSELR_INDEX_MASK);
/* If adding a feature bit which corresponds to a Linux ELF
ARM_FEATURE_OMAPCP, /* OMAP specific CP15 ops handling. */
ARM_FEATURE_THUMB2EE,
ARM_FEATURE_V7MP, /* v7 Multiprocessing Extensions */
+ ARM_FEATURE_V7VE, /* v7 Virtualization Extensions (non-EL2 parts) */
ARM_FEATURE_V4T,
ARM_FEATURE_V5,
ARM_FEATURE_STRONGARM,
ARM_FEATURE_V8_SM4, /* implements SM4 part of v8 Crypto Extensions */
ARM_FEATURE_V8_ATOMICS, /* ARMv8.1-Atomics feature */
ARM_FEATURE_V8_RDM, /* implements v8.1 simd round multiply */
+ ARM_FEATURE_V8_DOTPROD, /* implements v8.2 simd dot product */
ARM_FEATURE_V8_FP16, /* implements v8.2 half-precision float */
ARM_FEATURE_V8_FCMA, /* has complex number part of v8.3 extensions. */
+ ARM_FEATURE_M_MAIN, /* M profile Main Extension */
};
static inline int arm_feature(CPUARMState *env, int feature)
#define ARM_LAST_SPECIAL ARM_CP_DC_ZVA
#define ARM_CP_FPU 0x1000
#define ARM_CP_SVE 0x2000
+#define ARM_CP_NO_GDB 0x4000
/* Used only as a terminator for ARMCPRegInfo lists */
#define ARM_CP_SENTINEL 0xffff
/* Mask of only the flag bits in a type field */
-#define ARM_CP_FLAG_MASK 0x30ff
+#define ARM_CP_FLAG_MASK 0x70ff
/* Valid values for ARMCPRegInfo state field, indicating which of
* the AArch32 and AArch64 execution states this register is visible in.
# define TARGET_VIRT_ADDR_SPACE_BITS 32
#endif
+/**
+ * arm_hcr_el2_imo(): Return the effective value of HCR_EL2.IMO.
+ * Depending on the values of HCR_EL2.E2H and TGE, this may be
+ * "behaves as 1 for all purposes other than direct read/write" or
+ * "behaves as 0 for all purposes other than direct read/write"
+ */
+static inline bool arm_hcr_el2_imo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_IMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_fmo(): Return the effective value of HCR_EL2.FMO.
+ */
+static inline bool arm_hcr_el2_fmo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_FMO;
+ }
+}
+
+/**
+ * arm_hcr_el2_amo(): Return the effective value of HCR_EL2.AMO.
+ */
+static inline bool arm_hcr_el2_amo(CPUARMState *env)
+{
+ switch (env->cp15.hcr_el2 & (HCR_TGE | HCR_E2H)) {
+ case HCR_TGE:
+ return true;
+ case HCR_TGE | HCR_E2H:
+ return false;
+ default:
+ return env->cp15.hcr_el2 & HCR_AMO;
+ }
+}
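+
+/*
+ * Callers can use these helpers wherever the raw HCR_EL2 bits were
+ * previously consulted, e.g. (sketch):
+ *
+ *     if (arm_hcr_el2_imo(env) && !arm_is_secure(env)) {
+ *         // virtual IRQs may be pended/taken
+ *     }
+ */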
+
static inline bool arm_excp_unmasked(CPUState *cs, unsigned int excp_idx,
unsigned int target_el)
{
break;
case EXCP_VFIQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_FMO)) {
+ if (secure || !arm_hcr_el2_fmo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VFIQs are only taken when hypervised and non-secure. */
return false;
}
return !(env->daif & PSTATE_F);
case EXCP_VIRQ:
- if (secure || !(env->cp15.hcr_el2 & HCR_IMO)) {
+ if (secure || !arm_hcr_el2_imo(env) || (env->cp15.hcr_el2 & HCR_TGE)) {
/* VIRQs are only taken when hypervised and non-secure. */
return false;
}
* to the CPSR.F setting otherwise we further assess the state
* below.
*/
- hcr = (env->cp15.hcr_el2 & HCR_FMO);
+ hcr = arm_hcr_el2_fmo(env);
scr = (env->cp15.scr_el3 & SCR_FIQ);
/* When EL3 is 32-bit, the SCR.FW bit controls whether the
* when setting the target EL, so it does not have a further
* effect here.
*/
- hcr = (env->cp15.hcr_el2 & HCR_IMO);
+ hcr = arm_hcr_el2_imo(env);
scr = false;
break;
default:
/* For M profile only, Handler (ie not Thread) mode */
#define ARM_TBFLAG_HANDLER_SHIFT 21
#define ARM_TBFLAG_HANDLER_MASK (1 << ARM_TBFLAG_HANDLER_SHIFT)
+/* For M profile only, whether we should generate stack-limit checks */
+#define ARM_TBFLAG_STACKCHECK_SHIFT 22
+#define ARM_TBFLAG_STACKCHECK_MASK (1 << ARM_TBFLAG_STACKCHECK_SHIFT)
/* Bit usage when in AArch64 state */
#define ARM_TBFLAG_TBI0_SHIFT 0 /* TBI0 for EL0/1 or TBI for EL2/3 */
(((F) & ARM_TBFLAG_BE_DATA_MASK) >> ARM_TBFLAG_BE_DATA_SHIFT)
#define ARM_TBFLAG_HANDLER(F) \
(((F) & ARM_TBFLAG_HANDLER_MASK) >> ARM_TBFLAG_HANDLER_SHIFT)
+#define ARM_TBFLAG_STACKCHECK(F) \
+ (((F) & ARM_TBFLAG_STACKCHECK_MASK) >> ARM_TBFLAG_STACKCHECK_SHIFT)
#define ARM_TBFLAG_TBI0(F) \
(((F) & ARM_TBFLAG_TBI0_MASK) >> ARM_TBFLAG_TBI0_SHIFT)
#define ARM_TBFLAG_TBI1(F) \
return &env->vfp.zregs[regno].d[0];
}
+/* Shared between translate-sve.c and sve_helper.c. */
+extern const uint64_t pred_esz_masks[4];
+
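+/*
+ * Sketch of the intended use (esz is the log2 element size, 0..3 for
+ * B/H/S/D): ANDing a predicate word with pred_esz_masks[esz] keeps
+ * only the bits that can legally be set for that element size, e.g.:
+ *
+ *     uint64_t canonical = pred_word & pred_esz_masks[esz];
+ */
+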
#endif