#endif
}
-/* Return the exception level to which FP-disabled exceptions should
- * be taken, or 0 if FP is enabled.
- */
-static inline int fp_exception_el(CPUARMState *env)
-{
-    int fpen;
-    int cur_el = arm_current_el(env);
-
-    /* CPACR and the CPTR registers don't exist before v6, so FP is
-     * always accessible
-     */
-    if (!arm_feature(env, ARM_FEATURE_V6)) {
-        return 0;
-    }
-
-    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
-     * 0, 2 : trap EL0 and EL1/PL1 accesses
-     * 1    : trap only EL0 accesses
-     * 3    : trap no accesses
-     */
-    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
-    switch (fpen) {
-    case 0:
-    case 2:
-        if (cur_el == 0 || cur_el == 1) {
-            /* Trap to PL1, which might be EL1 or EL3 */
-            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
-                return 3;
-            }
-            return 1;
-        }
-        if (cur_el == 3 && !is_a64(env)) {
-            /* Secure PL1 running at EL3 */
-            return 3;
-        }
-        break;
-    case 1:
-        if (cur_el == 0) {
-            return 1;
-        }
-        break;
-    case 3:
-        break;
-    }
-
-    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
-     * check because zero bits in the registers mean "don't trap".
-     */
-
-    /* CPTR_EL2 : present in v7VE or v8 */
-    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
-        && !arm_is_secure_below_el3(env)) {
-        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
-        return 2;
-    }
-
-    /* CPTR_EL3 : present in v8 */
-    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
-        /* Trap all FP ops to EL3 */
-        return 3;
-    }
-
-    return 0;
-}
-
#ifdef CONFIG_USER_ONLY
static inline bool arm_cpu_bswap_data(CPUARMState *env)
{
}
#endif
-static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                                        target_ulong *cs_base, uint32_t *flags)
-{
-    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
-    if (is_a64(env)) {
-        *pc = env->pc;
-        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
-        /* Get control bits for tagged addresses */
-        *flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
-        *flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
-    } else {
-        *pc = env->regs[15];
-        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
-            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
-            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
-            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
-            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
-        if (!(access_secure_reg(env))) {
-            *flags |= ARM_TBFLAG_NS_MASK;
-        }
-        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
-            || arm_el_is_aa64(env, 1)) {
-            *flags |= ARM_TBFLAG_VFPEN_MASK;
-        }
-        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
-                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
-    }
-
-    *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
-
-    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
-     * states defined in the ARM ARM for software singlestep:
-     *   SS_ACTIVE   PSTATE.SS   State
-     *     0           x         Inactive (the TB flag for SS is always 0)
-     *     1           0         Active-pending
-     *     1           1         Active-not-pending
-     */
-    if (arm_singlestep_active(env)) {
-        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
-        if (is_a64(env)) {
-            if (env->pstate & PSTATE_SS) {
-                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
-            }
-        } else {
-            if (env->uncached_cpsr & PSTATE_SS) {
-                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
-            }
-        }
-    }
-    if (arm_cpu_data_is_big_endian(env)) {
-        *flags |= ARM_TBFLAG_BE_DATA_MASK;
-    }
-    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
-
-    if (arm_v7m_is_handler_mode(env)) {
-        *flags |= ARM_TBFLAG_HANDLER_MASK;
-    }
-
-    *cs_base = 0;
-}
+void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+                          target_ulong *cs_base, uint32_t *flags);
enum {
    QEMU_PSCI_CONDUIT_DISABLED = 0,
    /* Linux crc32c converts the output to one's complement. */
    return crc32c(acc, buf, bytes) ^ 0xffffffff;
}
+
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+static inline int fp_exception_el(CPUARMState *env)
+{
+    int fpen;
+    int cur_el = arm_current_el(env);
+
+    /* CPACR and the CPTR registers don't exist before v6, so FP is
+     * always accessible
+     */
+    if (!arm_feature(env, ARM_FEATURE_V6)) {
+        return 0;
+    }
+
+    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
+     * 0, 2 : trap EL0 and EL1/PL1 accesses
+     * 1    : trap only EL0 accesses
+     * 3    : trap no accesses
+     */
+    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+    switch (fpen) {
+    case 0:
+    case 2:
+        if (cur_el == 0 || cur_el == 1) {
+            /* Trap to PL1, which might be EL1 or EL3 */
+            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+                return 3;
+            }
+            return 1;
+        }
+        if (cur_el == 3 && !is_a64(env)) {
+            /* Secure PL1 running at EL3 */
+            return 3;
+        }
+        break;
+    case 1:
+        if (cur_el == 0) {
+            return 1;
+        }
+        break;
+    case 3:
+        break;
+    }
+
+    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
+     * check because zero bits in the registers mean "don't trap".
+     */
+
+    /* CPTR_EL2 : present in v7VE or v8 */
+    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
+        && !arm_is_secure_below_el3(env)) {
+        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
+        return 2;
+    }
+
+    /* CPTR_EL3 : present in v8 */
+    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
+        /* Trap all FP ops to EL3 */
+        return 3;
+    }
+
+    return 0;
+}
+
+void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+                          target_ulong *cs_base, uint32_t *flags)
+{
+    ARMMMUIdx mmu_idx = core_to_arm_mmu_idx(env, cpu_mmu_index(env, false));
+    if (is_a64(env)) {
+        *pc = env->pc;
+        *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
+        /* Get control bits for tagged addresses */
+        *flags |= (arm_regime_tbi0(env, mmu_idx) << ARM_TBFLAG_TBI0_SHIFT);
+        *flags |= (arm_regime_tbi1(env, mmu_idx) << ARM_TBFLAG_TBI1_SHIFT);
+    } else {
+        *pc = env->regs[15];
+        *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
+            | (env->vfp.vec_len << ARM_TBFLAG_VECLEN_SHIFT)
+            | (env->vfp.vec_stride << ARM_TBFLAG_VECSTRIDE_SHIFT)
+            | (env->condexec_bits << ARM_TBFLAG_CONDEXEC_SHIFT)
+            | (arm_sctlr_b(env) << ARM_TBFLAG_SCTLR_B_SHIFT);
+        if (!(access_secure_reg(env))) {
+            *flags |= ARM_TBFLAG_NS_MASK;
+        }
+        if (env->vfp.xregs[ARM_VFP_FPEXC] & (1 << 30)
+            || arm_el_is_aa64(env, 1)) {
+            *flags |= ARM_TBFLAG_VFPEN_MASK;
+        }
+        *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
+                   << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
+    }
+
+    *flags |= (arm_to_core_mmu_idx(mmu_idx) << ARM_TBFLAG_MMUIDX_SHIFT);
+
+    /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
+     * states defined in the ARM ARM for software singlestep:
+     *   SS_ACTIVE   PSTATE.SS   State
+     *     0           x         Inactive (the TB flag for SS is always 0)
+     *     1           0         Active-pending
+     *     1           1         Active-not-pending
+     */
+    if (arm_singlestep_active(env)) {
+        *flags |= ARM_TBFLAG_SS_ACTIVE_MASK;
+        if (is_a64(env)) {
+            if (env->pstate & PSTATE_SS) {
+                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
+            }
+        } else {
+            if (env->uncached_cpsr & PSTATE_SS) {
+                *flags |= ARM_TBFLAG_PSTATE_SS_MASK;
+            }
+        }
+    }
+    if (arm_cpu_data_is_big_endian(env)) {
+        *flags |= ARM_TBFLAG_BE_DATA_MASK;
+    }
+    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
+
+    if (arm_v7m_is_handler_mode(env)) {
+        *flags |= ARM_TBFLAG_HANDLER_MASK;
+    }
+
+    *cs_base = 0;
+}
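
As background for readers following the flag packing above: cpu_get_tb_cpu_state() encodes pieces of CPU state into a single 32-bit TB flags word using the ARM_TBFLAG_*_SHIFT/_MASK pairs, and fields are read back with extract32(). The standalone sketch below models only that shift-and-mask idiom; the EX_TBFLAG_* names, field widths, and the ex_extract32() helper are invented for illustration and are not QEMU's actual flag layout or API.

/* Standalone model of the shift/mask TB-flag packing idiom.
 * All names and bit positions here are hypothetical examples.
 */
#include <inttypes.h>
#include <stdio.h>

#define EX_TBFLAG_THUMB_SHIFT     0             /* 1-bit flag at bit 0 */
#define EX_TBFLAG_THUMB_MASK      (1u << EX_TBFLAG_THUMB_SHIFT)
#define EX_TBFLAG_FPEXC_EL_SHIFT  4             /* 2-bit field at bits 5:4 */
#define EX_TBFLAG_FPEXC_EL_MASK   (3u << EX_TBFLAG_FPEXC_EL_SHIFT)

/* Extract 'len' bits starting at bit 'start', mirroring the contract of
 * QEMU's extract32().
 */
static inline uint32_t ex_extract32(uint32_t value, int start, int len)
{
    return (value >> start) & (~0u >> (32 - len));
}

int main(void)
{
    uint32_t flags = 0;

    /* Pack: Thumb execution state set, FP-disabled traps taken to EL2. */
    flags |= 1u << EX_TBFLAG_THUMB_SHIFT;
    flags |= 2u << EX_TBFLAG_FPEXC_EL_SHIFT;

    /* Unpack the same fields again. */
    printf("thumb=%" PRIu32 " fpexc_el=%" PRIu32 "\n",
           ex_extract32(flags, EX_TBFLAG_THUMB_SHIFT, 1),
           ex_extract32(flags, EX_TBFLAG_FPEXC_EL_SHIFT, 2));
    return 0;
}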