#include "sysemu/arch_init.h"
#include "sysemu/sysemu.h"
#include "qemu/bitops.h"
+#include "qemu/crc32c.h"
+#include <zlib.h> /* For crc32 */
#ifndef CONFIG_USER_ONLY
static inline int get_phys_addr(CPUARMState *env, uint32_t address,
int access_type, int is_user,
hwaddr *phys_ptr, int *prot,
target_ulong *page_size);
+
+/* Definitions for the PMCCNTR and PMCR registers */
+#define PMCRD 0x8 /* PMCR.D: clock divider, count once per 64 cycles */
+#define PMCRC 0x4 /* PMCR.C: cycle counter reset */
+#define PMCRE 0x1 /* PMCR.E: counter enable */
#endif
static int vfp_gdb_get_reg(CPUARMState *env, uint8_t *buf, int reg)
static void dacr_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
env->cp15.c3 = value;
- tlb_flush(env, 1); /* Flush TLB as domain not tracked in TLB */
+ tlb_flush(CPU(cpu), 1); /* Flush TLB as domain not tracked in TLB */
}
static void fcse_write(CPUARMState *env, const ARMCPRegInfo *ri, uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
if (env->cp15.c13_fcse != value) {
/* Unlike real hardware the qemu TLB uses virtual addresses,
* not modified virtual addresses, so this causes a TLB flush.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
env->cp15.c13_fcse = value;
}
}
static void contextidr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
if (env->cp15.c13_context != value && !arm_feature(env, ARM_FEATURE_MPU)) {
/* For VMSA (when not using the LPAE long descriptor page table
* format) this register includes the ASID, so do a TLB flush.
* For PMSA it is purely a process ID and no action is needed.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
env->cp15.c13_context = value;
}
uint64_t value)
{
/* Invalidate all (TLBIALL) */
- tlb_flush(env, 1);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
}
static void tlbimva_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single TLB entry by MVA and ASID (TLBIMVA) */
- tlb_flush_page(env, value & TARGET_PAGE_MASK);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static void tlbiasid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (TLBIASID) */
- tlb_flush(env, value == 0);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), value == 0);
}
static void tlbimvaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate single entry by MVA, all ASIDs (TLBIMVAA) */
- tlb_flush_page(env, value & TARGET_PAGE_MASK);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush_page(CPU(cpu), value & TARGET_PAGE_MASK);
}
static const ARMCPRegInfo cp_reginfo[] = {
*/
{ .name = "WFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0, },
- { .name = "CPACR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
+ { .name = "CPACR", .state = ARM_CP_STATE_BOTH, .opc0 = 3,
+ .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 2,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_coproc),
.resetvalue = 0, .writefn = cpacr_write },
REGINFO_SENTINEL
static CPAccessResult pmreg_access(CPUARMState *env, const ARMCPRegInfo *ri)
{
- /* Perfomance monitor registers user accessibility is controlled
+ /* Performance monitor registers user accessibility is controlled
* by PMUSERENR.
*/
if (arm_current_pl(env) == 0 && !env->cp15.c9_pmuserenr) {
return CP_ACCESS_OK;
}
+#ifndef CONFIG_USER_ONLY
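+/* While PMCR.E is set, c15_ccnt holds (reference ticks - counter value)
+ * so that the current count can be recovered as (current ticks - c15_ccnt);
+ * while the counter is disabled it holds the count directly. The ticks are
+ * pre-divided by 64 when PMCR.D is set. pmcr_write() converts between the
+ * two forms whenever the control bits are written.
+ */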
static void pmcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ /* Don't compute the number of ticks in user mode */
+ uint32_t temp_ticks;
+
+ temp_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
+ get_ticks_per_sec() / 1000000;
+
+ if (env->cp15.c9_pmcr & PMCRE) {
+ /* If the counter is enabled */
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ env->cp15.c15_ccnt = (temp_ticks/64) - env->cp15.c15_ccnt;
+ } else {
+ env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
+ }
+ }
+
+ if (value & PMCRC) {
+ /* The counter has been reset */
+ env->cp15.c15_ccnt = 0;
+ }
+
/* only the DP, X, D and E bits are writable */
env->cp15.c9_pmcr &= ~0x39;
env->cp15.c9_pmcr |= (value & 0x39);
+
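+ /* If the counter is (still) enabled, convert the absolute count back
+ * into the reference-tick form used while it is running.
+ */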
+ if (env->cp15.c9_pmcr & PMCRE) {
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ temp_ticks /= 64;
+ }
+ env->cp15.c15_ccnt = temp_ticks - env->cp15.c15_ccnt;
+ }
}
+static uint64_t pmccntr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint32_t total_ticks;
+
+ if (!(env->cp15.c9_pmcr & PMCRE)) {
+ /* Counter is disabled, do not change value */
+ return env->cp15.c15_ccnt;
+ }
+
+ total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
+ get_ticks_per_sec() / 1000000;
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ total_ticks /= 64;
+ }
+ return total_ticks - env->cp15.c15_ccnt;
+}
+
+static void pmccntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ uint32_t total_ticks;
+
+ if (!(env->cp15.c9_pmcr & PMCRE)) {
+ /* Counter is disabled, set the absolute value */
+ env->cp15.c15_ccnt = value;
+ return;
+ }
+
+ total_ticks = qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) *
+ get_ticks_per_sec() / 1000000;
+
+ if (env->cp15.c9_pmcr & PMCRD) {
+ /* Increment once every 64 processor clock cycles */
+ total_ticks /= 64;
+ }
+ env->cp15.c15_ccnt = total_ticks - value;
+}
+#endif
+
static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
static void vbar_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ /* Note that even though the AArch64 view of this register has bits
+ * [10:0] all RES0 we can only mask the bottom 5, to comply with the
+ * architectural requirements for bits which are RES0 only in some
+ * contexts. (ARMv8 would permit us to do no masking at all, but ARMv7
+ * requires the bottom five bits to be RAZ/WI because they're UNK/SBZP.)
+ */
env->cp15.c12_vbar = value & ~0x1Ful;
}
{ .name = "PMSELR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 5,
.access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
.accessfn = pmreg_access },
- /* Unimplemented, RAZ/WI. */
+#ifndef CONFIG_USER_ONLY
{ .name = "PMCCNTR", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 0,
- .access = PL0_RW, .type = ARM_CP_CONST, .resetvalue = 0,
+ .access = PL0_RW, .resetvalue = 0, .type = ARM_CP_IO,
+ .readfn = pmccntr_read, .writefn = pmccntr_write,
.accessfn = pmreg_access },
+#endif
{ .name = "PMXEVTYPER", .cp = 15, .crn = 9, .crm = 13, .opc1 = 0, .opc2 = 1,
.access = PL0_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmxevtyper),
.access = PL1_RW, .type = ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pminten),
.resetvalue = 0, .writefn = pmintenclr_write, },
- { .name = "VBAR", .cp = 15, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
+ { .name = "VBAR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 12, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .writefn = vbar_write,
.fieldoffset = offsetof(CPUARMState, cp15.c12_vbar),
.resetvalue = 0 },
*/
{ .name = "AIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 7,
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* MAIR can just read-as-written because we don't implement caches
+ * and so don't need to care about memory attributes.
+ */
+ { .name = "MAIR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.mair_el1),
+ .resetvalue = 0 },
+ /* For non-long-descriptor page tables these are PRRR and NMRR;
+ * regardless they still act as reads-as-written for QEMU.
+ * The override is necessary because of the overly-broad TLB_LOCKDOWN
+ * definition.
+ */
+ { .name = "MAIR0", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 0, .access = PL1_RW,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.mair_el1),
+ .resetfn = arm_cp_reset_ignore },
+ { .name = "MAIR1", .state = ARM_CP_STATE_AA32, .type = ARM_CP_OVERRIDE,
+ .cp = 15, .opc1 = 0, .crn = 10, .crm = 2, .opc2 = 1, .access = PL1_RW,
+ .fieldoffset = offsetofhigh32(CPUARMState, cp15.mair_el1),
+ .resetfn = arm_cp_reset_ignore },
REGINFO_SENTINEL
};
* Our reset value matches the fixed frequency we implement the timer at.
*/
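+ /* For the registers below that have both an AArch32 and an AArch64
+ * encoding, the AArch32 entry is marked ARM_CP_NO_MIGRATE and simply
+ * aliases the underlying state; where there is state to migrate or
+ * reset, that is handled by the corresponding *_EL0 entry.
+ */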
{ .name = "CNTFRQ", .cp = 15, .crn = 14, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW | PL0_R,
+ .type = ARM_CP_NO_MIGRATE,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c14_cntfrq),
+ .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTFRQ_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 0,
+ .access = PL1_RW | PL0_R, .accessfn = gt_cntfrq_access,
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntfrq),
.resetvalue = (1000 * 1000 * 1000) / GTIMER_SCALE,
- .accessfn = gt_cntfrq_access,
},
/* overall control: mostly access permissions */
- { .name = "CNTKCTL", .cp = 15, .crn = 14, .crm = 1, .opc1 = 0, .opc2 = 0,
+ { .name = "CNTKCTL", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .opc1 = 0, .crn = 14, .crm = 1, .opc2 = 0,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c14_cntkctl),
.resetvalue = 0,
},
/* per-timer control */
{ .name = "CNTP_CTL", .cp = 15, .crn = 14, .crm = 2, .opc1 = 0, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_PHYS].ctl),
+ .resetfn = arm_cp_reset_ignore,
+ .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 1,
.type = ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].ctl),
.resetvalue = 0,
- .accessfn = gt_ptimer_access,
.writefn = gt_ctl_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CTL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 1,
+ .type = ARM_CP_IO | ARM_CP_NO_MIGRATE, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
+ .fieldoffset = offsetoflow32(CPUARMState,
+ cp15.c14_timer[GTIMER_VIRT].ctl),
+ .resetfn = arm_cp_reset_ignore,
+ .writefn = gt_ctl_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CTL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 1,
.type = ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].ctl),
.resetvalue = 0,
- .accessfn = gt_vtimer_access,
.writefn = gt_ctl_write, .raw_writefn = raw_write,
},
/* TimerValue views: a 32 bit downcounting view of the underlying state */
.accessfn = gt_ptimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
+ { .name = "CNTP_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_ptimer_access,
+ .readfn = gt_tval_read, .writefn = gt_tval_write,
+ },
{ .name = "CNTV_TVAL", .cp = 15, .crn = 14, .crm = 3, .opc1 = 0, .opc2 = 0,
.type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
.accessfn = gt_vtimer_access,
.readfn = gt_tval_read, .writefn = gt_tval_write,
},
+ { .name = "CNTV_TVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 0,
+ .type = ARM_CP_NO_MIGRATE | ARM_CP_IO, .access = PL1_RW | PL0_R,
+ .accessfn = gt_vtimer_access,
+ .readfn = gt_tval_read, .writefn = gt_tval_write,
+ },
/* The counter itself */
{ .name = "CNTPCT", .cp = 15, .crm = 14, .opc1 = 0,
.access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
.accessfn = gt_pct_access,
+ .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTPCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 1,
+ .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .accessfn = gt_pct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
{ .name = "CNTVCT", .cp = 15, .crm = 14, .opc1 = 1,
.access = PL0_R, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE | ARM_CP_IO,
.accessfn = gt_vct_access,
+ .readfn = gt_cnt_read, .resetfn = arm_cp_reset_ignore,
+ },
+ { .name = "CNTVCT_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 0, .opc2 = 2,
+ .access = PL0_R, .type = ARM_CP_NO_MIGRATE | ARM_CP_IO,
+ .accessfn = gt_vct_access,
.readfn = gt_cnt_read, .resetfn = gt_cnt_reset,
},
/* Comparison value, indicating when the timer goes off */
{ .name = "CNTP_CVAL", .cp = 15, .crm = 14, .opc1 = 2,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
- .resetvalue = 0,
- .accessfn = gt_ptimer_access,
+ .accessfn = gt_ptimer_access, .resetfn = arm_cp_reset_ignore,
+ .writefn = gt_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTP_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 2, .opc2 = 2,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_PHYS].cval),
+ .resetvalue = 0, .accessfn = gt_ptimer_access,
.writefn = gt_cval_write, .raw_writefn = raw_write,
},
{ .name = "CNTV_CVAL", .cp = 15, .crm = 14, .opc1 = 3,
.access = PL1_RW | PL0_R,
- .type = ARM_CP_64BIT | ARM_CP_IO,
+ .type = ARM_CP_64BIT | ARM_CP_IO | ARM_CP_NO_MIGRATE,
.fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
- .resetvalue = 0,
- .accessfn = gt_vtimer_access,
+ .accessfn = gt_vtimer_access, .resetfn = arm_cp_reset_ignore,
+ .writefn = gt_cval_write, .raw_writefn = raw_write,
+ },
+ { .name = "CNTV_CVAL_EL0", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 3, .crn = 14, .crm = 3, .opc2 = 2,
+ .access = PL1_RW | PL0_R,
+ .type = ARM_CP_IO,
+ .fieldoffset = offsetof(CPUARMState, cp15.c14_timer[GTIMER_VIRT].cval),
+ .resetvalue = 0, .accessfn = gt_vtimer_access,
.writefn = gt_cval_write, .raw_writefn = raw_write,
},
REGINFO_SENTINEL
static void vmsa_ttbcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
if (arm_feature(env, ARM_FEATURE_LPAE)) {
/* With LPAE the TTBCR could result in a change of ASID
* via the TTBCR.A1 bit, so do a TLB flush.
*/
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
vmsa_ttbcr_raw_write(env, ri, value);
}
env->cp15.c2_mask = 0;
}
+static void vmsa_tcr_el1_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ /* For AArch64 the A1 bit could result in a change of ASID, so TLB flush. */
+ tlb_flush(CPU(cpu), 1);
+ env->cp15.c2_control = value;
+}
+
+static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
+ uint64_t value)
+{
+ /* 64 bit accesses to the TTBRs can change the ASID and so we
+ * must flush the TLB.
+ */
+ if (cpreg_field_is_64bit(ri)) {
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ tlb_flush(CPU(cpu), 1);
+ }
+ raw_write(env, ri, value);
+}
+
static const ARMCPRegInfo vmsa_cp_reginfo[] = {
{ .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW,
{ .name = "IFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 1,
.access = PL1_RW,
.fieldoffset = offsetof(CPUARMState, cp15.c5_insn), .resetvalue = 0, },
- { .name = "TTBR0", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
- .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c2_base0), .resetvalue = 0, },
- { .name = "TTBR1", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
- .access = PL1_RW,
- .fieldoffset = offsetof(CPUARMState, cp15.c2_base1), .resetvalue = 0, },
- { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
- .access = PL1_RW, .writefn = vmsa_ttbcr_write,
- .resetfn = vmsa_ttbcr_reset, .raw_writefn = vmsa_ttbcr_raw_write,
+ { .name = "TTBR0_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
+ .writefn = vmsa_ttbr_write, .resetvalue = 0 },
+ { .name = "TTBR1_EL1", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 1,
+ .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
+ .writefn = vmsa_ttbr_write, .resetvalue = 0 },
+ { .name = "TCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .writefn = vmsa_tcr_el1_write,
+ .resetfn = vmsa_ttbcr_reset, .raw_writefn = raw_write,
.fieldoffset = offsetof(CPUARMState, cp15.c2_control) },
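+ /* The 32-bit TTBCR below aliases the low half of TCR_EL1 above; it is
+ * not migrated, and its reset is handled via TCR_EL1.
+ */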
+ { .name = "TTBCR", .cp = 15, .crn = 2, .crm = 0, .opc1 = 0, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_NO_MIGRATE, .writefn = vmsa_ttbcr_write,
+ .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
+ .fieldoffset = offsetoflow32(CPUARMState, cp15.c2_control) },
{ .name = "DFAR", .cp = 15, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c6_data),
.resetvalue = 0, },
{
CPUState *cs = CPU(arm_env_get_cpu(env));
uint32_t mpidr = cs->cpu_index;
- /* We don't support setting cluster ID ([8..11])
+ /* We don't support setting cluster ID ([8..11]) (known as Aff1
+ * in later ARM ARM versions), or any of the higher affinity level fields,
* so these bits always RAZ.
*/
if (arm_feature(env, ARM_FEATURE_V7MP)) {
}
static const ARMCPRegInfo mpidr_cp_reginfo[] = {
- { .name = "MPIDR", .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
+ { .name = "MPIDR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 5,
.access = PL1_R, .readfn = mpidr_read, .type = ARM_CP_NO_MIGRATE },
REGINFO_SENTINEL
};
env->cp15.c7_par = 0;
}
-static uint64_t ttbr064_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
-}
-
-static void ttbr064_raw_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- env->cp15.c2_base0_hi = value >> 32;
- env->cp15.c2_base0 = value;
-}
-
-static void ttbr064_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- /* Writes to the 64 bit format TTBRs may change the ASID */
- tlb_flush(env, 1);
- ttbr064_raw_write(env, ri, value);
-}
-
-static void ttbr064_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- env->cp15.c2_base0_hi = 0;
- env->cp15.c2_base0 = 0;
-}
-
-static uint64_t ttbr164_read(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- return ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
-}
-
-static void ttbr164_write(CPUARMState *env, const ARMCPRegInfo *ri,
- uint64_t value)
-{
- env->cp15.c2_base1_hi = value >> 32;
- env->cp15.c2_base1 = value;
-}
-
-static void ttbr164_reset(CPUARMState *env, const ARMCPRegInfo *ri)
-{
- env->cp15.c2_base1_hi = 0;
- env->cp15.c2_base1 = 0;
-}
-
static const ARMCPRegInfo lpae_cp_reginfo[] = {
/* NOP AMAIR0/1: the override is because these clash with the rather
* broadly specified TLB_LOCKDOWN entry in the generic cp_reginfo.
*/
- { .name = "AMAIR0", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
+ { .name = "AMAIR0", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
.resetvalue = 0 },
+ /* AMAIR1 is mapped to AMAIR_EL1[63:32] */
{ .name = "AMAIR1", .cp = 15, .crn = 10, .crm = 3, .opc1 = 0, .opc2 = 1,
.access = PL1_RW, .type = ARM_CP_CONST | ARM_CP_OVERRIDE,
.resetvalue = 0 },
.access = PL1_RW, .type = ARM_CP_64BIT,
.readfn = par64_read, .writefn = par64_write, .resetfn = par64_reset },
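+ /* The 64-bit LPAE views of TTBR0/TTBR1 alias the ttbr0_el1/ttbr1_el1
+ * state; migration and reset are handled by the TTBRn_EL1 entries in
+ * vmsa_cp_reginfo.
+ */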
{ .name = "TTBR0", .cp = 15, .crm = 2, .opc1 = 0,
- .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr064_read,
- .writefn = ttbr064_write, .raw_writefn = ttbr064_raw_write,
- .resetfn = ttbr064_reset },
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr0_el1),
+ .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
{ .name = "TTBR1", .cp = 15, .crm = 2, .opc1 = 1,
- .access = PL1_RW, .type = ARM_CP_64BIT, .readfn = ttbr164_read,
- .writefn = ttbr164_write, .resetfn = ttbr164_reset },
+ .access = PL1_RW, .type = ARM_CP_64BIT | ARM_CP_NO_MIGRATE,
+ .fieldoffset = offsetof(CPUARMState, cp15.ttbr1_el1),
+ .writefn = vmsa_ttbr_write, .resetfn = arm_cp_reset_ignore },
REGINFO_SENTINEL
};
uint64_t value)
{
/* Invalidate by VA (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
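+ /* The argument register holds VA[55:12] in its bottom bits, so
+ * shifting it left by 12 recovers the page-aligned address.
+ */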
uint64_t pageaddr = value << 12;
- tlb_flush_page(env, pageaddr);
+ tlb_flush_page(CPU(cpu), pageaddr);
}
static void tlbi_aa64_vaa_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by VA, all ASIDs (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
uint64_t pageaddr = value << 12;
- tlb_flush_page(env, pageaddr);
+ tlb_flush_page(CPU(cpu), pageaddr);
}
static void tlbi_aa64_asid_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
/* Invalidate by ASID (AArch64 version) */
+ ARMCPU *cpu = arm_env_get_cpu(env);
int asid = extract64(value, 48, 16);
- tlb_flush(env, asid == 0);
+ tlb_flush(CPU(cpu), asid == 0);
}
static const ARMCPRegInfo v8_cp_reginfo[] = {
.opc0 = 1, .opc1 = 0, .crn = 8, .crm = 7, .opc2 = 7,
.access = PL1_W, .type = ARM_CP_NO_MIGRATE,
.writefn = tlbi_aa64_vaa_write },
+ /* Dummy implementation of monitor debug system control register:
+ * we don't support debug.
+ */
+ { .name = "MDSCR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = 2, .opc2 = 2,
+ .access = PL1_RW, .type = ARM_CP_CONST, .resetvalue = 0 },
+ /* We define a dummy WI OSLAR_EL1, because Linux writes to it. */
+ { .name = "OSLAR_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 1, .crm = 0, .opc2 = 4,
+ .access = PL1_W, .type = ARM_CP_NOP },
REGINFO_SENTINEL
};
static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
env->cp15.c1_sys = value;
/* ??? Lots of these bits are not implemented. */
/* This may enable/disable the MMU, so do a TLB flush. */
- tlb_flush(env, 1);
+ tlb_flush(CPU(cpu), 1);
}
static CPAccessResult ctr_el0_access(CPUARMState *env, const ARMCPRegInfo *ri)
return CP_ACCESS_OK;
}
+static void define_aarch64_debug_regs(ARMCPU *cpu)
+{
+ /* Define breakpoint and watchpoint registers. These do nothing
+ * but read as written, for now.
+ */
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ ARMCPRegInfo dbgregs[] = {
+ { .name = "DBGBVR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]) },
+ { .name = "DBGBCR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]) },
+ { .name = "DBGWVR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]) },
+ { .name = "DBGWCR", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
+ .access = PL1_RW,
+ .fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]) },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, dbgregs);
+ }
+}
+
void register_cp_regs_for_features(ARMCPU *cpu)
{
/* Register all the coprocessor registers based on feature bits */
}
if (arm_feature(env, ARM_FEATURE_V7)) {
/* v7 performance monitor control register: same implementor
- * field as main ID register, and we implement no event counters.
+ * field as main ID register, and we implement only the cycle
+ * count register.
*/
+#ifndef CONFIG_USER_ONLY
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW, .resetvalue = cpu->midr & 0xff000000,
.accessfn = pmreg_access, .writefn = pmcr_write,
.raw_writefn = raw_write,
};
+ define_one_arm_cp_reg(cpu, &pmcr);
+#endif
ARMCPRegInfo clidr = {
.name = "CLIDR", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .crn = 0, .crm = 0, .opc1 = 1, .opc2 = 1,
.access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->clidr
};
- define_one_arm_cp_reg(cpu, &pmcr);
define_one_arm_cp_reg(cpu, &clidr);
define_arm_cp_regs(cpu, v7_cp_reginfo);
} else {
define_arm_cp_regs(cpu, not_v7_cp_reginfo);
}
if (arm_feature(env, ARM_FEATURE_V8)) {
+ /* AArch64 ID registers, which all have impdef reset values */
+ ARMCPRegInfo v8_idregs[] = {
+ { .name = "ID_AA64PFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64pfr0 },
+ { .name = "ID_AA64PFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 4, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64pfr1},
+ { .name = "ID_AA64DFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64dfr0 },
+ { .name = "ID_AA64DFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64dfr1 },
+ { .name = "ID_AA64AFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 4,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64afr0 },
+ { .name = "ID_AA64AFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 5, .opc2 = 5,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64afr1 },
+ { .name = "ID_AA64ISAR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64isar0 },
+ { .name = "ID_AA64ISAR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 6, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64isar1 },
+ { .name = "ID_AA64MMFR0_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64mmfr0 },
+ { .name = "ID_AA64MMFR1_EL1", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 1,
+ .access = PL1_R, .type = ARM_CP_CONST,
+ .resetvalue = cpu->id_aa64mmfr1 },
+ REGINFO_SENTINEL
+ };
+ define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
+ define_aarch64_debug_regs(cpu);
}
if (arm_feature(env, ARM_FEATURE_MPU)) {
/* These are the MPU registers prior to PMSAv6. Any new
/* Generic registers whose values depend on the implementation */
{
ARMCPRegInfo sctlr = {
- .name = "SCTLR", .cp = 15, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
+ .name = "SCTLR", .state = ARM_CP_STATE_BOTH,
+ .opc0 = 3, .crn = 1, .crm = 0, .opc1 = 0, .opc2 = 0,
.access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.c1_sys),
.writefn = sctlr_write, .resetvalue = cpu->reset_sctlr,
.raw_writefn = raw_write,
ARMCPU *cpu_arm_init(const char *cpu_model)
{
- ARMCPU *cpu;
- ObjectClass *oc;
-
- oc = cpu_class_by_name(TYPE_ARM_CPU, cpu_model);
- if (!oc) {
- return NULL;
- }
- cpu = ARM_CPU(object_new(object_class_get_name(oc)));
-
- /* TODO this should be set centrally, once possible */
- object_property_set_bool(OBJECT(cpu), true, "realized", NULL);
-
- return cpu;
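+ /* cpu_generic_init() handles the class lookup and realization of the
+ * new CPU object for us.
+ */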
+ return ARM_CPU(cpu_generic_init(TYPE_ARM_CPU, cpu_model));
}
void arm_cpu_register_gdb_regs_for_features(ARMCPU *cpu)
(env->CF << 29) | ((env->VF & 0x80000000) >> 3) | (env->QF << 27)
| (env->thumb << 5) | ((env->condexec_bits & 3) << 25)
| ((env->condexec_bits & 0xfc) << 8)
- | (env->GE << 16);
+ | (env->GE << 16) | (env->daif & CPSR_AIF);
}
void cpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
env->GE = (val >> 16) & 0xf;
}
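+ /* The A, I and F mask bits are kept in env->daif (the PSTATE view of
+ * the exception mask bits), not in uncached_cpsr.
+ */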
+ env->daif &= ~(CPSR_AIF & mask);
+ env->daif |= val & CPSR_AIF & mask;
+
if ((env->uncached_cpsr ^ val) & mask & CPSR_M) {
if (bad_mode_switch(env, val & CPSR_M)) {
/* Attempt to switch to an invalid mode: this is UNPREDICTABLE.
void arm_cpu_do_interrupt(CPUState *cs)
{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- env->exception_index = -1;
+ cs->exception_index = -1;
}
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address, int rw,
- int mmu_idx)
+int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int rw,
+ int mmu_idx)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
if (rw == 2) {
- env->exception_index = EXCP_PREFETCH_ABORT;
+ cs->exception_index = EXCP_PREFETCH_ABORT;
env->cp15.c6_insn = address;
} else {
- env->exception_index = EXCP_DATA_ABORT;
+ cs->exception_index = EXCP_DATA_ABORT;
env->cp15.c6_data = address;
}
return 1;
/* These should probably raise undefined insn exceptions. */
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
- cpu_abort(env, "v7m_mrs %d\n", reg);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_msr %d\n", reg);
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
- cpu_abort(env, "v7m_mrs %d\n", reg);
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "v7m_mrs %d\n", reg);
return 0;
}
void switch_mode(CPUARMState *env, int mode)
{
- if (mode != ARM_CPU_MODE_USR)
- cpu_abort(env, "Tried to switch out of user mode\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ if (mode != ARM_CPU_MODE_USR) {
+ cpu_abort(CPU(cpu), "Tried to switch out of user mode\n");
+ }
}
void HELPER(set_r13_banked)(CPUARMState *env, uint32_t mode, uint32_t val)
{
- cpu_abort(env, "banked r13 write\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "banked r13 write\n");
}
uint32_t HELPER(get_r13_banked)(CPUARMState *env, uint32_t mode)
{
- cpu_abort(env, "banked r13 read\n");
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
+ cpu_abort(CPU(cpu), "banked r13 read\n");
return 0;
}
static void v7m_push(CPUARMState *env, uint32_t val)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
+
env->regs[13] -= 4;
stl_phys(cs->as, env->regs[13], val);
}
static uint32_t v7m_pop(CPUARMState *env)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
uint32_t val;
+
val = ldl_phys(cs->as, env->regs[13]);
env->regs[13] += 4;
return val;
uint32_t lr;
uint32_t addr;
- arm_log_exception(env->exception_index);
+ arm_log_exception(cs->exception_index);
lr = 0xfffffff1;
if (env->v7m.current_sp)
handle it. */
/* TODO: Need to escalate if the current priority is higher than the
one we're raising. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_USAGE);
return;
do_v7m_exception_exit(env);
return;
default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
assert(!IS_M(env));
- arm_log_exception(env->exception_index);
+ arm_log_exception(cs->exception_index);
/* TODO: Vectored interrupt controller. */
- switch (env->exception_index) {
+ switch (cs->exception_index) {
case EXCP_UDEF:
new_mode = ARM_CPU_MODE_UND;
addr = 0x04;
offset = 4;
break;
default:
- cpu_abort(env, "Unhandled exception 0x%x\n", env->exception_index);
+ cpu_abort(cs, "Unhandled exception 0x%x\n", cs->exception_index);
return; /* Never happens. Keep compiler happy. */
}
/* High vectors. */
env->condexec_bits = 0;
/* Switch to the new mode, and to the correct instruction set. */
env->uncached_cpsr = (env->uncached_cpsr & ~CPSR_M) | new_mode;
- env->uncached_cpsr |= mask;
+ env->daif |= mask;
/* this is a lie, as there was no c1_sys on V4T/V5, but who cares
* and we should just guard the thumb mode on V4 */
if (arm_feature(env, ARM_FEATURE_V4T)) {
uint32_t table;
if (address & env->cp15.c2_mask)
- table = env->cp15.c2_base1 & 0xffffc000;
+ table = env->cp15.ttbr1_el1 & 0xffffc000;
else
- table = env->cp15.c2_base0 & env->cp15.c2_base_mask;
+ table = env->cp15.ttbr0_el1 & env->cp15.c2_base_mask;
table |= (address >> 18) & 0x3ffc;
return table;
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
uint32_t table;
uint32_t desc;
int is_user, hwaddr *phys_ptr,
int *prot, target_ulong *page_size)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
int code;
uint32_t table;
uint32_t desc;
hwaddr *phys_ptr, int *prot,
target_ulong *page_size_ptr)
{
- CPUState *cs = ENV_GET_CPU(env);
+ CPUState *cs = CPU(arm_env_get_cpu(env));
/* Read an LPAE long-descriptor translation table. */
MMUFaultType fault_type = translation_fault;
uint32_t level = 1;
* we will always flush the TLB any time the ASID is changed).
*/
if (ttbr_select == 0) {
- ttbr = ((uint64_t)env->cp15.c2_base0_hi << 32) | env->cp15.c2_base0;
+ ttbr = env->cp15.ttbr0_el1;
epd = extract32(env->cp15.c2_control, 7, 1);
tsz = t0sz;
} else {
- ttbr = ((uint64_t)env->cp15.c2_base1_hi << 32) | env->cp15.c2_base1;
+ ttbr = env->cp15.ttbr1_el1;
epd = extract32(env->cp15.c2_control, 23, 1);
tsz = t1sz;
}
}
}
-int cpu_arm_handle_mmu_fault (CPUARMState *env, target_ulong address,
- int access_type, int mmu_idx)
+int arm_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
+ int access_type, int mmu_idx)
{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
hwaddr phys_addr;
target_ulong page_size;
int prot;
/* Map a single [sub]page. */
phys_addr &= ~(hwaddr)0x3ff;
address &= ~(uint32_t)0x3ff;
- tlb_set_page (env, address, phys_addr, prot, mmu_idx, page_size);
+ tlb_set_page(cs, address, phys_addr, prot, mmu_idx, page_size);
return 0;
}
if (access_type == 2) {
env->cp15.c5_insn = ret;
env->cp15.c6_insn = address;
- env->exception_index = EXCP_PREFETCH_ABORT;
+ cs->exception_index = EXCP_PREFETCH_ABORT;
} else {
env->cp15.c5_data = ret;
if (access_type == 1 && arm_feature(env, ARM_FEATURE_V6))
env->cp15.c5_data |= (1 << 11);
env->cp15.c6_data = address;
- env->exception_index = EXCP_DATA_ABORT;
+ cs->exception_index = EXCP_DATA_ABORT;
}
return 1;
}
uint32_t HELPER(v7m_mrs)(CPUARMState *env, uint32_t reg)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
switch (reg) {
case 0: /* APSR */
return xpsr_read(env) & 0xf8000000;
case 9: /* PSP */
return env->v7m.current_sp ? env->regs[13] : env->v7m.other_sp;
case 16: /* PRIMASK */
- return (env->uncached_cpsr & CPSR_I) != 0;
+ return (env->daif & PSTATE_I) != 0;
case 17: /* BASEPRI */
case 18: /* BASEPRI_MAX */
return env->v7m.basepri;
case 19: /* FAULTMASK */
- return (env->uncached_cpsr & CPSR_F) != 0;
+ return (env->daif & PSTATE_F) != 0;
case 20: /* CONTROL */
return env->v7m.control;
default:
/* ??? For debugging only. */
- cpu_abort(env, "Unimplemented system register read (%d)\n", reg);
+ cpu_abort(CPU(cpu), "Unimplemented system register read (%d)\n", reg);
return 0;
}
}
void HELPER(v7m_msr)(CPUARMState *env, uint32_t reg, uint32_t val)
{
+ ARMCPU *cpu = arm_env_get_cpu(env);
+
switch (reg) {
case 0: /* APSR */
xpsr_write(env, val, 0xf8000000);
env->v7m.other_sp = val;
break;
case 16: /* PRIMASK */
- if (val & 1)
- env->uncached_cpsr |= CPSR_I;
- else
- env->uncached_cpsr &= ~CPSR_I;
+ if (val & 1) {
+ env->daif |= PSTATE_I;
+ } else {
+ env->daif &= ~PSTATE_I;
+ }
break;
case 17: /* BASEPRI */
env->v7m.basepri = val & 0xff;
env->v7m.basepri = val;
break;
case 19: /* FAULTMASK */
- if (val & 1)
- env->uncached_cpsr |= CPSR_F;
- else
- env->uncached_cpsr &= ~CPSR_F;
+ if (val & 1) {
+ env->daif |= PSTATE_F;
+ } else {
+ env->daif &= ~PSTATE_F;
+ }
break;
case 20: /* CONTROL */
env->v7m.control = val & 3;
break;
default:
/* ??? For debugging only. */
- cpu_abort(env, "Unimplemented system register write (%d)\n", reg);
+ cpu_abort(CPU(cpu), "Unimplemented system register write (%d)\n", reg);
return;
}
}
}
return rmode;
}
+
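+/* Lay out the low 'bytes' bytes of 'val' in a little-endian buffer for
+ * the byte-oriented CRC routines below.
+ */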
+static void crc_init_buffer(uint8_t *buf, uint32_t val, uint32_t bytes)
+{
+ memset(buf, 0, 4);
+
+ if (bytes == 1) {
+ buf[0] = val & 0xff;
+ } else if (bytes == 2) {
+ buf[0] = val & 0xff;
+ buf[1] = (val >> 8) & 0xff;
+ } else {
+ buf[0] = val & 0xff;
+ buf[1] = (val >> 8) & 0xff;
+ buf[2] = (val >> 16) & 0xff;
+ buf[3] = (val >> 24) & 0xff;
+ }
+}
+
+uint32_t HELPER(crc32)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ crc_init_buffer(buf, val, bytes);
+
+ /* zlib crc32 converts the accumulator and output to one's complement. */
+ return crc32(acc ^ 0xffffffff, buf, bytes) ^ 0xffffffff;
+}
+
+uint32_t HELPER(crc32c)(uint32_t acc, uint32_t val, uint32_t bytes)
+{
+ uint8_t buf[4];
+
+ crc_init_buffer(buf, val, bytes);
+
+ /* Linux crc32c converts the output to one's complement. */
+ return crc32c(acc, buf, bytes) ^ 0xffffffff;
+}