}
}
-void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
+static void do_armv7m_nvic_set_pending(void *opaque, int irq, bool secure,
+ bool derived)
{
+ /* Pend an exception, including possibly escalating it to HardFault.
+ *
+ * This function handles both "normal" pending of interrupts and
+ * exceptions, and also derived exceptions (ones which occur as
+ * a result of trying to take some other exception).
+ *
+ * If derived == true, the caller guarantees that we are part way through
+ * trying to take an exception (but have not yet called
+ * armv7m_nvic_acknowledge_irq() to make it active), and so:
+ * - s->vectpending is the "original exception" we were trying to take
+ * - irq is the "derived exception"
+ * - nvic_exec_prio(s) gives the priority before exception entry
+ * Here we handle the prioritization logic which the pseudocode puts
+ * in the DerivedLateArrival() function.
+ */
+
NVICState *s = (NVICState *)opaque;
bool banked = exc_is_banked(irq);
VecInfo *vec;
vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];
- trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio);
+ trace_nvic_set_pending(irq, secure, derived, vec->enabled, vec->prio);
+
+ if (derived) {
+ /* Derived exceptions are always synchronous. */
+ assert(irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV);
+
+ if (irq == ARMV7M_EXCP_DEBUG &&
+ exc_group_prio(s, vec->prio, secure) >= nvic_exec_prio(s)) {
+ /* DebugMonitorFault, but its priority is lower than the
+ * preempted exception priority: just ignore it.
+ */
+ return;
+ }
+
+ if (irq == ARMV7M_EXCP_HARD && vec->prio >= s->vectpending_prio) {
+ /* If this is a terminal exception (one which means we cannot
+ * take the original exception, like a failure to read its
+ * vector table entry), then we must take the derived exception.
+ * If the derived exception can't take priority over the
+ * original exception, then we go into Lockup.
+ *
+ * For QEMU, we rely on the fact that a derived exception is
+ * terminal if and only if it's reported to us as HardFault,
+ * which saves us having to pass an extra is_terminal argument
+ * that we'd only use in one place.
+ */
+ cpu_abort(&s->cpu->parent_obj,
+ "Lockup: can't take terminal derived exception "
+ "(original exception priority %d)\n",
+ s->vectpending_prio);
+ }
+ /* We now continue with the same code as for a normal pending
+ * exception, which will cause us to pend the derived exception.
+ * We'll then take either the original or the derived exception
+ * based on which is higher priority by the usual mechanism
+ * for selecting the highest priority pending interrupt.
+ */
+ }
if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
/* If a synchronous exception is pending then it may be
}
}
+void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
+{
+ do_armv7m_nvic_set_pending(opaque, irq, secure, false);
+}
+
+void armv7m_nvic_set_pending_derived(void *opaque, int irq, bool secure)
+{
+ do_armv7m_nvic_set_pending(opaque, irq, secure, true);
+}
+
/* Make pending IRQ active. */
-bool armv7m_nvic_acknowledge_irq(void *opaque)
+void armv7m_nvic_acknowledge_irq(void *opaque)
{
NVICState *s = (NVICState *)opaque;
CPUARMState *env = &s->cpu->env;
const int pending = s->vectpending;
const int running = nvic_exec_prio(s);
VecInfo *vec;
- bool targets_secure;
assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
if (s->vectpending_is_s_banked) {
vec = &s->sec_vectors[pending];
- targets_secure = true;
} else {
vec = &s->vectors[pending];
- targets_secure = !exc_is_banked(s->vectpending) &&
- exc_targets_secure(s, s->vectpending);
}
assert(vec->enabled);
assert(s->vectpending_prio < running);
- trace_nvic_acknowledge_irq(pending, s->vectpending_prio, targets_secure);
+ trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
vec->active = 1;
vec->pending = 0;
write_v7m_exception(env, s->vectpending);
nvic_irq_update(s);
+}
+
+void armv7m_nvic_get_pending_irq_info(void *opaque,
+ int *pirq, bool *ptargets_secure)
+{
+ NVICState *s = (NVICState *)opaque;
+ const int pending = s->vectpending;
+ bool targets_secure;
- return targets_secure;
+ assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
+
+ if (s->vectpending_is_s_banked) {
+ targets_secure = true;
+ } else {
+ targets_secure = !exc_is_banked(pending) &&
+ exc_targets_secure(s, pending);
+ }
+
+ trace_nvic_get_pending_irq_info(pending, targets_secure);
+
+ *ptargets_secure = targets_secure;
+ *pirq = pending;
}
int armv7m_nvic_complete_irq(void *opaque, int irq, bool secure)
switch (offset) {
case 4: /* Interrupt Control Type. */
return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
+ case 0xc: /* CPPWR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ /* We make the IMPDEF choice that nothing can ever go into a
+ * non-retentive power state, which allows us to RAZ/WI this.
+ */
+ return 0;
case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
{
int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
}
}
/* NMIPENDSET */
- if ((cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) &&
- s->vectors[ARMV7M_EXCP_NMI].pending) {
+ if ((attrs.secure || (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))
+ && s->vectors[ARMV7M_EXCP_NMI].pending) {
val |= (1 << 31);
}
/* ISRPREEMPT: RES0 when halting debug not implemented */
}
return val;
case 0xd10: /* System Control. */
- /* TODO: Implement SLEEPONEXIT. */
- return 0;
+ return cpu->env.v7m.scr[attrs.secure];
case 0xd14: /* Configuration Control. */
/* The BFHFNMIGN bit is the only non-banked bit; we
* keep it in the non-secure copy of the register.
val |= (1 << 8);
}
return val;
- case 0xd28: /* Configurable Fault Status. */
- /* The BFSR bits [15:8] are shared between security states
- * and we store them in the NS copy
- */
- val = cpu->env.v7m.cfsr[attrs.secure];
- val |= cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
- return val;
case 0xd2c: /* Hard Fault Status. */
return cpu->env.v7m.hfsr;
case 0xd30: /* Debug Fault Status. */
"Aux Fault status registers unimplemented\n");
return 0;
case 0xd40: /* PFR0. */
- return 0x00000030;
- case 0xd44: /* PRF1. */
- return 0x00000200;
+ return cpu->id_pfr0;
+ case 0xd44: /* PFR1. */
+ return cpu->id_pfr1;
case 0xd48: /* DFR0. */
- return 0x00100000;
+ return cpu->id_dfr0;
case 0xd4c: /* AFR0. */
- return 0x00000000;
+ return cpu->id_afr0;
case 0xd50: /* MMFR0. */
- return 0x00000030;
+ return cpu->id_mmfr0;
case 0xd54: /* MMFR1. */
- return 0x00000000;
+ return cpu->id_mmfr1;
case 0xd58: /* MMFR2. */
- return 0x00000000;
+ return cpu->id_mmfr2;
case 0xd5c: /* MMFR3. */
- return 0x00000000;
+ return cpu->id_mmfr3;
case 0xd60: /* ISAR0. */
- return 0x01141110;
+ return cpu->id_isar0;
case 0xd64: /* ISAR1. */
- return 0x02111000;
+ return cpu->id_isar1;
case 0xd68: /* ISAR2. */
- return 0x21112231;
+ return cpu->id_isar2;
case 0xd6c: /* ISAR3. */
- return 0x01111110;
+ return cpu->id_isar3;
case 0xd70: /* ISAR4. */
- return 0x01310102;
+ return cpu->id_isar4;
+ case 0xd74: /* ISAR5. */
+ return cpu->id_isar5;
+ case 0xd78: /* CLIDR */
+ return cpu->clidr;
+ case 0xd7c: /* CTR */
+ return cpu->ctr;
+ case 0xd80: /* CCSIDR */
+ {
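+ /* Return the CCSIDR of the cache selected by the banked CSSELR */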
+ int idx = cpu->env.v7m.csselr[attrs.secure] & R_V7M_CSSELR_INDEX_MASK;
+ return cpu->ccsidr[idx];
+ }
+ case 0xd84: /* CSSELR */
+ return cpu->env.v7m.csselr[attrs.secure];
/* TODO: Implement debug registers. */
case 0xd90: /* MPU_TYPE */
/* Unified MPU; if the MPU is not present this value is zero */
ARMCPU *cpu = s->cpu;
switch (offset) {
+ case 0xc: /* CPPWR */
+ if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
+ goto bad_offset;
+ }
+ /* Make the IMPDEF choice to RAZ/WI this. */
+ break;
case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
{
int startvec = 8 * (offset - 0x380) + NVIC_FIRST_IRQ;
break;
}
case 0xd04: /* Interrupt Control State (ICSR) */
- if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
+ if (attrs.secure || cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
if (value & (1 << 31)) {
armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
} else if (value & (1 << 30) &&
}
break;
case 0xd10: /* System Control. */
- /* TODO: Implement control registers. */
- qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n");
+ /* We don't implement deep-sleep so these bits are RAZ/WI.
+ * The other bits in the register are banked.
+ * QEMU's implementation ignores SEVONPEND and SLEEPONEXIT, which
+ * is architecturally permitted.
+ */
+ value &= ~(R_V7M_SCR_SLEEPDEEP_MASK | R_V7M_SCR_SLEEPDEEPS_MASK);
+ cpu->env.v7m.scr[attrs.secure] = value;
break;
case 0xd14: /* Configuration Control. */
/* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
nvic_irq_update(s);
break;
- case 0xd28: /* Configurable Fault Status. */
- cpu->env.v7m.cfsr[attrs.secure] &= ~value; /* W1C */
- if (attrs.secure) {
- /* The BFSR bits [15:8] are shared between security states
- * and we store them in the NS copy.
- */
- cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
- }
- break;
case 0xd2c: /* Hard Fault Status. */
cpu->env.v7m.hfsr &= ~value; /* W1C */
break;
qemu_log_mask(LOG_UNIMP,
"NVIC: Aux fault status registers unimplemented\n");
break;
+ case 0xd84: /* CSSELR */
+ if (!arm_v7m_csselr_razwi(cpu)) {
+ cpu->env.v7m.csselr[attrs.secure] = value & R_V7M_CSSELR_INDEX_MASK;
+ }
+ break;
case 0xd90: /* MPU_TYPE */
return; /* RO */
case 0xd94: /* MPU_CTRL */
}
break;
}
+ case 0xf50: /* ICIALLU */
+ case 0xf58: /* ICIMVAU */
+ case 0xf5c: /* DCIMVAC */
+ case 0xf60: /* DCISW */
+ case 0xf64: /* DCCMVAU */
+ case 0xf68: /* DCCMVAC */
+ case 0xf6c: /* DCCSW */
+ case 0xf70: /* DCCIMVAC */
+ case 0xf74: /* DCCISW */
+ case 0xf78: /* BPIALL */
+ /* Cache and branch predictor maintenance: for QEMU these always NOP */
+ break;
default:
bad_offset:
qemu_log_mask(LOG_GUEST_ERROR,
/* fall through */
case 0x180 ... 0x1bf: /* NVIC Clear enable */
val = 0;
- startvec = offset - 0x180 + NVIC_FIRST_IRQ; /* vector # */
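+ /* One bit per interrupt: each byte of register space covers 8 vectors */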
+ startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
if (s->vectors[startvec + i].enabled &&
/* fall through */
case 0x280 ... 0x2bf: /* NVIC Clear pend */
val = 0;
- startvec = offset - 0x280 + NVIC_FIRST_IRQ; /* vector # */
+ startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
if (s->vectors[startvec + i].pending &&
(attrs.secure || s->itns[startvec + i])) {
break;
case 0x300 ... 0x33f: /* NVIC Active */
val = 0;
- startvec = offset - 0x300 + NVIC_FIRST_IRQ; /* vector # */
+ startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
if (s->vectors[startvec + i].active &&
val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
}
break;
+ case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ /* The BFSR bits [15:8] are shared between security states
+ * and we store them in the NS copy.
+ */
+ val = s->cpu->env.v7m.cfsr[attrs.secure];
+ val |= s->cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
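+ /* Return only the bytes covered by this (possibly sub-word) access */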
+ val = extract32(val, (offset - 0xd28) * 8, size * 8);
+ break;
case 0xfe0 ... 0xfff: /* ID. */
if (offset & 3) {
val = 0;
case 0x300 ... 0x33f: /* NVIC Active */
return MEMTX_OK; /* R/O */
case 0x400 ... 0x5ef: /* NVIC Priority */
- startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
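+ /* One byte per interrupt: the byte offset maps directly to the vector number */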
+ startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
for (i = 0; i < size && startvec + i < s->num_irq; i++) {
if (attrs.secure || s->itns[startvec + i]) {
}
nvic_irq_update(s);
return MEMTX_OK;
+ case 0xd28 ... 0xd2b: /* Configurable Fault Status (CFSR) */
+ /* All bits are W1C, so construct a 32-bit value with 0s in
+ * the parts not covered by this (possibly sub-word) access
+ */
+ value <<= ((offset - 0xd28) * 8);
+
+ s->cpu->env.v7m.cfsr[attrs.secure] &= ~value;
+ if (attrs.secure) {
+ /* The BFSR bits [15:8] are shared between security states
+ * and we store them in the NS copy.
+ */
+ s->cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
+ }
+ return MEMTX_OK;
}
if (size == 4) {
nvic_writel(s, offset, value, attrs);