nvic: Handle v8M changes in nvic_exec_prio()
[qemu.git] / hw / intc / armv7m_nvic.c
CommitLineData
9ee6e8bb
PB
1/*
2 * ARM Nested Vectored Interrupt Controller
3 *
4 * Copyright (c) 2006-2007 CodeSourcery.
5 * Written by Paul Brook
6 *
8e31bf38 7 * This code is licensed under the GPL.
9ee6e8bb
PB
8 *
9 * The ARMv7M System controller is fairly tightly tied in with the
10 * NVIC. Much of that is also implemented here.
11 */
12
8ef94f0b 13#include "qemu/osdep.h"
da34e65c 14#include "qapi/error.h"
4771d756 15#include "qemu-common.h"
33c11879 16#include "cpu.h"
83c9f4ca 17#include "hw/sysbus.h"
1de7afc9 18#include "qemu/timer.h"
bd2be150 19#include "hw/arm/arm.h"
d2db1de6 20#include "hw/intc/armv7m_nvic.h"
da6d674e 21#include "target/arm/cpu.h"
29c483a5 22#include "exec/exec-all.h"
03dd024f 23#include "qemu/log.h"
da6d674e
MD
24#include "trace.h"
25
26/* IRQ number counting:
27 *
28 * the num-irq property counts the number of external IRQ lines
29 *
30 * NVICState::num_irq counts the total number of exceptions
31 * (external IRQs, the 15 internal exceptions including reset,
32 * and one for the unused exception number 0).
33 *
34 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
35 *
36 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
37 *
38 * Iterating through all exceptions should typically be done with
39 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
40 *
41 * The external qemu_irq lines are the NVIC's external IRQ lines,
42 * so line 0 is exception 16.
43 *
44 * In the terminology of the architecture manual, "interrupts" are
45 * a subcategory of exception referring to the external interrupts
46 * (which are exception numbers NVIC_FIRST_IRQ and upward).
47 * For historical reasons QEMU tends to use "interrupt" and
48 * "exception" more or less interchangeably.
49 */
17906a16 50#define NVIC_FIRST_IRQ NVIC_INTERNAL_VECTORS
da6d674e
MD
51#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)
52
53/* Effective running priority of the CPU when no exception is active
54 * (higher than the highest possible priority value)
55 */
56#define NVIC_NOEXC_PRIO 0x100
ff96c64a
PM
57/* Maximum priority of non-secure exceptions when AIRCR.PRIS is set */
58#define NVIC_NS_PRIO_LIMIT 0x80
da6d674e 59
2a29ddee
PM
/* ID register values exposed in the NVIC's identification register space
 * (PrimeCell/CoreSight-style component ID bytes; presumably read out by
 * the MMIO ID-register handler — not visible in this chunk).
 */
static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};
63
da6d674e
MD
64static int nvic_pending_prio(NVICState *s)
65{
5255fcf8 66 /* return the group priority of the current pending interrupt,
da6d674e
MD
67 * or NVIC_NOEXC_PRIO if no interrupt is pending
68 */
5255fcf8 69 return s->vectpending_prio;
da6d674e
MD
70}
71
72/* Return the value of the ISCR RETTOBASE bit:
73 * 1 if there is exactly one active exception
74 * 0 if there is more than one active exception
75 * UNKNOWN if there are no active exceptions (we choose 1,
76 * which matches the choice Cortex-M3 is documented as making).
77 *
78 * NB: some versions of the documentation talk about this
79 * counting "active exceptions other than the one shown by IPSR";
80 * this is only different in the obscure corner case where guest
81 * code has manually deactivated an exception and is about
82 * to fail an exception-return integrity check. The definition
83 * above is the one from the v8M ARM ARM and is also in line
84 * with the behaviour documented for the Cortex-M3.
85 */
86static bool nvic_rettobase(NVICState *s)
87{
88 int irq, nhand = 0;
028b0da4 89 bool check_sec = arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
da6d674e
MD
90
91 for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
028b0da4
PM
92 if (s->vectors[irq].active ||
93 (check_sec && irq < NVIC_INTERNAL_VECTORS &&
94 s->sec_vectors[irq].active)) {
da6d674e
MD
95 nhand++;
96 if (nhand == 2) {
97 return 0;
98 }
99 }
100 }
101
102 return 1;
103}
104
105/* Return the value of the ISCR ISRPENDING bit:
106 * 1 if an external interrupt is pending
107 * 0 if no external interrupt is pending
108 */
109static bool nvic_isrpending(NVICState *s)
110{
111 int irq;
112
113 /* We can shortcut if the highest priority pending interrupt
114 * happens to be external or if there is nothing pending.
115 */
116 if (s->vectpending > NVIC_FIRST_IRQ) {
117 return true;
118 }
119 if (s->vectpending == 0) {
120 return false;
121 }
122
123 for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
124 if (s->vectors[irq].pending) {
125 return true;
126 }
127 }
128 return false;
129}
130
ff96c64a
PM
131static bool exc_is_banked(int exc)
132{
133 /* Return true if this is one of the limited set of exceptions which
134 * are banked (and thus have state in sec_vectors[])
135 */
136 return exc == ARMV7M_EXCP_HARD ||
137 exc == ARMV7M_EXCP_MEM ||
138 exc == ARMV7M_EXCP_USAGE ||
139 exc == ARMV7M_EXCP_SVC ||
140 exc == ARMV7M_EXCP_PENDSV ||
141 exc == ARMV7M_EXCP_SYSTICK;
142}
143
da6d674e
MD
144/* Return a mask word which clears the subpriority bits from
145 * a priority value for an M-profile exception, leaving only
146 * the group priority.
147 */
ff96c64a 148static inline uint32_t nvic_gprio_mask(NVICState *s, bool secure)
da6d674e 149{
ff96c64a
PM
150 return ~0U << (s->prigroup[secure] + 1);
151}
152
153static bool exc_targets_secure(NVICState *s, int exc)
154{
155 /* Return true if this non-banked exception targets Secure state. */
156 if (!arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
157 return false;
158 }
159
160 if (exc >= NVIC_FIRST_IRQ) {
161 return !s->itns[exc];
162 }
163
164 /* Function shouldn't be called for banked exceptions. */
165 assert(!exc_is_banked(exc));
166
167 switch (exc) {
168 case ARMV7M_EXCP_NMI:
169 case ARMV7M_EXCP_BUS:
170 return !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK);
171 case ARMV7M_EXCP_SECURE:
172 return true;
173 case ARMV7M_EXCP_DEBUG:
174 /* TODO: controlled by DEMCR.SDME, which we don't yet implement */
175 return false;
176 default:
177 /* reset, and reserved (unused) low exception numbers.
178 * We'll get called by code that loops through all the exception
179 * numbers, but it doesn't matter what we return here as these
180 * non-existent exceptions will never be pended or active.
181 */
182 return true;
183 }
184}
185
186static int exc_group_prio(NVICState *s, int rawprio, bool targets_secure)
187{
188 /* Return the group priority for this exception, given its raw
189 * (group-and-subgroup) priority value and whether it is targeting
190 * secure state or not.
191 */
192 if (rawprio < 0) {
193 return rawprio;
194 }
195 rawprio &= nvic_gprio_mask(s, targets_secure);
196 /* AIRCR.PRIS causes us to squash all NS priorities into the
197 * lower half of the total range
198 */
199 if (!targets_secure &&
200 (s->cpu->env.v7m.aircr & R_V7M_AIRCR_PRIS_MASK)) {
201 rawprio = (rawprio >> 1) + NVIC_NS_PRIO_LIMIT;
202 }
203 return rawprio;
204}
205
/* Recompute vectpending and exception_prio for a CPU which implements
 * the Security extension.
 */
static void nvic_recompute_state_secure(NVICState *s)
{
    int i, bank;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;
    bool pending_is_s_banked = false;

    /* R_CQRV: precedence is by:
     * - lowest group priority; if both the same then
     * - lowest subpriority; if both the same then
     * - lowest exception number; if both the same (ie banked) then
     * - secure exception takes precedence
     * Compare pseudocode RawExecutionPriority.
     * Annoyingly, now we have two prigroup values (for S and NS)
     * we can't do the loop comparison on raw priority values.
     */
    for (i = 1; i < s->num_irq; i++) {
        /* Visit the Secure bank first so that, with the strict '<'
         * comparisons below, a tie on group priority is won by the
         * Secure copy, matching the precedence rule above.
         */
        for (bank = M_REG_S; bank >= M_REG_NS; bank--) {
            VecInfo *vec;
            int prio;
            bool targets_secure;

            if (bank == M_REG_S) {
                /* Only banked exceptions have a Secure copy */
                if (!exc_is_banked(i)) {
                    continue;
                }
                vec = &s->sec_vectors[i];
                targets_secure = true;
            } else {
                vec = &s->vectors[i];
                targets_secure = !exc_is_banked(i) && exc_targets_secure(s, i);
            }

            /* Compare group priorities, since the two banks may use
             * different PRIGROUP settings.
             */
            prio = exc_group_prio(s, vec->prio, targets_secure);
            if (vec->enabled && vec->pending && prio < pend_prio) {
                pend_prio = prio;
                pend_irq = i;
                pending_is_s_banked = (bank == M_REG_S);
            }
            if (vec->active && prio < active_prio) {
                active_prio = prio;
            }
        }
    }

    s->vectpending_is_s_banked = pending_is_s_banked;
    s->vectpending = pend_irq;
    s->vectpending_prio = pend_prio;
    s->exception_prio = active_prio;

    trace_nvic_recompute_state_secure(s->vectpending,
                                      s->vectpending_is_s_banked,
                                      s->vectpending_prio,
                                      s->exception_prio);
}
265
266/* Recompute vectpending and exception_prio */
267static void nvic_recompute_state(NVICState *s)
268{
269 int i;
270 int pend_prio = NVIC_NOEXC_PRIO;
271 int active_prio = NVIC_NOEXC_PRIO;
272 int pend_irq = 0;
273
ff96c64a
PM
274 /* In theory we could write one function that handled both
275 * the "security extension present" and "not present"; however
276 * the security related changes significantly complicate the
277 * recomputation just by themselves and mixing both cases together
278 * would be even worse, so we retain a separate non-secure-only
279 * version for CPUs which don't implement the security extension.
280 */
281 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
282 nvic_recompute_state_secure(s);
283 return;
284 }
285
da6d674e
MD
286 for (i = 1; i < s->num_irq; i++) {
287 VecInfo *vec = &s->vectors[i];
288
289 if (vec->enabled && vec->pending && vec->prio < pend_prio) {
290 pend_prio = vec->prio;
291 pend_irq = i;
292 }
293 if (vec->active && vec->prio < active_prio) {
294 active_prio = vec->prio;
295 }
296 }
297
22a9c26a 298 if (active_prio > 0) {
ff96c64a 299 active_prio &= nvic_gprio_mask(s, false);
22a9c26a
PM
300 }
301
5255fcf8 302 if (pend_prio > 0) {
ff96c64a 303 pend_prio &= nvic_gprio_mask(s, false);
5255fcf8
PM
304 }
305
da6d674e 306 s->vectpending = pend_irq;
5255fcf8 307 s->vectpending_prio = pend_prio;
22a9c26a 308 s->exception_prio = active_prio;
da6d674e 309
5255fcf8
PM
310 trace_nvic_recompute_state(s->vectpending,
311 s->vectpending_prio,
312 s->exception_prio);
da6d674e
MD
313}
314
/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -3 (Secure FAULTMASK raised with
 * AIRCR.BFHFNMINS set) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running = NVIC_NOEXC_PRIO;

    /* BASEPRI values are raw priorities, so reduce them to group
     * priorities (which also applies the PRIS squashing for NS).
     * M_REG_NS/M_REG_S double as the targets_secure flag here.
     */
    if (env->v7m.basepri[M_REG_NS] > 0) {
        running = exc_group_prio(s, env->v7m.basepri[M_REG_NS], M_REG_NS);
    }

    if (env->v7m.basepri[M_REG_S] > 0) {
        int basepri = exc_group_prio(s, env->v7m.basepri[M_REG_S], M_REG_S);
        if (running > basepri) {
            running = basepri;
        }
    }

    /* NS PRIMASK boosts priority to 0, but if AIRCR.PRIS is set the
     * effect is capped at the NS priority limit.
     */
    if (env->v7m.primask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
            if (running > NVIC_NS_PRIO_LIMIT) {
                running = NVIC_NS_PRIO_LIMIT;
            }
        } else {
            running = 0;
        }
    }

    if (env->v7m.primask[M_REG_S]) {
        running = 0;
    }

    /* NS FAULTMASK gives -1 if BFHFNMINS has moved HardFault/NMI to
     * NS; otherwise it behaves like NS PRIMASK (with the PRIS cap).
     */
    if (env->v7m.faultmask[M_REG_NS]) {
        if (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
            running = -1;
        } else {
            if (env->v7m.aircr & R_V7M_AIRCR_PRIS_MASK) {
                if (running > NVIC_NS_PRIO_LIMIT) {
                    running = NVIC_NS_PRIO_LIMIT;
                }
            } else {
                running = 0;
            }
        }
    }

    if (env->v7m.faultmask[M_REG_S]) {
        running = (env->v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) ? -3 : -1;
    }

    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
370
7ecdaa4a
PM
371bool armv7m_nvic_can_take_pending_exception(void *opaque)
372{
373 NVICState *s = opaque;
374
375 return nvic_exec_prio(s) > nvic_pending_prio(s);
376}
377
42a6686b
PM
378int armv7m_nvic_raw_execution_priority(void *opaque)
379{
380 NVICState *s = opaque;
381
382 return s->exception_prio;
383}
384
e6a0d350
PM
385/* caller must call nvic_irq_update() after this.
386 * secure indicates the bank to use for banked exceptions (we assert if
387 * we are passed secure=true for a non-banked exception).
388 */
389static void set_prio(NVICState *s, unsigned irq, bool secure, uint8_t prio)
da6d674e
MD
390{
391 assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
392 assert(irq < s->num_irq);
393
e6a0d350
PM
394 if (secure) {
395 assert(exc_is_banked(irq));
396 s->sec_vectors[irq].prio = prio;
397 } else {
398 s->vectors[irq].prio = prio;
399 }
400
401 trace_nvic_set_prio(irq, secure, prio);
402}
403
404/* Return the current raw priority register value.
405 * secure indicates the bank to use for banked exceptions (we assert if
406 * we are passed secure=true for a non-banked exception).
407 */
408static int get_prio(NVICState *s, unsigned irq, bool secure)
409{
410 assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
411 assert(irq < s->num_irq);
da6d674e 412
e6a0d350
PM
413 if (secure) {
414 assert(exc_is_banked(irq));
415 return s->sec_vectors[irq].prio;
416 } else {
417 return s->vectors[irq].prio;
418 }
da6d674e
MD
419}
420
421/* Recompute state and assert irq line accordingly.
422 * Must be called after changes to:
423 * vec->active, vec->enabled, vec->pending or vec->prio for any vector
424 * prigroup
425 */
426static void nvic_irq_update(NVICState *s)
427{
428 int lvl;
429 int pend_prio;
430
431 nvic_recompute_state(s);
432 pend_prio = nvic_pending_prio(s);
433
434 /* Raise NVIC output if this IRQ would be taken, except that we
435 * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
436 * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
437 * to those CPU registers don't cause us to recalculate the NVIC
438 * pending info.
439 */
440 lvl = (pend_prio < s->exception_prio);
441 trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
442 qemu_set_irq(s->excpout, lvl);
443}
444
2fb50a33
PM
445/**
446 * armv7m_nvic_clear_pending: mark the specified exception as not pending
447 * @opaque: the NVIC
448 * @irq: the exception number to mark as not pending
449 * @secure: false for non-banked exceptions or for the nonsecure
450 * version of a banked exception, true for the secure version of a banked
451 * exception.
452 *
453 * Marks the specified exception as not pending. Note that we will assert()
454 * if @secure is true and @irq does not specify one of the fixed set
455 * of architecturally banked exceptions.
456 */
457static void armv7m_nvic_clear_pending(void *opaque, int irq, bool secure)
da6d674e
MD
458{
459 NVICState *s = (NVICState *)opaque;
460 VecInfo *vec;
461
462 assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
463
2fb50a33
PM
464 if (secure) {
465 assert(exc_is_banked(irq));
466 vec = &s->sec_vectors[irq];
467 } else {
468 vec = &s->vectors[irq];
469 }
470 trace_nvic_clear_pending(irq, secure, vec->enabled, vec->prio);
da6d674e
MD
471 if (vec->pending) {
472 vec->pending = 0;
473 nvic_irq_update(s);
474 }
475}
476
/* Mark the specified exception as pending. @secure selects the bank for
 * banked exceptions (we assert if secure=true for a non-banked one).
 * Synchronous exceptions which cannot be taken immediately are escalated
 * to HardFault; an escalation that itself cannot be taken is a Lockup.
 */
void armv7m_nvic_set_pending(void *opaque, int irq, bool secure)
{
    NVICState *s = (NVICState *)opaque;
    bool banked = exc_is_banked(irq);
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);
    assert(!secure || banked);

    vec = (banked && secure) ? &s->sec_vectors[irq] : &s->vectors[irq];

    trace_nvic_set_pending(irq, secure, vec->enabled, vec->prio);

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        /* Compare group priorities so PRIS squashing is applied */
        if (exc_group_prio(s, vec->prio, secure) >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {

            /* We need to escalate this exception to a synchronous HardFault.
             * If BFHFNMINS is set then we escalate to the banked HF for
             * the target security state of the original exception; otherwise
             * we take a Secure HardFault.
             */
            irq = ARMV7M_EXCP_HARD;
            if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY) &&
                (secure ||
                 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK))) {
                vec = &s->sec_vectors[irq];
            } else {
                vec = &s->vectors[irq];
            }
            if (running <= vec->prio) {
                /* We want to escalate to HardFault but we can't take the
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* HF may be banked but there is only one shared HFSR */
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
558
559/* Make pending IRQ active. */
a5d82355 560void armv7m_nvic_acknowledge_irq(void *opaque)
9ee6e8bb 561{
f797c075 562 NVICState *s = (NVICState *)opaque;
da6d674e
MD
563 CPUARMState *env = &s->cpu->env;
564 const int pending = s->vectpending;
565 const int running = nvic_exec_prio(s);
da6d674e
MD
566 VecInfo *vec;
567
568 assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);
569
570 vec = &s->vectors[pending];
571
572 assert(vec->enabled);
573 assert(vec->pending);
574
5255fcf8 575 assert(s->vectpending_prio < running);
da6d674e 576
5255fcf8 577 trace_nvic_acknowledge_irq(pending, s->vectpending_prio);
da6d674e
MD
578
579 vec->active = 1;
580 vec->pending = 0;
581
582 env->v7m.exception = s->vectpending;
583
584 nvic_irq_update(s);
9ee6e8bb
PB
585}
586
/* Complete (deactivate) the given exception on handler return.
 * Returns -1 if the exception was not active (an illegal exception
 * return by the guest); otherwise returns the RETTOBASE value
 * (nonzero if this was the only active exception). External IRQs
 * whose input line is still high are re-pended.
 */
int armv7m_nvic_complete_irq(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];

    trace_nvic_complete_irq(irq);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    /* Sample RETTOBASE before deactivation so it reflects the state
     * at the point of the exception return.
     */
    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}
619
620/* callback when external interrupt line is changed */
621static void set_irq_level(void *opaque, int n, int level)
622{
623 NVICState *s = opaque;
624 VecInfo *vec;
625
626 n += NVIC_FIRST_IRQ;
627
628 assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);
629
630 trace_nvic_set_irq_level(n, level);
631
632 /* The pending status of an external interrupt is
633 * latched on rising edge and exception handler return.
634 *
635 * Pulsing the IRQ will always run the handler
636 * once, and the handler will re-run until the
637 * level is low when the handler completes.
638 */
639 vec = &s->vectors[n];
640 if (level != vec->level) {
641 vec->level = level;
642 if (level) {
2fb50a33 643 armv7m_nvic_set_pending(s, n, false);
da6d674e
MD
644 }
645 }
9ee6e8bb
PB
646}
647
/* Handle a 32-bit read from the NVIC/System Control Space register bank.
 * @offset is the offset within the register bank; @attrs carries the
 * transaction's security attribute, which selects the register bank for
 * banked registers and gates access to Secure-only registers.
 * Unknown offsets log a guest error and read as zero.
 */
static uint32_t nvic_readl(NVICState *s, uint32_t offset, MemTxAttrs attrs)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type. */
        /* Number of 32-interrupt groups supported, minus one */
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
    {
        int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
        int i;

        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        /* ITNS is RAZ/WI from Non-secure */
        if (!attrs.secure) {
            return 0;
        }
        val = 0;
        for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
            if (s->itns[startvec + i]) {
                val |= (1 << i);
            }
        }
        return val;
    }
    case 0xd00: /* CPUID Base. */
        return cpu->midr;
    case 0xd04: /* Interrupt Control State. */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /* VECTPENDING */
        val |= (s->vectpending & 0xff) << 12;
        /* ISRPENDING - set if any external IRQ is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE - set if only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        /* PENDSTSET */
        if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
            val |= (1 << 26);
        }
        /* PENDSVSET */
        if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
            val |= (1 << 28);
        }
        /* NMIPENDSET */
        if (s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT not implemented */
        return val;
    case 0xd08: /* Vector Table Offset. */
        return cpu->env.v7m.vecbase[attrs.secure];
    case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
        /* VECTKEY reads back as 0xfa05; PRIGROUP is banked */
        val = 0xfa050000 | (s->prigroup[attrs.secure] << 8);
        if (attrs.secure) {
            /* s->aircr stores PRIS, BFHFNMINS, SYSRESETREQS */
            val |= cpu->env.v7m.aircr;
        } else {
            if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
                /* BFHFNMINS is R/O from NS; other bits are RAZ/WI. If
                 * security isn't supported then BFHFNMINS is RAO (and
                 * the bit in env.v7m.aircr is always set).
                 */
                val |= cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK;
            }
        }
        return val;
    case 0xd10: /* System Control. */
        /* TODO: Implement SLEEPONEXIT. */
        return 0;
    case 0xd14: /* Configuration Control. */
        /* The BFHFNMIGN bit is the only non-banked bit; we
         * keep it in the non-secure copy of the register.
         */
        val = cpu->env.v7m.ccr[attrs.secure];
        val |= cpu->env.v7m.ccr[M_REG_NS] & R_V7M_CCR_BFHFNMIGN_MASK;
        return val;
    case 0xd24: /* System Handler Status. */
        /* Assemble SHCSR from the per-exception active/pending/enabled
         * bits, at their architected bit positions.
         */
        val = 0;
        if (s->vectors[ARMV7M_EXCP_MEM].active) {
            val |= (1 << 0);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].active) {
            val |= (1 << 1);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].active) {
            val |= (1 << 3);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].active) {
            val |= (1 << 7);
        }
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
            val |= (1 << 10);
        }
        if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
            val |= (1 << 11);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
            val |= (1 << 12);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].pending) {
            val |= (1 << 13);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].pending) {
            val |= (1 << 14);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].pending) {
            val |= (1 << 15);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
            val |= (1 << 16);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
            val |= (1 << 17);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
            val |= (1 << 18);
        }
        return val;
    case 0xd28: /* Configurable Fault Status. */
        /* The BFSR bits [15:8] are shared between security states
         * and we store them in the NS copy
         */
        val = cpu->env.v7m.cfsr[attrs.secure];
        val |= cpu->env.v7m.cfsr[M_REG_NS] & R_V7M_CFSR_BFSR_MASK;
        return val;
    case 0xd2c: /* Hard Fault Status. */
        return cpu->env.v7m.hfsr;
    case 0xd30: /* Debug Fault Status. */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR MemManage Fault Address */
        return cpu->env.v7m.mmfar[attrs.secure];
    case 0xd38: /* Bus Fault Address. */
        return cpu->env.v7m.bfar;
    case 0xd3c: /* Aux Fault Status. */
        /* TODO: Implement fault status registers. */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    /* Fixed ID register values below describe the emulated core's
     * feature set (compare the Cortex-M ID register values).
     */
    case 0xd40: /* PFR0. */
        return 0x00000030;
    case 0xd44: /* PRF1. */
        return 0x00000200;
    case 0xd48: /* DFR0. */
        return 0x00100000;
    case 0xd4c: /* AFR0. */
        return 0x00000000;
    case 0xd50: /* MMFR0. */
        return 0x00000030;
    case 0xd54: /* MMFR1. */
        return 0x00000000;
    case 0xd58: /* MMFR2. */
        return 0x00000000;
    case 0xd5c: /* MMFR3. */
        return 0x00000000;
    case 0xd60: /* ISAR0. */
        return 0x01141110;
    case 0xd64: /* ISAR1. */
        return 0x02111000;
    case 0xd68: /* ISAR2. */
        return 0x21112231;
    case 0xd6c: /* ISAR3. */
        return 0x01111110;
    case 0xd70: /* ISAR4. */
        return 0x01310102;
    /* TODO: Implement debug registers. */
    case 0xd90: /* MPU_TYPE */
        /* Unified MPU; if the MPU is not present this value is zero */
        return cpu->pmsav7_dregion << 8;
        break;
    case 0xd94: /* MPU_CTRL */
        return cpu->env.v7m.mpu_ctrl[attrs.secure];
    case 0xd98: /* MPU_RNR */
        return cpu->env.pmsav7.rnr[attrs.secure];
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        /* v7M: base address bits plus the VALID/REGION fields */
        return (cpu->env.pmsav7.drbar[region] & 0x1f) | (region & 0xf);
    }
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr[attrs.secure];

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[attrs.secure][region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        /* v7M RASR: attributes in the top half, size/enable in the bottom */
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
               (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0[attrs.secure];
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1[attrs.secure];
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}
903
45db7ba6
PM
904static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value,
905 MemTxAttrs attrs)
9ee6e8bb 906{
d713ea6c 907 ARMCPU *cpu = s->cpu;
ff68dacb 908
9ee6e8bb 909 switch (offset) {
e1be0a57
PM
910 case 0x380 ... 0x3bf: /* NVIC_ITNS<n> */
911 {
912 int startvec = 32 * (offset - 0x380) + NVIC_FIRST_IRQ;
913 int i;
914
915 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
916 goto bad_offset;
917 }
918 if (!attrs.secure) {
919 break;
920 }
921 for (i = 0; i < 32 && startvec + i < s->num_irq; i++) {
922 s->itns[startvec + i] = (value >> i) & 1;
923 }
924 nvic_irq_update(s);
925 break;
926 }
9ee6e8bb
PB
927 case 0xd04: /* Interrupt Control State. */
928 if (value & (1 << 31)) {
2fb50a33 929 armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI, false);
9ee6e8bb
PB
930 }
931 if (value & (1 << 28)) {
2fb50a33 932 armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
9ee6e8bb 933 } else if (value & (1 << 27)) {
2fb50a33 934 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV, attrs.secure);
9ee6e8bb
PB
935 }
936 if (value & (1 << 26)) {
2fb50a33 937 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
9ee6e8bb 938 } else if (value & (1 << 25)) {
2fb50a33 939 armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK, attrs.secure);
9ee6e8bb
PB
940 }
941 break;
942 case 0xd08: /* Vector Table Offset. */
45db7ba6 943 cpu->env.v7m.vecbase[attrs.secure] = value & 0xffffff80;
9ee6e8bb 944 break;
3b2e9344
PM
945 case 0xd0c: /* Application Interrupt/Reset Control (AIRCR) */
946 if ((value >> R_V7M_AIRCR_VECTKEY_SHIFT) == 0x05fa) {
947 if (value & R_V7M_AIRCR_SYSRESETREQ_MASK) {
948 if (attrs.secure ||
949 !(cpu->env.v7m.aircr & R_V7M_AIRCR_SYSRESETREQS_MASK)) {
950 qemu_irq_pulse(s->sysresetreq);
951 }
e192becd 952 }
3b2e9344 953 if (value & R_V7M_AIRCR_VECTCLRACTIVE_MASK) {
14790f73
MD
954 qemu_log_mask(LOG_GUEST_ERROR,
955 "Setting VECTCLRACTIVE when not in DEBUG mode "
956 "is UNPREDICTABLE\n");
9ee6e8bb 957 }
3b2e9344
PM
958 if (value & R_V7M_AIRCR_VECTRESET_MASK) {
959 /* NB: this bit is RES0 in v8M */
14790f73
MD
960 qemu_log_mask(LOG_GUEST_ERROR,
961 "Setting VECTRESET when not in DEBUG mode "
962 "is UNPREDICTABLE\n");
9ee6e8bb 963 }
3b2e9344
PM
964 s->prigroup[attrs.secure] = extract32(value,
965 R_V7M_AIRCR_PRIGROUP_SHIFT,
966 R_V7M_AIRCR_PRIGROUP_LENGTH);
967 if (attrs.secure) {
968 /* These bits are only writable by secure */
969 cpu->env.v7m.aircr = value &
970 (R_V7M_AIRCR_SYSRESETREQS_MASK |
971 R_V7M_AIRCR_BFHFNMINS_MASK |
972 R_V7M_AIRCR_PRIS_MASK);
7208b426
PM
973 /* BFHFNMINS changes the priority of Secure HardFault, and
974 * allows a pending Non-secure HardFault to preempt (which
975 * we implement by marking it enabled).
976 */
331f4bae
PM
977 if (cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK) {
978 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -3;
7208b426 979 s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
331f4bae
PM
980 } else {
981 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
7208b426 982 s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
331f4bae 983 }
3b2e9344 984 }
da6d674e 985 nvic_irq_update(s);
9ee6e8bb
PB
986 }
987 break;
988 case 0xd10: /* System Control. */
9ee6e8bb 989 /* TODO: Implement control registers. */
e6b33209
MD
990 qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n");
991 break;
992 case 0xd14: /* Configuration Control. */
993 /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
994 value &= (R_V7M_CCR_STKALIGN_MASK |
995 R_V7M_CCR_BFHFNMIGN_MASK |
996 R_V7M_CCR_DIV_0_TRP_MASK |
997 R_V7M_CCR_UNALIGN_TRP_MASK |
998 R_V7M_CCR_USERSETMPEND_MASK |
999 R_V7M_CCR_NONBASETHRDENA_MASK);
1000
9d40cd8a
PM
1001 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1002 /* v8M makes NONBASETHRDENA and STKALIGN be RES1 */
1003 value |= R_V7M_CCR_NONBASETHRDENA_MASK
1004 | R_V7M_CCR_STKALIGN_MASK;
1005 }
1006 if (attrs.secure) {
1007 /* the BFHFNMIGN bit is not banked; keep that in the NS copy */
1008 cpu->env.v7m.ccr[M_REG_NS] =
1009 (cpu->env.v7m.ccr[M_REG_NS] & ~R_V7M_CCR_BFHFNMIGN_MASK)
1010 | (value & R_V7M_CCR_BFHFNMIGN_MASK);
1011 value &= ~R_V7M_CCR_BFHFNMIGN_MASK;
1012 }
1013
1014 cpu->env.v7m.ccr[attrs.secure] = value;
e72e3ffc 1015 break;
9ee6e8bb 1016 case 0xd24: /* System Handler Control. */
5db53e35
PM
1017 s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
1018 s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
1019 s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
1020 s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
1021 s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
1022 s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
1023 s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
1024 s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
1025 s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
1026 s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
1027 s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
da6d674e
MD
1028 s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
1029 s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
1030 s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
1031 nvic_irq_update(s);
9ee6e8bb
PB
1032 break;
1033 case 0xd28: /* Configurable Fault Status. */
334e8dad
PM
1034 cpu->env.v7m.cfsr[attrs.secure] &= ~value; /* W1C */
1035 if (attrs.secure) {
1036 /* The BFSR bits [15:8] are shared between security states
1037 * and we store them in the NS copy.
1038 */
1039 cpu->env.v7m.cfsr[M_REG_NS] &= ~(value & R_V7M_CFSR_BFSR_MASK);
1040 }
e6b33209 1041 break;
9ee6e8bb 1042 case 0xd2c: /* Hard Fault Status. */
e6b33209
MD
1043 cpu->env.v7m.hfsr &= ~value; /* W1C */
1044 break;
9ee6e8bb 1045 case 0xd30: /* Debug Fault Status. */
e6b33209
MD
1046 cpu->env.v7m.dfsr &= ~value; /* W1C */
1047 break;
9ee6e8bb 1048 case 0xd34: /* Mem Manage Address. */
c51a5cfc 1049 cpu->env.v7m.mmfar[attrs.secure] = value;
e6b33209 1050 return;
9ee6e8bb 1051 case 0xd38: /* Bus Fault Address. */
e6b33209
MD
1052 cpu->env.v7m.bfar = value;
1053 return;
9ee6e8bb 1054 case 0xd3c: /* Aux Fault Status. */
e72e3ffc 1055 qemu_log_mask(LOG_UNIMP,
e6b33209 1056 "NVIC: Aux fault status registers unimplemented\n");
e72e3ffc 1057 break;
29c483a5
MD
1058 case 0xd90: /* MPU_TYPE */
1059 return; /* RO */
1060 case 0xd94: /* MPU_CTRL */
1061 if ((value &
1062 (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
1063 == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
1064 qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
1065 "UNPREDICTABLE\n");
1066 }
ecf5e8ea
PM
1067 cpu->env.v7m.mpu_ctrl[attrs.secure]
1068 = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
1069 R_V7M_MPU_CTRL_HFNMIENA_MASK |
1070 R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
29c483a5
MD
1071 tlb_flush(CPU(cpu));
1072 break;
1073 case 0xd98: /* MPU_RNR */
1074 if (value >= cpu->pmsav7_dregion) {
1075 qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
1076 PRIu32 "/%" PRIu32 "\n",
1077 value, cpu->pmsav7_dregion);
1078 } else {
1bc04a88 1079 cpu->env.pmsav7.rnr[attrs.secure] = value;
29c483a5
MD
1080 }
1081 break;
1082 case 0xd9c: /* MPU_RBAR */
1083 case 0xda4: /* MPU_RBAR_A1 */
1084 case 0xdac: /* MPU_RBAR_A2 */
1085 case 0xdb4: /* MPU_RBAR_A3 */
1086 {
1087 int region;
1088
0e1a46bb
PM
1089 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1090 /* PMSAv8M handling of the aliases is different from v7M:
1091 * aliases A1, A2, A3 override the low two bits of the region
1092 * number in MPU_RNR, and there is no 'region' field in the
1093 * RBAR register.
1094 */
1095 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1096
1bc04a88 1097 region = cpu->env.pmsav7.rnr[attrs.secure];
0e1a46bb
PM
1098 if (aliasno) {
1099 region = deposit32(region, 0, 2, aliasno);
1100 }
1101 if (region >= cpu->pmsav7_dregion) {
1102 return;
1103 }
62c58ee0 1104 cpu->env.pmsav8.rbar[attrs.secure][region] = value;
0e1a46bb
PM
1105 tlb_flush(CPU(cpu));
1106 return;
1107 }
1108
29c483a5
MD
1109 if (value & (1 << 4)) {
1110 /* VALID bit means use the region number specified in this
1111 * value and also update MPU_RNR.REGION with that value.
1112 */
1113 region = extract32(value, 0, 4);
1114 if (region >= cpu->pmsav7_dregion) {
1115 qemu_log_mask(LOG_GUEST_ERROR,
1116 "MPU region out of range %u/%" PRIu32 "\n",
1117 region, cpu->pmsav7_dregion);
1118 return;
1119 }
1bc04a88 1120 cpu->env.pmsav7.rnr[attrs.secure] = region;
29c483a5 1121 } else {
1bc04a88 1122 region = cpu->env.pmsav7.rnr[attrs.secure];
29c483a5
MD
1123 }
1124
1125 if (region >= cpu->pmsav7_dregion) {
1126 return;
1127 }
1128
1129 cpu->env.pmsav7.drbar[region] = value & ~0x1f;
1130 tlb_flush(CPU(cpu));
1131 break;
1132 }
0e1a46bb
PM
1133 case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
1134 case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
1135 case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
1136 case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
29c483a5 1137 {
1bc04a88 1138 int region = cpu->env.pmsav7.rnr[attrs.secure];
29c483a5 1139
0e1a46bb
PM
1140 if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1141 /* PMSAv8M handling of the aliases is different from v7M:
1142 * aliases A1, A2, A3 override the low two bits of the region
1143 * number in MPU_RNR.
1144 */
1145 int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
1146
1bc04a88 1147 region = cpu->env.pmsav7.rnr[attrs.secure];
0e1a46bb
PM
1148 if (aliasno) {
1149 region = deposit32(region, 0, 2, aliasno);
1150 }
1151 if (region >= cpu->pmsav7_dregion) {
1152 return;
1153 }
62c58ee0 1154 cpu->env.pmsav8.rlar[attrs.secure][region] = value;
0e1a46bb
PM
1155 tlb_flush(CPU(cpu));
1156 return;
1157 }
1158
29c483a5
MD
1159 if (region >= cpu->pmsav7_dregion) {
1160 return;
1161 }
1162
1163 cpu->env.pmsav7.drsr[region] = value & 0xff3f;
1164 cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
1165 tlb_flush(CPU(cpu));
1166 break;
1167 }
0e1a46bb
PM
1168 case 0xdc0: /* MPU_MAIR0 */
1169 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1170 goto bad_offset;
1171 }
1172 if (cpu->pmsav7_dregion) {
1173 /* Register is RES0 if no MPU regions are implemented */
4125e6fe 1174 cpu->env.pmsav8.mair0[attrs.secure] = value;
0e1a46bb
PM
1175 }
1176 /* We don't need to do anything else because memory attributes
1177 * only affect cacheability, and we don't implement caching.
1178 */
1179 break;
1180 case 0xdc4: /* MPU_MAIR1 */
1181 if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
1182 goto bad_offset;
1183 }
1184 if (cpu->pmsav7_dregion) {
1185 /* Register is RES0 if no MPU regions are implemented */
4125e6fe 1186 cpu->env.pmsav8.mair1[attrs.secure] = value;
0e1a46bb
PM
1187 }
1188 /* We don't need to do anything else because memory attributes
1189 * only affect cacheability, and we don't implement caching.
1190 */
1191 break;
2a29ddee 1192 case 0xf00: /* Software Triggered Interrupt Register */
da6d674e 1193 {
da6d674e 1194 int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
eb578a23 1195 if (excnum < s->num_irq) {
2fb50a33 1196 armv7m_nvic_set_pending(s, excnum, false);
2a29ddee
PM
1197 }
1198 break;
da6d674e 1199 }
9ee6e8bb 1200 default:
0e1a46bb 1201 bad_offset:
e72e3ffc
PM
1202 qemu_log_mask(LOG_GUEST_ERROR,
1203 "NVIC: Bad write offset 0x%x\n", offset);
9ee6e8bb
PB
1204 }
1205}
1206
9d40cd8a 1207static bool nvic_user_access_ok(NVICState *s, hwaddr offset, MemTxAttrs attrs)
eb578a23
PM
1208{
1209 /* Return true if unprivileged access to this register is permitted. */
1210 switch (offset) {
1211 case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
9d40cd8a
PM
1212 /* For access via STIR_NS it is the NS CCR.USERSETMPEND that
1213 * controls access even though the CPU is in Secure state (I_QDKX).
1214 */
1215 return s->cpu->env.v7m.ccr[attrs.secure] & R_V7M_CCR_USERSETMPEND_MASK;
eb578a23
PM
1216 default:
1217 /* All other user accesses cause a BusFault unconditionally */
1218 return false;
1219 }
1220}
1221
e6a0d350
PM
1222static int shpr_bank(NVICState *s, int exc, MemTxAttrs attrs)
1223{
1224 /* Behaviour for the SHPR register field for this exception:
1225 * return M_REG_NS to use the nonsecure vector (including for
1226 * non-banked exceptions), M_REG_S for the secure version of
1227 * a banked exception, and -1 if this field should RAZ/WI.
1228 */
1229 switch (exc) {
1230 case ARMV7M_EXCP_MEM:
1231 case ARMV7M_EXCP_USAGE:
1232 case ARMV7M_EXCP_SVC:
1233 case ARMV7M_EXCP_PENDSV:
1234 case ARMV7M_EXCP_SYSTICK:
1235 /* Banked exceptions */
1236 return attrs.secure;
1237 case ARMV7M_EXCP_BUS:
1238 /* Not banked, RAZ/WI from nonsecure if BFHFNMINS is zero */
1239 if (!attrs.secure &&
1240 !(s->cpu->env.v7m.aircr & R_V7M_AIRCR_BFHFNMINS_MASK)) {
1241 return -1;
1242 }
1243 return M_REG_NS;
1244 case ARMV7M_EXCP_SECURE:
1245 /* Not banked, RAZ/WI from nonsecure */
1246 if (!attrs.secure) {
1247 return -1;
1248 }
1249 return M_REG_NS;
1250 case ARMV7M_EXCP_DEBUG:
1251 /* Not banked. TODO should RAZ/WI if DEMCR.SDME is set */
1252 return M_REG_NS;
1253 case 8 ... 10:
1254 case 13:
1255 /* RES0 */
1256 return -1;
1257 default:
1258 /* Not reachable due to decode of SHPR register addresses */
1259 g_assert_not_reached();
1260 }
1261}
1262
eb578a23
PM
1263static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
1264 uint64_t *data, unsigned size,
1265 MemTxAttrs attrs)
2a29ddee 1266{
f797c075 1267 NVICState *s = (NVICState *)opaque;
2a29ddee 1268 uint32_t offset = addr;
da6d674e 1269 unsigned i, startvec, end;
0e8153dd
AB
1270 uint32_t val;
1271
9d40cd8a 1272 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
eb578a23
PM
1273 /* Generate BusFault for unprivileged accesses */
1274 return MEMTX_ERROR;
1275 }
1276
0e8153dd 1277 switch (offset) {
da6d674e
MD
1278 /* reads of set and clear both return the status */
1279 case 0x100 ... 0x13f: /* NVIC Set enable */
1280 offset += 0x80;
1281 /* fall through */
1282 case 0x180 ... 0x1bf: /* NVIC Clear enable */
1283 val = 0;
1284 startvec = offset - 0x180 + NVIC_FIRST_IRQ; /* vector # */
1285
1286 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1287 if (s->vectors[startvec + i].enabled &&
1288 (attrs.secure || s->itns[startvec + i])) {
da6d674e
MD
1289 val |= (1 << i);
1290 }
1291 }
1292 break;
1293 case 0x200 ... 0x23f: /* NVIC Set pend */
1294 offset += 0x80;
1295 /* fall through */
1296 case 0x280 ... 0x2bf: /* NVIC Clear pend */
1297 val = 0;
1298 startvec = offset - 0x280 + NVIC_FIRST_IRQ; /* vector # */
1299 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1300 if (s->vectors[startvec + i].pending &&
1301 (attrs.secure || s->itns[startvec + i])) {
da6d674e
MD
1302 val |= (1 << i);
1303 }
1304 }
1305 break;
1306 case 0x300 ... 0x33f: /* NVIC Active */
1307 val = 0;
1308 startvec = offset - 0x300 + NVIC_FIRST_IRQ; /* vector # */
1309
1310 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1311 if (s->vectors[startvec + i].active &&
1312 (attrs.secure || s->itns[startvec + i])) {
da6d674e
MD
1313 val |= (1 << i);
1314 }
1315 }
1316 break;
1317 case 0x400 ... 0x5ef: /* NVIC Priority */
1318 val = 0;
1319 startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */
1320
1321 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1322 if (attrs.secure || s->itns[startvec + i]) {
1323 val |= s->vectors[startvec + i].prio << (8 * i);
1324 }
da6d674e
MD
1325 }
1326 break;
e6a0d350 1327 case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
0e8153dd
AB
1328 val = 0;
1329 for (i = 0; i < size; i++) {
e6a0d350
PM
1330 unsigned hdlidx = (offset - 0xd14) + i;
1331 int sbank = shpr_bank(s, hdlidx, attrs);
1332
1333 if (sbank < 0) {
1334 continue;
1335 }
1336 val = deposit32(val, i * 8, 8, get_prio(s, hdlidx, sbank));
0e8153dd 1337 }
da6d674e 1338 break;
0e8153dd 1339 case 0xfe0 ... 0xfff: /* ID. */
2a29ddee 1340 if (offset & 3) {
da6d674e
MD
1341 val = 0;
1342 } else {
1343 val = nvic_id[(offset - 0xfe0) >> 2];
1344 }
1345 break;
1346 default:
1347 if (size == 4) {
45db7ba6 1348 val = nvic_readl(s, offset, attrs);
da6d674e
MD
1349 } else {
1350 qemu_log_mask(LOG_GUEST_ERROR,
1351 "NVIC: Bad read of size %d at offset 0x%x\n",
1352 size, offset);
1353 val = 0;
2a29ddee 1354 }
2a29ddee 1355 }
da6d674e
MD
1356
1357 trace_nvic_sysreg_read(addr, val, size);
eb578a23
PM
1358 *data = val;
1359 return MEMTX_OK;
2a29ddee
PM
1360}
1361
eb578a23
PM
1362static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
1363 uint64_t value, unsigned size,
1364 MemTxAttrs attrs)
2a29ddee 1365{
f797c075 1366 NVICState *s = (NVICState *)opaque;
2a29ddee 1367 uint32_t offset = addr;
da6d674e
MD
1368 unsigned i, startvec, end;
1369 unsigned setval = 0;
1370
1371 trace_nvic_sysreg_write(addr, value, size);
0e8153dd 1372
9d40cd8a 1373 if (attrs.user && !nvic_user_access_ok(s, addr, attrs)) {
eb578a23
PM
1374 /* Generate BusFault for unprivileged accesses */
1375 return MEMTX_ERROR;
1376 }
1377
0e8153dd 1378 switch (offset) {
da6d674e
MD
1379 case 0x100 ... 0x13f: /* NVIC Set enable */
1380 offset += 0x80;
1381 setval = 1;
1382 /* fall through */
1383 case 0x180 ... 0x1bf: /* NVIC Clear enable */
1384 startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;
1385
1386 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1387 if (value & (1 << i) &&
1388 (attrs.secure || s->itns[startvec + i])) {
da6d674e
MD
1389 s->vectors[startvec + i].enabled = setval;
1390 }
1391 }
1392 nvic_irq_update(s);
eb578a23 1393 return MEMTX_OK;
da6d674e
MD
1394 case 0x200 ... 0x23f: /* NVIC Set pend */
1395 /* the special logic in armv7m_nvic_set_pending()
1396 * is not needed since IRQs are never escalated
1397 */
1398 offset += 0x80;
1399 setval = 1;
1400 /* fall through */
1401 case 0x280 ... 0x2bf: /* NVIC Clear pend */
1402 startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
1403
1404 for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
e1be0a57
PM
1405 if (value & (1 << i) &&
1406 (attrs.secure || s->itns[startvec + i])) {
da6d674e
MD
1407 s->vectors[startvec + i].pending = setval;
1408 }
1409 }
1410 nvic_irq_update(s);
eb578a23 1411 return MEMTX_OK;
da6d674e 1412 case 0x300 ... 0x33f: /* NVIC Active */
eb578a23 1413 return MEMTX_OK; /* R/O */
da6d674e
MD
1414 case 0x400 ... 0x5ef: /* NVIC Priority */
1415 startvec = 8 * (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */
1416
1417 for (i = 0; i < size && startvec + i < s->num_irq; i++) {
e1be0a57 1418 if (attrs.secure || s->itns[startvec + i]) {
e6a0d350 1419 set_prio(s, startvec + i, false, (value >> (i * 8)) & 0xff);
e1be0a57 1420 }
da6d674e
MD
1421 }
1422 nvic_irq_update(s);
eb578a23 1423 return MEMTX_OK;
e6a0d350 1424 case 0xd18 ... 0xd23: /* System Handler Priority (SHPR1, SHPR2, SHPR3) */
0e8153dd 1425 for (i = 0; i < size; i++) {
da6d674e 1426 unsigned hdlidx = (offset - 0xd14) + i;
e6a0d350
PM
1427 int newprio = extract32(value, i * 8, 8);
1428 int sbank = shpr_bank(s, hdlidx, attrs);
1429
1430 if (sbank < 0) {
1431 continue;
1432 }
1433 set_prio(s, hdlidx, sbank, newprio);
0e8153dd 1434 }
da6d674e 1435 nvic_irq_update(s);
eb578a23 1436 return MEMTX_OK;
0e8153dd 1437 }
2a29ddee 1438 if (size == 4) {
45db7ba6 1439 nvic_writel(s, offset, value, attrs);
eb578a23 1440 return MEMTX_OK;
2a29ddee 1441 }
e72e3ffc
PM
1442 qemu_log_mask(LOG_GUEST_ERROR,
1443 "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
eb578a23
PM
1444 /* This is UNPREDICTABLE; treat as RAZ/WI */
1445 return MEMTX_OK;
2a29ddee
PM
1446}
1447
1448static const MemoryRegionOps nvic_sysreg_ops = {
eb578a23
PM
1449 .read_with_attrs = nvic_sysreg_read,
1450 .write_with_attrs = nvic_sysreg_write,
2a29ddee
PM
1451 .endianness = DEVICE_NATIVE_ENDIAN,
1452};
1453
f104919d
PM
1454static MemTxResult nvic_sysreg_ns_write(void *opaque, hwaddr addr,
1455 uint64_t value, unsigned size,
1456 MemTxAttrs attrs)
1457{
1458 if (attrs.secure) {
1459 /* S accesses to the alias act like NS accesses to the real region */
1460 attrs.secure = 0;
1461 return nvic_sysreg_write(opaque, addr, value, size, attrs);
1462 } else {
1463 /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1464 if (attrs.user) {
1465 return MEMTX_ERROR;
1466 }
1467 return MEMTX_OK;
1468 }
1469}
1470
1471static MemTxResult nvic_sysreg_ns_read(void *opaque, hwaddr addr,
1472 uint64_t *data, unsigned size,
1473 MemTxAttrs attrs)
1474{
1475 if (attrs.secure) {
1476 /* S accesses to the alias act like NS accesses to the real region */
1477 attrs.secure = 0;
1478 return nvic_sysreg_read(opaque, addr, data, size, attrs);
1479 } else {
1480 /* NS attrs are RAZ/WI for privileged, and BusFault for user */
1481 if (attrs.user) {
1482 return MEMTX_ERROR;
1483 }
1484 *data = 0;
1485 return MEMTX_OK;
1486 }
1487}
1488
1489static const MemoryRegionOps nvic_sysreg_ns_ops = {
1490 .read_with_attrs = nvic_sysreg_ns_read,
1491 .write_with_attrs = nvic_sysreg_ns_write,
1492 .endianness = DEVICE_NATIVE_ENDIAN,
1493};
1494
da6d674e
MD
1495static int nvic_post_load(void *opaque, int version_id)
1496{
1497 NVICState *s = opaque;
1498 unsigned i;
331f4bae 1499 int resetprio;
da6d674e
MD
1500
1501 /* Check for out of range priority settings */
331f4bae
PM
1502 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
1503
1504 if (s->vectors[ARMV7M_EXCP_RESET].prio != resetprio ||
da6d674e
MD
1505 s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
1506 s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
1507 return 1;
1508 }
1509 for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
1510 if (s->vectors[i].prio & ~0xff) {
1511 return 1;
1512 }
1513 }
1514
1515 nvic_recompute_state(s);
1516
1517 return 0;
1518}
1519
1520static const VMStateDescription vmstate_VecInfo = {
1521 .name = "armv7m_nvic_info",
1522 .version_id = 1,
1523 .minimum_version_id = 1,
1524 .fields = (VMStateField[]) {
1525 VMSTATE_INT16(prio, VecInfo),
1526 VMSTATE_UINT8(enabled, VecInfo),
1527 VMSTATE_UINT8(pending, VecInfo),
1528 VMSTATE_UINT8(active, VecInfo),
1529 VMSTATE_UINT8(level, VecInfo),
1530 VMSTATE_END_OF_LIST()
1531 }
1532};
1533
17906a16
PM
1534static bool nvic_security_needed(void *opaque)
1535{
1536 NVICState *s = opaque;
1537
1538 return arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY);
1539}
1540
1541static int nvic_security_post_load(void *opaque, int version_id)
1542{
1543 NVICState *s = opaque;
1544 int i;
1545
1546 /* Check for out of range priority settings */
331f4bae
PM
1547 if (s->sec_vectors[ARMV7M_EXCP_HARD].prio != -1
1548 && s->sec_vectors[ARMV7M_EXCP_HARD].prio != -3) {
1549 /* We can't cross-check against AIRCR.BFHFNMINS as we don't know
1550 * if the CPU state has been migrated yet; a mismatch won't
1551 * cause the emulation to blow up, though.
1552 */
17906a16
PM
1553 return 1;
1554 }
1555 for (i = ARMV7M_EXCP_MEM; i < ARRAY_SIZE(s->sec_vectors); i++) {
1556 if (s->sec_vectors[i].prio & ~0xff) {
1557 return 1;
1558 }
1559 }
1560 return 0;
1561}
1562
1563static const VMStateDescription vmstate_nvic_security = {
1564 .name = "nvic/m-security",
1565 .version_id = 1,
1566 .minimum_version_id = 1,
1567 .needed = nvic_security_needed,
1568 .post_load = &nvic_security_post_load,
1569 .fields = (VMStateField[]) {
1570 VMSTATE_STRUCT_ARRAY(sec_vectors, NVICState, NVIC_INTERNAL_VECTORS, 1,
1571 vmstate_VecInfo, VecInfo),
3b2e9344 1572 VMSTATE_UINT32(prigroup[M_REG_S], NVICState),
e1be0a57 1573 VMSTATE_BOOL_ARRAY(itns, NVICState, NVIC_MAX_VECTORS),
17906a16
PM
1574 VMSTATE_END_OF_LIST()
1575 }
1576};
1577
0797226c
JQ
1578static const VMStateDescription vmstate_nvic = {
1579 .name = "armv7m_nvic",
ff68dacb
PM
1580 .version_id = 4,
1581 .minimum_version_id = 4,
da6d674e 1582 .post_load = &nvic_post_load,
8f1e884b 1583 .fields = (VMStateField[]) {
da6d674e
MD
1584 VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
1585 vmstate_VecInfo, VecInfo),
3b2e9344 1586 VMSTATE_UINT32(prigroup[M_REG_NS], NVICState),
0797226c 1587 VMSTATE_END_OF_LIST()
17906a16
PM
1588 },
1589 .subsections = (const VMStateDescription*[]) {
1590 &vmstate_nvic_security,
1591 NULL
0797226c
JQ
1592 }
1593};
23e39294 1594
da6d674e
MD
1595static Property props_nvic[] = {
1596 /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
1597 DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
1598 DEFINE_PROP_END_OF_LIST()
1599};
1600
aecff692
PM
1601static void armv7m_nvic_reset(DeviceState *dev)
1602{
331f4bae 1603 int resetprio;
f797c075 1604 NVICState *s = NVIC(dev);
da6d674e
MD
1605
1606 s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
da6d674e
MD
1607 /* MEM, BUS, and USAGE are enabled through
1608 * the System Handler Control register
b3387ede 1609 */
da6d674e
MD
1610 s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
1611 s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
1612 s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
1613 s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
1614
331f4bae
PM
1615 resetprio = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? -4 : -3;
1616 s->vectors[ARMV7M_EXCP_RESET].prio = resetprio;
da6d674e
MD
1617 s->vectors[ARMV7M_EXCP_NMI].prio = -2;
1618 s->vectors[ARMV7M_EXCP_HARD].prio = -1;
1619
17906a16
PM
1620 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
1621 s->sec_vectors[ARMV7M_EXCP_HARD].enabled = 1;
1622 s->sec_vectors[ARMV7M_EXCP_SVC].enabled = 1;
1623 s->sec_vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
1624 s->sec_vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;
1625
1626 /* AIRCR.BFHFNMINS resets to 0 so Secure HF is priority -1 (R_CMTC) */
1627 s->sec_vectors[ARMV7M_EXCP_HARD].prio = -1;
7208b426
PM
1628 /* If AIRCR.BFHFNMINS is 0 then NS HF is (effectively) disabled */
1629 s->vectors[ARMV7M_EXCP_HARD].enabled = 0;
1630 } else {
1631 s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
17906a16
PM
1632 }
1633
da6d674e
MD
1634 /* Strictly speaking the reset handler should be enabled.
1635 * However, we don't simulate soft resets through the NVIC,
1636 * and the reset vector should never be pended.
1637 * So we leave it disabled to catch logic errors.
1638 */
1639
1640 s->exception_prio = NVIC_NOEXC_PRIO;
1641 s->vectpending = 0;
e93bc2ac 1642 s->vectpending_is_s_banked = false;
5255fcf8 1643 s->vectpending_prio = NVIC_NOEXC_PRIO;
e1be0a57
PM
1644
1645 if (arm_feature(&s->cpu->env, ARM_FEATURE_M_SECURITY)) {
1646 memset(s->itns, 0, sizeof(s->itns));
1647 } else {
1648 /* This state is constant and not guest accessible in a non-security
1649 * NVIC; we set the bits to true to avoid having to do a feature
1650 * bit check in the NVIC enable/pend/etc register accessors.
1651 */
1652 int i;
1653
1654 for (i = NVIC_FIRST_IRQ; i < ARRAY_SIZE(s->itns); i++) {
1655 s->itns[i] = true;
1656 }
1657 }
ff68dacb 1658}
da6d674e 1659
ff68dacb
PM
1660static void nvic_systick_trigger(void *opaque, int n, int level)
1661{
1662 NVICState *s = opaque;
1663
1664 if (level) {
1665 /* SysTick just asked us to pend its exception.
1666 * (This is different from an external interrupt line's
1667 * behaviour.)
2fb50a33
PM
1668 * TODO: when we implement the banked systicks we must make
1669 * this pend the correct banked exception.
ff68dacb 1670 */
2fb50a33 1671 armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK, false);
ff68dacb 1672 }
aecff692
PM
1673}
1674
53111180 1675static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
9ee6e8bb 1676{
f797c075 1677 NVICState *s = NVIC(dev);
ff68dacb
PM
1678 SysBusDevice *systick_sbd;
1679 Error *err = NULL;
f104919d 1680 int regionlen;
9ee6e8bb 1681
d713ea6c
MD
1682 s->cpu = ARM_CPU(qemu_get_cpu(0));
1683 assert(s->cpu);
da6d674e
MD
1684
1685 if (s->num_irq > NVIC_MAX_IRQ) {
1686 error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
53111180
PM
1687 return;
1688 }
da6d674e
MD
1689
1690 qdev_init_gpio_in(dev, set_irq_level, s->num_irq);
1691
1692 /* include space for internal exception vectors */
1693 s->num_irq += NVIC_FIRST_IRQ;
1694
ff68dacb
PM
1695 object_property_set_bool(OBJECT(&s->systick), true, "realized", &err);
1696 if (err != NULL) {
1697 error_propagate(errp, err);
1698 return;
1699 }
1700 systick_sbd = SYS_BUS_DEVICE(&s->systick);
1701 sysbus_connect_irq(systick_sbd, 0,
1702 qdev_get_gpio_in_named(dev, "systick-trigger", 0));
1703
da6d674e
MD
1704 /* The NVIC and System Control Space (SCS) starts at 0xe000e000
1705 * and looks like this:
1706 * 0x004 - ICTR
ff68dacb 1707 * 0x010 - 0xff - systick
da6d674e
MD
1708 * 0x100..0x7ec - NVIC
1709 * 0x7f0..0xcff - Reserved
1710 * 0xd00..0xd3c - SCS registers
1711 * 0xd40..0xeff - Reserved or Not implemented
1712 * 0xf00 - STIR
f104919d
PM
1713 *
1714 * Some registers within this space are banked between security states.
1715 * In v8M there is a second range 0xe002e000..0xe002efff which is the
1716 * NonSecure alias SCS; secure accesses to this behave like NS accesses
1717 * to the main SCS range, and non-secure accesses (including when
1718 * the security extension is not implemented) are RAZ/WI.
1719 * Note that both the main SCS range and the alias range are defined
1720 * to be exempt from memory attribution (R_BLJT) and so the memory
1721 * transaction attribute always matches the current CPU security
1722 * state (attrs.secure == env->v7m.secure). In the nvic_sysreg_ns_ops
1723 * wrappers we change attrs.secure to indicate the NS access; so
1724 * generally code determining which banked register to use should
1725 * use attrs.secure; code determining actual behaviour of the system
1726 * should use env->v7m.secure.
2a29ddee 1727 */
f104919d
PM
1728 regionlen = arm_feature(&s->cpu->env, ARM_FEATURE_V8) ? 0x21000 : 0x1000;
1729 memory_region_init(&s->container, OBJECT(s), "nvic", regionlen);
2a29ddee
PM
1730 /* The system register region goes at the bottom of the priority
1731 * stack as it covers the whole page.
1732 */
1437c94b 1733 memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
2a29ddee
PM
1734 "nvic_sysregs", 0x1000);
1735 memory_region_add_subregion(&s->container, 0, &s->sysregmem);
ff68dacb
PM
1736 memory_region_add_subregion_overlap(&s->container, 0x10,
1737 sysbus_mmio_get_region(systick_sbd, 0),
1738 1);
da6d674e 1739
f104919d
PM
1740 if (arm_feature(&s->cpu->env, ARM_FEATURE_V8)) {
1741 memory_region_init_io(&s->sysreg_ns_mem, OBJECT(s),
1742 &nvic_sysreg_ns_ops, s,
1743 "nvic_sysregs_ns", 0x1000);
1744 memory_region_add_subregion(&s->container, 0x20000, &s->sysreg_ns_mem);
1745 }
1746
98957a94 1747 sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
9ee6e8bb 1748}
fe7e8758 1749
55e00a19
PM
1750static void armv7m_nvic_instance_init(Object *obj)
1751{
1752 /* We have a different default value for the num-irq property
1753 * than our superclass. This function runs after qdev init
1754 * has set the defaults from the Property array and before
1755 * any user-specified property setting, so just modify the
fae15286 1756 * value in the GICState struct.
55e00a19 1757 */
e192becd 1758 DeviceState *dev = DEVICE(obj);
f797c075 1759 NVICState *nvic = NVIC(obj);
da6d674e
MD
1760 SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
1761
ff68dacb
PM
1762 object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK);
1763 qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default());
1764
da6d674e 1765 sysbus_init_irq(sbd, &nvic->excpout);
e192becd 1766 qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
ff68dacb 1767 qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1);
55e00a19 1768}
39bffca2 1769
999e12bb
AL
1770static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
1771{
39bffca2 1772 DeviceClass *dc = DEVICE_CLASS(klass);
999e12bb 1773
39bffca2 1774 dc->vmsd = &vmstate_nvic;
da6d674e 1775 dc->props = props_nvic;
aecff692 1776 dc->reset = armv7m_nvic_reset;
53111180 1777 dc->realize = armv7m_nvic_realize;
999e12bb
AL
1778}
1779
8c43a6f0 1780static const TypeInfo armv7m_nvic_info = {
1e8cae4d 1781 .name = TYPE_NVIC,
da6d674e 1782 .parent = TYPE_SYS_BUS_DEVICE,
55e00a19 1783 .instance_init = armv7m_nvic_instance_init,
f797c075 1784 .instance_size = sizeof(NVICState),
39bffca2 1785 .class_init = armv7m_nvic_class_init,
da6d674e 1786 .class_size = sizeof(SysBusDeviceClass),
a32134aa
ML
1787};
1788
83f7d43a 1789static void armv7m_nvic_register_types(void)
fe7e8758 1790{
39bffca2 1791 type_register_static(&armv7m_nvic_info);
fe7e8758
PB
1792}
1793
83f7d43a 1794type_init(armv7m_nvic_register_types)
This page took 1.002281 seconds and 4 git commands to generate.