/*
 * ARM Nested Vectored Interrupt Controller
 *
 * Copyright (c) 2006-2007 CodeSourcery.
 * Written by Paul Brook
 *
 * This code is licensed under the GPL.
 *
 * The ARMv7M System controller is fairly tightly tied in with the
 * NVIC. Much of that is also implemented here.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/sysbus.h"
#include "qemu/timer.h"
#include "hw/arm/arm.h"
#include "hw/intc/armv7m_nvic.h"
#include "target/arm/cpu.h"
#include "exec/exec-all.h"
#include "qemu/log.h"
#include "trace.h"

/* IRQ number counting:
 *
 * the num-irq property counts the number of external IRQ lines
 *
 * NVICState::num_irq counts the total number of exceptions
 * (external IRQs, the 15 internal exceptions including reset,
 * and one for the unused exception number 0).
 *
 * NVIC_MAX_IRQ is the highest permitted number of external IRQ lines.
 *
 * NVIC_MAX_VECTORS is the highest permitted number of exceptions.
 *
 * Iterating through all exceptions should typically be done with
 * for (i = 1; i < s->num_irq; i++) to avoid the unused slot 0.
 *
 * The external qemu_irq lines are the NVIC's external IRQ lines,
 * so line 0 is exception 16.
 *
 * In the terminology of the architecture manual, "interrupts" are
 * a subcategory of exception referring to the external interrupts
 * (which are exception numbers NVIC_FIRST_IRQ and upward).
 * For historical reasons QEMU tends to use "interrupt" and
 * "exception" more or less interchangeably.
 */
#define NVIC_FIRST_IRQ 16
#define NVIC_MAX_IRQ (NVIC_MAX_VECTORS - NVIC_FIRST_IRQ)

/* Effective running priority of the CPU when no exception is active
 * (higher than the highest possible priority value)
 */
#define NVIC_NOEXC_PRIO 0x100

static const uint8_t nvic_id[] = {
    0x00, 0xb0, 0x1b, 0x00, 0x0d, 0xe0, 0x05, 0xb1
};

static int nvic_pending_prio(NVICState *s)
{
    /* return the priority of the current pending interrupt,
     * or NVIC_NOEXC_PRIO if no interrupt is pending
     */
    return s->vectpending ? s->vectors[s->vectpending].prio : NVIC_NOEXC_PRIO;
}

/* Return the value of the ICSR RETTOBASE bit:
 * 1 if there is exactly one active exception
 * 0 if there is more than one active exception
 * UNKNOWN if there are no active exceptions (we choose 1,
 * which matches the choice Cortex-M3 is documented as making).
 *
 * NB: some versions of the documentation talk about this
 * counting "active exceptions other than the one shown by IPSR";
 * this is only different in the obscure corner case where guest
 * code has manually deactivated an exception and is about
 * to fail an exception-return integrity check. The definition
 * above is the one from the v8M ARM ARM and is also in line
 * with the behaviour documented for the Cortex-M3.
 */
static bool nvic_rettobase(NVICState *s)
{
    int irq, nhand = 0;

    for (irq = ARMV7M_EXCP_RESET; irq < s->num_irq; irq++) {
        if (s->vectors[irq].active) {
            nhand++;
            if (nhand == 2) {
                return 0;
            }
        }
    }

    return 1;
}

/* Return the value of the ICSR ISRPENDING bit:
 * 1 if an external interrupt is pending
 * 0 if no external interrupt is pending
 */
static bool nvic_isrpending(NVICState *s)
{
    int irq;

    /* We can shortcut if the highest priority pending interrupt
     * happens to be external or if there is nothing pending.
     */
    if (s->vectpending > NVIC_FIRST_IRQ) {
        return true;
    }
    if (s->vectpending == 0) {
        return false;
    }

    for (irq = NVIC_FIRST_IRQ; irq < s->num_irq; irq++) {
        if (s->vectors[irq].pending) {
            return true;
        }
    }
    return false;
}

/* Return a mask word which clears the subpriority bits from
 * a priority value for an M-profile exception, leaving only
 * the group priority.
 */
static inline uint32_t nvic_gprio_mask(NVICState *s)
{
    return ~0U << (s->prigroup + 1);
}
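
/* Illustrative example (not used by the code): with PRIGROUP == 5 the mask
 * is ~0U << 6 == 0xffffffc0, so for an 8-bit priority value only bits [7:6]
 * form the group priority and bits [5:0] are the subpriority; a configured
 * priority of 0x45 therefore has group priority 0x40.
 */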

/* Recompute vectpending and exception_prio */
static void nvic_recompute_state(NVICState *s)
{
    int i;
    int pend_prio = NVIC_NOEXC_PRIO;
    int active_prio = NVIC_NOEXC_PRIO;
    int pend_irq = 0;

    for (i = 1; i < s->num_irq; i++) {
        VecInfo *vec = &s->vectors[i];

        if (vec->enabled && vec->pending && vec->prio < pend_prio) {
            pend_prio = vec->prio;
            pend_irq = i;
        }
        if (vec->active && vec->prio < active_prio) {
            active_prio = vec->prio;
        }
    }

    s->vectpending = pend_irq;
    s->exception_prio = active_prio & nvic_gprio_mask(s);

    trace_nvic_recompute_state(s->vectpending, s->exception_prio);
}

/* Return the current execution priority of the CPU
 * (equivalent to the pseudocode ExecutionPriority function).
 * This is a value between -2 (NMI priority) and NVIC_NOEXC_PRIO.
 */
static inline int nvic_exec_prio(NVICState *s)
{
    CPUARMState *env = &s->cpu->env;
    int running;

    if (env->v7m.faultmask[env->v7m.secure]) {
        running = -1;
    } else if (env->v7m.primask[env->v7m.secure]) {
        running = 0;
    } else if (env->v7m.basepri[env->v7m.secure] > 0) {
        running = env->v7m.basepri[env->v7m.secure] & nvic_gprio_mask(s);
    } else {
        running = NVIC_NOEXC_PRIO; /* lower than any possible priority */
    }
    /* consider priority of active handler */
    return MIN(running, s->exception_prio);
}
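
/* Illustrative example: if the guest sets FAULTMASK, the effective execution
 * priority is -1, which masks HardFault and every configurable exception and
 * leaves only NMI (-2) and Reset (-3) able to preempt; with only PRIMASK set
 * the effective priority is 0, so HardFault and NMI can still be taken.
 */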

bool armv7m_nvic_can_take_pending_exception(void *opaque)
{
    NVICState *s = opaque;

    return nvic_exec_prio(s) > nvic_pending_prio(s);
}

int armv7m_nvic_raw_execution_priority(void *opaque)
{
    NVICState *s = opaque;

    return s->exception_prio;
}

/* caller must call nvic_irq_update() after this */
static void set_prio(NVICState *s, unsigned irq, uint8_t prio)
{
    assert(irq > ARMV7M_EXCP_NMI); /* only use for configurable prios */
    assert(irq < s->num_irq);

    s->vectors[irq].prio = prio;

    trace_nvic_set_prio(irq, prio);
}

/* Recompute state and assert irq line accordingly.
 * Must be called after changes to:
 *  vec->active, vec->enabled, vec->pending or vec->prio for any vector
 *  prigroup
 */
static void nvic_irq_update(NVICState *s)
{
    int lvl;
    int pend_prio;

    nvic_recompute_state(s);
    pend_prio = nvic_pending_prio(s);

    /* Raise NVIC output if this IRQ would be taken, except that we
     * ignore the effects of the BASEPRI, FAULTMASK and PRIMASK (which
     * will be checked for in arm_v7m_cpu_exec_interrupt()); changes
     * to those CPU registers don't cause us to recalculate the NVIC
     * pending info.
     */
    lvl = (pend_prio < s->exception_prio);
    trace_nvic_irq_update(s->vectpending, pend_prio, s->exception_prio, lvl);
    qemu_set_irq(s->excpout, lvl);
}

static void armv7m_nvic_clear_pending(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];
    trace_nvic_clear_pending(irq, vec->enabled, vec->prio);
    if (vec->pending) {
        vec->pending = 0;
        nvic_irq_update(s);
    }
}

void armv7m_nvic_set_pending(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];
    trace_nvic_set_pending(irq, vec->enabled, vec->prio);

    if (irq >= ARMV7M_EXCP_HARD && irq < ARMV7M_EXCP_PENDSV) {
        /* If a synchronous exception is pending then it may be
         * escalated to HardFault if:
         *  * it is equal or lower priority to current execution
         *  * it is disabled
         * (ie we need to take it immediately but we can't do so).
         * Asynchronous exceptions (and interrupts) simply remain pending.
         *
         * For QEMU, we don't have any imprecise (asynchronous) faults,
         * so we can assume that PREFETCH_ABORT and DATA_ABORT are always
         * synchronous.
         * Debug exceptions are awkward because only Debug exceptions
         * resulting from the BKPT instruction should be escalated,
         * but we don't currently implement any Debug exceptions other
         * than those that result from BKPT, so we treat all debug exceptions
         * as needing escalation.
         *
         * This all means we can identify whether to escalate based only on
         * the exception number and don't (yet) need the caller to explicitly
         * tell us whether this exception is synchronous or not.
         */
        int running = nvic_exec_prio(s);
        bool escalate = false;

        if (vec->prio >= running) {
            trace_nvic_escalate_prio(irq, vec->prio, running);
            escalate = true;
        } else if (!vec->enabled) {
            trace_nvic_escalate_disabled(irq);
            escalate = true;
        }

        if (escalate) {
            if (running < 0) {
                /* We want to escalate to HardFault but we can't take a
                 * synchronous HardFault at this point either. This is a
                 * Lockup condition due to a guest bug. We don't model
                 * Lockup, so report via cpu_abort() instead.
                 */
                cpu_abort(&s->cpu->parent_obj,
                          "Lockup: can't escalate %d to HardFault "
                          "(current priority %d)\n", irq, running);
            }

            /* We can do the escalation, so we take HardFault instead */
            irq = ARMV7M_EXCP_HARD;
            vec = &s->vectors[irq];
            s->cpu->env.v7m.hfsr |= R_V7M_HFSR_FORCED_MASK;
        }
    }

    if (!vec->pending) {
        vec->pending = 1;
        nvic_irq_update(s);
    }
}
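
/* Illustrative example: pending a UsageFault while it is disabled in SHCSR,
 * or while the execution priority is already at or above its configured
 * priority, escalates it to HardFault and sets HFSR.FORCED. Pending a
 * disabled external interrupt, by contrast, simply leaves it pending.
 */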

/* Make pending IRQ active. */
void armv7m_nvic_acknowledge_irq(void *opaque)
{
    NVICState *s = (NVICState *)opaque;
    CPUARMState *env = &s->cpu->env;
    const int pending = s->vectpending;
    const int running = nvic_exec_prio(s);
    int pendgroupprio;
    VecInfo *vec;

    assert(pending > ARMV7M_EXCP_RESET && pending < s->num_irq);

    vec = &s->vectors[pending];

    assert(vec->enabled);
    assert(vec->pending);

    pendgroupprio = vec->prio & nvic_gprio_mask(s);
    assert(pendgroupprio < running);

    trace_nvic_acknowledge_irq(pending, vec->prio);

    vec->active = 1;
    vec->pending = 0;

    env->v7m.exception = s->vectpending;

    nvic_irq_update(s);
}

int armv7m_nvic_complete_irq(void *opaque, int irq)
{
    NVICState *s = (NVICState *)opaque;
    VecInfo *vec;
    int ret;

    assert(irq > ARMV7M_EXCP_RESET && irq < s->num_irq);

    vec = &s->vectors[irq];

    trace_nvic_complete_irq(irq);

    if (!vec->active) {
        /* Tell the caller this was an illegal exception return */
        return -1;
    }

    ret = nvic_rettobase(s);

    vec->active = 0;
    if (vec->level) {
        /* Re-pend the exception if it's still held high; only
         * happens for external IRQs
         */
        assert(irq >= NVIC_FIRST_IRQ);
        vec->pending = 1;
    }

    nvic_irq_update(s);

    return ret;
}

/* callback when external interrupt line is changed */
static void set_irq_level(void *opaque, int n, int level)
{
    NVICState *s = opaque;
    VecInfo *vec;

    n += NVIC_FIRST_IRQ;

    assert(n >= NVIC_FIRST_IRQ && n < s->num_irq);

    trace_nvic_set_irq_level(n, level);

    /* The pending status of an external interrupt is
     * latched on rising edge and exception handler return.
     *
     * Pulsing the IRQ will always run the handler
     * once, and the handler will re-run until the
     * level is low when the handler completes.
     */
    vec = &s->vectors[n];
    if (level != vec->level) {
        vec->level = level;
        if (level) {
            armv7m_nvic_set_pending(s, n);
        }
    }
}
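
/* Illustrative example: qemu_irq_pulse() on one of these inputs drives the
 * line 0 -> 1 -> 0; the rising edge pends the interrupt, so the handler runs
 * once, and because the level is low again by the time the handler completes,
 * armv7m_nvic_complete_irq() does not re-pend it.
 */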

static uint32_t nvic_readl(NVICState *s, uint32_t offset)
{
    ARMCPU *cpu = s->cpu;
    uint32_t val;

    switch (offset) {
    case 4: /* Interrupt Control Type. */
        return ((s->num_irq - NVIC_FIRST_IRQ) / 32) - 1;
    case 0xd00: /* CPUID Base. */
        return cpu->midr;
    case 0xd04: /* Interrupt Control State. */
        /* VECTACTIVE */
        val = cpu->env.v7m.exception;
        /* VECTPENDING */
        val |= (s->vectpending & 0xff) << 12;
        /* ISRPENDING - set if any external IRQ is pending */
        if (nvic_isrpending(s)) {
            val |= (1 << 22);
        }
        /* RETTOBASE - set if only one handler is active */
        if (nvic_rettobase(s)) {
            val |= (1 << 11);
        }
        /* PENDSTSET */
        if (s->vectors[ARMV7M_EXCP_SYSTICK].pending) {
            val |= (1 << 26);
        }
        /* PENDSVSET */
        if (s->vectors[ARMV7M_EXCP_PENDSV].pending) {
            val |= (1 << 28);
        }
        /* NMIPENDSET */
        if (s->vectors[ARMV7M_EXCP_NMI].pending) {
            val |= (1 << 31);
        }
        /* ISRPREEMPT not implemented */
        return val;
    case 0xd08: /* Vector Table Offset. */
        return cpu->env.v7m.vecbase;
    case 0xd0c: /* Application Interrupt/Reset Control. */
        return 0xfa050000 | (s->prigroup << 8);
    case 0xd10: /* System Control. */
        /* TODO: Implement SLEEPONEXIT. */
        return 0;
    case 0xd14: /* Configuration Control. */
        return cpu->env.v7m.ccr;
    case 0xd24: /* System Handler Status. */
        val = 0;
        if (s->vectors[ARMV7M_EXCP_MEM].active) {
            val |= (1 << 0);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].active) {
            val |= (1 << 1);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].active) {
            val |= (1 << 3);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].active) {
            val |= (1 << 7);
        }
        if (s->vectors[ARMV7M_EXCP_DEBUG].active) {
            val |= (1 << 8);
        }
        if (s->vectors[ARMV7M_EXCP_PENDSV].active) {
            val |= (1 << 10);
        }
        if (s->vectors[ARMV7M_EXCP_SYSTICK].active) {
            val |= (1 << 11);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].pending) {
            val |= (1 << 12);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].pending) {
            val |= (1 << 13);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].pending) {
            val |= (1 << 14);
        }
        if (s->vectors[ARMV7M_EXCP_SVC].pending) {
            val |= (1 << 15);
        }
        if (s->vectors[ARMV7M_EXCP_MEM].enabled) {
            val |= (1 << 16);
        }
        if (s->vectors[ARMV7M_EXCP_BUS].enabled) {
            val |= (1 << 17);
        }
        if (s->vectors[ARMV7M_EXCP_USAGE].enabled) {
            val |= (1 << 18);
        }
        return val;
    case 0xd28: /* Configurable Fault Status. */
        return cpu->env.v7m.cfsr;
    case 0xd2c: /* Hard Fault Status. */
        return cpu->env.v7m.hfsr;
    case 0xd30: /* Debug Fault Status. */
        return cpu->env.v7m.dfsr;
    case 0xd34: /* MMFAR MemManage Fault Address */
        return cpu->env.v7m.mmfar;
    case 0xd38: /* Bus Fault Address. */
        return cpu->env.v7m.bfar;
    case 0xd3c: /* Aux Fault Status. */
        /* TODO: Implement fault status registers. */
        qemu_log_mask(LOG_UNIMP,
                      "Aux Fault status registers unimplemented\n");
        return 0;
    case 0xd40: /* PFR0. */
        return 0x00000030;
    case 0xd44: /* PFR1. */
        return 0x00000200;
    case 0xd48: /* DFR0. */
        return 0x00100000;
    case 0xd4c: /* AFR0. */
        return 0x00000000;
    case 0xd50: /* MMFR0. */
        return 0x00000030;
    case 0xd54: /* MMFR1. */
        return 0x00000000;
    case 0xd58: /* MMFR2. */
        return 0x00000000;
    case 0xd5c: /* MMFR3. */
        return 0x00000000;
    case 0xd60: /* ISAR0. */
        return 0x01141110;
    case 0xd64: /* ISAR1. */
        return 0x02111000;
    case 0xd68: /* ISAR2. */
        return 0x21112231;
    case 0xd6c: /* ISAR3. */
        return 0x01111110;
    case 0xd70: /* ISAR4. */
        return 0x01310102;
    /* TODO: Implement debug registers. */
    case 0xd90: /* MPU_TYPE */
        /* Unified MPU; if the MPU is not present this value is zero */
        return cpu->pmsav7_dregion << 8;
    case 0xd94: /* MPU_CTRL */
        return cpu->env.v7m.mpu_ctrl;
    case 0xd98: /* MPU_RNR */
        return cpu->env.pmsav7.rnr;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rbar[region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return (cpu->env.pmsav7.drbar[region] & ~0x1f) | (region & 0xf);
    }
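    /* Illustrative example of the v8M alias handling above: with MPU_RNR
     * holding 5, a read of MPU_RBAR_A2 replaces the low two bits of the
     * region number with the alias number 2 and so returns the RBAR value
     * for region 6.
     */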
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xda0) / 8; /* 0..3 */
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return 0;
            }
            return cpu->env.pmsav8.rlar[region];
        }

        if (region >= cpu->pmsav7_dregion) {
            return 0;
        }
        return ((cpu->env.pmsav7.dracr[region] & 0xffff) << 16) |
            (cpu->env.pmsav7.drsr[region] & 0xffff);
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair0;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        return cpu->env.pmsav8.mair1;
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR, "NVIC: Bad read offset 0x%x\n", offset);
        return 0;
    }
}

static void nvic_writel(NVICState *s, uint32_t offset, uint32_t value)
{
    ARMCPU *cpu = s->cpu;

    switch (offset) {
    case 0xd04: /* Interrupt Control State. */
        if (value & (1 << 31)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_NMI);
        }
        if (value & (1 << 28)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_PENDSV);
        } else if (value & (1 << 27)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_PENDSV);
        }
        if (value & (1 << 26)) {
            armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
        } else if (value & (1 << 25)) {
            armv7m_nvic_clear_pending(s, ARMV7M_EXCP_SYSTICK);
        }
        break;
    case 0xd08: /* Vector Table Offset. */
        cpu->env.v7m.vecbase = value & 0xffffff80;
        break;
    case 0xd0c: /* Application Interrupt/Reset Control. */
        if ((value >> 16) == 0x05fa) {
            if (value & 4) {
                qemu_irq_pulse(s->sysresetreq);
            }
            if (value & 2) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTCLRACTIVE when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            if (value & 1) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "Setting VECTRESET when not in DEBUG mode "
                              "is UNPREDICTABLE\n");
            }
            s->prigroup = extract32(value, 8, 3);
            nvic_irq_update(s);
        }
        break;
    case 0xd10: /* System Control. */
        /* TODO: Implement control registers. */
        qemu_log_mask(LOG_UNIMP, "NVIC: SCR unimplemented\n");
        break;
    case 0xd14: /* Configuration Control. */
        /* Enforce RAZ/WI on reserved and must-RAZ/WI bits */
        value &= (R_V7M_CCR_STKALIGN_MASK |
                  R_V7M_CCR_BFHFNMIGN_MASK |
                  R_V7M_CCR_DIV_0_TRP_MASK |
                  R_V7M_CCR_UNALIGN_TRP_MASK |
                  R_V7M_CCR_USERSETMPEND_MASK |
                  R_V7M_CCR_NONBASETHRDENA_MASK);

        cpu->env.v7m.ccr = value;
        break;
    case 0xd24: /* System Handler Control. */
        s->vectors[ARMV7M_EXCP_MEM].active = (value & (1 << 0)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].active = (value & (1 << 1)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].active = (value & (1 << 3)) != 0;
        s->vectors[ARMV7M_EXCP_SVC].active = (value & (1 << 7)) != 0;
        s->vectors[ARMV7M_EXCP_DEBUG].active = (value & (1 << 8)) != 0;
        s->vectors[ARMV7M_EXCP_PENDSV].active = (value & (1 << 10)) != 0;
        s->vectors[ARMV7M_EXCP_SYSTICK].active = (value & (1 << 11)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].pending = (value & (1 << 12)) != 0;
        s->vectors[ARMV7M_EXCP_MEM].pending = (value & (1 << 13)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].pending = (value & (1 << 14)) != 0;
        s->vectors[ARMV7M_EXCP_SVC].pending = (value & (1 << 15)) != 0;
        s->vectors[ARMV7M_EXCP_MEM].enabled = (value & (1 << 16)) != 0;
        s->vectors[ARMV7M_EXCP_BUS].enabled = (value & (1 << 17)) != 0;
        s->vectors[ARMV7M_EXCP_USAGE].enabled = (value & (1 << 18)) != 0;
        nvic_irq_update(s);
        break;
    case 0xd28: /* Configurable Fault Status. */
        cpu->env.v7m.cfsr &= ~value; /* W1C */
        break;
    case 0xd2c: /* Hard Fault Status. */
        cpu->env.v7m.hfsr &= ~value; /* W1C */
        break;
    case 0xd30: /* Debug Fault Status. */
        cpu->env.v7m.dfsr &= ~value; /* W1C */
        break;
    case 0xd34: /* Mem Manage Address. */
        cpu->env.v7m.mmfar = value;
        return;
    case 0xd38: /* Bus Fault Address. */
        cpu->env.v7m.bfar = value;
        return;
    case 0xd3c: /* Aux Fault Status. */
        qemu_log_mask(LOG_UNIMP,
                      "NVIC: Aux fault status registers unimplemented\n");
        break;
    case 0xd90: /* MPU_TYPE */
        return; /* RO */
    case 0xd94: /* MPU_CTRL */
        if ((value &
             (R_V7M_MPU_CTRL_HFNMIENA_MASK | R_V7M_MPU_CTRL_ENABLE_MASK))
            == R_V7M_MPU_CTRL_HFNMIENA_MASK) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU_CTRL: HFNMIENA and !ENABLE is "
                          "UNPREDICTABLE\n");
        }
        cpu->env.v7m.mpu_ctrl = value & (R_V7M_MPU_CTRL_ENABLE_MASK |
                                         R_V7M_MPU_CTRL_HFNMIENA_MASK |
                                         R_V7M_MPU_CTRL_PRIVDEFENA_MASK);
        tlb_flush(CPU(cpu));
        break;
    case 0xd98: /* MPU_RNR */
        if (value >= cpu->pmsav7_dregion) {
            qemu_log_mask(LOG_GUEST_ERROR, "MPU region out of range %"
                          PRIu32 "/%" PRIu32 "\n",
                          value, cpu->pmsav7_dregion);
        } else {
            cpu->env.pmsav7.rnr = value;
        }
        break;
    case 0xd9c: /* MPU_RBAR */
    case 0xda4: /* MPU_RBAR_A1 */
    case 0xdac: /* MPU_RBAR_A2 */
    case 0xdb4: /* MPU_RBAR_A3 */
    {
        int region;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR, and there is no 'region' field in the
             * RBAR register.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rbar[region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (value & (1 << 4)) {
            /* VALID bit means use the region number specified in this
             * value and also update MPU_RNR.REGION with that value.
             */
            region = extract32(value, 0, 4);
            if (region >= cpu->pmsav7_dregion) {
                qemu_log_mask(LOG_GUEST_ERROR,
                              "MPU region out of range %u/%" PRIu32 "\n",
                              region, cpu->pmsav7_dregion);
                return;
            }
            cpu->env.pmsav7.rnr = region;
        } else {
            region = cpu->env.pmsav7.rnr;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drbar[region] = value & ~0x1f;
        tlb_flush(CPU(cpu));
        break;
    }
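    /* Illustrative example of the v7M MPU_RBAR write above: writing
     * 0x20000014 sets the VALID bit (bit 4), so MPU_RNR is updated to
     * region 4 and that region's base address becomes 0x20000000 (the low
     * five bits of the value are discarded).
     */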
    case 0xda0: /* MPU_RASR (v7M), MPU_RLAR (v8M) */
    case 0xda8: /* MPU_RASR_A1 (v7M), MPU_RLAR_A1 (v8M) */
    case 0xdb0: /* MPU_RASR_A2 (v7M), MPU_RLAR_A2 (v8M) */
    case 0xdb8: /* MPU_RASR_A3 (v7M), MPU_RLAR_A3 (v8M) */
    {
        int region = cpu->env.pmsav7.rnr;

        if (arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            /* PMSAv8M handling of the aliases is different from v7M:
             * aliases A1, A2, A3 override the low two bits of the region
             * number in MPU_RNR.
             */
            int aliasno = (offset - 0xd9c) / 8; /* 0..3 */

            region = cpu->env.pmsav7.rnr;
            if (aliasno) {
                region = deposit32(region, 0, 2, aliasno);
            }
            if (region >= cpu->pmsav7_dregion) {
                return;
            }
            cpu->env.pmsav8.rlar[region] = value;
            tlb_flush(CPU(cpu));
            return;
        }

        if (region >= cpu->pmsav7_dregion) {
            return;
        }

        cpu->env.pmsav7.drsr[region] = value & 0xff3f;
        cpu->env.pmsav7.dracr[region] = (value >> 16) & 0x173f;
        tlb_flush(CPU(cpu));
        break;
    }
    case 0xdc0: /* MPU_MAIR0 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair0 = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xdc4: /* MPU_MAIR1 */
        if (!arm_feature(&cpu->env, ARM_FEATURE_V8)) {
            goto bad_offset;
        }
        if (cpu->pmsav7_dregion) {
            /* Register is RES0 if no MPU regions are implemented */
            cpu->env.pmsav8.mair1 = value;
        }
        /* We don't need to do anything else because memory attributes
         * only affect cacheability, and we don't implement caching.
         */
        break;
    case 0xf00: /* Software Triggered Interrupt Register */
    {
        int excnum = (value & 0x1ff) + NVIC_FIRST_IRQ;
        if (excnum < s->num_irq) {
            armv7m_nvic_set_pending(s, excnum);
        }
        break;
    }
    default:
    bad_offset:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "NVIC: Bad write offset 0x%x\n", offset);
    }
}

static bool nvic_user_access_ok(NVICState *s, hwaddr offset)
{
    /* Return true if unprivileged access to this register is permitted. */
    switch (offset) {
    case 0xf00: /* STIR: accessible only if CCR.USERSETMPEND permits */
        return s->cpu->env.v7m.ccr & R_V7M_CCR_USERSETMPEND_MASK;
    default:
        /* All other user accesses cause a BusFault unconditionally */
        return false;
    }
}

static MemTxResult nvic_sysreg_read(void *opaque, hwaddr addr,
                                    uint64_t *data, unsigned size,
                                    MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    uint32_t val;

    if (attrs.user && !nvic_user_access_ok(s, addr)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    /* reads of set and clear both return the status */
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        val = 0;
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].enabled) {
                val |= (1 << i);
            }
        }
        break;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        offset += 0x80;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        val = 0;
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */
        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].pending) {
                val |= (1 << i);
            }
        }
        break;
    case 0x300 ... 0x33f: /* NVIC Active */
        val = 0;
        startvec = 8 * (offset - 0x300) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (s->vectors[startvec + i].active) {
                val |= (1 << i);
            }
        }
        break;
    case 0x400 ... 0x5ef: /* NVIC Priority */
        val = 0;
        startvec = offset - 0x400 + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            val |= s->vectors[startvec + i].prio << (8 * i);
        }
        break;
    case 0xd18 ... 0xd23: /* System Handler Priority. */
        val = 0;
        for (i = 0; i < size; i++) {
            val |= s->vectors[(offset - 0xd14) + i].prio << (i * 8);
        }
        break;
    case 0xfe0 ... 0xfff: /* ID. */
        if (offset & 3) {
            val = 0;
        } else {
            val = nvic_id[(offset - 0xfe0) >> 2];
        }
        break;
    default:
        if (size == 4) {
            val = nvic_readl(s, offset);
        } else {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "NVIC: Bad read of size %d at offset 0x%x\n",
                          size, offset);
            val = 0;
        }
    }

    trace_nvic_sysreg_read(addr, val, size);
    *data = val;
    return MEMTX_OK;
}
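
/* Illustrative example of the indexing above: the enable, pend and active
 * banks hold one bit per interrupt in 32-bit registers at 4-byte strides, so
 * a 32-bit read at offset 0x184 (ISER1/ICER1) reports external IRQs 32..63
 * (exception numbers 48..79); the priority bank at 0x400 instead holds one
 * byte per interrupt, so the byte at 0x401 is the priority of external IRQ 1.
 */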

static MemTxResult nvic_sysreg_write(void *opaque, hwaddr addr,
                                     uint64_t value, unsigned size,
                                     MemTxAttrs attrs)
{
    NVICState *s = (NVICState *)opaque;
    uint32_t offset = addr;
    unsigned i, startvec, end;
    unsigned setval = 0;

    trace_nvic_sysreg_write(addr, value, size);

    if (attrs.user && !nvic_user_access_ok(s, addr)) {
        /* Generate BusFault for unprivileged accesses */
        return MEMTX_ERROR;
    }

    switch (offset) {
    case 0x100 ... 0x13f: /* NVIC Set enable */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x180 ... 0x1bf: /* NVIC Clear enable */
        startvec = 8 * (offset - 0x180) + NVIC_FIRST_IRQ;

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i)) {
                s->vectors[startvec + i].enabled = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x200 ... 0x23f: /* NVIC Set pend */
        /* the special logic in armv7m_nvic_set_pending()
         * is not needed since IRQs are never escalated
         */
        offset += 0x80;
        setval = 1;
        /* fall through */
    case 0x280 ... 0x2bf: /* NVIC Clear pend */
        startvec = 8 * (offset - 0x280) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0, end = size * 8; i < end && startvec + i < s->num_irq; i++) {
            if (value & (1 << i)) {
                s->vectors[startvec + i].pending = setval;
            }
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0x300 ... 0x33f: /* NVIC Active */
        return MEMTX_OK; /* R/O */
    case 0x400 ... 0x5ef: /* NVIC Priority */
        startvec = (offset - 0x400) + NVIC_FIRST_IRQ; /* vector # */

        for (i = 0; i < size && startvec + i < s->num_irq; i++) {
            set_prio(s, startvec + i, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    case 0xd18 ... 0xd23: /* System Handler Priority. */
        for (i = 0; i < size; i++) {
            unsigned hdlidx = (offset - 0xd14) + i;
            set_prio(s, hdlidx, (value >> (i * 8)) & 0xff);
        }
        nvic_irq_update(s);
        return MEMTX_OK;
    }
    if (size == 4) {
        nvic_writel(s, offset, value);
        return MEMTX_OK;
    }
    qemu_log_mask(LOG_GUEST_ERROR,
                  "NVIC: Bad write of size %d at offset 0x%x\n", size, offset);
    /* This is UNPREDICTABLE; treat as RAZ/WI */
    return MEMTX_OK;
}

static const MemoryRegionOps nvic_sysreg_ops = {
    .read_with_attrs = nvic_sysreg_read,
    .write_with_attrs = nvic_sysreg_write,
    .endianness = DEVICE_NATIVE_ENDIAN,
};

static int nvic_post_load(void *opaque, int version_id)
{
    NVICState *s = opaque;
    unsigned i;

    /* Check for out of range priority settings */
    if (s->vectors[ARMV7M_EXCP_RESET].prio != -3 ||
        s->vectors[ARMV7M_EXCP_NMI].prio != -2 ||
        s->vectors[ARMV7M_EXCP_HARD].prio != -1) {
        return 1;
    }
    for (i = ARMV7M_EXCP_MEM; i < s->num_irq; i++) {
        if (s->vectors[i].prio & ~0xff) {
            return 1;
        }
    }

    nvic_recompute_state(s);

    return 0;
}

static const VMStateDescription vmstate_VecInfo = {
    .name = "armv7m_nvic_info",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_INT16(prio, VecInfo),
        VMSTATE_UINT8(enabled, VecInfo),
        VMSTATE_UINT8(pending, VecInfo),
        VMSTATE_UINT8(active, VecInfo),
        VMSTATE_UINT8(level, VecInfo),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_nvic = {
    .name = "armv7m_nvic",
    .version_id = 4,
    .minimum_version_id = 4,
    .post_load = &nvic_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT_ARRAY(vectors, NVICState, NVIC_MAX_VECTORS, 1,
                             vmstate_VecInfo, VecInfo),
        VMSTATE_UINT32(prigroup, NVICState),
        VMSTATE_END_OF_LIST()
    }
};

static Property props_nvic[] = {
    /* Number of external IRQ lines (so excluding the 16 internal exceptions) */
    DEFINE_PROP_UINT32("num-irq", NVICState, num_irq, 64),
    DEFINE_PROP_END_OF_LIST()
};

static void armv7m_nvic_reset(DeviceState *dev)
{
    NVICState *s = NVIC(dev);

    s->vectors[ARMV7M_EXCP_NMI].enabled = 1;
    s->vectors[ARMV7M_EXCP_HARD].enabled = 1;
    /* MEM, BUS, and USAGE are enabled through
     * the System Handler Control register
     */
    s->vectors[ARMV7M_EXCP_SVC].enabled = 1;
    s->vectors[ARMV7M_EXCP_DEBUG].enabled = 1;
    s->vectors[ARMV7M_EXCP_PENDSV].enabled = 1;
    s->vectors[ARMV7M_EXCP_SYSTICK].enabled = 1;

    s->vectors[ARMV7M_EXCP_RESET].prio = -3;
    s->vectors[ARMV7M_EXCP_NMI].prio = -2;
    s->vectors[ARMV7M_EXCP_HARD].prio = -1;

    /* Strictly speaking the reset handler should be enabled.
     * However, we don't simulate soft resets through the NVIC,
     * and the reset vector should never be pended.
     * So we leave it disabled to catch logic errors.
     */

    s->exception_prio = NVIC_NOEXC_PRIO;
    s->vectpending = 0;
}

static void nvic_systick_trigger(void *opaque, int n, int level)
{
    NVICState *s = opaque;

    if (level) {
        /* SysTick just asked us to pend its exception.
         * (This is different from an external interrupt line's
         * behaviour.)
         */
        armv7m_nvic_set_pending(s, ARMV7M_EXCP_SYSTICK);
    }
}

static void armv7m_nvic_realize(DeviceState *dev, Error **errp)
{
    NVICState *s = NVIC(dev);
    SysBusDevice *systick_sbd;
    Error *err = NULL;

    s->cpu = ARM_CPU(qemu_get_cpu(0));
    assert(s->cpu);

    if (s->num_irq > NVIC_MAX_IRQ) {
        error_setg(errp, "num-irq %d exceeds NVIC maximum", s->num_irq);
        return;
    }

    qdev_init_gpio_in(dev, set_irq_level, s->num_irq);

    /* include space for internal exception vectors */
    s->num_irq += NVIC_FIRST_IRQ;

    object_property_set_bool(OBJECT(&s->systick), true, "realized", &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }
    systick_sbd = SYS_BUS_DEVICE(&s->systick);
    sysbus_connect_irq(systick_sbd, 0,
                       qdev_get_gpio_in_named(dev, "systick-trigger", 0));

    /* The NVIC and System Control Space (SCS) starts at 0xe000e000
     * and looks like this:
     *  0x004 - ICTR
     *  0x010 - 0xff - systick
     *  0x100..0x7ec - NVIC
     *  0x7f0..0xcff - Reserved
     *  0xd00..0xd3c - SCS registers
     *  0xd40..0xeff - Reserved or Not implemented
     *  0xf00 - STIR
     */
    memory_region_init(&s->container, OBJECT(s), "nvic", 0x1000);
    /* The system register region goes at the bottom of the priority
     * stack as it covers the whole page.
     */
    memory_region_init_io(&s->sysregmem, OBJECT(s), &nvic_sysreg_ops, s,
                          "nvic_sysregs", 0x1000);
    memory_region_add_subregion(&s->container, 0, &s->sysregmem);
    memory_region_add_subregion_overlap(&s->container, 0x10,
                                        sysbus_mmio_get_region(systick_sbd, 0),
                                        1);

    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->container);
}

static void armv7m_nvic_instance_init(Object *obj)
{
    /* We have a different default value for the num-irq property
     * than our superclass. This function runs after qdev init
     * has set the defaults from the Property array and before
     * any user-specified property setting, so just modify the
     * value in the NVICState struct.
     */
    DeviceState *dev = DEVICE(obj);
    NVICState *nvic = NVIC(obj);
    SysBusDevice *sbd = SYS_BUS_DEVICE(obj);

    object_initialize(&nvic->systick, sizeof(nvic->systick), TYPE_SYSTICK);
    qdev_set_parent_bus(DEVICE(&nvic->systick), sysbus_get_default());

    sysbus_init_irq(sbd, &nvic->excpout);
    qdev_init_gpio_out_named(dev, &nvic->sysresetreq, "SYSRESETREQ", 1);
    qdev_init_gpio_in_named(dev, nvic_systick_trigger, "systick-trigger", 1);
}

static void armv7m_nvic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->vmsd = &vmstate_nvic;
    dc->props = props_nvic;
    dc->reset = armv7m_nvic_reset;
    dc->realize = armv7m_nvic_realize;
}

static const TypeInfo armv7m_nvic_info = {
    .name = TYPE_NVIC,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_init = armv7m_nvic_instance_init,
    .instance_size = sizeof(NVICState),
    .class_init = armv7m_nvic_class_init,
    .class_size = sizeof(SysBusDeviceClass),
};

static void armv7m_nvic_register_types(void)
{
    type_register_static(&armv7m_nvic_info);
}

type_init(armv7m_nvic_register_types)