hw/intc/arm_gicv3: Implement GICv3 redistributor registers
/*
 * ARM GICv3 emulation: Redistributor
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited.
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "trace.h"
#include "gicv3_internal.h"

static uint32_t mask_group(GICv3CPUState *cs, MemTxAttrs attrs)
{
    /* Return a 32-bit mask which should be applied for this set of 32
     * interrupts; each bit is 1 if access is permitted by the
     * combination of attrs.secure and GICR_GROUPR. (GICR_NSACR does
     * not affect config register accesses, unlike GICD_NSACR.)
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        /* bits for Group 0 or Secure Group 1 interrupts are RAZ/WI */
        return cs->gicr_igroupr0;
    }
    return 0xFFFFFFFFU;
}

static void gicr_write_set_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                      uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "set-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg |= val;
    gicv3_redist_update(cs);
}

static void gicr_write_clear_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                        uint32_t *reg, uint32_t val)
{
    /* Helper routine to implement writing to a "clear-bitmap" register */
    val &= mask_group(cs, attrs);
    *reg &= ~val;
    gicv3_redist_update(cs);
}

static uint32_t gicr_read_bitmap_reg(GICv3CPUState *cs, MemTxAttrs attrs,
                                     uint32_t reg)
{
    reg &= mask_group(cs, attrs);
    return reg;
}

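/*
 * Non-secure accesses see a restricted view of the 8-bit priority fields:
 * reads return (prio << 1) & 0xff and writes store 0x80 | (value >> 1),
 * so the NS world can only reach the lower half of the priority range.
 * For example, a Secure-view priority of 0xd0 reads back as 0xa0 on the
 * NS side, and an NS write of 0xa0 is stored as 0xd0.
 */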
static uint8_t gicr_read_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs,
                                    int irq)
{
    /* Read the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    uint32_t prio;

    prio = cs->gicr_ipriorityr[irq];

    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return 0;
        }
        /* NS view of the interrupt priority */
        prio = (prio << 1) & 0xff;
    }
    return prio;
}

static void gicr_write_ipriorityr(GICv3CPUState *cs, MemTxAttrs attrs, int irq,
                                  uint8_t value)
{
    /* Write the value of GICR_IPRIORITYR<n> for the specified interrupt,
     * honouring security state (these are RAZ/WI for Group 0 or Secure
     * Group 1 interrupts).
     */
    if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
        if (!(cs->gicr_igroupr0 & (1U << irq))) {
            /* Fields for Group 0 or Secure Group 1 interrupts are RAZ/WI */
            return;
        }
        /* NS view of the interrupt priority */
        value = 0x80 | (value >> 1);
    }
    cs->gicr_ipriorityr[irq] = value;
}

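/*
 * Byte accesses are only implemented for the priority registers; any
 * other offset must be accessed at its architectural width, so byte
 * accesses elsewhere fail with MEMTX_ERROR and are reported by the
 * callers as bad guest accesses.
 */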
static MemTxResult gicr_readb(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        *data = gicr_read_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writeb(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
        gicr_write_ipriorityr(cs, attrs, offset - GICR_IPRIORITYR, value);
        gicv3_redist_update(cs);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_readl(GICv3CPUState *cs, hwaddr offset,
                              uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        *data = cs->gicr_ctlr;
        return MEMTX_OK;
    case GICR_IIDR:
        *data = gicv3_iidr();
        return MEMTX_OK;
    case GICR_TYPER:
        *data = extract64(cs->gicr_typer, 0, 32);
        return MEMTX_OK;
    case GICR_TYPER + 4:
        *data = extract64(cs->gicr_typer, 32, 32);
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for us (this is an optional register and our implementation
         * does not track RO/WO/reserved violations to report them to the guest)
         */
        *data = 0;
        return MEMTX_OK;
    case GICR_WAKER:
        *data = cs->gicr_waker;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = extract64(cs->gicr_propbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        *data = extract64(cs->gicr_propbaser, 32, 32);
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = extract64(cs->gicr_pendbaser, 0, 32);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        *data = extract64(cs->gicr_pendbaser, 32, 32);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igroupr0;
        return MEMTX_OK;
    case GICR_ISENABLER0:
    case GICR_ICENABLER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_ienabler0);
        return MEMTX_OK;
    case GICR_ISPENDR0:
    case GICR_ICPENDR0:
    {
        /* The pending register reads as the logical OR of the pending
         * latch and the input line level for level-triggered interrupts.
         */
        uint32_t val = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
        *data = gicr_read_bitmap_reg(cs, attrs, val);
        return MEMTX_OK;
    }
    case GICR_ISACTIVER0:
    case GICR_ICACTIVER0:
        *data = gicr_read_bitmap_reg(cs, attrs, cs->gicr_iactiver0);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;
        uint32_t value = 0;

        /* Assemble the word from the four byte-wide priority fields,
         * highest-numbered interrupt in the top byte.
         */
        for (i = irq + 3; i >= irq; i--) {
            value <<= 8;
            value |= gicr_read_ipriorityr(cs, attrs, i);
        }
        *data = value;
        return MEMTX_OK;
    }
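    /*
     * The ICFGR registers have a 2-bit field per interrupt; the odd bit
     * of each field is 1 for edge-triggered, 0 for level-sensitive.
     * For example, if interrupts 0 and 1 are edge-triggered then
     * half_shuffle32(0x0003) == 0x00000005, and the << 1 below gives
     * the register value 0x0000000a.
     */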
    case GICR_ICFGR0:
    case GICR_ICFGR1:
    {
        /* Our edge_trigger bitmap is one bit per irq; take the correct
         * half of it, and spread it out into the odd bits.
         */
        uint32_t value;

        value = cs->edge_trigger & mask_group(cs, attrs);
        value = extract32(value, (offset == GICR_ICFGR1) ? 16 : 0, 16);
        value = half_shuffle32(value) << 1;
        *data = value;
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_igrpmodr0;
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            *data = 0;
            return MEMTX_OK;
        }
        *data = cs->gicr_nsacr;
        return MEMTX_OK;
    case GICR_IDREGS ... GICR_IDREGS + 0x1f:
        *data = gicv3_idreg(offset - GICR_IDREGS);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writel(GICv3CPUState *cs, hwaddr offset,
                               uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_CTLR:
        /* For our implementation, GICR_TYPER.DPGS is 0 and so all
         * the DPG bits are RAZ/WI. We don't do anything asynchronously,
         * so UWP and RWP are RAZ/WI. And GICR_TYPER.LPIS is 0 (we don't
         * implement LPIs) so Enable_LPIs is RES0. So there are no writable
         * bits for us.
         */
        return MEMTX_OK;
    case GICR_STATUSR:
        /* RAZ/WI for our implementation */
        return MEMTX_OK;
    case GICR_WAKER:
        /* Only the ProcessorSleep bit is writable. When the guest sets
         * it, it requests that we transition the channel between the
         * redistributor and the cpu interface to quiescent, and that
         * we set the ChildrenAsleep bit once the interface has reached the
         * quiescent state.
         * Setting ProcessorSleep to 0 reverses the quiescing, and
         * ChildrenAsleep is cleared once the transition is complete.
         * Since our interface is not asynchronous, we complete these
         * transitions instantaneously, so we set ChildrenAsleep to the
         * same value as ProcessorSleep here.
         */
        value &= GICR_WAKER_ProcessorSleep;
        if (value & GICR_WAKER_ProcessorSleep) {
            value |= GICR_WAKER_ChildrenAsleep;
        }
        cs->gicr_waker = value;
        return MEMTX_OK;
    case GICR_PROPBASER:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PROPBASER + 4:
        cs->gicr_propbaser = deposit64(cs->gicr_propbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 0, 32, value);
        return MEMTX_OK;
    case GICR_PENDBASER + 4:
        cs->gicr_pendbaser = deposit64(cs->gicr_pendbaser, 32, 32, value);
        return MEMTX_OK;
    case GICR_IGROUPR0:
        if (!attrs.secure && !(cs->gic->gicd_ctlr & GICD_CTLR_DS)) {
            return MEMTX_OK;
        }
        cs->gicr_igroupr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_ISENABLER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ICENABLER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ienabler0, value);
        return MEMTX_OK;
    case GICR_ISPENDR0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ICPENDR0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_ipendr0, value);
        return MEMTX_OK;
    case GICR_ISACTIVER0:
        gicr_write_set_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_ICACTIVER0:
        gicr_write_clear_bitmap_reg(cs, attrs, &cs->gicr_iactiver0, value);
        return MEMTX_OK;
    case GICR_IPRIORITYR ... GICR_IPRIORITYR + 0x1f:
    {
        int i, irq = offset - GICR_IPRIORITYR;

        for (i = irq; i < irq + 4; i++, value >>= 8) {
            gicr_write_ipriorityr(cs, attrs, i, value);
        }
        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
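    /*
     * GICR_ICFGR0 covers the SGIs, which are always edge-triggered, so
     * none of its bits are programmable; GICR_ICFGR1 covers the PPIs,
     * whose trigger type we do store. Writes compress the odd (edge)
     * bits back into our one-bit-per-irq edge_trigger bitmap: e.g. a
     * write of 0x0000000a (interrupts 16 and 17 edge-triggered) becomes
     * half_unshuffle32(0x5) == 0x3, i.e. bits 16 and 17 after the << 16.
     */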
    case GICR_ICFGR0:
        /* Register is all RAZ/WI or RAO/WI bits */
        return MEMTX_OK;
    case GICR_ICFGR1:
    {
        uint32_t mask;

        /* Since our edge_trigger bitmap is one bit per irq, our input
         * 32-bits will compress down into 16 bits which we need
         * to write into the bitmap.
         */
        value = half_unshuffle32(value >> 1) << 16;
        mask = mask_group(cs, attrs) & 0xffff0000U;

        cs->edge_trigger &= ~mask;
        cs->edge_trigger |= (value & mask);

        gicv3_redist_update(cs);
        return MEMTX_OK;
    }
    case GICR_IGRPMODR0:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_igrpmodr0 = value;
        gicv3_redist_update(cs);
        return MEMTX_OK;
    case GICR_NSACR:
        if ((cs->gic->gicd_ctlr & GICD_CTLR_DS) || !attrs.secure) {
            /* RAZ/WI if security disabled, or if
             * security enabled and this is an NS access
             */
            return MEMTX_OK;
        }
        cs->gicr_nsacr = value;
        /* no update required as this only affects access permission checks */
        return MEMTX_OK;
    case GICR_IIDR:
    case GICR_TYPER:
    case GICR_IDREGS ... GICR_IDREGS + 0x1f:
        /* RO registers, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

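/*
 * Only GICR_TYPER, GICR_PROPBASER and GICR_PENDBASER are 64 bits wide,
 * so they are the only registers we accept 8-byte accesses for; they
 * can also be accessed as two 32-bit halves through gicr_readl() and
 * gicr_writel() above.
 */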
static MemTxResult gicr_readll(GICv3CPUState *cs, hwaddr offset,
                               uint64_t *data, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_TYPER:
        *data = cs->gicr_typer;
        return MEMTX_OK;
    case GICR_PROPBASER:
        *data = cs->gicr_propbaser;
        return MEMTX_OK;
    case GICR_PENDBASER:
        *data = cs->gicr_pendbaser;
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

static MemTxResult gicr_writell(GICv3CPUState *cs, hwaddr offset,
                                uint64_t value, MemTxAttrs attrs)
{
    switch (offset) {
    case GICR_PROPBASER:
        cs->gicr_propbaser = value;
        return MEMTX_OK;
    case GICR_PENDBASER:
        cs->gicr_pendbaser = value;
        return MEMTX_OK;
    case GICR_TYPER:
        /* RO register, ignore the write */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write to RO register at offset "
                      TARGET_FMT_plx "\n", __func__, offset);
        return MEMTX_OK;
    default:
        return MEMTX_ERROR;
    }
}

MemTxResult gicv3_redist_read(void *opaque, hwaddr offset, uint64_t *data,
                              unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
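    /* 0x20000 bytes per CPU: the RD_base frame followed by the SGI_base
     * frame, each 64KB.
     */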
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_readb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_readl(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_readll(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest read at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badread(gicv3_redist_affid(cs), offset,
                                   size, attrs.secure);
    } else {
        trace_gicv3_redist_read(gicv3_redist_affid(cs), offset, *data,
                                size, attrs.secure);
    }
    return r;
}

MemTxResult gicv3_redist_write(void *opaque, hwaddr offset, uint64_t data,
                               unsigned size, MemTxAttrs attrs)
{
    GICv3State *s = opaque;
    GICv3CPUState *cs;
    MemTxResult r;
    int cpuidx;

    /* This region covers all the redistributor pages; there are
     * (for GICv3) two 64K pages per CPU. At the moment they are
     * all contiguous (ie in this one region), though we might later
     * want to allow splitting of redistributor pages into several
     * blocks so we can support more CPUs.
     */
    cpuidx = offset / 0x20000;
    offset %= 0x20000;
    assert(cpuidx < s->num_cpu);

    cs = &s->cpu[cpuidx];

    switch (size) {
    case 1:
        r = gicr_writeb(cs, offset, data, attrs);
        break;
    case 4:
        r = gicr_writel(cs, offset, data, attrs);
        break;
    case 8:
        r = gicr_writell(cs, offset, data, attrs);
        break;
    default:
        r = MEMTX_ERROR;
        break;
    }

    if (r == MEMTX_ERROR) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: invalid guest write at offset " TARGET_FMT_plx
                      " size %u\n", __func__, offset, size);
        trace_gicv3_redist_badwrite(gicv3_redist_affid(cs), offset, data,
                                    size, attrs.secure);
    } else {
        trace_gicv3_redist_write(gicv3_redist_affid(cs), offset, data,
                                 size, attrs.secure);
    }
    return r;
}