/*
 * ARM Generic Interrupt Controller v3
 *
 * Copyright (c) 2015 Huawei.
 * Copyright (c) 2016 Linaro Limited
 * Written by Shlomo Pongratz, Peter Maydell
 *
 * This code is licensed under the GPL, version 2 or (at your option)
 * any later version.
 */

/* This file contains implementation code for an interrupt controller
 * which implements the GICv3 architecture. Specifically this is where
 * the device class itself and the functions for handling interrupts
 * coming in and going out live.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "hw/intc/arm_gicv3.h"
#include "gicv3_internal.h"

static bool irqbetter(GICv3CPUState *cs, int irq, uint8_t prio)
{
    /* Return true if this IRQ at this priority should take
     * precedence over the current recorded highest priority
     * pending interrupt for this CPU. We also return true if
     * the current recorded highest priority pending interrupt
     * is the same as this one (a property which the calling code
     * relies on).
     */
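    /* Note: in the GIC architecture, lower numeric priority values mean
     * higher priority, so "better" here means numerically smaller.
     */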
    if (prio < cs->hppi.prio) {
        return true;
    }
    /* If multiple pending interrupts have the same priority then it is an
     * IMPDEF choice which of them to signal to the CPU. We choose to
     * signal the one with the lowest interrupt number.
     */
    if (prio == cs->hppi.prio && irq <= cs->hppi.irq) {
        return true;
    }
    return false;
}

static uint32_t gicd_int_pending(GICv3State *s, int irq)
{
    /* Recalculate which distributor interrupts are actually pending
     * in the group of 32 interrupts starting at irq (which should be a multiple
     * of 32), and return a 32-bit integer which has a bit set for each
     * interrupt that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
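    /* Illustrative example: a level-triggered interrupt whose input line is
     * currently high gets its bit set via (~edge_trigger & level) even if
     * its PENDING latch is clear; that bit then survives only if the
     * interrupt is enabled, not active, and its group is enabled in
     * GICD_CTLR.
     */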
    uint32_t pend, grpmask;
    uint32_t pending = *gic_bmp_ptr32(s->pending, irq);
    uint32_t edge_trigger = *gic_bmp_ptr32(s->edge_trigger, irq);
    uint32_t level = *gic_bmp_ptr32(s->level, irq);
    uint32_t group = *gic_bmp_ptr32(s->group, irq);
    uint32_t grpmod = *gic_bmp_ptr32(s->grpmod, irq);
    uint32_t enable = *gic_bmp_ptr32(s->enabled, irq);
    uint32_t active = *gic_bmp_ptr32(s->active, irq);

    pend = pending | (~edge_trigger & level);
    pend &= enable;
    pend &= ~active;

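    /* When GICD_CTLR.DS is set, security support is disabled, the group
     * modifier bits are not used, and every interrupt is simply Group 0
     * or Group 1.
     */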
    if (s->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    }

    grpmask = 0;
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= group;
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~group & grpmod);
    }
    if (s->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~group & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

static uint32_t gicr_int_pending(GICv3CPUState *cs)
{
    /* Recalculate which redistributor interrupts are actually pending,
     * and return a 32-bit integer which has a bit set for each interrupt
     * that is eligible to be signaled to the CPU interface.
     *
     * An interrupt is pending if:
     * + the PENDING latch is set OR it is level triggered and the input is 1
     * + its ENABLE bit is set
     * + the GICD enable bit for its group is set
     * + its ACTIVE bit is not set (otherwise it would be Active+Pending)
     * Conveniently we can bulk-calculate this with bitwise operations.
     */
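    /* This is the per-CPU analogue of gicd_int_pending(): all the relevant
     * state lives in this GICv3CPUState, and only the 32 redistributor
     * interrupts (SGIs and PPIs) are covered.
     */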
    uint32_t pend, grpmask, grpmod;

    pend = cs->gicr_ipendr0 | (~cs->edge_trigger & cs->level);
    pend &= cs->gicr_ienabler0;
    pend &= ~cs->gicr_iactiver0;

    if (cs->gic->gicd_ctlr & GICD_CTLR_DS) {
        grpmod = 0;
    } else {
        grpmod = cs->gicr_igrpmodr0;
    }

    grpmask = 0;
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1NS) {
        grpmask |= cs->gicr_igroupr0;
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP1S) {
        grpmask |= (~cs->gicr_igroupr0 & grpmod);
    }
    if (cs->gic->gicd_ctlr & GICD_CTLR_EN_GRP0) {
        grpmask |= (~cs->gicr_igroupr0 & ~grpmod);
    }
    pend &= grpmask;

    return pend;
}

/* Update the interrupt status after state in a redistributor
 * or CPU interface has changed, but don't tell the CPU i/f.
 */
static void gicv3_redist_update_noirqset(GICv3CPUState *cs)
{
    /* Find the highest priority pending interrupt among the
     * redistributor interrupts (SGIs and PPIs).
     */
    bool seenbetter = false;
    uint8_t prio;
    int i;
    uint32_t pend;

    /* Find out which redistributor interrupts are eligible to be
     * signaled to the CPU interface.
     */
    pend = gicr_int_pending(cs);

    if (pend) {
        for (i = 0; i < GIC_INTERNAL; i++) {
            if (!(pend & (1 << i))) {
                continue;
            }
            prio = cs->gicr_ipriorityr[i];
            if (irqbetter(cs, i, prio)) {
                cs->hppi.irq = i;
                cs->hppi.prio = prio;
                seenbetter = true;
            }
        }
    }

    if (seenbetter) {
        cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was no
     * previous pending interrupt at all), then that is still valid, and
     * we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    if (!seenbetter && cs->hppi.prio != 0xff && cs->hppi.irq < GIC_INTERNAL) {
        gicv3_full_update_noirqset(cs->gic);
    }
}

/* Update the GIC status after state in a redistributor or
 * CPU interface has changed, and inform the CPU i/f of
 * its new highest priority pending interrupt.
 */
void gicv3_redist_update(GICv3CPUState *cs)
{
    gicv3_redist_update_noirqset(cs);
    gicv3_cpuif_update(cs);
}

/* Update the GIC status after state in the distributor has
 * changed affecting @len interrupts starting at @start,
 * but don't tell the CPU i/f.
 */
static void gicv3_update_noirqset(GICv3State *s, int start, int len)
{
    int i;
    uint8_t prio;
    uint32_t pend = 0;

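    /* Distributor interrupts are SPIs only; SGIs and PPIs are handled by
     * the redistributor code, so the range must lie entirely at or above
     * GIC_INTERNAL.
     */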
    assert(start >= GIC_INTERNAL);
    assert(len > 0);

    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].seenbetter = false;
    }

    /* Find the highest priority pending interrupt in this range. */
    for (i = start; i < start + len; i++) {
        GICv3CPUState *cs;

        if (i == start || (i & 0x1f) == 0) {
            /* Calculate the next 32 bits worth of pending status */
            pend = gicd_int_pending(s, i & ~0x1f);
        }

        if (!(pend & (1 << (i & 0x1f)))) {
            continue;
        }
        cs = s->gicd_irouter_target[i];
        if (!cs) {
            /* Interrupts targeting no implemented CPU should remain pending
             * and not be forwarded to any CPU.
             */
            continue;
        }
        prio = s->gicd_ipriority[i];
        if (irqbetter(cs, i, prio)) {
            cs->hppi.irq = i;
            cs->hppi.prio = prio;
            cs->seenbetter = true;
        }
    }

    /* If the best interrupt we just found would preempt whatever
     * was the previous best interrupt before this update, then
     * we know it's definitely the best one now.
     * If we didn't find an interrupt that would preempt the previous
     * best, and the previous best is outside our range (or there was
     * no previous pending interrupt at all), then that
     * is still valid, and we leave it as the best.
     * Otherwise, we need to do a full update (because the previous best
     * interrupt has reduced in priority and any other interrupt could
     * now be the new best one).
     */
    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        if (cs->seenbetter) {
            cs->hppi.grp = gicv3_irq_group(cs->gic, cs, cs->hppi.irq);
        }

        if (!cs->seenbetter && cs->hppi.prio != 0xff &&
            cs->hppi.irq >= start && cs->hppi.irq < start + len) {
            gicv3_full_update_noirqset(s);
            break;
        }
    }
}

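/* Update the GIC status after state in the distributor has changed
 * affecting @len interrupts starting at @start, and inform each CPU i/f
 * of its new highest priority pending interrupt.
 */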
void gicv3_update(GICv3State *s, int start, int len)
{
    int i;

    gicv3_update_noirqset(s, start, len);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

void gicv3_full_update_noirqset(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, but
     * don't update any outbound IRQ lines.
     */
    int i;

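    /* Reset every CPU's record of its highest priority pending interrupt:
     * 0xff is the lowest possible priority and doubles as the "no pending
     * interrupt" marker used by the update functions above.
     */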
    for (i = 0; i < s->num_cpu; i++) {
        s->cpu[i].hppi.prio = 0xff;
    }

    /* Note that we can guarantee that these functions will not
     * recursively call back into gicv3_full_update(), because
     * at each point the "previous best" is always outside the
     * range we ask them to update.
     */
    gicv3_update_noirqset(s, GIC_INTERNAL, s->num_irq - GIC_INTERNAL);

    for (i = 0; i < s->num_cpu; i++) {
        gicv3_redist_update_noirqset(&s->cpu[i]);
    }
}

void gicv3_full_update(GICv3State *s)
{
    /* Completely recalculate the GIC status from scratch, including
     * updating outbound IRQ lines.
     */
    int i;

    gicv3_full_update_noirqset(s);
    for (i = 0; i < s->num_cpu; i++) {
        gicv3_cpuif_update(&s->cpu[i]);
    }
}

/* Process a change in an external IRQ input. */
static void gicv3_set_irq(void *opaque, int irq, int level)
{
    /* Meaning of the 'irq' parameter:
     * [0..N-1] : external interrupts
     * [N..N+31] : PPI (internal) interrupts for CPU 0
     * [N+32..N+63] : PPI (internal) interrupts for CPU 1
     * ...
     */
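    /* Purely illustrative example: with num_irq == 256 there are N == 224
     * external inputs, so input line 10 is SPI ID 42 (10 + GIC_INTERNAL),
     * while input line 224 + 32 + 20 maps to PPI ID 20 on CPU 1.
     */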
    GICv3State *s = opaque;

    if (irq < (s->num_irq - GIC_INTERNAL)) {
        /* external interrupt (SPI) */
        gicv3_dist_set_irq(s, irq + GIC_INTERNAL, level);
    } else {
        /* per-cpu interrupt (PPI) */
        int cpu;

        irq -= (s->num_irq - GIC_INTERNAL);
        cpu = irq / GIC_INTERNAL;
        irq %= GIC_INTERNAL;
        assert(cpu < s->num_cpu);
        /* Raising SGIs via this function would be a bug in how the board
         * model wires up interrupts.
         */
        assert(irq >= GIC_NR_SGIS);
        gicv3_redist_set_irq(&s->cpu[cpu], irq, level);
    }
}

static void arm_gicv3_post_load(GICv3State *s)
{
    /* Recalculate our cached idea of the current highest priority
     * pending interrupt, but don't set IRQ or FIQ lines.
     */
    gicv3_full_update_noirqset(s);
    /* Repopulate the cache of GICv3CPUState pointers for target CPUs */
    gicv3_cache_all_target_cpustates(s);
}

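/* MMIO operations for the GIC register frames: entry 0 handles the
 * distributor and entry 1 the redistributor registers; the array is
 * passed to gicv3_init_irqs_and_mmio() at realize time below.
 */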
static const MemoryRegionOps gic_ops[] = {
    {
        .read_with_attrs = gicv3_dist_read,
        .write_with_attrs = gicv3_dist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    },
    {
        .read_with_attrs = gicv3_redist_read,
        .write_with_attrs = gicv3_redist_write,
        .endianness = DEVICE_NATIVE_ENDIAN,
    }
};

static void arm_gic_realize(DeviceState *dev, Error **errp)
{
    /* Device instance realize function for the GIC sysbus device */
    GICv3State *s = ARM_GICV3(dev);
    ARMGICv3Class *agc = ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;

    agc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    gicv3_init_irqs_and_mmio(s, gicv3_set_irq, gic_ops);

    gicv3_init_cpuif(s);
}

static void arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    ARMGICv3Class *agc = ARM_GICV3_CLASS(klass);

    agcc->post_load = arm_gicv3_post_load;
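    /* Chain our realize method after the parent class's: the saved
     * parent_realize pointer is invoked first from arm_gic_realize() above.
     */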
    device_class_set_parent_realize(dc, arm_gic_realize, &agc->parent_realize);
}

static const TypeInfo arm_gicv3_info = {
    .name = TYPE_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = arm_gicv3_class_init,
    .class_size = sizeof(ARMGICv3Class),
};

static void arm_gicv3_register_types(void)
{
    type_register_static(&arm_gicv3_info);
}

type_init(arm_gicv3_register_types)