/*
 * ARM Generic Interrupt Controller using KVM in-kernel support
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Pavel Fedin
 * Based on vGICv2 code by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/sysbus.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "kvm_arm.h"
#include "gicv3_internal.h"
#include "vgic_common.h"
#include "migration/blocker.h"

#ifdef DEBUG_GICV3_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "kvm_gicv3: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define TYPE_KVM_ARM_GICV3 "kvm-arm-gicv3"
#define KVM_ARM_GICV3(obj) \
    OBJECT_CHECK(GICv3State, (obj), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_CLASS(klass) \
    OBJECT_CLASS_CHECK(KVMARMGICv3Class, (klass), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_GET_CLASS(obj) \
    OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)

#define KVM_DEV_ARM_VGIC_SYSREG(op0, op1, crn, crm, op2) \
    (ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
     ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
     ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
     ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
     ARM64_SYS_REG_SHIFT_MASK(op2, OP2))

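/*
 * Each ID below packs the architectural op0/op1/CRn/CRm/op2 encoding of a
 * GICv3 CPU interface system register into a KVM device attribute; for
 * example ICC_PMR_EL1 is op0=3, op1=0, CRn=4, CRm=6, op2=0 (S3_0_C4_C6_0).
 */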
#define ICC_PMR_EL1     \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0)
#define ICC_BPR0_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3)
#define ICC_AP0R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | n)
#define ICC_AP1R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, n)
#define ICC_BPR1_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3)
#define ICC_CTLR_EL1    \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4)
#define ICC_SRE_EL1     \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 5)
#define ICC_IGRPEN0_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6)
#define ICC_IGRPEN1_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7)

typedef struct KVMARMGICv3Class {
    ARMGICv3CommonClass parent_class;
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICv3Class;

static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
{
    GICv3State *s = (GICv3State *)opaque;

    kvm_arm_gic_set_irq(s->num_irq, irq, level);
}

#define KVM_VGIC_ATTR(reg, typer) \
    ((typer & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) | (reg))

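/*
 * For the per-CPU register groups the attribute ID carries the target CPU's
 * affinity value (the upper 32 bits of its GICR_TYPER) in the
 * KVM_DEV_ARM_VGIC_V3_MPIDR_MASK field; distributor accesses pass 0 there
 * because the distributor is shared by all CPUs.
 */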
static inline void kvm_gicd_access(GICv3State *s, int offset,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, 0),
                      val, write, &error_abort);
}

static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
                      KVM_VGIC_ATTR(offset, s->cpu[cpu].gicr_typer),
                      val, write, &error_abort);
}

static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
                                   uint64_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(reg, s->cpu[cpu].gicr_typer),
                      val, write, &error_abort);
}

static inline void kvm_gic_line_level_access(GICv3State *s, int irq, int cpu,
                                             uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
                      KVM_VGIC_ATTR(irq, s->cpu[cpu].gicr_typer) |
                      (VGIC_LEVEL_INFO_LINE_LEVEL <<
                       KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT),
                      val, write, &error_abort);
}

/* Loop through each distributor IRQ related register; since bits
 * corresponding to SGIs and PPIs are RAZ/WI when affinity routing
 * is enabled, we skip those.
 */
#define for_each_dist_irq_reg(_irq, _max, _field_width) \
    for (_irq = GIC_INTERNAL; _irq < _max; _irq += (32 / _field_width))

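/*
 * _field_width is the number of register bits per interrupt, so each 32-bit
 * register covers 32 / _field_width interrupts: for example 4 interrupts per
 * GICD_IPRIORITYR<n> (8 bits each), 16 per GICD_ICFGR<n> (2 bits each) and
 * 32 per GICD_ISENABLER<n> (1 bit each).  Each loop iteration therefore
 * corresponds to one 32-bit register.
 */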
static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    /* The priority registers for the first GIC_INTERNAL interrupts live in
     * the redistributor, so start at the register and array entry covering
     * IRQ GIC_INTERNAL.
     */
    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        kvm_gicd_access(s, offset, &reg, false);
        *field = reg;
        offset += 4;
        field++;
    }
}

static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        reg = *field;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
        field++;
    }
}

static void kvm_dist_get_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* Skip the registers covering the first GIC_INTERNAL interrupts */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        kvm_gicd_access(s, offset, &reg, false);
        reg = half_unshuffle32(reg >> 1);
        if (irq % 32 != 0) {
            reg = (reg << 16);
        }
        *gic_bmp_ptr32(bmp, irq) |= reg;
        offset += 4;
    }
}

static void kvm_dist_put_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* Skip the registers covering the first GIC_INTERNAL interrupts */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        reg = *gic_bmp_ptr32(bmp, irq);
        if (irq % 32 != 0) {
            reg = (reg & 0xffff0000) >> 16;
        } else {
            reg = reg & 0xffff;
        }
        reg = half_shuffle32(reg) << 1;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}

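/*
 * Note on the shuffling above: GICD_ICFGR<n> holds a 2-bit field per
 * interrupt, with the odd bit set for edge-triggered interrupts, whereas
 * QEMU's edge_trigger bitmap stores a single bit per interrupt.
 * half_shuffle32() spreads 16 packed bits out to the even bit positions
 * (then << 1 moves them onto the odd "edge" bits), and half_unshuffle32()
 * performs the inverse packing when reading the kernel state back.
 */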
static void kvm_gic_get_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gic_line_level_access(s, irq, 0, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
    }
}

static void kvm_gic_put_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gic_line_level_access(s, irq, 0, &reg, true);
    }
}

/* Read a bitmap register group from the kernel VGIC. */
static void kvm_dist_getbmp(GICv3State *s, uint32_t offset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* Skip the registers covering the first GIC_INTERNAL interrupts */
    offset += (GIC_INTERNAL * 1) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gicd_access(s, offset, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
        offset += 4;
    }
}

/* Write a bitmap register group to the kernel VGIC. */
static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
                            uint32_t clroffset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* Skip the registers covering the first GIC_INTERNAL interrupts */
    offset += (GIC_INTERNAL * 1) / 8;
    if (clroffset != 0) {
        clroffset += (GIC_INTERNAL * 1) / 8;
    }
    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        /* If this bitmap is a set/clear register pair, first write to the
         * clear-reg to clear all bits before using the set-reg to write
         * the 1 bits.
         */
        if (clroffset != 0) {
            reg = 0;
            kvm_gicd_access(s, clroffset, &reg, true);
            clroffset += 4;
        }
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}

static void kvm_arm_gicv3_check(GICv3State *s)
{
    uint32_t reg;
    uint32_t num_irq;

    /* Sanity checking s->num_irq */
    kvm_gicd_access(s, GICD_TYPER, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;

    if (num_irq < s->num_irq) {
        error_report("Model requests %u IRQs, but kernel supports max %u",
                     s->num_irq, num_irq);
        abort();
    }
}

static void kvm_arm_gicv3_put(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    reg = s->gicd_ctlr;
    kvm_gicd_access(s, GICD_CTLR, &reg, true);

    if (redist_typer & GICR_TYPER_PLPIS) {
        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            reg64 = c->gicr_propbaser;
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);

            reg64 = c->gicr_pendbaser;
            if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
                /* Setting PTZ is advised if LPIs are disabled, to reduce
                 * GIC initialization time.
                 */
                reg64 |= GICR_PENDBASER_PTZ;
            }
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true);
        }
    }

    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        reg = c->gicr_ctlr;
        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);

        reg = c->gicr_statusr[GICV3_NS];
        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true);

        reg = c->gicr_waker;
        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);

        reg = c->gicr_igroupr0;
        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true);
        reg = c->gicr_ienabler0;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true);

        /* Restore config before pending so we treat level/edge correctly */
        reg = half_shuffle32(c->edge_trigger >> 16) << 1;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true);

        reg = c->level;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true);
        reg = c->gicr_ipendr0;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true);
        reg = c->gicr_iactiver0;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true);

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            reg = c->gicr_ipriorityr[i] |
                  (c->gicr_ipriorityr[i + 1] << 8) |
                  (c->gicr_ipriorityr[i + 2] << 16) |
                  (c->gicr_ipriorityr[i + 3] << 24);
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true);
        }
    }

    /* Distributor state (shared between all CPUs) */
    reg = s->gicd_statusr[GICV3_NS];
    kvm_gicd_access(s, GICD_STATUSR, &reg, true);

    /* s->enable bitmap -> GICD_ISENABLERn */
    kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled);

    /* s->group bitmap -> GICD_IGROUPRn */
    kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group);

    /* Restore targets before pending to ensure the pending state is set on
     * the appropriate CPU interfaces in the kernel
     */

    /* s->gicd_irouter[irq] -> GICD_IROUTERn
     * We can't use kvm_dist_put() here because the registers are 64-bit
     */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        offset = GICD_IROUTER + (sizeof(uint32_t) * i);
        reg = (uint32_t)s->gicd_irouter[i];
        kvm_gicd_access(s, offset, &reg, true);

        offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
        reg = (uint32_t)(s->gicd_irouter[i] >> 32);
        kvm_gicd_access(s, offset, &reg, true);
    }

    /* s->trigger bitmap -> GICD_ICFGRn
     * (restore configuration registers before pending IRQs so we treat
     * level/edge correctly)
     */
    kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* s->level bitmap -> line_level */
    kvm_gic_put_line_level_bmp(s, s->level);

    /* s->pending bitmap -> GICD_ISPENDRn */
    kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending);

    /* s->active bitmap -> GICD_ISACTIVERn */
    kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active);

    /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */
    kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);

    /* CPU Interface state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], true);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], true);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], true);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true);

        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        /* The number of implemented ICC_AP0R<n>/ICC_AP1R<n> registers
         * depends on the number of priority bits; the cases below
         * deliberately fall through.
         */
        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G0][3];
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G0][2];
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G0][1];
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G0][0];
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true);
        }

        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G1NS][3];
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G1NS][2];
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G1NS][1];
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G1NS][0];
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true);
        }
    }
}

static void kvm_arm_gicv3_get(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    kvm_gicd_access(s, GICD_CTLR, &reg, false);
    s->gicd_ctlr = reg;

    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
        c->gicr_ctlr = reg;

        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, false);
        c->gicr_statusr[GICV3_NS] = reg;

        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
        c->gicr_waker = reg;

        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, false);
        c->gicr_igroupr0 = reg;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, false);
        c->gicr_ienabler0 = reg;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, false);
        c->edge_trigger = half_unshuffle32(reg >> 1) << 16;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, false);
        c->level = reg;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, false);
        c->gicr_ipendr0 = reg;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, false);
        c->gicr_iactiver0 = reg;

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, false);
            c->gicr_ipriorityr[i] = extract32(reg, 0, 8);
            c->gicr_ipriorityr[i + 1] = extract32(reg, 8, 8);
            c->gicr_ipriorityr[i + 2] = extract32(reg, 16, 8);
            c->gicr_ipriorityr[i + 3] = extract32(reg, 24, 8);
        }
    }

    if (redist_typer & GICR_TYPER_PLPIS) {
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, false);
            c->gicr_propbaser = ((uint64_t)regh << 32) | regl;

            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, false);
            c->gicr_pendbaser = ((uint64_t)regh << 32) | regl;
        }
    }

    /* Distributor state (shared between all CPUs) */

    kvm_gicd_access(s, GICD_STATUSR, &reg, false);
    s->gicd_statusr[GICV3_NS] = reg;

    /* GICD_IGROUPRn -> s->group bitmap */
    kvm_dist_getbmp(s, GICD_IGROUPR, s->group);

    /* GICD_ISENABLERn -> s->enabled bitmap */
    kvm_dist_getbmp(s, GICD_ISENABLER, s->enabled);

    /* Line level of irq */
    kvm_gic_get_line_level_bmp(s, s->level);
    /* GICD_ISPENDRn -> s->pending bitmap */
    kvm_dist_getbmp(s, GICD_ISPENDR, s->pending);

    /* GICD_ISACTIVERn -> s->active bitmap */
    kvm_dist_getbmp(s, GICD_ISACTIVER, s->active);

    /* GICD_ICFGRn -> s->trigger bitmap */
    kvm_dist_get_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* GICD_IPRIORITYRn -> s->gicd_ipriority[] */
    kvm_dist_get_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);

    /* GICD_IROUTERn -> s->gicd_irouter[irq] */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        offset = GICD_IROUTER + (sizeof(uint32_t) * i);
        kvm_gicd_access(s, offset, &regl, false);
        offset = GICD_IROUTER + (sizeof(uint32_t) * i) + 4;
        kvm_gicd_access(s, offset, &regh, false);
        s->gicd_irouter[i] = ((uint64_t)regh << 32) | regl;
    }

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, false);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], false);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], false);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], false);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, false);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], false);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], false);
        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        /* The number of implemented ICC_AP0R<n>/ICC_AP1R<n> registers
         * depends on the number of priority bits; the cases below
         * deliberately fall through.
         */
        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][3] = reg64;
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][0] = reg64;
        }

        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][3] = reg64;
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][0] = reg64;
        }
    }
}

static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu;
    GICv3State *s;
    GICv3CPUState *c;

    c = (GICv3CPUState *)env->gicv3state;
    s = c->gic;
    cpu = ARM_CPU(c->cpu);

    c->icc_pmr_el1 = 0;
    c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;

    c->icc_sre_el1 = 0x7;
    memset(c->icc_apr, 0, sizeof(c->icc_apr));
    memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen));

    if (s->migration_blocker) {
        return;
    }

    /* Initialize to actual HW supported configuration */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(ICC_CTLR_EL1, cpu->mp_affinity),
                      &c->icc_ctlr_el1[GICV3_NS], false, &error_abort);

    c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS];
}

static void kvm_arm_gicv3_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);

    DPRINTF("Reset\n");

    kgc->parent_reset(dev);

    if (s->migration_blocker) {
        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
        return;
    }

    kvm_arm_gicv3_put(s);
}

/*
 * The GIC CPU interface registers need to be reset on CPU reset.
 * To get arm_gicv3_icc_reset() called on CPU reset, we register the
 * ARMCPRegInfo below.  Since we reset the whole CPU interface from this
 * single register's reset hook, we define only one CPU interface
 * register instead of defining all of them.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      /*
       * If ARM_CP_NOP is used, resetfn is not called,
       * so ARM_CP_NO_RAW is the appropriate type.
       */
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW,
      .readfn = arm_cp_read_zero,
      .writefn = arm_cp_write_ignore,
      /*
       * We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = arm_gicv3_icc_reset,
    },
    REGINFO_SENTINEL
};

/**
 * vm_change_state_handler - VM change state callback aiming at flushing
 * RDIST pending tables into guest RAM
 *
 * The tables get flushed to guest RAM whenever the VM gets stopped.
 */
static void vm_change_state_handler(void *opaque, int running,
                                    RunState state)
{
    GICv3State *s = (GICv3State *)opaque;
    Error *err = NULL;
    int ret;

    if (running) {
        return;
    }

    ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                            KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES,
                            NULL, true, &err);
    if (err) {
        error_report_err(err);
    }
    if (ret < 0 && ret != -EFAULT) {
        abort();
    }
}


static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = KVM_ARM_GICV3(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;
    int i;

    DPRINTF("kvm_arm_gicv3_realize\n");

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGICv3 does not implement the "
                   "security extensions");
        return;
    }

    gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));

        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
    }

    /* Try to create the device via the device control API */
    s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
    if (s->dev_fd < 0) {
        error_setg_errno(errp, -s->dev_fd, "error creating in-kernel VGIC");
        return;
    }

    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                      0, &s->num_irq, true, &error_abort);

    /* Tell the kernel to complete VGIC initialization now */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                      KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true, &error_abort);

    kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
    kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);

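    /*
     * Note: the 1:1 routes installed below map GSI n onto in-kernel irqchip
     * pin n, which the vGIC treats as SPI n (architectural INTID n + 32).
     */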
    if (kvm_has_gsi_routing()) {
        /* set up irq routing */
        kvm_init_irq_routing(kvm_state);
        for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
            kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
        }

        kvm_gsi_routing_allowed = true;

        kvm_irqchip_commit_routes(kvm_state);
    }

    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                               GICD_CTLR)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv3 migration");
        migrate_add_blocker(s->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(s->migration_blocker);
            return;
        }
    }
    if (kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                              KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES)) {
        qemu_add_vm_change_state_handler(vm_change_state_handler, s);
    }
}

static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass);

    agcc->pre_save = kvm_arm_gicv3_get;
    agcc->post_load = kvm_arm_gicv3_put;
    device_class_set_parent_realize(dc, kvm_arm_gicv3_realize,
                                    &kgc->parent_realize);
    device_class_set_parent_reset(dc, kvm_arm_gicv3_reset, &kgc->parent_reset);
}

static const TypeInfo kvm_arm_gicv3_info = {
    .name = TYPE_KVM_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = kvm_arm_gicv3_class_init,
    .class_size = sizeof(KVMARMGICv3Class),
};

static void kvm_arm_gicv3_register_types(void)
{
    type_register_static(&kvm_arm_gicv3_info);
}

type_init(kvm_arm_gicv3_register_types)