/*
 * ARM Generic Interrupt Controller using KVM in-kernel support
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Written by Pavel Fedin
 * Based on vGICv2 code by Peter Maydell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/intc/arm_gicv3_common.h"
#include "hw/sysbus.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_arm.h"
#include "gicv3_internal.h"
#include "vgic_common.h"
#include "migration/blocker.h"

#ifdef DEBUG_GICV3_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, "kvm_gicv3: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define TYPE_KVM_ARM_GICV3 "kvm-arm-gicv3"
#define KVM_ARM_GICV3(obj) \
    OBJECT_CHECK(GICv3State, (obj), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_CLASS(klass) \
    OBJECT_CLASS_CHECK(KVMARMGICv3Class, (klass), TYPE_KVM_ARM_GICV3)
#define KVM_ARM_GICV3_GET_CLASS(obj) \
    OBJECT_GET_CLASS(KVMARMGICv3Class, (obj), TYPE_KVM_ARM_GICV3)

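/*
 * The KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS group identifies an ICC system
 * register by its op0/op1/CRn/CRm/op2 encoding, packed with the
 * ARM64_SYS_REG_SHIFT_MASK() helpers below. The ICC_*_EL1 values are the
 * architectural encodings of the GICv3 CPU interface registers.
 */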
#define KVM_DEV_ARM_VGIC_SYSREG(op0, op1, crn, crm, op2) \
    (ARM64_SYS_REG_SHIFT_MASK(op0, OP0) | \
     ARM64_SYS_REG_SHIFT_MASK(op1, OP1) | \
     ARM64_SYS_REG_SHIFT_MASK(crn, CRN) | \
     ARM64_SYS_REG_SHIFT_MASK(crm, CRM) | \
     ARM64_SYS_REG_SHIFT_MASK(op2, OP2))

#define ICC_PMR_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 4, 6, 0)
#define ICC_BPR0_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 3)
#define ICC_AP0R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 8, 4 | n)
#define ICC_AP1R_EL1(n) \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 9, n)
#define ICC_BPR1_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 3)
#define ICC_CTLR_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 4)
#define ICC_SRE_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 5)
#define ICC_IGRPEN0_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 6)
#define ICC_IGRPEN1_EL1 \
    KVM_DEV_ARM_VGIC_SYSREG(3, 0, 12, 12, 7)

typedef struct KVMARMGICv3Class {
    ARMGICv3CommonClass parent_class;
    DeviceRealize parent_realize;
    void (*parent_reset)(DeviceState *dev);
} KVMARMGICv3Class;

static void kvm_arm_gicv3_set_irq(void *opaque, int irq, int level)
{
    GICv3State *s = (GICv3State *)opaque;

    kvm_arm_gic_set_irq(s->num_irq, irq, level);
}

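/*
 * For the per-vCPU attribute groups the kernel identifies the target vCPU
 * by its MPIDR affinity, which QEMU keeps in the upper half of GICR_TYPER;
 * KVM_VGIC_ATTR() combines that value (selected by
 * KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) with the register offset or system
 * register encoding to form the device attribute.
 */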
#define KVM_VGIC_ATTR(reg, typer) \
    ((typer & KVM_DEV_ARM_VGIC_V3_MPIDR_MASK) | (reg))

static inline void kvm_gicd_access(GICv3State *s, int offset,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                      KVM_VGIC_ATTR(offset, 0),
                      val, write);
}

static inline void kvm_gicr_access(GICv3State *s, int offset, int cpu,
                                   uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS,
                      KVM_VGIC_ATTR(offset, s->cpu[cpu].gicr_typer),
                      val, write);
}

static inline void kvm_gicc_access(GICv3State *s, uint64_t reg, int cpu,
                                   uint64_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(reg, s->cpu[cpu].gicr_typer),
                      val, write);
}

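/*
 * KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO transfers the latched line level of 32
 * interrupts at a time; the information type is encoded in the top bits
 * of the attribute, hence the extra shift below.
 */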
static inline void kvm_gic_line_level_access(GICv3State *s, int irq, int cpu,
                                             uint32_t *val, bool write)
{
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
                      KVM_VGIC_ATTR(irq, s->cpu[cpu].gicr_typer) |
                      (VGIC_LEVEL_INFO_LINE_LEVEL <<
                       KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT),
                      val, write);
}

/* Loop through each distributor IRQ related register; since bits
 * corresponding to SGIs and PPIs are RAZ/WI when affinity routing
 * is enabled, we skip those.
 */
#define for_each_dist_irq_reg(_irq, _max, _field_width) \
    for (_irq = GIC_INTERNAL; _irq < _max; _irq += (32 / _field_width))

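/*
 * Example of how for_each_dist_irq_reg() advances: with _field_width == 8
 * (one priority byte per interrupt) each 32-bit register covers four
 * interrupts, so _irq steps by 4; with _field_width == 1 (plain bitmaps)
 * it steps by 32, i.e. one register per iteration.
 */
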
static void kvm_dist_get_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    /* With affinity routing enabled the GICD_IPRIORITYR<n> registers for
     * the first 32 interrupts are RAZ/WI (their job is done by
     * GICR_IPRIORITYR<n>), so skip both those registers and the
     * corresponding part of the bitmap, matching for_each_dist_irq_reg()
     * which starts at GIC_INTERNAL.
     */
    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        kvm_gicd_access(s, offset, &reg, false);
        *field = reg;
        offset += 4;
        field++;
    }
}

static void kvm_dist_put_priority(GICv3State *s, uint32_t offset, uint8_t *bmp)
{
    uint32_t reg, *field;
    int irq;

    /* See kvm_dist_get_priority(): skip the RAZ/WI registers and the
     * bitmap entries for the first GIC_INTERNAL interrupts.
     */
    field = (uint32_t *)(bmp + GIC_INTERNAL);
    offset += (GIC_INTERNAL * 8) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 8) {
        reg = *field;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
        field++;
    }
}

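/*
 * GICD_ICFGR<n> holds two configuration bits per interrupt with the
 * edge/level bit in the upper bit of each pair, so one 32-bit register
 * describes 16 interrupts. half_shuffle32()/half_unshuffle32() convert
 * between that layout and QEMU's one-bit-per-interrupt edge_trigger
 * bitmap; the 16-bit shifts select which half of each bitmap word a
 * given register corresponds to.
 */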
static void kvm_dist_get_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* The GICD_ICFGR<n> registers for the first 32 interrupts are RAZ/WI
     * when affinity routing is enabled, and for_each_dist_irq_reg() starts
     * at GIC_INTERNAL, so advance the offset past those registers.
     */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        kvm_gicd_access(s, offset, &reg, false);
        reg = half_unshuffle32(reg >> 1);
        if (irq % 32 != 0) {
            reg = (reg << 16);
        }
        *gic_bmp_ptr32(bmp, irq) |= reg;
        offset += 4;
    }
}

static void kvm_dist_put_edge_trigger(GICv3State *s, uint32_t offset,
                                      uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* As in kvm_dist_get_edge_trigger(), skip the RAZ/WI registers for
     * the first GIC_INTERNAL interrupts.
     */
    offset += (GIC_INTERNAL * 2) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 2) {
        reg = *gic_bmp_ptr32(bmp, irq);
        if (irq % 32 != 0) {
            reg = (reg & 0xffff0000) >> 16;
        } else {
            reg = reg & 0xffff;
        }
        reg = half_shuffle32(reg) << 1;
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}

static void kvm_gic_get_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gic_line_level_access(s, irq, 0, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
    }
}

static void kvm_gic_put_line_level_bmp(GICv3State *s, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gic_line_level_access(s, irq, 0, &reg, true);
    }
}

/* Read a bitmap register group from the kernel VGIC. */
static void kvm_dist_getbmp(GICv3State *s, uint32_t offset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* The distributor register covering the first 32 interrupts is RAZ/WI
     * with affinity routing enabled, so skip one register to stay aligned
     * with for_each_dist_irq_reg(), which starts at GIC_INTERNAL.
     */
    offset += (GIC_INTERNAL * 1) / 8;
    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        kvm_gicd_access(s, offset, &reg, false);
        *gic_bmp_ptr32(bmp, irq) = reg;
        offset += 4;
    }
}

static void kvm_dist_putbmp(GICv3State *s, uint32_t offset,
                            uint32_t clroffset, uint32_t *bmp)
{
    uint32_t reg;
    int irq;

    /* As in kvm_dist_getbmp(), skip the register covering the first
     * GIC_INTERNAL interrupts, for both the set and the clear offsets.
     */
    offset += (GIC_INTERNAL * 1) / 8;
    if (clroffset != 0) {
        clroffset += (GIC_INTERNAL * 1) / 8;
    }
    for_each_dist_irq_reg(irq, s->num_irq, 1) {
        /* If this bitmap is a set/clear register pair, first write to the
         * clear-reg to clear all bits before using the set-reg to write
         * the 1 bits.
         */
        if (clroffset != 0) {
            reg = 0;
            kvm_gicd_access(s, clroffset, &reg, true);
            clroffset += 4;
        }
        reg = *gic_bmp_ptr32(bmp, irq);
        kvm_gicd_access(s, offset, &reg, true);
        offset += 4;
    }
}

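/*
 * GICD_TYPER.ITLinesNumber (bits [4:0]) advertises the interrupt range the
 * kernel VGIC implements as 32 * (ITLinesNumber + 1) interrupt IDs, which
 * is what the check below compares against the number of IRQs the board
 * model asked for.
 */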
static void kvm_arm_gicv3_check(GICv3State *s)
{
    uint32_t reg;
    uint32_t num_irq;

    /* Sanity checking s->num_irq */
    kvm_gicd_access(s, GICD_TYPER, &reg, false);
    num_irq = ((reg & 0x1f) + 1) * 32;

    if (num_irq < s->num_irq) {
        error_report("Model requests %u IRQs, but kernel supports max %u",
                     s->num_irq, num_irq);
        abort();
    }
}

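/*
 * Write the QEMU GICv3 state out to the in-kernel VGIC using the KVM
 * device attribute API. This runs as the vmstate post_load hook and from
 * device reset, so the ordering below matters: LPI base addresses are set
 * before GICR_CTLR, and interrupt configuration before pending state.
 */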
static void kvm_arm_gicv3_put(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    reg = s->gicd_ctlr;
    kvm_gicd_access(s, GICD_CTLR, &reg, true);

    if (redist_typer & GICR_TYPER_PLPIS) {
        /* Set base addresses before LPIs are enabled by GICR_CTLR write */
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            reg64 = c->gicr_propbaser;
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);

            reg64 = c->gicr_pendbaser;
            if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
                /* Setting PTZ is advised if LPIs are disabled, to reduce
                 * GIC initialization time.
                 */
                reg64 |= GICR_PENDBASER_PTZ;
            }
            regl = (uint32_t)reg64;
            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true);
            regh = (uint32_t)(reg64 >> 32);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true);
        }
    }

    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        reg = c->gicr_ctlr;
        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);

        reg = c->gicr_statusr[GICV3_NS];
        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true);

        reg = c->gicr_waker;
        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);

        reg = c->gicr_igroupr0;
        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true);
        reg = c->gicr_ienabler0;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true);

        /* Restore config before pending so we treat level/edge correctly */
        reg = half_shuffle32(c->edge_trigger >> 16) << 1;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true);

        reg = c->level;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true);
        reg = c->gicr_ipendr0;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true);

        reg = ~0;
        kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true);
        reg = c->gicr_iactiver0;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true);

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            reg = c->gicr_ipriorityr[i] |
                  (c->gicr_ipriorityr[i + 1] << 8) |
                  (c->gicr_ipriorityr[i + 2] << 16) |
                  (c->gicr_ipriorityr[i + 3] << 24);
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true);
        }
    }

    /* Distributor state (shared between all CPUs) */
    reg = s->gicd_statusr[GICV3_NS];
    kvm_gicd_access(s, GICD_STATUSR, &reg, true);

    /* s->enable bitmap -> GICD_ISENABLERn */
    kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled);

    /* s->group bitmap -> GICD_IGROUPRn */
    kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group);

    /* Restore targets before pending to ensure the pending state is set on
     * the appropriate CPU interfaces in the kernel
     */

    /* s->gicd_irouter[irq] -> GICD_IROUTERn
     * We can't use the kvm_dist_put*() helpers here because these
     * registers are 64-bit
     */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        /* GICD_IROUTER<n> registers are 64 bits wide and so 8 bytes
         * apart; each is written as two 32-bit halves.
         */
        offset = GICD_IROUTER + (sizeof(uint64_t) * i);
        reg = (uint32_t)s->gicd_irouter[i];
        kvm_gicd_access(s, offset, &reg, true);

        offset = GICD_IROUTER + (sizeof(uint64_t) * i) + 4;
        reg = (uint32_t)(s->gicd_irouter[i] >> 32);
        kvm_gicd_access(s, offset, &reg, true);
    }

    /* s->trigger bitmap -> GICD_ICFGRn
     * (restore configuration registers before pending IRQs so we treat
     * level/edge correctly)
     */
    kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* s->level bitmap -> line_level */
    kvm_gic_put_line_level_bmp(s, s->level);

    /* s->pending bitmap -> GICD_ISPENDRn */
    kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending);

    /* s->active bitmap -> GICD_ISACTIVERn */
    kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active);

    /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */
    kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);

    /* CPU Interface state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], true);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], true);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], true);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true);

        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        /* How many ICC_AP<m>R<n>_EL1 registers exist depends on the number
         * of implemented priority bits, so the cases deliberately fall
         * through to also write all the lower-numbered registers.
         */
        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G0][3];
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G0][2];
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G0][1];
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G0][0];
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true);
        }

        switch (num_pri_bits) {
        case 7:
            reg64 = c->icc_apr[GICV3_G1NS][3];
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true);
            reg64 = c->icc_apr[GICV3_G1NS][2];
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true);
            /* fall through */
        case 6:
            reg64 = c->icc_apr[GICV3_G1NS][1];
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true);
            /* fall through */
        default:
            reg64 = c->icc_apr[GICV3_G1NS][0];
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true);
        }
    }
}

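/*
 * Read the in-kernel VGIC state back into the QEMU device model. This is
 * the vmstate pre_save hook, so migration captures the kernel's current
 * view of the GIC.
 */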
static void kvm_arm_gicv3_get(GICv3State *s)
{
    uint32_t regl, regh, reg;
    uint64_t reg64, redist_typer;
    int ncpu, i;

    kvm_arm_gicv3_check(s);

    kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
    kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
    redist_typer = ((uint64_t)regh << 32) | regl;

    kvm_gicd_access(s, GICD_CTLR, &reg, false);
    s->gicd_ctlr = reg;

    /* Redistributor state (one per CPU) */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];

        kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, false);
        c->gicr_ctlr = reg;

        kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, false);
        c->gicr_statusr[GICV3_NS] = reg;

        kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, false);
        c->gicr_waker = reg;

        kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, false);
        c->gicr_igroupr0 = reg;
        kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, false);
        c->gicr_ienabler0 = reg;
        kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, false);
        c->edge_trigger = half_unshuffle32(reg >> 1) << 16;
        kvm_gic_line_level_access(s, 0, ncpu, &reg, false);
        c->level = reg;
        kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, false);
        c->gicr_ipendr0 = reg;
        kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, false);
        c->gicr_iactiver0 = reg;

        for (i = 0; i < GIC_INTERNAL; i += 4) {
            kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, false);
            c->gicr_ipriorityr[i] = extract32(reg, 0, 8);
            c->gicr_ipriorityr[i + 1] = extract32(reg, 8, 8);
            c->gicr_ipriorityr[i + 2] = extract32(reg, 16, 8);
            c->gicr_ipriorityr[i + 3] = extract32(reg, 24, 8);
        }
    }

    if (redist_typer & GICR_TYPER_PLPIS) {
        for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
            GICv3CPUState *c = &s->cpu[ncpu];

            kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, false);
            c->gicr_propbaser = ((uint64_t)regh << 32) | regl;

            kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, false);
            kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, false);
            c->gicr_pendbaser = ((uint64_t)regh << 32) | regl;
        }
    }

    /* Distributor state (shared between all CPUs) */

    kvm_gicd_access(s, GICD_STATUSR, &reg, false);
    s->gicd_statusr[GICV3_NS] = reg;

    /* GICD_IGROUPRn -> s->group bitmap */
    kvm_dist_getbmp(s, GICD_IGROUPR, s->group);

    /* GICD_ISENABLERn -> s->enabled bitmap */
    kvm_dist_getbmp(s, GICD_ISENABLER, s->enabled);

    /* Line level of irq */
    kvm_gic_get_line_level_bmp(s, s->level);
    /* GICD_ISPENDRn -> s->pending bitmap */
    kvm_dist_getbmp(s, GICD_ISPENDR, s->pending);

    /* GICD_ISACTIVERn -> s->active bitmap */
    kvm_dist_getbmp(s, GICD_ISACTIVER, s->active);

    /* GICD_ICFGRn -> s->trigger bitmap */
    kvm_dist_get_edge_trigger(s, GICD_ICFGR, s->edge_trigger);

    /* GICD_IPRIORITYRn -> s->gicd_ipriority[] */
    kvm_dist_get_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);

    /* GICD_IROUTERn -> s->gicd_irouter[irq] */
    for (i = GIC_INTERNAL; i < s->num_irq; i++) {
        uint32_t offset;

        /* 64-bit GICD_IROUTER<n> registers, 8 bytes apart, read as two
         * 32-bit halves.
         */
        offset = GICD_IROUTER + (sizeof(uint64_t) * i);
        kvm_gicd_access(s, offset, &regl, false);
        offset = GICD_IROUTER + (sizeof(uint64_t) * i) + 4;
        kvm_gicd_access(s, offset, &regh, false);
        s->gicd_irouter[i] = ((uint64_t)regh << 32) | regl;
    }

    /*****************************************************************
     * CPU Interface(s) State
     */

    for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
        GICv3CPUState *c = &s->cpu[ncpu];
        int num_pri_bits;

        kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, false);
        kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
                        &c->icc_ctlr_el1[GICV3_NS], false);
        kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G0], false);
        kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
                        &c->icc_igrpen[GICV3_G1NS], false);
        kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, false);
        kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], false);
        kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], false);
        num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
                        ICC_CTLR_EL1_PRIBITS_MASK) >>
                        ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;

        /* As in kvm_arm_gicv3_put(), the cases fall through so that all
         * the ICC_AP<m>R<n>_EL1 registers implemented for this number of
         * priority bits are read.
         */
        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][3] = reg64;
            kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G0][0] = reg64;
        }

        switch (num_pri_bits) {
        case 7:
            kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][3] = reg64;
            kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][2] = reg64;
            /* fall through */
        case 6:
            kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][1] = reg64;
            /* fall through */
        default:
            kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, false);
            c->icc_apr[GICV3_G1NS][0] = reg64;
        }
    }
}

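/*
 * Reset the CPU interface state for one vCPU. This is hooked up below as
 * the reset function of a dummy ICC_CTLR_EL1 register definition, so it
 * runs on every CPU reset; when a kernel state interface is available it
 * also reads back the ICC_CTLR_EL1 value the hardware actually supports.
 */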
static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
{
    ARMCPU *cpu;
    GICv3State *s;
    GICv3CPUState *c;

    c = (GICv3CPUState *)env->gicv3state;
    s = c->gic;
    cpu = ARM_CPU(c->cpu);

    c->icc_pmr_el1 = 0;
    c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
    c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;

    c->icc_sre_el1 = 0x7;
    memset(c->icc_apr, 0, sizeof(c->icc_apr));
    memset(c->icc_igrpen, 0, sizeof(c->icc_igrpen));

    if (s->migration_blocker) {
        return;
    }

    /* Initialize to actual HW supported configuration */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS,
                      KVM_VGIC_ATTR(ICC_CTLR_EL1, cpu->mp_affinity),
                      &c->icc_ctlr_el1[GICV3_NS], false);

    c->icc_ctlr_el1[GICV3_S] = c->icc_ctlr_el1[GICV3_NS];
}

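/*
 * Device reset: run the common GICv3 reset and then push the reset state
 * into the kernel, unless there is no kernel state interface (in which
 * case migration is blocked anyway).
 */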
static void kvm_arm_gicv3_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);

    DPRINTF("Reset\n");

    kgc->parent_reset(dev);

    if (s->migration_blocker) {
        DPRINTF("Cannot put kernel gic state, no kernel interface\n");
        return;
    }

    kvm_arm_gicv3_put(s);
}

/*
 * The GIC CPU interface registers need to be reset on CPU reset. To have
 * arm_gicv3_icc_reset() called on CPU reset, we register the ARMCPRegInfo
 * below. Since the whole CPU interface is reset from this single register's
 * reset hook, we define only one CPU interface register here instead of
 * defining all of them.
 */
static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
    { .name = "ICC_CTLR_EL1", .state = ARM_CP_STATE_BOTH,
      .opc0 = 3, .opc1 = 0, .crn = 12, .crm = 12, .opc2 = 4,
      /*
       * If ARM_CP_NOP is used, resetfn is not called, so ARM_CP_NO_RAW
       * is the appropriate type.
       */
      .type = ARM_CP_NO_RAW,
      .access = PL1_RW,
      .readfn = arm_cp_read_zero,
      .writefn = arm_cp_write_ignore,
      /*
       * We hang the whole cpu interface reset routine off here
       * rather than parcelling it out into one little function
       * per register
       */
      .resetfn = arm_gicv3_icc_reset,
    },
    REGINFO_SENTINEL
};

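/*
 * Realize: create the in-kernel VGICv3 through the KVM device control API,
 * program the number of IRQs, ask the kernel to complete initialisation,
 * register the distributor and redistributor regions, and install a 1:1
 * GSI-to-SPI routing table.
 */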
static void kvm_arm_gicv3_realize(DeviceState *dev, Error **errp)
{
    GICv3State *s = KVM_ARM_GICV3(dev);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_GET_CLASS(s);
    Error *local_err = NULL;
    int i;

    DPRINTF("kvm_arm_gicv3_realize\n");

    kgc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    if (s->security_extn) {
        error_setg(errp, "the in-kernel VGICv3 does not implement the "
                   "security extensions");
        return;
    }

    gicv3_init_irqs_and_mmio(s, kvm_arm_gicv3_set_irq, NULL);

    for (i = 0; i < s->num_cpu; i++) {
        ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));

        define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
    }

    /* Try to create the device via the device control API */
    s->dev_fd = kvm_create_device(kvm_state, KVM_DEV_TYPE_ARM_VGIC_V3, false);
    if (s->dev_fd < 0) {
        error_setg_errno(errp, -s->dev_fd, "error creating in-kernel VGIC");
        return;
    }

    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS,
                      0, &s->num_irq, true);

    /* Tell the kernel to complete VGIC initialization now */
    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
                      KVM_DEV_ARM_VGIC_CTRL_INIT, NULL, true);

    kvm_arm_register_device(&s->iomem_dist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V3_ADDR_TYPE_DIST, s->dev_fd);
    kvm_arm_register_device(&s->iomem_redist, -1, KVM_DEV_ARM_VGIC_GRP_ADDR,
                            KVM_VGIC_V3_ADDR_TYPE_REDIST, s->dev_fd);

    if (kvm_has_gsi_routing()) {
        /* set up irq routing */
        kvm_init_irq_routing(kvm_state);
        for (i = 0; i < s->num_irq - GIC_INTERNAL; ++i) {
            kvm_irqchip_add_irq_route(kvm_state, i, 0, i);
        }

        kvm_gsi_routing_allowed = true;

        kvm_irqchip_commit_routes(kvm_state);
    }

    if (!kvm_device_check_attr(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_DIST_REGS,
                               GICD_CTLR)) {
        error_setg(&s->migration_blocker, "This operating system kernel does "
                                          "not support vGICv3 migration");
        migrate_add_blocker(s->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(s->migration_blocker);
            return;
        }
    }
}

static void kvm_arm_gicv3_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    ARMGICv3CommonClass *agcc = ARM_GICV3_COMMON_CLASS(klass);
    KVMARMGICv3Class *kgc = KVM_ARM_GICV3_CLASS(klass);

    agcc->pre_save = kvm_arm_gicv3_get;
    agcc->post_load = kvm_arm_gicv3_put;
    kgc->parent_realize = dc->realize;
    kgc->parent_reset = dc->reset;
    dc->realize = kvm_arm_gicv3_realize;
    dc->reset = kvm_arm_gicv3_reset;
}

static const TypeInfo kvm_arm_gicv3_info = {
    .name = TYPE_KVM_ARM_GICV3,
    .parent = TYPE_ARM_GICV3_COMMON,
    .instance_size = sizeof(GICv3State),
    .class_init = kvm_arm_gicv3_class_init,
    .class_size = sizeof(KVMARMGICv3Class),
};

static void kvm_arm_gicv3_register_types(void)
{
    type_register_static(&kvm_arm_gicv3_info);
}

type_init(kvm_arm_gicv3_register_types)