]> Git Repo - qemu.git/blame_incremental - hw/intc/arm_gicv3_common.c
hw/intc/arm_gicv3_its: Handle virtual interrupts in process_its_cmd()
[qemu.git] / hw / intc / arm_gicv3_common.c
... / ...
CommitLineData
1/*
2 * ARM GICv3 support - common bits of emulated and KVM kernel model
3 *
4 * Copyright (c) 2012 Linaro Limited
5 * Copyright (c) 2015 Huawei.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Written by Peter Maydell
8 * Reworked for GICv3 by Shlomo Pongratz and Pavel Fedin
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation, either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include "qemu/osdep.h"
25#include "qapi/error.h"
26#include "qemu/module.h"
27#include "hw/core/cpu.h"
28#include "hw/intc/arm_gicv3_common.h"
29#include "hw/qdev-properties.h"
30#include "migration/vmstate.h"
31#include "gicv3_internal.h"
32#include "hw/arm/linux-boot-if.h"
33#include "sysemu/kvm.h"
34
35
/*
 * Fix up incoming migration data sent by a QEMU version with the GICD
 * bitmap shift bug (see comment below). Called from gicv3_post_load().
 */
static void gicv3_gicd_no_migration_shift_bug_post_load(GICv3State *cs)
{
    /* Flag is true if the source did not have the bug (it was either a
     * fixed QEMU, which sends the subsection setting it, or TCG where the
     * bug never applied): nothing to repair in that case.
     */
    if (cs->gicd_no_migration_shift_bug) {
        return;
    }

    /* Older versions of QEMU had a bug in the handling of state save/restore
     * to the KVM GICv3: they got the offset in the bitmap arrays wrong,
     * so that instead of the data for external interrupts 32 and up
     * starting at bit position 32 in the bitmap, it started at bit
     * position 64. If we're receiving data from a QEMU with that bug,
     * we must move the data down into the right place.
     */
    memmove(cs->group, (uint8_t *)cs->group + GIC_INTERNAL / 8,
            sizeof(cs->group) - GIC_INTERNAL / 8);
    memmove(cs->grpmod, (uint8_t *)cs->grpmod + GIC_INTERNAL / 8,
            sizeof(cs->grpmod) - GIC_INTERNAL / 8);
    memmove(cs->enabled, (uint8_t *)cs->enabled + GIC_INTERNAL / 8,
            sizeof(cs->enabled) - GIC_INTERNAL / 8);
    memmove(cs->pending, (uint8_t *)cs->pending + GIC_INTERNAL / 8,
            sizeof(cs->pending) - GIC_INTERNAL / 8);
    memmove(cs->active, (uint8_t *)cs->active + GIC_INTERNAL / 8,
            sizeof(cs->active) - GIC_INTERNAL / 8);
    memmove(cs->edge_trigger, (uint8_t *)cs->edge_trigger + GIC_INTERNAL / 8,
            sizeof(cs->edge_trigger) - GIC_INTERNAL / 8);

    /*
     * The data has now been corrected, and this QEMU does not have the
     * bug; set the flag to true so that if we migrate onwards from here
     * the destination does not shift the (already correct) data again.
     */
    cs->gicd_no_migration_shift_bug = true;
}
69
70static int gicv3_pre_save(void *opaque)
71{
72 GICv3State *s = (GICv3State *)opaque;
73 ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
74
75 if (c->pre_save) {
76 c->pre_save(s);
77 }
78
79 return 0;
80}
81
82static int gicv3_post_load(void *opaque, int version_id)
83{
84 GICv3State *s = (GICv3State *)opaque;
85 ARMGICv3CommonClass *c = ARM_GICV3_COMMON_GET_CLASS(s);
86
87 gicv3_gicd_no_migration_shift_bug_post_load(s);
88
89 if (c->post_load) {
90 c->post_load(s);
91 }
92 return 0;
93}
94
95static bool virt_state_needed(void *opaque)
96{
97 GICv3CPUState *cs = opaque;
98
99 return cs->num_list_regs != 0;
100}
101
/*
 * Optional subsection carrying the EL2 virtualization interface registers
 * (ICH_*). Sent only when the CPU has list registers (virt_state_needed).
 * NOTE: field order defines the migration wire format -- do not reorder.
 */
static const VMStateDescription vmstate_gicv3_cpu_virt = {
    .name = "arm_gicv3_cpu/virt",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = virt_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64_2DARRAY(ich_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64(ich_hcr_el2, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(ich_lr_el2, GICv3CPUState, GICV3_LR_MAX),
        VMSTATE_UINT64(ich_vmcr_el2, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
115
116static int vmstate_gicv3_cpu_pre_load(void *opaque)
117{
118 GICv3CPUState *cs = opaque;
119
120 /*
121 * If the sre_el1 subsection is not transferred this
122 * means SRE_EL1 is 0x7 (which might not be the same as
123 * our reset value).
124 */
125 cs->icc_sre_el1 = 0x7;
126 return 0;
127}
128
129static bool icc_sre_el1_reg_needed(void *opaque)
130{
131 GICv3CPUState *cs = opaque;
132
133 return cs->icc_sre_el1 != 7;
134}
135
/*
 * Optional subsection carrying ICC_SRE_EL1; omitted when the value is the
 * implicit default 0x7 (see vmstate_gicv3_cpu_pre_load). Non-static --
 * presumably referenced from another translation unit; TODO confirm.
 * NOTE: field layout is migration wire format -- do not reorder.
 */
const VMStateDescription vmstate_gicv3_cpu_sre_el1 = {
    .name = "arm_gicv3_cpu/sre_el1",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = icc_sre_el1_reg_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(icc_sre_el1, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    }
};
146
/*
 * Migration state for one GICv3 CPU interface (redistributor + ICC
 * registers). Embedded per-CPU in vmstate_gicv3 via
 * VMSTATE_STRUCT_VARRAY_POINTER_UINT32.
 * NOTE: field order defines the migration wire format -- do not reorder.
 */
static const VMStateDescription vmstate_gicv3_cpu = {
    .name = "arm_gicv3_cpu",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = vmstate_gicv3_cpu_pre_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(level, GICv3CPUState),
        VMSTATE_UINT32(gicr_ctlr, GICv3CPUState),
        VMSTATE_UINT32_ARRAY(gicr_statusr, GICv3CPUState, 2),
        VMSTATE_UINT32(gicr_waker, GICv3CPUState),
        VMSTATE_UINT64(gicr_propbaser, GICv3CPUState),
        VMSTATE_UINT64(gicr_pendbaser, GICv3CPUState),
        VMSTATE_UINT32(gicr_igroupr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ienabler0, GICv3CPUState),
        VMSTATE_UINT32(gicr_ipendr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_iactiver0, GICv3CPUState),
        VMSTATE_UINT32(edge_trigger, GICv3CPUState),
        VMSTATE_UINT32(gicr_igrpmodr0, GICv3CPUState),
        VMSTATE_UINT32(gicr_nsacr, GICv3CPUState),
        VMSTATE_UINT8_ARRAY(gicr_ipriorityr, GICv3CPUState, GIC_INTERNAL),
        VMSTATE_UINT64_ARRAY(icc_ctlr_el1, GICv3CPUState, 2),
        VMSTATE_UINT64(icc_pmr_el1, GICv3CPUState),
        VMSTATE_UINT64_ARRAY(icc_bpr, GICv3CPUState, 3),
        VMSTATE_UINT64_2DARRAY(icc_apr, GICv3CPUState, 3, 4),
        VMSTATE_UINT64_ARRAY(icc_igrpen, GICv3CPUState, 3),
        VMSTATE_UINT64(icc_ctlr_el3, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_cpu_virt,
        &vmstate_gicv3_cpu_sre_el1,
        NULL
    }
};
181
182static int gicv3_pre_load(void *opaque)
183{
184 GICv3State *cs = opaque;
185
186 /*
187 * The gicd_no_migration_shift_bug flag is used for migration compatibility
188 * for old version QEMU which may have the GICD bmp shift bug under KVM mode.
189 * Strictly, what we want to know is whether the migration source is using
190 * KVM. Since we don't have any way to determine that, we look at whether the
191 * destination is using KVM; this is close enough because for the older QEMU
192 * versions with this bug KVM -> TCG migration didn't work anyway. If the
193 * source is a newer QEMU without this bug it will transmit the migration
194 * subsection which sets the flag to true; otherwise it will remain set to
195 * the value we select here.
196 */
197 if (kvm_enabled()) {
198 cs->gicd_no_migration_shift_bug = false;
199 }
200
201 return 0;
202}
203
/* Subsection .needed hook that unconditionally includes the subsection
 * in the migration stream.
 */
static bool needed_always(void *opaque)
{
    return true;
}
208
/*
 * Always-sent subsection flagging that the source does not have the old
 * GICD bitmap shift bug; its absence means the source is an old, buggy
 * QEMU (see gicv3_pre_load / gicv3_gicd_no_migration_shift_bug_post_load).
 * Non-static -- presumably referenced elsewhere; TODO confirm.
 */
const VMStateDescription vmstate_gicv3_gicd_no_migration_shift_bug = {
    .name = "arm_gicv3/gicd_no_migration_shift_bug",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = needed_always,
    .fields = (VMStateField[]) {
        VMSTATE_BOOL(gicd_no_migration_shift_bug, GICv3State),
        VMSTATE_END_OF_LIST()
    }
};
219
/*
 * Top-level migration state for the GICv3 device: distributor registers
 * plus a variable-length array of per-CPU state (vmstate_gicv3_cpu).
 * NOTE: field order defines the migration wire format -- do not reorder.
 */
static const VMStateDescription vmstate_gicv3 = {
    .name = "arm_gicv3",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = gicv3_pre_load,
    .pre_save = gicv3_pre_save,
    .post_load = gicv3_post_load,
    .priority = MIG_PRI_GICV3,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(gicd_ctlr, GICv3State),
        VMSTATE_UINT32_ARRAY(gicd_statusr, GICv3State, 2),
        VMSTATE_UINT32_ARRAY(group, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(grpmod, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(enabled, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(pending, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(active, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(level, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT32_ARRAY(edge_trigger, GICv3State, GICV3_BMP_SIZE),
        VMSTATE_UINT8_ARRAY(gicd_ipriority, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT64_ARRAY(gicd_irouter, GICv3State, GICV3_MAXIRQ),
        VMSTATE_UINT32_ARRAY(gicd_nsacr, GICv3State,
                             DIV_ROUND_UP(GICV3_MAXIRQ, 16)),
        VMSTATE_STRUCT_VARRAY_POINTER_UINT32(cpu, GICv3State, num_cpu,
                                             vmstate_gicv3_cpu, GICv3CPUState),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * []) {
        &vmstate_gicv3_gicd_no_migration_shift_bug,
        NULL
    }
};
251
252void gicv3_init_irqs_and_mmio(GICv3State *s, qemu_irq_handler handler,
253 const MemoryRegionOps *ops)
254{
255 SysBusDevice *sbd = SYS_BUS_DEVICE(s);
256 int i;
257 int cpuidx;
258
259 /* For the GIC, also expose incoming GPIO lines for PPIs for each CPU.
260 * GPIO array layout is thus:
261 * [0..N-1] spi
262 * [N..N+31] PPIs for CPU 0
263 * [N+32..N+63] PPIs for CPU 1
264 * ...
265 */
266 i = s->num_irq - GIC_INTERNAL + GIC_INTERNAL * s->num_cpu;
267 qdev_init_gpio_in(DEVICE(s), handler, i);
268
269 for (i = 0; i < s->num_cpu; i++) {
270 sysbus_init_irq(sbd, &s->cpu[i].parent_irq);
271 }
272 for (i = 0; i < s->num_cpu; i++) {
273 sysbus_init_irq(sbd, &s->cpu[i].parent_fiq);
274 }
275 for (i = 0; i < s->num_cpu; i++) {
276 sysbus_init_irq(sbd, &s->cpu[i].parent_virq);
277 }
278 for (i = 0; i < s->num_cpu; i++) {
279 sysbus_init_irq(sbd, &s->cpu[i].parent_vfiq);
280 }
281
282 memory_region_init_io(&s->iomem_dist, OBJECT(s), ops, s,
283 "gicv3_dist", 0x10000);
284 sysbus_init_mmio(sbd, &s->iomem_dist);
285
286 s->redist_regions = g_new0(GICv3RedistRegion, s->nb_redist_regions);
287 cpuidx = 0;
288 for (i = 0; i < s->nb_redist_regions; i++) {
289 char *name = g_strdup_printf("gicv3_redist_region[%d]", i);
290 GICv3RedistRegion *region = &s->redist_regions[i];
291
292 region->gic = s;
293 region->cpuidx = cpuidx;
294 cpuidx += s->redist_region_count[i];
295
296 memory_region_init_io(&region->iomem, OBJECT(s),
297 ops ? &ops[1] : NULL, region, name,
298 s->redist_region_count[i] * GICV3_REDIST_SIZE);
299 sysbus_init_mmio(sbd, &region->iomem);
300 g_free(name);
301 }
302}
303
304static void arm_gicv3_common_realize(DeviceState *dev, Error **errp)
305{
306 GICv3State *s = ARM_GICV3_COMMON(dev);
307 int i, rdist_capacity, cpuidx;
308
309 /* revision property is actually reserved and currently used only in order
310 * to keep the interface compatible with GICv2 code, avoiding extra
311 * conditions. However, in future it could be used, for example, if we
312 * implement GICv4.
313 */
314 if (s->revision != 3) {
315 error_setg(errp, "unsupported GIC revision %d", s->revision);
316 return;
317 }
318
319 if (s->num_irq > GICV3_MAXIRQ) {
320 error_setg(errp,
321 "requested %u interrupt lines exceeds GIC maximum %d",
322 s->num_irq, GICV3_MAXIRQ);
323 return;
324 }
325 if (s->num_irq < GIC_INTERNAL) {
326 error_setg(errp,
327 "requested %u interrupt lines is below GIC minimum %d",
328 s->num_irq, GIC_INTERNAL);
329 return;
330 }
331 if (s->num_cpu == 0) {
332 error_setg(errp, "num-cpu must be at least 1");
333 return;
334 }
335
336 /* ITLinesNumber is represented as (N / 32) - 1, so this is an
337 * implementation imposed restriction, not an architectural one,
338 * so we don't have to deal with bitfields where only some of the
339 * bits in a 32-bit word should be valid.
340 */
341 if (s->num_irq % 32) {
342 error_setg(errp,
343 "%d interrupt lines unsupported: not divisible by 32",
344 s->num_irq);
345 return;
346 }
347
348 if (s->lpi_enable && !s->dma) {
349 error_setg(errp, "Redist-ITS: Guest 'sysmem' reference link not set");
350 return;
351 }
352
353 rdist_capacity = 0;
354 for (i = 0; i < s->nb_redist_regions; i++) {
355 rdist_capacity += s->redist_region_count[i];
356 }
357 if (rdist_capacity != s->num_cpu) {
358 error_setg(errp, "Capacity of the redist regions(%d) "
359 "does not match the number of vcpus(%d)",
360 rdist_capacity, s->num_cpu);
361 return;
362 }
363
364 if (s->lpi_enable) {
365 address_space_init(&s->dma_as, s->dma,
366 "gicv3-its-sysmem");
367 }
368
369 s->cpu = g_new0(GICv3CPUState, s->num_cpu);
370
371 for (i = 0; i < s->num_cpu; i++) {
372 CPUState *cpu = qemu_get_cpu(i);
373 uint64_t cpu_affid;
374
375 s->cpu[i].cpu = cpu;
376 s->cpu[i].gic = s;
377 /* Store GICv3CPUState in CPUARMState gicv3state pointer */
378 gicv3_set_gicv3state(cpu, &s->cpu[i]);
379
380 /* Pre-construct the GICR_TYPER:
381 * For our implementation:
382 * Top 32 bits are the affinity value of the associated CPU
383 * CommonLPIAff == 01 (redistributors with same Aff3 share LPI table)
384 * Processor_Number == CPU index starting from 0
385 * DPGS == 0 (GICR_CTLR.DPG* not supported)
386 * Last == 1 if this is the last redistributor in a series of
387 * contiguous redistributor pages
388 * DirectLPI == 0 (direct injection of LPIs not supported)
389 * VLPIS == 0 (virtual LPIs not supported)
390 * PLPIS == 0 (physical LPIs not supported)
391 */
392 cpu_affid = object_property_get_uint(OBJECT(cpu), "mp-affinity", NULL);
393
394 /* The CPU mp-affinity property is in MPIDR register format; squash
395 * the affinity bytes into 32 bits as the GICR_TYPER has them.
396 */
397 cpu_affid = ((cpu_affid & 0xFF00000000ULL) >> 8) |
398 (cpu_affid & 0xFFFFFF);
399 s->cpu[i].gicr_typer = (cpu_affid << 32) |
400 (1 << 24) |
401 (i << 8);
402
403 if (s->lpi_enable) {
404 s->cpu[i].gicr_typer |= GICR_TYPER_PLPIS;
405 }
406 }
407
408 /*
409 * Now go through and set GICR_TYPER.Last for the final
410 * redistributor in each region.
411 */
412 cpuidx = 0;
413 for (i = 0; i < s->nb_redist_regions; i++) {
414 cpuidx += s->redist_region_count[i];
415 s->cpu[cpuidx - 1].gicr_typer |= GICR_TYPER_LAST;
416 }
417}
418
419static void arm_gicv3_finalize(Object *obj)
420{
421 GICv3State *s = ARM_GICV3_COMMON(obj);
422
423 g_free(s->redist_region_count);
424}
425
/*
 * Device reset: restore distributor and per-redistributor state to the
 * architectural reset values (with the "as if secure firmware had run"
 * tweaks when irq_reset_nonsecure is set). CPU-interface (ICC_*) state
 * is deliberately not touched -- it belongs to the CPU's reset domain.
 */
static void arm_gicv3_common_reset(DeviceState *dev)
{
    GICv3State *s = ARM_GICV3_COMMON(dev);
    int i;

    for (i = 0; i < s->num_cpu; i++) {
        GICv3CPUState *cs = &s->cpu[i];

        cs->level = 0;
        cs->gicr_ctlr = 0;
        if (s->lpi_enable) {
            /* Our implementation supports clearing GICR_CTLR.EnableLPIs */
            cs->gicr_ctlr |= GICR_CTLR_CES;
        }
        cs->gicr_statusr[GICV3_S] = 0;
        cs->gicr_statusr[GICV3_NS] = 0;
        /* Redistributor comes out of reset with the CPU marked asleep */
        cs->gicr_waker = GICR_WAKER_ProcessorSleep | GICR_WAKER_ChildrenAsleep;
        cs->gicr_propbaser = 0;
        cs->gicr_pendbaser = 0;
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        if (s->irq_reset_nonsecure) {
            cs->gicr_igroupr0 = 0xffffffff;
        } else {
            cs->gicr_igroupr0 = 0;
        }

        cs->gicr_ienabler0 = 0;
        cs->gicr_ipendr0 = 0;
        cs->gicr_iactiver0 = 0;
        /* SGIs (bits 0..15) are always edge-triggered */
        cs->edge_trigger = 0xffff;
        cs->gicr_igrpmodr0 = 0;
        cs->gicr_nsacr = 0;
        memset(cs->gicr_ipriorityr, 0, sizeof(cs->gicr_ipriorityr));

        /* 0xff == lowest possible priority: no pending interrupt wins */
        cs->hppi.prio = 0xff;
        cs->hpplpi.prio = 0xff;

        /* State in the CPU interface must *not* be reset here, because it
         * is part of the CPU's reset domain, not the GIC device's.
         */
    }

    /* For our implementation affinity routing is always enabled */
    if (s->security_extn) {
        s->gicd_ctlr = GICD_CTLR_ARE_S | GICD_CTLR_ARE_NS;
    } else {
        s->gicd_ctlr = GICD_CTLR_DS | GICD_CTLR_ARE;
    }

    s->gicd_statusr[GICV3_S] = 0;
    s->gicd_statusr[GICV3_NS] = 0;

    memset(s->group, 0, sizeof(s->group));
    memset(s->grpmod, 0, sizeof(s->grpmod));
    memset(s->enabled, 0, sizeof(s->enabled));
    memset(s->pending, 0, sizeof(s->pending));
    memset(s->active, 0, sizeof(s->active));
    memset(s->level, 0, sizeof(s->level));
    memset(s->edge_trigger, 0, sizeof(s->edge_trigger));
    memset(s->gicd_ipriority, 0, sizeof(s->gicd_ipriority));
    memset(s->gicd_irouter, 0, sizeof(s->gicd_irouter));
    memset(s->gicd_nsacr, 0, sizeof(s->gicd_nsacr));
    /* GICD_IROUTER are UNKNOWN at reset so in theory the guest must
     * write these to get sane behaviour and we need not populate the
     * pointer cache here; however having the cache be different for
     * "happened to be 0 from reset" and "guest wrote 0" would be
     * too confusing.
     */
    gicv3_cache_all_target_cpustates(s);

    if (s->irq_reset_nonsecure) {
        /* If we're resetting a TZ-aware GIC as if secure firmware
         * had set it up ready to start a kernel in non-secure, we
         * need to set interrupts to group 1 so the kernel can use them.
         * Otherwise they reset to group 0 like the hardware.
         */
        for (i = GIC_INTERNAL; i < s->num_irq; i++) {
            gicv3_gicd_group_set(s, i);
        }
    }
    /* Freshly-reset state obviously doesn't carry the old migration bug */
    s->gicd_no_migration_shift_bug = true;
}
512
513static void arm_gic_common_linux_init(ARMLinuxBootIf *obj,
514 bool secure_boot)
515{
516 GICv3State *s = ARM_GICV3_COMMON(obj);
517
518 if (s->security_extn && !secure_boot) {
519 /* We're directly booting a kernel into NonSecure. If this GIC
520 * implements the security extensions then we must configure it
521 * to have all the interrupts be NonSecure (this is a job that
522 * is done by the Secure boot firmware in real hardware, and in
523 * this mode QEMU is acting as a minimalist firmware-and-bootloader
524 * equivalent).
525 */
526 s->irq_reset_nonsecure = true;
527 }
528}
529
/* QOM properties for the common GICv3 device. "revision" is currently
 * fixed at 3 (checked in realize); "sysmem" must be linked when
 * "has-lpi" is enabled (used for the ITS DMA address space).
 */
static Property arm_gicv3_common_properties[] = {
    DEFINE_PROP_UINT32("num-cpu", GICv3State, num_cpu, 1),
    DEFINE_PROP_UINT32("num-irq", GICv3State, num_irq, 32),
    DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
    DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
    DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
    DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                      redist_region_count, qdev_prop_uint32, uint32_t),
    DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
                     MemoryRegion *),
    DEFINE_PROP_END_OF_LIST(),
};
542
543static void arm_gicv3_common_class_init(ObjectClass *klass, void *data)
544{
545 DeviceClass *dc = DEVICE_CLASS(klass);
546 ARMLinuxBootIfClass *albifc = ARM_LINUX_BOOT_IF_CLASS(klass);
547
548 dc->reset = arm_gicv3_common_reset;
549 dc->realize = arm_gicv3_common_realize;
550 device_class_set_props(dc, arm_gicv3_common_properties);
551 dc->vmsd = &vmstate_gicv3;
552 albifc->arm_linux_init = arm_gic_common_linux_init;
553}
554
/* Abstract base type shared by the emulated and KVM GICv3 devices;
 * implements the ARM Linux direct-boot interface.
 */
static const TypeInfo arm_gicv3_common_type = {
    .name = TYPE_ARM_GICV3_COMMON,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(GICv3State),
    .class_size = sizeof(ARMGICv3CommonClass),
    .class_init = arm_gicv3_common_class_init,
    .instance_finalize = arm_gicv3_finalize,
    .abstract = true,
    .interfaces = (InterfaceInfo []) {
        { TYPE_ARM_LINUX_BOOT_IF },
        { },
    },
};
568
/* Register the abstract common GICv3 type with QOM at startup. */
static void register_types(void)
{
    type_register_static(&arm_gicv3_common_type);
}

type_init(register_types)
This page took 0.028416 seconds and 4 git commands to generate.