+ uint32_t regl, regh, reg;
+ uint64_t reg64, redist_typer;
+ int ncpu, i;
+
+ kvm_arm_gicv3_check(s);
+
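+ /* GICR_TYPER is 64 bits wide, but the KVM device accesses it as two
+ * 32-bit halves; combine them so we can check for LPI support (PLPIS).
+ */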
+ kvm_gicr_access(s, GICR_TYPER, 0, &regl, false);
+ kvm_gicr_access(s, GICR_TYPER + 4, 0, &regh, false);
+ redist_typer = ((uint64_t)regh << 32) | regl;
+
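+ /* s->gicd_ctlr -> GICD_CTLR */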
+ reg = s->gicd_ctlr;
+ kvm_gicd_access(s, GICD_CTLR, &reg, true);
+
+ if (redist_typer & GICR_TYPER_PLPIS) {
+ /* Set base addresses before LPIs are enabled by GICR_CTLR write */
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
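+ /* GICR_PROPBASER and GICR_PENDBASER are 64-bit, so write each of
+ * them as two 32-bit halves.
+ */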
+ reg64 = c->gicr_propbaser;
+ regl = (uint32_t)reg64;
+ kvm_gicr_access(s, GICR_PROPBASER, ncpu, &regl, true);
+ regh = (uint32_t)(reg64 >> 32);
+ kvm_gicr_access(s, GICR_PROPBASER + 4, ncpu, &regh, true);
+
+ reg64 = c->gicr_pendbaser;
+ if (!(c->gicr_ctlr & GICR_CTLR_ENABLE_LPIS)) {
+ /* Setting PTZ is advised if LPIs are disabled, to reduce
+ * GIC initialization time.
+ */
+ reg64 |= GICR_PENDBASER_PTZ;
+ }
+ regl = (uint32_t)reg64;
+ kvm_gicr_access(s, GICR_PENDBASER, ncpu, &regl, true);
+ regh = (uint32_t)(reg64 >> 32);
+ kvm_gicr_access(s, GICR_PENDBASER + 4, ncpu, &regh, true);
+ }
+ }
+
+ /* Redistributor state (one per CPU) */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+
+ reg = c->gicr_ctlr;
+ kvm_gicr_access(s, GICR_CTLR, ncpu, &reg, true);
+
+ reg = c->gicr_statusr[GICV3_NS];
+ kvm_gicr_access(s, GICR_STATUSR, ncpu, &reg, true);
+
+ reg = c->gicr_waker;
+ kvm_gicr_access(s, GICR_WAKER, ncpu, &reg, true);
+
+ reg = c->gicr_igroupr0;
+ kvm_gicr_access(s, GICR_IGROUPR0, ncpu, &reg, true);
+
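+ /* Write ~0 to ICENABLER0 to clear all enable bits, then set just the
+ * saved ones via ISENABLER0; the same clear-then-set pattern is used
+ * below for the pending and active state.
+ */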
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICENABLER0, ncpu, &reg, true);
+ reg = c->gicr_ienabler0;
+ kvm_gicr_access(s, GICR_ISENABLER0, ncpu, &reg, true);
+
+ /* Restore config before pending so we treat level/edge correctly */
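+ /* Each PPI has a 2-bit field in GICR_ICFGR1, where bit [1] selects
+ * edge-triggered; half_shuffle32() scatters the 16 saved trigger bits
+ * to the even bit positions, and the shift left then aligns them with
+ * the Int_config[1] bits.
+ */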
+ reg = half_shuffle32(c->edge_trigger >> 16) << 1;
+ kvm_gicr_access(s, GICR_ICFGR1, ncpu, &reg, true);
+
+ reg = c->level;
+ kvm_gic_line_level_access(s, 0, ncpu, &reg, true);
+
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICPENDR0, ncpu, &reg, true);
+ reg = c->gicr_ipendr0;
+ kvm_gicr_access(s, GICR_ISPENDR0, ncpu, &reg, true);
+
+ reg = ~0;
+ kvm_gicr_access(s, GICR_ICACTIVER0, ncpu, &reg, true);
+ reg = c->gicr_iactiver0;
+ kvm_gicr_access(s, GICR_ISACTIVER0, ncpu, &reg, true);
+
+ for (i = 0; i < GIC_INTERNAL; i += 4) {
+ reg = c->gicr_ipriorityr[i] |
+ (c->gicr_ipriorityr[i + 1] << 8) |
+ (c->gicr_ipriorityr[i + 2] << 16) |
+ (c->gicr_ipriorityr[i + 3] << 24);
+ kvm_gicr_access(s, GICR_IPRIORITYR + i, ncpu, &reg, true);
+ }
+ }
+
+ /* Distributor state (shared between all CPUs) */
+ reg = s->gicd_statusr[GICV3_NS];
+ kvm_gicd_access(s, GICD_STATUSR, &reg, true);
+
+ /* s->enable bitmap -> GICD_ISENABLERn */
+ kvm_dist_putbmp(s, GICD_ISENABLER, GICD_ICENABLER, s->enabled);
+
+ /* s->group bitmap -> GICD_IGROUPRn */
+ kvm_dist_putbmp(s, GICD_IGROUPR, 0, s->group);
+
+ /* Restore targets before pending to ensure the pending state is set on
+ * the appropriate CPU interfaces in the kernel
+ */
+
+ /* s->gicd_irouter[irq] -> GICD_IROUTERn
+ * We can't use kvm_dist_put() here because the registers are 64-bit
+ */
+ for (i = GIC_INTERNAL; i < s->num_irq; i++) {
+ uint32_t offset;
+
+ offset = GICD_IROUTER + (sizeof(uint64_t) * i);
+ reg = (uint32_t)s->gicd_irouter[i];
+ kvm_gicd_access(s, offset, &reg, true);
+
+ offset = GICD_IROUTER + (sizeof(uint64_t) * i) + 4;
+ reg = (uint32_t)(s->gicd_irouter[i] >> 32);
+ kvm_gicd_access(s, offset, &reg, true);
+ }
+
+ /* s->trigger bitmap -> GICD_ICFGRn
+ * (restore configuration registers before pending IRQs so we treat
+ * level/edge correctly)
+ */
+ kvm_dist_put_edge_trigger(s, GICD_ICFGR, s->edge_trigger);
+
+ /* s->level bitmap -> line_level */
+ kvm_gic_put_line_level_bmp(s, s->level);
+
+ /* s->pending bitmap -> GICD_ISPENDRn */
+ kvm_dist_putbmp(s, GICD_ISPENDR, GICD_ICPENDR, s->pending);
+
+ /* s->active bitmap -> GICD_ISACTIVERn */
+ kvm_dist_putbmp(s, GICD_ISACTIVER, GICD_ICACTIVER, s->active);
+
+ /* s->gicd_ipriority[] -> GICD_IPRIORITYRn */
+ kvm_dist_put_priority(s, GICD_IPRIORITYR, s->gicd_ipriority);
+
+ /* CPU Interface state (one per CPU) */
+
+ for (ncpu = 0; ncpu < s->num_cpu; ncpu++) {
+ GICv3CPUState *c = &s->cpu[ncpu];
+ int num_pri_bits;
+
+ kvm_gicc_access(s, ICC_SRE_EL1, ncpu, &c->icc_sre_el1, true);
+ kvm_gicc_access(s, ICC_CTLR_EL1, ncpu,
+ &c->icc_ctlr_el1[GICV3_NS], true);
+ kvm_gicc_access(s, ICC_IGRPEN0_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G0], true);
+ kvm_gicc_access(s, ICC_IGRPEN1_EL1, ncpu,
+ &c->icc_igrpen[GICV3_G1NS], true);
+ kvm_gicc_access(s, ICC_PMR_EL1, ncpu, &c->icc_pmr_el1, true);
+ kvm_gicc_access(s, ICC_BPR0_EL1, ncpu, &c->icc_bpr[GICV3_G0], true);
+ kvm_gicc_access(s, ICC_BPR1_EL1, ncpu, &c->icc_bpr[GICV3_G1NS], true);
+
+ num_pri_bits = ((c->icc_ctlr_el1[GICV3_NS] &
+ ICC_CTLR_EL1_PRIBITS_MASK) >>
+ ICC_CTLR_EL1_PRIBITS_SHIFT) + 1;
+
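+ /* ICC_CTLR_EL1.PRIBITS holds (priority bits - 1): 7 priority bits
+ * imply four active priority registers, 6 imply two, and fewer imply
+ * just one, hence the deliberate fall-through cases below.
+ */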
+ switch (num_pri_bits) {
+ case 7:
+ reg64 = c->icc_apr[GICV3_G0][3];
+ kvm_gicc_access(s, ICC_AP0R_EL1(3), ncpu, &reg64, true);
+ reg64 = c->icc_apr[GICV3_G0][2];
+ kvm_gicc_access(s, ICC_AP0R_EL1(2), ncpu, &reg64, true);
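+ /* fall through */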
+ case 6:
+ reg64 = c->icc_apr[GICV3_G0][1];
+ kvm_gicc_access(s, ICC_AP0R_EL1(1), ncpu, &reg64, true);
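+ /* fall through */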
+ default:
+ reg64 = c->icc_apr[GICV3_G0][0];
+ kvm_gicc_access(s, ICC_AP0R_EL1(0), ncpu, &reg64, true);
+ }
+
+ switch (num_pri_bits) {
+ case 7:
+ reg64 = c->icc_apr[GICV3_G1NS][3];
+ kvm_gicc_access(s, ICC_AP1R_EL1(3), ncpu, &reg64, true);
+ reg64 = c->icc_apr[GICV3_G1NS][2];
+ kvm_gicc_access(s, ICC_AP1R_EL1(2), ncpu, &reg64, true);
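+ /* fall through */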
+ case 6:
+ reg64 = c->icc_apr[GICV3_G1NS][1];
+ kvm_gicc_access(s, ICC_AP1R_EL1(1), ncpu, &reg64, true);
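+ /* fall through */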
+ default:
+ reg64 = c->icc_apr[GICV3_G1NS][0];
+ kvm_gicc_access(s, ICC_AP1R_EL1(0), ncpu, &reg64, true);
+ }
+ }