return NULL;
}
+static bool pre_2_10_vmstate_dummy_icp_needed(void *opaque)
+{
+ /* Dummy entries correspond to unused ICPState objects in older QEMUs,
+ * and newer QEMUs don't even have them. In both cases, we don't want
+ * to send anything on the wire.
+ */
+ return false;
+}
+
+static const VMStateDescription pre_2_10_vmstate_dummy_icp = {
+ .name = "icp/server",
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .needed = pre_2_10_vmstate_dummy_icp_needed,
+ .fields = (VMStateField[]) {
+ VMSTATE_UNUSED(4), /* uint32_t xirr */
+ VMSTATE_UNUSED(1), /* uint8_t pending_priority */
+ VMSTATE_UNUSED(1), /* uint8_t mfrr */
+ VMSTATE_END_OF_LIST()
+ },
+};
+
+static void pre_2_10_vmstate_register_dummy_icp(int i)
+{
+ vmstate_register(NULL, i, &pre_2_10_vmstate_dummy_icp,
+ (void *)(uintptr_t) i);
+}
+
+static void pre_2_10_vmstate_unregister_dummy_icp(int i)
+{
+ vmstate_unregister(NULL, &pre_2_10_vmstate_dummy_icp,
+ (void *)(uintptr_t) i);
+}
+
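+/* Upper bound of the XICS server number space: one block of
+ * kvmppc_smt_threads() ids is reserved for each possible CPU core.
+ */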
+static inline int xics_max_server_number(void)
+{
+ return DIV_ROUND_UP(max_cpus * kvmppc_smt_threads(), smp_threads);
+}
+
static void xics_system_init(MachineState *machine, int nr_irqs, Error **errp)
{
sPAPRMachineState *spapr = SPAPR_MACHINE(machine);
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(machine);
if (kvm_enabled()) {
if (machine_kernel_irqchip_allowed(machine) &&
return;
}
}
+
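+ /* Pre-2.10 QEMUs created an ICPState object for every possible server.
+ * Register dummy "icp/server" entries for all of them so that migration
+ * from/to such QEMUs keeps working; see pre_2_10_vmstate_dummy_icp above.
+ */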
+ if (smc->pre_2_10_has_unused_icps) {
+ int i;
+
+ for (i = 0; i < xics_max_server_number(); i++) {
+ /* Dummy entries get deregistered when real ICPState objects
+ * are registered during CPU core hotplug.
+ */
+ pre_2_10_vmstate_register_dummy_icp(i);
+ }
+ }
}
static int spapr_fixup_cpu_smt_dt(void *fdt, int offset, PowerPCCPU *cpu,
_FDT((fdt_setprop_cell(fdt, offset, "d-cache-size",
pcc->l1_dcache_size)));
} else {
- error_report("Warning: Unknown L1 dcache size for cpu");
+ warn_report("Unknown L1 dcache size for cpu");
}
if (pcc->l1_icache_size) {
_FDT((fdt_setprop_cell(fdt, offset, "i-cache-size",
pcc->l1_icache_size)));
} else {
- error_report("Warning: Unknown L1 icache size for cpu");
+ warn_report("Unknown L1 icache size for cpu");
}
_FDT((fdt_setprop_cell(fdt, offset, "timebase-frequency", tbfreq)));
}
}
+ /* /interrupt controller */
+ if (!spapr_ovec_test(ov5_updates, OV5_XIVE_EXPLOIT)) {
+ spapr_dt_xics(xics_max_server_number(), fdt, PHANDLE_XICP);
+ }
+
offset = fdt_path_offset(fdt, "/chosen");
if (offset < 0) {
offset = fdt_add_subnode(fdt, 0, "chosen");
size -= sizeof(hdr);
- /* Create sceleton */
+ /* Create skeleton */
fdt_skel = g_malloc0(size);
_FDT((fdt_create(fdt_skel, size)));
_FDT((fdt_begin_node(fdt_skel, "")));
{
PowerPCCPU *first_ppc_cpu = POWERPC_CPU(first_cpu);
- char val[2 * 3] = {
+ char val[2 * 4] = {
+ 23, 0x00, /* XIVE mode: 0 = legacy (as in ISA 2.7), 1 = exploitation */
24, 0x00, /* Hash/Radix, filled in below. */
25, 0x00, /* Hash options: Segment Tables == no, GTSE == no. */
26, 0x40, /* Radix options: GTSE == yes. */
if (kvm_enabled()) {
if (kvmppc_has_cap_mmu_radix() && kvmppc_has_cap_mmu_hash_v3()) {
- val[1] = 0x80; /* OV5_MMU_BOTH */
+ val[3] = 0x80; /* OV5_MMU_BOTH */
} else if (kvmppc_has_cap_mmu_radix()) {
- val[1] = 0x40; /* OV5_MMU_RADIX_300 */
+ val[3] = 0x40; /* OV5_MMU_RADIX_300 */
} else {
- val[1] = 0x00; /* Hash */
+ val[3] = 0x00; /* Hash */
}
} else {
if (first_ppc_cpu->env.mmu_model & POWERPC_MMU_V3) {
/* V3 MMU supports both hash and radix (with dynamic switching) */
- val[1] = 0xC0;
+ val[3] = 0xC0;
} else {
/* Otherwise we can only do hash */
- val[1] = 0x00;
+ val[3] = 0x00;
}
}
_FDT(fdt_setprop(fdt, chosen, "ibm,arch-vec-5-platform-support",
void *fdt;
sPAPRPHBState *phb;
char *buf;
- int smt = kvmppc_smt_threads();
fdt = g_malloc0(FDT_MAX_SIZE);
_FDT((fdt_create_empty_tree(fdt, FDT_MAX_SIZE)));
_FDT(fdt_setprop_cell(fdt, 0, "#address-cells", 2));
_FDT(fdt_setprop_cell(fdt, 0, "#size-cells", 2));
- /* /interrupt controller */
- spapr_dt_xics(DIV_ROUND_UP(max_cpus * smt, smp_threads), fdt, PHANDLE_XICP);
-
ret = spapr_populate_memory(spapr, fdt);
if (ret < 0) {
error_report("couldn't setup memory nodes in fdt");
* Set the GR bit in PATB so that we know there is no HPT. */
spapr->patb_entry = PATBE1_GR;
} else {
- spapr->patb_entry = 0;
spapr_setup_hpt_and_vrma(spapr);
}
err = spapr_rtc_import_offset(&spapr->rtc, spapr->rtc_offset);
}
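+ /* Push the guest's MMU configuration (hash vs. radix, GTSE, process
+ * table location) back to KVM on the destination.
+ */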
+ if (spapr->patb_entry) {
+ PowerPCCPU *cpu = POWERPC_CPU(first_cpu);
+ bool radix = !!(spapr->patb_entry & PATBE1_GR);
+ bool gtse = !!(cpu->env.spr[SPR_LPCR] & LPCR_GTSE);
+
+ err = kvmppc_configure_v3_mmu(cpu, radix, gtse, spapr->patb_entry);
+ if (err) {
+ error_report("Process table config unsupported by the host");
+ return -EINVAL;
+ }
+ }
+
return err;
}
sPAPRMachineState *spapr = opaque;
/* "Iteration" header */
- qemu_put_be32(f, spapr->htab_shift);
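+ /* A radix guest has no HPT and htab_shift is 0: write a -1 marker
+ * instead of a shift value.
+ */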
+ if (!spapr->htab_shift) {
+ qemu_put_be32(f, -1);
+ } else {
+ qemu_put_be32(f, spapr->htab_shift);
+ }
if (spapr->htab) {
spapr->htab_save_index = 0;
spapr->htab_first_pass = true;
} else {
- assert(kvm_enabled());
+ if (spapr->htab_shift) {
+ assert(kvm_enabled());
+ }
}
int rc = 0;
/* Iteration header */
- qemu_put_be32(f, 0);
+ if (!spapr->htab_shift) {
+ qemu_put_be32(f, -1);
+ return 0;
+ } else {
+ qemu_put_be32(f, 0);
+ }
if (!spapr->htab) {
assert(kvm_enabled());
int fd;
/* Iteration header */
- qemu_put_be32(f, 0);
+ if (!spapr->htab_shift) {
+ qemu_put_be32(f, -1);
+ return 0;
+ } else {
+ qemu_put_be32(f, 0);
+ }
if (!spapr->htab) {
int rc;
section_hdr = qemu_get_be32(f);
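+ /* A -1 marker means the source has no HPT: drop ours as well */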
+ if (section_hdr == -1) {
+ spapr_free_hpt(spapr);
+ return 0;
+ }
+
if (section_hdr) {
Error *local_err = NULL;
return 0;
}
-static void htab_cleanup(void *opaque)
+static void htab_save_cleanup(void *opaque)
{
sPAPRMachineState *spapr = opaque;
}
static SaveVMHandlers savevm_htab_handlers = {
- .save_live_setup = htab_save_setup,
+ .save_setup = htab_save_setup,
.save_live_iterate = htab_save_iterate,
.save_live_complete_precopy = htab_save_complete,
- .cleanup = htab_cleanup,
+ .save_cleanup = htab_save_cleanup,
.load_state = htab_load,
};
machine->boot_order = g_strdup(boot_device);
}
-/*
- * Reset routine for LMB DR devices.
- *
- * Unlike PCI DR devices, LMB DR devices explicitly register this reset
- * routine. Reset for PCI DR devices will be handled by PHB reset routine
- * when it walks all its children devices. LMB devices reset occurs
- * as part of spapr_ppc_reset().
- */
-static void spapr_drc_reset(void *opaque)
-{
- sPAPRDRConnector *drc = opaque;
- DeviceState *d = DEVICE(drc);
-
- if (d) {
- device_reset(d);
- }
-}
-
static void spapr_create_lmb_dr_connectors(sPAPRMachineState *spapr)
{
MachineState *machine = MACHINE(spapr);
int i;
for (i = 0; i < nr_lmbs; i++) {
- sPAPRDRConnector *drc;
uint64_t addr;
addr = i * lmb_size + spapr->hotplug_memory.base;
- drc = spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
- addr/lmb_size);
- qemu_register_reset(spapr_drc_reset, drc);
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_LMB,
+ addr / lmb_size);
}
}
int core_id = i * smp_threads;
if (mc->has_hotpluggable_cpus) {
- sPAPRDRConnector *drc =
- spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
- (core_id / smp_threads) * smt);
-
- qemu_register_reset(spapr_drc_reset, drc);
+ spapr_dr_connector_new(OBJECT(spapr), TYPE_SPAPR_DRC_CPU,
+ (core_id / smp_threads) * smt);
}
if (i < boot_cores_nr) {
int i, fdt_offset, fdt_size;
void *fdt;
uint64_t addr = addr_start;
+ Error *local_err = NULL;
for (i = 0; i < nr_lmbs; i++) {
drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
fdt_offset = spapr_populate_memory_node(fdt, node, addr,
SPAPR_MEMORY_BLOCK_SIZE);
- spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged, errp);
- addr += SPAPR_MEMORY_BLOCK_SIZE;
- if (!dev->hotplugged) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- /* guests expect coldplugged LMBs to be pre-allocated */
- drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
- drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
+ spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
+ if (local_err) {
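+ /* roll back the attach of the LMBs added so far */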
+ while (addr > addr_start) {
+ addr -= SPAPR_MEMORY_BLOCK_SIZE;
+ drc = spapr_drc_by_id(TYPE_SPAPR_DRC_LMB,
+ addr / SPAPR_MEMORY_BLOCK_SIZE);
+ spapr_drc_detach(drc, dev, NULL);
+ }
+ g_free(fdt);
+ error_propagate(errp, local_err);
+ return;
}
+ addr += SPAPR_MEMORY_BLOCK_SIZE;
}
/* send hotplug notification to the
* guest only in case of hotplugged memory
addr = object_property_get_uint(OBJECT(dimm),
PC_DIMM_ADDR_PROP, &local_err);
if (local_err) {
- pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
- goto out;
+ goto out_unplug;
}
spapr_add_lmbs(dev, addr, size, node,
spapr_ovec_test(ms->ov5_cas, OV5_HP_EVT),
- &error_abort);
+ &local_err);
+ if (local_err) {
+ goto out_unplug;
+ }
+
+ return;
+out_unplug:
+ pc_dimm_memory_unplug(dev, &ms->hotplug_memory, mr);
out:
error_propagate(errp, local_err);
}
error_propagate(errp, local_err);
}
-void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
- sPAPRMachineState *spapr)
+static void *spapr_populate_hotplug_cpu_dt(CPUState *cs, int *fdt_offset,
+ sPAPRMachineState *spapr)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
DeviceClass *dc = DEVICE_GET_CLASS(cs);
Error **errp)
{
MachineState *ms = MACHINE(qdev_get_machine());
+ sPAPRMachineClass *smc = SPAPR_MACHINE_GET_CLASS(ms);
CPUCore *cc = CPU_CORE(dev);
CPUArchId *core_slot = spapr_find_cpu_slot(ms, cc->core_id, NULL);
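+ /* The core is going away and its ICPState objects with it: put the
+ * pre-2.10 dummy migration entries back in place.
+ */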
+ if (smc->pre_2_10_has_unused_icps) {
+ sPAPRCPUCore *sc = SPAPR_CPU_CORE(OBJECT(dev));
+ sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
+ const char *typename = object_class_get_name(scc->cpu_class);
+ size_t size = object_type_get_instance_size(typename);
+ int i;
+
+ for (i = 0; i < cc->nr_threads; i++) {
+ CPUState *cs = CPU(sc->threads + i * size);
+
+ pre_2_10_vmstate_register_dummy_icp(cs->cpu_index);
+ }
+ }
+
assert(core_slot);
core_slot->cpu = NULL;
object_unparent(OBJECT(dev));
{
sPAPRMachineState *spapr = SPAPR_MACHINE(OBJECT(hotplug_dev));
MachineClass *mc = MACHINE_GET_CLASS(spapr);
+ sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
sPAPRCPUCore *core = SPAPR_CPU_CORE(OBJECT(dev));
CPUCore *cc = CPU_CORE(dev);
CPUState *cs = CPU(core->threads);
g_assert(drc || !mc->has_hotpluggable_cpus);
- /*
- * Setup CPU DT entries only for hotplugged CPUs. For boot time or
- * coldplugged CPUs DT entries are setup in spapr_build_fdt().
- */
- if (dev->hotplugged) {
- fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
- }
+ fdt = spapr_populate_hotplug_cpu_dt(cs, &fdt_offset, spapr);
if (drc) {
- spapr_drc_attach(drc, dev, fdt, fdt_offset, !dev->hotplugged,
- &local_err);
+ spapr_drc_attach(drc, dev, fdt, fdt_offset, &local_err);
if (local_err) {
g_free(fdt);
error_propagate(errp, local_err);
* of hotplugged CPUs.
*/
spapr_hotplug_req_add_by_index(drc);
- } else {
- /*
- * Set the right DRC states for cold plugged CPU.
- */
- if (drc) {
- sPAPRDRConnectorClass *drck = SPAPR_DR_CONNECTOR_GET_CLASS(drc);
- drck->set_allocation_state(drc, SPAPR_DR_ALLOCATION_STATE_USABLE);
- drck->set_isolation_state(drc, SPAPR_DR_ISOLATION_STATE_UNISOLATED);
- }
}
core_slot->cpu = OBJECT(dev);
+
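+ /* Real ICPState objects now back the new threads: drop the pre-2.10
+ * dummy migration entries that stood in for them.
+ */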
+ if (smc->pre_2_10_has_unused_icps) {
+ sPAPRCPUCoreClass *scc = SPAPR_CPU_CORE_GET_CLASS(OBJECT(cc));
+ const char *typename = object_class_get_name(scc->cpu_class);
+ size_t size = object_type_get_instance_size(typename);
+ int i;
+
+ for (i = 0; i < cc->nr_threads; i++) {
+ sPAPRCPUCore *sc = SPAPR_CPU_CORE(dev);
+ void *obj = sc->threads + i * size;
+
+ cs = CPU(obj);
+ pre_2_10_vmstate_unregister_dummy_icp(cs->cpu_index);
+ }
+ }
}
static void spapr_core_pre_plug(HotplugHandler *hotplug_dev, DeviceState *dev,
static void spapr_machine_2_9_class_options(MachineClass *mc)
{
+ sPAPRMachineClass *smc = SPAPR_MACHINE_CLASS(mc);
+
spapr_machine_2_10_class_options(mc);
SET_MACHINE_COMPAT(mc, SPAPR_COMPAT_2_9);
mc->numa_auto_assign_ram = numa_legacy_auto_assign_ram;
+ smc->pre_2_10_has_unused_icps = true;
}
DEFINE_SPAPR_MACHINE(2_9, "2.9", false);