#include "qemu/osdep.h"
#include <dirent.h>
#include <sys/ioctl.h>
-#include <sys/mman.h>
#include <sys/vfs.h>
#include <linux/kvm.h>
#include "qemu-common.h"
#include "qemu/error-report.h"
+#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
+#include "sysemu/numa.h"
#include "kvm_ppc.h"
-#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
#include "mmu-hash64.h"
#include "hw/sysbus.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
+#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/ppc.h"
#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
#include "sysemu/hostmem.h"
+#include "qemu/cutils.h"
//#define DEBUG_KVM
static int cap_papr;
static int cap_htab_fd;
static int cap_fixup_hcalls;
+static int cap_htm; /* Hardware transactional memory support */
static uint32_t debug_inst_opcode;
qemu_cpu_kick(CPU(cpu));
}
+/* Check whether we are running with KVM-PR (instead of KVM-HV). This
+ * should only be used for fallback tests - generally we should use
+ * explicit capabilities for the features we want, rather than
+ * assuming what is/isn't available depending on the KVM variant. */
+static bool kvmppc_is_pr(KVMState *ks)
+{
+ /* Assume KVM-PR if the GET_PVINFO capability is available */
+ return kvm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
+}
+
static int kvm_ppc_register_host_cpu_type(void);
int kvm_arch_init(MachineState *ms, KVMState *s)
* only activated after this by kvmppc_set_papr() */
cap_htab_fd = kvm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
+ cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
if (!cap_interrupt_level) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
*
* For that to work we make a few assumptions:
*
- * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
- * KVM which only supports 4K and 16M pages, but supports them
- * regardless of the backing store characteritics. We also don't
- * support 1T segments.
+ * - Check whether we are running "PR" KVM which only supports 4K
+ * and 16M pages, but supports them regardless of the backing
+ *   store characteristics. We also don't support 1T segments.
*
* This is safe as if HV KVM ever supports that capability or PR
* KVM grows support for more page/segment sizes, those versions
* implement KVM_CAP_PPC_GET_SMMU_INFO and thus don't hit
* this fallback.
*/
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ if (kvmppc_is_pr(cs->kvm_state)) {
/* No flags */
info->flags = 0;
info->slb_size = 64;
return fs.f_bsize;
}
+/*
+ * FIXME TOCTTOU: this iterates over memory backends' mem-path, which
+ * may or may not name the same files / on the same filesystem now as
+ * when we actually open and map them. Iterate over the file
+ * descriptors instead, and use qemu_fd_getpagesize().
+ */
static int find_max_supported_pagesize(Object *obj, void *opaque)
{
char *mem_path;
static long getrampagesize(void)
{
long hpsize = LONG_MAX;
+ long mainrampagesize;
Object *memdev_root;
if (mem_path) {
- return gethugepagesize(mem_path);
+ mainrampagesize = gethugepagesize(mem_path);
+ } else {
+ mainrampagesize = getpagesize();
}
/* it's possible we have memory-backend objects with
* backend isn't backed by hugepages.
*/
memdev_root = object_resolve_path("/objects", NULL);
- if (!memdev_root) {
- return getpagesize();
+ if (memdev_root) {
+ object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+ }
+ if (hpsize == LONG_MAX) {
+ /* No additional memory regions found ==> Report main RAM page size */
+ return mainrampagesize;
}
- object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+ /* If NUMA is disabled or the NUMA nodes are not backed with a
+ * memory-backend, then there is at least one node using "normal" RAM,
+ * so if its page size is smaller we must report that size instead.
+ */
+ if (hpsize > mainrampagesize &&
+ (nb_numa_nodes == 0 || numa_info[0].node_memdev == NULL)) {
+ static bool warned;
+ if (!warned) {
+ error_report("Huge page support disabled (n/a for main memory).");
+ warned = true;
+ }
+ return mainrampagesize;
+ }
- return (hpsize == LONG_MAX) ? getpagesize() : hpsize;
+ return hpsize;
}
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
CPUPPCState *env = &cpu->env;
long rampagesize;
int iq, ik, jq, jk;
+ bool has_64k_pages = false;
/* We only handle page sizes for 64-bit server guests for now */
if (!(env->mmu_model & POWERPC_MMU_64)) {
ksps->enc[jk].page_shift)) {
continue;
}
+ if (ksps->enc[jk].page_shift == 16) {
+ has_64k_pages = true;
+ }
qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
env->mmu_model &= ~POWERPC_MMU_1TSEG;
}
+ if (!has_64k_pages) {
+ env->mmu_model &= ~POWERPC_MMU_64K;
+ }
}
#else /* defined (TARGET_PPC64) */
idle_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, kvm_kick_cpu, cpu);
- /* Some targets support access to KVM's guest TLB. */
switch (cenv->mmu_model) {
case POWERPC_MMU_BOOKE206:
+ /* This target supports access to KVM's guest TLB */
ret = kvm_booke206_tlb_init(cpu);
break;
+ case POWERPC_MMU_2_07:
+ if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
+ /* KVM-HV has transactional memory on POWER8 even without the
+ * KVM_CAP_PPC_HTM extension, so enable it here instead. */
+ cap_htm = true;
+ }
+ break;
default:
break;
}
}
#endif /* TARGET_PPC64 */
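+/*
+ * Push the Book3S segment state (PVR, SDR1, the SLB on 64-bit, plus
+ * segment registers and BATs) from env to KVM via KVM_SET_SREGS.
+ */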
+int kvmppc_put_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int i;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ if (env->slb[i].esid & SLB_ESID_V) {
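+ /* A valid entry also carries its SLB index in the low bits of
+  * slbe; the get side extracts it again as rb & 0xfff */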
+ sregs.u.s.ppc64.slb[i].slbe |= i;
+ }
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ /* Beware. We have to swap upper and lower bits here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
+}
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
}
if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
- struct kvm_sregs sregs;
-
- sregs.pvr = env->spr[SPR_PVR];
-
- sregs.u.s.sdr1 = env->spr[SPR_SDR1];
-
- /* Sync SLB */
-#ifdef TARGET_PPC64
- for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
- sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
- if (env->slb[i].esid & SLB_ESID_V) {
- sregs.u.s.ppc64.slb[i].slbe |= i;
- }
- sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
- }
-#endif
-
- /* Sync SRs */
- for (i = 0; i < 16; i++) {
- sregs.u.s.ppc32.sr[i] = env->sr[i];
- }
-
- /* Sync BATs */
- for (i = 0; i < 8; i++) {
- /* Beware. We have to swap upper and lower bits here */
- sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
- | env->DBAT[1][i];
- sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
- | env->IBAT[1][i];
- }
-
- ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
- if (ret) {
+ ret = kvmppc_put_books_sregs(cpu);
+ if (ret < 0) {
return ret;
}
}
env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
}
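+/*
+ * Read the BookE special registers back from KVM into env, guarded by
+ * the feature bits the kernel reports in sregs.u.e.features.
+ */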
+static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_BASE) {
+ env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
+ env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
+ env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
+ env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
+ env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
+ env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
+ env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
+ env->spr[SPR_DECR] = sregs.u.e.dec;
+ env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
+ env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
+ env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
+ env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
+ env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
+ env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
+ env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
+ env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_64) {
+ env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
+ env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
+ env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
+ kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
+ env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
+ kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
+ env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
+ kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
+ env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
+ kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
+ env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
+ kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
+ env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
+ kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
+ env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
+ kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
+ env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
+ kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
+ env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
+ kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
+ env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
+ kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
+ env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
+ kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
+ env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
+ kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
+ env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
+ kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
+ env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
+ kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
+ env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
+ kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
+ env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
+ kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
+
+ if (sregs.u.e.features & KVM_SREGS_E_SPE) {
+ env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
+ kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
+ env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
+ env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
+ kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PM) {
+ env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
+ kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PC) {
+ env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
+ env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
+ kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
+ }
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
+ env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
+ env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
+ env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
+ env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
+ env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
+ env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
+ env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
+ env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
+ env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
+ env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_EXP) {
+ env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
+ }
+
+ if (sregs.u.e.features & KVM_SREGS_E_PD) {
+ env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
+ env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
+ }
+
+ if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
+ env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
+ env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
+ env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
+
+ if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
+ env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
+ env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
+ }
+ }
+
+ return 0;
+}
+
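+/*
+ * Fetch the Book3S segment state from KVM and rebuild the QEMU-side
+ * SDR1, SLB, segment register and BAT copies.
+ */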
+static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
+{
+ CPUPPCState *env = &cpu->env;
+ struct kvm_sregs sregs;
+ int ret;
+ int i;
+
+ ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
+ if (ret < 0) {
+ return ret;
+ }
+
+ if (!env->external_htab) {
+ ppc_store_sdr1(env, sregs.u.s.sdr1);
+ }
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ /*
+ * The packed SLB array we get from KVM_GET_SREGS only contains
+ * information about valid entries. So we flush our internal copy
+ * to get rid of stale ones, then put all valid SLB entries back
+ * in.
+ */
+ memset(env->slb, 0, sizeof(env->slb));
+ for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
+ target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
+ target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
+ /*
+ * Only restore valid entries
+ */
+ if (rb & SLB_ESID_V) {
+ ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
+ }
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ env->sr[i] = sregs.u.s.ppc32.sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
+ env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
+ env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
+ env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
+ }
+
+ return 0;
+}
+
int kvm_arch_get_registers(CPUState *cs)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
- struct kvm_sregs sregs;
uint32_t cr;
int i, ret;
kvm_get_fp(cs);
if (cap_booke_sregs) {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ ret = kvmppc_get_booke_sregs(cpu);
if (ret < 0) {
return ret;
}
-
- if (sregs.u.e.features & KVM_SREGS_E_BASE) {
- env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
- env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
- env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
- env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
- env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
- env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
- env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
- env->spr[SPR_DECR] = sregs.u.e.dec;
- env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
- env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
- env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
- env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
- env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
- env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
- env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
- env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_64) {
- env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
- env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
- env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
- kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
- env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
- kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
- env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
- kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
- env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
- kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
- env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
- kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
- env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
- kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
- env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
- kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
- env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
- kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
- env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
- kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
- env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
- kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
- env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
- kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
- env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
- kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
- env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
- kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
- env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
- kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
- env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
- kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
- env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
- kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
-
- if (sregs.u.e.features & KVM_SREGS_E_SPE) {
- env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
- kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
- env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
- kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
- env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
- kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PM) {
- env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
- kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PC) {
- env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
- kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
- env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
- kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
- }
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
- env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
- env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
- env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
- env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
- env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
- env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
- env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
- env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
- env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
- env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
- }
-
- if (sregs.u.e.features & KVM_SREGS_EXP) {
- env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
- }
-
- if (sregs.u.e.features & KVM_SREGS_E_PD) {
- env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
- env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
- }
-
- if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
- env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
- env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
- env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
-
- if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
- env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
- env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
- }
- }
}
if (cap_segstate) {
- ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
+ ret = kvmppc_get_books_sregs(cpu);
if (ret < 0) {
return ret;
}
-
- if (!env->external_htab) {
- ppc_store_sdr1(env, sregs.u.s.sdr1);
- }
-
- /* Sync SLB */
-#ifdef TARGET_PPC64
- /*
- * The packed SLB array we get from KVM_GET_SREGS only contains
- * information about valid entries. So we flush our internal
- * copy to get rid of stale ones, then put all valid SLB entries
- * back in.
- */
- memset(env->slb, 0, sizeof(env->slb));
- for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
- target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
- target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
- /*
- * Only restore valid entries
- */
- if (rb & SLB_ESID_V) {
- ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
- }
- }
-#endif
-
- /* Sync SRs */
- for (i = 0; i < 16; i++) {
- env->sr[i] = sregs.u.s.ppc32.sr[i];
- }
-
- /* Sync BATs */
- for (i = 0; i < 8; i++) {
- env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
- env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
- env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
- env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
- }
}
if (cap_hior) {
/* Always wake up soon in case the interrupt was level based */
timer_mod(idle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
- (get_ticks_per_sec() / 50));
+ (NANOSECONDS_PER_SECOND / 50));
}
/* We don't know if there are more interrupts pending after this. However,
{
char line[512];
char *ns;
- uint32_t retval = get_ticks_per_sec();
+ uint32_t retval = NANOSECONDS_PER_SECOND;
if (read_cpuinfo("timebase", line, sizeof(line))) {
return retval;
hc[2] = cpu_to_be32(0x48000008);
hc[3] = cpu_to_be32(bswap32(0x3860ffff));
- return 0;
+ return 1;
}
static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
kvmppc_enable_hcall(kvm_state, H_SET_MODE);
}
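+/* Let the kernel handle the H_CLEAR_REF and H_CLEAR_MOD hypercalls,
+ * which clear a page's reference and change (R/C) bits */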
+void kvmppc_enable_clear_ref_mod_hcalls(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
+ kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
/* We have a kernel that predates the htab reset calls. For PR
* KVM, we need to allocate the htab ourselves; an HV KVM of
- * this era, it has allocated a 16MB fixed size hash table
- * already. Kernels of this era have the GET_PVINFO capability
- * only on PR, so we use this hack to determine the right
- * answer */
- if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ * this era has already allocated a 16MB fixed size hash table. */
+ if (kvmppc_is_pr(kvm_state)) {
/* PR - tell caller to allocate htab */
return 0;
} else {
return cap_fixup_hcalls;
}
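+/* Report whether KVM can expose hardware transactional memory to the guest */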
+bool kvmppc_has_cap_htm(void)
+{
+ return cap_htm;
+}
+
static PowerPCCPUClass *ppc_cpu_get_family_class(PowerPCCPUClass *pcc)
{
ObjectClass *oc = OBJECT_CLASS(pcc);
return POWERPC_CPU_CLASS(oc);
}
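+/*
+ * Look up the CPU class matching the host PVR, falling back to a
+ * PVR-mask match for chip revisions QEMU doesn't know exactly.
+ */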
+PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
+{
+ uint32_t host_pvr = mfpvr();
+ PowerPCCPUClass *pvr_pcc;
+
+ pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
+ if (pvr_pcc == NULL) {
+ pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
+ }
+
+ return pvr_pcc;
+}
+
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
.instance_init = kvmppc_host_cpu_initfn,
.class_init = kvmppc_host_cpu_class_init,
};
- uint32_t host_pvr = mfpvr();
PowerPCCPUClass *pvr_pcc;
DeviceClass *dc;
- pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
- if (pvr_pcc == NULL) {
- pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
- }
+ pvr_pcc = kvm_ppc_get_host_cpu_class();
if (pvr_pcc == NULL) {
return -1;
}
type_info.name = g_strdup_printf("%s-"TYPE_POWERPC_CPU, dc->desc);
type_register(&type_info);
+#if defined(TARGET_PPC64)
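+ /* Register a "host" sPAPR CPU core type based on the host CPU class */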
+ type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, "host");
+ type_info.parent = TYPE_SPAPR_CPU_CORE;
+ type_info.instance_size = sizeof(sPAPRCPUCore);
+ type_info.instance_init = NULL;
+ type_info.class_init = spapr_cpu_core_class_init;
+ type_info.class_data = (void *) "host";
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+
+ /* Register generic spapr CPU family class for current host CPU type */
+ type_info.name = g_strdup_printf("%s-"TYPE_SPAPR_CPU_CORE, dc->desc);
+ type_info.class_data = (void *) dc->desc;
+ type_register(&type_info);
+ g_free((void *)type_info.name);
+#endif
+
return 0;
}
return 0;
}
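+/* MSI routes need no architecture-specific post-processing on ppc, so
+ * these hooks are no-ops */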
+int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
+ int vector, PCIDevice *dev)
+{
+ return 0;
+}
+
+int kvm_arch_release_virq_post(int virq)
+{
+ return 0;
+}
+
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
return data & 0xffff;