#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/vfs.h>
#include <linux/kvm.h>
#include "qemu-common.h"
-#include "qemu-timer.h"
-#include "sysemu.h"
-#include "kvm.h"
+#include "qemu/timer.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "cpu.h"
-#include "device_tree.h"
+#include "sysemu/cpus.h"
+#include "sysemu/device_tree.h"
#include "hw/sysbus.h"
#include "hw/spapr.h"
static int cap_ppc_smt;
static int cap_ppc_rma;
static int cap_spapr_tce;
+static int cap_hior;
/* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
* takes but ignores it, goes to sleep and never gets notified that there's
* still an interrupt pending.
*
* As a quick workaround, let's just wake up again 20 ms after we injected
* an interrupt. That way we can assure that we're always reinjecting
* interrupts in case the guest swallowed them.
*/
static QEMUTimer *idle_timer;
-static void kvm_kick_env(void *env)
+static void kvm_kick_cpu(void *opaque)
{
- qemu_cpu_kick(env);
+ PowerPCCPU *cpu = opaque;
+
+ qemu_cpu_kick(CPU(cpu));
}
int kvm_arch_init(KVMState *s)
cap_ppc_smt = kvm_check_extension(s, KVM_CAP_PPC_SMT);
cap_ppc_rma = kvm_check_extension(s, KVM_CAP_PPC_RMA);
cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
+ cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
if (!cap_interrupt_level) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
return 0;
}
-static int kvm_arch_sync_sregs(CPUState *cenv)
+static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
{
+ CPUPPCState *cenv = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_sregs sregs;
int ret;
}
}
- ret = kvm_vcpu_ioctl(cenv, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret) {
return ret;
}
sregs.pvr = cenv->spr[SPR_PVR];
- return kvm_vcpu_ioctl(cenv, KVM_SET_SREGS, &sregs);
+ return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
}
/* Set up a shared TLB array with KVM */
-static int kvm_booke206_tlb_init(CPUState *env)
+static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_book3e_206_tlb_params params = {};
struct kvm_config_tlb cfg = {};
struct kvm_enable_cap encap = {};
int ret, i;
if (!kvm_enabled() ||
- !kvm_check_extension(env->kvm_state, KVM_CAP_SW_TLB)) {
+ !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
return 0;
}
encap.cap = KVM_CAP_SW_TLB;
encap.args[0] = (uintptr_t)&cfg;
- ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &encap);
+ ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
if (ret < 0) {
fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
__func__, strerror(-ret));
return 0;
}
-int kvm_arch_init_vcpu(CPUState *cenv)
+
+#if defined(TARGET_PPC64)
+static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
+ struct kvm_ppc_smmu_info *info)
+{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
+
+ memset(info, 0, sizeof(*info));
+
+ /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
+ * we need to "guess" what the supported page sizes are.
+ *
+ * For that to work we make a few assumptions:
+ *
+ * - If KVM_CAP_PPC_GET_PVINFO is supported we are running "PR"
+ * KVM which only supports 4K and 16M pages, but supports them
+ * regardless of the backing store characteristics. We also don't
+ * support 1T segments.
+ *
+ * This is safe because if HV KVM ever supports that capability or PR
+ * KVM grows support for more page/segment sizes, those versions
+ * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
+ * will not hit this fallback.
+ *
+ * - Else we are running HV KVM. This means we only support page
+ * sizes that fit in the backing store. Additionally we only
+ * advertise 64K pages if the processor is ARCH 2.06 and we assume
+ * P7 encodings for the SLB and hash table. Here too, we assume
+ * support for any newer processor will mean a kernel that
+ * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
+ * this fallback.
+ */
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ /* No flags */
+ info->flags = 0;
+ info->slb_size = 64;
+
+ /* Standard 4k base page size segment */
+ info->sps[0].page_shift = 12;
+ info->sps[0].slb_enc = 0;
+ info->sps[0].enc[0].page_shift = 12;
+ info->sps[0].enc[0].pte_enc = 0;
+
+ /* Standard 16M large page size segment */
+ info->sps[1].page_shift = 24;
+ info->sps[1].slb_enc = SLB_VSID_L;
+ info->sps[1].enc[0].page_shift = 24;
+ info->sps[1].enc[0].pte_enc = 0;
+ } else {
+ int i = 0;
+
+ /* HV KVM has backing store size restrictions */
+ info->flags = KVM_PPC_PAGE_SIZES_REAL;
+
+ if (env->mmu_model & POWERPC_MMU_1TSEG) {
+ info->flags |= KVM_PPC_1T_SEGMENTS;
+ }
+
+ if (env->mmu_model == POWERPC_MMU_2_06) {
+ info->slb_size = 32;
+ } else {
+ info->slb_size = 64;
+ }
+
+ /* Standard 4k base page size segment */
+ info->sps[i].page_shift = 12;
+ info->sps[i].slb_enc = 0;
+ info->sps[i].enc[0].page_shift = 12;
+ info->sps[i].enc[0].pte_enc = 0;
+ i++;
+
+ /* 64K on MMU 2.06 */
+ if (env->mmu_model == POWERPC_MMU_2_06) {
+ info->sps[i].page_shift = 16;
+ info->sps[i].slb_enc = 0x110;
+ info->sps[i].enc[0].page_shift = 16;
+ info->sps[i].enc[0].pte_enc = 1;
+ i++;
+ }
+
+ /* Standard 16M large page size segment */
+ info->sps[i].page_shift = 24;
+ info->sps[i].slb_enc = SLB_VSID_L;
+ info->sps[i].enc[0].page_shift = 24;
+ info->sps[i].enc[0].pte_enc = 0;
+ }
+}
+
+static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
+{
+ CPUState *cs = CPU(cpu);
+ int ret;
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (ret == 0) {
+ return;
+ }
+ }
+
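+ /* Kernel lacks KVM_PPC_GET_SMMU_INFO; fall back to guessing */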
+ kvm_get_fallback_smmu_info(cpu, info);
+}
+
+static long getrampagesize(void)
+{
+ struct statfs fs;
+ int ret;
+
+ if (!mem_path) {
+ /* guest RAM is backed by normal anonymous pages */
+ return getpagesize();
+ }
+
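+ /* statfs() can be interrupted by a signal; retry on EINTR */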
+ do {
+ ret = statfs(mem_path, &fs);
+ } while (ret != 0 && errno == EINTR);
+
+ if (ret != 0) {
+ fprintf(stderr, "Couldn't statfs() memory path: %s\n",
+ strerror(errno));
+ exit(1);
+ }
+
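+/* hugetlbfs superblock magic (HUGETLBFS_MAGIC in the kernel headers) */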
+#define HUGETLBFS_MAGIC 0x958458f6
+
+ if (fs.f_type != HUGETLBFS_MAGIC) {
+ /* Explicit mempath, but it's ordinary pages */
+ return getpagesize();
+ }
+
+ /* It's a hugepage; return the huge page size */
+ return fs.f_bsize;
+}
+
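+/* With KVM_PPC_PAGE_SIZES_REAL, a page size is only usable if it does
+ * not exceed the size of the pages backing guest RAM */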
+static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
+{
+ if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
+ return true;
+ }
+
+ return (1ul << shift) <= rampgsize;
+}
+
+static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
{
+ static struct kvm_ppc_smmu_info smmu_info;
+ static bool has_smmu_info;
+ CPUPPCState *env = &cpu->env;
+ long rampagesize;
+ int iq, ik, jq, jk;
+
+ /* We only handle page sizes for 64-bit server guests for now */
+ if (!(env->mmu_model & POWERPC_MMU_64)) {
+ return;
+ }
+
+ /* Collect MMU info from kernel if not already */
+ if (!has_smmu_info) {
+ kvm_get_smmu_info(cpu, &smmu_info);
+ has_smmu_info = true;
+ }
+
+ rampagesize = getrampagesize();
+
+ /* Convert to QEMU form */
+ memset(&env->sps, 0, sizeof(env->sps));
+
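+ /* ik/jk walk the kernel-supplied tables while iq/jq fill the QEMU
+ * tables, skipping sizes the backing store cannot honor */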
+ for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
+ struct ppc_one_seg_page_size *qsps = &env->sps.sps[iq];
+ struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
+
+ if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ ksps->page_shift)) {
+ continue;
+ }
+ qsps->page_shift = ksps->page_shift;
+ qsps->slb_enc = ksps->slb_enc;
+ for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
+ if (!kvm_valid_page_size(smmu_info.flags, rampagesize,
+ ksps->enc[jk].page_shift)) {
+ continue;
+ }
+ qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
+ qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
+ if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
+ break;
+ }
+ }
+ if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
+ break;
+ }
+ }
+ env->slb_nr = smmu_info.slb_size;
+ if (smmu_info.flags & KVM_PPC_1T_SEGMENTS) {
+ env->mmu_model |= POWERPC_MMU_1TSEG;
+ } else {
+ env->mmu_model &= ~POWERPC_MMU_1TSEG;
+ }
+}
+#else /* defined (TARGET_PPC64) */
+
+static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
+{
+}
+
+#endif /* !defined (TARGET_PPC64) */
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *cenv = &cpu->env;
int ret;
- ret = kvm_arch_sync_sregs(cenv);
+ /* Gather server mmu info from KVM and update the CPU state */
+ kvm_fixup_page_sizes(cpu);
+
+ /* Synchronize sregs with kvm */
+ ret = kvm_arch_sync_sregs(cpu);
if (ret) {
return ret;
}
- idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_env, cenv);
+ idle_timer = qemu_new_timer_ns(vm_clock, kvm_kick_cpu, cpu);
/* Some targets support access to KVM's guest TLB. */
switch (cenv->mmu_model) {
case POWERPC_MMU_BOOKE206:
- ret = kvm_booke206_tlb_init(cenv);
+ ret = kvm_booke206_tlb_init(cpu);
break;
default:
break;
return ret;
}
-void kvm_arch_reset_vcpu(CPUState *env)
+void kvm_arch_reset_vcpu(CPUState *cpu)
{
}
-static void kvm_sw_tlb_put(CPUState *env)
+static void kvm_sw_tlb_put(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_dirty_tlb dirty_tlb;
unsigned char *bitmap;
int ret;
dirty_tlb.bitmap = (uintptr_t)bitmap;
dirty_tlb.num_dirty = env->nb_tlb;
- ret = kvm_vcpu_ioctl(env, KVM_DIRTY_TLB, &dirty_tlb);
+ ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
if (ret) {
fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
__func__, strerror(-ret));
g_free(bitmap);
}
-int kvm_arch_put_registers(CPUState *env, int level)
+int kvm_arch_put_registers(CPUState *cs, int level)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
int ret;
int i;
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
- if (ret < 0)
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
+ if (ret < 0) {
return ret;
+ }
regs.ctr = env->ctr;
regs.lr = env->lr;
for (i = 0;i < 32; i++)
regs.gpr[i] = env->gpr[i];
- ret = kvm_vcpu_ioctl(env, KVM_SET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0)
return ret;
if (env->tlb_dirty) {
- kvm_sw_tlb_put(env);
+ kvm_sw_tlb_put(cpu);
env->tlb_dirty = false;
}
+ if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
+ struct kvm_sregs sregs;
+
+ sregs.pvr = env->spr[SPR_PVR];
+
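+ /* Set SDR1 so kernel space finds the HTAB */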
+ sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+
+ /* Sync SLB */
+#ifdef TARGET_PPC64
+ for (i = 0; i < 64; i++) {
+ sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
+ sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
+ }
+#endif
+
+ /* Sync SRs */
+ for (i = 0; i < 16; i++) {
+ sregs.u.s.ppc32.sr[i] = env->sr[i];
+ }
+
+ /* Sync BATs */
+ for (i = 0; i < 8; i++) {
+ /* Beware. We have to swap upper and lower bits here */
+ sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
+ | env->DBAT[1][i];
+ sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
+ | env->IBAT[1][i];
+ }
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
+ uint64_t hior = env->spr[SPR_HIOR];
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_HIOR,
+ .addr = (uintptr_t) &hior,
+ };
+
+ ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret) {
+ return ret;
+ }
+ }
+
return ret;
}
-int kvm_arch_get_registers(CPUState *env)
+int kvm_arch_get_registers(CPUState *cs)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
struct kvm_regs regs;
struct kvm_sregs sregs;
uint32_t cr;
int i, ret;
- ret = kvm_vcpu_ioctl(env, KVM_GET_REGS, &regs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
if (ret < 0)
return ret;
env->gpr[i] = regs.gpr[i];
if (cap_booke_sregs) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
}
if (cap_segstate) {
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
+ ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
if (ret < 0) {
return ret;
}
return 0;
}
-int kvmppc_set_interrupt(CPUState *env, int irq, int level)
+int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
return 0;
}
- kvm_vcpu_ioctl(env, KVM_INTERRUPT, &virq);
+ kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
return 0;
}
#define PPC_INPUT_INT PPC6xx_INPUT_INT
#endif
-void kvm_arch_pre_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int r;
unsigned irq;
- /* PowerPC Qemu tracks the various core input pins (interrupt, critical
+ /* PowerPC QEMU tracks the various core input pins (interrupt, critical
* interrupt, reset, etc) in PPC-specific env->irq_input_state. */
if (!cap_interrupt_level &&
run->ready_for_interrupt_injection &&
irq = KVM_INTERRUPT_SET;
dprintf("injected interrupt %d\n", irq);
- r = kvm_vcpu_ioctl(env, KVM_INTERRUPT, &irq);
+ r = kvm_vcpu_ioctl(cs, KVM_INTERRUPT, &irq);
if (r < 0)
printf("cpu %d fail inject %x\n", env->cpu_index, irq);
* anyways, so we will get a chance to deliver the rest. */
}
-void kvm_arch_post_run(CPUState *env, struct kvm_run *run)
+void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
-int kvm_arch_process_async_events(CPUState *env)
+int kvm_arch_process_async_events(CPUState *cs)
{
- return env->halted;
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ return cpu->env.halted;
}
-static int kvmppc_handle_halt(CPUState *env)
+static int kvmppc_handle_halt(CPUPPCState *env)
{
if (!(env->interrupt_request & CPU_INTERRUPT_HARD) && (msr_ee)) {
env->halted = 1;
}
/* map dcr access to existing qemu dcr emulation */
-static int kvmppc_handle_dcr_read(CPUState *env, uint32_t dcrn, uint32_t *data)
+static int kvmppc_handle_dcr_read(CPUPPCState *env, uint32_t dcrn, uint32_t *data)
{
if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0)
fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
return 0;
}
-static int kvmppc_handle_dcr_write(CPUState *env, uint32_t dcrn, uint32_t data)
+static int kvmppc_handle_dcr_write(CPUPPCState *env, uint32_t dcrn, uint32_t data)
{
if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0)
fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
return 0;
}
-int kvm_arch_handle_exit(CPUState *env, struct kvm_run *run)
+int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
int ret;
switch (run->exit_reason) {
#ifdef CONFIG_PSERIES
case KVM_EXIT_PAPR_HCALL:
dprintf("handle PAPR hypercall\n");
- run->papr_hcall.ret = spapr_hypercall(env, run->papr_hcall.nr,
+ run->papr_hcall.ret = spapr_hypercall(cpu,
+ run->papr_hcall.nr,
run->papr_hcall.args);
- ret = 1;
+ ret = 0;
break;
#endif
default:
break;
}
if (!strncmp(line, field, field_len)) {
- strncpy(value, line, len);
+ pstrcpy(value, len, line);
ret = 0;
break;
}
return kvmppc_read_int_cpu_dt("ibm,dfp");
}
-int kvmppc_get_hypercall(CPUState *env, uint8_t *buf, int buf_len)
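+/* Query the kernel's paravirt info; returns 0 on success, nonzero if
+ * the information is unavailable */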
+static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
+{
+ PowerPCCPU *cpu = ppc_env_get_cpu(env);
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
+ !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+int kvmppc_get_hasidle(CPUPPCState *env)
{
- uint32_t *hc = (uint32_t*)buf;
+ struct kvm_ppc_pvinfo pvinfo;
+
+ if (!kvmppc_get_pvinfo(env, &pvinfo) &&
+ (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
+ return 1;
+ }
+
+ return 0;
+}
+
+int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
+{
+ uint32_t *hc = (uint32_t*)buf;
struct kvm_ppc_pvinfo pvinfo;
- if (kvm_check_extension(env->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
- !kvm_vm_ioctl(env->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
+ if (!kvmppc_get_pvinfo(env, &pvinfo)) {
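+ /* pvinfo.hcall holds the instructions the guest executes to make
+ * a hypercall */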
memcpy(buf, pvinfo.hcall, buf_len);
-
return 0;
}
return 0;
}
-void kvmppc_set_papr(CPUState *env)
+void kvmppc_set_papr(PowerPCCPU *cpu)
{
+ CPUPPCState *env = &cpu->env;
+ CPUState *cs = CPU(cpu);
struct kvm_enable_cap cap = {};
- struct kvm_one_reg reg = {};
- struct kvm_sregs sregs = {};
int ret;
- uint64_t hior = env->spr[SPR_HIOR];
cap.cap = KVM_CAP_PPC_PAPR;
- ret = kvm_vcpu_ioctl(env, KVM_ENABLE_CAP, &cap);
-
- if (ret) {
- goto fail;
- }
-
- /*
- * XXX We set HIOR here. It really should be a qdev property of
- * the CPU node, but we don't have CPUs converted to qdev yet.
- *
- * Once we have qdev CPUs, move HIOR to a qdev property and
- * remove this chunk.
- */
- reg.id = KVM_REG_PPC_HIOR;
- reg.addr = (uintptr_t)&hior;
- ret = kvm_vcpu_ioctl(env, KVM_SET_ONE_REG, ®);
- if (ret) {
- fprintf(stderr, "Couldn't set HIOR. Maybe you're running an old \n"
- "kernel with support for HV KVM but no PAPR PR \n"
- "KVM in which case things will work. If they don't \n"
- "please update your host kernel!\n");
- }
-
- /* Set SDR1 so kernel space finds the HTAB */
- ret = kvm_vcpu_ioctl(env, KVM_GET_SREGS, &sregs);
- if (ret) {
- goto fail;
- }
-
- sregs.u.s.sdr1 = env->spr[SPR_SDR1];
+ ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &cap);
- ret = kvm_vcpu_ioctl(env, KVM_SET_SREGS, &sregs);
if (ret) {
- goto fail;
+ cpu_abort(env, "This KVM version does not support PAPR\n");
}
-
- return;
-
-fail:
- cpu_abort(env, "This KVM version does not support PAPR\n");
}
int kvmppc_smt_threads(void)
return cap_ppc_smt ? cap_ppc_smt : 1;
}
+#ifdef TARGET_PPC64
off_t kvmppc_alloc_rma(const char *name, MemoryRegion *sysmem)
{
void *rma;
return size;
}
+uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
+{
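+ /* cap_ppc_rma >= 2 means the kernel can handle an RMA of any size;
+ * otherwise, clamp it to what the hash table covers: a 2^hash_shift
+ * byte HTAB holds 2^(hash_shift - 7) 128-byte PTEGs (our reading of
+ * the clamp, not a comment from the original patch) */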
+ if (cap_ppc_rma >= 2) {
+ return current_size;
+ }
+ return MIN(current_size,
+ getrampagesize() << (hash_shift - 7));
+}
+#endif
+
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t window_size, int *pfd)
{
struct kvm_create_spapr_tce args = {
int fd;
void *table;
+ /* Must set fd to -1 so we don't try to munmap when called for
+ * destroying the table, which the upper layers -will- do
+ */
+ *pfd = -1;
if (!cap_spapr_tce) {
return NULL;
}
fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
if (fd < 0) {
+ fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
+ liobn);
return NULL;
}
- len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE) * sizeof(VIOsPAPR_RTCE);
+ len = (window_size / SPAPR_TCE_PAGE_SIZE) * sizeof(sPAPRTCE);
/* FIXME: round this up to page size */
table = mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (table == MAP_FAILED) {
+ fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
+ liobn);
close(fd);
return NULL;
}
return -1;
}
- len = (window_size / SPAPR_VIO_TCE_PAGE_SIZE)*sizeof(VIOsPAPR_RTCE);
+ len = (window_size / SPAPR_TCE_PAGE_SIZE)*sizeof(sPAPRTCE);
if ((munmap(table, len) < 0) ||
(close(fd) < 0)) {
- fprintf(stderr, "KVM: Unexpected error removing KVM SPAPR TCE "
- "table: %s", strerror(errno));
+ fprintf(stderr, "KVM: Unexpected error removing TCE table: %s",
+ strerror(errno));
/* Leak the table */
}
return 0;
}
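+/* Returns 0 if QEMU should allocate the htab itself, otherwise the log2
+ * of the hash table size managed by the kernel (negative on error) */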
+int kvmppc_reset_htab(int shift_hint)
+{
+ uint32_t shift = shift_hint;
+
+ if (!kvm_enabled()) {
+ /* Full emulation, tell caller to allocate htab itself */
+ return 0;
+ }
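+ /* Recent kernels can allocate the htab in-kernel; the ioctl updates
+ * shift to the order of the table actually allocated */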
+ if (kvm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
+ int ret;
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
+ if (ret == -ENOTTY) {
+ /* At least some versions of PR KVM advertise the
+ * capability, but don't implement the ioctl(). Oops.
+ * Return 0 so that we allocate the htab in qemu, as is
+ * correct for PR. */
+ return 0;
+ } else if (ret < 0) {
+ return ret;
+ }
+ return shift;
+ }
+
+ /* We have a kernel that predates the htab reset calls. For PR
+ * KVM, we need to allocate the htab ourselves; for an HV KVM of
+ * this era, the kernel has already allocated a 16MB fixed-size
+ * hash table. Kernels of this era have the GET_PVINFO capability
+ * only on PR, so we use this hack to determine the right
+ * answer. */
+ if (kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_PVINFO)) {
+ /* PR - tell caller to allocate htab */
+ return 0;
+ } else {
+ /* HV - assume 16MB kernel allocated htab */
+ return 24;
+ }
+}
+
static inline uint32_t mfpvr(void)
{
uint32_t pvr;
}
}
-const ppc_def_t *kvmppc_host_cpu_def(void)
+static void kvmppc_host_cpu_initfn(Object *obj)
{
+ PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(obj);
+
+ assert(kvm_enabled());
+
+ if (pcc->info->pvr != mfpvr()) {
+ fprintf(stderr, "Your host CPU is unsupported.\n"
+ "Please choose a supported model instead, see -cpu ?.\n");
+ exit(1);
+ }
+}
+
+static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
+{
+ PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
uint32_t host_pvr = mfpvr();
- const ppc_def_t *base_spec;
+ PowerPCCPUClass *pvr_pcc;
ppc_def_t *spec;
uint32_t vmx = kvmppc_get_vmx();
uint32_t dfp = kvmppc_get_dfp();
- base_spec = ppc_find_by_pvr(host_pvr);
-
spec = g_malloc0(sizeof(*spec));
- memcpy(spec, base_spec, sizeof(*spec));
+
+ pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
+ if (pvr_pcc != NULL) {
+ memcpy(spec, pvr_pcc->info, sizeof(*spec));
+ }
+ pcc->info = spec;
+ /* Override the display name for -cpu ? and QMP */
+ pcc->info->name = "host";
/* Now fix up the spec with information we can query from the host */
/* Only override when we know what the host supports */
alter_insns(&spec->insns_flags2, PPC2_DFP, dfp);
}
+}
- return spec;
+int kvmppc_fixup_cpu(CPUPPCState *env)
+{
+ int smt;
+
+ /* Adjust cpu index for SMT */
+ smt = kvmppc_smt_threads();
+ env->cpu_index = (env->cpu_index / smp_threads) * smt
+ + (env->cpu_index % smp_threads);
+
+ return 0;
}
-bool kvm_arch_stop_on_emulation_error(CPUState *env)
+
+bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
return true;
}
-int kvm_arch_on_sigbus_vcpu(CPUState *env, int code, void *addr)
+int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
return 1;
}
{
return 1;
}
+
+static const TypeInfo kvm_host_cpu_type_info = {
+ .name = TYPE_HOST_POWERPC_CPU,
+ .parent = TYPE_POWERPC_CPU,
+ .instance_init = kvmppc_host_cpu_initfn,
+ .class_init = kvmppc_host_cpu_class_init,
+};
+
+static void kvm_ppc_register_types(void)
+{
+ type_register_static(&kvm_host_cpu_type_info);
+}
+
+type_init(kvm_ppc_register_types)