#if defined(TARGET_PPC64)
-static void kvm_get_fallback_smmu_info(PowerPCCPU *cpu,
- struct kvm_ppc_smmu_info *info)
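+/*
+ * Query KVM for the MMU features it supports (page sizes and their
+ * encodings, SLB size, 1T segment support) via the
+ * KVM_PPC_GET_SMMU_INFO vm ioctl, setting an error on failure.
+ */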
+static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
{
- CPUPPCState *env = &cpu->env;
- CPUState *cs = CPU(cpu);
-
- memset(info, 0, sizeof(*info));
-
- /* We don't have the new KVM_PPC_GET_SMMU_INFO ioctl, so
- * need to "guess" what the supported page sizes are.
- *
- * For that to work we make a few assumptions:
- *
- * - Check whether we are running "PR" KVM which only supports 4K
- * and 16M pages, but supports them regardless of the backing
- * store characteritics. We also don't support 1T segments.
- *
- * This is safe as if HV KVM ever supports that capability or PR
- * KVM grows supports for more page/segment sizes, those versions
- * will have implemented KVM_CAP_PPC_GET_SMMU_INFO and thus we
- * will not hit this fallback
- *
- * - Else we are running HV KVM. This means we only support page
- * sizes that fit in the backing store. Additionally we only
- * advertize 64K pages if the processor is ARCH 2.06 and we assume
- * P7 encodings for the SLB and hash table. Here too, we assume
- * support for any newer processor will mean a kernel that
- * implements KVM_CAP_PPC_GET_SMMU_INFO and thus doesn't hit
- * this fallback.
- */
- if (kvmppc_is_pr(cs->kvm_state)) {
- /* No flags */
- info->flags = 0;
- info->slb_size = 64;
-
- /* Standard 4k base page size segment */
- info->sps[0].page_shift = 12;
- info->sps[0].slb_enc = 0;
- info->sps[0].enc[0].page_shift = 12;
- info->sps[0].enc[0].pte_enc = 0;
-
- /* Standard 16M large page size segment */
- info->sps[1].page_shift = 24;
- info->sps[1].slb_enc = SLB_VSID_L;
- info->sps[1].enc[0].page_shift = 24;
- info->sps[1].enc[0].pte_enc = 0;
- } else {
- int i = 0;
-
- /* HV KVM has backing store size restrictions */
- info->flags = KVM_PPC_PAGE_SIZES_REAL;
-
- if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)) {
- info->flags |= KVM_PPC_1T_SEGMENTS;
- }
-
- if (env->mmu_model == POWERPC_MMU_2_06 ||
- env->mmu_model == POWERPC_MMU_2_07) {
- info->slb_size = 32;
- } else {
- info->slb_size = 64;
- }
+ int ret;
- /* Standard 4k base page size segment */
- info->sps[i].page_shift = 12;
- info->sps[i].slb_enc = 0;
- info->sps[i].enc[0].page_shift = 12;
- info->sps[i].enc[0].pte_enc = 0;
- i++;
-
- /* 64K on MMU 2.06 and later */
- if (env->mmu_model == POWERPC_MMU_2_06 ||
- env->mmu_model == POWERPC_MMU_2_07) {
- info->sps[i].page_shift = 16;
- info->sps[i].slb_enc = 0x110;
- info->sps[i].enc[0].page_shift = 16;
- info->sps[i].enc[0].pte_enc = 1;
- i++;
- }
+ assert(kvm_state != NULL);
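+ /*
+ * Without KVM_CAP_PPC_GET_SMMU_INFO the kernel cannot tell us which
+ * MMU features it supports, so error out rather than guessing.
+ */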
- /* Standard 16M large page size segment */
- info->sps[i].page_shift = 24;
- info->sps[i].slb_enc = SLB_VSID_L;
- info->sps[i].enc[0].page_shift = 24;
- info->sps[i].enc[0].pte_enc = 0;
+ if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
+ error_setg(errp, "KVM doesn't expose the MMU features it supports");
+ error_append_hint(errp, "Consider switching to a newer KVM\n");
+ return;
}
-}
-
-static void kvm_get_smmu_info(PowerPCCPU *cpu, struct kvm_ppc_smmu_info *info)
-{
- CPUState *cs = CPU(cpu);
- int ret;
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
- ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_SMMU_INFO, info);
- if (ret == 0) {
- return;
- }
+ ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
+ if (ret == 0) {
+ return;
}
- kvm_get_fallback_smmu_info(cpu, info);
+ error_setg_errno(errp, -ret,
+ "KVM failed to provide the MMU features it supports");
}
struct ppc_radix_page_info *kvm_get_radix_page_info(void)
}
}
-static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
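+/*
+ * Returns true if guest RAM for an HPT guest must be backed by host
+ * pages at least as large as the guest page size, which is the case
+ * under HV KVM (PR KVM has no backing store size restrictions).
+ */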
+bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
- if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
- return true;
+ static struct kvm_ppc_smmu_info smmu_info;
+
+ if (!kvm_enabled()) {
+ return false;
}
- return (1ul << shift) <= rampgsize;
+ kvm_get_smmu_info(&smmu_info, &error_fatal);
+ return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
}
-static long max_cpu_page_size;
-
-static void kvm_fixup_page_sizes(PowerPCCPU *cpu)
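+/*
+ * Check that the hash-MMU configuration the guest CPU model expects
+ * (SLB size, 1T segments, segment and page size encodings) is actually
+ * supported by KVM, setting an error if it isn't.
+ */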
+void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
- static struct kvm_ppc_smmu_info smmu_info;
- static bool has_smmu_info;
- CPUPPCState *env = &cpu->env;
+ struct kvm_ppc_smmu_info smmu_info;
int iq, ik, jq, jk;
+ Error *local_err = NULL;
- /* We only handle page sizes for 64-bit server guests for now */
- if (!(env->mmu_model & POWERPC_MMU_64)) {
+ /* For now, the only checks we have apply to hash64 MMUs */
+ if (!cpu->hash64_opts || !kvm_enabled()) {
return;
}
- /* Collect MMU info from kernel if not already */
- if (!has_smmu_info) {
- kvm_get_smmu_info(cpu, &smmu_info);
- has_smmu_info = true;
+ kvm_get_smmu_info(&smmu_info, &local_err);
+ if (local_err) {
+ error_propagate(errp, local_err);
+ return;
}
- if (!max_cpu_page_size) {
- max_cpu_page_size = qemu_getrampagesize();
+ if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
+ && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
+ error_setg(errp,
+ "KVM does not support 1TiB segments which guest expects");
+ return;
}
- /* Convert to QEMU form */
- memset(cpu->hash64_opts->sps, 0, sizeof(*cpu->hash64_opts->sps));
-
- /* If we have HV KVM, we need to forbid CI large pages if our
- * host page size is smaller than 64K.
- */
- if (smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL) {
- if (getpagesize() >= 0x10000) {
- cpu->hash64_opts->flags |= PPC_HASH64_CI_LARGEPAGE;
- } else {
- cpu->hash64_opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
- }
+ if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
+ error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
+ smmu_info.slb_size, cpu->hash64_opts->slb_size);
+ return;
}
/*
- * XXX This loop should be an entry wide AND of the capabilities that
- * the selected CPU has with the capabilities that KVM supports.
+ * Verify that every page size supported by the CPU model is also
+ * supported by KVM with the same encodings
*/
- for (ik = iq = 0; ik < KVM_PPC_PAGE_SIZES_MAX_SZ; ik++) {
+ for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
- struct kvm_ppc_one_seg_page_size *ksps = &smmu_info.sps[ik];
+ struct kvm_ppc_one_seg_page_size *ksps;
- if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
- ksps->page_shift)) {
- continue;
- }
- qsps->page_shift = ksps->page_shift;
- qsps->slb_enc = ksps->slb_enc;
- for (jk = jq = 0; jk < KVM_PPC_PAGE_SIZES_MAX_SZ; jk++) {
- if (!kvm_valid_page_size(smmu_info.flags, max_cpu_page_size,
- ksps->enc[jk].page_shift)) {
- continue;
- }
- qsps->enc[jq].page_shift = ksps->enc[jk].page_shift;
- qsps->enc[jq].pte_enc = ksps->enc[jk].pte_enc;
- if (++jq >= PPC_PAGE_SIZES_MAX_SZ) {
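+ /* Find the KVM entry matching this base page shift, if any */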
+ for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
+ if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
break;
}
}
- if (++iq >= PPC_PAGE_SIZES_MAX_SZ) {
- break;
+ if (ik >= ARRAY_SIZE(smmu_info.sps)) {
+ error_setg(errp, "KVM doesn't support for base page shift %u",
+ qsps->page_shift);
+ return;
}
- }
- cpu->hash64_opts->slb_size = smmu_info.slb_size;
- if (!(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
- cpu->hash64_opts->flags &= ~PPC_HASH64_1TSEG;
- }
-}
-
-bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
-{
- Object *mem_obj = object_resolve_path(obj_path, NULL);
- long pagesize = host_memory_backend_pagesize(MEMORY_BACKEND(mem_obj));
- return pagesize >= max_cpu_page_size;
-}
+ ksps = &smmu_info.sps[ik];
+ if (ksps->slb_enc != qsps->slb_enc) {
+ error_setg(errp,
+"KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
+ ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
+ return;
+ }
-#else /* defined (TARGET_PPC64) */
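+ /*
+ * Check each of the guest's page size encodings for this segment
+ * size against the encodings KVM provides.
+ */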
+ for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
+ for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
+ if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
+ break;
+ }
+ }
-static inline void kvm_fixup_page_sizes(PowerPCCPU *cpu)
-{
-}
+ if (jk >= ARRAY_SIZE(ksps->enc)) {
+ error_setg(errp, "KVM doesn't support page shift %u/%u",
+ qsps->enc[jq].page_shift, qsps->page_shift);
+ return;
+ }
+ if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
+ error_setg(errp,
+"KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
+ ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
+ qsps->page_shift, qsps->enc[jq].pte_enc);
+ return;
+ }
+ }
+ }
-bool kvmppc_is_mem_backend_page_size_ok(const char *obj_path)
-{
- return true;
+ if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
+ /*
+ * Mostly, the guest page sizes we can use are tied to the host
+ * pages used to map guest RAM, which is handled in the platform
+ * code. Cache-Inhibited large pages (64k), however, are used for
+ * I/O, so if they're mapped to the host at all it will be a
+ * normal mapping, not a special hugepage one used for RAM.
+ */
+ if (getpagesize() < 0x10000) {
+ error_setg(errp,
+ "KVM can't supply 64kiB CI pages, which guest expects");
+ }
+ }
}
-
-#endif /* !defined (TARGET_PPC64) */
+#endif /* defined(TARGET_PPC64) */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
CPUPPCState *cenv = &cpu->env;
int ret;
- /* Gather server mmu info from KVM and update the CPU state */
- kvm_fixup_page_sizes(cpu);
-
/* Synchronize sregs with kvm */
ret = kvm_arch_sync_sregs(cpu);
if (ret) {
return 0;
}
-#if defined(TARGET_PPCEMB)
-#define PPC_INPUT_INT PPC40x_INPUT_INT
-#elif defined(TARGET_PPC64)
+#if defined(TARGET_PPC64)
#define PPC_INPUT_INT PPC970_INPUT_INT
#else
#define PPC_INPUT_INT PPC6xx_INPUT_INT
/* Find the largest hardware supported page size that's less than
* or equal to the (logical) backing page size of guest RAM */
- kvm_get_smmu_info(POWERPC_CPU(first_cpu), &info);
+ kvm_get_smmu_info(&info, &error_fatal);
rampagesize = qemu_getrampagesize();
best_page_shift = 0;
return !kvmppc_is_pr(cs->kvm_state);
}
+
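+/*
+ * Tell KVM about this vCPU's online state. The result of
+ * kvm_set_one_reg() is not checked, so kernels that don't implement
+ * KVM_REG_PPC_ONLINE simply won't get the hint.
+ */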
+void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
+{
+ CPUState *cs = CPU(cpu);
+
+ if (kvm_enabled()) {
+ kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
+ }
+}