#include "cpu.h"
#include "sysemu/cpus.h"
#include "sysemu/device_tree.h"
-#include "hw/sysbus.h"
-#include "hw/spapr.h"
#include "mmu-hash64.h"
#include "hw/sysbus.h"
-#include "hw/spapr.h"
-#include "hw/spapr_vio.h"
+#include "hw/ppc/spapr.h"
+#include "hw/ppc/spapr_vio.h"
+#include "sysemu/watchdog.h"
//#define DEBUG_KVM
static int cap_spapr_tce;
static int cap_hior;
static int cap_one_reg;
+static int cap_epr;
+static int cap_ppc_watchdog;
+static int cap_papr;
/* XXX We have a race condition where we actually have a level triggered
* interrupt, but the infrastructure can't expose that yet, so the guest
cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
+ cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
+ cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
+    /* Note: we don't set cap_papr here, because this capability is
+     * only activated later, by kvmppc_set_papr() */
if (!cap_interrupt_level) {
fprintf(stderr, "KVM: Couldn't find level irq capability. Expect the "
return 0;
}
+#if defined(TARGET_PPC64)
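+/* The VPA (Virtual Processor Area), SLB shadow buffer and dispatch trace
+ * log are memory areas the guest shares with the hypervisor (registered
+ * via the H_REGISTER_VPA hcall under PAPR). KVM owns their addresses, so
+ * we have to fetch and restore them explicitly around migration. */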
+static int kvm_get_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to get VPA address from KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
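+    /* KVM_REG_PPC_VPA_SLB (and VPA_DTL below) is a 128-bit register
+     * holding an (addr, size) pair, so the two env fields must be
+     * adjacent in memory; the asserts check that layout */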
+ assert((uintptr_t)&env->slb_shadow_size
+ == ((uintptr_t)&env->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&env->slb_shadow_addr;
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to get SLB shadow state from KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&env->dtl_addr;
+    ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to get dispatch trace log state from KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ return 0;
+}
+
+static int kvm_put_vpa(CPUState *cs)
+{
+ PowerPCCPU *cpu = POWERPC_CPU(cs);
+ CPUPPCState *env = &cpu->env;
+ struct kvm_one_reg reg;
+ int ret;
+
+ /* SLB shadow or DTL can't be registered unless a master VPA is
+ * registered. That means when restoring state, if a VPA *is*
+ * registered, we need to set that up first. If not, we need to
+ * deregister the others before deregistering the master VPA */
+ assert(env->vpa_addr || !(env->slb_shadow_addr || env->dtl_addr));
+
+ if (env->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+ }
+
+ assert((uintptr_t)&env->slb_shadow_size
+ == ((uintptr_t)&env->slb_shadow_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_SLB;
+ reg.addr = (uintptr_t)&env->slb_shadow_addr;
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to set SLB shadow state to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+
+ assert((uintptr_t)&env->dtl_size == ((uintptr_t)&env->dtl_addr + 8));
+ reg.id = KVM_REG_PPC_VPA_DTL;
+ reg.addr = (uintptr_t)&env->dtl_addr;
+    ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to set dispatch trace log state to KVM: %s\n",
+ strerror(errno));
+ return ret;
+ }
+
+ if (!env->vpa_addr) {
+ reg.id = KVM_REG_PPC_VPA_ADDR;
+ reg.addr = (uintptr_t)&env->vpa_addr;
+        ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+ if (ret < 0) {
+ dprintf("Unable to set VPA address to KVM: %s\n", strerror(errno));
+ return ret;
+ }
+ }
+
+ return 0;
+}
+#endif /* TARGET_PPC64 */
+
int kvm_arch_put_registers(CPUState *cs, int level)
{
PowerPCCPU *cpu = POWERPC_CPU(cs);
    for (i = 0; i < 32; i++)
regs.gpr[i] = env->gpr[i];
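+    /* KVM wants CR as a single 32-bit value; pack the eight 4-bit
+     * crf[] fields, with crf[0] in the most significant nibble */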
+ regs.cr = 0;
+ for (i = 0; i < 8; i++) {
+ regs.cr |= (env->crf[i] & 15) << (4 * (7 - i));
+ }
+
    ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
if (ret < 0)
return ret;
kvm_put_one_spr(cs, id, i);
}
}
+
+#ifdef TARGET_PPC64
+ if (cap_papr) {
+ if (kvm_put_vpa(cs) < 0) {
+ dprintf("Warning: Unable to set VPA information to KVM\n");
+ }
+ }
+#endif /* TARGET_PPC64 */
}
return ret;
kvm_get_one_spr(cs, id, i);
}
}
+
+#ifdef TARGET_PPC64
+ if (cap_papr) {
+ if (kvm_get_vpa(cs) < 0) {
+ dprintf("Warning: Unable to get VPA information from KVM\n");
+ }
+ }
+#endif
}
return 0;
run->epr.epr = ldl_phys(env->mpic_iack);
ret = 0;
break;
+ case KVM_EXIT_WATCHDOG:
+ dprintf("handle watchdog expiry\n");
+ watchdog_perform_action();
+ ret = 0;
+ break;
+
default:
fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
ret = -1;
return ret;
}
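+/* The OR_TSR/CLEAR_TSR one-reg calls let KVM flip the requested TSR bits
+ * atomically, so userspace can't race with the in-kernel watchdog doing
+ * its own read-modify-write of TSR */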
+int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_OR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
+int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
+{
+ CPUState *cs = CPU(cpu);
+ uint32_t bits = tsr_bits;
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_CLEAR_TSR,
+ .addr = (uintptr_t) &bits,
+ };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
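+/* Sync the guest's current TCR (timer control register, which holds the
+ * watchdog configuration) into the in-kernel timer emulation */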
+int kvmppc_set_tcr(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ CPUPPCState *env = &cpu->env;
+ uint32_t tcr = env->spr[SPR_BOOKE_TCR];
+
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_PPC_TCR,
+ .addr = (uintptr_t) &tcr,
+ };
+
+    return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
+}
+
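+/* Ask KVM to emulate the BookE watchdog in the kernel; on final expiry
+ * KVM exits to userspace with KVM_EXIT_WATCHDOG (handled above) */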
+int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ struct kvm_enable_cap encap = {};
+ int ret;
+
+ if (!kvm_enabled()) {
+ return -1;
+ }
+
+ if (!cap_ppc_watchdog) {
+ printf("warning: KVM does not support watchdog");
+ return -1;
+ }
+
+ encap.cap = KVM_CAP_PPC_BOOKE_WATCHDOG;
+ ret = kvm_vcpu_ioctl(cs, KVM_ENABLE_CAP, &encap);
+ if (ret < 0) {
+ fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
+ __func__, strerror(-ret));
+ return ret;
+ }
+
+ return ret;
+}
+
static int read_cpuinfo(const char *field, char *value, int len)
{
FILE *f;
if (ret) {
cpu_abort(env, "This KVM version does not support PAPR\n");
}
+
+    /* Update the capability flag so we sync the right information
+     * with KVM */
+ cap_papr = 1;
}
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift)
{
+ struct kvm_ppc_smmu_info info;
+ long rampagesize, best_page_shift;
+ int i;
+
if (cap_ppc_rma >= 2) {
return current_size;
}
+
+ /* Find the largest hardware supported page size that's less than
+ * or equal to the (logical) backing page size of guest RAM */
+ kvm_get_smmu_info(ppc_env_get_cpu(first_cpu), &info);
+ rampagesize = getrampagesize();
+ best_page_shift = 0;
+
+ for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
+ struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
+
+ if (!sps->page_shift) {
+ continue;
+ }
+
+ if ((sps->page_shift > best_page_shift)
+ && ((1UL << sps->page_shift) <= rampagesize)) {
+ best_page_shift = sps->page_shift;
+ }
+ }
+
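+    /* One way to read the formula: the HPT has 2^(hash_shift - 7) PTEGs
+     * (128 bytes each), i.e. room for one PTEG per page of RMA at the
+     * chosen page size */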
return MIN(current_size,
- getrampagesize() << (hash_shift - 7));
+ 1ULL << (best_page_shift + hash_shift - 7));
}
#endif
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
uint32_t vmx = kvmppc_get_vmx();
uint32_t dfp = kvmppc_get_dfp();
+ uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
+ uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
/* Now fix up the class with information we can query from the host */
/* Only override when we know what the host supports */
alter_insns(&pcc->insns_flags2, PPC2_DFP, dfp);
}
+
+ if (dcache_size != -1) {
+ pcc->l1_dcache_size = dcache_size;
+ }
+
+ if (icache_size != -1) {
+ pcc->l1_icache_size = icache_size;
+ }
}
int kvmppc_fixup_cpu(PowerPCCPU *cpu)
return 0;
}
+bool kvmppc_has_cap_epr(void)
+{
+ return cap_epr;
+}
+
static int kvm_ppc_register_host_cpu_type(void)
{
TypeInfo type_info = {
{
return 1;
}
+
+void kvm_arch_init_irq_routing(KVMState *s)
+{
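+    /* nothing to do: we don't use in-kernel irqchip routing on PPC here */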
+}