#include "sysemu/watchdog.h"
#include "trace.h"
#include "exec/gdbstub.h"
+#include "exec/memattrs.h"
+#include "sysemu/hostmem.h"
//#define DEBUG_KVM
static int kvm_ppc_register_host_cpu_type(void);
-int kvm_arch_init(KVMState *s)
+int kvm_arch_init(MachineState *ms, KVMState *s)
{
cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
cap_interrupt_level = kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL);
kvm_get_fallback_smmu_info(cpu, info);
}
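+/* Return the page size of the filesystem backing @mem_path; on a
+ * hugetlbfs mount, statfs() reports the hugepage size as f_bsize.
+ */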
-static long getrampagesize(void)
+static long gethugepagesize(const char *mem_path)
{
struct statfs fs;
int ret;
- if (!mem_path) {
- /* guest RAM is backed by normal anonymous pages */
- return getpagesize();
- }
-
do {
ret = statfs(mem_path, &fs);
} while (ret != 0 && errno == EINTR);
return fs.f_bsize;
}
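+/* object_child_foreach() callback: for each memory backend, fold the
+ * page size backing it into the running minimum pointed to by @opaque.
+ */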
+static int find_max_supported_pagesize(Object *obj, void *opaque)
+{
+ char *mem_path;
+ long *hpsize_min = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
+ mem_path = object_property_get_str(obj, "mem-path", NULL);
+ if (mem_path) {
+ long hpsize = gethugepagesize(mem_path);
+ g_free(mem_path);
+ if (hpsize < *hpsize_min) {
+ *hpsize_min = hpsize;
+ }
+ } else {
+ *hpsize_min = getpagesize();
+ }
+ }
+
+ return 0;
+}
+
+static long getrampagesize(void)
+{
+ long hpsize = LONG_MAX;
+ Object *memdev_root;
+
+ if (mem_path) {
+ return gethugepagesize(mem_path);
+ }
+
+ /* It's possible we have memory-backend objects with
+ * hugepage-backed RAM. These may get mapped into the system
+ * address space via -numa parameters or memory hotplug
+ * hooks. We want to take these into account, but we
+ * also want to make sure the supported hugepage
+ * sizes are applicable across the entire range of memory
+ * we may boot from, so we take the min across all
+ * backends, and assume normal pages where a backend
+ * isn't backed by hugepages.
+ */
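+ /* For example (hypothetical setup): one hugepage-backed
+ * memory-backend-file plus one plain memory-backend-ram would
+ * yield the normal page size here, since the RAM backend
+ * contributes getpagesize() to the minimum.
+ */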
+ memdev_root = object_resolve_path("/objects", NULL);
+ if (!memdev_root) {
+ return getpagesize();
+ }
+
+ object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+
+ return (hpsize == LONG_MAX) ? getpagesize() : hpsize;
+}
+
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
{
if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
int r;
unsigned irq;
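+ /* kvm_arch_pre_run() is called without the BQL held, so take it
+ * before touching global interrupt state below.
+ */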
+ qemu_mutex_lock_iothread();
+
/* PowerPC QEMU tracks the various core input pins (interrupt, critical
* interrupt, reset, etc) in PPC-specific env->irq_input_state. */
if (!cap_interrupt_level &&
/* We don't know if there are more interrupts pending after this. However,
* the guest will return to userspace in the course of handling this one
* anyways, so we will get a chance to deliver the rest. */
+
+ qemu_mutex_unlock_iothread();
}
-void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
+MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
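+ /* ppc doesn't track memory transaction attributes, so report them
+ * as unspecified.
+ */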
+ return MEMTXATTRS_UNSPECIFIED;
}
int kvm_arch_process_async_events(CPUState *cs)
CPUPPCState *env = &cpu->env;
int ret;
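+ /* Likewise, kvm_arch_handle_exit() runs without the BQL held */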
+ qemu_mutex_lock_iothread();
+
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
break;
}
+ qemu_mutex_unlock_iothread();
return ret;
}
* format) */
static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
{
- char buf[PATH_MAX];
+ char buf[PATH_MAX], *tmp;
union {
uint32_t v32;
uint64_t v64;
return -1;
}
- strncat(buf, "/", sizeof(buf) - strlen(buf));
- strncat(buf, propname, sizeof(buf) - strlen(buf));
+ tmp = g_strdup_printf("%s/%s", buf, propname);
- f = fopen(buf, "rb");
+ f = fopen(tmp, "rb");
+ g_free(tmp);
if (!f) {
return -1;
}
return 0;
}
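+/* Enable in-kernel handling of a single sPAPR hypercall */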
+static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
+{
+ return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
+}
+
+void kvmppc_enable_logical_ci_hcalls(void)
+{
+ /*
+ * FIXME: it would be nice to detect the cases where we're using
+ * a device which requires the in-kernel implementation of these
+ * hcalls but the kernel lacks them, and produce a warning in
+ * that case.
+ */
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
+ kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
+}
+
+void kvmppc_enable_set_mode_hcall(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_SET_MODE);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
strerror(errno));
return rc;
} else if (rc) {
- /* Kernel already retuns data in BE format for the file */
- qemu_put_buffer(f, buf, rc);
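+ /* The kernel writes the chunk headers in host byte order, while
+ * the HPTEs themselves are always big-endian; re-emit the headers
+ * explicitly as big-endian so the stream is host-independent.
+ */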
+ uint8_t *buffer = buf;
+ ssize_t n = rc;
+ while (n) {
+ struct kvm_get_htab_header *head =
+ (struct kvm_get_htab_header *) buffer;
+ size_t chunksize = sizeof(*head) +
+ HASH_PTE_SIZE_64 * head->n_valid;
+
+ qemu_put_be32(f, head->index);
+ qemu_put_be16(f, head->n_valid);
+ qemu_put_be16(f, head->n_invalid);
+ qemu_put_buffer(f, (void *)(head + 1),
+ HASH_PTE_SIZE_64 * head->n_valid);
+
+ buffer += chunksize;
+ n -= chunksize;
+ }
}
} while ((rc != 0)
&& ((max_ns < 0)
ssize_t rc;
buf = alloca(chunksize);
- /* This is KVM on ppc, so this is all big-endian */
buf->index = index;
buf->n_valid = n_valid;
buf->n_invalid = n_invalid;
error_out:
return;
}
+
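+/* No arch-specific fixup is needed for MSI routes on ppc */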
+int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
+ uint64_t address, uint32_t data)
+{
+ return 0;
+}
+
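+/* The GSI is carried in the low 16 bits of the MSI data word
+ * (on sPAPR the MSI data holds the interrupt number directly).
+ */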
+int kvm_arch_msi_data_to_gsi(uint32_t data)
+{
+ return data & 0xffff;
+}
+
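+/* Back the H_RANDOM hypercall with the host's hardware RNG, if present */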
+int kvmppc_enable_hwrng(void)
+{
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
+ return -1;
+ }
+
+ return kvmppc_enable_hcall(kvm_state, H_RANDOM);
+}