#include "trace.h"
#include "exec/gdbstub.h"
#include "exec/memattrs.h"
+#include "sysemu/hostmem.h"
//#define DEBUG_KVM
kvm_get_fallback_smmu_info(cpu, info);
}
-static long getrampagesize(void)
+static long gethugepagesize(const char *mem_path)
{
struct statfs fs;
int ret;
- if (!mem_path) {
- /* guest RAM is backed by normal anonymous pages */
- return getpagesize();
- }
-
do {
ret = statfs(mem_path, &fs);
} while (ret != 0 && errno == EINTR);
return fs.f_bsize;
}
+static int find_max_supported_pagesize(Object *obj, void *opaque)
+{
+ char *mem_path;
+ long *hpsize_min = opaque;
+
+ if (object_dynamic_cast(obj, TYPE_MEMORY_BACKEND)) {
+ mem_path = object_property_get_str(obj, "mem-path", NULL);
+ if (mem_path) {
+ long hpsize = gethugepagesize(mem_path);
+ if (hpsize < *hpsize_min) {
+ *hpsize_min = hpsize;
+ }
+ } else {
+ *hpsize_min = getpagesize();
+ }
+ }
+
+ return 0;
+}
+
+static long getrampagesize(void)
+{
+ long hpsize = LONG_MAX;
+ Object *memdev_root;
+
+ if (mem_path) {
+ return gethugepagesize(mem_path);
+ }
+
+ /* It's possible we have memory-backend objects with
+ * hugepage-backed RAM.  These may get mapped into the system
+ * address space via -numa parameters or memory hotplug
+ * hooks.  We want to take them into account, but we also
+ * want to make sure that any hugepage size we report is
+ * usable across the entire range of memory we may boot
+ * from, so we take the minimum across all backends and
+ * assume normal (host) pages wherever a backend is not
+ * hugepage-backed.
+ */
+ memdev_root = object_resolve_path("/objects", NULL);
+ if (!memdev_root) {
+ return getpagesize();
+ }
+
+ object_child_foreach(memdev_root, find_max_supported_pagesize, &hpsize);
+
+ return (hpsize == LONG_MAX) ? getpagesize() : hpsize;
+}
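/*
 * A simplified, hypothetical sketch of how the minimum backing page size
 * computed by getrampagesize() is typically consumed: in upstream QEMU the
 * consumer is the page-size fixup path (kvm_fixup_page_sizes()), which drops
 * KVM-advertised segment page sizes that the backing memory cannot satisfy.
 * The helper name and the exact filtering below are illustrative only.
 */
static void filter_page_sizes_sketch(struct kvm_ppc_smmu_info *info)
{
    long rampagesize = getrampagesize();
    int i;

    for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
        struct kvm_ppc_one_seg_page_size *sps = &info->sps[i];

        if (!sps->page_shift) {
            continue;                 /* unused slot */
        }
        if (!kvm_valid_page_size(info->flags, rampagesize,
                                 sps->page_shift)) {
            sps->page_shift = 0;      /* host RAM cannot back this size */
        }
    }
}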
+
static bool kvm_valid_page_size(uint32_t flags, long rampgsize, uint32_t shift)
{
if (!(flags & KVM_PPC_PAGE_SIZES_REAL)) {
int r;
unsigned irq;
+ qemu_mutex_lock_iothread();
+
/* PowerPC QEMU tracks the various core input pins (interrupt, critical
* interrupt, reset, etc) in PPC-specific env->irq_input_state. */
if (!cap_interrupt_level &&
/* We don't know if there are more interrupts pending after this. However,
* the guest will return to userspace in the course of handling this one
* anyways, so we will get a chance to deliver the rest. */
+
+ qemu_mutex_unlock_iothread();
}
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
CPUPPCState *env = &cpu->env;
int ret;
+ qemu_mutex_lock_iothread();
+
switch (run->exit_reason) {
case KVM_EXIT_DCR:
if (run->dcr.is_write) {
break;
}
+ qemu_mutex_unlock_iothread();
return ret;
}
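/*
 * A minimal sketch of the locking pattern the hunks above introduce,
 * assuming the usual QEMU convention: the iothread mutex (BQL) is dropped
 * around ioctl(KVM_RUN) and must be retaken before touching device or
 * interrupt-controller state.  The function name below is hypothetical.
 */
static int handle_exit_locking_sketch(CPUState *cs, struct kvm_run *run)
{
    int ret = 0;

    qemu_mutex_lock_iothread();    /* serialize with the main loop */

    switch (run->exit_reason) {
    default:
        /* emulate the exit while holding the lock */
        break;
    }

    qemu_mutex_unlock_iothread();  /* release before re-entering KVM_RUN */
    return ret;
}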
kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
}
+void kvmppc_enable_set_mode_hcall(void)
+{
+ kvmppc_enable_hcall(kvm_state, H_SET_MODE);
+}
+
void kvmppc_set_papr(PowerPCCPU *cpu)
{
CPUState *cs = CPU(cpu);
{
return data & 0xffff;
}
+
+int kvmppc_enable_hwrng(void)
+{
+ if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
+ return -1;
+ }
+
+ return kvmppc_enable_hcall(kvm_state, H_RANDOM);
+}
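/*
 * A hedged usage sketch: a device model (in upstream QEMU, the sPAPR rng
 * device) would typically probe for the in-kernel H_RANDOM handler at
 * realize time and fall back to a userspace implementation when
 * KVM_CAP_PPC_HWRNG is absent.  The function below is hypothetical.
 */
static void rng_probe_sketch(void)
{
    if (kvmppc_enable_hwrng() < 0) {
        /* no in-kernel H_RANDOM: handle the hcall in QEMU instead */
    }
}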