#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/accel.h"
+#include "qemu/error-report.h"
#include "hw/hw.h"
#include "hw/pci/msi.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
-#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"
+#include "hw/irq.h"
#include "hw/boards.h"
#include <sys/eventfd.h>
#endif
-/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
-#define PAGE_SIZE TARGET_PAGE_SIZE
+/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
+ * need to use the real host PAGE_SIZE, as that's what KVM will use.
+ */
+#define PAGE_SIZE getpagesize()
//#define DEBUG_KVM
#define KVM_MSI_HASHTAB_SIZE 256
-typedef struct KVMSlot
-{
- hwaddr start_addr;
- ram_addr_t memory_size;
- void *ram;
- int slot;
- int flags;
-} KVMSlot;
-
-typedef struct kvm_dirty_log KVMDirtyLog;
-
struct KVMState
{
AccelState parent_obj;
- KVMSlot *slots;
int nr_slots;
int fd;
int vmfd;
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
- int pit_state2;
- int xsave, xcrs;
int many_ioeventfds;
int intx_set_mask;
/* The man page (and posix) say ioctl numbers are signed int, but
 * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
 * unsigned, and treating them as signed here can break things */
unsigned irq_set_ioctl;
unsigned int sigmask_len;
+ GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
struct kvm_irq_routing *irq_routes;
int nr_allocated_irq_routes;
uint32_t *used_gsi_bitmap;
unsigned int gsi_count;
QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
- bool direct_msi;
#endif
+ KVMMemoryListener memory_listener;
};
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
-
-#define KVM_STATE(obj) \
- OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
-
KVMState *kvm_state;
bool kvm_kernel_irqchip;
+bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_allowed;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
+bool kvm_direct_msi_allowed;
+bool kvm_ioeventfd_any_length_allowed;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
KVM_CAP_INFO(USER_MEMORY),
KVM_CAP_LAST_INFO
};
-static KVMSlot *kvm_get_free_slot(KVMState *s)
+static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
+ KVMState *s = kvm_state;
int i;
for (i = 0; i < s->nr_slots; i++) {
- if (s->slots[i].memory_size == 0) {
- return &s->slots[i];
+ if (kml->slots[i].memory_size == 0) {
+ return &kml->slots[i];
}
}
bool kvm_has_free_slot(MachineState *ms)
{
- return kvm_get_free_slot(KVM_STATE(ms->accelerator));
+ KVMState *s = KVM_STATE(ms->accelerator);
+
+ return kvm_get_free_slot(&s->memory_listener);
}
-static KVMSlot *kvm_alloc_slot(KVMState *s)
+static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
- KVMSlot *slot = kvm_get_free_slot(s);
+ KVMSlot *slot = kvm_get_free_slot(kml);
if (slot) {
return slot;
abort();
}
-static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
+static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr end_addr)
{
+ KVMState *s = kvm_state;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (start_addr == mem->start_addr &&
end_addr == mem->start_addr + mem->memory_size) {
/*
* Find overlapping slot with lowest start address
*/
-static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
+static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
hwaddr start_addr,
hwaddr end_addr)
{
+ KVMState *s = kvm_state;
KVMSlot *found = NULL;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (mem->memory_size == 0 ||
(found && found->start_addr < mem->start_addr)) {
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
hwaddr *phys_addr)
{
+ KVMMemoryListener *kml = &s->memory_listener;
int i;
for (i = 0; i < s->nr_slots; i++) {
- KVMSlot *mem = &s->slots[i];
+ KVMSlot *mem = &kml->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
*phys_addr = mem->start_addr + (ram - mem->ram);
return 0;
}
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
+static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
{
+ KVMState *s = kvm_state;
struct kvm_userspace_memory_region mem;
- mem.slot = slot->slot;
+ mem.slot = slot->slot | (kml->as_id << 16);
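+ /* bits 16-31 of the slot id above select the address space; as_id 0 is
+  * the default system-memory address space (cf. KVM_CAP_MULTI_ADDRESS_SPACE) */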
mem.guest_phys_addr = slot->start_addr;
mem.userspace_addr = (unsigned long)slot->ram;
mem.flags = slot->flags;
* dirty pages logging control
*/
-static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
+static int kvm_mem_flags(MemoryRegion *mr)
{
+ bool readonly = mr->readonly || memory_region_is_romd(mr);
int flags = 0;
- flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+
+ if (memory_region_get_dirty_log_mask(mr) != 0) {
+ flags |= KVM_MEM_LOG_DIRTY_PAGES;
+ }
if (readonly && kvm_readonly_mem_allowed) {
flags |= KVM_MEM_READONLY;
}
return flags;
}
-static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
+static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
+ MemoryRegion *mr)
{
- KVMState *s = kvm_state;
- int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
int old_flags;
old_flags = mem->flags;
-
- flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
- mem->flags = flags;
+ mem->flags = kvm_mem_flags(mr);
/* If nothing changed effectively, no need to issue ioctl */
- if (flags == old_flags) {
+ if (mem->flags == old_flags) {
return 0;
}
- return kvm_set_user_memory_region(s, mem);
+ return kvm_set_user_memory_region(kml, mem);
}
-static int kvm_dirty_pages_log_change(hwaddr phys_addr,
- ram_addr_t size, bool log_dirty)
+static int kvm_section_update_flags(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
{
- KVMState *s = kvm_state;
- KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+ hwaddr phys_addr = section->offset_within_address_space;
+ ram_addr_t size = int128_get64(section->size);
+ KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);
if (mem == NULL) {
return 0;
} else {
- return kvm_slot_dirty_pages_log_change(mem, log_dirty);
+ return kvm_slot_update_flags(kml, mem, section->mr);
}
}
MemoryRegionSection *section,
int old, int new)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
if (old != 0) {
return;
}
- r = kvm_dirty_pages_log_change(section->offset_within_address_space,
- int128_get64(section->size), true);
+ r = kvm_section_update_flags(kml, section);
if (r < 0) {
abort();
}
MemoryRegionSection *section,
int old, int new)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
if (new != 0) {
return;
}
- r = kvm_dirty_pages_log_change(section->offset_within_address_space,
- int128_get64(section->size), false);
+ r = kvm_section_update_flags(kml, section);
if (r < 0) {
abort();
}
* @start_addr: start of logged region.
* @end_addr: end of logged region.
*/
-static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
+static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
+ MemoryRegionSection *section)
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
- KVMDirtyLog d = {};
+ struct kvm_dirty_log d = {};
KVMSlot *mem;
int ret = 0;
hwaddr start_addr = section->offset_within_address_space;
d.dirty_bitmap = NULL;
while (start_addr < end_addr) {
- mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+ mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
if (mem == NULL) {
break;
}
allocated_size = size;
memset(d.dirty_bitmap, 0, allocated_size);
- d.slot = mem->slot;
-
+ d.slot = mem->slot | (kml->as_id << 16);
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
DPRINTF("ioctl failed %d\n", errno);
ret = -1;
return NULL;
}
-static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
+static void kvm_set_phys_mem(KVMMemoryListener *kml,
+ MemoryRegionSection *section, bool add)
{
KVMState *s = kvm_state;
KVMSlot *mem, old;
int err;
MemoryRegion *mr = section->mr;
- bool log_dirty = memory_region_get_dirty_log_mask(mr) != 0;
bool writeable = !mr->readonly && !mr->rom_device;
- bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
hwaddr start_addr = section->offset_within_address_space;
ram_addr_t size = int128_get64(section->size);
void *ram = NULL;
/* kvm works in page size chunks, but the function may be called
with sub-page size and unaligned start address. Pad the start
address up to the next page boundary and truncate the size down to
the previous page boundary. */
- delta = (TARGET_PAGE_SIZE - (start_addr & ~TARGET_PAGE_MASK));
- delta &= ~TARGET_PAGE_MASK;
+ delta = qemu_real_host_page_size - (start_addr & ~qemu_real_host_page_mask);
+ delta &= ~qemu_real_host_page_mask;
if (delta > size) {
return;
}
start_addr += delta;
size -= delta;
- size &= TARGET_PAGE_MASK;
- if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
+ size &= qemu_real_host_page_mask;
+ if (!size || (start_addr & ~qemu_real_host_page_mask)) {
return;
}
ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
while (1) {
- mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
+ mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
if (!mem) {
break;
}
(ram - start_addr == mem->ram - mem->start_addr)) {
/* The new slot fits into the existing one and comes with
* identical parameters - update flags and done. */
- kvm_slot_dirty_pages_log_change(mem, log_dirty);
+ kvm_slot_update_flags(kml, mem, mr);
return;
}
old = *mem;
if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
- kvm_physical_sync_dirty_bitmap(section);
+ kvm_physical_sync_dirty_bitmap(kml, section);
}
/* unregister the overlapping slot */
mem->memory_size = 0;
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
__func__, strerror(-err));
* - and actually require a recent KVM version. */
if (s->broken_set_mem_region &&
old.start_addr == start_addr && old.memory_size < size && add) {
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = old.memory_size;
mem->start_addr = old.start_addr;
mem->ram = old.ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error updating slot: %s\n", __func__,
strerror(-err));
/* register prefix slot */
if (old.start_addr < start_addr) {
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = start_addr - old.start_addr;
mem->start_addr = old.start_addr;
mem->ram = old.ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering prefix slot: %s\n",
__func__, strerror(-err));
if (old.start_addr + old.memory_size > start_addr + size) {
ram_addr_t size_delta;
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->start_addr = start_addr + size;
size_delta = mem->start_addr - old.start_addr;
mem->memory_size = old.memory_size - size_delta;
mem->ram = old.ram + size_delta;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering suffix slot: %s\n",
__func__, strerror(-err));
if (!add) {
return;
}
- mem = kvm_alloc_slot(s);
+ mem = kvm_alloc_slot(kml);
mem->memory_size = size;
mem->start_addr = start_addr;
mem->ram = ram;
- mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+ mem->flags = kvm_mem_flags(mr);
- err = kvm_set_user_memory_region(s, mem);
+ err = kvm_set_user_memory_region(kml, mem);
if (err) {
fprintf(stderr, "%s: error registering slot: %s\n", __func__,
strerror(-err));
static void kvm_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
memory_region_ref(section->mr);
- kvm_set_phys_mem(section, true);
+ kvm_set_phys_mem(kml, section, true);
}
static void kvm_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
- kvm_set_phys_mem(section, false);
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
+ kvm_set_phys_mem(kml, section, false);
memory_region_unref(section->mr);
}
static void kvm_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
+ KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
int r;
- r = kvm_physical_sync_dirty_bitmap(section);
+ r = kvm_physical_sync_dirty_bitmap(kml, section);
if (r < 0) {
abort();
}
}
}
-static MemoryListener kvm_memory_listener = {
- .region_add = kvm_region_add,
- .region_del = kvm_region_del,
- .log_start = kvm_log_start,
- .log_stop = kvm_log_stop,
- .log_sync = kvm_log_sync,
- .eventfd_add = kvm_mem_ioeventfd_add,
- .eventfd_del = kvm_mem_ioeventfd_del,
- .coalesced_mmio_add = kvm_coalesce_mmio_region,
- .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
- .priority = 10,
-};
+void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
+ AddressSpace *as, int as_id)
+{
+ int i;
+
+ kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
+ kml->as_id = as_id;
+
+ for (i = 0; i < s->nr_slots; i++) {
+ kml->slots[i].slot = i;
+ }
+
+ kml->listener.region_add = kvm_region_add;
+ kml->listener.region_del = kvm_region_del;
+ kml->listener.log_start = kvm_log_start;
+ kml->listener.log_stop = kvm_log_stop;
+ kml->listener.log_sync = kvm_log_sync;
+ kml->listener.priority = 10;
+
+ memory_listener_register(&kml->listener, as);
+}
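With a per-address-space listener, architecture code can track a second guest
address space simply by registering another KVMMemoryListener. A minimal
sketch, assuming an arch-provided listener and address space (smram_listener
and smram_address_space are illustrative names):

    static KVMMemoryListener smram_listener;

    /* slots registered through this listener are tagged with as_id 1
     * in bits 16-31 of the slot id passed to the kernel */
    kvm_memory_listener_register(kvm_state, &smram_listener,
                                 &smram_address_space, 1);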
static MemoryListener kvm_io_listener = {
.eventfd_add = kvm_io_ioeventfd_add,
s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
s->nr_allocated_irq_routes = 0;
- if (!s->direct_msi) {
+ if (!kvm_direct_msi_allowed) {
for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
QTAILQ_INIT(&s->msi_hashtab[i]);
}
uint32_t *word = s->used_gsi_bitmap;
int max_words = ALIGN(s->gsi_count, 32) / 32;
int i, zeroes;
- bool retry = true;
-again:
+ /*
+ * PIC and IOAPIC share the first 16 GSI numbers, thus the available
+ * GSI numbers outnumber the available IRQ routes. Allocating a GSI
+ * number can succeed even though a new route entry cannot be added.
+ * When this happens, flush dynamic MSI entries to free IRQ route entries.
+ */
+ if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
+ kvm_flush_dynamic_msi_routes(s);
+ }
+
/* Return the lowest unused GSI in the bitmap */
for (i = 0; i < max_words; i++) {
zeroes = ctz32(~word[i]);
return zeroes + i * 32;
}
- if (!s->direct_msi && retry) {
- retry = false;
- kvm_flush_dynamic_msi_routes(s);
- goto again;
- }
return -ENOSPC;
}
struct kvm_msi msi;
KVMMSIRoute *route;
- if (s->direct_msi) {
+ if (kvm_direct_msi_allowed) {
msi.address_lo = (uint32_t)msg.address;
msi.address_hi = msg.address >> 32;
msi.data = le32_to_cpu(msg.data);
return kvm_set_irq(s, route->kroute.gsi, 1);
}
-int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
+int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg, PCIDevice *dev)
{
struct kvm_irq_routing_entry kroute = {};
int virq;
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
- if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+ if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
kvm_irqchip_release_virq(s, virq);
return -EINVAL;
}
return virq;
}
-int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
+int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
+ PCIDevice *dev)
{
struct kvm_irq_routing_entry kroute = {};
kroute.u.msi.address_lo = (uint32_t)msg.address;
kroute.u.msi.address_hi = msg.address >> 32;
kroute.u.msi.data = le32_to_cpu(msg.data);
- if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+ if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
return -EINVAL;
}
kroute.u.adapter.ind_offset = adapter->ind_offset;
kroute.u.adapter.adapter_id = adapter->adapter_id;
+ kvm_add_routing_entry(s, &kroute);
+
+ return virq;
+}
+
+int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
+{
+ struct kvm_irq_routing_entry kroute = {};
+ int virq;
+
+ if (!kvm_gsi_routing_enabled()) {
+ return -ENOSYS;
+ }
+ if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
+ return -ENOSYS;
+ }
+ virq = kvm_irqchip_get_virq(s);
+ if (virq < 0) {
+ return virq;
+ }
+
+ kroute.gsi = virq;
+ kroute.type = KVM_IRQ_ROUTING_HV_SINT;
+ kroute.flags = 0;
+ kroute.u.hv_sint.vcpu = vcpu;
+ kroute.u.hv_sint.sint = sint;
+
kvm_add_routing_entry(s, &kroute);
kvm_irqchip_commit_routes(s);
return -ENOSYS;
}
+int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
+{
+ return -ENOSYS;
+}
+
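A hedged usage sketch for the Hyper-V SynIC helper; vcpu and sint would come
from the guest's SynIC configuration, and sint_notifier is assumed to be an
already-initialized EventNotifier:

    int virq = kvm_irqchip_add_hv_sint_route(kvm_state, vcpu, sint);

    if (virq < 0) {
        return virq;    /* e.g. -ENOSYS without KVM_CAP_HYPERV_SYNIC */
    }
    /* bind the SINT's GSI to an eventfd so the kernel can inject it */
    kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, &sint_notifier, NULL, virq);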
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
abort();
}
#endif /* !KVM_CAP_IRQ_ROUTING */
-int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
- EventNotifier *rn, int virq)
+int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+ EventNotifier *rn, int virq)
{
return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
rn ? event_notifier_get_fd(rn) : -1, virq, true);
}
-int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
+int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+ int virq)
{
return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
false);
}
-static int kvm_irqchip_create(MachineState *machine, KVMState *s)
+int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
+ EventNotifier *rn, qemu_irq irq)
+{
+ gpointer key, gsi;
+ gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
+
+ if (!found) {
+ return -ENXIO;
+ }
+ return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
+}
+
+int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
+ qemu_irq irq)
+{
+ gpointer key, gsi;
+ gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
+
+ if (!found) {
+ return -ENXIO;
+ }
+ return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
+}
+
+void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
+{
+ g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
+}
+
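The qemu_irq-to-GSI hash table lets device code bind an irqfd to a qemu_irq
without knowing the GSI numbering. A minimal sketch, assuming irq and gsi come
from arch/board irqchip setup and notifier is an initialized EventNotifier:

    /* irqchip setup code publishes the mapping for an interrupt line */
    kvm_irqchip_set_qemuirq_gsi(kvm_state, irq, gsi);

    /* a device can later attach an eventfd to that line directly */
    if (kvm_irqchip_add_irqfd_notifier(kvm_state, &notifier, NULL, irq) < 0) {
        /* -ENXIO: no GSI has been registered for this qemu_irq */
    }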
+static void kvm_irqchip_create(MachineState *machine, KVMState *s)
{
int ret;
- if (!machine_kernel_irqchip_allowed(machine) ||
- (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
- (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
- return 0;
+ if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
+ ;
+ } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
+ ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
+ if (ret < 0) {
+ fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
+ exit(1);
+ }
+ } else {
+ return;
}
/* First probe and see if there's an arch-specific hook to create the
* in-kernel irqchip for us */
- ret = kvm_arch_irqchip_create(s);
- if (ret < 0) {
- return ret;
- } else if (ret == 0) {
- ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
- if (ret < 0) {
- fprintf(stderr, "Create kernel irqchip failed\n");
- return ret;
+ ret = kvm_arch_irqchip_create(machine, s);
+ if (ret == 0) {
+ if (machine_kernel_irqchip_split(machine)) {
+ perror("Split IRQ chip mode not supported.");
+ exit(1);
+ } else {
+ ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
}
}
+ if (ret < 0) {
+ fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
+ exit(1);
+ }
kvm_kernel_irqchip = true;
/* If we have an in-kernel IRQ chip then we must have asynchronous
kvm_init_irq_routing(s);
- return 0;
+ s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}
/* Find number of supported CPUs using the recommended
KVMState *s;
const KVMCapabilityInfo *missing_cap;
int ret;
- int i, type = 0;
+ int type = 0;
const char *kvm_type;
s = KVM_STATE(ms->accelerator);
* page size for the system though.
*/
assert(TARGET_PAGE_SIZE <= getpagesize());
- page_size_init();
s->sigmask_len = 8;
s->nr_slots = 32;
}
- s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
-
- for (i = 0; i < s->nr_slots; i++) {
- s->slots[i].slot = i;
- }
-
/* check the vcpu limits */
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif
-#ifdef KVM_CAP_XSAVE
- s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
-#endif
-
-#ifdef KVM_CAP_XCRS
- s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
-#endif
-
-#ifdef KVM_CAP_PIT_STATE2
- s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
-#endif
-
#ifdef KVM_CAP_IRQ_ROUTING
- s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
+ kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif
s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
kvm_vm_attributes_allowed =
(kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);
+ kvm_ioeventfd_any_length_allowed =
+ (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);
+
ret = kvm_arch_init(ms, s);
if (ret < 0) {
goto err;
}
- ret = kvm_irqchip_create(ms, s);
- if (ret < 0) {
- goto err;
+ if (machine_kernel_irqchip_allowed(ms)) {
+ kvm_irqchip_create(ms, s);
}
kvm_state = s;
- memory_listener_register(&kvm_memory_listener, &address_space_memory);
- memory_listener_register(&kvm_io_listener, &address_space_io);
+
+ if (kvm_eventfds_allowed) {
+ s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+ s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
+ }
+ s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
+ s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
+
+ kvm_memory_listener_register(s, &s->memory_listener,
+ &address_space_memory, 0);
+ memory_listener_register(&kvm_io_listener,
+ &address_space_io);
s->many_ioeventfds = kvm_check_many_ioeventfds();
if (s->fd != -1) {
close(s->fd);
}
- g_free(s->slots);
+ g_free(s->memory_listener.slots);
return ret;
}
run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
}
-void kvm_cpu_clean_state(CPUState *cpu)
-{
- cpu->kvm_vcpu_dirty = false;
-}
-
int kvm_cpu_exec(CPUState *cpu)
{
struct kvm_run *run = cpu->kvm_run;
return EXCP_HLT;
}
+ qemu_mutex_unlock_iothread();
+
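+ /* KVM_RUN and the exit handling below run without the iothread lock; it is
+  * re-taken after the loop, and handlers that need global state (e.g. the
+  * KVM_SYSTEM_EVENT_CRASH case) take and drop it themselves. */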
do {
MemTxAttrs attrs;
*/
qemu_cpu_kick_self();
}
- qemu_mutex_unlock_iothread();
run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
- qemu_mutex_lock_iothread();
attrs = kvm_arch_post_run(cpu, run);
if (run_ret < 0) {
switch (run->exit_reason) {
case KVM_EXIT_IO:
DPRINTF("handle_io\n");
+ /* Called outside BQL */
kvm_handle_io(run->io.port, attrs,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
break;
case KVM_EXIT_MMIO:
DPRINTF("handle_mmio\n");
+ /* Called outside BQL */
address_space_rw(&address_space_memory,
run->mmio.phys_addr, attrs,
run->mmio.data,
qemu_system_reset_request();
ret = EXCP_INTERRUPT;
break;
+ case KVM_SYSTEM_EVENT_CRASH:
+ qemu_mutex_lock_iothread();
+ qemu_system_guest_panicked();
+ qemu_mutex_unlock_iothread();
+ ret = 0;
+ break;
default:
DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(cpu, run);
}
} while (ret == 0);
+ qemu_mutex_lock_iothread();
+
if (ret < 0) {
cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
vm_stop(RUN_STATE_INTERNAL_ERROR);
return ret ? 0 : 1;
}
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+ struct kvm_device_attr attribute = {
+ .group = group,
+ .attr = attr,
+ .flags = 0,
+ };
+
+ return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
+}
+
+void kvm_device_access(int fd, int group, uint64_t attr,
+ void *val, bool write)
+{
+ struct kvm_device_attr kvmattr;
+ int err;
+
+ kvmattr.flags = 0;
+ kvmattr.group = group;
+ kvmattr.attr = attr;
+ kvmattr.addr = (uintptr_t)val;
+
+ err = kvm_device_ioctl(fd,
+ write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
+ &kvmattr);
+ if (err < 0) {
+ error_report("KVM_%s_DEVICE_ATTR failed: %s",
+ write ? "SET" : "GET", strerror(-err));
+ error_printf("Group %d attr 0x%016" PRIx64, group, attr);
+ abort();
+ }
+}
+
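A small illustrative sketch of the device-attribute helpers; dev_fd is assumed
to be a file descriptor returned by KVM_CREATE_DEVICE, and GROUP/ATTR stand in
for constants from that device's kernel ABI:

    uint64_t val;

    if (kvm_device_check_attr(dev_fd, GROUP, ATTR)) {
        kvm_device_access(dev_fd, GROUP, ATTR, &val, false);  /* read */
        val |= 1;
        kvm_device_access(dev_fd, GROUP, ATTR, &val, true);   /* write; aborts on error */
    }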
int kvm_has_sync_mmu(void)
{
return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
return kvm_state->debugregs;
}
-int kvm_has_xsave(void)
-{
- return kvm_state->xsave;
-}
-
-int kvm_has_xcrs(void)
-{
- return kvm_state->xcrs;
-}
-
-int kvm_has_pit_state2(void)
-{
- return kvm_state->pit_state2;
-}
-
int kvm_has_many_ioeventfds(void)
{
if (!kvm_enabled()) {