X-Git-Url: https://repo.jachan.dev/qemu.git/blobdiff_plain/2c80e996e427ae31982f3405a762859578a6261d..54c54f8b56047d3c2420e1ae06a6a8890c220ac4:/kvm-all.c

diff --git a/kvm-all.c b/kvm-all.c
index 3f7061a180..c442838668 100644
--- a/kvm-all.c
+++ b/kvm-all.c
@@ -24,19 +24,19 @@
 #include "qemu/atomic.h"
 #include "qemu/option.h"
 #include "qemu/config-file.h"
-#include "sysemu/sysemu.h"
-#include "sysemu/accel.h"
+#include "qemu/error-report.h"
 #include "hw/hw.h"
 #include "hw/pci/msi.h"
 #include "hw/s390x/adapter.h"
 #include "exec/gdbstub.h"
-#include "sysemu/kvm.h"
+#include "sysemu/kvm_int.h"
 #include "qemu/bswap.h"
 #include "exec/memory.h"
 #include "exec/ram_addr.h"
 #include "exec/address-spaces.h"
 #include "qemu/event_notifier.h"
 #include "trace.h"
+#include "hw/irq.h"
 
 #include "hw/boards.h"
 
@@ -60,22 +60,10 @@
 #define KVM_MSI_HASHTAB_SIZE    256
 
-typedef struct KVMSlot
-{
-    hwaddr start_addr;
-    ram_addr_t memory_size;
-    void *ram;
-    int slot;
-    int flags;
-} KVMSlot;
-
-typedef struct kvm_dirty_log KVMDirtyLog;
-
 struct KVMState
 {
     AccelState parent_obj;
 
-    KVMSlot *slots;
     int nr_slots;
     int fd;
     int vmfd;
@@ -83,15 +71,12 @@ struct KVMState
     struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
     bool coalesced_flush_in_progress;
     int broken_set_mem_region;
-    int migration_log;
     int vcpu_events;
     int robust_singlestep;
     int debugregs;
 #ifdef KVM_CAP_SET_GUEST_DEBUG
     struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
 #endif
-    int pit_state2;
-    int xsave, xcrs;
     int many_ioeventfds;
     int intx_set_mask;
     /* The man page (and posix) say ioctl numbers are signed int, but
@@ -99,21 +84,17 @@ struct KVMState
      * unsigned, and treating them as signed here can break things */
     unsigned irq_set_ioctl;
     unsigned int sigmask_len;
+    GHashTable *gsimap;
 #ifdef KVM_CAP_IRQ_ROUTING
     struct kvm_irq_routing *irq_routes;
     int nr_allocated_irq_routes;
     uint32_t *used_gsi_bitmap;
     unsigned int gsi_count;
     QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
-    bool direct_msi;
 #endif
+    KVMMemoryListener memory_listener;
 };
 
-#define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
-
-#define KVM_STATE(obj) \
-    OBJECT_CHECK(KVMState, (obj), TYPE_KVM_ACCEL)
-
 KVMState *kvm_state;
 bool kvm_kernel_irqchip;
 bool kvm_async_interrupts_allowed;
@@ -127,6 +108,7 @@ bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
 bool kvm_vm_attributes_allowed;
+bool kvm_direct_msi_allowed;
 
 static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_INFO(USER_MEMORY),
@@ -134,13 +116,14 @@ static const KVMCapabilityInfo kvm_required_capabilites[] = {
     KVM_CAP_LAST_INFO
 };
 
-static KVMSlot *kvm_get_free_slot(KVMState *s)
+static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
 {
+    KVMState *s = kvm_state;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        if (s->slots[i].memory_size == 0) {
-            return &s->slots[i];
+        if (kml->slots[i].memory_size == 0) {
+            return &kml->slots[i];
         }
     }
 
@@ -149,12 +132,14 @@ static KVMSlot *kvm_get_free_slot(KVMState *s)
 
 bool kvm_has_free_slot(MachineState *ms)
 {
-    return kvm_get_free_slot(KVM_STATE(ms->accelerator));
+    KVMState *s = KVM_STATE(ms->accelerator);
+
+    return kvm_get_free_slot(&s->memory_listener);
 }
 
-static KVMSlot *kvm_alloc_slot(KVMState *s)
+static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
 {
-    KVMSlot *slot = kvm_get_free_slot(s);
+    KVMSlot *slot = kvm_get_free_slot(kml);
 
     if (slot) {
         return slot;
@@ -164,14 +149,15 @@ static KVMSlot *kvm_alloc_slot(KVMState *s)
     abort();
 }
 
-static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
+static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                          hwaddr start_addr,
                                          hwaddr end_addr)
 {
+    KVMState *s = kvm_state;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (start_addr == mem->start_addr &&
             end_addr == mem->start_addr + mem->memory_size) {
@@ -185,15 +171,16 @@ static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
 /*
  * Find overlapping slot with lowest start address
  */
-static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
+static KVMSlot *kvm_lookup_overlapping_slot(KVMMemoryListener *kml,
                                             hwaddr start_addr,
                                             hwaddr end_addr)
 {
+    KVMState *s = kvm_state;
     KVMSlot *found = NULL;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (mem->memory_size == 0 ||
             (found && found->start_addr < mem->start_addr)) {
@@ -212,10 +199,11 @@ static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                        hwaddr *phys_addr)
 {
+    KVMMemoryListener *kml = &s->memory_listener;
     int i;
 
     for (i = 0; i < s->nr_slots; i++) {
-        KVMSlot *mem = &s->slots[i];
+        KVMSlot *mem = &kml->slots[i];
 
         if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
             *phys_addr = mem->start_addr + (ram - mem->ram);
@@ -226,17 +214,15 @@ int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
     return 0;
 }
 
-static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
+static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot)
 {
+    KVMState *s = kvm_state;
     struct kvm_userspace_memory_region mem;
 
-    mem.slot = slot->slot;
+    mem.slot = slot->slot | (kml->as_id << 16);
     mem.guest_phys_addr = slot->start_addr;
     mem.userspace_addr = (unsigned long)slot->ram;
     mem.flags = slot->flags;
-    if (s->migration_log) {
-        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
-    }
 
     if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
         /* Set the slot size to 0 before setting the slot to the desired
@@ -295,101 +281,82 @@ err:
  * dirty pages logging control
  */
-static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
+static int kvm_mem_flags(MemoryRegion *mr)
 {
+    bool readonly = mr->readonly || memory_region_is_romd(mr);
     int flags = 0;
-    flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
+
+    if (memory_region_get_dirty_log_mask(mr) != 0) {
+        flags |= KVM_MEM_LOG_DIRTY_PAGES;
+    }
     if (readonly && kvm_readonly_mem_allowed) {
         flags |= KVM_MEM_READONLY;
     }
     return flags;
 }
 
-static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
+static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
+                                 MemoryRegion *mr)
 {
-    KVMState *s = kvm_state;
-    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
     int old_flags;
 
     old_flags = mem->flags;
-
-    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
-    mem->flags = flags;
+    mem->flags = kvm_mem_flags(mr);
 
     /* If nothing changed effectively, no need to issue ioctl */
-    if (s->migration_log) {
-        flags |= KVM_MEM_LOG_DIRTY_PAGES;
-    }
-
-    if (flags == old_flags) {
+    if (mem->flags == old_flags) {
         return 0;
     }
 
-    return kvm_set_user_memory_region(s, mem);
+    return kvm_set_user_memory_region(kml, mem);
 }
 
-static int kvm_dirty_pages_log_change(hwaddr phys_addr,
-                                      ram_addr_t size, bool log_dirty)
+static int kvm_section_update_flags(KVMMemoryListener *kml,
+                                    MemoryRegionSection *section)
 {
-    KVMState *s = kvm_state;
-    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
+    hwaddr phys_addr = section->offset_within_address_space;
+    ram_addr_t size = int128_get64(section->size);
+    KVMSlot *mem = kvm_lookup_matching_slot(kml, phys_addr, phys_addr + size);
 
     if (mem == NULL) {
-        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
-                TARGET_FMT_plx "\n", __func__, phys_addr,
-                (hwaddr)(phys_addr + size - 1));
-        return -EINVAL;
+        return 0;
+    } else {
+        return kvm_slot_update_flags(kml, mem, section->mr);
     }
-    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
 }
 
 static void kvm_log_start(MemoryListener *listener,
-                          MemoryRegionSection *section)
+                          MemoryRegionSection *section,
+                          int old, int new)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
-                                   int128_get64(section->size), true);
+    if (old != 0) {
+        return;
+    }
+
+    r = kvm_section_update_flags(kml, section);
     if (r < 0) {
         abort();
     }
 }
 
 static void kvm_log_stop(MemoryListener *listener,
-                          MemoryRegionSection *section)
+                          MemoryRegionSection *section,
+                          int old, int new)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
-                                   int128_get64(section->size), false);
-    if (r < 0) {
-        abort();
+    if (new != 0) {
+        return;
     }
-}
 
-static int kvm_set_migration_log(bool enable)
-{
-    KVMState *s = kvm_state;
-    KVMSlot *mem;
-    int i, err;
-
-    s->migration_log = enable;
-
-    for (i = 0; i < s->nr_slots; i++) {
-        mem = &s->slots[i];
-
-        if (!mem->memory_size) {
-            continue;
-        }
-        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
-            continue;
-        }
-        err = kvm_set_user_memory_region(s, mem);
-        if (err) {
-            return err;
-        }
+    r = kvm_section_update_flags(kml, section);
+    if (r < 0) {
+        abort();
     }
-    return 0;
 }
 
 /* get kvm's dirty pages bitmap and update qemu's */
@@ -414,11 +381,12 @@ static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
  * @start_add: start of logged region.
  * @end_addr: end of logged region.
  */
-static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
+static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
+                                          MemoryRegionSection *section)
 {
     KVMState *s = kvm_state;
     unsigned long size, allocated_size = 0;
-    KVMDirtyLog d = {};
+    struct kvm_dirty_log d = {};
     KVMSlot *mem;
     int ret = 0;
     hwaddr start_addr = section->offset_within_address_space;
@@ -426,7 +394,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
 
     d.dirty_bitmap = NULL;
     while (start_addr < end_addr) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
+        mem = kvm_lookup_overlapping_slot(kml, start_addr, end_addr);
         if (mem == NULL) {
             break;
         }
@@ -453,8 +421,7 @@ static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
         allocated_size = size;
         memset(d.dirty_bitmap, 0, allocated_size);
 
-        d.slot = mem->slot;
-
+        d.slot = mem->slot | (kml->as_id << 16);
         if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
             DPRINTF("ioctl failed %d\n", errno);
             ret = -1;
@@ -552,13 +519,13 @@ static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                   bool assign, uint32_t size, bool datamatch)
 {
     int ret;
-    struct kvm_ioeventfd iofd;
-
-    iofd.datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0;
-    iofd.addr = addr;
-    iofd.len = size;
-    iofd.flags = 0;
-    iofd.fd = fd;
+    struct kvm_ioeventfd iofd = {
+        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
+        .addr = addr,
+        .len = size,
+        .flags = 0,
+        .fd = fd,
+    };
 
     if (!kvm_enabled()) {
         return -ENOSYS;
@@ -657,15 +624,14 @@ kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
     return NULL;
 }
 
-static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
+static void kvm_set_phys_mem(KVMMemoryListener *kml,
+                             MemoryRegionSection *section, bool add)
 {
     KVMState *s = kvm_state;
     KVMSlot *mem, old;
     int err;
     MemoryRegion *mr = section->mr;
-    bool log_dirty = memory_region_is_logging(mr);
     bool writeable = !mr->readonly && !mr->rom_device;
-    bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
     hwaddr start_addr = section->offset_within_address_space;
     ram_addr_t size = int128_get64(section->size);
     void *ram = NULL;
@@ -674,15 +640,15 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     /* kvm works in page size chunks, but the function may be called
        with sub-page size and unaligned start address. Pad the start
        address to next and truncate size to previous page boundary. */
-    delta = (TARGET_PAGE_SIZE - (start_addr & ~TARGET_PAGE_MASK));
-    delta &= ~TARGET_PAGE_MASK;
+    delta = qemu_real_host_page_size - (start_addr & ~qemu_real_host_page_mask);
+    delta &= ~qemu_real_host_page_mask;
     if (delta > size) {
         return;
     }
     start_addr += delta;
     size -= delta;
-    size &= TARGET_PAGE_MASK;
-    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
+    size &= qemu_real_host_page_mask;
+    if (!size || (start_addr & ~qemu_real_host_page_mask)) {
         return;
     }
 
@@ -699,7 +665,7 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
 
     while (1) {
-        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
+        mem = kvm_lookup_overlapping_slot(kml, start_addr, start_addr + size);
         if (!mem) {
             break;
         }
@@ -709,19 +675,19 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
             (ram - start_addr == mem->ram - mem->start_addr)) {
             /* The new slot fits into the existing one and comes with
              * identical parameters - update flags and done. */
-            kvm_slot_dirty_pages_log_change(mem, log_dirty);
+            kvm_slot_update_flags(kml, mem, mr);
             return;
         }
 
         old = *mem;
 
-        if ((mem->flags & KVM_MEM_LOG_DIRTY_PAGES) || s->migration_log) {
-            kvm_physical_sync_dirty_bitmap(section);
+        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
+            kvm_physical_sync_dirty_bitmap(kml, section);
         }
 
         /* unregister the overlapping slot */
         mem->memory_size = 0;
-        err = kvm_set_user_memory_region(s, mem);
+        err = kvm_set_user_memory_region(kml, mem);
         if (err) {
             fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                     __func__, strerror(-err));
@@ -738,13 +704,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
          * - and actually require a recent KVM version. */
         if (s->broken_set_mem_region &&
             old.start_addr == start_addr && old.memory_size < size && add) {
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->memory_size = old.memory_size;
             mem->start_addr = old.start_addr;
             mem->ram = old.ram;
-            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+            mem->flags = kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                         strerror(-err));
@@ -759,13 +725,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
 
         /* register prefix slot */
         if (old.start_addr < start_addr) {
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->memory_size = start_addr - old.start_addr;
             mem->start_addr = old.start_addr;
             mem->ram = old.ram;
-            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+            mem->flags = kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error registering prefix slot: %s\n",
                         __func__, strerror(-err));
@@ -782,14 +748,14 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
         if (old.start_addr + old.memory_size > start_addr + size) {
             ram_addr_t size_delta;
 
-            mem = kvm_alloc_slot(s);
+            mem = kvm_alloc_slot(kml);
             mem->start_addr = start_addr + size;
             size_delta = mem->start_addr - old.start_addr;
             mem->memory_size = old.memory_size - size_delta;
             mem->ram = old.ram + size_delta;
-            mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+            mem->flags = kvm_mem_flags(mr);
 
-            err = kvm_set_user_memory_region(s, mem);
+            err = kvm_set_user_memory_region(kml, mem);
             if (err) {
                 fprintf(stderr, "%s: error registering suffix slot: %s\n",
                         __func__, strerror(-err));
@@ -805,13 +771,13 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
     if (!add) {
         return;
     }
-    mem = kvm_alloc_slot(s);
+    mem = kvm_alloc_slot(kml);
     mem->memory_size = size;
     mem->start_addr = start_addr;
     mem->ram = ram;
-    mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
+    mem->flags = kvm_mem_flags(mr);
 
-    err = kvm_set_user_memory_region(s, mem);
+    err = kvm_set_user_memory_region(kml, mem);
     if (err) {
         fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                 strerror(-err));
@@ -822,44 +788,33 @@ static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
 static void kvm_region_add(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
     memory_region_ref(section->mr);
-    kvm_set_phys_mem(section, true);
+    kvm_set_phys_mem(kml, section, true);
 }
 
 static void kvm_region_del(MemoryListener *listener,
                            MemoryRegionSection *section)
 {
-    kvm_set_phys_mem(section, false);
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
+
+    kvm_set_phys_mem(kml, section, false);
     memory_region_unref(section->mr);
 }
 
 static void kvm_log_sync(MemoryListener *listener,
                          MemoryRegionSection *section)
 {
+    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
     int r;
 
-    r = kvm_physical_sync_dirty_bitmap(section);
+    r = kvm_physical_sync_dirty_bitmap(kml, section);
     if (r < 0) {
         abort();
     }
 }
 
-static void kvm_log_global_start(struct MemoryListener *listener)
-{
-    int r;
-
-    r = kvm_set_migration_log(1);
-    assert(r >= 0);
-}
-
-static void kvm_log_global_stop(struct MemoryListener *listener)
-{
-    int r;
-
-    r = kvm_set_migration_log(0);
-    assert(r >= 0);
-}
-
 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                   MemoryRegionSection *section,
                                   bool match_data, uint64_t data,
@@ -929,20 +884,27 @@ static void kvm_io_ioeventfd_del(MemoryListener *listener,
     }
 }
 
-static MemoryListener kvm_memory_listener = {
-    .region_add = kvm_region_add,
-    .region_del = kvm_region_del,
-    .log_start = kvm_log_start,
-    .log_stop = kvm_log_stop,
-    .log_sync = kvm_log_sync,
-    .log_global_start = kvm_log_global_start,
-    .log_global_stop = kvm_log_global_stop,
-    .eventfd_add = kvm_mem_ioeventfd_add,
-    .eventfd_del = kvm_mem_ioeventfd_del,
-    .coalesced_mmio_add = kvm_coalesce_mmio_region,
-    .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
-    .priority = 10,
-};
+void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
+                                  AddressSpace *as, int as_id)
+{
+    int i;
+
+    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
+    kml->as_id = as_id;
+
+    for (i = 0; i < s->nr_slots; i++) {
+        kml->slots[i].slot = i;
+    }
+
+    kml->listener.region_add = kvm_region_add;
+    kml->listener.region_del = kvm_region_del;
+    kml->listener.log_start = kvm_log_start;
+    kml->listener.log_stop = kvm_log_stop;
+    kml->listener.log_sync = kvm_log_sync;
+    kml->listener.priority = 10;
+
+    memory_listener_register(&kml->listener, as);
+}
 
 static MemoryListener kvm_io_listener = {
     .eventfd_add = kvm_io_ioeventfd_add,
@@ -1015,7 +977,7 @@ void kvm_init_irq_routing(KVMState *s)
     s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
     s->nr_allocated_irq_routes = 0;
 
-    if (!s->direct_msi) {
+    if (!kvm_direct_msi_allowed) {
         for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
             QTAILQ_INIT(&s->msi_hashtab[i]);
         }
     }
@@ -1142,9 +1104,17 @@ static int kvm_irqchip_get_virq(KVMState *s)
     uint32_t *word = s->used_gsi_bitmap;
     int max_words = ALIGN(s->gsi_count, 32) / 32;
     int i, zeroes;
-    bool retry = true;
 
-again:
+    /*
+     * PIC and IOAPIC share the first 16 GSI numbers, thus the available
+     * GSI numbers are more than the number of IRQ route. Allocating a GSI
+     * number can succeed even though a new route entry cannot be added.
+     * When this happens, flush dynamic MSI entries to free IRQ route entries.
+     */
+    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
+        kvm_flush_dynamic_msi_routes(s);
+    }
+
     /* Return the lowest unused GSI in the bitmap */
     for (i = 0; i < max_words; i++) {
         zeroes = ctz32(~word[i]);
@@ -1154,11 +1124,6 @@ again:
 
         return zeroes + i * 32;
     }
-    if (!s->direct_msi && retry) {
-        retry = false;
-        kvm_flush_dynamic_msi_routes(s);
-        goto again;
-    }
 
     return -ENOSPC;
 }
@@ -1183,7 +1148,7 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
     struct kvm_msi msi;
     KVMMSIRoute *route;
 
-    if (s->direct_msi) {
+    if (kvm_direct_msi_allowed) {
         msi.address_lo = (uint32_t)msg.address;
         msi.address_hi = msg.address >> 32;
         msi.data = le32_to_cpu(msg.data);
@@ -1222,13 +1187,13 @@ int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
     return kvm_set_irq(s, route->kroute.gsi, 1);
 }
 
-int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
+int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg, PCIDevice *dev)
 {
     struct kvm_irq_routing_entry kroute = {};
     int virq;
 
     if (kvm_gsi_direct_mapping()) {
-        return msg.data & 0xffff;
+        return kvm_arch_msi_data_to_gsi(msg.data);
     }
 
     if (!kvm_gsi_routing_enabled()) {
@@ -1246,7 +1211,7 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
     kroute.u.msi.address_lo = (uint32_t)msg.address;
     kroute.u.msi.address_hi = msg.address >> 32;
     kroute.u.msi.data = le32_to_cpu(msg.data);
-    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
         kvm_irqchip_release_virq(s, virq);
         return -EINVAL;
     }
@@ -1257,7 +1222,8 @@ int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
     return virq;
 }
 
-int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
+int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
+                                 PCIDevice *dev)
 {
     struct kvm_irq_routing_entry kroute = {};
 
@@ -1275,7 +1241,7 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
     kroute.u.msi.address_lo = (uint32_t)msg.address;
     kroute.u.msi.address_hi = msg.address >> 32;
     kroute.u.msi.data = le32_to_cpu(msg.data);
-    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data)) {
+    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
         return -EINVAL;
     }
 
@@ -1327,7 +1293,6 @@ int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
     kroute.u.adapter.adapter_id = adapter->adapter_id;
 
     kvm_add_routing_entry(s, &kroute);
-    kvm_irqchip_commit_routes(s);
 
     return virq;
 }
@@ -1368,40 +1333,74 @@ int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
 }
 #endif /* !KVM_CAP_IRQ_ROUTING */
 
-int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
-                                   EventNotifier *rn, int virq)
+int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+                                       EventNotifier *rn, int virq)
 {
     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
            rn ? event_notifier_get_fd(rn) : -1, virq, true);
 }
 
-int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
+int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
+                                          int virq)
 {
     return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
                                     false);
 }
 
-static int kvm_irqchip_create(MachineState *machine, KVMState *s)
+int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
+                                   EventNotifier *rn, qemu_irq irq)
+{
+    gpointer key, gsi;
+    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
+
+    if (!found) {
+        return -ENXIO;
+    }
+    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
+}
+
+int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
+                                      qemu_irq irq)
+{
+    gpointer key, gsi;
+    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);
+
+    if (!found) {
+        return -ENXIO;
+    }
+    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
+}
+
+void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
+{
+    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
+}
+
+static void kvm_irqchip_create(MachineState *machine, KVMState *s)
 {
     int ret;
 
-    if (!machine_kernel_irqchip_allowed(machine) ||
-        (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
-         (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
-        return 0;
+    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
+        ;
+    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
+        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
+        if (ret < 0) {
+            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
+            exit(1);
+        }
+    } else {
+        return;
     }
 
     /* First probe and see if there's a arch-specific hook to create the
      * in-kernel irqchip for us */
     ret = kvm_arch_irqchip_create(s);
-    if (ret < 0) {
-        return ret;
-    } else if (ret == 0) {
+    if (ret == 0) {
         ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
-        if (ret < 0) {
-            fprintf(stderr, "Create kernel irqchip failed\n");
-            return ret;
-        }
+    }
+    if (ret < 0) {
+        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
+        exit(1);
     }
 
     kvm_kernel_irqchip = true;
@@ -1413,7 +1412,7 @@ static int kvm_irqchip_create(MachineState *machine, KVMState *s)
 
     kvm_init_irq_routing(s);
 
-    return 0;
+    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
 }
 
 /* Find number of supported CPUs using the recommended
@@ -1450,7 +1449,7 @@ static int kvm_init(MachineState *ms)
     KVMState *s;
     const KVMCapabilityInfo *missing_cap;
     int ret;
-    int i, type = 0;
+    int type = 0;
     const char *kvm_type;
 
     s = KVM_STATE(ms->accelerator);
@@ -1499,12 +1498,6 @@ static int kvm_init(MachineState *ms)
         s->nr_slots = 32;
     }
 
-    s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
-
-    for (i = 0; i < s->nr_slots; i++) {
-        s->slots[i].slot = i;
-    }
-
     /* check the vcpu limits */
     soft_vcpus_limit = kvm_recommended_vcpus(s);
     hard_vcpus_limit = kvm_max_vcpus(s);
@@ -1591,20 +1584,8 @@ static int kvm_init(MachineState *ms)
     s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
 #endif
 
-#ifdef KVM_CAP_XSAVE
-    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
-#endif
-
-#ifdef KVM_CAP_XCRS
-    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
-#endif
-
-#ifdef KVM_CAP_PIT_STATE2
-    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
-#endif
-
 #ifdef KVM_CAP_IRQ_ROUTING
-    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
+    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif
 
     s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
@@ -1636,14 +1617,21 @@ static int kvm_init(MachineState *ms)
         goto err;
     }
 
-    ret = kvm_irqchip_create(ms, s);
-    if (ret < 0) {
-        goto err;
+    if (machine_kernel_irqchip_allowed(ms)) {
+        kvm_irqchip_create(ms, s);
     }
 
     kvm_state = s;
-    memory_listener_register(&kvm_memory_listener, &address_space_memory);
-    memory_listener_register(&kvm_io_listener, &address_space_io);
+
+    s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
+    s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
+    s->memory_listener.listener.coalesced_mmio_add = kvm_coalesce_mmio_region;
+    s->memory_listener.listener.coalesced_mmio_del = kvm_uncoalesce_mmio_region;
+
+    kvm_memory_listener_register(s, &s->memory_listener,
+                                 &address_space_memory, 0);
+    memory_listener_register(&kvm_io_listener,
+                             &address_space_io);
 
     s->many_ioeventfds = kvm_check_many_ioeventfds();
 
@@ -1659,7 +1647,7 @@ err:
     if (s->fd != -1) {
         close(s->fd);
     }
-    g_free(s->slots);
+    g_free(s->memory_listener.slots);
 
     return ret;
 }
@@ -1669,14 +1657,14 @@ void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
     s->sigmask_len = sigmask_len;
 }
 
-static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
-                          uint32_t count)
+static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
+                          int size, uint32_t count)
 {
     int i;
     uint8_t *ptr = data;
 
     for (i = 0; i < count; i++) {
-        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
+        address_space_rw(&address_space_io, port, attrs,
                          ptr, size,
                          direction == KVM_EXIT_IO_OUT);
         ptr += size;
@@ -1795,7 +1783,11 @@ int kvm_cpu_exec(CPUState *cpu)
         return EXCP_HLT;
     }
 
+    qemu_mutex_unlock_iothread();
+
     do {
+        MemTxAttrs attrs;
+
         if (cpu->kvm_vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
             cpu->kvm_vcpu_dirty = false;
@@ -1811,12 +1803,10 @@ int kvm_cpu_exec(CPUState *cpu)
              */
             qemu_cpu_kick_self();
         }
-        qemu_mutex_unlock_iothread();
 
         run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
 
-        qemu_mutex_lock_iothread();
-        kvm_arch_post_run(cpu, run);
+        attrs = kvm_arch_post_run(cpu, run);
 
         if (run_ret < 0) {
             if (run_ret == -EINTR || run_ret == -EAGAIN) {
@@ -1826,6 +1816,14 @@ int kvm_cpu_exec(CPUState *cpu)
             }
             fprintf(stderr, "error: kvm run failed %s\n",
                     strerror(-run_ret));
+#ifdef TARGET_PPC
+            if (run_ret == -EBUSY) {
+                fprintf(stderr,
+                        "This is probably because your SMT is enabled.\n"
+                        "VCPU can only run on primary threads with all "
+                        "secondary threads offline.\n");
+            }
+#endif
             ret = -1;
             break;
         }
@@ -1834,7 +1832,8 @@ int kvm_cpu_exec(CPUState *cpu)
         switch (run->exit_reason) {
         case KVM_EXIT_IO:
             DPRINTF("handle_io\n");
-            kvm_handle_io(run->io.port,
+            /* Called outside BQL */
+            kvm_handle_io(run->io.port, attrs,
                           (uint8_t *)run + run->io.data_offset,
                           run->io.direction,
                           run->io.size,
@@ -1843,10 +1842,12 @@ int kvm_cpu_exec(CPUState *cpu)
             break;
         case KVM_EXIT_MMIO:
             DPRINTF("handle_mmio\n");
-            cpu_physical_memory_rw(run->mmio.phys_addr,
-                                   run->mmio.data,
-                                   run->mmio.len,
-                                   run->mmio.is_write);
+            /* Called outside BQL */
+            address_space_rw(&address_space_memory,
+                             run->mmio.phys_addr, attrs,
+                             run->mmio.data,
+                             run->mmio.len,
+                             run->mmio.is_write);
             ret = 0;
             break;
         case KVM_EXIT_IRQ_WINDOW_OPEN:
@@ -1876,6 +1877,12 @@ int kvm_cpu_exec(CPUState *cpu)
             qemu_system_reset_request();
             ret = EXCP_INTERRUPT;
             break;
+            case KVM_SYSTEM_EVENT_CRASH:
+                qemu_mutex_lock_iothread();
+                qemu_system_guest_panicked();
+                qemu_mutex_unlock_iothread();
+                ret = 0;
+                break;
         default:
             DPRINTF("kvm_arch_handle_exit\n");
             ret = kvm_arch_handle_exit(cpu, run);
@@ -1889,6 +1896,8 @@ int kvm_cpu_exec(CPUState *cpu)
         }
     } while (ret == 0);
 
+    qemu_mutex_lock_iothread();
+
     if (ret < 0) {
         cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
         vm_stop(RUN_STATE_INTERNAL_ERROR);
@@ -1987,6 +1996,39 @@ int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
     return ret ? 0 : 1;
 }
 
+int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
+{
+    struct kvm_device_attr attribute = {
+        .group = group,
+        .attr = attr,
+        .flags = 0,
+    };
+
+    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
+}
+
+void kvm_device_access(int fd, int group, uint64_t attr,
+                       void *val, bool write)
+{
+    struct kvm_device_attr kvmattr;
+    int err;
+
+    kvmattr.flags = 0;
+    kvmattr.group = group;
+    kvmattr.attr = attr;
+    kvmattr.addr = (uintptr_t)val;
+
+    err = kvm_device_ioctl(fd,
+                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
+                           &kvmattr);
+    if (err < 0) {
+        error_report("KVM_%s_DEVICE_ATTR failed: %s\n"
+                     "Group %d attr 0x%016" PRIx64, write ? "SET" : "GET",
+                     strerror(-err), group, attr);
+        abort();
+    }
+}
+
 int kvm_has_sync_mmu(void)
 {
     return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
@@ -2007,21 +2049,6 @@ int kvm_has_debugregs(void)
     return kvm_state->debugregs;
 }
 
-int kvm_has_xsave(void)
-{
-    return kvm_state->xsave;
-}
-
-int kvm_has_xcrs(void)
-{
-    return kvm_state->xcrs;
-}
-
-int kvm_has_pit_state2(void)
-{
-    return kvm_state->pit_state2;
-}
-
 int kvm_has_many_ioeventfds(void)
 {
     if (!kvm_enabled()) {
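
Usage note (editorial addition, not part of the patch): the qemu_irq-to-GSI hash
table introduced above (s->gsimap, populated via kvm_irqchip_set_qemuirq_gsi())
lets platform code attach an irqfd by qemu_irq instead of by raw GSI number,
while kvm_memory_listener_register() tags every slot with its address-space id
(slot | (as_id << 16)) so that more than one KVMMemoryListener can coexist on
the same VM. A minimal caller sketch, assuming an in-kernel irqchip; "my_gsi"
and the notifier wiring are illustrative placeholders, and error handling is
abbreviated:

    #include "qemu/event_notifier.h"
    #include "sysemu/kvm.h"

    static int attach_irqfd(KVMState *s, qemu_irq irq, EventNotifier *n,
                            int my_gsi)
    {
        /* Record the qemu_irq -> GSI mapping in s->gsimap. */
        kvm_irqchip_set_qemuirq_gsi(s, irq, my_gsi);

        /* Resolves the GSI through the hash table and forwards to the
         * _gsi variant; returns -ENXIO if the qemu_irq was never
         * registered.  Passing NULL for the resample notifier maps to
         * fd -1, as in kvm_irqchip_add_irqfd_notifier_gsi() above. */
        return kvm_irqchip_add_irqfd_notifier(s, n, NULL, irq);
    }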