/*
 * Copyright IBM, Corp. 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu/atomic.h"
#include "qemu/option.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/pci/msi.h"
#include "hw/pci/msix.h"
#include "hw/s390x/adapter.h"
#include "exec/gdbstub.h"
#include "sysemu/kvm_int.h"
#include "sysemu/runstate.h"
#include "sysemu/cpus.h"
#include "sysemu/sysemu.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "qemu/main-loop.h"
#include "trace.h"
#include "hw/irq.h"
#include "sysemu/sev.h"
#include "qapi/visitor.h"
#include "qapi/qapi-types-common.h"
#include "qapi/qapi-visit-common.h"
#include "sysemu/reset.h"

#include "hw/boards.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of KVM_COALESCED_MMIO_MAX. We
 * need to use the real host PAGE_SIZE, as that's what KVM will use.
 */
#define PAGE_SIZE qemu_real_host_page_size
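
/*
 * Why the redefinition matters: the coalesced MMIO ring below lives at
 * s->coalesced_mmio * PAGE_SIZE inside the vcpu mmap area, and
 * KVM_COALESCED_MMIO_MAX expands in terms of PAGE_SIZE, so both must be
 * computed with the host's page size, not the target's.
 */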

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#define KVM_MSI_HASHTAB_SIZE    256

struct KVMParkedVcpu {
    unsigned long vcpu_id;
    int kvm_fd;
    QLIST_ENTRY(KVMParkedVcpu) node;
};

struct KVMState
{
    AccelState parent_obj;

    int nr_slots;
    int fd;
    int vmfd;
    int coalesced_mmio;
    int coalesced_pio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_HEAD(, kvm_sw_breakpoint) kvm_sw_breakpoints;
#endif
    int max_nested_state_len;
    int many_ioeventfds;
    int intx_set_mask;
    bool sync_mmu;
    bool kernel_irqchip_allowed;
    bool kernel_irqchip_required;
    OnOffAuto kernel_irqchip_split;
    uint64_t manual_dirty_log_protect;
    /* The man page (and posix) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things */
    unsigned irq_set_ioctl;
    unsigned int sigmask_len;
    GHashTable *gsimap;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    unsigned long *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
#endif
    KVMMemoryListener memory_listener;
    QLIST_HEAD(, KVMParkedVcpu) kvm_parked_vcpus;

    /* memory encryption */
    void *memcrypt_handle;
    int (*memcrypt_encrypt_data)(void *handle, uint8_t *ptr, uint64_t len);

    /* For "info mtree -f" to tell if an MR is registered in KVM */
    int nr_as;
    struct KVMAs {
        KVMMemoryListener *ml;
        AddressSpace *as;
    } *as;
};

bool kvm_kernel_irqchip;
bool kvm_split_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_halt_in_kernel_allowed;
bool kvm_eventfds_allowed;
bool kvm_irqfds_allowed;
bool kvm_resamplefds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
bool kvm_gsi_direct_mapping;
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_direct_msi_allowed;
bool kvm_ioeventfd_any_length_allowed;
bool kvm_msi_use_devid;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
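
/*
 * ~0 effectively means "no per-slot size limit".  Targets whose kernel
 * interface caps the memslot size can lower this via
 * kvm_set_max_memslot_size() below; the callers of that helper live
 * outside this file.
 */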

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_INFO(JOIN_MEMORY_REGIONS_WORKS),
    KVM_CAP_LAST_INFO
};

static NotifierList kvm_irqchip_change_notifiers =
    NOTIFIER_LIST_INITIALIZER(kvm_irqchip_change_notifiers);

struct KVMResampleFd {
    int gsi;
    EventNotifier *resample_event;
    QLIST_ENTRY(KVMResampleFd) node;
};
typedef struct KVMResampleFd KVMResampleFd;

/*
 * Only used with split irqchip where we need to do the resample fd
 * kick for the kernel from userspace.
 */
static QLIST_HEAD(, KVMResampleFd) kvm_resample_fd_list =
    QLIST_HEAD_INITIALIZER(kvm_resample_fd_list);

#define kvm_slots_lock(kml)      qemu_mutex_lock(&(kml)->slots_lock)
#define kvm_slots_unlock(kml)    qemu_mutex_unlock(&(kml)->slots_lock)

static inline void kvm_resample_fd_remove(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            QLIST_REMOVE(rfd, node);
            g_free(rfd);
            break;
        }
    }
}

static inline void kvm_resample_fd_insert(int gsi, EventNotifier *event)
{
    KVMResampleFd *rfd = g_new0(KVMResampleFd, 1);

    rfd->gsi = gsi;
    rfd->resample_event = event;

    QLIST_INSERT_HEAD(&kvm_resample_fd_list, rfd, node);
}

void kvm_resample_fd_notify(int gsi)
{
    KVMResampleFd *rfd;

    QLIST_FOREACH(rfd, &kvm_resample_fd_list, node) {
        if (rfd->gsi == gsi) {
            event_notifier_set(rfd->resample_event);
            trace_kvm_resample_fd_notify(gsi);
            return;
        }
    }
}

int kvm_get_max_memslots(void)
{
    KVMState *s = KVM_STATE(current_accel());

    return s->nr_slots;
}

bool kvm_memcrypt_enabled(void)
{
    if (kvm_state && kvm_state->memcrypt_handle) {
        return true;
    }

    return false;
}

int kvm_memcrypt_encrypt_data(uint8_t *ptr, uint64_t len)
{
    if (kvm_state->memcrypt_handle &&
        kvm_state->memcrypt_encrypt_data) {
        return kvm_state->memcrypt_encrypt_data(kvm_state->memcrypt_handle,
                                                ptr, len);
    }

    return 1;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_get_free_slot(KVMMemoryListener *kml)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        if (kml->slots[i].memory_size == 0) {
            return &kml->slots[i];
        }
    }

    return NULL;
}

bool kvm_has_free_slot(MachineState *ms)
{
    KVMState *s = KVM_STATE(ms->accelerator);
    bool result;
    KVMMemoryListener *kml = &s->memory_listener;

    kvm_slots_lock(kml);
    result = !!kvm_get_free_slot(kml);
    kvm_slots_unlock(kml);

    return result;
}

/* Called with KVMMemoryListener.slots_lock held */
static KVMSlot *kvm_alloc_slot(KVMMemoryListener *kml)
{
    KVMSlot *slot = kvm_get_free_slot(kml);

    if (slot) {
        return slot;
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}

static KVMSlot *kvm_lookup_matching_slot(KVMMemoryListener *kml,
                                         hwaddr start_addr,
                                         hwaddr size)
{
    KVMState *s = kvm_state;
    int i;

    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (start_addr == mem->start_addr && size == mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}

/*
 * Calculate and align the start address and the size of the section.
 * Return the size. If the size is 0, the aligned section is empty.
 */
static hwaddr kvm_align_section(MemoryRegionSection *section,
                                hwaddr *start)
{
    hwaddr size = int128_get64(section->size);
    hwaddr delta, aligned;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. Pad the start
       address to next and truncate size to previous page boundary. */
    aligned = ROUND_UP(section->offset_within_address_space,
                       qemu_real_host_page_size);
    delta = aligned - section->offset_within_address_space;
    *start = aligned;
    if (delta > size) {
        return 0;
    }

    return (size - delta) & qemu_real_host_page_mask;
}
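
/*
 * Worked example, assuming 4 KiB host pages: a section covering
 * [0x1234, 0x1234 + 0x3000) is padded to *start = 0x2000, so
 * delta = 0xdcc and (0x3000 - 0xdcc) & page_mask == 0x2000, i.e. only
 * the two fully contained host pages at 0x2000 and 0x3000 are kept.
 */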

int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       hwaddr *phys_addr)
{
    KVMMemoryListener *kml = &s->memory_listener;
    int i, ret = 0;

    kvm_slots_lock(kml);
    for (i = 0; i < s->nr_slots; i++) {
        KVMSlot *mem = &kml->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            ret = 1;
            break;
        }
    }
    kvm_slots_unlock(kml);

    return ret;
}

static int kvm_set_user_memory_region(KVMMemoryListener *kml, KVMSlot *slot, bool new)
{
    KVMState *s = kvm_state;
    struct kvm_userspace_memory_region mem;
    int ret;

    mem.slot = slot->slot | (kml->as_id << 16);
    mem.guest_phys_addr = slot->start_addr;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;

    if (slot->memory_size && !new && (mem.flags ^ slot->old_flags) & KVM_MEM_READONLY) {
        /* Set the slot size to 0 before setting the slot to the desired
         * value. This is needed based on KVM commit 75d61fbc. */
        mem.memory_size = 0;
        ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
        if (ret < 0) {
            goto err;
        }
    }
    mem.memory_size = slot->memory_size;
    ret = kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
    slot->old_flags = mem.flags;
err:
    trace_kvm_set_user_memory(mem.slot, mem.flags, mem.guest_phys_addr,
                              mem.memory_size, mem.userspace_addr, ret);
    if (ret < 0) {
        error_report("%s: KVM_SET_USER_MEMORY_REGION failed, slot=%d,"
                     " start=0x%" PRIx64 ", size=0x%" PRIx64 ": %s",
                     __func__, mem.slot, slot->start_addr,
                     (uint64_t)mem.memory_size, strerror(errno));
    }
    return ret;
}

int kvm_destroy_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    struct KVMParkedVcpu *vcpu = NULL;
    int ret = 0;

    DPRINTF("kvm_destroy_vcpu\n");

    ret = kvm_arch_destroy_vcpu(cpu);
    if (ret < 0) {
        goto err;
    }

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    ret = munmap(cpu->kvm_run, mmap_size);
    if (ret < 0) {
        goto err;
    }

    vcpu = g_malloc0(sizeof(*vcpu));
    vcpu->vcpu_id = kvm_arch_vcpu_id(cpu);
    vcpu->kvm_fd = cpu->kvm_fd;
    QLIST_INSERT_HEAD(&kvm_state->kvm_parked_vcpus, vcpu, node);

err:
    return ret;
}

static int kvm_get_vcpu(KVMState *s, unsigned long vcpu_id)
{
    struct KVMParkedVcpu *cpu;

    QLIST_FOREACH(cpu, &s->kvm_parked_vcpus, node) {
        if (cpu->vcpu_id == vcpu_id) {
            int kvm_fd;

            QLIST_REMOVE(cpu, node);
            kvm_fd = cpu->kvm_fd;
            g_free(cpu);
            return kvm_fd;
        }
    }

    return kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)vcpu_id);
}
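
/*
 * Why parking works: KVM offers no ioctl to destroy a vcpu, so
 * kvm_destroy_vcpu() above stashes the fd on kvm_parked_vcpus instead of
 * closing it, and kvm_get_vcpu() hands the same fd back out when a vcpu
 * with a matching id is created again (e.g. after cpu hot-unplug/hot-plug).
 */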

int kvm_init_vcpu(CPUState *cpu)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_get_vcpu(s, kvm_arch_vcpu_id(cpu));
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    cpu->kvm_fd = ret;
    cpu->kvm_state = s;
    cpu->vcpu_dirty = true;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        cpu->kvm_fd, 0);
    if (cpu->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(cpu);
err:
    return ret;
}

/*
 * dirty pages logging control
 */

static int kvm_mem_flags(MemoryRegion *mr)
{
    bool readonly = mr->readonly || memory_region_is_romd(mr);
    int flags = 0;

    if (memory_region_get_dirty_log_mask(mr) != 0) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    if (readonly && kvm_readonly_mem_allowed) {
        flags |= KVM_MEM_READONLY;
    }
    return flags;
}

/* Called with KVMMemoryListener.slots_lock held */
static int kvm_slot_update_flags(KVMMemoryListener *kml, KVMSlot *mem,
                                 MemoryRegion *mr)
{
    mem->flags = kvm_mem_flags(mr);

    /* If nothing changed effectively, no need to issue ioctl */
    if (mem->flags == mem->old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(kml, mem, false);
}

static int kvm_section_update_flags(KVMMemoryListener *kml,
                                    MemoryRegionSection *section)
{
    hwaddr start_addr, size, slot_size;
    KVMSlot *mem;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return 0;
    }

    kvm_slots_lock(kml);

    while (size && !ret) {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        ret = kvm_slot_update_flags(kml, mem, section->mr);
        start_addr += slot_size;
        size -= slot_size;
    }

out:
    kvm_slots_unlock(kml);
    return ret;
}

static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section,
                          int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (old != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section,
                         int old, int new)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    if (new != 0) {
        return;
    }

    r = kvm_section_update_flags(kml, section);
    if (r < 0) {
        abort();
    }
}

/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    ram_addr_t start = section->offset_within_region +
                       memory_region_get_ram_addr(section->mr);
    ram_addr_t pages = int128_get64(section->size) / qemu_real_host_page_size;

    cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
    return 0;
}

#define ALIGN(x, y)  (((x)+(y)-1) & ~((y)-1))
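
/*
 * For example, ALIGN(0x1001, 0x1000) == 0x2000 and
 * ALIGN(0x1000, 0x1000) == 0x1000; y must be a power of two for the
 * mask trick to be correct.
 */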

/* Allocate the dirty bitmap for a slot */
static void kvm_memslot_init_dirty_bitmap(KVMSlot *mem)
{
    /*
     * XXX bad kernel interface alert
     * For dirty bitmap, the kernel allocates an array of size aligned to
     * bits-per-long.  But when the kernel is 64-bit and userspace is
     * 32-bit, userspace can't align to the same bits-per-long, since
     * sizeof(long) differs between kernel and user space.  Userspace
     * would then provide a buffer that may be 4 bytes less than the
     * kernel will use, resulting in userspace memory corruption (which
     * is not detectable by valgrind either, in most cases).
     * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
     * the hope that sizeof(long) won't become >8 any time soon.
     */
    hwaddr bitmap_size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                               /*HOST_LONG_BITS*/ 64) / 8;
    mem->dirty_bmap = g_malloc0(bitmap_size);
}
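
/*
 * Worked example, assuming 4 KiB target pages: a 1 MiB slot has 256 page
 * bits and ALIGN(256, 64) / 8 == 32 bytes; a 1 MiB + 4 KiB slot has 257
 * bits, rounded up to 320 bits == 40 bytes, matching what a 64-bit kernel
 * allocates for the same slot.
 */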

/**
 * kvm_physical_sync_dirty_bitmap - Sync dirty bitmap from kernel space
 *
 * This function first fetches the dirty bitmap from the kernel, and then
 * updates QEMU's dirty bitmap.
 *
 * NOTE: the caller must hold kml->slots_lock.
 *
 * @kml: the KVM memory listener object
 * @section: the memory section to sync the dirty bitmap with
 */
static int kvm_physical_sync_dirty_bitmap(KVMMemoryListener *kml,
                                          MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    struct kvm_dirty_log d = {};
    KVMSlot *mem;
    hwaddr start_addr, size;
    hwaddr slot_size, slot_offset = 0;
    int ret = 0;

    size = kvm_align_section(section, &start_addr);
    while (size) {
        MemoryRegionSection subsection = *section;

        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
        if (!mem) {
            /* We don't have a slot if we want to trap every access. */
            goto out;
        }

        if (!mem->dirty_bmap) {
            /* Allocate on the first log_sync, once and for all */
            kvm_memslot_init_dirty_bitmap(mem);
        }

        d.dirty_bitmap = mem->dirty_bmap;
        d.slot = mem->slot | (kml->as_id << 16);
        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            goto out;
        }

        subsection.offset_within_region += slot_offset;
        subsection.size = int128_make64(slot_size);
        kvm_get_dirty_pages_log_range(&subsection, d.dirty_bitmap);

        slot_offset += slot_size;
        start_addr += slot_size;
        size -= slot_size;
    }
out:
    return ret;
}

/* Alignment requirement for KVM_CLEAR_DIRTY_LOG - 64 pages */
#define KVM_CLEAR_LOG_SHIFT  6
#define KVM_CLEAR_LOG_ALIGN  (qemu_real_host_page_size << KVM_CLEAR_LOG_SHIFT)
#define KVM_CLEAR_LOG_MASK   (-KVM_CLEAR_LOG_ALIGN)
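
/*
 * With 4 KiB host pages this makes KVM_CLEAR_LOG_ALIGN equal to
 * 4096 << 6 == 256 KiB, so KVM_CLEAR_LOG_MASK (the negated alignment)
 * rounds an address down to the enclosing 64-page boundary.
 */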

static int kvm_log_clear_one_slot(KVMSlot *mem, int as_id, uint64_t start,
                                  uint64_t size)
{
    KVMState *s = kvm_state;
    uint64_t end, bmap_start, start_delta, bmap_npages;
    struct kvm_clear_dirty_log d;
    unsigned long *bmap_clear = NULL, psize = qemu_real_host_page_size;
    int ret;

    /*
     * We need to extend either the start or the size or both to
     * satisfy the KVM interface requirement.  Firstly, do the start
     * page alignment on 64 host pages
     */
    bmap_start = start & KVM_CLEAR_LOG_MASK;
    start_delta = start - bmap_start;
    bmap_start /= psize;

    /*
     * The kernel interface has restriction on the size too, that either:
     *
     * (1) the size is 64 host pages aligned (just like the start), or
     * (2) the size fills up until the end of the KVM memslot.
     */
    bmap_npages = DIV_ROUND_UP(size + start_delta, KVM_CLEAR_LOG_ALIGN)
        << KVM_CLEAR_LOG_SHIFT;
    end = mem->memory_size / psize;
    if (bmap_npages > end - bmap_start) {
        bmap_npages = end - bmap_start;
    }
    start_delta /= psize;

    /*
     * Prepare the bitmap to clear dirty bits.  Here we must guarantee
     * that we won't clear any unknown dirty bits otherwise we might
     * accidentally clear some set bits which are not yet synced from
     * the kernel into QEMU's bitmap, then we'll lose track of the
     * guest modifications upon those pages (which can directly lead
     * to guest data loss or panic after migration).
     *
     * Layout of the KVMSlot.dirty_bmap:
     *
     *                   |<-------- bmap_npages -----------..>|
     *                                                     [1]
     *                     start_delta         size
     *  |----------------|-------------|------------------|------------|
     *  ^                ^             ^                               ^
     *  |                |             |                               |
     * start          bmap_start     (start)                         end
     * of memslot                                                  of memslot
     *
     * [1] bmap_npages can be aligned to either 64 pages or the end of slot
     */
    assert(bmap_start % BITS_PER_LONG == 0);
    /* We should never do log_clear before log_sync */
    assert(mem->dirty_bmap);
    if (start_delta) {
        /* Slow path - we need to manipulate a temp bitmap */
        bmap_clear = bitmap_new(bmap_npages);
        bitmap_copy_with_src_offset(bmap_clear, mem->dirty_bmap,
                                    bmap_start, start_delta + size / psize);
        /*
         * We need to fill the holes at start because that was not
         * specified by the caller and we extended the bitmap only for
         * the page alignment.
         */
        bitmap_clear(bmap_clear, 0, start_delta);
        d.dirty_bitmap = bmap_clear;
    } else {
        /* Fast path - start address aligns well with BITS_PER_LONG */
        d.dirty_bitmap = mem->dirty_bmap + BIT_WORD(bmap_start);
    }

    d.first_page = bmap_start;
    /* It should never overflow.  If it happens, say something */
    assert(bmap_npages <= UINT32_MAX);
    d.num_pages = bmap_npages;
    d.slot = mem->slot | (as_id << 16);

    if (kvm_vm_ioctl(s, KVM_CLEAR_DIRTY_LOG, &d) == -1) {
        ret = -errno;
        error_report("%s: KVM_CLEAR_DIRTY_LOG failed, slot=%d, "
                     "start=0x%"PRIx64", size=0x%"PRIx32", errno=%d",
                     __func__, d.slot, (uint64_t)d.first_page,
                     (uint32_t)d.num_pages, ret);
        goto out;
    }
    ret = 0;
    trace_kvm_clear_dirty_log(d.slot, d.first_page, d.num_pages);

    /*
     * After we have updated the remote dirty bitmap, we update the
     * cached bitmap as well for the memslot, then if another user
     * clears the same region we know we shouldn't clear it again on
     * the remote otherwise it's data loss as well.
     */
    bitmap_clear(mem->dirty_bmap, bmap_start + start_delta,
                 size / psize);
out:
    /* This handles the NULL case well */
    g_free(bmap_clear);
    return ret;
}

/**
 * kvm_physical_log_clear - Clear the kernel's dirty bitmap for range
 *
 * NOTE: this will be a no-op if we haven't enabled manual dirty log
 * protection in the host kernel because in that case this operation
 * will be done within log_sync().
 *
 * @kml: the kvm memory listener
 * @section: the memory range to clear dirty bitmap
 */
static int kvm_physical_log_clear(KVMMemoryListener *kml,
                                  MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    uint64_t start, size, offset, count;
    KVMSlot *mem;
    int ret = 0, i;

    if (!s->manual_dirty_log_protect) {
        /* No need to do explicit clear */
        return ret;
    }

    start = section->offset_within_address_space;
    size = int128_get64(section->size);

    if (!size) {
        /* Nothing more we can do... */
        return ret;
    }

    kvm_slots_lock(kml);

    for (i = 0; i < s->nr_slots; i++) {
        mem = &kml->slots[i];
        /* Discard slots that are empty or do not overlap the section */
        if (!mem->memory_size ||
            mem->start_addr > start + size - 1 ||
            start > mem->start_addr + mem->memory_size - 1) {
            continue;
        }

        if (start >= mem->start_addr) {
            /* The slot starts before section or is aligned to it.  */
            offset = start - mem->start_addr;
            count = MIN(mem->memory_size - offset, size);
        } else {
            /* The slot starts after section.  */
            offset = 0;
            count = MIN(mem->memory_size, size - (mem->start_addr - start));
        }
        ret = kvm_log_clear_one_slot(mem, kml->as_id, offset, count);
        if (ret < 0) {
            break;
        }
    }

    kvm_slots_unlock(kml);

    return ret;
}

static void kvm_coalesce_mmio_region(MemoryListener *listener,
                                     MemoryRegionSection *section,
                                     hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
                                       MemoryRegionSection *section,
                                       hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }
}

static void kvm_coalesce_pio_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 hwaddr start, hwaddr size)
{
    KVMState *s = kvm_state;

    if (s->coalesced_pio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pio = 1;

        (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }
}

static MemoryListener kvm_coalesced_pio_listener = {
    .coalesced_io_add = kvm_coalesce_pio_add,
    .coalesced_io_del = kvm_coalesce_pio_del,
};

int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}

int kvm_vm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        /* VM wide version not implemented, use global one instead */
        ret = kvm_check_extension(s, extension);
    }

    return ret;
}

typedef struct HWPoisonPage {
    ram_addr_t ram_addr;
    QLIST_ENTRY(HWPoisonPage) list;
} HWPoisonPage;

static QLIST_HEAD(, HWPoisonPage) hwpoison_page_list =
    QLIST_HEAD_INITIALIZER(hwpoison_page_list);

static void kvm_unpoison_all(void *param)
{
    HWPoisonPage *page, *next_page;

    QLIST_FOREACH_SAFE(page, &hwpoison_page_list, list, next_page) {
        QLIST_REMOVE(page, list);
        qemu_ram_remap(page->ram_addr, TARGET_PAGE_SIZE);
        g_free(page);
    }
}

void kvm_hwpoison_page_add(ram_addr_t ram_addr)
{
    HWPoisonPage *page;

    QLIST_FOREACH(page, &hwpoison_page_list, list) {
        if (page->ram_addr == ram_addr) {
            return;
        }
    }
    page = g_new(HWPoisonPage, 1);
    page->ram_addr = ram_addr;
    QLIST_INSERT_HEAD(&hwpoison_page_list, page, list);
}

static uint32_t adjust_ioeventfd_endianness(uint32_t val, uint32_t size)
{
#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
    /* The kernel expects ioeventfd values in HOST_WORDS_BIGENDIAN
     * endianness, but the memory core hands them in target endianness.
     * For example, PPC is always treated as big-endian even if running
     * on KVM and on PPC64LE.  Correct here.
     */
    switch (size) {
    case 2:
        val = bswap16(val);
        break;
    case 4:
        val = bswap32(val);
        break;
    }
#endif
    return val;
}
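
/*
 * Illustration (hypothetical setup): a ppc64 guest, which the memory core
 * treats as big-endian, programs a 2-byte ioeventfd datamatch of 0x1234
 * while running on a little-endian ppc64le host; bswap16() converts the
 * value into the host byte order that the kernel will compare against.
 */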

static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
                                  bool assign, uint32_t size, bool datamatch)
{
    int ret;
    struct kvm_ioeventfd iofd = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .len = size,
        .flags = 0,
        .fd = fd,
    };

    trace_kvm_set_ioeventfd_mmio(fd, (uint64_t)addr, val, assign, size,
                                 datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (datamatch) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}

static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
                                 bool assign, uint32_t size, bool datamatch)
{
    struct kvm_ioeventfd kick = {
        .datamatch = datamatch ? adjust_ioeventfd_endianness(val, size) : 0,
        .addr = addr,
        .flags = KVM_IOEVENTFD_FLAG_PIO,
        .len = size,
        .fd = fd,
    };
    int r;

    trace_kvm_set_ioeventfd_pio(fd, addr, val, assign, size, datamatch);
    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (datamatch) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}

static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;

    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}

static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}

void kvm_set_max_memslot_size(hwaddr max_slot_size)
{
    g_assert(
        ROUND_UP(max_slot_size, qemu_real_host_page_size) == max_slot_size
    );
    kvm_max_slot_size = max_slot_size;
}

static void kvm_set_phys_mem(KVMMemoryListener *kml,
                             MemoryRegionSection *section, bool add)
{
    KVMSlot *mem;
    int err;
    MemoryRegion *mr = section->mr;
    bool writeable = !mr->readonly && !mr->rom_device;
    hwaddr start_addr, size, slot_size;
    void *ram;

    if (!memory_region_is_ram(mr)) {
        if (writeable || !kvm_readonly_mem_allowed) {
            return;
        } else if (!mr->romd_mode) {
            /* If the memory device is not in romd_mode, then we actually want
             * to remove the kvm memory slot so all accesses will trap. */
            add = false;
        }
    }

    size = kvm_align_section(section, &start_addr);
    if (!size) {
        return;
    }

    /* use aligned delta to align the ram address */
    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region +
          (start_addr - section->offset_within_address_space);

    kvm_slots_lock(kml);

    if (!add) {
        do {
            slot_size = MIN(kvm_max_slot_size, size);
            mem = kvm_lookup_matching_slot(kml, start_addr, slot_size);
            if (!mem) {
                goto out;
            }
            if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
                kvm_physical_sync_dirty_bitmap(kml, section);
            }

            /* unregister the slot */
            g_free(mem->dirty_bmap);
            mem->dirty_bmap = NULL;
            mem->memory_size = 0;
            mem->flags = 0;
            err = kvm_set_user_memory_region(kml, mem, false);
            if (err) {
                fprintf(stderr, "%s: error unregistering slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
            start_addr += slot_size;
            size -= slot_size;
        } while (size);
        goto out;
    }

    /* register the new slot */
    do {
        slot_size = MIN(kvm_max_slot_size, size);
        mem = kvm_alloc_slot(kml);
        mem->memory_size = slot_size;
        mem->start_addr = start_addr;
        mem->ram = ram;
        mem->flags = kvm_mem_flags(mr);

        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            /*
             * Allocate the bitmap up front so that it can't disappear
             * in the middle of a migration.
             */
            kvm_memslot_init_dirty_bitmap(mem);
        }
        err = kvm_set_user_memory_region(kml, mem, true);
        if (err) {
            fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                    strerror(-err));
            abort();
        }
        start_addr += slot_size;
        ram += slot_size;
        size -= slot_size;
    } while (size);

out:
    kvm_slots_unlock(kml);
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    memory_region_ref(section->mr);
    kvm_set_phys_mem(kml, section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);

    kvm_set_phys_mem(kml, section, false);
    memory_region_unref(section->mr);
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    kvm_slots_lock(kml);
    r = kvm_physical_sync_dirty_bitmap(kml, section);
    kvm_slots_unlock(kml);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_clear(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    KVMMemoryListener *kml = container_of(listener, KVMMemoryListener, listener);
    int r;

    r = kvm_physical_log_clear(kml, section);
    if (r < 0) {
        error_report_once("%s: kvm log clear failed: mr=%s "
                          "offset=%"HWADDR_PRIx" size=%"PRIx64, __func__,
                          section->mr->name, section->offset_within_region,
                          int128_get64(section->size));
        abort();
    }
}

static void kvm_mem_ioeventfd_add(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryListener *listener,
                                  MemoryRegionSection *section,
                                  bool match_data, uint64_t data,
                                  EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, int128_get64(section->size),
                               match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_add(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, true, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error adding ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryListener *listener,
                                 MemoryRegionSection *section,
                                 bool match_data, uint64_t data,
                                 EventNotifier *e)
{
    int fd = event_notifier_get_fd(e);
    int r;

    r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
                              data, false, int128_get64(section->size),
                              match_data);
    if (r < 0) {
        fprintf(stderr, "%s: error deleting ioeventfd: %s (%d)\n",
                __func__, strerror(-r), -r);
        abort();
    }
}

void kvm_memory_listener_register(KVMState *s, KVMMemoryListener *kml,
                                  AddressSpace *as, int as_id)
{
    int i;

    qemu_mutex_init(&kml->slots_lock);
    kml->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
    kml->as_id = as_id;

    for (i = 0; i < s->nr_slots; i++) {
        kml->slots[i].slot = i;
    }

    kml->listener.region_add = kvm_region_add;
    kml->listener.region_del = kvm_region_del;
    kml->listener.log_start = kvm_log_start;
    kml->listener.log_stop = kvm_log_stop;
    kml->listener.log_sync = kvm_log_sync;
    kml->listener.log_clear = kvm_log_clear;
    kml->listener.priority = 10;

    memory_listener_register(&kml->listener, as);

    for (i = 0; i < s->nr_as; ++i) {
        if (!s->as[i].as) {
            s->as[i].as = as;
            s->as[i].ml = kml;
            break;
        }
    }
}

static MemoryListener kvm_io_listener = {
    .eventfd_add = kvm_io_ioeventfd_add,
    .eventfd_del = kvm_io_ioeventfd_del,
    .priority = 10,
};

int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
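
/*
 * The legacy KVM_IRQ_LINE ioctl cannot report a delivery status, so we
 * claim success (1) unconditionally in that case; KVM_IRQ_LINE_STATUS,
 * selected in kvm_init() when KVM_CAP_IRQ_INJECT_STATUS is present,
 * returns the real status in event.status.
 */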

#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    set_bit(gsi, s->used_gsi_bitmap);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    clear_bit(gsi, s->used_gsi_bitmap);
}

void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
    if (gsi_count > 0) {
        /* Round up so we can search ints using ffs */
        s->used_gsi_bitmap = bitmap_new(gsi_count);
        s->gsi_count = gsi_count;
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!kvm_direct_msi_allowed) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}

void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    if (!kvm_gsi_routing_enabled()) {
        return;
    }

    s->irq_routes->flags = 0;
    trace_kvm_irqchip_commit_routes();
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}

static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];

    *new = *entry;

    set_gsi(s, entry->gsi);
}

static int kvm_update_routing_entry(KVMState *s,
                                    struct kvm_irq_routing_entry *new_entry)
{
    struct kvm_irq_routing_entry *entry;
    int n;

    for (n = 0; n < s->irq_routes->nr; n++) {
        entry = &s->irq_routes->entries[n];
        if (entry->gsi != new_entry->gsi) {
            continue;
        }

        if (!memcmp(entry, new_entry, sizeof *entry)) {
            return 0;
        }

        *entry = *new_entry;

        return 0;
    }

    return -ESRCH;
}

void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e = {};

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    if (kvm_gsi_direct_mapping()) {
        return;
    }

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);
    kvm_arch_release_virq_post(virq);
    trace_kvm_irqchip_release_virq(virq);
}

void kvm_irqchip_add_change_notifier(Notifier *n)
{
    notifier_list_add(&kvm_irqchip_change_notifiers, n);
}

void kvm_irqchip_remove_change_notifier(Notifier *n)
{
    notifier_remove(n);
}

void kvm_irqchip_change_notify(void)
{
    notifier_list_notify(&kvm_irqchip_change_notifiers, NULL);
}

static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}
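
/*
 * On IA32 the low byte of the MSI data is the interrupt vector, so taking
 * "data & 0xff" spreads distinct vectors evenly across the
 * KVM_MSI_HASHTAB_SIZE buckets used by kvm_lookup_msi_route() below.
 */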

static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}

static int kvm_irqchip_get_virq(KVMState *s)
{
    int next_virq;

    /*
     * PIC and IOAPIC share the first 16 GSI numbers, thus the available
     * GSI numbers are more than the number of IRQ route. Allocating a GSI
     * number can succeed even though a new route entry cannot be added.
     * When this happens, flush dynamic MSI entries to free IRQ route entries.
     */
    if (!kvm_direct_msi_allowed && s->irq_routes->nr == s->gsi_count) {
        kvm_flush_dynamic_msi_routes(s);
    }

    /* Return the lowest unused GSI in the bitmap */
    next_virq = find_first_zero_bit(s->used_gsi_bitmap, s->gsi_count);
    if (next_virq >= s->gsi_count) {
        return -ENOSPC;
    } else {
        return next_virq;
    }
}

static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
            return route;
        }
    }
    return NULL;
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (kvm_direct_msi_allowed) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = le32_to_cpu(msg.data);
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc0(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = le32_to_cpu(msg.data);

        kvm_add_routing_entry(s, &route->kroute);
        kvm_irqchip_commit_routes(s);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}
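
/*
 * Summary of the two paths above: with KVM_CAP_SIGNAL_MSI the message is
 * injected directly through KVM_SIGNAL_MSI; otherwise a GSI route is
 * allocated once, cached in msi_hashtab, and the interrupt is raised by
 * pulsing that GSI with kvm_set_irq().
 */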

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;
    MSIMessage msg = {0, 0};

    if (pci_available && dev) {
        msg = pci_get_msi_message(dev, vector);
    }

    if (kvm_gsi_direct_mapping()) {
        return kvm_arch_msi_data_to_gsi(msg.data);
    }

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        kvm_irqchip_release_virq(s, virq);
        return -EINVAL;
    }

    trace_kvm_irqchip_add_msi_route(dev ? dev->name : (char *)"N/A",
                                    vector, virq);

    kvm_add_routing_entry(s, &kroute);
    kvm_arch_add_msi_route_post(&kroute, vector, dev);
    kvm_irqchip_commit_routes(s);

    return virq;
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
                                 PCIDevice *dev)
{
    struct kvm_irq_routing_entry kroute = {};

    if (kvm_gsi_direct_mapping()) {
        return 0;
    }

    if (!kvm_irqchip_in_kernel()) {
        return -ENOSYS;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = le32_to_cpu(msg.data);
    if (pci_available && kvm_msi_devid_required()) {
        kroute.flags = KVM_MSI_VALID_DEVID;
        kroute.u.msi.devid = pci_requester_id(dev);
    }
    if (kvm_arch_fixup_msi_route(&kroute, msg.address, msg.data, dev)) {
        return -EINVAL;
    }

    trace_kvm_irqchip_update_msi_route(virq);

    return kvm_update_routing_entry(s, &kroute);
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    int fd = event_notifier_get_fd(event);
    int rfd = resample ? event_notifier_get_fd(resample) : -1;

    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (rfd != -1) {
        assert(assign);
        if (kvm_irqchip_is_split()) {
            /*
             * When the slow irqchip (e.g. IOAPIC) is in the
             * userspace, KVM kernel resamplefd will not work because
             * the EOI of the interrupt will be delivered to userspace
             * instead, so the KVM kernel resamplefd kick will be
             * skipped.  The userspace here mimics what the kernel
             * provides with resamplefd, remember the resamplefd and
             * kick it when we receive EOI of this IRQ.
             *
             * This is hackery because IOAPIC is mostly bypassed
             * (except EOI broadcasts) when irqfd is used.  However
             * this can bring much performance back for split irqchip
             * with INTx IRQs (for VFIO, this gives 93% perf of the
             * full fast path, which is a 46% perf boost compared to
             * the INTx slow path).
             */
            kvm_resample_fd_insert(virq, resample);
        } else {
            irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
            irqfd.resamplefd = rfd;
        }
    } else if (!assign) {
        if (kvm_irqchip_is_split()) {
            kvm_resample_fd_remove(virq);
        }
    }

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
    kroute.flags = 0;
    kroute.u.adapter.summary_addr = adapter->summary_addr;
    kroute.u.adapter.ind_addr = adapter->ind_addr;
    kroute.u.adapter.summary_offset = adapter->summary_offset;
    kroute.u.adapter.ind_offset = adapter->ind_offset;
    kroute.u.adapter.adapter_id = adapter->adapter_id;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    struct kvm_irq_routing_entry kroute = {};
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }
    if (!kvm_check_extension(s, KVM_CAP_HYPERV_SYNIC)) {
        return -ENOSYS;
    }
    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_HV_SINT;
    kroute.flags = 0;
    kroute.u.hv_sint.vcpu = vcpu;
    kroute.u.hv_sint.sint = sint;

    kvm_add_routing_entry(s, &kroute);
    kvm_irqchip_commit_routes(s);

    return virq;
}

#else /* !KVM_CAP_IRQ_ROUTING */

void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, int vector, PCIDevice *dev)
{
    return -ENOSYS;
}

int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
{
    return -ENOSYS;
}

int kvm_irqchip_add_hv_sint_route(KVMState *s, uint32_t vcpu, uint32_t sint)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, EventNotifier *event,
                                    EventNotifier *resample, int virq,
                                    bool assign)
{
    abort();
}

int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
{
    return -ENOSYS;
}
#endif /* !KVM_CAP_IRQ_ROUTING */

int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                       EventNotifier *rn, int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, rn, virq, true);
}

int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                          int virq)
{
    return kvm_irqchip_assign_irqfd(s, n, NULL, virq, false);
}

int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
                                   EventNotifier *rn, qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_add_irqfd_notifier_gsi(s, n, rn, GPOINTER_TO_INT(gsi));
}

int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n,
                                      qemu_irq irq)
{
    gpointer key, gsi;
    gboolean found = g_hash_table_lookup_extended(s->gsimap, irq, &key, &gsi);

    if (!found) {
        return -ENXIO;
    }
    return kvm_irqchip_remove_irqfd_notifier_gsi(s, n, GPOINTER_TO_INT(gsi));
}

void kvm_irqchip_set_qemuirq_gsi(KVMState *s, qemu_irq irq, int gsi)
{
    g_hash_table_insert(s->gsimap, irq, GINT_TO_POINTER(gsi));
}

static void kvm_irqchip_create(KVMState *s)
{
    int ret;

    assert(s->kernel_irqchip_split != ON_OFF_AUTO_AUTO);
    if (kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        ;
    } else if (kvm_check_extension(s, KVM_CAP_S390_IRQCHIP)) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0);
        if (ret < 0) {
            fprintf(stderr, "Enable kernel irqchip failed: %s\n", strerror(-ret));
            exit(1);
        }
    } else {
        return;
    }

    /* First probe and see if there's an arch-specific hook to create the
     * in-kernel irqchip for us */
    ret = kvm_arch_irqchip_create(s);
    if (ret == 0) {
        if (s->kernel_irqchip_split == ON_OFF_AUTO_ON) {
            perror("Split IRQ chip mode not supported.");
            exit(1);
        } else {
            ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed: %s\n", strerror(-ret));
        exit(1);
    }

    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;
    kvm_halt_in_kernel_allowed = true;

    kvm_init_irq_routing(s);

    s->gsimap = g_hash_table_new(g_direct_hash, g_direct_equal);
}

/* Find the number of supported CPUs using the recommended
 * procedure from the kernel API documentation to cope with
 * older kernels that may be missing capabilities.
 */
static int kvm_recommended_vcpus(KVMState *s)
{
    int ret = kvm_vm_check_extension(s, KVM_CAP_NR_VCPUS);
    return (ret) ? ret : 4;
}

static int kvm_max_vcpus(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    return (ret) ? ret : kvm_recommended_vcpus(s);
}

static int kvm_max_vcpu_id(KVMState *s)
{
    int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPU_ID);
    return (ret) ? ret : kvm_max_vcpus(s);
}

bool kvm_vcpu_id_is_valid(int vcpu_id)
{
    KVMState *s = KVM_STATE(current_accel());
    return vcpu_id >= 0 && vcpu_id < kvm_max_vcpu_id(s);
}

static int kvm_init(MachineState *ms)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    struct {
        const char *name;
        int num;
    } num_cpus[] = {
        { "SMP",          ms->smp.cpus },
        { "hotpluggable", ms->smp.max_cpus },
        { NULL, }
    }, *nc = num_cpus;
    int soft_vcpus_limit, hard_vcpus_limit;
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int type = 0;
    const char *kvm_type;
    uint64_t dirty_log_manual_caps;

    s = KVM_STATE(ms->accelerator);

    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= qemu_real_host_page_size);

    s->sigmask_len = 8;

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    QLIST_INIT(&s->kvm_parked_vcpus);
    s->vmfd = -1;
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret >= 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }

    kvm_immediate_exit = kvm_check_extension(s, KVM_CAP_IMMEDIATE_EXIT);
    s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);

    /* If unspecified, use the default value */
    if (!s->nr_slots) {
        s->nr_slots = 32;
    }

    s->nr_as = kvm_check_extension(s, KVM_CAP_MULTI_ADDRESS_SPACE);
    if (s->nr_as <= 1) {
        s->nr_as = 1;
    }
    s->as = g_new0(struct KVMAs, s->nr_as);

    kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
    if (mc->kvm_type) {
        type = mc->kvm_type(ms, kvm_type);
    } else if (kvm_type) {
        ret = -EINVAL;
        fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
        goto err;
    }

    do {
        ret = kvm_ioctl(s, KVM_CREATE_VM, type);
    } while (ret == -EINTR);

    if (ret < 0) {
        fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
                strerror(-ret));

#ifdef TARGET_S390X
        if (ret == -EINVAL) {
            fprintf(stderr,
                    "Host kernel setup problem detected. Please verify:\n");
            fprintf(stderr, "- for kernels supporting the switch_amode or"
                    " user_mode parameters, whether\n");
            fprintf(stderr,
                    "  user space is running in primary address space\n");
            fprintf(stderr,
                    "- for kernels supporting the vm.allocate_pgste sysctl, "
                    "whether it is enabled\n");
        }
#endif
        goto err;
    }

    s->vmfd = ret;

    /* check the vcpu limits */
    soft_vcpus_limit = kvm_recommended_vcpus(s);
    hard_vcpus_limit = kvm_max_vcpus(s);

    while (nc->name) {
        if (nc->num > soft_vcpus_limit) {
            warn_report("Number of %s cpus requested (%d) exceeds "
                        "the recommended cpus supported by KVM (%d)",
                        nc->name, nc->num, soft_vcpus_limit);

            if (nc->num > hard_vcpus_limit) {
                fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
                        "the maximum cpus supported by KVM (%d)\n",
                        nc->name, nc->num, hard_vcpus_limit);
                exit(1);
            }
        }
        nc++;
    }

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }

    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
    s->coalesced_pio = s->coalesced_mmio &&
                       kvm_check_extension(s, KVM_CAP_COALESCED_PIO);

    dirty_log_manual_caps =
        kvm_check_extension(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
    dirty_log_manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                              KVM_DIRTY_LOG_INITIALLY_SET);
    s->manual_dirty_log_protect = dirty_log_manual_caps;
    if (dirty_log_manual_caps) {
        ret = kvm_vm_enable_cap(s, KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2, 0,
                                dirty_log_manual_caps);
        if (ret) {
            warn_report("Trying to enable capability %"PRIu64" of "
                        "KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 but failed. "
                        "Falling back to the legacy mode.",
                        dirty_log_manual_caps);
            s->manual_dirty_log_protect = 0;
        }
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

    s->max_nested_state_len = kvm_check_extension(s, KVM_CAP_NESTED_STATE);

#ifdef KVM_CAP_IRQ_ROUTING
    kvm_direct_msi_allowed = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif

    s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);

    s->irq_set_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
    }

    kvm_readonly_mem_allowed =
        (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);

    kvm_eventfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);

    kvm_irqfds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD) > 0);

    kvm_resamplefds_allowed =
        (kvm_check_extension(s, KVM_CAP_IRQFD_RESAMPLE) > 0);

    kvm_vm_attributes_allowed =
        (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES) > 0);

    kvm_ioeventfd_any_length_allowed =
        (kvm_check_extension(s, KVM_CAP_IOEVENTFD_ANY_LENGTH) > 0);

    kvm_state = s;

    /*
     * if memory encryption object is specified then initialize the memory
     * encryption context.
     */
    if (ms->memory_encryption) {
        kvm_state->memcrypt_handle = sev_guest_init(ms->memory_encryption);
        if (!kvm_state->memcrypt_handle) {
            ret = -1;
            goto err;
        }

        kvm_state->memcrypt_encrypt_data = sev_encrypt_data;
    }

    ret = kvm_arch_init(ms, s);
    if (ret < 0) {
        goto err;
    }

    if (s->kernel_irqchip_split == ON_OFF_AUTO_AUTO) {
        s->kernel_irqchip_split = mc->default_kernel_irqchip_split ?
                                  ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    qemu_register_reset(kvm_unpoison_all, NULL);

    if (s->kernel_irqchip_allowed) {
        kvm_irqchip_create(s);
    }

    if (kvm_eventfds_allowed) {
        s->memory_listener.listener.eventfd_add = kvm_mem_ioeventfd_add;
        s->memory_listener.listener.eventfd_del = kvm_mem_ioeventfd_del;
    }
    s->memory_listener.listener.coalesced_io_add = kvm_coalesce_mmio_region;
    s->memory_listener.listener.coalesced_io_del = kvm_uncoalesce_mmio_region;

    kvm_memory_listener_register(s, &s->memory_listener,
                                 &address_space_memory, 0);
    memory_listener_register(&kvm_io_listener,
                             &address_space_io);
    memory_listener_register(&kvm_coalesced_pio_listener,
                             &address_space_io);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    s->sync_mmu = !!kvm_vm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
    if (!s->sync_mmu) {
        ret = ram_block_discard_disable(true);
        assert(!ret);
    }

    return 0;

err:
    assert(ret < 0);
    if (s->vmfd >= 0) {
        close(s->vmfd);
    }
    if (s->fd != -1) {
        close(s->fd);
    }
    g_free(s->memory_listener.slots);

    return ret;
}

void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
{
    s->sigmask_len = sigmask_len;
}

static void kvm_handle_io(uint16_t port, MemTxAttrs attrs, void *data, int direction,
                          int size, uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, attrs,
                         ptr, size,
                         direction == KVM_EXIT_IO_OUT);
        ptr += size;
    }
}

static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error. Suberror: %d\n",
            run->internal.suberror);

    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(cpu)) {
            cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}

void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            if (ent->pio == 1) {
                address_space_write(&address_space_io, ent->phys_addr,
                                    MEMTXATTRS_UNSPECIFIED, ent->data,
                                    ent->len);
            } else {
                cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            }
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
    }

    s->coalesced_flush_in_progress = false;
}

static void do_kvm_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        kvm_arch_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void kvm_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_kvm_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_kvm_cpu_synchronize_post_reset(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_post_init(CPUState *cpu, run_on_cpu_data arg)
{
    kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
    cpu->vcpu_dirty = false;
}

void kvm_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_kvm_cpu_synchronize_pre_loadvm(CPUState *cpu, run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_kvm_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
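
/*
 * The vcpu_dirty flag used above implements a write-back cache of the
 * vcpu register state: fetching registers marks QEMU's copy authoritative,
 * and kvm_cpu_exec() flushes it back with KVM_PUT_RUNTIME_STATE before
 * the next KVM_RUN.
 */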

#ifdef KVM_HAVE_MCE_INJECTION
static __thread void *pending_sigbus_addr;
static __thread int pending_sigbus_code;
static __thread bool have_sigbus_pending;
#endif

static void kvm_cpu_kick(CPUState *cpu)
{
    atomic_set(&cpu->kvm_run->immediate_exit, 1);
}

static void kvm_cpu_kick_self(void)
{
    if (kvm_immediate_exit) {
        kvm_cpu_kick(current_cpu);
    } else {
        qemu_cpu_kick_self();
    }
}

static void kvm_eat_signals(CPUState *cpu)
{
    struct timespec ts = { 0, 0 };
    siginfo_t siginfo;
    sigset_t waitset;
    sigset_t chkset;
    int r;

    if (kvm_immediate_exit) {
        atomic_set(&cpu->kvm_run->immediate_exit, 0);
        /* Write kvm_run->immediate_exit before the cpu->exit_request
         * write in kvm_cpu_exec.
         */
        smp_wmb();
        return;
    }

    sigemptyset(&waitset);
    sigaddset(&waitset, SIG_IPI);

    do {
        r = sigtimedwait(&waitset, &siginfo, &ts);
        if (r == -1 && !(errno == EAGAIN || errno == EINTR)) {
            perror("sigtimedwait");
            exit(1);
        }

        r = sigpending(&chkset);
        if (r == -1) {
            perror("sigpending");
            exit(1);
        }
    } while (sigismember(&chkset, SIG_IPI));
}

int kvm_cpu_exec(CPUState *cpu)
{
    struct kvm_run *run = cpu->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(cpu)) {
        atomic_set(&cpu->exit_request, 0);
        return EXCP_HLT;
    }

    qemu_mutex_unlock_iothread();
    cpu_exec_start(cpu);

    do {
        MemTxAttrs attrs;

        if (cpu->vcpu_dirty) {
            kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
            cpu->vcpu_dirty = false;
        }

        kvm_arch_pre_run(cpu, run);
        if (atomic_read(&cpu->exit_request)) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            kvm_cpu_kick_self();
        }

        /* Read cpu->exit_request before KVM_RUN reads run->immediate_exit.
         * Matching barrier in kvm_eat_signals.
         */
        smp_rmb();

        run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);

        attrs = kvm_arch_post_run(cpu, run);

#ifdef KVM_HAVE_MCE_INJECTION
        if (unlikely(have_sigbus_pending)) {
            qemu_mutex_lock_iothread();
            kvm_arch_on_sigbus_vcpu(cpu, pending_sigbus_code,
                                    pending_sigbus_addr);
            have_sigbus_pending = false;
            qemu_mutex_unlock_iothread();
        }
#endif

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                kvm_eat_signals(cpu);
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
#ifdef TARGET_PPC
            if (run_ret == -EBUSY) {
                fprintf(stderr,
                        "This is probably because your SMT is enabled.\n"
                        "VCPU can only run on primary threads with all "
                        "secondary threads offline.\n");
            }
#endif
            ret = -1;
            break;
        }

        trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            /* Called outside BQL */
            kvm_handle_io(run->io.port, attrs,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            /* Called outside BQL */
            address_space_rw(&address_space_memory,
                             run->mmio.phys_addr, attrs,
                             run->mmio.data,
                             run->mmio.len,
                             run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(cpu, run);
            break;
        case KVM_EXIT_SYSTEM_EVENT:
            switch (run->system_event.type) {
            case KVM_SYSTEM_EVENT_SHUTDOWN:
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_RESET:
                qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
                ret = EXCP_INTERRUPT;
                break;
            case KVM_SYSTEM_EVENT_CRASH:
                kvm_cpu_synchronize_state(cpu);
                qemu_mutex_lock_iothread();
                qemu_system_guest_panicked(cpu_get_crash_info(cpu));
                qemu_mutex_unlock_iothread();
                ret = 0;
                break;
            default:
                DPRINTF("kvm_arch_handle_exit\n");
                ret = kvm_arch_handle_exit(cpu, run);
                break;
            }
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(cpu, run);
            break;
        }
    } while (ret == 0);

    cpu_exec_end(cpu);
    qemu_mutex_lock_iothread();

    if (ret < 0) {
        cpu_dump_state(cpu, stderr, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    atomic_set(&cpu->exit_request, 0);
    return ret;
}

int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_ioctl(type, arg);
    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vm_ioctl(type, arg);
    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
    ret = ioctl(cpu->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_device_ioctl(int fd, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    trace_kvm_device_ioctl(fd, type, arg);
    ret = ioctl(fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}


int kvm_vm_check_attr(KVMState *s, uint32_t group, uint64_t attr)
{
    int ret;
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    if (!kvm_vm_attributes_allowed) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attribute);
    /* kvm returns 0 on success for HAS_DEVICE_ATTR */
    return ret ? 0 : 1;
}

int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
{
    struct kvm_device_attr attribute = {
        .group = group,
        .attr = attr,
        .flags = 0,
    };

    return kvm_device_ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute) ? 0 : 1;
}

int kvm_device_access(int fd, int group, uint64_t attr,
                      void *val, bool write, Error **errp)
{
    struct kvm_device_attr kvmattr;
    int err;

    kvmattr.flags = 0;
    kvmattr.group = group;
    kvmattr.attr = attr;
    kvmattr.addr = (uintptr_t)val;

    err = kvm_device_ioctl(fd,
                           write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
                           &kvmattr);
    if (err < 0) {
        error_setg_errno(errp, -err,
                         "KVM_%s_DEVICE_ATTR failed: Group %d "
                         "attr 0x%016" PRIx64,
                         write ? "SET" : "GET", group, attr);
    }
    return err;
}
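
#if 0
/*
 * Illustrative sketch (hypothetical helper): probing for a device attribute
 * before writing it.  The group/attr values a real caller passes come from
 * the kernel's device-specific UAPI headers; none are assumed here.
 */
static int example_write_attr(int dev_fd, uint32_t group, uint64_t attr,
                              uint64_t value, Error **errp)
{
    if (!kvm_device_check_attr(dev_fd, group, attr)) {
        return -ENOTSUP;
    }
    return kvm_device_access(dev_fd, group, attr, &value, true, errp);
}
#endif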

bool kvm_has_sync_mmu(void)
{
    return kvm_state->sync_mmu;
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_max_nested_state_length(void)
{
    return kvm_state->max_nested_state_len;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}

int kvm_has_intx_set_mask(void)
{
    return kvm_state->intx_set_mask;
}

bool kvm_arm_supports_user_irq(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_ARM_USER_IRQ);
}

#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUState *cpu)
{
    return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
}

struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    int err;
};

static void kvm_invoke_set_guest_debug(CPUState *cpu, run_on_cpu_data data)
{
    struct kvm_set_guest_debug_data *dbg_data =
        (struct kvm_set_guest_debug_data *) data.host_ptr;

    dbg_data->err = kvm_vcpu_ioctl(cpu, KVM_SET_GUEST_DEBUG,
                                   &dbg_data->dbg);
}

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (cpu->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(cpu, &data.dbg);

    run_on_cpu(cpu, kvm_invoke_set_guest_debug,
               RUN_ON_CPU_HOST_PTR(&data));
    return data.err;
}
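
/*
 * Note on the pattern above (descriptive, not from the original comments):
 * KVM_SET_GUEST_DEBUG must be issued on the vCPU's own thread, which is why
 * kvm_update_guest_debug() funnels the ioctl through run_on_cpu().  Callers
 * only toggle QEMU-side state first; e.g. x86's kvm_handle_debug() passes
 * KVM_GUESTDBG_INJECT_BP as reinject_trap to hand an unclaimed breakpoint
 * exception back to the guest.
 */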

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(cpu, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(cpu, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(cpu, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    CPU_FOREACH(cpu) {
        err = kvm_update_guest_debug(cpu, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
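
#if 0
/*
 * Illustrative sketch (hypothetical helper): how a gdbstub request might
 * plant and then clear a software breakpoint at a guest pc.  Addresses and
 * lengths normally come straight from the gdb remote protocol packet.
 */
static int example_toggle_sw_breakpoint(CPUState *cpu, target_ulong pc)
{
    int err = kvm_insert_breakpoint(cpu, pc, 1, GDB_BREAKPOINT_SW);

    if (err) {
        return err;
    }
    return kvm_remove_breakpoint(cpu, pc, 1, GDB_BREAKPOINT_SW);
}
#endif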

void kvm_remove_all_breakpoints(CPUState *cpu)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = cpu->kvm_state;
    CPUState *tmpcpu;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            CPU_FOREACH(tmpcpu) {
                if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
                    break;
                }
            }
        }
        QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    }
    kvm_arch_remove_all_hw_breakpoints();

    CPU_FOREACH(cpu) {
        kvm_update_guest_debug(cpu, 0);
    }
}

#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUState *cpu)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */

static int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
{
    KVMState *s = kvm_state;
    struct kvm_signal_mask *sigmask;
    int r;

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = s->sigmask_len;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}

static void kvm_ipi_signal(int sig)
{
    if (current_cpu) {
        assert(kvm_immediate_exit);
        kvm_cpu_kick(current_cpu);
    }
}

void kvm_init_cpu_signals(CPUState *cpu)
{
    int r;
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = kvm_ipi_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
#if defined KVM_HAVE_MCE_INJECTION
    sigdelset(&set, SIGBUS);
    pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif
    sigdelset(&set, SIG_IPI);
    if (kvm_immediate_exit) {
        r = pthread_sigmask(SIG_SETMASK, &set, NULL);
    } else {
        r = kvm_set_signal_mask(cpu, &set);
    }
    if (r) {
        fprintf(stderr, "kvm_set_signal_mask: %s\n", strerror(-r));
        exit(1);
    }
}

/* Called asynchronously in VCPU thread. */
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    if (have_sigbus_pending) {
        return 1;
    }
    have_sigbus_pending = true;
    pending_sigbus_addr = addr;
    pending_sigbus_code = code;
    atomic_set(&cpu->exit_request, 1);
    return 0;
#else
    return 1;
#endif
}

/* Called synchronously (via signalfd) in main thread. */
int kvm_on_sigbus(int code, void *addr)
{
#ifdef KVM_HAVE_MCE_INJECTION
    /* Action required MCE kills the process if SIGBUS is blocked. Because
     * that's what happens in the I/O thread, where we handle MCE via signalfd,
     * we can only get action optional here.
     */
    assert(code != BUS_MCEERR_AR);
    kvm_arch_on_sigbus_vcpu(first_cpu, code, addr);
    return 0;
#else
    return 1;
#endif
}

int kvm_create_device(KVMState *s, uint64_t type, bool test)
{
    int ret;
    struct kvm_create_device create_dev;

    create_dev.type = type;
    create_dev.fd = -1;
    create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;

    if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
        return -ENOTSUP;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
    if (ret) {
        return ret;
    }

    return test ? 0 : create_dev.fd;
}
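
#if 0
/*
 * Illustrative sketch (hypothetical helper): probing for, then creating, an
 * in-kernel VFIO device.  With test=true the call only checks that the
 * device type is supported; with test=false it returns the new device fd,
 * which is then driven through kvm_device_ioctl()/kvm_device_access().
 */
static int example_create_vfio_device(KVMState *s)
{
    if (kvm_create_device(s, KVM_DEV_TYPE_VFIO, true) < 0) {
        return -ENOTSUP; /* kernel lacks this device type */
    }
    return kvm_create_device(s, KVM_DEV_TYPE_VFIO, false);
}
#endif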

bool kvm_device_supported(int vmfd, uint64_t type)
{
    struct kvm_create_device create_dev = {
        .type = type,
        .fd = -1,
        .flags = KVM_CREATE_DEVICE_TEST,
    };

    if (ioctl(vmfd, KVM_CHECK_EXTENSION, KVM_CAP_DEVICE_CTRL) <= 0) {
        return false;
    }

    return (ioctl(vmfd, KVM_CREATE_DEVICE, &create_dev) >= 0);
}

int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) source;
    r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_set(id, strerror(-r));
    }
    return r;
}

int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
{
    struct kvm_one_reg reg;
    int r;

    reg.id = id;
    reg.addr = (uintptr_t) target;
    r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
    if (r) {
        trace_kvm_failed_reg_get(id, strerror(-r));
    }
    return r;
}
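
#if 0
/*
 * Illustrative sketch (hypothetical id): KVM_GET/SET_ONE_REG register ids
 * encode architecture, size and register number (e.g. KVM_REG_ARM64 |
 * KVM_REG_SIZE_U64 | ...), so the id parameter below is a placeholder and
 * must come from the target's kvm code in real use.
 */
static int example_reg_roundtrip(CPUState *cs, uint64_t hypothetical_id)
{
    uint64_t val;
    int r = kvm_get_one_reg(cs, hypothetical_id, &val);

    if (r) {
        return r;
    }
    return kvm_set_one_reg(cs, hypothetical_id, &val);
}
#endif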

static bool kvm_accel_has_memory(MachineState *ms, AddressSpace *as,
                                 hwaddr start_addr, hwaddr size)
{
    KVMState *kvm = KVM_STATE(ms->accelerator);
    int i;

    for (i = 0; i < kvm->nr_as; ++i) {
        if (kvm->as[i].as == as && kvm->as[i].ml) {
            size = MIN(kvm_max_slot_size, size);
            return NULL != kvm_lookup_matching_slot(kvm->as[i].ml,
                                                    start_addr, size);
        }
    }

    return false;
}

static void kvm_get_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value = s->kvm_shadow_mem;

    visit_type_int(v, name, &value, errp);
}

static void kvm_set_kvm_shadow_mem(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    int64_t value;

    if (!visit_type_int(v, name, &value, errp)) {
        return;
    }

    s->kvm_shadow_mem = value;
}

static void kvm_set_kernel_irqchip(Object *obj, Visitor *v,
                                   const char *name, void *opaque,
                                   Error **errp)
{
    KVMState *s = KVM_STATE(obj);
    OnOffSplit mode;

    if (!visit_type_OnOffSplit(v, name, &mode, errp)) {
        return;
    }
    switch (mode) {
    case ON_OFF_SPLIT_ON:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_OFF:
        s->kernel_irqchip_allowed = false;
        s->kernel_irqchip_required = false;
        s->kernel_irqchip_split = ON_OFF_AUTO_OFF;
        break;
    case ON_OFF_SPLIT_SPLIT:
        s->kernel_irqchip_allowed = true;
        s->kernel_irqchip_required = true;
        s->kernel_irqchip_split = ON_OFF_AUTO_ON;
        break;
    default:
        /* The value was checked in visit_type_OnOffSplit() above. If
         * we get here, then something is wrong in QEMU.
         */
        abort();
    }
}

bool kvm_kernel_irqchip_allowed(void)
{
    return kvm_state->kernel_irqchip_allowed;
}

bool kvm_kernel_irqchip_required(void)
{
    return kvm_state->kernel_irqchip_required;
}

bool kvm_kernel_irqchip_split(void)
{
    return kvm_state->kernel_irqchip_split == ON_OFF_AUTO_ON;
}

static void kvm_accel_instance_init(Object *obj)
{
    KVMState *s = KVM_STATE(obj);

    s->kvm_shadow_mem = -1;
    s->kernel_irqchip_allowed = true;
    s->kernel_irqchip_split = ON_OFF_AUTO_AUTO;
}

static void kvm_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "KVM";
    ac->init_machine = kvm_init;
    ac->has_memory = kvm_accel_has_memory;
    ac->allowed = &kvm_allowed;

    object_class_property_add(oc, "kernel-irqchip", "on|off|split",
        NULL, kvm_set_kernel_irqchip,
        NULL, NULL);
    object_class_property_set_description(oc, "kernel-irqchip",
        "Configure KVM in-kernel irqchip");

    object_class_property_add(oc, "kvm-shadow-mem", "int",
        kvm_get_kvm_shadow_mem, kvm_set_kvm_shadow_mem,
        NULL, NULL);
    object_class_property_set_description(oc, "kvm-shadow-mem",
        "KVM shadow MMU size");
}

static const TypeInfo kvm_accel_type = {
    .name = TYPE_KVM_ACCEL,
    .parent = TYPE_ACCEL,
    .instance_init = kvm_accel_instance_init,
    .class_init = kvm_accel_class_init,
    .instance_size = sizeof(KVMState),
};

static void kvm_type_init(void)
{
    type_register_static(&kvm_accel_type);
}

type_init(kvm_type_init);
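
/*
 * type_init() arranges for kvm_type_init() to run during QOM module
 * initialization, registering TYPE_KVM_ACCEL so that "-accel kvm" (or the
 * older "-machine accel=kvm") resolves to the AccelClass defined above.
 */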