/*
 * QEMU KVM support
 *
 * Copyright IBM, Corp. 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "qemu-option.h"
#include "qemu-config.h"

#include "exec-memory.h"
#include "event_notifier.h"
/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif
/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define KVM_MSI_HASHTAB_SIZE 256
typedef struct KVMSlot
{
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;
    void *ram;
    int slot;
    int flags;
} KVMSlot;

typedef struct kvm_dirty_log KVMDirtyLog;
struct KVMState
{
    KVMSlot slots[32];
    int fd;
    int vmfd;
    int coalesced_mmio;
    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int migration_log;
    int vcpu_events;
    int robust_singlestep;
    int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
    int pit_state2;
    int xsave, xcrs;
    int many_ioeventfds;
    /* The man page (and POSIX) say ioctl numbers are signed int, but
     * they're not.  Linux, glibc and *BSD all treat ioctl numbers as
     * unsigned, and treating them as signed here can break things. */
    unsigned irqchip_inject_ioctl;
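    /* Illustration (editorial, not in the original): _IOWR()-encoded request
     * codes such as KVM_IRQ_LINE_STATUS carry _IOC_READ|_IOC_WRITE in bits
     * 30-31, so bit 31 is set and the value would come out negative if it
     * were stored in a signed int. */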
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;
    unsigned int gsi_count;
    QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
    bool direct_msi;
#endif
};

KVMState *kvm_state;
bool kvm_kernel_irqchip;
bool kvm_async_interrupts_allowed;
bool kvm_irqfds_allowed;
bool kvm_msi_via_irqfd_allowed;
bool kvm_gsi_routing_allowed;
static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
    KVM_CAP_LAST_INFO
};
static KVMSlot *kvm_alloc_slot(KVMState *s)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {
            return &s->slots[i];
        }
    }

    fprintf(stderr, "%s: no free slot available\n", __func__);
    abort();
}
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
            return mem;
        }
    }

    return NULL;
}
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
{
    KVMSlot *found = NULL;
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {
            continue;
        }

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
            found = mem;
        }
    }

    return found;
}
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
            return 1;
        }
    }

    return 0;
}
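/* Editorial note: kvm_set_user_memory_region() below is a thin wrapper
 * around the KVM_SET_USER_MEMORY_REGION vm ioctl; a slot whose memory_size
 * is 0 is deleted by the kernel, which kvm_set_phys_mem() relies on when it
 * unregisters overlapping slots further down. */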
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
{
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
}
static void kvm_reset_vcpu(void *opaque)
{
    CPUArchState *env = opaque;

    kvm_arch_reset_vcpu(env);
}
int kvm_init_vcpu(CPUArchState *env)
{
    KVMState *s = kvm_state;
    long mmap_size;
    int ret;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
    if (ret < 0) {
        DPRINTF("kvm_create_vcpu failed\n");
        goto err;
    }

    env->kvm_fd = ret;
    env->kvm_state = s;
    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
    if (mmap_size < 0) {
        ret = mmap_size;
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
        goto err;
    }

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        env->kvm_fd, 0);
    if (env->kvm_run == MAP_FAILED) {
        ret = -errno;
        DPRINTF("mmap'ing vcpu state failed\n");
        goto err;
    }

    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;
    }

    ret = kvm_arch_init_vcpu(env);
    if (ret == 0) {
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
    }
err:
    return ret;
}
/*
 * dirty pages logging control
 */
static int kvm_mem_flags(KVMState *s, bool log_dirty)
{
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
}
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
{
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
    int old_flags;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);
    mem->flags = flags;

    /* If nothing changed effectively, no need to issue ioctl */
    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    if (flags == old_flags) {
        return 0;
    }

    return kvm_set_user_memory_region(s, mem);
}
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
{
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

    if (mem == NULL) {
        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));
        return -EINVAL;
    }
    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
}
static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
    if (r < 0) {
        abort();
    }
}
static int kvm_set_migration_log(int enable)
{
    KVMState *s = kvm_state;
    KVMSlot *mem;
    int i, err;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        mem = &s->slots[i];

        if (!mem->memory_size) {
            continue;
        }
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
            continue;
        }
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            return err;
        }
    }
    return 0;
}
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
{
    unsigned int i, j;
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) +
                        HOST_LONG_BITS - 1) / HOST_LONG_BITS;
    unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
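    /* Editorial example: with 64 KiB host pages and 4 KiB target pages,
     * hpratio == 16, so one set bit in the kernel's bitmap marks 16
     * consecutive target pages as dirty below. */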
    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);
            do {
                j = ffsl(c) - 1;
                c &= ~(1ul << j);
                page_number = (i * HOST_LONG_BITS + j) * hpratio;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr,
                                        TARGET_PAGE_SIZE * hpratio);
            } while (c != 0);
        }
    }
    return 0;
}
#define ALIGN(x, y) (((x) + (y) - 1) & ~((y) - 1))
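/* Editorial example: ALIGN() rounds up to a power-of-two boundary, e.g.
 * ALIGN(0x1001, 0x1000) == 0x2000 and ALIGN(65, 64) == 128. */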
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set
 * to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
{
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;
    KVMDirtyLog d;
    KVMSlot *mem;
    int ret = 0;
    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;
    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        if (mem == NULL) {
            break;
        }

        /* XXX bad kernel interface alert
         * For the dirty bitmap, the kernel allocates an array of size
         * aligned to bits-per-long.  But when the kernel is 64-bit and
         * userspace is 32-bit, userspace can't align to the same
         * bits-per-long, since sizeof(long) differs between kernel and
         * user space.  Userspace may therefore provide a buffer that is
         * 4 bytes smaller than the kernel will use, resulting in
         * userspace memory corruption (which valgrind usually can't
         * detect either).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here,
         * in the hope that sizeof(long) won't become > 8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
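        /* Worked example (editorial): a 512 MiB slot with 4 KiB target pages
         * covers 131072 pages; rounded up to a multiple of 64 bits and
         * divided by 8 this yields a 16384-byte buffer. */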
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        d.slot = mem->slot;

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);
            ret = -1;
            break;
        }

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;
    }
    g_free(d.dirty_bitmap);

    return ret;
}
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
{
    int ret = -ENOSYS;
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        zone.addr = start;
        zone.size = size;
        zone.pad = 0;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
    }

    return ret;
}
int kvm_check_extension(KVMState *s, unsigned int extension)
{
    int ret;

    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
    if (ret < 0) {
        ret = 0;
    }

    return ret;
}
static int kvm_check_many_ioeventfds(void)
{
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    int ioeventfds[7];
    int i, ret = 0;

    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
            break;
        }
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
        if (ret < 0) {
            close(ioeventfds[i]);
            break;
        }
    }

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

    while (i-- > 0) {
        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
    }
    return ret;
#else
    return 0;
#endif
}
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
{
    while (list->name) {
        if (!kvm_check_extension(s, list->value)) {
            return list;
        }
        list++;
    }
    return NULL;
}
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
{
    KVMState *s = kvm_state;
    KVMSlot *mem, old;
    int err;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;
    void *ram = NULL;
    unsigned delta;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;
    if (delta > size) {
        return;
    }
    start_addr += delta;
    size -= delta;
    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
        return;
    }

    if (!memory_region_is_ram(mr)) {
        return;
    }

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
    while (1) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
        if (!mem) {
            break;
        }

        if (add && start_addr >= mem->start_addr &&
            (start_addr + size <= mem->start_addr + mem->memory_size) &&
            (ram - start_addr == mem->ram - mem->start_addr)) {
            /* The new slot fits into the existing one and comes with
             * identical parameters - update flags and done. */
            kvm_slot_dirty_pages_log_change(mem, log_dirty);
            return;
        }

        old = *mem;
        if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
            kvm_physical_sync_dirty_bitmap(section);
        }

        /* unregister the overlapping slot */
        mem->memory_size = 0;
        err = kvm_set_user_memory_region(s, mem);
        if (err) {
            fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                    __func__, strerror(-err));
            abort();
        }
        /* Workaround for older KVM versions: we can't join slots, not even
         * by unregistering the previous ones and then registering the larger
         * slot.  We have to maintain the existing fragmentation.  Sigh.
         *
         * This workaround assumes that the new slot starts at the same
         * address as the first existing one.  If not or if some overlapping
         * slot comes around later, we will fail (not seen in practice so far)
         * - and actually require a recent KVM version. */
        if (s->broken_set_mem_region &&
            old.start_addr == start_addr && old.memory_size < size && add) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = old.memory_size;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error updating slot: %s\n", __func__,
                        strerror(-err));
                abort();
            }

            start_addr += old.memory_size;
            ram += old.memory_size;
            size -= old.memory_size;
            continue;
        }
        /* register prefix slot */
        if (old.start_addr < start_addr) {
            mem = kvm_alloc_slot(s);
            mem->memory_size = start_addr - old.start_addr;
            mem->start_addr = old.start_addr;
            mem->ram = old.ram;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering prefix slot: %s\n",
                        __func__, strerror(-err));
#ifdef TARGET_PPC
                fprintf(stderr, "%s: This is probably because your kernel's " \
                                "PAGE_SIZE is too big. Please try to use 4k " \
                                "PAGE_SIZE!\n", __func__);
#endif
                abort();
            }
        }
        /* register suffix slot */
        if (old.start_addr + old.memory_size > start_addr + size) {
            ram_addr_t size_delta;

            mem = kvm_alloc_slot(s);
            mem->start_addr = start_addr + size;
            size_delta = mem->start_addr - old.start_addr;
            mem->memory_size = old.memory_size - size_delta;
            mem->ram = old.ram + size_delta;
            mem->flags = kvm_mem_flags(s, log_dirty);

            err = kvm_set_user_memory_region(s, mem);
            if (err) {
                fprintf(stderr, "%s: error registering suffix slot: %s\n",
                        __func__, strerror(-err));
                abort();
            }
        }
    }

    /* in case the KVM bug workaround already "consumed" the new slot */
    if (!size) {
        return;
    }
    if (!add) {
        return;
    }
    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->ram = ram;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
    if (err) {
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
                strerror(-err));
        abort();
    }
}
static void kvm_begin(MemoryListener *listener)
{
}

static void kvm_commit(MemoryListener *listener)
{
}

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, true);
}

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    kvm_set_phys_mem(section, false);
}

static void kvm_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
{
}

static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    int r;

    r = kvm_physical_sync_dirty_bitmap(section);
    if (r < 0) {
        abort();
    }
}
static void kvm_log_global_start(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(1);
    assert(r >= 0);
}

static void kvm_log_global_stop(struct MemoryListener *listener)
{
    int r;

    r = kvm_set_migration_log(0);
    assert(r >= 0);
}
static void kvm_mem_ioeventfd_add(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size <= 8);

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, true, section->size);
    if (r < 0) {
        abort();
    }
}

static void kvm_mem_ioeventfd_del(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
                               data, false, section->size);
    if (r < 0) {
        abort();
    }
}
static void kvm_io_ioeventfd_add(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    assert(match_data && section->size == 2);

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, true);
    if (r < 0) {
        abort();
    }
}

static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
{
    int r;

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
                                   data, false);
    if (r < 0) {
        abort();
    }
}
static void kvm_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_add(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_add(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}

static void kvm_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data,
                            EventNotifier *e)
{
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_del(section, match_data, data,
                              event_notifier_get_fd(e));
    } else {
        kvm_io_ioeventfd_del(section, match_data, data,
                             event_notifier_get_fd(e));
    }
}
static MemoryListener kvm_memory_listener = {
    .begin = kvm_begin,
    .commit = kvm_commit,
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .region_nop = kvm_region_nop,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_eventfd_add,
    .eventfd_del = kvm_eventfd_del,
    .priority = 10,
};
static void kvm_handle_interrupt(CPUArchState *env, int mask)
{
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
        qemu_cpu_kick(env);
    }
}
int kvm_set_irq(KVMState *s, int irq, int level)
{
    struct kvm_irq_level event;
    int ret;

    assert(kvm_async_interrupts_enabled());

    event.level = level;
    event.irq = irq;
    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
    if (ret < 0) {
        perror("kvm_set_irq");
        abort();
    }

    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
}
#ifdef KVM_CAP_IRQ_ROUTING
typedef struct KVMMSIRoute {
    struct kvm_irq_routing_entry kroute;
    QTAILQ_ENTRY(KVMMSIRoute) entry;
} KVMMSIRoute;

static void set_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
}

static void clear_gsi(KVMState *s, unsigned int gsi)
{
    s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
}
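/* Editorial example: GSI 37 lives in word 37 / 32 == 1 at bit 37 % 32 == 5;
 * set_gsi() ORs that bit into used_gsi_bitmap[1] and clear_gsi() masks it
 * back off. */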
static void kvm_init_irq_routing(KVMState *s)
{
    int gsi_count, i;

    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
    if (gsi_count > 0) {
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->gsi_count = gsi_count;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {
            set_gsi(s, i);
        }
    }

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    if (!s->direct_msi) {
        for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
            QTAILQ_INIT(&s->msi_hashtab[i]);
        }
    }

    kvm_arch_init_irq_routing(s);
}
static void kvm_irqchip_commit_routes(KVMState *s)
{
    int ret;

    s->irq_routes->flags = 0;
    ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
    assert(ret == 0);
}
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
{
    struct kvm_irq_routing_entry *new;
    int n, size;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;
        if (n < 64) {
            n = 64;
        }
        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;
    }
    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;
    new->u = entry->u;

    set_gsi(s, entry->gsi);

    kvm_irqchip_commit_routes(s);
}
void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
{
    struct kvm_irq_routing_entry e;

    assert(pin < s->gsi_count);

    e.gsi = irq;
    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.flags = 0;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
}
void kvm_irqchip_release_virq(KVMState *s, int virq)
{
    struct kvm_irq_routing_entry *e;
    int i;

    for (i = 0; i < s->irq_routes->nr; i++) {
        e = &s->irq_routes->entries[i];
        if (e->gsi == virq) {
            s->irq_routes->nr--;
            *e = s->irq_routes->entries[s->irq_routes->nr];
        }
    }
    clear_gsi(s, virq);

    kvm_irqchip_commit_routes(s);
}
static unsigned int kvm_hash_msi(uint32_t data)
{
    /* This is optimized for IA32 MSI layout. However, no other arch shall
     * repeat the mistake of not providing a direct MSI injection API. */
    return data & 0xff;
}
static void kvm_flush_dynamic_msi_routes(KVMState *s)
{
    KVMMSIRoute *route, *next;
    unsigned int hash;

    for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
        QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
            kvm_irqchip_release_virq(s, route->kroute.gsi);
            QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
            g_free(route);
        }
    }
}
static int kvm_irqchip_get_virq(KVMState *s)
{
    uint32_t *word = s->used_gsi_bitmap;
    int max_words = ALIGN(s->gsi_count, 32) / 32;
    int i, bit;
    bool retry = true;

again:
    /* Return the lowest unused GSI in the bitmap */
    for (i = 0; i < max_words; i++) {
        bit = ffs(~word[i]);
        if (!bit) {
            continue;
        }

        return bit - 1 + i * 32;
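        /* Editorial note: ffs() is 1-based.  If word[i] == 0x7, ~word[i] has
         * bit 3 as its lowest set bit, ffs() returns 4, and the GSI handed
         * out above is i * 32 + 3. */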
    }

    if (!s->direct_msi && retry) {
        retry = false;
        kvm_flush_dynamic_msi_routes(s);
        goto again;
    }

    return -ENOSPC;
}
static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
{
    unsigned int hash = kvm_hash_msi(msg.data);
    KVMMSIRoute *route;

    QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
        if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
            route->kroute.u.msi.address_hi == (msg.address >> 32) &&
            route->kroute.u.msi.data == msg.data) {
            return route;
        }
    }
    return NULL;
}
int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    struct kvm_msi msi;
    KVMMSIRoute *route;

    if (s->direct_msi) {
        msi.address_lo = (uint32_t)msg.address;
        msi.address_hi = msg.address >> 32;
        msi.data = msg.data;
        msi.flags = 0;
        memset(msi.pad, 0, sizeof(msi.pad));

        return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
    }

    route = kvm_lookup_msi_route(s, msg);
    if (!route) {
        int virq;

        virq = kvm_irqchip_get_virq(s);
        if (virq < 0) {
            return virq;
        }

        route = g_malloc(sizeof(KVMMSIRoute));
        route->kroute.gsi = virq;
        route->kroute.type = KVM_IRQ_ROUTING_MSI;
        route->kroute.flags = 0;
        route->kroute.u.msi.address_lo = (uint32_t)msg.address;
        route->kroute.u.msi.address_hi = msg.address >> 32;
        route->kroute.u.msi.data = msg.data;

        kvm_add_routing_entry(s, &route->kroute);

        QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
                           entry);
    }

    assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);

    return kvm_set_irq(s, route->kroute.gsi, 1);
}
int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    struct kvm_irq_routing_entry kroute;
    int virq;

    if (!kvm_gsi_routing_enabled()) {
        return -ENOSYS;
    }

    virq = kvm_irqchip_get_virq(s);
    if (virq < 0) {
        return virq;
    }

    kroute.gsi = virq;
    kroute.type = KVM_IRQ_ROUTING_MSI;
    kroute.flags = 0;
    kroute.u.msi.address_lo = (uint32_t)msg.address;
    kroute.u.msi.address_hi = msg.address >> 32;
    kroute.u.msi.data = msg.data;

    kvm_add_routing_entry(s, &kroute);

    return virq;
}
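/* Editorial note: an irqfd binds an eventfd to a routed GSI; once assigned,
 * a write to the eventfd (e.g. by vhost or a device backend) injects the
 * interrupt entirely in the kernel, without a trip through userspace. */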
static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    struct kvm_irqfd irqfd = {
        .fd = fd,
        .gsi = virq,
        .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
    };

    if (!kvm_irqfds_enabled()) {
        return -ENOSYS;
    }

    return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
}
#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)
{
}

void kvm_irqchip_release_virq(KVMState *s, int virq)
{
}

int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
{
    abort();
}

int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
{
    return -ENOSYS;
}

static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
{
    abort();
}
#endif /* !KVM_CAP_IRQ_ROUTING */
int kvm_irqchip_add_irqfd(KVMState *s, int fd, int virq)
{
    return kvm_irqchip_assign_irqfd(s, fd, virq, true);
}

int kvm_irqchip_add_irq_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_add_irqfd(s, event_notifier_get_fd(n), virq);
}
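/* Illustrative usage (editorial; 'msg' and 'notifier' are hypothetical): a
 * device model would typically pair the helpers above like this:
 *
 *     int virq = kvm_irqchip_add_msi_route(kvm_state, msg);
 *     if (virq >= 0) {
 *         kvm_irqchip_add_irq_notifier(kvm_state, &notifier, virq);
 *     }
 */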
int kvm_irqchip_remove_irqfd(KVMState *s, int fd, int virq)
{
    return kvm_irqchip_assign_irqfd(s, fd, virq, false);
}

int kvm_irqchip_remove_irq_notifier(KVMState *s, EventNotifier *n, int virq)
{
    return kvm_irqchip_remove_irqfd(s, event_notifier_get_fd(n), virq);
}
static int kvm_irqchip_create(KVMState *s)
{
    QemuOptsList *list = qemu_find_opts("machine");
    int ret;

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", true) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
        return 0;
    }

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
    if (ret < 0) {
        fprintf(stderr, "Create kernel irqchip failed\n");
        return ret;
    }

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;
    /* If we have an in-kernel IRQ chip then we must have asynchronous
     * interrupt delivery (though the reverse is not necessarily true)
     */
    kvm_async_interrupts_allowed = true;

    kvm_init_irq_routing(s);

    return 0;
}
static int kvm_max_vcpus(KVMState *s)
{
    int ret;

    /* Find number of supported CPUs using the recommended
     * procedure from the kernel API documentation to cope with
     * older kernels that may be missing capabilities.
     */
    ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
    if (ret) {
        return ret;
    }
    ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
    if (ret) {
        return ret;
    }

    return 4;
}
int kvm_init(void)
{
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    KVMState *s;
    const KVMCapabilityInfo *missing_cap;
    int ret;
    int i;
    int max_vcpus;

    s = g_malloc0(sizeof(KVMState));
    /*
     * On systems where the kernel can support different base page
     * sizes, host page size may be different from TARGET_PAGE_SIZE,
     * even with KVM.  TARGET_PAGE_SIZE is assumed to be the minimum
     * page size for the system though.
     */
    assert(TARGET_PAGE_SIZE <= getpagesize());

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;
    }
    s->fd = qemu_open("/dev/kvm", O_RDWR);
    if (s->fd == -1) {
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
        ret = -errno;
        goto err;
    }

    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        if (ret > 0) {
            ret = -EINVAL;
        }
        fprintf(stderr, "kvm version too old\n");
        goto err;
    }

    if (ret > KVM_API_VERSION) {
        ret = -EINVAL;
        fprintf(stderr, "kvm version not supported\n");
        goto err;
    }
    max_vcpus = kvm_max_vcpus(s);
    if (smp_cpus > max_vcpus) {
        ret = -EINVAL;
        fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
                "supported by KVM (%d)\n", smp_cpus, max_vcpus);
        goto err;
    }

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
    if (s->vmfd < 0) {
#ifdef TARGET_S390X
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                "your host kernel command line\n");
#endif
        ret = s->vmfd;
        goto err;
    }
    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
    if (!missing_cap) {
        missing_cap =
            kvm_check_extension_list(s, kvm_arch_required_capabilities);
    }
    if (missing_cap) {
        ret = -EINVAL;
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
        goto err;
    }
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
    if (ret > 0) {
        s->broken_set_mem_region = 0;
    }

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
#endif

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
#endif

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
#endif

#ifdef KVM_CAP_XCRS
    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
#endif

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
#endif

#ifdef KVM_CAP_IRQ_ROUTING
    s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
#endif
    ret = kvm_arch_init(s);
    if (ret < 0) {
        goto err;
    }

    ret = kvm_irqchip_create(s);
    if (ret < 0) {
        goto err;
    }

    kvm_state = s;
    memory_listener_register(&kvm_memory_listener, NULL);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;

    return 0;
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
                          uint32_t count)
{
    int i;
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
            switch (size) {
            case 1:
                stb_p(ptr, cpu_inb(port));
                break;
            case 2:
                stw_p(ptr, cpu_inw(port));
                break;
            case 4:
                stl_p(ptr, cpu_inl(port));
                break;
            }
        } else {
            switch (size) {
            case 1:
                cpu_outb(port, ldub_p(ptr));
                break;
            case 2:
                cpu_outw(port, lduw_p(ptr));
                break;
            case 4:
                cpu_outl(port, ldl_p(ptr));
                break;
            }
        }

        ptr += size;
    }
}
static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
{
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int i;

        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        }
    } else {
        fprintf(stderr, "\n");
    }
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;
        }
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    return -1;
}
void kvm_flush_coalesced_mmio_buffer(void)
{
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {
        return;
    }

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            smp_wmb();
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
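            /* Editorial note: the kernel produces entries at 'last' and this
             * loop is the only consumer; first == last means the ring is
             * empty, and advancing 'first' modulo KVM_COALESCED_MMIO_MAX
             * frees the slot for reuse. */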
        }
    }

    s->coalesced_flush_in_progress = false;
}
static void do_kvm_cpu_synchronize_state(void *_env)
{
    CPUArchState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
    }
}

void kvm_cpu_synchronize_state(CPUArchState *env)
{
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);
    }
}

void kvm_cpu_synchronize_post_reset(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;
}

void kvm_cpu_synchronize_post_init(CPUArchState *env)
{
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
}
int kvm_cpu_exec(CPUArchState *env)
{
    struct kvm_run *run = env->kvm_run;
    int ret, run_ret;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;
        return EXCP_HLT;
    }

    do {
        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /*
             * KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret < 0) {
            if (run_ret == -EINTR || run_ret == -EAGAIN) {
                DPRINTF("io window exit\n");
                ret = EXCP_INTERRUPT;
                break;
            }
            fprintf(stderr, "error: kvm run failed %s\n",
                    strerror(-run_ret));
            abort();
        }
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
                          run->io.direction,
                          run->io.size,
                          run->io.count);
            ret = 0;
            break;
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.data,
                                   run->mmio.len,
                                   run->mmio.is_write);
            ret = 0;
            break;
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
            break;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
            ret = -1;
            break;
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
            break;
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);
            break;
        }
    } while (ret == 0);

    if (ret < 0) {
        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);
    }

    env->exit_request = 0;
    return ret;
}
int kvm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vm_ioctl(KVMState *s, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(s->vmfd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
{
    int ret;
    void *arg;
    va_list ap;

    va_start(ap, type);
    arg = va_arg(ap, void *);
    va_end(ap);

    ret = ioctl(env->kvm_fd, type, arg);
    if (ret == -1) {
        ret = -errno;
    }
    return ret;
}
int kvm_has_sync_mmu(void)
{
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
}

int kvm_has_vcpu_events(void)
{
    return kvm_state->vcpu_events;
}

int kvm_has_robust_singlestep(void)
{
    return kvm_state->robust_singlestep;
}

int kvm_has_debugregs(void)
{
    return kvm_state->debugregs;
}

int kvm_has_xsave(void)
{
    return kvm_state->xsave;
}

int kvm_has_xcrs(void)
{
    return kvm_state->xcrs;
}

int kvm_has_pit_state2(void)
{
    return kvm_state->pit_state2;
}

int kvm_has_many_ioeventfds(void)
{
    if (!kvm_enabled()) {
        return 0;
    }
    return kvm_state->many_ioeventfds;
}

int kvm_has_gsi_routing(void)
{
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
#else
    return false;
#endif
}
void *kvm_vmalloc(ram_addr_t size)
{
#ifdef TARGET_S390X
    void *mem;

    mem = kvm_arch_vmalloc(size);
    if (mem) {
        return mem;
    }
#endif
    return qemu_vmalloc(size);
}
void kvm_setup_guest_memory(void *start, size_t size)
{
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

        if (ret) {
            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
            exit(1);
        }
    }
}
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
                                                 target_ulong pc)
{
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {
        if (bp->pc == pc) {
            return bp;
        }
    }
    return NULL;
}

int kvm_sw_breakpoints_active(CPUArchState *env)
{
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
}
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;
    CPUArchState *env;
    int err;
};

static void kvm_invoke_set_guest_debug(void *data)
{
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUArchState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
}
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);
    data.env = env;

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
    return data.err;
}
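/* Editorial note: software breakpoints (GDB_BREAKPOINT_SW) are implemented
 * by patching guest code via kvm_arch_insert_sw_breakpoint() and are
 * reference-counted in the per-VM kvm_sw_breakpoints list, so several
 * debugger requests can share one patch site; hardware breakpoints are
 * delegated entirely to the arch-specific hooks. */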
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (bp) {
            bp->use_count++;
            return 0;
        }

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
        if (!bp) {
            return -ENOMEM;
        }

        bp->pc = addr;
        bp->use_count = 1;
        err = kvm_arch_insert_sw_breakpoint(current_env, bp);
        if (err) {
            g_free(bp);
            return err;
        }

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
                           bp, entry);
    } else {
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    struct kvm_sw_breakpoint *bp;
    CPUArchState *env;
    int err;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);
        if (!bp) {
            return -ENOENT;
        }

        if (bp->use_count > 1) {
            bp->use_count--;
            return 0;
        }

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);
        if (err) {
            return err;
        }

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);
        g_free(bp);
    } else {
        err = kvm_arch_remove_hw_breakpoint(addr, len, type);
        if (err) {
            return err;
        }
    }

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
        if (err) {
            return err;
        }
    }
    return 0;
}
void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;
    CPUArchState *env;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {
                    break;
                }
            }
        }
    }
    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
    }
}
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
{
    return -EINVAL;
}

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
{
    return -EINVAL;
}

void kvm_remove_all_breakpoints(CPUArchState *current_env)
{
}
#endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
{
    struct kvm_signal_mask *sigmask;
    int r;

    if (!sigset) {
        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);
    }

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    sigmask->len = 8;
    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
    g_free(sigmask);

    return r;
}
int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val, bool assign,
                           uint32_t size)
{
    int ret;
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.addr = addr;
    iofd.len = size;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;
    iofd.fd = fd;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);

    if (ret < 0) {
        return -errno;
    }

    return 0;
}
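/* Editorial note: an ioeventfd asks KVM to signal 'fd' from inside the
 * kernel whenever the guest writes the matching value to the registered
 * address, avoiding a heavyweight exit to userspace; virtio devices use
 * this as their "kick" path, for example. */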
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int r;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    if (r < 0) {
        return r;
    }
    return 0;
}
int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
{
    return kvm_arch_on_sigbus_vcpu(env, code, addr);
}

int kvm_on_sigbus(int code, void *addr)
{
    return kvm_arch_on_sigbus(code, addr);
}