 * Copyright IBM, Corp. 2008
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.

#include <sys/types.h>
#include <sys/ioctl.h>

#include <linux/kvm.h>

#include "qemu-common.h"
#include "qemu-barrier.h"
#include "exec-memory.h"

/* This check must be after config-host.h is included */
#ifdef CONFIG_EVENTFD
#include <sys/eventfd.h>
#endif

/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
typedef struct KVMSlot
    target_phys_addr_t start_addr;
    ram_addr_t memory_size;

typedef struct kvm_dirty_log KVMDirtyLog;

    struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
    bool coalesced_flush_in_progress;
    int broken_set_mem_region;
    int robust_singlestep;
#ifdef KVM_CAP_SET_GUEST_DEBUG
    struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
    int irqchip_inject_ioctl;
#ifdef KVM_CAP_IRQ_ROUTING
    struct kvm_irq_routing *irq_routes;
    int nr_allocated_irq_routes;
    uint32_t *used_gsi_bitmap;

bool kvm_kernel_irqchip;

static const KVMCapabilityInfo kvm_required_capabilites[] = {
    KVM_CAP_INFO(USER_MEMORY),
    KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
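
/* Return the first slot not currently in use (memory_size == 0);
 * complains to stderr if every slot is taken. */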
static KVMSlot *kvm_alloc_slot(KVMState *s)
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        if (s->slots[i].memory_size == 0) {

    fprintf(stderr, "%s: no free slot available\n", __func__);
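
/* Return the slot that covers exactly [start_addr, end_addr), or NULL
 * if no slot matches both boundaries. */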
static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
                                         target_phys_addr_t start_addr,
                                         target_phys_addr_t end_addr)
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (start_addr == mem->start_addr &&
            end_addr == mem->start_addr + mem->memory_size) {
/*
 * Find overlapping slot with lowest start address
 */
static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
                                            target_phys_addr_t start_addr,
                                            target_phys_addr_t end_addr)
    KVMSlot *found = NULL;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (mem->memory_size == 0 ||
            (found && found->start_addr < mem->start_addr)) {

        if (end_addr > mem->start_addr &&
            start_addr < mem->start_addr + mem->memory_size) {
int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
                                       target_phys_addr_t *phys_addr)
    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        KVMSlot *mem = &s->slots[i];

        if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
            *phys_addr = mem->start_addr + (ram - mem->ram);
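
/* Push one slot's parameters to the kernel.  Callers delete a slot by
 * zeroing its memory_size before calling this. */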
static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
    struct kvm_userspace_memory_region mem;

    mem.slot = slot->slot;
    mem.guest_phys_addr = slot->start_addr;
    mem.memory_size = slot->memory_size;
    mem.userspace_addr = (unsigned long)slot->ram;
    mem.flags = slot->flags;
    if (s->migration_log) {
        mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }
    return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
static void kvm_reset_vcpu(void *opaque)
    CPUArchState *env = opaque;

    kvm_arch_reset_vcpu(env);

int kvm_pit_in_kernel(void)
    return kvm_state->pit_in_kernel;
int kvm_init_vcpu(CPUArchState *env)
    KVMState *s = kvm_state;

    DPRINTF("kvm_init_vcpu\n");

    ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
        DPRINTF("kvm_create_vcpu failed\n");

    env->kvm_vcpu_dirty = 1;

    mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
        DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");

    env->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
    if (env->kvm_run == MAP_FAILED) {
        DPRINTF("mmap'ing vcpu state failed\n");
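
    /* For KVM_CAP_COALESCED_MMIO, KVM_CHECK_EXTENSION returns the offset,
     * in pages, of the coalesced-MMIO ring within the vcpu mmap area, so
     * the ring is reachable right from kvm_run. */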
    if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
        s->coalesced_mmio_ring =
            (void *)env->kvm_run + s->coalesced_mmio * PAGE_SIZE;

    ret = kvm_arch_init_vcpu(env);
        qemu_register_reset(kvm_reset_vcpu, env);
        kvm_arch_reset_vcpu(env);
/*
 * dirty pages logging control
 */
static int kvm_mem_flags(KVMState *s, bool log_dirty)
    return log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
    KVMState *s = kvm_state;
    int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;

    old_flags = mem->flags;

    flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty);

    if (s->migration_log) {
        flags |= KVM_MEM_LOG_DIRTY_PAGES;
    }

    /* If nothing changed effectively, no need to issue ioctl */
    if (flags == old_flags) {

    return kvm_set_user_memory_region(s, mem);
static int kvm_dirty_pages_log_change(target_phys_addr_t phys_addr,
                                      ram_addr_t size, bool log_dirty)
    KVMState *s = kvm_state;
    KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);

        fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
                TARGET_FMT_plx "\n", __func__, phys_addr,
                (target_phys_addr_t)(phys_addr + size - 1));

    return kvm_slot_dirty_pages_log_change(mem, log_dirty);
static void kvm_log_start(MemoryListener *listener,
                          MemoryRegionSection *section)
    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, true);

static void kvm_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section)
    r = kvm_dirty_pages_log_change(section->offset_within_address_space,
                                   section->size, false);
static int kvm_set_migration_log(int enable)
    KVMState *s = kvm_state;

    s->migration_log = enable;

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {

        if (!mem->memory_size) {
        if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
        err = kvm_set_user_memory_region(s, mem);
/* get kvm's dirty pages bitmap and update qemu's */
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
                                         unsigned long *bitmap)
    unsigned long page_number, c;
    target_phys_addr_t addr, addr1;
    unsigned int len = ((section->size / TARGET_PAGE_SIZE) +
                        HOST_LONG_BITS - 1) / HOST_LONG_BITS;

    /*
     * bitmap-traveling is faster than memory-traveling (for addr...)
     * especially when most of the memory is not dirty.
     */
    for (i = 0; i < len; i++) {
        if (bitmap[i] != 0) {
            c = leul_to_cpu(bitmap[i]);

                page_number = i * HOST_LONG_BITS + j;
                addr1 = page_number * TARGET_PAGE_SIZE;
                addr = section->offset_within_region + addr1;
                memory_region_set_dirty(section->mr, addr, TARGET_PAGE_SIZE);
#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
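/* Rounds x up to the next multiple of y (y must be a power of two),
 * e.g. ALIGN(9, 8) == 16 and ALIGN(16, 8) == 16. */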
/**
 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
 * This function updates qemu's dirty bitmap using
 * memory_region_set_dirty().  This means all bits are set to dirty.
 *
 * @start_addr: start of logged region.
 * @end_addr: end of logged region.
 */
static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
    KVMState *s = kvm_state;
    unsigned long size, allocated_size = 0;

    target_phys_addr_t start_addr = section->offset_within_address_space;
    target_phys_addr_t end_addr = start_addr + section->size;

    d.dirty_bitmap = NULL;
    while (start_addr < end_addr) {
        mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
        /* XXX bad kernel interface alert
         * For dirty bitmap, kernel allocates array of size aligned to
         * bits-per-long.  But for case when the kernel is 64bits and
         * the userspace is 32bits, userspace can't align to the same
         * bits-per-long, since sizeof(long) is different between kernel
         * and user space.  This way, userspace will provide a buffer which
         * may be 4 bytes less than the kernel will use, resulting in
         * userspace memory corruption (which is not detectable by valgrind
         * either, in most cases).
         * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
         * the hope that sizeof(long) won't become >8 any time soon.
         */
        size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
                     /*HOST_LONG_BITS*/ 64) / 8;
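        /* Worked example: a slot of 32 target pages needs 32 bits.  Rounding
         * to 32-bit longs would allocate 32 / 8 = 4 bytes, while a 64-bit
         * kernel writes ALIGN(32, 64) / 8 = 8 bytes; aligning to 64 avoids
         * that 4-byte overrun. */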
        if (!d.dirty_bitmap) {
            d.dirty_bitmap = g_malloc(size);
        } else if (size > allocated_size) {
            d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
        }
        allocated_size = size;
        memset(d.dirty_bitmap, 0, allocated_size);

        if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
            DPRINTF("ioctl failed %d\n", errno);

        kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
        start_addr = mem->start_addr + mem->memory_size;

    g_free(d.dirty_bitmap);
int kvm_coalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        ret = kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);

int kvm_uncoalesce_mmio_region(target_phys_addr_t start, ram_addr_t size)
    KVMState *s = kvm_state;

    if (s->coalesced_mmio) {
        struct kvm_coalesced_mmio_zone zone;

        ret = kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);

int kvm_check_extension(KVMState *s, unsigned int extension)
    ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
static int kvm_check_many_ioeventfds(void)
    /* Userspace can use ioeventfd for io notification.  This requires a host
     * that supports eventfd(2) and an I/O thread; since eventfd does not
     * support SIGIO it cannot interrupt the vcpu.
     *
     * Older kernels have a 6 device limit on the KVM io bus.  Find out so we
     * can avoid creating too many ioeventfds.
     */
#if defined(CONFIG_EVENTFD)
    for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
        ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
        if (ioeventfds[i] < 0) {
        ret = kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, true);
            close(ioeventfds[i]);

    /* Decide whether many devices are supported or not */
    ret = i == ARRAY_SIZE(ioeventfds);

        kvm_set_ioeventfd_pio_word(ioeventfds[i], 0, i, false);
        close(ioeventfds[i]);
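
/* Return the first capability in @list that the host kernel lacks, or
 * NULL if all of them are present. */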
static const KVMCapabilityInfo *
kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
        if (!kvm_check_extension(s, list->value)) {
static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
    KVMState *s = kvm_state;
    MemoryRegion *mr = section->mr;
    bool log_dirty = memory_region_is_logging(mr);
    target_phys_addr_t start_addr = section->offset_within_address_space;
    ram_addr_t size = section->size;

    /* kvm works in page size chunks, but the function may be called
       with sub-page size and unaligned start address. */
    delta = TARGET_PAGE_ALIGN(size) - size;

    size &= TARGET_PAGE_MASK;
    if (!size || (start_addr & ~TARGET_PAGE_MASK)) {

    if (!memory_region_is_ram(mr)) {

    ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;

    mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);

    if (add && start_addr >= mem->start_addr &&
        (start_addr + size <= mem->start_addr + mem->memory_size) &&
        (ram - start_addr == mem->ram - mem->start_addr)) {
        /* The new slot fits into the existing one and comes with
         * identical parameters - update flags and done. */
        kvm_slot_dirty_pages_log_change(mem, log_dirty);
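
    /* Sync dirty pages out of a slot that is about to be dropped, so no
     * logged writes are lost. */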
    if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
        kvm_physical_sync_dirty_bitmap(section);

    /* unregister the overlapping slot */
    mem->memory_size = 0;
    err = kvm_set_user_memory_region(s, mem);
        fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
                __func__, strerror(-err));
    /* Workaround for older KVM versions: we can't join slots, not even by
     * unregistering the previous ones and then registering the larger
     * slot.  We have to maintain the existing fragmentation.  Sigh.
     *
     * This workaround assumes that the new slot starts at the same
     * address as the first existing one.  If not or if some overlapping
     * slot comes around later, we will fail (not seen in practice so far)
     * - and actually require a recent KVM version. */
    if (s->broken_set_mem_region &&
        old.start_addr == start_addr && old.memory_size < size && add) {
        mem = kvm_alloc_slot(s);
        mem->memory_size = old.memory_size;
        mem->start_addr = old.start_addr;
        mem->flags = kvm_mem_flags(s, log_dirty);

        err = kvm_set_user_memory_region(s, mem);
            fprintf(stderr, "%s: error updating slot: %s\n", __func__,

        start_addr += old.memory_size;
        ram += old.memory_size;
        size -= old.memory_size;
    /* register prefix slot */
    if (old.start_addr < start_addr) {
        mem = kvm_alloc_slot(s);
        mem->memory_size = start_addr - old.start_addr;
        mem->start_addr = old.start_addr;
        mem->flags = kvm_mem_flags(s, log_dirty);

        err = kvm_set_user_memory_region(s, mem);
            fprintf(stderr, "%s: error registering prefix slot: %s\n",
                    __func__, strerror(-err));
            fprintf(stderr, "%s: This is probably because your kernel's " \
                            "PAGE_SIZE is too big. Please try to use 4k " \
                            "PAGE_SIZE!\n", __func__);
    /* register suffix slot */
    if (old.start_addr + old.memory_size > start_addr + size) {
        ram_addr_t size_delta;

        mem = kvm_alloc_slot(s);
        mem->start_addr = start_addr + size;
        size_delta = mem->start_addr - old.start_addr;
        mem->memory_size = old.memory_size - size_delta;
        mem->ram = old.ram + size_delta;
        mem->flags = kvm_mem_flags(s, log_dirty);

        err = kvm_set_user_memory_region(s, mem);
            fprintf(stderr, "%s: error registering suffix slot: %s\n",
                    __func__, strerror(-err));

    /* in case the KVM bug workaround already "consumed" the new slot */

    mem = kvm_alloc_slot(s);
    mem->memory_size = size;
    mem->start_addr = start_addr;
    mem->flags = kvm_mem_flags(s, log_dirty);

    err = kvm_set_user_memory_region(s, mem);
        fprintf(stderr, "%s: error registering slot: %s\n", __func__,
static void kvm_begin(MemoryListener *listener)

static void kvm_commit(MemoryListener *listener)

static void kvm_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
    kvm_set_phys_mem(section, true);

static void kvm_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
    kvm_set_phys_mem(section, false);

static void kvm_region_nop(MemoryListener *listener,
                           MemoryRegionSection *section)
static void kvm_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
    r = kvm_physical_sync_dirty_bitmap(section);

static void kvm_log_global_start(struct MemoryListener *listener)
    r = kvm_set_migration_log(1);

static void kvm_log_global_stop(struct MemoryListener *listener)
    r = kvm_set_migration_log(0);
static void kvm_mem_ioeventfd_add(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
    assert(match_data && section->size == 4);

    r = kvm_set_ioeventfd_mmio_long(fd, section->offset_within_address_space,

static void kvm_mem_ioeventfd_del(MemoryRegionSection *section,
                                  bool match_data, uint64_t data, int fd)
    r = kvm_set_ioeventfd_mmio_long(fd, section->offset_within_address_space,

static void kvm_io_ioeventfd_add(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
    assert(match_data && section->size == 2);

    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,

static void kvm_io_ioeventfd_del(MemoryRegionSection *section,
                                 bool match_data, uint64_t data, int fd)
    r = kvm_set_ioeventfd_pio_word(fd, section->offset_within_address_space,
static void kvm_eventfd_add(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_add(section, match_data, data, fd);
    } else {
        kvm_io_ioeventfd_add(section, match_data, data, fd);
    }

static void kvm_eventfd_del(MemoryListener *listener,
                            MemoryRegionSection *section,
                            bool match_data, uint64_t data, int fd)
    if (section->address_space == get_system_memory()) {
        kvm_mem_ioeventfd_del(section, match_data, data, fd);
    } else {
        kvm_io_ioeventfd_del(section, match_data, data, fd);
    }
static MemoryListener kvm_memory_listener = {
    .begin = kvm_begin,
    .commit = kvm_commit,
    .region_add = kvm_region_add,
    .region_del = kvm_region_del,
    .region_nop = kvm_region_nop,
    .log_start = kvm_log_start,
    .log_stop = kvm_log_stop,
    .log_sync = kvm_log_sync,
    .log_global_start = kvm_log_global_start,
    .log_global_stop = kvm_log_global_stop,
    .eventfd_add = kvm_eventfd_add,
    .eventfd_del = kvm_eventfd_del,
};
static void kvm_handle_interrupt(CPUArchState *env, int mask)
    env->interrupt_request |= mask;

    if (!qemu_cpu_is_self(env)) {
int kvm_irqchip_set_irq(KVMState *s, int irq, int level)
    struct kvm_irq_level event;

    assert(kvm_irqchip_in_kernel());

    ret = kvm_vm_ioctl(s, s->irqchip_inject_ioctl, &event);
        perror("kvm_set_irqchip_line");
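
    /* Only KVM_IRQ_LINE_STATUS reports the delivery status in
     * event.status; plain KVM_IRQ_LINE gives no feedback, so just claim
     * success. */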
    return (s->irqchip_inject_ioctl == KVM_IRQ_LINE) ? 1 : event.status;

#ifdef KVM_CAP_IRQ_ROUTING
static void set_gsi(KVMState *s, unsigned int gsi)
    assert(gsi < s->max_gsi);

    s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
static void kvm_init_irq_routing(KVMState *s)
    gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
        unsigned int gsi_bits, i;

        /* Round up so we can search ints using ffs */
        gsi_bits = ALIGN(gsi_count, 32);
        s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
        s->max_gsi = gsi_bits;

        /* Mark any over-allocated bits as already in use */
        for (i = gsi_count; i < gsi_bits; i++) {

    s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
    s->nr_allocated_irq_routes = 0;

    kvm_arch_init_irq_routing(s);
static void kvm_add_routing_entry(KVMState *s,
                                  struct kvm_irq_routing_entry *entry)
    struct kvm_irq_routing_entry *new;

    if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
        n = s->nr_allocated_irq_routes * 2;

        size = sizeof(struct kvm_irq_routing);
        size += n * sizeof(*new);
        s->irq_routes = g_realloc(s->irq_routes, size);
        s->nr_allocated_irq_routes = n;

    n = s->irq_routes->nr++;
    new = &s->irq_routes->entries[n];
    memset(new, 0, sizeof(*new));
    new->gsi = entry->gsi;
    new->type = entry->type;
    new->flags = entry->flags;

    set_gsi(s, entry->gsi);
void kvm_irqchip_add_route(KVMState *s, int irq, int irqchip, int pin)
    struct kvm_irq_routing_entry e;

    e.type = KVM_IRQ_ROUTING_IRQCHIP;
    e.u.irqchip.irqchip = irqchip;
    e.u.irqchip.pin = pin;
    kvm_add_routing_entry(s, &e);
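
/* Routes only reach the kernel when the whole table is pushed via
 * KVM_SET_GSI_ROUTING below, so additions can be batched and committed
 * once. */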
int kvm_irqchip_commit_routes(KVMState *s)
    s->irq_routes->flags = 0;
    return kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);

#else /* !KVM_CAP_IRQ_ROUTING */

static void kvm_init_irq_routing(KVMState *s)

#endif /* !KVM_CAP_IRQ_ROUTING */
static int kvm_irqchip_create(KVMState *s)
    QemuOptsList *list = qemu_find_opts("machine");

    if (QTAILQ_EMPTY(&list->head) ||
        !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
                           "kernel_irqchip", false) ||
        !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {

    ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
        fprintf(stderr, "Create kernel irqchip failed\n");

    s->irqchip_inject_ioctl = KVM_IRQ_LINE;
    if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
        s->irqchip_inject_ioctl = KVM_IRQ_LINE_STATUS;
    }
    kvm_kernel_irqchip = true;

    kvm_init_irq_routing(s);
int kvm_init(void)
    static const char upgrade_note[] =
        "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
        "(see http://sourceforge.net/projects/kvm).\n";
    const KVMCapabilityInfo *missing_cap;

    s = g_malloc0(sizeof(KVMState));

#ifdef KVM_CAP_SET_GUEST_DEBUG
    QTAILQ_INIT(&s->kvm_sw_breakpoints);

    for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
        s->slots[i].slot = i;

    s->fd = qemu_open("/dev/kvm", O_RDWR);
        fprintf(stderr, "Could not access KVM kernel module: %m\n");
    ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
    if (ret < KVM_API_VERSION) {
        fprintf(stderr, "kvm version too old\n");

    if (ret > KVM_API_VERSION) {
        fprintf(stderr, "kvm version not supported\n");

    s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
        fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
                        "your host kernel command line\n");

    missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
        kvm_check_extension_list(s, kvm_arch_required_capabilities);
        fprintf(stderr, "kvm does not support %s\n%s",
                missing_cap->name, upgrade_note);
    s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);

    s->broken_set_mem_region = 1;
    ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
        s->broken_set_mem_region = 0;

#ifdef KVM_CAP_VCPU_EVENTS
    s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);

    s->robust_singlestep =
        kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);

#ifdef KVM_CAP_DEBUGREGS
    s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);

#ifdef KVM_CAP_XSAVE
    s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);

    s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);

#ifdef KVM_CAP_PIT_STATE2
    s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
    ret = kvm_arch_init(s);

    ret = kvm_irqchip_create(s);

    memory_listener_register(&kvm_memory_listener, NULL);

    s->many_ioeventfds = kvm_check_many_ioeventfds();

    cpu_interrupt_handler = kvm_handle_interrupt;
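
/* Complete a KVM_EXIT_IO: copy each element between the kvm_run I/O
 * buffer and the emulated port. */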
static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
    uint8_t *ptr = data;

    for (i = 0; i < count; i++) {
        if (direction == KVM_EXIT_IO_IN) {
                stb_p(ptr, cpu_inb(port));
                stw_p(ptr, cpu_inw(port));
                stl_p(ptr, cpu_inl(port));
                cpu_outb(port, ldub_p(ptr));
                cpu_outw(port, lduw_p(ptr));
                cpu_outl(port, ldl_p(ptr));
static int kvm_handle_internal_error(CPUArchState *env, struct kvm_run *run)
    fprintf(stderr, "KVM internal error.");
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
        for (i = 0; i < run->internal.ndata; ++i) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    i, (uint64_t)run->internal.data[i]);
        fprintf(stderr, "\n");

    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
        if (!kvm_arch_stop_on_emulation_error(env)) {
            cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
            return EXCP_INTERRUPT;

    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
void kvm_flush_coalesced_mmio_buffer(void)
    KVMState *s = kvm_state;

    if (s->coalesced_flush_in_progress) {

    s->coalesced_flush_in_progress = true;

    if (s->coalesced_mmio_ring) {
        struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
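        /* Drain every entry the kernel queued between first (our consumer
         * index) and last (the kernel's producer index). */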
        while (ring->first != ring->last) {
            struct kvm_coalesced_mmio *ent;

            ent = &ring->coalesced_mmio[ring->first];

            cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
            ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;

    s->coalesced_flush_in_progress = false;
static void do_kvm_cpu_synchronize_state(void *_env)
    CPUArchState *env = _env;

    if (!env->kvm_vcpu_dirty) {
        kvm_arch_get_registers(env);
        env->kvm_vcpu_dirty = 1;
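
/* Fetch the registers from KVM once and mark them dirty so they are
 * written back before the next KVM_RUN; must run on the vcpu thread,
 * hence run_on_cpu() below. */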
void kvm_cpu_synchronize_state(CPUArchState *env)
    if (!env->kvm_vcpu_dirty) {
        run_on_cpu(env, do_kvm_cpu_synchronize_state, env);

void kvm_cpu_synchronize_post_reset(CPUArchState *env)
    kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
    env->kvm_vcpu_dirty = 0;

void kvm_cpu_synchronize_post_init(CPUArchState *env)
    kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
    env->kvm_vcpu_dirty = 0;
int kvm_cpu_exec(CPUArchState *env)
    struct kvm_run *run = env->kvm_run;

    DPRINTF("kvm_cpu_exec()\n");

    if (kvm_arch_process_async_events(env)) {
        env->exit_request = 0;

        if (env->kvm_vcpu_dirty) {
            kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
            env->kvm_vcpu_dirty = 0;
        }

        kvm_arch_pre_run(env, run);
        if (env->exit_request) {
            DPRINTF("interrupt exit requested\n");
            /* KVM requires us to reenter the kernel after IO exits to complete
             * instruction emulation. This self-signal will ensure that we
             * leave ASAP again.
             */
            qemu_cpu_kick_self();
        }
        qemu_mutex_unlock_iothread();

        run_ret = kvm_vcpu_ioctl(env, KVM_RUN, 0);

        qemu_mutex_lock_iothread();
        kvm_arch_post_run(env, run);

        kvm_flush_coalesced_mmio_buffer();

        if (run_ret == -EINTR || run_ret == -EAGAIN) {
            DPRINTF("io window exit\n");
            ret = EXCP_INTERRUPT;

        fprintf(stderr, "error: kvm run failed %s\n",
                strerror(-run_ret));
        switch (run->exit_reason) {
        case KVM_EXIT_IO:
            DPRINTF("handle_io\n");
            kvm_handle_io(run->io.port,
                          (uint8_t *)run + run->io.data_offset,
        case KVM_EXIT_MMIO:
            DPRINTF("handle_mmio\n");
            cpu_physical_memory_rw(run->mmio.phys_addr,
                                   run->mmio.is_write);
        case KVM_EXIT_IRQ_WINDOW_OPEN:
            DPRINTF("irq_window_open\n");
            ret = EXCP_INTERRUPT;
        case KVM_EXIT_SHUTDOWN:
            DPRINTF("shutdown\n");
            qemu_system_reset_request();
            ret = EXCP_INTERRUPT;
        case KVM_EXIT_UNKNOWN:
            fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
                    (uint64_t)run->hw.hardware_exit_reason);
        case KVM_EXIT_INTERNAL_ERROR:
            ret = kvm_handle_internal_error(env, run);
        default:
            DPRINTF("kvm_arch_handle_exit\n");
            ret = kvm_arch_handle_exit(env, run);

        cpu_dump_state(env, stderr, fprintf, CPU_DUMP_CODE);
        vm_stop(RUN_STATE_INTERNAL_ERROR);

    env->exit_request = 0;
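
/* Thin variadic wrappers that issue an ioctl against the /dev/kvm fd,
 * the VM fd, or a vcpu fd, respectively. */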
int kvm_ioctl(KVMState *s, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(s->fd, type, arg);

int kvm_vm_ioctl(KVMState *s, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(s->vmfd, type, arg);

int kvm_vcpu_ioctl(CPUArchState *env, int type, ...)
    arg = va_arg(ap, void *);
    ret = ioctl(env->kvm_fd, type, arg);
int kvm_has_sync_mmu(void)
    return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);

int kvm_has_vcpu_events(void)
    return kvm_state->vcpu_events;

int kvm_has_robust_singlestep(void)
    return kvm_state->robust_singlestep;

int kvm_has_debugregs(void)
    return kvm_state->debugregs;

int kvm_has_xsave(void)
    return kvm_state->xsave;

int kvm_has_xcrs(void)
    return kvm_state->xcrs;

int kvm_has_pit_state2(void)
    return kvm_state->pit_state2;

int kvm_has_many_ioeventfds(void)
    if (!kvm_enabled()) {
    return kvm_state->many_ioeventfds;

int kvm_has_gsi_routing(void)
#ifdef KVM_CAP_IRQ_ROUTING
    return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);

int kvm_allows_irq0_override(void)
    return !kvm_irqchip_in_kernel() || kvm_has_gsi_routing();
void kvm_setup_guest_memory(void *start, size_t size)
    if (!kvm_has_sync_mmu()) {
        int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);

            perror("qemu_madvise");
            fprintf(stderr,
                    "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUArchState *env,
    struct kvm_sw_breakpoint *bp;

    QTAILQ_FOREACH(bp, &env->kvm_state->kvm_sw_breakpoints, entry) {

int kvm_sw_breakpoints_active(CPUArchState *env)
    return !QTAILQ_EMPTY(&env->kvm_state->kvm_sw_breakpoints);
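
/* KVM_SET_GUEST_DEBUG has to be issued on the vcpu thread; the ioctl
 * arguments travel to it through this struct via run_on_cpu(). */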
struct kvm_set_guest_debug_data {
    struct kvm_guest_debug dbg;

static void kvm_invoke_set_guest_debug(void *data)
    struct kvm_set_guest_debug_data *dbg_data = data;
    CPUArchState *env = dbg_data->env;

    dbg_data->err = kvm_vcpu_ioctl(env, KVM_SET_GUEST_DEBUG, &dbg_data->dbg);
int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
    struct kvm_set_guest_debug_data data;

    data.dbg.control = reinject_trap;

    if (env->singlestep_enabled) {
        data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
    }
    kvm_arch_update_guest_debug(env, &data.dbg);

    run_on_cpu(env, kvm_invoke_set_guest_debug, &data);
int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
    struct kvm_sw_breakpoint *bp;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);

        bp = g_malloc(sizeof(struct kvm_sw_breakpoint));

        err = kvm_arch_insert_sw_breakpoint(current_env, bp);

        QTAILQ_INSERT_HEAD(&current_env->kvm_state->kvm_sw_breakpoints,
        err = kvm_arch_insert_hw_breakpoint(addr, len, type);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)
    struct kvm_sw_breakpoint *bp;

    if (type == GDB_BREAKPOINT_SW) {
        bp = kvm_find_sw_breakpoint(current_env, addr);

        if (bp->use_count > 1) {

        err = kvm_arch_remove_sw_breakpoint(current_env, bp);

        QTAILQ_REMOVE(&current_env->kvm_state->kvm_sw_breakpoints, bp, entry);

        err = kvm_arch_remove_hw_breakpoint(addr, len, type);

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        err = kvm_update_guest_debug(env, 0);
void kvm_remove_all_breakpoints(CPUArchState *current_env)
    struct kvm_sw_breakpoint *bp, *next;
    KVMState *s = current_env->kvm_state;

    QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
        if (kvm_arch_remove_sw_breakpoint(current_env, bp) != 0) {
            /* Try harder to find a CPU that currently sees the breakpoint. */
            for (env = first_cpu; env != NULL; env = env->next_cpu) {
                if (kvm_arch_remove_sw_breakpoint(env, bp) == 0) {

    kvm_arch_remove_all_hw_breakpoints();

    for (env = first_cpu; env != NULL; env = env->next_cpu) {
        kvm_update_guest_debug(env, 0);
#else /* !KVM_CAP_SET_GUEST_DEBUG */

int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)

int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)

int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
                          target_ulong len, int type)

void kvm_remove_all_breakpoints(CPUArchState *current_env)

#endif /* !KVM_CAP_SET_GUEST_DEBUG */
int kvm_set_signal_mask(CPUArchState *env, const sigset_t *sigset)
    struct kvm_signal_mask *sigmask;

        return kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, NULL);

    sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));

    memcpy(sigmask->sigset, sigset, sizeof(*sigset));
    r = kvm_vcpu_ioctl(env, KVM_SET_SIGNAL_MASK, sigmask);
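
/* Bind (assign) or unbind (!assign) an eventfd to a guest MMIO word:
 * guest writes of val to addr then signal fd without a heavyweight exit
 * to userspace. */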
int kvm_set_ioeventfd_mmio_long(int fd, uint32_t addr, uint32_t val, bool assign)
    struct kvm_ioeventfd iofd;

    iofd.datamatch = val;
    iofd.flags = KVM_IOEVENTFD_FLAG_DATAMATCH;

    if (!kvm_enabled()) {

    if (!assign) {
        iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;

    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,

    if (!kvm_enabled()) {

    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;

    r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
int kvm_on_sigbus_vcpu(CPUArchState *env, int code, void *addr)
    return kvm_arch_on_sigbus_vcpu(env, code, addr);

int kvm_on_sigbus(int code, void *addr)
    return kvm_arch_on_sigbus(code, addr);