4 * Copyright IBM, Corp. 2008
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
16 #include <sys/types.h>
17 #include <sys/ioctl.h>
21 #include <linux/kvm.h>
23 #include "qemu-common.h"
24 #include "qemu/atomic.h"
25 #include "qemu/option.h"
26 #include "qemu/config-file.h"
27 #include "sysemu/sysemu.h"
28 #include "sysemu/accel.h"
30 #include "hw/pci/msi.h"
31 #include "hw/s390x/adapter.h"
32 #include "exec/gdbstub.h"
33 #include "sysemu/kvm.h"
34 #include "qemu/bswap.h"
35 #include "exec/memory.h"
36 #include "exec/ram_addr.h"
37 #include "exec/address-spaces.h"
38 #include "qemu/event_notifier.h"
41 #include "hw/boards.h"
43 /* This check must be after config-host.h is included */
45 #include <sys/eventfd.h>
48 /* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
49 #define PAGE_SIZE TARGET_PAGE_SIZE
54 #define DPRINTF(fmt, ...) \
55 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
57 #define DPRINTF(fmt, ...) \
61 #define KVM_MSI_HASHTAB_SIZE 256
63 typedef struct KVMSlot
66 ram_addr_t memory_size;
72 typedef struct kvm_dirty_log KVMDirtyLog;
81 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
82 bool coalesced_flush_in_progress;
83 int broken_set_mem_region;
86 int robust_singlestep;
88 #ifdef KVM_CAP_SET_GUEST_DEBUG
89 struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
95 /* The man page (and POSIX) say ioctl numbers are signed int, but
96 * they're not. Linux, glibc and *BSD all treat ioctl numbers as
97 * unsigned, and treating them as signed here can break things */
98 unsigned irq_set_ioctl;
99 unsigned int sigmask_len;
100 #ifdef KVM_CAP_IRQ_ROUTING
101 struct kvm_irq_routing *irq_routes;
102 int nr_allocated_irq_routes;
103 uint32_t *used_gsi_bitmap;
104 unsigned int gsi_count;
105 QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
110 #define TYPE_KVM_ACCEL ACCEL_CLASS_NAME("kvm")
113 bool kvm_kernel_irqchip;
114 bool kvm_async_interrupts_allowed;
115 bool kvm_halt_in_kernel_allowed;
116 bool kvm_eventfds_allowed;
117 bool kvm_irqfds_allowed;
118 bool kvm_msi_via_irqfd_allowed;
119 bool kvm_gsi_routing_allowed;
120 bool kvm_gsi_direct_mapping;
122 bool kvm_readonly_mem_allowed;
124 static const KVMCapabilityInfo kvm_required_capabilities[] = {
125 KVM_CAP_INFO(USER_MEMORY),
126 KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
130 static KVMSlot *kvm_alloc_slot(KVMState *s)
134 for (i = 0; i < s->nr_slots; i++) {
135 if (s->slots[i].memory_size == 0) {
140 fprintf(stderr, "%s: no free slot available\n", __func__);
144 static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
150 for (i = 0; i < s->nr_slots; i++) {
151 KVMSlot *mem = &s->slots[i];
153 if (start_addr == mem->start_addr &&
154 end_addr == mem->start_addr + mem->memory_size) {
163 * Find overlapping slot with lowest start address
165 static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
169 KVMSlot *found = NULL;
172 for (i = 0; i < s->nr_slots; i++) {
173 KVMSlot *mem = &s->slots[i];
175 if (mem->memory_size == 0 ||
176 (found && found->start_addr < mem->start_addr)) {
180 if (end_addr > mem->start_addr &&
181 start_addr < mem->start_addr + mem->memory_size) {
189 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
194 for (i = 0; i < s->nr_slots; i++) {
195 KVMSlot *mem = &s->slots[i];
197 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
198 *phys_addr = mem->start_addr + (ram - mem->ram);
206 static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
208 struct kvm_userspace_memory_region mem;
210 mem.slot = slot->slot;
211 mem.guest_phys_addr = slot->start_addr;
212 mem.userspace_addr = (unsigned long)slot->ram;
213 mem.flags = slot->flags;
214 if (s->migration_log) {
215 mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
218 if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
219 /* Set the slot size to 0 before setting the slot to the desired
220 * value. This is needed based on KVM commit 75d61fbc. */
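/* (KVM will not flip an existing slot to read-only in place, so the
 * slot is first deleted by registering it with size 0 and then
 * re-registered below with its final size and flags.) */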
222 kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
224 mem.memory_size = slot->memory_size;
225 return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
228 int kvm_init_vcpu(CPUState *cpu)
230 KVMState *s = kvm_state;
234 DPRINTF("kvm_init_vcpu\n");
236 ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
238 DPRINTF("kvm_create_vcpu failed\n");
244 cpu->kvm_vcpu_dirty = true;
246 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
249 DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
253 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
255 if (cpu->kvm_run == MAP_FAILED) {
257 DPRINTF("mmap'ing vcpu state failed\n");
261 if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
262 s->coalesced_mmio_ring =
263 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
266 ret = kvm_arch_init_vcpu(cpu);
272 * dirty pages logging control
275 static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
278 flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
279 if (readonly && kvm_readonly_mem_allowed) {
280 flags |= KVM_MEM_READONLY;
285 static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
287 KVMState *s = kvm_state;
288 int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
291 old_flags = mem->flags;
293 flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
296 /* If nothing effectively changed, there is no need to issue the ioctl */
297 if (s->migration_log) {
298 flags |= KVM_MEM_LOG_DIRTY_PAGES;
301 if (flags == old_flags) {
305 return kvm_set_user_memory_region(s, mem);
308 static int kvm_dirty_pages_log_change(hwaddr phys_addr,
309 ram_addr_t size, bool log_dirty)
311 KVMState *s = kvm_state;
312 KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
315 fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
316 TARGET_FMT_plx "\n", __func__, phys_addr,
317 (hwaddr)(phys_addr + size - 1));
320 return kvm_slot_dirty_pages_log_change(mem, log_dirty);
323 static void kvm_log_start(MemoryListener *listener,
324 MemoryRegionSection *section)
328 r = kvm_dirty_pages_log_change(section->offset_within_address_space,
329 int128_get64(section->size), true);
335 static void kvm_log_stop(MemoryListener *listener,
336 MemoryRegionSection *section)
340 r = kvm_dirty_pages_log_change(section->offset_within_address_space,
341 int128_get64(section->size), false);
347 static int kvm_set_migration_log(int enable)
349 KVMState *s = kvm_state;
353 s->migration_log = enable;
355 for (i = 0; i < s->nr_slots; i++) {
358 if (!mem->memory_size) {
361 if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
364 err = kvm_set_user_memory_region(s, mem);
372 /* get kvm's dirty pages bitmap and update qemu's */
373 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
374 unsigned long *bitmap)
376 ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
377 ram_addr_t pages = int128_get64(section->size) / getpagesize();
379 cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
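/* Round x up to the next multiple of y (y must be a power of two). */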
383 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
386 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
387 * This function updates qemu's dirty bitmap using
388 * memory_region_set_dirty(). This means all bits are set to dirty.
391 * @start_addr: start of logged region.
392 * @end_addr: end of logged region.
394 static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
396 KVMState *s = kvm_state;
397 unsigned long size, allocated_size = 0;
401 hwaddr start_addr = section->offset_within_address_space;
402 hwaddr end_addr = start_addr + int128_get64(section->size);
404 d.dirty_bitmap = NULL;
405 while (start_addr < end_addr) {
406 mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
411 /* XXX bad kernel interface alert
412 * For the dirty bitmap, the kernel allocates an array sized in
413 * multiples of bits-per-long. But when the kernel is 64-bit and
414 * userspace is 32-bit, userspace cannot align to the same
415 * bits-per-long, since sizeof(long) differs between kernel and
416 * user space. Userspace would then provide a buffer that may be
417 * 4 bytes smaller than the one the kernel uses, resulting in
418 * userspace memory corruption (which valgrind cannot detect
419 * either, in most cases).
420 * So for now, align to 64 instead of HOST_LONG_BITS here, in the
421 * hope that sizeof(long) won't become >8 any time soon.
423 size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
424 /*HOST_LONG_BITS*/ 64) / 8;
425 if (!d.dirty_bitmap) {
426 d.dirty_bitmap = g_malloc(size);
427 } else if (size > allocated_size) {
428 d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
430 allocated_size = size;
431 memset(d.dirty_bitmap, 0, allocated_size);
435 if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
436 DPRINTF("ioctl failed %d\n", errno);
441 kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
442 start_addr = mem->start_addr + mem->memory_size;
444 g_free(d.dirty_bitmap);
449 static void kvm_coalesce_mmio_region(MemoryListener *listener,
450 MemoryRegionSection *section,
451 hwaddr start, hwaddr size)
453 KVMState *s = kvm_state;
455 if (s->coalesced_mmio) {
456 struct kvm_coalesced_mmio_zone zone;
462 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
466 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
467 MemoryRegionSection *section,
468 hwaddr start, hwaddr size)
470 KVMState *s = kvm_state;
472 if (s->coalesced_mmio) {
473 struct kvm_coalesced_mmio_zone zone;
479 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
483 int kvm_check_extension(KVMState *s, unsigned int extension)
487 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
495 int kvm_vm_check_extension(KVMState *s, unsigned int extension)
499 ret = kvm_vm_ioctl(s, KVM_CHECK_EXTENSION, extension);
501 /* VM-wide version not implemented, use the global one instead */
502 ret = kvm_check_extension(s, extension);
508 static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
509 bool assign, uint32_t size, bool datamatch)
512 struct kvm_ioeventfd iofd;
514 iofd.datamatch = datamatch ? val : 0;
520 if (!kvm_enabled()) {
525 iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
528 iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
531 ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
540 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
541 bool assign, uint32_t size, bool datamatch)
543 struct kvm_ioeventfd kick = {
544 .datamatch = datamatch ? val : 0,
546 .flags = KVM_IOEVENTFD_FLAG_PIO,
551 if (!kvm_enabled()) {
555 kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
558 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
560 r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
568 static int kvm_check_many_ioeventfds(void)
570 /* Userspace can use ioeventfd for I/O notification. This requires a host
571 * that supports eventfd(2) and an I/O thread; since eventfd does not
572 * support SIGIO it cannot interrupt the vcpu.
574 * Older kernels have a 6-device limit on the KVM I/O bus. Find out so we
575 * can avoid creating too many ioeventfds.
577 #if defined(CONFIG_EVENTFD)
580 for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
581 ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
582 if (ioeventfds[i] < 0) {
585 ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
587 close(ioeventfds[i]);
592 /* Decide whether many devices are supported or not */
593 ret = i == ARRAY_SIZE(ioeventfds);
596 kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
597 close(ioeventfds[i]);
605 static const KVMCapabilityInfo *
606 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
609 if (!kvm_check_extension(s, list->value)) {
617 static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
619 KVMState *s = kvm_state;
622 MemoryRegion *mr = section->mr;
623 bool log_dirty = memory_region_is_logging(mr);
624 bool writeable = !mr->readonly && !mr->rom_device;
625 bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
626 hwaddr start_addr = section->offset_within_address_space;
627 ram_addr_t size = int128_get64(section->size);
631 /* kvm works in page-size chunks, but the function may be called
632 with a sub-page size and an unaligned start address. */
633 delta = TARGET_PAGE_ALIGN(size) - size;
639 size &= TARGET_PAGE_MASK;
640 if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
644 if (!memory_region_is_ram(mr)) {
645 if (writeable || !kvm_readonly_mem_allowed) {
647 } else if (!mr->romd_mode) {
648 /* If the memory device is not in romd_mode, then we actually want
649 * to remove the kvm memory slot so all accesses will trap. */
654 ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
657 mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
662 if (add && start_addr >= mem->start_addr &&
663 (start_addr + size <= mem->start_addr + mem->memory_size) &&
664 (ram - start_addr == mem->ram - mem->start_addr)) {
665 /* The new slot fits into the existing one and comes with
666 * identical parameters - update flags and done. */
667 kvm_slot_dirty_pages_log_change(mem, log_dirty);
673 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
674 kvm_physical_sync_dirty_bitmap(section);
677 /* unregister the overlapping slot */
678 mem->memory_size = 0;
679 err = kvm_set_user_memory_region(s, mem);
681 fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
682 __func__, strerror(-err));
686 /* Workaround for older KVM versions: we can't join slots, not even by
687 * unregistering the previous ones and then registering the larger
688 * slot. We have to maintain the existing fragmentation. Sigh.
690 * This workaround assumes that the new slot starts at the same
691 * address as the first existing one. If not or if some overlapping
692 * slot comes around later, we will fail (not seen in practice so far)
693 * - and actually require a recent KVM version. */
694 if (s->broken_set_mem_region &&
695 old.start_addr == start_addr && old.memory_size < size && add) {
696 mem = kvm_alloc_slot(s);
697 mem->memory_size = old.memory_size;
698 mem->start_addr = old.start_addr;
700 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
702 err = kvm_set_user_memory_region(s, mem);
704 fprintf(stderr, "%s: error updating slot: %s\n", __func__,
709 start_addr += old.memory_size;
710 ram += old.memory_size;
711 size -= old.memory_size;
715 /* register prefix slot */
716 if (old.start_addr < start_addr) {
717 mem = kvm_alloc_slot(s);
718 mem->memory_size = start_addr - old.start_addr;
719 mem->start_addr = old.start_addr;
721 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
723 err = kvm_set_user_memory_region(s, mem);
725 fprintf(stderr, "%s: error registering prefix slot: %s\n",
726 __func__, strerror(-err));
728 fprintf(stderr, "%s: This is probably because your kernel's " \
729 "PAGE_SIZE is too big. Please try to use 4k " \
730 "PAGE_SIZE!\n", __func__);
736 /* register suffix slot */
737 if (old.start_addr + old.memory_size > start_addr + size) {
738 ram_addr_t size_delta;
740 mem = kvm_alloc_slot(s);
741 mem->start_addr = start_addr + size;
742 size_delta = mem->start_addr - old.start_addr;
743 mem->memory_size = old.memory_size - size_delta;
744 mem->ram = old.ram + size_delta;
745 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
747 err = kvm_set_user_memory_region(s, mem);
749 fprintf(stderr, "%s: error registering suffix slot: %s\n",
750 __func__, strerror(-err));
756 /* in case the KVM bug workaround already "consumed" the new slot */
763 mem = kvm_alloc_slot(s);
764 mem->memory_size = size;
765 mem->start_addr = start_addr;
767 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
769 err = kvm_set_user_memory_region(s, mem);
771 fprintf(stderr, "%s: error registering slot: %s\n", __func__,
777 static void kvm_region_add(MemoryListener *listener,
778 MemoryRegionSection *section)
780 memory_region_ref(section->mr);
781 kvm_set_phys_mem(section, true);
784 static void kvm_region_del(MemoryListener *listener,
785 MemoryRegionSection *section)
787 kvm_set_phys_mem(section, false);
788 memory_region_unref(section->mr);
791 static void kvm_log_sync(MemoryListener *listener,
792 MemoryRegionSection *section)
796 r = kvm_physical_sync_dirty_bitmap(section);
802 static void kvm_log_global_start(struct MemoryListener *listener)
806 r = kvm_set_migration_log(1);
810 static void kvm_log_global_stop(struct MemoryListener *listener)
814 r = kvm_set_migration_log(0);
818 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
819 MemoryRegionSection *section,
820 bool match_data, uint64_t data,
823 int fd = event_notifier_get_fd(e);
826 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
827 data, true, int128_get64(section->size),
830 fprintf(stderr, "%s: error adding ioeventfd: %s\n",
831 __func__, strerror(-r));
836 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
837 MemoryRegionSection *section,
838 bool match_data, uint64_t data,
841 int fd = event_notifier_get_fd(e);
844 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
845 data, false, int128_get64(section->size),
852 static void kvm_io_ioeventfd_add(MemoryListener *listener,
853 MemoryRegionSection *section,
854 bool match_data, uint64_t data,
857 int fd = event_notifier_get_fd(e);
860 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
861 data, true, int128_get64(section->size),
864 fprintf(stderr, "%s: error adding ioeventfd: %s\n",
865 __func__, strerror(-r));
870 static void kvm_io_ioeventfd_del(MemoryListener *listener,
871 MemoryRegionSection *section,
872 bool match_data, uint64_t data,
876 int fd = event_notifier_get_fd(e);
879 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
880 data, false, int128_get64(section->size),
887 static MemoryListener kvm_memory_listener = {
888 .region_add = kvm_region_add,
889 .region_del = kvm_region_del,
890 .log_start = kvm_log_start,
891 .log_stop = kvm_log_stop,
892 .log_sync = kvm_log_sync,
893 .log_global_start = kvm_log_global_start,
894 .log_global_stop = kvm_log_global_stop,
895 .eventfd_add = kvm_mem_ioeventfd_add,
896 .eventfd_del = kvm_mem_ioeventfd_del,
897 .coalesced_mmio_add = kvm_coalesce_mmio_region,
898 .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
902 static MemoryListener kvm_io_listener = {
903 .eventfd_add = kvm_io_ioeventfd_add,
904 .eventfd_del = kvm_io_ioeventfd_del,
908 static void kvm_handle_interrupt(CPUState *cpu, int mask)
910 cpu->interrupt_request |= mask;
912 if (!qemu_cpu_is_self(cpu)) {
917 int kvm_set_irq(KVMState *s, int irq, int level)
919 struct kvm_irq_level event;
922 assert(kvm_async_interrupts_enabled());
926 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
928 perror("kvm_set_irq");
932 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
935 #ifdef KVM_CAP_IRQ_ROUTING
936 typedef struct KVMMSIRoute {
937 struct kvm_irq_routing_entry kroute;
938 QTAILQ_ENTRY(KVMMSIRoute) entry;
941 static void set_gsi(KVMState *s, unsigned int gsi)
943 s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
946 static void clear_gsi(KVMState *s, unsigned int gsi)
948 s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
951 void kvm_init_irq_routing(KVMState *s)
955 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING) - 1;
957 unsigned int gsi_bits, i;
959 /* Round up so we can search ints using ffs */
960 gsi_bits = ALIGN(gsi_count, 32);
961 s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
962 s->gsi_count = gsi_count;
964 /* Mark any over-allocated bits as already in use */
965 for (i = gsi_count; i < gsi_bits; i++) {
970 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
971 s->nr_allocated_irq_routes = 0;
973 if (!s->direct_msi) {
974 for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
975 QTAILQ_INIT(&s->msi_hashtab[i]);
979 kvm_arch_init_irq_routing(s);
982 void kvm_irqchip_commit_routes(KVMState *s)
986 s->irq_routes->flags = 0;
987 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
991 static void kvm_add_routing_entry(KVMState *s,
992 struct kvm_irq_routing_entry *entry)
994 struct kvm_irq_routing_entry *new;
997 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
998 n = s->nr_allocated_irq_routes * 2;
1002 size = sizeof(struct kvm_irq_routing);
1003 size += n * sizeof(*new);
1004 s->irq_routes = g_realloc(s->irq_routes, size);
1005 s->nr_allocated_irq_routes = n;
1007 n = s->irq_routes->nr++;
1008 new = &s->irq_routes->entries[n];
1012 set_gsi(s, entry->gsi);
1015 static int kvm_update_routing_entry(KVMState *s,
1016 struct kvm_irq_routing_entry *new_entry)
1018 struct kvm_irq_routing_entry *entry;
1021 for (n = 0; n < s->irq_routes->nr; n++) {
1022 entry = &s->irq_routes->entries[n];
1023 if (entry->gsi != new_entry->gsi) {
1027 if (!memcmp(entry, new_entry, sizeof(*entry))) {
1031 *entry = *new_entry;
1033 kvm_irqchip_commit_routes(s);
1041 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1043 struct kvm_irq_routing_entry e = {};
1045 assert(pin < s->gsi_count);
1048 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1050 e.u.irqchip.irqchip = irqchip;
1051 e.u.irqchip.pin = pin;
1052 kvm_add_routing_entry(s, &e);
1055 void kvm_irqchip_release_virq(KVMState *s, int virq)
1057 struct kvm_irq_routing_entry *e;
1060 if (kvm_gsi_direct_mapping()) {
1064 for (i = 0; i < s->irq_routes->nr; i++) {
1065 e = &s->irq_routes->entries[i];
1066 if (e->gsi == virq) {
1067 s->irq_routes->nr--;
1068 *e = s->irq_routes->entries[s->irq_routes->nr];
1074 static unsigned int kvm_hash_msi(uint32_t data)
1076 /* This is optimized for IA32 MSI layout. However, no other arch shall
1077 * repeat the mistake of not providing a direct MSI injection API. */
1081 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1083 KVMMSIRoute *route, *next;
1086 for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1087 QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1088 kvm_irqchip_release_virq(s, route->kroute.gsi);
1089 QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1095 static int kvm_irqchip_get_virq(KVMState *s)
1097 uint32_t *word = s->used_gsi_bitmap;
1098 int max_words = ALIGN(s->gsi_count, 32) / 32;
1103 /* Return the lowest unused GSI in the bitmap */
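/* ffs() returns the 1-based index of the lowest set bit, or 0 if no bit
 * is set, hence the bit - 1 when converting back to a GSI number. */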
1104 for (i = 0; i < max_words; i++) {
1105 bit = ffs(~word[i]);
1110 return bit - 1 + i * 32;
1112 if (!s->direct_msi && retry) {
1114 kvm_flush_dynamic_msi_routes(s);
1121 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1123 unsigned int hash = kvm_hash_msi(msg.data);
1126 QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1127 if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1128 route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1129 route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1136 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1141 if (s->direct_msi) {
1142 msi.address_lo = (uint32_t)msg.address;
1143 msi.address_hi = msg.address >> 32;
1144 msi.data = le32_to_cpu(msg.data);
1146 memset(msi.pad, 0, sizeof(msi.pad));
1148 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
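/* No direct MSI injection: fall back to allocating a routing entry for
 * this message and caching it in msi_hashtab for reuse. */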
1151 route = kvm_lookup_msi_route(s, msg);
1155 virq = kvm_irqchip_get_virq(s);
1160 route = g_malloc0(sizeof(KVMMSIRoute));
1161 route->kroute.gsi = virq;
1162 route->kroute.type = KVM_IRQ_ROUTING_MSI;
1163 route->kroute.flags = 0;
1164 route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1165 route->kroute.u.msi.address_hi = msg.address >> 32;
1166 route->kroute.u.msi.data = le32_to_cpu(msg.data);
1168 kvm_add_routing_entry(s, &route->kroute);
1169 kvm_irqchip_commit_routes(s);
1171 QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1175 assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1177 return kvm_set_irq(s, route->kroute.gsi, 1);
1180 int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
1182 struct kvm_irq_routing_entry kroute = {};
1185 if (kvm_gsi_direct_mapping()) {
1186 return msg.data & 0xffff;
1189 if (!kvm_gsi_routing_enabled()) {
1193 virq = kvm_irqchip_get_virq(s);
1199 kroute.type = KVM_IRQ_ROUTING_MSI;
1201 kroute.u.msi.address_lo = (uint32_t)msg.address;
1202 kroute.u.msi.address_hi = msg.address >> 32;
1203 kroute.u.msi.data = le32_to_cpu(msg.data);
1205 kvm_add_routing_entry(s, &kroute);
1206 kvm_irqchip_commit_routes(s);
1211 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1213 struct kvm_irq_routing_entry kroute = {};
1215 if (kvm_gsi_direct_mapping()) {
1219 if (!kvm_irqchip_in_kernel()) {
1224 kroute.type = KVM_IRQ_ROUTING_MSI;
1226 kroute.u.msi.address_lo = (uint32_t)msg.address;
1227 kroute.u.msi.address_hi = msg.address >> 32;
1228 kroute.u.msi.data = le32_to_cpu(msg.data);
1230 return kvm_update_routing_entry(s, &kroute);
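/* An irqfd binds an eventfd to a GSI so that signalling the eventfd
 * injects the interrupt entirely inside the kernel. For level-triggered
 * interrupts, the resample eventfd is signalled on guest EOI so the
 * device model can re-assert the line if it is still pending. */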
1233 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int rfd, int virq,
1236 struct kvm_irqfd irqfd = {
1239 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1243 irqfd.flags |= KVM_IRQFD_FLAG_RESAMPLE;
1244 irqfd.resamplefd = rfd;
1247 if (!kvm_irqfds_enabled()) {
1251 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1254 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1256 struct kvm_irq_routing_entry kroute;
1259 if (!kvm_gsi_routing_enabled()) {
1263 virq = kvm_irqchip_get_virq(s);
1269 kroute.type = KVM_IRQ_ROUTING_S390_ADAPTER;
1271 kroute.u.adapter.summary_addr = adapter->summary_addr;
1272 kroute.u.adapter.ind_addr = adapter->ind_addr;
1273 kroute.u.adapter.summary_offset = adapter->summary_offset;
1274 kroute.u.adapter.ind_offset = adapter->ind_offset;
1275 kroute.u.adapter.adapter_id = adapter->adapter_id;
1277 kvm_add_routing_entry(s, &kroute);
1278 kvm_irqchip_commit_routes(s);
1283 #else /* !KVM_CAP_IRQ_ROUTING */
1285 void kvm_init_irq_routing(KVMState *s)
1289 void kvm_irqchip_release_virq(KVMState *s, int virq)
1293 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1298 int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
1303 int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
1308 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1313 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1317 #endif /* !KVM_CAP_IRQ_ROUTING */
1319 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n,
1320 EventNotifier *rn, int virq)
1322 return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n),
1323 rn ? event_notifier_get_fd(rn) : -1, virq, true);
1326 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
1328 return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), -1, virq,
1332 static int kvm_irqchip_create(KVMState *s)
1336 if (!qemu_opt_get_bool(qemu_get_machine_opts(), "kernel_irqchip", true) ||
1337 (!kvm_check_extension(s, KVM_CAP_IRQCHIP) &&
1338 (kvm_vm_enable_cap(s, KVM_CAP_S390_IRQCHIP, 0) < 0))) {
1342 /* First probe and see if there's an arch-specific hook to create the
1343 * in-kernel irqchip for us */
1344 ret = kvm_arch_irqchip_create(s);
1347 } else if (ret == 0) {
1348 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1350 fprintf(stderr, "Create kernel irqchip failed\n");
1355 kvm_kernel_irqchip = true;
1356 /* If we have an in-kernel IRQ chip then we must have asynchronous
1357 * interrupt delivery (though the reverse is not necessarily true)
1359 kvm_async_interrupts_allowed = true;
1360 kvm_halt_in_kernel_allowed = true;
1362 kvm_init_irq_routing(s);
1367 /* Find number of supported CPUs using the recommended
1368 * procedure from the kernel API documentation to cope with
1369 * older kernels that may be missing capabilities.
1371 static int kvm_recommended_vcpus(KVMState *s)
1373 int ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
1374 return (ret) ? ret : 4;
1377 static int kvm_max_vcpus(KVMState *s)
1379 int ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1380 return (ret) ? ret : kvm_recommended_vcpus(s);
1383 static int kvm_init(MachineClass *mc)
1385 static const char upgrade_note[] =
1386 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1387 "(see http://sourceforge.net/projects/kvm).\n";
1392 { "SMP", smp_cpus },
1393 { "hotpluggable", max_cpus },
1396 int soft_vcpus_limit, hard_vcpus_limit;
1398 const KVMCapabilityInfo *missing_cap;
1401 const char *kvm_type;
1403 s = g_malloc0(sizeof(KVMState));
1406 * On systems where the kernel can support different base page
1407 * sizes, the host page size may differ from TARGET_PAGE_SIZE,
1408 * even with KVM. TARGET_PAGE_SIZE is, however, assumed to be the
1409 * minimum page size for the system.
1411 assert(TARGET_PAGE_SIZE <= getpagesize());
1416 #ifdef KVM_CAP_SET_GUEST_DEBUG
1417 QTAILQ_INIT(&s->kvm_sw_breakpoints);
1420 s->fd = qemu_open("/dev/kvm", O_RDWR);
1422 fprintf(stderr, "Could not access KVM kernel module: %m\n");
1427 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1428 if (ret < KVM_API_VERSION) {
1432 fprintf(stderr, "kvm version too old\n");
1436 if (ret > KVM_API_VERSION) {
1438 fprintf(stderr, "kvm version not supported\n");
1442 s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1444 /* If unspecified, use the default value */
1449 s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
1451 for (i = 0; i < s->nr_slots; i++) {
1452 s->slots[i].slot = i;
1455 /* check the vcpu limits */
1456 soft_vcpus_limit = kvm_recommended_vcpus(s);
1457 hard_vcpus_limit = kvm_max_vcpus(s);
1460 if (nc->num > soft_vcpus_limit) {
1462 "Warning: Number of %s cpus requested (%d) exceeds "
1463 "the recommended cpus supported by KVM (%d)\n",
1464 nc->name, nc->num, soft_vcpus_limit);
1466 if (nc->num > hard_vcpus_limit) {
1467 fprintf(stderr, "Number of %s cpus requested (%d) exceeds "
1468 "the maximum cpus supported by KVM (%d)\n",
1469 nc->name, nc->num, hard_vcpus_limit);
1476 kvm_type = qemu_opt_get(qemu_get_machine_opts(), "kvm-type");
1478 type = mc->kvm_type(kvm_type);
1479 } else if (kvm_type) {
1481 fprintf(stderr, "Invalid argument kvm-type=%s\n", kvm_type);
1486 ret = kvm_ioctl(s, KVM_CREATE_VM, type);
1487 } while (ret == -EINTR);
1490 fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
1494 fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
1495 "your host kernel command line\n");
1501 missing_cap = kvm_check_extension_list(s, kvm_required_capabilities);
1504 kvm_check_extension_list(s, kvm_arch_required_capabilities);
1508 fprintf(stderr, "kvm does not support %s\n%s",
1509 missing_cap->name, upgrade_note);
1513 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1515 s->broken_set_mem_region = 1;
1516 ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
1518 s->broken_set_mem_region = 0;
1521 #ifdef KVM_CAP_VCPU_EVENTS
1522 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1525 s->robust_singlestep =
1526 kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1528 #ifdef KVM_CAP_DEBUGREGS
1529 s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1532 #ifdef KVM_CAP_XSAVE
1533 s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1537 s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1540 #ifdef KVM_CAP_PIT_STATE2
1541 s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1544 #ifdef KVM_CAP_IRQ_ROUTING
1545 s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1548 s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1550 s->irq_set_ioctl = KVM_IRQ_LINE;
1551 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1552 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1555 #ifdef KVM_CAP_READONLY_MEM
1556 kvm_readonly_mem_allowed =
1557 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1560 kvm_eventfds_allowed =
1561 (kvm_check_extension(s, KVM_CAP_IOEVENTFD) > 0);
1563 ret = kvm_arch_init(s);
1568 ret = kvm_irqchip_create(s);
1574 memory_listener_register(&kvm_memory_listener, &address_space_memory);
1575 memory_listener_register(&kvm_io_listener, &address_space_io);
1577 s->many_ioeventfds = kvm_check_many_ioeventfds();
1579 cpu_interrupt_handler = kvm_handle_interrupt;
1597 void kvm_set_sigmask_len(KVMState *s, unsigned int sigmask_len)
1599 s->sigmask_len = sigmask_len;
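/* A single KVM_EXIT_IO can describe several back-to-back accesses to the
 * same port: count accesses of size bytes each, with the data placed in
 * the shared kvm_run mapping at io.data_offset. */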
1602 static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
1606 uint8_t *ptr = data;
1608 for (i = 0; i < count; i++) {
1609 address_space_rw(&address_space_io, port, ptr, size,
1610 direction == KVM_EXIT_IO_OUT);
1615 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
1617 fprintf(stderr, "KVM internal error. Suberror: %d\n",
1618 run->internal.suberror);
1620 if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
1623 for (i = 0; i < run->internal.ndata; ++i) {
1624 fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
1625 i, (uint64_t)run->internal.data[i]);
1628 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
1629 fprintf(stderr, "emulation failure\n");
1630 if (!kvm_arch_stop_on_emulation_error(cpu)) {
1631 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1632 return EXCP_INTERRUPT;
1635 /* FIXME: Should trigger a QMP message to let management know
1636 * something went wrong.
1641 void kvm_flush_coalesced_mmio_buffer(void)
1643 KVMState *s = kvm_state;
1645 if (s->coalesced_flush_in_progress) {
1649 s->coalesced_flush_in_progress = true;
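/* The kernel appends entries at ring->last; consume from ring->first
 * until the ring is empty, replaying each buffered write through the
 * memory API. */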
1651 if (s->coalesced_mmio_ring) {
1652 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
1653 while (ring->first != ring->last) {
1654 struct kvm_coalesced_mmio *ent;
1656 ent = &ring->coalesced_mmio[ring->first];
1658 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
1660 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
1664 s->coalesced_flush_in_progress = false;
1667 static void do_kvm_cpu_synchronize_state(void *arg)
1669 CPUState *cpu = arg;
1671 if (!cpu->kvm_vcpu_dirty) {
1672 kvm_arch_get_registers(cpu);
1673 cpu->kvm_vcpu_dirty = true;
1677 void kvm_cpu_synchronize_state(CPUState *cpu)
1679 if (!cpu->kvm_vcpu_dirty) {
1680 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
1684 static void do_kvm_cpu_synchronize_post_reset(void *arg)
1686 CPUState *cpu = arg;
1688 kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
1689 cpu->kvm_vcpu_dirty = false;
1692 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
1694 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_reset, cpu);
1697 static void do_kvm_cpu_synchronize_post_init(void *arg)
1699 CPUState *cpu = arg;
1701 kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
1702 cpu->kvm_vcpu_dirty = false;
1705 void kvm_cpu_synchronize_post_init(CPUState *cpu)
1707 run_on_cpu(cpu, do_kvm_cpu_synchronize_post_init, cpu);
1710 void kvm_cpu_clean_state(CPUState *cpu)
1712 cpu->kvm_vcpu_dirty = false;
1715 int kvm_cpu_exec(CPUState *cpu)
1717 struct kvm_run *run = cpu->kvm_run;
1720 DPRINTF("kvm_cpu_exec()\n");
1722 if (kvm_arch_process_async_events(cpu)) {
1723 cpu->exit_request = 0;
1728 if (cpu->kvm_vcpu_dirty) {
1729 kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
1730 cpu->kvm_vcpu_dirty = false;
1733 kvm_arch_pre_run(cpu, run);
1734 if (cpu->exit_request) {
1735 DPRINTF("interrupt exit requested\n");
1737 * KVM requires us to reenter the kernel after IO exits to complete
1738 * instruction emulation. This self-signal will ensure that we
1741 qemu_cpu_kick_self();
1743 qemu_mutex_unlock_iothread();
1745 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
1747 qemu_mutex_lock_iothread();
1748 kvm_arch_post_run(cpu, run);
1751 if (run_ret == -EINTR || run_ret == -EAGAIN) {
1752 DPRINTF("io window exit\n");
1753 ret = EXCP_INTERRUPT;
1756 fprintf(stderr, "error: kvm run failed %s\n",
1757 strerror(-run_ret));
1762 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
1763 switch (run->exit_reason) {
1765 DPRINTF("handle_io\n");
1766 kvm_handle_io(run->io.port,
1767 (uint8_t *)run + run->io.data_offset,
1774 DPRINTF("handle_mmio\n");
1775 cpu_physical_memory_rw(run->mmio.phys_addr,
1778 run->mmio.is_write);
1781 case KVM_EXIT_IRQ_WINDOW_OPEN:
1782 DPRINTF("irq_window_open\n");
1783 ret = EXCP_INTERRUPT;
1785 case KVM_EXIT_SHUTDOWN:
1786 DPRINTF("shutdown\n");
1787 qemu_system_reset_request();
1788 ret = EXCP_INTERRUPT;
1790 case KVM_EXIT_UNKNOWN:
1791 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
1792 (uint64_t)run->hw.hardware_exit_reason);
1795 case KVM_EXIT_INTERNAL_ERROR:
1796 ret = kvm_handle_internal_error(cpu, run);
1798 case KVM_EXIT_SYSTEM_EVENT:
1799 switch (run->system_event.type) {
1800 case KVM_SYSTEM_EVENT_SHUTDOWN:
1801 qemu_system_shutdown_request();
1802 ret = EXCP_INTERRUPT;
1804 case KVM_SYSTEM_EVENT_RESET:
1805 qemu_system_reset_request();
1806 ret = EXCP_INTERRUPT;
1809 DPRINTF("kvm_arch_handle_exit\n");
1810 ret = kvm_arch_handle_exit(cpu, run);
1815 DPRINTF("kvm_arch_handle_exit\n");
1816 ret = kvm_arch_handle_exit(cpu, run);
1822 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1823 vm_stop(RUN_STATE_INTERNAL_ERROR);
1826 cpu->exit_request = 0;
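/* The kvm_*_ioctl() wrappers below trace each call and return a negative
 * errno value on failure instead of the raw -1/errno libc convention. */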
1830 int kvm_ioctl(KVMState *s, int type, ...)
1837 arg = va_arg(ap, void *);
1840 trace_kvm_ioctl(type, arg);
1841 ret = ioctl(s->fd, type, arg);
1848 int kvm_vm_ioctl(KVMState *s, int type, ...)
1855 arg = va_arg(ap, void *);
1858 trace_kvm_vm_ioctl(type, arg);
1859 ret = ioctl(s->vmfd, type, arg);
1866 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
1873 arg = va_arg(ap, void *);
1876 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
1877 ret = ioctl(cpu->kvm_fd, type, arg);
1884 int kvm_device_ioctl(int fd, int type, ...)
1891 arg = va_arg(ap, void *);
1894 trace_kvm_device_ioctl(fd, type, arg);
1895 ret = ioctl(fd, type, arg);
1902 int kvm_has_sync_mmu(void)
1904 return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1907 int kvm_has_vcpu_events(void)
1909 return kvm_state->vcpu_events;
1912 int kvm_has_robust_singlestep(void)
1914 return kvm_state->robust_singlestep;
1917 int kvm_has_debugregs(void)
1919 return kvm_state->debugregs;
1922 int kvm_has_xsave(void)
1924 return kvm_state->xsave;
1927 int kvm_has_xcrs(void)
1929 return kvm_state->xcrs;
1932 int kvm_has_pit_state2(void)
1934 return kvm_state->pit_state2;
1937 int kvm_has_many_ioeventfds(void)
1939 if (!kvm_enabled()) {
1942 return kvm_state->many_ioeventfds;
1945 int kvm_has_gsi_routing(void)
1947 #ifdef KVM_CAP_IRQ_ROUTING
1948 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
1954 int kvm_has_intx_set_mask(void)
1956 return kvm_state->intx_set_mask;
1959 void kvm_setup_guest_memory(void *start, size_t size)
1961 if (!kvm_has_sync_mmu()) {
1962 int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);
1965 perror("qemu_madvise");
1967 "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
1973 #ifdef KVM_CAP_SET_GUEST_DEBUG
1974 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
1977 struct kvm_sw_breakpoint *bp;
1979 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
1987 int kvm_sw_breakpoints_active(CPUState *cpu)
1989 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
1992 struct kvm_set_guest_debug_data {
1993 struct kvm_guest_debug dbg;
1998 static void kvm_invoke_set_guest_debug(void *data)
2000 struct kvm_set_guest_debug_data *dbg_data = data;
2002 dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
2006 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2008 struct kvm_set_guest_debug_data data;
2010 data.dbg.control = reinject_trap;
2012 if (cpu->singlestep_enabled) {
2013 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
2015 kvm_arch_update_guest_debug(cpu, &data.dbg);
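/* vcpu ioctls must be issued from the thread that runs the vcpu, so the
 * KVM_SET_GUEST_DEBUG call is handed to it via run_on_cpu(). */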
2018 run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
2022 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2023 target_ulong len, int type)
2025 struct kvm_sw_breakpoint *bp;
2028 if (type == GDB_BREAKPOINT_SW) {
2029 bp = kvm_find_sw_breakpoint(cpu, addr);
2035 bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
2042 err = kvm_arch_insert_sw_breakpoint(cpu, bp);
2048 QTAILQ_INSERT_HEAD(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2050 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
2057 err = kvm_update_guest_debug(cpu, 0);
2065 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2066 target_ulong len, int type)
2068 struct kvm_sw_breakpoint *bp;
2071 if (type == GDB_BREAKPOINT_SW) {
2072 bp = kvm_find_sw_breakpoint(cpu, addr);
2077 if (bp->use_count > 1) {
2082 err = kvm_arch_remove_sw_breakpoint(cpu, bp);
2087 QTAILQ_REMOVE(&cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
2090 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
2097 err = kvm_update_guest_debug(cpu, 0);
2105 void kvm_remove_all_breakpoints(CPUState *cpu)
2107 struct kvm_sw_breakpoint *bp, *next;
2108 KVMState *s = cpu->kvm_state;
2111 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2112 if (kvm_arch_remove_sw_breakpoint(cpu, bp) != 0) {
2113 /* Try harder to find a CPU that currently sees the breakpoint. */
2114 CPU_FOREACH(tmpcpu) {
2115 if (kvm_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
2120 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2123 kvm_arch_remove_all_hw_breakpoints();
2126 kvm_update_guest_debug(cpu, 0);
2130 #else /* !KVM_CAP_SET_GUEST_DEBUG */
2132 int kvm_update_guest_debug(CPUState *cpu, unsigned long reinject_trap)
2137 int kvm_insert_breakpoint(CPUState *cpu, target_ulong addr,
2138 target_ulong len, int type)
2143 int kvm_remove_breakpoint(CPUState *cpu, target_ulong addr,
2144 target_ulong len, int type)
2149 void kvm_remove_all_breakpoints(CPUState *cpu)
2152 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
2154 int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2156 KVMState *s = kvm_state;
2157 struct kvm_signal_mask *sigmask;
2161 return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
2164 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2166 sigmask->len = s->sigmask_len;
2167 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2168 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2173 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2175 return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
2178 int kvm_on_sigbus(int code, void *addr)
2180 return kvm_arch_on_sigbus(code, addr);
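/* With the KVM_CREATE_DEVICE_TEST flag, KVM_CREATE_DEVICE only checks
 * whether the device type is supported, without instantiating it. */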
2183 int kvm_create_device(KVMState *s, uint64_t type, bool test)
2186 struct kvm_create_device create_dev;
2188 create_dev.type = type;
2190 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2192 if (!kvm_check_extension(s, KVM_CAP_DEVICE_CTRL)) {
2196 ret = kvm_vm_ioctl(s, KVM_CREATE_DEVICE, &create_dev);
2201 return test ? 0 : create_dev.fd;
2204 int kvm_set_one_reg(CPUState *cs, uint64_t id, void *source)
2206 struct kvm_one_reg reg;
2210 reg.addr = (uintptr_t) source;
2211 r = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
2213 trace_kvm_failed_reg_set(id, strerror(r));
2218 int kvm_get_one_reg(CPUState *cs, uint64_t id, void *target)
2220 struct kvm_one_reg reg;
2224 reg.addr = (uintptr_t) target;
2225 r = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
2227 trace_kvm_failed_reg_get(id, strerror(r));
2232 static void kvm_accel_class_init(ObjectClass *oc, void *data)
2234 AccelClass *ac = ACCEL_CLASS(oc);
2236 ac->init = kvm_init;
2237 ac->allowed = &kvm_allowed;
2240 static const TypeInfo kvm_accel_type = {
2241 .name = TYPE_KVM_ACCEL,
2242 .parent = TYPE_ACCEL,
2243 .class_init = kvm_accel_class_init,
2246 static void kvm_type_init(void)
2248 type_register_static(&kvm_accel_type);
2251 type_init(kvm_type_init);