4 * Copyright IBM, Corp. 2008
11 * This work is licensed under the terms of the GNU GPL, version 2 or later.
12 * See the COPYING file in the top-level directory.
16 #include <sys/types.h>
17 #include <sys/ioctl.h>
21 #include <linux/kvm.h>
23 #include "qemu-common.h"
24 #include "qemu/atomic.h"
25 #include "qemu/option.h"
26 #include "qemu/config-file.h"
27 #include "sysemu/sysemu.h"
29 #include "hw/pci/msi.h"
30 #include "exec/gdbstub.h"
31 #include "sysemu/kvm.h"
32 #include "qemu/bswap.h"
33 #include "exec/memory.h"
34 #include "exec/address-spaces.h"
35 #include "qemu/event_notifier.h"
38 /* This check must be after config-host.h is included */
40 #include <sys/eventfd.h>
43 #ifdef CONFIG_VALGRIND_H
44 #include <valgrind/memcheck.h>
47 /* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
48 #define PAGE_SIZE TARGET_PAGE_SIZE
53 #define DPRINTF(fmt, ...) \
54 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
56 #define DPRINTF(fmt, ...) \
60 #define KVM_MSI_HASHTAB_SIZE 256
62 typedef struct KVMSlot
65 ram_addr_t memory_size;
71 typedef struct kvm_dirty_log KVMDirtyLog;
79 struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
80 bool coalesced_flush_in_progress;
81 int broken_set_mem_region;
84 int robust_singlestep;
86 #ifdef KVM_CAP_SET_GUEST_DEBUG
87 struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
93 /* The man page (and POSIX) says ioctl numbers are signed int, but
94 * they're not. Linux, glibc and *BSD all treat ioctl numbers as
95 * unsigned, and treating them as signed here can break things */
96 unsigned irq_set_ioctl;
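/* Illustration of the problem above: KVM_IRQ_LINE_STATUS is defined via
 * _IOWR(), whose direction field includes _IOC_READ in the top bit, so as
 * a signed int the number is negative and comparisons can misbehave. */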
97 #ifdef KVM_CAP_IRQ_ROUTING
98 struct kvm_irq_routing *irq_routes;
99 int nr_allocated_irq_routes;
100 uint32_t *used_gsi_bitmap;
101 unsigned int gsi_count;
102 QTAILQ_HEAD(msi_hashtab, KVMMSIRoute) msi_hashtab[KVM_MSI_HASHTAB_SIZE];
108 bool kvm_kernel_irqchip;
109 bool kvm_async_interrupts_allowed;
110 bool kvm_halt_in_kernel_allowed;
111 bool kvm_irqfds_allowed;
112 bool kvm_msi_via_irqfd_allowed;
113 bool kvm_gsi_routing_allowed;
115 bool kvm_readonly_mem_allowed;
117 static const KVMCapabilityInfo kvm_required_capabilities[] = {
118 KVM_CAP_INFO(USER_MEMORY),
119 KVM_CAP_INFO(DESTROY_MEMORY_REGION_WORKS),
123 static KVMSlot *kvm_alloc_slot(KVMState *s)
127 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
128 if (s->slots[i].memory_size == 0) {
133 fprintf(stderr, "%s: no free slot available\n", __func__);
137 static KVMSlot *kvm_lookup_matching_slot(KVMState *s,
143 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
144 KVMSlot *mem = &s->slots[i];
146 if (start_addr == mem->start_addr &&
147 end_addr == mem->start_addr + mem->memory_size) {
156 * Find overlapping slot with lowest start address
158 static KVMSlot *kvm_lookup_overlapping_slot(KVMState *s,
162 KVMSlot *found = NULL;
165 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
166 KVMSlot *mem = &s->slots[i];
168 if (mem->memory_size == 0 ||
169 (found && found->start_addr < mem->start_addr)) {
173 if (end_addr > mem->start_addr &&
174 start_addr < mem->start_addr + mem->memory_size) {
182 int kvm_physical_memory_addr_from_host(KVMState *s, void *ram,
187 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
188 KVMSlot *mem = &s->slots[i];
190 if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
191 *phys_addr = mem->start_addr + (ram - mem->ram);
199 static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
201 struct kvm_userspace_memory_region mem;
203 mem.slot = slot->slot;
204 mem.guest_phys_addr = slot->start_addr;
205 mem.userspace_addr = (unsigned long)slot->ram;
206 mem.flags = slot->flags;
207 if (s->migration_log) {
208 mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;
211 if (slot->memory_size && mem.flags & KVM_MEM_READONLY) {
212 /* Set the slot size to 0 before setting the slot to the desired
213 * value. This is required since KVM commit 75d61fbc. */
215 kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
217 mem.memory_size = slot->memory_size;
218 return kvm_vm_ioctl(s, KVM_SET_USER_MEMORY_REGION, &mem);
221 static void kvm_reset_vcpu(void *opaque)
223 CPUState *cpu = opaque;
225 kvm_arch_reset_vcpu(cpu);
228 int kvm_init_vcpu(CPUState *cpu)
230 KVMState *s = kvm_state;
234 DPRINTF("kvm_init_vcpu\n");
236 ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, (void *)kvm_arch_vcpu_id(cpu));
238 DPRINTF("kvm_create_vcpu failed\n");
244 cpu->kvm_vcpu_dirty = true;
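/* The vcpu's struct kvm_run is shared with the kernel by mmap()ing the
 * vcpu fd; KVM_GET_VCPU_MMAP_SIZE reports how many bytes to map, which
 * may exceed sizeof(struct kvm_run) to leave room for extras such as
 * the coalesced MMIO ring. */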
246 mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
249 DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
253 cpu->kvm_run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
255 if (cpu->kvm_run == MAP_FAILED) {
257 DPRINTF("mmap'ing vcpu state failed\n");
261 if (s->coalesced_mmio && !s->coalesced_mmio_ring) {
262 s->coalesced_mmio_ring =
263 (void *)cpu->kvm_run + s->coalesced_mmio * PAGE_SIZE;
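/* s->coalesced_mmio holds the page offset returned when checking
 * KVM_CAP_COALESCED_MMIO: the ring starts that many pages into the
 * vcpu's kvm_run mapping. */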
266 ret = kvm_arch_init_vcpu(cpu);
268 qemu_register_reset(kvm_reset_vcpu, cpu);
269 kvm_arch_reset_vcpu(cpu);
276 * dirty pages logging control
279 static int kvm_mem_flags(KVMState *s, bool log_dirty, bool readonly)
282 flags = log_dirty ? KVM_MEM_LOG_DIRTY_PAGES : 0;
283 if (readonly && kvm_readonly_mem_allowed) {
284 flags |= KVM_MEM_READONLY;
289 static int kvm_slot_dirty_pages_log_change(KVMSlot *mem, bool log_dirty)
291 KVMState *s = kvm_state;
292 int flags, mask = KVM_MEM_LOG_DIRTY_PAGES;
295 old_flags = mem->flags;
297 flags = (mem->flags & ~mask) | kvm_mem_flags(s, log_dirty, false);
300 /* If effectively nothing changed, there is no need to issue the ioctl */
301 if (s->migration_log) {
302 flags |= KVM_MEM_LOG_DIRTY_PAGES;
305 if (flags == old_flags) {
309 return kvm_set_user_memory_region(s, mem);
312 static int kvm_dirty_pages_log_change(hwaddr phys_addr,
313 ram_addr_t size, bool log_dirty)
315 KVMState *s = kvm_state;
316 KVMSlot *mem = kvm_lookup_matching_slot(s, phys_addr, phys_addr + size);
319 fprintf(stderr, "BUG: %s: invalid parameters " TARGET_FMT_plx "-"
320 TARGET_FMT_plx "\n", __func__, phys_addr,
321 (hwaddr)(phys_addr + size - 1));
324 return kvm_slot_dirty_pages_log_change(mem, log_dirty);
327 static void kvm_log_start(MemoryListener *listener,
328 MemoryRegionSection *section)
332 r = kvm_dirty_pages_log_change(section->offset_within_address_space,
333 int128_get64(section->size), true);
339 static void kvm_log_stop(MemoryListener *listener,
340 MemoryRegionSection *section)
344 r = kvm_dirty_pages_log_change(section->offset_within_address_space,
345 int128_get64(section->size), false);
351 static int kvm_set_migration_log(int enable)
353 KVMState *s = kvm_state;
357 s->migration_log = enable;
359 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
362 if (!mem->memory_size) {
365 if (!!(mem->flags & KVM_MEM_LOG_DIRTY_PAGES) == enable) {
368 err = kvm_set_user_memory_region(s, mem);
376 /* get kvm's dirty pages bitmap and update qemu's */
377 static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
378 unsigned long *bitmap)
381 unsigned long page_number, c;
383 unsigned int pages = int128_get64(section->size) / getpagesize();
384 unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
385 unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
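/* hpratio covers hosts whose page size exceeds the target's: with 64k
 * host pages and 4k target pages, hpratio is 16 and each bit in the
 * kernel's bitmap marks 16 consecutive target pages dirty. */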
388 * Traversing the bitmap is faster than traversing memory page by page,
389 * especially when most of the memory is not dirty.
391 for (i = 0; i < len; i++) {
392 if (bitmap[i] != 0) {
393 c = leul_to_cpu(bitmap[i]);
397 page_number = (i * HOST_LONG_BITS + j) * hpratio;
398 addr1 = page_number * TARGET_PAGE_SIZE;
399 addr = section->offset_within_region + addr1;
400 memory_region_set_dirty(section->mr, addr,
401 TARGET_PAGE_SIZE * hpratio);
408 #define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
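/* Rounds x up to the next multiple of y; y must be a power of two.
 * E.g. ALIGN(5, 4) == 8 and ALIGN(8, 4) == 8. */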
411 * kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
412 * This function updates qemu's dirty bitmap using
413 * memory_region_set_dirty(). This means all bits are set
416 * @section: the logged memory region section; its start and end
417 *           addresses bound the scan.
419 static int kvm_physical_sync_dirty_bitmap(MemoryRegionSection *section)
421 KVMState *s = kvm_state;
422 unsigned long size, allocated_size = 0;
426 hwaddr start_addr = section->offset_within_address_space;
427 hwaddr end_addr = start_addr + int128_get64(section->size);
429 d.dirty_bitmap = NULL;
430 while (start_addr < end_addr) {
431 mem = kvm_lookup_overlapping_slot(s, start_addr, end_addr);
436 /* XXX bad kernel interface alert
437 * For the dirty bitmap, the kernel allocates an array of size aligned to
438 * bits-per-long. But when the kernel is 64-bit and
439 * userspace is 32-bit, userspace can't align to the same
440 * bits-per-long, since sizeof(long) differs between kernel
441 * and user space. Userspace would then provide a buffer that
442 * may be 4 bytes smaller than the kernel uses, resulting in
443 * userspace memory corruption (which, in most cases, valgrind
444 * cannot detect either).
445 * So for now, let's align to 64 instead of HOST_LONG_BITS here, in
446 * the hope that sizeof(long) won't become >8 any time soon.
448 size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS),
449 /*HOST_LONG_BITS*/ 64) / 8;
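/* Worked example: a 1 GiB slot with 4 KiB target pages needs
 * 262144 bits; ALIGN(262144, 64) / 8 == 32768 bytes of bitmap. */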
450 if (!d.dirty_bitmap) {
451 d.dirty_bitmap = g_malloc(size);
452 } else if (size > allocated_size) {
453 d.dirty_bitmap = g_realloc(d.dirty_bitmap, size);
455 allocated_size = size;
456 memset(d.dirty_bitmap, 0, allocated_size);
460 if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
461 DPRINTF("ioctl failed %d\n", errno);
466 kvm_get_dirty_pages_log_range(section, d.dirty_bitmap);
467 start_addr = mem->start_addr + mem->memory_size;
469 g_free(d.dirty_bitmap);
474 static void kvm_coalesce_mmio_region(MemoryListener *listener,
475 MemoryRegionSection *section,
476 hwaddr start, hwaddr size)
478 KVMState *s = kvm_state;
480 if (s->coalesced_mmio) {
481 struct kvm_coalesced_mmio_zone zone;
487 (void)kvm_vm_ioctl(s, KVM_REGISTER_COALESCED_MMIO, &zone);
491 static void kvm_uncoalesce_mmio_region(MemoryListener *listener,
492 MemoryRegionSection *section,
493 hwaddr start, hwaddr size)
495 KVMState *s = kvm_state;
497 if (s->coalesced_mmio) {
498 struct kvm_coalesced_mmio_zone zone;
504 (void)kvm_vm_ioctl(s, KVM_UNREGISTER_COALESCED_MMIO, &zone);
508 int kvm_check_extension(KVMState *s, unsigned int extension)
512 ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, extension);
520 static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
521 bool assign, uint32_t size, bool datamatch)
524 struct kvm_ioeventfd iofd;
526 iofd.datamatch = datamatch ? val : 0;
532 if (!kvm_enabled()) {
537 iofd.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
540 iofd.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
543 ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &iofd);
552 static int kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint16_t val,
553 bool assign, uint32_t size, bool datamatch)
555 struct kvm_ioeventfd kick = {
556 .datamatch = datamatch ? val : 0,
558 .flags = KVM_IOEVENTFD_FLAG_PIO,
563 if (!kvm_enabled()) {
567 kick.flags |= KVM_IOEVENTFD_FLAG_DATAMATCH;
570 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
572 r = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
580 static int kvm_check_many_ioeventfds(void)
582 /* Userspace can use ioeventfd for I/O notification.  This requires a host
583 * that supports eventfd(2) and an I/O thread; since eventfd does not
584 * support SIGIO it cannot interrupt the vcpu.
586 * Older kernels have a 6-device limit on the KVM I/O bus.  Find out so we
587 * can avoid creating too many ioeventfds.
589 #if defined(CONFIG_EVENTFD)
592 for (i = 0; i < ARRAY_SIZE(ioeventfds); i++) {
593 ioeventfds[i] = eventfd(0, EFD_CLOEXEC);
594 if (ioeventfds[i] < 0) {
597 ret = kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, true, 2, true);
599 close(ioeventfds[i]);
604 /* Decide whether many devices are supported or not */
605 ret = i == ARRAY_SIZE(ioeventfds);
608 kvm_set_ioeventfd_pio(ioeventfds[i], 0, i, false, 2, true);
609 close(ioeventfds[i]);
617 static const KVMCapabilityInfo *
618 kvm_check_extension_list(KVMState *s, const KVMCapabilityInfo *list)
621 if (!kvm_check_extension(s, list->value)) {
629 static void kvm_set_phys_mem(MemoryRegionSection *section, bool add)
631 KVMState *s = kvm_state;
634 MemoryRegion *mr = section->mr;
635 bool log_dirty = memory_region_is_logging(mr);
636 bool writeable = !mr->readonly && !mr->rom_device;
637 bool readonly_flag = mr->readonly || memory_region_is_romd(mr);
638 hwaddr start_addr = section->offset_within_address_space;
639 ram_addr_t size = int128_get64(section->size);
643 /* KVM works in page-size chunks, but the function may be called
644 with a sub-page size and an unaligned start address. */
645 delta = TARGET_PAGE_ALIGN(size) - size;
651 size &= TARGET_PAGE_MASK;
652 if (!size || (start_addr & ~TARGET_PAGE_MASK)) {
656 if (!memory_region_is_ram(mr)) {
657 if (writeable || !kvm_readonly_mem_allowed) {
659 } else if (!mr->romd_mode) {
660 /* If the memory device is not in romd_mode, then we actually want
661 * to remove the kvm memory slot so all accesses will trap. */
666 ram = memory_region_get_ram_ptr(mr) + section->offset_within_region + delta;
669 mem = kvm_lookup_overlapping_slot(s, start_addr, start_addr + size);
674 if (add && start_addr >= mem->start_addr &&
675 (start_addr + size <= mem->start_addr + mem->memory_size) &&
676 (ram - start_addr == mem->ram - mem->start_addr)) {
677 /* The new slot fits into the existing one and comes with
678 * identical parameters - update flags and done. */
679 kvm_slot_dirty_pages_log_change(mem, log_dirty);
685 if (mem->flags & KVM_MEM_LOG_DIRTY_PAGES) {
686 kvm_physical_sync_dirty_bitmap(section);
689 /* unregister the overlapping slot */
690 mem->memory_size = 0;
691 err = kvm_set_user_memory_region(s, mem);
693 fprintf(stderr, "%s: error unregistering overlapping slot: %s\n",
694 __func__, strerror(-err));
698 /* Workaround for older KVM versions: we can't join slots, not even by
699 * unregistering the previous ones and then registering the larger
700 * slot. We have to maintain the existing fragmentation. Sigh.
702 * This workaround assumes that the new slot starts at the same
703 * address as the first existing one. If not or if some overlapping
704 * slot comes around later, we will fail (not seen in practice so far)
705 * - and actually require a recent KVM version. */
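/* For illustration (hypothetical sizes): growing an old 8 MiB slot at
 * the same start address to 16 MiB first re-registers the original
 * 8 MiB chunk here, then falls through to register the remaining
 * 8 MiB as a separate new slot below. */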
706 if (s->broken_set_mem_region &&
707 old.start_addr == start_addr && old.memory_size < size && add) {
708 mem = kvm_alloc_slot(s);
709 mem->memory_size = old.memory_size;
710 mem->start_addr = old.start_addr;
712 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
714 err = kvm_set_user_memory_region(s, mem);
716 fprintf(stderr, "%s: error updating slot: %s\n", __func__,
721 start_addr += old.memory_size;
722 ram += old.memory_size;
723 size -= old.memory_size;
727 /* register prefix slot */
728 if (old.start_addr < start_addr) {
729 mem = kvm_alloc_slot(s);
730 mem->memory_size = start_addr - old.start_addr;
731 mem->start_addr = old.start_addr;
733 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
735 err = kvm_set_user_memory_region(s, mem);
737 fprintf(stderr, "%s: error registering prefix slot: %s\n",
738 __func__, strerror(-err));
740 fprintf(stderr, "%s: This is probably because your kernel's " \
741 "PAGE_SIZE is too big. Please try to use 4k " \
742 "PAGE_SIZE!\n", __func__);
748 /* register suffix slot */
749 if (old.start_addr + old.memory_size > start_addr + size) {
750 ram_addr_t size_delta;
752 mem = kvm_alloc_slot(s);
753 mem->start_addr = start_addr + size;
754 size_delta = mem->start_addr - old.start_addr;
755 mem->memory_size = old.memory_size - size_delta;
756 mem->ram = old.ram + size_delta;
757 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
759 err = kvm_set_user_memory_region(s, mem);
761 fprintf(stderr, "%s: error registering suffix slot: %s\n",
762 __func__, strerror(-err));
768 /* in case the KVM bug workaround already "consumed" the new slot */
775 mem = kvm_alloc_slot(s);
776 mem->memory_size = size;
777 mem->start_addr = start_addr;
779 mem->flags = kvm_mem_flags(s, log_dirty, readonly_flag);
781 err = kvm_set_user_memory_region(s, mem);
783 fprintf(stderr, "%s: error registering slot: %s\n", __func__,
789 static void kvm_region_add(MemoryListener *listener,
790 MemoryRegionSection *section)
792 memory_region_ref(section->mr);
793 kvm_set_phys_mem(section, true);
796 static void kvm_region_del(MemoryListener *listener,
797 MemoryRegionSection *section)
799 kvm_set_phys_mem(section, false);
800 memory_region_unref(section->mr);
803 static void kvm_log_sync(MemoryListener *listener,
804 MemoryRegionSection *section)
808 r = kvm_physical_sync_dirty_bitmap(section);
814 static void kvm_log_global_start(struct MemoryListener *listener)
818 r = kvm_set_migration_log(1);
822 static void kvm_log_global_stop(struct MemoryListener *listener)
826 r = kvm_set_migration_log(0);
830 static void kvm_mem_ioeventfd_add(MemoryListener *listener,
831 MemoryRegionSection *section,
832 bool match_data, uint64_t data,
835 int fd = event_notifier_get_fd(e);
838 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
839 data, true, int128_get64(section->size),
846 static void kvm_mem_ioeventfd_del(MemoryListener *listener,
847 MemoryRegionSection *section,
848 bool match_data, uint64_t data,
851 int fd = event_notifier_get_fd(e);
854 r = kvm_set_ioeventfd_mmio(fd, section->offset_within_address_space,
855 data, false, int128_get64(section->size),
862 static void kvm_io_ioeventfd_add(MemoryListener *listener,
863 MemoryRegionSection *section,
864 bool match_data, uint64_t data,
867 int fd = event_notifier_get_fd(e);
870 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
871 data, true, int128_get64(section->size),
878 static void kvm_io_ioeventfd_del(MemoryListener *listener,
879 MemoryRegionSection *section,
880 bool match_data, uint64_t data,
884 int fd = event_notifier_get_fd(e);
887 r = kvm_set_ioeventfd_pio(fd, section->offset_within_address_space,
888 data, false, int128_get64(section->size),
895 static MemoryListener kvm_memory_listener = {
896 .region_add = kvm_region_add,
897 .region_del = kvm_region_del,
898 .log_start = kvm_log_start,
899 .log_stop = kvm_log_stop,
900 .log_sync = kvm_log_sync,
901 .log_global_start = kvm_log_global_start,
902 .log_global_stop = kvm_log_global_stop,
903 .eventfd_add = kvm_mem_ioeventfd_add,
904 .eventfd_del = kvm_mem_ioeventfd_del,
905 .coalesced_mmio_add = kvm_coalesce_mmio_region,
906 .coalesced_mmio_del = kvm_uncoalesce_mmio_region,
910 static MemoryListener kvm_io_listener = {
911 .eventfd_add = kvm_io_ioeventfd_add,
912 .eventfd_del = kvm_io_ioeventfd_del,
916 static void kvm_handle_interrupt(CPUState *cpu, int mask)
918 cpu->interrupt_request |= mask;
920 if (!qemu_cpu_is_self(cpu)) {
925 int kvm_set_irq(KVMState *s, int irq, int level)
927 struct kvm_irq_level event;
930 assert(kvm_async_interrupts_enabled());
934 ret = kvm_vm_ioctl(s, s->irq_set_ioctl, &event);
936 perror("kvm_set_irq");
940 return (s->irq_set_ioctl == KVM_IRQ_LINE) ? 1 : event.status;
943 #ifdef KVM_CAP_IRQ_ROUTING
944 typedef struct KVMMSIRoute {
945 struct kvm_irq_routing_entry kroute;
946 QTAILQ_ENTRY(KVMMSIRoute) entry;
949 static void set_gsi(KVMState *s, unsigned int gsi)
951 s->used_gsi_bitmap[gsi / 32] |= 1U << (gsi % 32);
954 static void clear_gsi(KVMState *s, unsigned int gsi)
956 s->used_gsi_bitmap[gsi / 32] &= ~(1U << (gsi % 32));
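/* The GSI bitmap is an array of 32-bit words: GSI 37, say, lives in
 * word 1 (37 / 32) as bit 5 (37 % 32). */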
959 void kvm_init_irq_routing(KVMState *s)
963 gsi_count = kvm_check_extension(s, KVM_CAP_IRQ_ROUTING);
965 unsigned int gsi_bits, i;
967 /* Round up so we can search ints using ffs */
968 gsi_bits = ALIGN(gsi_count, 32);
969 s->used_gsi_bitmap = g_malloc0(gsi_bits / 8);
970 s->gsi_count = gsi_count;
972 /* Mark any over-allocated bits as already in use */
973 for (i = gsi_count; i < gsi_bits; i++) {
978 s->irq_routes = g_malloc0(sizeof(*s->irq_routes));
979 s->nr_allocated_irq_routes = 0;
981 if (!s->direct_msi) {
982 for (i = 0; i < KVM_MSI_HASHTAB_SIZE; i++) {
983 QTAILQ_INIT(&s->msi_hashtab[i]);
987 kvm_arch_init_irq_routing(s);
990 void kvm_irqchip_commit_routes(KVMState *s)
994 s->irq_routes->flags = 0;
995 ret = kvm_vm_ioctl(s, KVM_SET_GSI_ROUTING, s->irq_routes);
999 static void kvm_add_routing_entry(KVMState *s,
1000 struct kvm_irq_routing_entry *entry)
1002 struct kvm_irq_routing_entry *new;
1005 if (s->irq_routes->nr == s->nr_allocated_irq_routes) {
1006 n = s->nr_allocated_irq_routes * 2;
1010 size = sizeof(struct kvm_irq_routing);
1011 size += n * sizeof(*new);
1012 s->irq_routes = g_realloc(s->irq_routes, size);
1013 s->nr_allocated_irq_routes = n;
1015 n = s->irq_routes->nr++;
1016 new = &s->irq_routes->entries[n];
1017 memset(new, 0, sizeof(*new));
1018 new->gsi = entry->gsi;
1019 new->type = entry->type;
1020 new->flags = entry->flags;
1023 set_gsi(s, entry->gsi);
1026 static int kvm_update_routing_entry(KVMState *s,
1027 struct kvm_irq_routing_entry *new_entry)
1029 struct kvm_irq_routing_entry *entry;
1032 for (n = 0; n < s->irq_routes->nr; n++) {
1033 entry = &s->irq_routes->entries[n];
1034 if (entry->gsi != new_entry->gsi) {
1038 entry->type = new_entry->type;
1039 entry->flags = new_entry->flags;
1040 entry->u = new_entry->u;
1042 kvm_irqchip_commit_routes(s);
1050 void kvm_irqchip_add_irq_route(KVMState *s, int irq, int irqchip, int pin)
1052 struct kvm_irq_routing_entry e;
1054 assert(pin < s->gsi_count);
1057 e.type = KVM_IRQ_ROUTING_IRQCHIP;
1059 e.u.irqchip.irqchip = irqchip;
1060 e.u.irqchip.pin = pin;
1061 kvm_add_routing_entry(s, &e);
1064 void kvm_irqchip_release_virq(KVMState *s, int virq)
1066 struct kvm_irq_routing_entry *e;
1069 for (i = 0; i < s->irq_routes->nr; i++) {
1070 e = &s->irq_routes->entries[i];
1071 if (e->gsi == virq) {
1072 s->irq_routes->nr--;
1073 *e = s->irq_routes->entries[s->irq_routes->nr];
1079 static unsigned int kvm_hash_msi(uint32_t data)
1081 /* This is optimized for IA32 MSI layout. However, no other arch shall
1082 * repeat the mistake of not providing a direct MSI injection API. */
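/* On x86 the low byte of the MSI data register carries the interrupt
 * vector, which is presumably why hashing on the data field spreads
 * routes across the KVM_MSI_HASHTAB_SIZE (256) buckets. */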
1086 static void kvm_flush_dynamic_msi_routes(KVMState *s)
1088 KVMMSIRoute *route, *next;
1091 for (hash = 0; hash < KVM_MSI_HASHTAB_SIZE; hash++) {
1092 QTAILQ_FOREACH_SAFE(route, &s->msi_hashtab[hash], entry, next) {
1093 kvm_irqchip_release_virq(s, route->kroute.gsi);
1094 QTAILQ_REMOVE(&s->msi_hashtab[hash], route, entry);
1100 static int kvm_irqchip_get_virq(KVMState *s)
1102 uint32_t *word = s->used_gsi_bitmap;
1103 int max_words = ALIGN(s->gsi_count, 32) / 32;
1108 /* Return the lowest unused GSI in the bitmap */
1109 for (i = 0; i < max_words; i++) {
1110 bit = ffs(~word[i]);
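/* ffs() returns the 1-based index of the lowest set bit, so
 * ffs(~word[i]) finds the lowest clear bit: for word[i] == 0x7 it
 * returns 4, i.e. GSI bit 3 is the first free one in that word. */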
1115 return bit - 1 + i * 32;
1117 if (!s->direct_msi && retry) {
1119 kvm_flush_dynamic_msi_routes(s);
1126 static KVMMSIRoute *kvm_lookup_msi_route(KVMState *s, MSIMessage msg)
1128 unsigned int hash = kvm_hash_msi(msg.data);
1131 QTAILQ_FOREACH(route, &s->msi_hashtab[hash], entry) {
1132 if (route->kroute.u.msi.address_lo == (uint32_t)msg.address &&
1133 route->kroute.u.msi.address_hi == (msg.address >> 32) &&
1134 route->kroute.u.msi.data == le32_to_cpu(msg.data)) {
1141 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1146 if (s->direct_msi) {
1147 msi.address_lo = (uint32_t)msg.address;
1148 msi.address_hi = msg.address >> 32;
1149 msi.data = le32_to_cpu(msg.data);
1151 memset(msi.pad, 0, sizeof(msi.pad));
1153 return kvm_vm_ioctl(s, KVM_SIGNAL_MSI, &msi);
1156 route = kvm_lookup_msi_route(s, msg);
1160 virq = kvm_irqchip_get_virq(s);
1165 route = g_malloc(sizeof(KVMMSIRoute));
1166 route->kroute.gsi = virq;
1167 route->kroute.type = KVM_IRQ_ROUTING_MSI;
1168 route->kroute.flags = 0;
1169 route->kroute.u.msi.address_lo = (uint32_t)msg.address;
1170 route->kroute.u.msi.address_hi = msg.address >> 32;
1171 route->kroute.u.msi.data = le32_to_cpu(msg.data);
1173 kvm_add_routing_entry(s, &route->kroute);
1174 kvm_irqchip_commit_routes(s);
1176 QTAILQ_INSERT_TAIL(&s->msi_hashtab[kvm_hash_msi(msg.data)], route,
1180 assert(route->kroute.type == KVM_IRQ_ROUTING_MSI);
1182 return kvm_set_irq(s, route->kroute.gsi, 1);
1185 int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
1187 struct kvm_irq_routing_entry kroute;
1190 if (!kvm_gsi_routing_enabled()) {
1194 virq = kvm_irqchip_get_virq(s);
1200 kroute.type = KVM_IRQ_ROUTING_MSI;
1202 kroute.u.msi.address_lo = (uint32_t)msg.address;
1203 kroute.u.msi.address_hi = msg.address >> 32;
1204 kroute.u.msi.data = le32_to_cpu(msg.data);
1206 kvm_add_routing_entry(s, &kroute);
1207 kvm_irqchip_commit_routes(s);
1212 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1214 struct kvm_irq_routing_entry kroute;
1216 if (!kvm_irqchip_in_kernel()) {
1221 kroute.type = KVM_IRQ_ROUTING_MSI;
1223 kroute.u.msi.address_lo = (uint32_t)msg.address;
1224 kroute.u.msi.address_hi = msg.address >> 32;
1225 kroute.u.msi.data = le32_to_cpu(msg.data);
1227 return kvm_update_routing_entry(s, &kroute);
1230 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1232 struct kvm_irqfd irqfd = {
1235 .flags = assign ? 0 : KVM_IRQFD_FLAG_DEASSIGN,
1238 if (!kvm_irqfds_enabled()) {
1242 return kvm_vm_ioctl(s, KVM_IRQFD, &irqfd);
1245 #else /* !KVM_CAP_IRQ_ROUTING */
1247 void kvm_init_irq_routing(KVMState *s)
1251 void kvm_irqchip_release_virq(KVMState *s, int virq)
1255 int kvm_irqchip_send_msi(KVMState *s, MSIMessage msg)
1260 int kvm_irqchip_add_msi_route(KVMState *s, MSIMessage msg)
1265 static int kvm_irqchip_assign_irqfd(KVMState *s, int fd, int virq, bool assign)
1270 int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg)
1274 #endif /* !KVM_CAP_IRQ_ROUTING */
1276 int kvm_irqchip_add_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
1278 return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, true);
1281 int kvm_irqchip_remove_irqfd_notifier(KVMState *s, EventNotifier *n, int virq)
1283 return kvm_irqchip_assign_irqfd(s, event_notifier_get_fd(n), virq, false);
1286 static int kvm_irqchip_create(KVMState *s)
1288 QemuOptsList *list = qemu_find_opts("machine");
1291 if (QTAILQ_EMPTY(&list->head) ||
1292 !qemu_opt_get_bool(QTAILQ_FIRST(&list->head),
1293 "kernel_irqchip", true) ||
1294 !kvm_check_extension(s, KVM_CAP_IRQCHIP)) {
1298 ret = kvm_vm_ioctl(s, KVM_CREATE_IRQCHIP);
1300 fprintf(stderr, "Create kernel irqchip failed\n");
1304 kvm_kernel_irqchip = true;
1305 /* If we have an in-kernel IRQ chip then we must have asynchronous
1306 * interrupt delivery (though the reverse is not necessarily true)
1308 kvm_async_interrupts_allowed = true;
1309 kvm_halt_in_kernel_allowed = true;
1311 kvm_init_irq_routing(s);
1316 static int kvm_max_vcpus(KVMState *s)
1320 /* Find number of supported CPUs using the recommended
1321 * procedure from the kernel API documentation to cope with
1322 * older kernels that may be missing capabilities.
1324 ret = kvm_check_extension(s, KVM_CAP_MAX_VCPUS);
1328 ret = kvm_check_extension(s, KVM_CAP_NR_VCPUS);
1338 static const char upgrade_note[] =
1339 "Please upgrade to at least kernel 2.6.29 or recent kvm-kmod\n"
1340 "(see http://sourceforge.net/projects/kvm).\n";
1342 const KVMCapabilityInfo *missing_cap;
1347 s = g_malloc0(sizeof(KVMState));
1350 * On systems where the kernel can support different base page
1351 * sizes, host page size may be different from TARGET_PAGE_SIZE,
1352 * even with KVM. TARGET_PAGE_SIZE is assumed to be the minimum
1353 * page size for the system though.
1355 assert(TARGET_PAGE_SIZE <= getpagesize());
1357 #ifdef KVM_CAP_SET_GUEST_DEBUG
1358 QTAILQ_INIT(&s->kvm_sw_breakpoints);
1360 for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
1361 s->slots[i].slot = i;
1364 s->fd = qemu_open("/dev/kvm", O_RDWR);
1366 fprintf(stderr, "Could not access KVM kernel module: %m\n");
1371 ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
1372 if (ret < KVM_API_VERSION) {
1376 fprintf(stderr, "kvm version too old\n");
1380 if (ret > KVM_API_VERSION) {
1382 fprintf(stderr, "kvm version not supported\n");
1386 max_vcpus = kvm_max_vcpus(s);
1387 if (smp_cpus > max_vcpus) {
1389 fprintf(stderr, "Number of SMP cpus requested (%d) exceeds max cpus "
1390 "supported by KVM (%d)\n", smp_cpus, max_vcpus);
1394 s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
1397 fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
1398 "your host kernel command line\n");
1404 missing_cap = kvm_check_extension_list(s, kvm_required_capabilities);
1407 kvm_check_extension_list(s, kvm_arch_required_capabilities);
1411 fprintf(stderr, "kvm does not support %s\n%s",
1412 missing_cap->name, upgrade_note);
1416 s->coalesced_mmio = kvm_check_extension(s, KVM_CAP_COALESCED_MMIO);
1418 s->broken_set_mem_region = 1;
1419 ret = kvm_check_extension(s, KVM_CAP_JOIN_MEMORY_REGIONS_WORKS);
1421 s->broken_set_mem_region = 0;
1424 #ifdef KVM_CAP_VCPU_EVENTS
1425 s->vcpu_events = kvm_check_extension(s, KVM_CAP_VCPU_EVENTS);
1428 s->robust_singlestep =
1429 kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
1431 #ifdef KVM_CAP_DEBUGREGS
1432 s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
1435 #ifdef KVM_CAP_XSAVE
1436 s->xsave = kvm_check_extension(s, KVM_CAP_XSAVE);
1440 s->xcrs = kvm_check_extension(s, KVM_CAP_XCRS);
1443 #ifdef KVM_CAP_PIT_STATE2
1444 s->pit_state2 = kvm_check_extension(s, KVM_CAP_PIT_STATE2);
1447 #ifdef KVM_CAP_IRQ_ROUTING
1448 s->direct_msi = (kvm_check_extension(s, KVM_CAP_SIGNAL_MSI) > 0);
1451 s->intx_set_mask = kvm_check_extension(s, KVM_CAP_PCI_2_3);
1453 s->irq_set_ioctl = KVM_IRQ_LINE;
1454 if (kvm_check_extension(s, KVM_CAP_IRQ_INJECT_STATUS)) {
1455 s->irq_set_ioctl = KVM_IRQ_LINE_STATUS;
1458 #ifdef KVM_CAP_READONLY_MEM
1459 kvm_readonly_mem_allowed =
1460 (kvm_check_extension(s, KVM_CAP_READONLY_MEM) > 0);
1463 ret = kvm_arch_init(s);
1468 ret = kvm_irqchip_create(s);
1474 memory_listener_register(&kvm_memory_listener, &address_space_memory);
1475 memory_listener_register(&kvm_io_listener, &address_space_io);
1477 s->many_ioeventfds = kvm_check_many_ioeventfds();
1479 cpu_interrupt_handler = kvm_handle_interrupt;
1495 static void kvm_handle_io(uint16_t port, void *data, int direction, int size,
1499 uint8_t *ptr = data;
1501 for (i = 0; i < count; i++) {
1502 if (direction == KVM_EXIT_IO_IN) {
1505 stb_p(ptr, cpu_inb(port));
1508 stw_p(ptr, cpu_inw(port));
1511 stl_p(ptr, cpu_inl(port));
1517 cpu_outb(port, ldub_p(ptr));
1520 cpu_outw(port, lduw_p(ptr));
1523 cpu_outl(port, ldl_p(ptr));
1532 static int kvm_handle_internal_error(CPUState *cpu, struct kvm_run *run)
1534 fprintf(stderr, "KVM internal error.");
1535 if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
1538 fprintf(stderr, " Suberror: %d\n", run->internal.suberror);
1539 for (i = 0; i < run->internal.ndata; ++i) {
1540 fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
1541 i, (uint64_t)run->internal.data[i]);
1544 fprintf(stderr, "\n");
1546 if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
1547 fprintf(stderr, "emulation failure\n");
1548 if (!kvm_arch_stop_on_emulation_error(cpu)) {
1549 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1550 return EXCP_INTERRUPT;
1553 /* FIXME: Should trigger a QMP event to let management know
1554 * something went wrong.
1559 void kvm_flush_coalesced_mmio_buffer(void)
1561 KVMState *s = kvm_state;
1563 if (s->coalesced_flush_in_progress) {
1567 s->coalesced_flush_in_progress = true;
1569 if (s->coalesced_mmio_ring) {
1570 struct kvm_coalesced_mmio_ring *ring = s->coalesced_mmio_ring;
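/* The ring is a single-producer/single-consumer buffer: the kernel
 * appends entries and advances ring->last, while userspace consumes
 * at ring->first; the ring is empty when first == last. */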
1571 while (ring->first != ring->last) {
1572 struct kvm_coalesced_mmio *ent;
1574 ent = &ring->coalesced_mmio[ring->first];
1576 cpu_physical_memory_write(ent->phys_addr, ent->data, ent->len);
1578 ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
1582 s->coalesced_flush_in_progress = false;
1585 static void do_kvm_cpu_synchronize_state(void *arg)
1587 CPUState *cpu = arg;
1589 if (!cpu->kvm_vcpu_dirty) {
1590 kvm_arch_get_registers(cpu);
1591 cpu->kvm_vcpu_dirty = true;
1595 void kvm_cpu_synchronize_state(CPUState *cpu)
1597 if (!cpu->kvm_vcpu_dirty) {
1598 run_on_cpu(cpu, do_kvm_cpu_synchronize_state, cpu);
1602 void kvm_cpu_synchronize_post_reset(CPUState *cpu)
1604 kvm_arch_put_registers(cpu, KVM_PUT_RESET_STATE);
1605 cpu->kvm_vcpu_dirty = false;
1608 void kvm_cpu_synchronize_post_init(CPUState *cpu)
1610 kvm_arch_put_registers(cpu, KVM_PUT_FULL_STATE);
1611 cpu->kvm_vcpu_dirty = false;
1614 int kvm_cpu_exec(CPUState *cpu)
1616 struct kvm_run *run = cpu->kvm_run;
1619 DPRINTF("kvm_cpu_exec()\n");
1621 if (kvm_arch_process_async_events(cpu)) {
1622 cpu->exit_request = 0;
1627 if (cpu->kvm_vcpu_dirty) {
1628 kvm_arch_put_registers(cpu, KVM_PUT_RUNTIME_STATE);
1629 cpu->kvm_vcpu_dirty = false;
1632 kvm_arch_pre_run(cpu, run);
1633 if (cpu->exit_request) {
1634 DPRINTF("interrupt exit requested\n");
1636 * KVM requires us to reenter the kernel after IO exits to complete
1637 * instruction emulation. This self-signal will ensure that we
1640 qemu_cpu_kick_self();
1642 qemu_mutex_unlock_iothread();
1644 run_ret = kvm_vcpu_ioctl(cpu, KVM_RUN, 0);
1646 qemu_mutex_lock_iothread();
1647 kvm_arch_post_run(cpu, run);
1650 if (run_ret == -EINTR || run_ret == -EAGAIN) {
1651 DPRINTF("io window exit\n");
1652 ret = EXCP_INTERRUPT;
1655 fprintf(stderr, "error: kvm run failed %s\n",
1656 strerror(-run_ret));
1660 trace_kvm_run_exit(cpu->cpu_index, run->exit_reason);
1661 switch (run->exit_reason) {
1663 DPRINTF("handle_io\n");
1664 kvm_handle_io(run->io.port,
1665 (uint8_t *)run + run->io.data_offset,
1672 DPRINTF("handle_mmio\n");
1673 cpu_physical_memory_rw(run->mmio.phys_addr,
1676 run->mmio.is_write);
1679 case KVM_EXIT_IRQ_WINDOW_OPEN:
1680 DPRINTF("irq_window_open\n");
1681 ret = EXCP_INTERRUPT;
1683 case KVM_EXIT_SHUTDOWN:
1684 DPRINTF("shutdown\n");
1685 qemu_system_reset_request();
1686 ret = EXCP_INTERRUPT;
1688 case KVM_EXIT_UNKNOWN:
1689 fprintf(stderr, "KVM: unknown exit, hardware reason %" PRIx64 "\n",
1690 (uint64_t)run->hw.hardware_exit_reason);
1693 case KVM_EXIT_INTERNAL_ERROR:
1694 ret = kvm_handle_internal_error(cpu, run);
1697 DPRINTF("kvm_arch_handle_exit\n");
1698 ret = kvm_arch_handle_exit(cpu, run);
1704 cpu_dump_state(cpu, stderr, fprintf, CPU_DUMP_CODE);
1705 vm_stop(RUN_STATE_INTERNAL_ERROR);
1708 cpu->exit_request = 0;
1712 int kvm_ioctl(KVMState *s, int type, ...)
1719 arg = va_arg(ap, void *);
1722 trace_kvm_ioctl(type, arg);
1723 ret = ioctl(s->fd, type, arg);
1730 int kvm_vm_ioctl(KVMState *s, int type, ...)
1737 arg = va_arg(ap, void *);
1740 trace_kvm_vm_ioctl(type, arg);
1741 ret = ioctl(s->vmfd, type, arg);
1748 int kvm_vcpu_ioctl(CPUState *cpu, int type, ...)
1755 arg = va_arg(ap, void *);
1758 trace_kvm_vcpu_ioctl(cpu->cpu_index, type, arg);
1759 ret = ioctl(cpu->kvm_fd, type, arg);
1766 int kvm_has_sync_mmu(void)
1768 return kvm_check_extension(kvm_state, KVM_CAP_SYNC_MMU);
1771 int kvm_has_vcpu_events(void)
1773 return kvm_state->vcpu_events;
1776 int kvm_has_robust_singlestep(void)
1778 return kvm_state->robust_singlestep;
1781 int kvm_has_debugregs(void)
1783 return kvm_state->debugregs;
1786 int kvm_has_xsave(void)
1788 return kvm_state->xsave;
1791 int kvm_has_xcrs(void)
1793 return kvm_state->xcrs;
1796 int kvm_has_pit_state2(void)
1798 return kvm_state->pit_state2;
1801 int kvm_has_many_ioeventfds(void)
1803 if (!kvm_enabled()) {
1806 return kvm_state->many_ioeventfds;
1809 int kvm_has_gsi_routing(void)
1811 #ifdef KVM_CAP_IRQ_ROUTING
1812 return kvm_check_extension(kvm_state, KVM_CAP_IRQ_ROUTING);
1818 int kvm_has_intx_set_mask(void)
1820 return kvm_state->intx_set_mask;
1823 void *kvm_ram_alloc(ram_addr_t size)
1828 mem = kvm_arch_ram_alloc(size);
1833 return qemu_anon_ram_alloc(size);
1836 void kvm_setup_guest_memory(void *start, size_t size)
1838 #ifdef CONFIG_VALGRIND_H
1839 VALGRIND_MAKE_MEM_DEFINED(start, size);
1841 if (!kvm_has_sync_mmu()) {
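/* Without KVM_CAP_SYNC_MMU, copy-on-write after fork() can leave KVM
 * referencing the parent's original pages while the guest sees the
 * child's copies, so guest memory must be excluded from fork(). */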
1842 int ret = qemu_madvise(start, size, QEMU_MADV_DONTFORK);
1845 perror("qemu_madvise");
1847 "Need MADV_DONTFORK in absence of synchronous KVM MMU\n");
1853 #ifdef KVM_CAP_SET_GUEST_DEBUG
1854 struct kvm_sw_breakpoint *kvm_find_sw_breakpoint(CPUState *cpu,
1857 struct kvm_sw_breakpoint *bp;
1859 QTAILQ_FOREACH(bp, &cpu->kvm_state->kvm_sw_breakpoints, entry) {
1867 int kvm_sw_breakpoints_active(CPUState *cpu)
1869 return !QTAILQ_EMPTY(&cpu->kvm_state->kvm_sw_breakpoints);
1872 struct kvm_set_guest_debug_data {
1873 struct kvm_guest_debug dbg;
1878 static void kvm_invoke_set_guest_debug(void *data)
1880 struct kvm_set_guest_debug_data *dbg_data = data;
1882 dbg_data->err = kvm_vcpu_ioctl(dbg_data->cpu, KVM_SET_GUEST_DEBUG,
1886 int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
1888 CPUState *cpu = ENV_GET_CPU(env);
1889 struct kvm_set_guest_debug_data data;
1891 data.dbg.control = reinject_trap;
1893 if (env->singlestep_enabled) {
1894 data.dbg.control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;
1896 kvm_arch_update_guest_debug(cpu, &data.dbg);
1899 run_on_cpu(cpu, kvm_invoke_set_guest_debug, &data);
1903 int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
1904 target_ulong len, int type)
1906 CPUState *current_cpu = ENV_GET_CPU(current_env);
1907 struct kvm_sw_breakpoint *bp;
1911 if (type == GDB_BREAKPOINT_SW) {
1912 bp = kvm_find_sw_breakpoint(current_cpu, addr);
1918 bp = g_malloc(sizeof(struct kvm_sw_breakpoint));
1925 err = kvm_arch_insert_sw_breakpoint(current_cpu, bp);
1931 QTAILQ_INSERT_HEAD(&current_cpu->kvm_state->kvm_sw_breakpoints,
1934 err = kvm_arch_insert_hw_breakpoint(addr, len, type);
1940 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1941 err = kvm_update_guest_debug(env, 0);
1949 int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
1950 target_ulong len, int type)
1952 CPUState *current_cpu = ENV_GET_CPU(current_env);
1953 struct kvm_sw_breakpoint *bp;
1957 if (type == GDB_BREAKPOINT_SW) {
1958 bp = kvm_find_sw_breakpoint(current_cpu, addr);
1963 if (bp->use_count > 1) {
1968 err = kvm_arch_remove_sw_breakpoint(current_cpu, bp);
1973 QTAILQ_REMOVE(&current_cpu->kvm_state->kvm_sw_breakpoints, bp, entry);
1976 err = kvm_arch_remove_hw_breakpoint(addr, len, type);
1982 for (env = first_cpu; env != NULL; env = env->next_cpu) {
1983 err = kvm_update_guest_debug(env, 0);
1991 void kvm_remove_all_breakpoints(CPUArchState *current_env)
1993 CPUState *current_cpu = ENV_GET_CPU(current_env);
1994 struct kvm_sw_breakpoint *bp, *next;
1995 KVMState *s = current_cpu->kvm_state;
1999 QTAILQ_FOREACH_SAFE(bp, &s->kvm_sw_breakpoints, entry, next) {
2000 if (kvm_arch_remove_sw_breakpoint(current_cpu, bp) != 0) {
2001 /* Try harder to find a CPU that currently sees the breakpoint. */
2002 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2003 cpu = ENV_GET_CPU(env);
2004 if (kvm_arch_remove_sw_breakpoint(cpu, bp) == 0) {
2009 QTAILQ_REMOVE(&s->kvm_sw_breakpoints, bp, entry);
2012 kvm_arch_remove_all_hw_breakpoints();
2014 for (env = first_cpu; env != NULL; env = env->next_cpu) {
2015 kvm_update_guest_debug(env, 0);
2019 #else /* !KVM_CAP_SET_GUEST_DEBUG */
2021 int kvm_update_guest_debug(CPUArchState *env, unsigned long reinject_trap)
2026 int kvm_insert_breakpoint(CPUArchState *current_env, target_ulong addr,
2027 target_ulong len, int type)
2032 int kvm_remove_breakpoint(CPUArchState *current_env, target_ulong addr,
2033 target_ulong len, int type)
2038 void kvm_remove_all_breakpoints(CPUArchState *current_env)
2041 #endif /* !KVM_CAP_SET_GUEST_DEBUG */
2043 int kvm_set_signal_mask(CPUState *cpu, const sigset_t *sigset)
2045 struct kvm_signal_mask *sigmask;
2049 return kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, NULL);
2052 sigmask = g_malloc(sizeof(*sigmask) + sizeof(*sigset));
2055 memcpy(sigmask->sigset, sigset, sizeof(*sigset));
2056 r = kvm_vcpu_ioctl(cpu, KVM_SET_SIGNAL_MASK, sigmask);
2061 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
2063 return kvm_arch_on_sigbus_vcpu(cpu, code, addr);
2066 int kvm_on_sigbus(int code, void *addr)
2068 return kvm_arch_on_sigbus(code, addr);