#include "hw/hw.h"
#include "gdbstub.h"
#include "kvm.h"
+#include "bswap.h"
/* KVM uses PAGE_SIZE in its definition of COALESCED_MMIO_MAX */
#define PAGE_SIZE TARGET_PAGE_SIZE
//#define DEBUG_KVM
#ifdef DEBUG_KVM
-#define dprintf(fmt, ...) \
+#define DPRINTF(fmt, ...) \
do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
-#define dprintf(fmt, ...) \
+#define DPRINTF(fmt, ...) \
do { } while (0)
#endif
typedef struct kvm_dirty_log KVMDirtyLog;
-int kvm_allowed = 0;
-
struct KVMState
{
KVMSlot slots[32];
int migration_log;
int vcpu_events;
int robust_singlestep;
+ int debugregs;
#ifdef KVM_CAP_SET_GUEST_DEBUG
struct kvm_sw_breakpoint_head kvm_sw_breakpoints;
#endif
CPUState *env = opaque;
kvm_arch_reset_vcpu(env);
- if (kvm_arch_put_registers(env)) {
- fprintf(stderr, "Fatal: kvm vcpu reset failed\n");
- abort();
- }
}
int kvm_irqchip_in_kernel(void)
long mmap_size;
int ret;
- dprintf("kvm_init_vcpu\n");
+ DPRINTF("kvm_init_vcpu\n");
ret = kvm_vm_ioctl(s, KVM_CREATE_VCPU, env->cpu_index);
if (ret < 0) {
- dprintf("kvm_create_vcpu failed\n");
+ DPRINTF("kvm_create_vcpu failed\n");
goto err;
}
mmap_size = kvm_ioctl(s, KVM_GET_VCPU_MMAP_SIZE, 0);
if (mmap_size < 0) {
- dprintf("KVM_GET_VCPU_MMAP_SIZE failed\n");
+ DPRINTF("KVM_GET_VCPU_MMAP_SIZE failed\n");
goto err;
}
env->kvm_fd, 0);
if (env->kvm_run == MAP_FAILED) {
ret = -errno;
- dprintf("mmap'ing vcpu state failed\n");
+ DPRINTF("mmap'ing vcpu state failed\n");
goto err;
}
if (ret == 0) {
qemu_register_reset(kvm_reset_vcpu, env);
kvm_arch_reset_vcpu(env);
- ret = kvm_arch_put_registers(env);
}
err:
return ret;
return 0;
}
-static int test_le_bit(unsigned long nr, unsigned char *addr)
+/* get kvm's dirty pages bitmap and update qemu's */
+static int kvm_get_dirty_pages_log_range(unsigned long start_addr,
+ unsigned long *bitmap,
+ unsigned long offset,
+ unsigned long mem_size)
{
- return (addr[nr >> 3] >> (nr & 7)) & 1;
+ unsigned int i, j;
+ unsigned long page_number, addr, addr1, c;
+ ram_addr_t ram_addr;
+ unsigned int len = ((mem_size / TARGET_PAGE_SIZE) + HOST_LONG_BITS - 1) /
+ HOST_LONG_BITS;
+
+ /*
+ * bitmap-traveling is faster than memory-traveling (for addr...)
+ * especially when most of the memory is not dirty.
+ */
+ for (i = 0; i < len; i++) {
+ if (bitmap[i] != 0) {
+ c = leul_to_cpu(bitmap[i]);
+ do {
+ j = ffsl(c) - 1;
+ c &= ~(1ul << j);
+ page_number = i * HOST_LONG_BITS + j;
+ addr1 = page_number * TARGET_PAGE_SIZE;
+ addr = offset + addr1;
+ ram_addr = cpu_get_physical_page_desc(addr);
+ cpu_physical_memory_set_dirty(ram_addr);
+ } while (c != 0);
+ }
+ }
+ return 0;
}
+#define ALIGN(x, y) (((x)+(y)-1) & ~((y)-1))
+
/**
* kvm_physical_sync_dirty_bitmap - Grab dirty bitmap from kernel space
* This function updates qemu's dirty bitmap using cpu_physical_memory_set_dirty().
{
KVMState *s = kvm_state;
unsigned long size, allocated_size = 0;
- target_phys_addr_t phys_addr;
- ram_addr_t addr;
KVMDirtyLog d;
KVMSlot *mem;
int ret = 0;
break;
}
- size = ((mem->memory_size >> TARGET_PAGE_BITS) + 7) / 8;
+ size = ALIGN(((mem->memory_size) >> TARGET_PAGE_BITS), HOST_LONG_BITS) / 8;
if (!d.dirty_bitmap) {
d.dirty_bitmap = qemu_malloc(size);
} else if (size > allocated_size) {
d.slot = mem->slot;
if (kvm_vm_ioctl(s, KVM_GET_DIRTY_LOG, &d) == -1) {
- dprintf("ioctl failed %d\n", errno);
+ DPRINTF("ioctl failed %d\n", errno);
ret = -1;
break;
}
- for (phys_addr = mem->start_addr, addr = mem->phys_offset;
- phys_addr < mem->start_addr + mem->memory_size;
- phys_addr += TARGET_PAGE_SIZE, addr += TARGET_PAGE_SIZE) {
- unsigned char *bitmap = (unsigned char *)d.dirty_bitmap;
- unsigned nr = (phys_addr - mem->start_addr) >> TARGET_PAGE_BITS;
-
- if (test_le_bit(nr, bitmap)) {
- cpu_physical_memory_set_dirty(addr);
- }
- }
- start_addr = phys_addr;
+ kvm_get_dirty_pages_log_range(mem->start_addr, d.dirty_bitmap,
+ mem->start_addr, mem->memory_size);
+ start_addr = mem->start_addr + mem->memory_size;
}
qemu_free(d.dirty_bitmap);
}
s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
- if (s->vmfd < 0)
+ if (s->vmfd < 0) {
+#ifdef TARGET_S390X
+ fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
+ "your host kernel command line\n");
+#endif
goto err;
+ }
/* initially, KVM allocated its own memory and we had to jump through
* hooks to make phys_ram_base point to this. Modern versions of KVM
kvm_check_extension(s, KVM_CAP_X86_ROBUST_SINGLESTEP);
#endif
+ s->debugregs = 0;
+#ifdef KVM_CAP_DEBUGREGS
+ s->debugregs = kvm_check_extension(s, KVM_CAP_DEBUGREGS);
+#endif
+
ret = kvm_arch_init(s, smp_cpus);
if (ret < 0)
goto err;
return 1;
}
#ifdef KVM_CAP_INTERNAL_ERROR_DATA
/* Report a KVM_EXIT_INTERNAL_ERROR to the user and stop the VM.
 *
 * When the host kernel supports KVM_CAP_INTERNAL_ERROR_DATA, the suberror
 * code and any extra diagnostic words from the kernel are printed first;
 * the CPU state dump and the emulation-failure note are emitted either way.
 */
static void kvm_handle_internal_error(CPUState *env, struct kvm_run *run)
{
    if (kvm_check_extension(kvm_state, KVM_CAP_INTERNAL_ERROR_DATA)) {
        int idx;

        fprintf(stderr, "KVM internal error. Suberror: %d\n",
                run->internal.suberror);
        for (idx = 0; idx < run->internal.ndata; ++idx) {
            fprintf(stderr, "extra data[%d]: %"PRIx64"\n",
                    idx, (uint64_t)run->internal.data[idx]);
        }
    }
    cpu_dump_state(env, stderr, fprintf, 0);
    if (run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION) {
        fprintf(stderr, "emulation failure\n");
    }
    /* FIXME: Should trigger a qmp message to let management know
     * something went wrong.
     */
    vm_stop(0);
}
#endif
+
void kvm_flush_coalesced_mmio_buffer(void)
{
#ifdef KVM_CAP_COALESCED_MMIO
}
}
+void kvm_cpu_synchronize_post_reset(CPUState *env)
+{
+ kvm_arch_put_registers(env, KVM_PUT_RESET_STATE);
+ env->kvm_vcpu_dirty = 0;
+}
+
+void kvm_cpu_synchronize_post_init(CPUState *env)
+{
+ kvm_arch_put_registers(env, KVM_PUT_FULL_STATE);
+ env->kvm_vcpu_dirty = 0;
+}
+
int kvm_cpu_exec(CPUState *env)
{
struct kvm_run *run = env->kvm_run;
int ret;
- dprintf("kvm_cpu_exec()\n");
+ DPRINTF("kvm_cpu_exec()\n");
do {
#ifndef CONFIG_IOTHREAD
if (env->exit_request) {
- dprintf("interrupt exit requested\n");
+ DPRINTF("interrupt exit requested\n");
ret = 0;
break;
}
#endif
if (env->kvm_vcpu_dirty) {
- kvm_arch_put_registers(env);
+ kvm_arch_put_registers(env, KVM_PUT_RUNTIME_STATE);
env->kvm_vcpu_dirty = 0;
}
if (ret == -EINTR || ret == -EAGAIN) {
cpu_exit(env);
- dprintf("io window exit\n");
+ DPRINTF("io window exit\n");
ret = 0;
break;
}
if (ret < 0) {
- dprintf("kvm run failed %s\n", strerror(-ret));
+ DPRINTF("kvm run failed %s\n", strerror(-ret));
abort();
}
ret = 0; /* exit loop */
switch (run->exit_reason) {
case KVM_EXIT_IO:
- dprintf("handle_io\n");
+ DPRINTF("handle_io\n");
ret = kvm_handle_io(run->io.port,
(uint8_t *)run + run->io.data_offset,
run->io.direction,
run->io.count);
break;
case KVM_EXIT_MMIO:
- dprintf("handle_mmio\n");
+ DPRINTF("handle_mmio\n");
cpu_physical_memory_rw(run->mmio.phys_addr,
run->mmio.data,
run->mmio.len,
ret = 1;
break;
case KVM_EXIT_IRQ_WINDOW_OPEN:
- dprintf("irq_window_open\n");
+ DPRINTF("irq_window_open\n");
break;
case KVM_EXIT_SHUTDOWN:
- dprintf("shutdown\n");
+ DPRINTF("shutdown\n");
qemu_system_reset_request();
ret = 1;
break;
case KVM_EXIT_UNKNOWN:
- dprintf("kvm_exit_unknown\n");
+ DPRINTF("kvm_exit_unknown\n");
break;
case KVM_EXIT_FAIL_ENTRY:
- dprintf("kvm_exit_fail_entry\n");
+ DPRINTF("kvm_exit_fail_entry\n");
break;
case KVM_EXIT_EXCEPTION:
- dprintf("kvm_exit_exception\n");
+ DPRINTF("kvm_exit_exception\n");
+ break;
+#ifdef KVM_CAP_INTERNAL_ERROR_DATA
+ case KVM_EXIT_INTERNAL_ERROR:
+ kvm_handle_internal_error(env, run);
break;
+#endif
case KVM_EXIT_DEBUG:
- dprintf("kvm_exit_debug\n");
+ DPRINTF("kvm_exit_debug\n");
#ifdef KVM_CAP_SET_GUEST_DEBUG
if (kvm_arch_debug(&run->debug.arch)) {
gdb_set_stop_cpu(env);
#endif /* KVM_CAP_SET_GUEST_DEBUG */
break;
default:
- dprintf("kvm_arch_handle_exit\n");
+ DPRINTF("kvm_arch_handle_exit\n");
ret = kvm_arch_handle_exit(env, run);
break;
}
return kvm_state->robust_singlestep;
}
+int kvm_has_debugregs(void)
+{
+ return kvm_state->debugregs;
+}
+
void kvm_setup_guest_memory(void *start, size_t size)
{
if (!kvm_has_sync_mmu()) {
return r;
}
+
/* Assign or deassign an ioeventfd matching a 16-bit write of @val to PIO
 * port @addr.
 *
 * @fd:     eventfd to signal on a matching guest write
 * @addr:   I/O port to watch
 * @val:    datamatch value (exact 2-byte match)
 * @assign: true to install the ioeventfd, false to remove it
 *
 * Returns 0 on success, a negative errno from the ioctl on failure, or
 * -ENOSYS when KVM is disabled or the host lacks KVM_IOEVENTFD support.
 */
int kvm_set_ioeventfd_pio_word(int fd, uint16_t addr, uint16_t val, bool assign)
{
#ifdef KVM_IOEVENTFD
    struct kvm_ioeventfd kick = {
        .datamatch = val,
        .addr = addr,
        .len = 2,
        .flags = KVM_IOEVENTFD_FLAG_DATAMATCH | KVM_IOEVENTFD_FLAG_PIO,
        .fd = fd,
    };
    int ret;

    if (!kvm_enabled()) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    ret = kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
    return ret < 0 ? ret : 0;
#else
    return -ENOSYS;
#endif
}