#include "sysemu/kvm.h"
#include "qemu/bswap.h"
#include "exec/memory.h"
+#include "exec/ram_addr.h"
#include "exec/address-spaces.h"
#include "qemu/event_notifier.h"
#include "trace.h"
struct KVMState
{
- KVMSlot slots[32];
+ KVMSlot *slots;
+ int nr_slots;
int fd;
int vmfd;
int coalesced_mmio;
{
int i;
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ for (i = 0; i < s->nr_slots; i++) {
if (s->slots[i].memory_size == 0) {
return &s->slots[i];
}
{
int i;
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &s->slots[i];
if (start_addr == mem->start_addr &&
KVMSlot *found = NULL;
int i;
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &s->slots[i];
if (mem->memory_size == 0 ||
{
int i;
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ for (i = 0; i < s->nr_slots; i++) {
KVMSlot *mem = &s->slots[i];
if (ram >= mem->ram && ram < mem->ram + mem->memory_size) {
s->migration_log = enable;
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
+ for (i = 0; i < s->nr_slots; i++) {
mem = &s->slots[i];
if (!mem->memory_size) {
static int kvm_get_dirty_pages_log_range(MemoryRegionSection *section,
unsigned long *bitmap)
{
- unsigned int i, j;
- unsigned long page_number, c;
- hwaddr addr, addr1;
- unsigned int pages = int128_get64(section->size) / getpagesize();
- unsigned int len = (pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
- unsigned long hpratio = getpagesize() / TARGET_PAGE_SIZE;
+ ram_addr_t start = section->offset_within_region + section->mr->ram_addr;
+ ram_addr_t pages = int128_get64(section->size) / getpagesize();
- /*
- * bitmap-traveling is faster than memory-traveling (for addr...)
- * especially when most of the memory is not dirty.
- */
- for (i = 0; i < len; i++) {
- if (bitmap[i] != 0) {
- c = leul_to_cpu(bitmap[i]);
- do {
- j = ffsl(c) - 1;
- c &= ~(1ul << j);
- page_number = (i * HOST_LONG_BITS + j) * hpratio;
- addr1 = page_number * TARGET_PAGE_SIZE;
- addr = section->offset_within_region + addr1;
- memory_region_set_dirty(section->mr, addr,
- TARGET_PAGE_SIZE * hpratio);
- } while (c != 0);
- }
- }
+ cpu_physical_memory_set_dirty_lebitmap(bitmap, start, pages);
return 0;
}
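For reference, the open-coded walk this hunk deletes is the same little-endian bitmap traversal the new helper call is expected to centralize. A minimal sketch of that pattern, keeping the removed code's leul_to_cpu()/ffsl() idiom; mark_dirty() is a hypothetical stand-in for the real dirty-marking call, and HOST_LONG_BITS/leul_to_cpu() are assumed to come from the QEMU headers already included above:

    /* Walk a little-endian dirty bitmap word by word and report each set
     * bit as one dirty target page; hpratio scales host pages to target
     * pages exactly as the removed loop did. */
    static void walk_dirty_bitmap(const unsigned long *bitmap,
                                  unsigned long nr_pages,
                                  unsigned long hpratio,
                                  void (*mark_dirty)(unsigned long target_page))
    {
        unsigned long len = (nr_pages + HOST_LONG_BITS - 1) / HOST_LONG_BITS;
        unsigned long i, c;

        for (i = 0; i < len; i++) {
            if (bitmap[i] == 0) {
                continue;                   /* skip fully clean words cheaply */
            }
            c = leul_to_cpu(bitmap[i]);     /* KVM fills the bitmap little-endian */
            do {
                int j = ffsl(c) - 1;        /* lowest set bit = next dirty page */
                c &= ~(1ul << j);
                mark_dirty((i * HOST_LONG_BITS + j) * hpratio);
            } while (c != 0);
        }
    }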
return ret;
}
-static int kvm_set_ioeventfd_mmio(int fd, uint32_t addr, uint32_t val,
+static int kvm_set_ioeventfd_mmio(int fd, hwaddr addr, uint32_t val,
bool assign, uint32_t size, bool datamatch)
{
int ret;
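Widening addr here matters because the kernel side of this call already takes a 64-bit address, so MMIO ioeventfds above 4GB would otherwise be truncated. A rough sketch of the ioctl this wrapper drives, assuming the struct kvm_ioeventfd layout from <linux/kvm.h>; the function name and error handling are illustrative, not the full body of kvm_set_ioeventfd_mmio():

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    typedef uint64_t hwaddr;    /* stand-in for QEMU's "exec/hwaddr.h" */

    static int ioeventfd_mmio_assign(int vm_fd, int event_fd, hwaddr addr,
                                     uint32_t val, uint32_t size,
                                     bool datamatch)
    {
        struct kvm_ioeventfd iofd = {
            .datamatch = datamatch ? val : 0,
            .addr      = addr,              /* __u64 in the kernel ABI */
            .len       = size,
            .fd        = event_fd,
            .flags     = datamatch ? KVM_IOEVENTFD_FLAG_DATAMATCH : 0,
        };

        return ioctl(vm_fd, KVM_IOEVENTFD, &iofd) < 0 ? -errno : 0;
    }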
* page size for the system though.
*/
assert(TARGET_PAGE_SIZE <= getpagesize());
+ page_size_init();
#ifdef KVM_CAP_SET_GUEST_DEBUG
QTAILQ_INIT(&s->kvm_sw_breakpoints);
#endif
- for (i = 0; i < ARRAY_SIZE(s->slots); i++) {
- s->slots[i].slot = i;
- }
s->vmfd = -1;
s->fd = qemu_open("/dev/kvm", O_RDWR);
if (s->fd == -1) {
goto err;
}
+ s->nr_slots = kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
+
+ /* If unspecified, use the default value */
+ if (!s->nr_slots) {
+ s->nr_slots = 32;
+ }
+
+ s->slots = g_malloc0(s->nr_slots * sizeof(KVMSlot));
+
+ for (i = 0; i < s->nr_slots; i++) {
+ s->slots[i].slot = i;
+ }
+
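Worth noting: kvm_check_extension() boils down to KVM_CHECK_EXTENSION on the /dev/kvm fd, and a kernel that predates KVM_CAP_NR_MEMSLOTS simply reports nothing, which is what the fallback to the old hard-coded 32 covers. A sketch of that probe under those assumptions; probe_nr_memslots() is illustrative, not a QEMU function:

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Ask the kernel how many memory slots it supports; treat "capability
     * not reported" (<= 0) as the historical default of 32 slots. */
    static int probe_nr_memslots(int kvm_fd)
    {
        int n = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);

        return n > 0 ? n : 32;
    }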
/* check the vcpu limits */
soft_vcpus_limit = kvm_recommended_vcpus(s);
hard_vcpus_limit = kvm_max_vcpus(s);
nc++;
}
- s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
- if (s->vmfd < 0) {
+ do {
+ ret = kvm_ioctl(s, KVM_CREATE_VM, 0);
+ } while (ret == -EINTR);
+
+ if (ret < 0) {
+ fprintf(stderr, "ioctl(KVM_CREATE_VM) failed: %d %s\n", -ret,
+ strerror(-ret));
+
#ifdef TARGET_S390X
fprintf(stderr, "Please add the 'switch_amode' kernel parameter to "
"your host kernel command line\n");
#endif
- ret = s->vmfd;
goto err;
}
+ s->vmfd = ret;
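The retry loop relies on kvm_ioctl() folding a failing ioctl into a negative errno return, so comparing against -EINTR is enough to catch a signal interrupting VM creation, and only the final successful fd is stored in s->vmfd. A sketch of that return convention; kvm_ioctl_like() is a hypothetical reduction of the real wrapper:

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Mirror the convention assumed above: >= 0 is the ioctl result,
     * < 0 is -errno from a failed call. */
    static int kvm_ioctl_like(int fd, unsigned long request, void *arg)
    {
        int ret = ioctl(fd, request, arg);

        return ret < 0 ? -errno : ret;
    }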
missing_cap = kvm_check_extension_list(s, kvm_required_capabilites);
if (!missing_cap) {
missing_cap =
if (s->fd != -1) {
close(s->fd);
}
+ g_free(s->slots);
g_free(s);
return ret;