#include "qmp-commands.h"
#include "trace.h"
#include "exec/cpu-all.h"
+#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
+#include "qemu/host-utils.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
#else
int graphic_width = 800;
int graphic_height = 600;
-int graphic_depth = 15;
+int graphic_depth = 32;
#endif
#endif
const uint32_t arch_type = QEMU_ARCH;
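+/* Auto-converge state: when the guest repeatedly dirties memory faster than
+ * it is transferred, mig_throttle_on is set and check_guest_throttling()
+ * periodically pauses the VCPUs to slow the guest down. */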
+static bool mig_throttle_on;
+static int dirty_rate_high_cnt;
+static void check_guest_throttling(void);
/***********************************************************/
/* ram save/restore */
#define RAM_SAVE_FLAG_EOS 0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE 0x40
-
+/* 0x80 is reserved in migration.h; start with 0x100 next */
static struct defconfig_file {
const char *filename;
{ NULL }, /* end of list */
};
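+/* A page of zeros, used to refresh the XBZRLE cache when a zero page has
+ * been sent (see xbzrle_cache_zero_page()). */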
+static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
int qemu_read_default_config_files(bool userconfig)
{
return 0;
}
-static inline bool is_zero_page(uint8_t *p)
+static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
- return buffer_find_nonzero_offset(p, TARGET_PAGE_SIZE) ==
- TARGET_PAGE_SIZE;
+ return buffer_find_nonzero_offset(p, size) == size;
}
/* struct contains XBZRLE cache and a static page
uint8_t *encoded_buf;
/* buffer for storing page content */
uint8_t *current_buf;
- /* buffer used for XBZRLE decoding */
- uint8_t *decoded_buf;
- /* Cache for XBZRLE */
+ /* Cache for XBZRLE, Protected by lock. */
PageCache *cache;
+ QemuMutex lock;
} XBZRLE = {
.encoded_buf = NULL,
.current_buf = NULL,
- .decoded_buf = NULL,
.cache = NULL,
};
+/* buffer used for XBZRLE decoding */
+static uint8_t *xbzrle_decoded_buf;
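+/* The monitor can resize the XBZRLE cache while the migration thread is
+ * using it, so every cache access is protected by XBZRLE.lock.  The lock is
+ * only initialised, and therefore only taken, when XBZRLE is in use. */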
+static void XBZRLE_cache_lock(void)
+{
+    if (migrate_use_xbzrle()) {
+        qemu_mutex_lock(&XBZRLE.lock);
+    }
+}
+
+static void XBZRLE_cache_unlock(void)
+{
+    if (migrate_use_xbzrle()) {
+        qemu_mutex_unlock(&XBZRLE.lock);
+    }
+}
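+/* Resize the XBZRLE page cache, e.g. from the migrate-set-cache-size QMP
+ * command.  Returns the new cache size in bytes, rounded down to a power of
+ * two, or -1 on failure. */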
int64_t xbzrle_cache_resize(int64_t new_size)
{
+ PageCache *new_cache, *cache_to_free;
+
+ if (new_size < TARGET_PAGE_SIZE) {
+ return -1;
+ }
+
+    /* no need to lock, the current thread holds the QEMU big lock */
if (XBZRLE.cache != NULL) {
- return cache_resize(XBZRLE.cache, new_size / TARGET_PAGE_SIZE) *
- TARGET_PAGE_SIZE;
+        /* nothing to do if the requested size matches the current cache size */
+ if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
+ return pow2floor(new_size);
+ }
+ new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
+ TARGET_PAGE_SIZE);
+ if (!new_cache) {
+ DPRINTF("Error creating cache\n");
+ return -1;
+ }
+
+ XBZRLE_cache_lock();
+        /* the XBZRLE.cache may have been destroyed, check it again */
+ if (XBZRLE.cache != NULL) {
+ cache_to_free = XBZRLE.cache;
+ XBZRLE.cache = new_cache;
+ } else {
+ cache_to_free = new_cache;
+ }
+ XBZRLE_cache_unlock();
+
+ cache_fini(cache_to_free);
}
+
return pow2floor(new_size);
}
return size;
}
+/* This is the last block that we have visited searching for dirty pages
+ */
+static RAMBlock *last_seen_block;
+/* This is the last block from where we have sent data */
+static RAMBlock *last_sent_block;
+static ram_addr_t last_offset;
+static unsigned long *migration_bitmap;
+static uint64_t migration_dirty_pages;
+static uint32_t last_version;
+static bool ram_bulk_stage;
+
+/* Update the xbzrle cache to reflect a page that's been sent as all 0.
+ * The important thing is that a stale (not-yet-0'd) page be replaced
+ * by the new data.
+ * As a bonus, if the page wasn't in the cache it gets added so that
+ * when a small write is made into the 0'd page it gets XBZRLE sent
+ */
+static void xbzrle_cache_zero_page(ram_addr_t current_addr)
+{
+ if (ram_bulk_stage || !migrate_use_xbzrle()) {
+ return;
+ }
+
+ /* We don't care if this fails to allocate a new cache page
+     * as long as it updates an old one */
+ cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
+}
+
#define ENCODING_FLAG_XBZRLE 0x1
static int save_xbzrle_page(QEMUFile *f, uint8_t *current_data,
if (!cache_is_cached(XBZRLE.cache, current_addr)) {
if (!last_stage) {
- cache_insert(XBZRLE.cache, current_addr, current_data);
+ if (cache_insert(XBZRLE.cache, current_addr, current_data) == -1) {
+ return -1;
+ }
}
acct_info.xbzrle_cache_miss++;
return -1;
return bytes_sent;
}
-
-/* This is the last block that we have visited serching for dirty pages
- */
-static RAMBlock *last_seen_block;
-/* This is the last block from where we have sent data */
-static RAMBlock *last_sent_block;
-static ram_addr_t last_offset;
-static unsigned long *migration_bitmap;
-static uint64_t migration_dirty_pages;
-static uint32_t last_version;
-static bool ram_bulk_stage;
-
static inline
ram_addr_t migration_bitmap_find_and_reset_dirty(MemoryRegion *mr,
ram_addr_t start)
{
unsigned long base = mr->ram_addr >> TARGET_PAGE_BITS;
unsigned long nr = base + (start >> TARGET_PAGE_BITS);
- unsigned long size = base + (int128_get64(mr->size) >> TARGET_PAGE_BITS);
+ uint64_t mr_size = TARGET_PAGE_ALIGN(memory_region_size(mr));
+ unsigned long size = base + (mr_size >> TARGET_PAGE_BITS);
unsigned long next;
return (next - base) << TARGET_PAGE_BITS;
}
-static inline bool migration_bitmap_set_dirty(MemoryRegion *mr,
- ram_addr_t offset)
+static inline bool migration_bitmap_set_dirty(ram_addr_t addr)
{
bool ret;
- int nr = (mr->ram_addr + offset) >> TARGET_PAGE_BITS;
+ int nr = addr >> TARGET_PAGE_BITS;
ret = test_and_set_bit(nr, migration_bitmap);
return ret;
}
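+/* Transfer the dirty bits for [start, start + length) from the global
+ * DIRTY_MEMORY_MIGRATION bitmap into migration_bitmap, updating
+ * migration_dirty_pages.  Word-aligned starts take a long-at-a-time fast
+ * path. */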
+static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
+{
+ ram_addr_t addr;
+ unsigned long page = BIT_WORD(start >> TARGET_PAGE_BITS);
+
+ /* start address is aligned at the start of a word? */
+ if (((page * BITS_PER_LONG) << TARGET_PAGE_BITS) == start) {
+ int k;
+ int nr = BITS_TO_LONGS(length >> TARGET_PAGE_BITS);
+ unsigned long *src = ram_list.dirty_memory[DIRTY_MEMORY_MIGRATION];
+
+ for (k = page; k < page + nr; k++) {
+ if (src[k]) {
+ unsigned long new_dirty;
+ new_dirty = ~migration_bitmap[k];
+ migration_bitmap[k] |= src[k];
+ new_dirty &= src[k];
+ migration_dirty_pages += ctpopl(new_dirty);
+ src[k] = 0;
+ }
+ }
+ } else {
+ for (addr = 0; addr < length; addr += TARGET_PAGE_SIZE) {
+ if (cpu_physical_memory_get_dirty(start + addr,
+ TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION)) {
+ cpu_physical_memory_reset_dirty(start + addr,
+ TARGET_PAGE_SIZE,
+ DIRTY_MEMORY_MIGRATION);
+ migration_bitmap_set_dirty(start + addr);
+ }
+ }
+ }
+}
+
+
/* Needs iothread lock! */
static void migration_bitmap_sync(void)
{
RAMBlock *block;
- ram_addr_t addr;
uint64_t num_dirty_pages_init = migration_dirty_pages;
MigrationState *s = migrate_get_current();
static int64_t start_time;
+ static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;
int64_t end_time;
+ int64_t bytes_xfer_now;
+
+ if (!bytes_xfer_prev) {
+ bytes_xfer_prev = ram_bytes_transferred();
+ }
if (!start_time) {
- start_time = qemu_get_clock_ms(rt_clock);
+ start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
}
trace_migration_bitmap_sync_start();
address_space_sync_dirty_bitmap(&address_space_memory);
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- for (addr = 0; addr < block->length; addr += TARGET_PAGE_SIZE) {
- if (memory_region_test_and_clear_dirty(block->mr,
- addr, TARGET_PAGE_SIZE,
- DIRTY_MEMORY_MIGRATION)) {
- migration_bitmap_set_dirty(block->mr, addr);
- }
- }
+ migration_bitmap_sync_range(block->mr->ram_addr, block->length);
}
trace_migration_bitmap_sync_end(migration_dirty_pages
- num_dirty_pages_init);
num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
- end_time = qemu_get_clock_ms(rt_clock);
+ end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
/* more than 1 second = 1000 milliseconds */
if (end_time > start_time + 1000) {
+ if (migrate_auto_converge()) {
+ /* The following detection logic can be refined later. For now:
+               Check to see if the dirtied bytes exceed 50% of the approx.
+ amount of bytes that just got transferred since the last time we
+ were in this routine. If that happens >N times (for now N==4)
+ we turn on the throttle down logic */
+ bytes_xfer_now = ram_bytes_transferred();
+ if (s->dirty_pages_rate &&
+ (num_dirty_pages_period * TARGET_PAGE_SIZE >
+ (bytes_xfer_now - bytes_xfer_prev)/2) &&
+ (dirty_rate_high_cnt++ > 4)) {
+ trace_migration_throttle();
+ mig_throttle_on = true;
+ dirty_rate_high_cnt = 0;
+ }
+ bytes_xfer_prev = bytes_xfer_now;
+ } else {
+ mig_throttle_on = false;
+ }
s->dirty_pages_rate = num_dirty_pages_period * 1000
/ (end_time - start_time);
s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
ram_bulk_stage = false;
}
} else {
+ int ret;
uint8_t *p;
+ bool send_async = true;
int cont = (block == last_sent_block) ?
RAM_SAVE_FLAG_CONTINUE : 0;
/* When in doubt, send the page as normal */
bytes_sent = -1;
- if (is_zero_page(p)) {
- acct_info.dup_pages++;
- if (!ram_bulk_stage) {
- bytes_sent = save_block_hdr(f, block, offset, cont,
- RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, 0);
- bytes_sent++;
- } else {
- acct_info.skipped_pages++;
- bytes_sent = 0;
+ ret = ram_control_save_page(f, block->offset,
+ offset, TARGET_PAGE_SIZE, &bytes_sent);
+
+ XBZRLE_cache_lock();
+
+ current_addr = block->offset + offset;
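+    /* If a transport hook (e.g. RDMA) handled the page itself, only update
+     * the accounting; otherwise fall through to the zero/XBZRLE/normal
+     * save paths below. */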
+ if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
+ if (ret != RAM_SAVE_CONTROL_DELAYED) {
+ if (bytes_sent > 0) {
+ acct_info.norm_pages++;
+ } else if (bytes_sent == 0) {
+ acct_info.dup_pages++;
+ }
}
+ } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
+ acct_info.dup_pages++;
+ bytes_sent = save_block_hdr(f, block, offset, cont,
+ RAM_SAVE_FLAG_COMPRESS);
+ qemu_put_byte(f, 0);
+ bytes_sent++;
+ /* Must let xbzrle know, otherwise a previous (now 0'd) cached
+ * page would be stale
+ */
+ xbzrle_cache_zero_page(current_addr);
} else if (!ram_bulk_stage && migrate_use_xbzrle()) {
- current_addr = block->offset + offset;
bytes_sent = save_xbzrle_page(f, p, current_addr, block,
offset, cont, last_stage);
if (!last_stage) {
+ /* We must send exactly what's in the xbzrle cache
+ * even if the page wasn't xbzrle compressed, so that
+ * it's right next time.
+ */
p = get_cached_data(XBZRLE.cache, current_addr);
+
+ /* Can't send this cached data async, since the cache page
+ * might get updated before it gets to the wire
+ */
+ send_async = false;
}
}
/* XBZRLE overflow or normal page */
if (bytes_sent == -1) {
bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
- qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+ if (send_async) {
+ qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+ } else {
+ qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+ }
bytes_sent += TARGET_PAGE_SIZE;
acct_info.norm_pages++;
}
+ XBZRLE_cache_unlock();
/* if page is unmodified, continue to the next */
if (bytes_sent > 0) {
last_sent_block = block;
static uint64_t bytes_transferred;
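+/* Account for pages that bypass the normal save path (for example, pages
+ * written directly by an RDMA transport), keeping the duplicate/normal
+ * page counters and bytes_transferred consistent. */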
+void acct_update_position(QEMUFile *f, size_t size, bool zero)
+{
+ uint64_t pages = size / TARGET_PAGE_SIZE;
+ if (zero) {
+ acct_info.dup_pages += pages;
+ } else {
+ acct_info.norm_pages += pages;
+ bytes_transferred += size;
+ qemu_update_position(f, size);
+ }
+}
+
static ram_addr_t ram_save_remaining(void)
{
return migration_dirty_pages;
return total;
}
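+/* Release the lazily allocated XBZRLE decode buffer, so the incoming side
+ * can free it once migration is done with it (it is allocated on demand by
+ * the XBZRLE load code below). */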
+void free_xbzrle_decoded_buf(void)
+{
+ g_free(xbzrle_decoded_buf);
+ xbzrle_decoded_buf = NULL;
+}
+
static void migration_end(void)
{
if (migration_bitmap) {
migration_bitmap = NULL;
}
+ XBZRLE_cache_lock();
if (XBZRLE.cache) {
cache_fini(XBZRLE.cache);
g_free(XBZRLE.cache);
g_free(XBZRLE.encoded_buf);
g_free(XBZRLE.current_buf);
- g_free(XBZRLE.decoded_buf);
XBZRLE.cache = NULL;
+ XBZRLE.encoded_buf = NULL;
+ XBZRLE.current_buf = NULL;
}
+ XBZRLE_cache_unlock();
}
static void ram_migration_cancel(void *opaque)
migration_bitmap = bitmap_new(ram_pages);
bitmap_set(migration_bitmap, 0, ram_pages);
migration_dirty_pages = ram_pages;
+ mig_throttle_on = false;
+ dirty_rate_high_cnt = 0;
if (migrate_use_xbzrle()) {
+ qemu_mutex_lock_iothread();
XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
TARGET_PAGE_SIZE,
TARGET_PAGE_SIZE);
if (!XBZRLE.cache) {
+ qemu_mutex_unlock_iothread();
DPRINTF("Error creating cache\n");
return -1;
}
- XBZRLE.encoded_buf = g_malloc0(TARGET_PAGE_SIZE);
- XBZRLE.current_buf = g_malloc(TARGET_PAGE_SIZE);
+ qemu_mutex_init(&XBZRLE.lock);
+ qemu_mutex_unlock_iothread();
+
+ /* We prefer not to abort if there is no memory */
+ XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
+ if (!XBZRLE.encoded_buf) {
+ DPRINTF("Error allocating encoded_buf\n");
+ return -1;
+ }
+
+ XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
+ if (!XBZRLE.current_buf) {
+ DPRINTF("Error allocating current_buf\n");
+ g_free(XBZRLE.encoded_buf);
+ XBZRLE.encoded_buf = NULL;
+ return -1;
+ }
+
acct_clear();
}
}
qemu_mutex_unlock_ramlist();
+
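+    /* Let transport hooks (e.g. RDMA) do their setup work, such as
+     * registering RAM blocks, before the first pass over memory. */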
+ ram_control_before_iterate(f, RAM_CONTROL_SETUP);
+ ram_control_after_iterate(f, RAM_CONTROL_SETUP);
+
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
reset_ram_globals();
}
- t0 = qemu_get_clock_ns(rt_clock);
+ ram_control_before_iterate(f, RAM_CONTROL_ROUND);
+
+ t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
i = 0;
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
}
total_sent += bytes_sent;
acct_info.iterations++;
+ check_guest_throttling();
/* we want to check in the 1st loop, just in case it was the 1st time
and we had to sync the dirty bitmap.
qemu_clock_get_ns() is a bit expensive, so we only check every few
iterations
*/
if ((i & 63) == 0) {
- uint64_t t1 = (qemu_get_clock_ns(rt_clock) - t0) / 1000000;
+ uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
if (t1 > MAX_WAIT) {
DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
t1, i);
qemu_mutex_unlock_ramlist();
+ /*
+ * Must occur before EOS (or any QEMUFile operation)
+ * because of RDMA protocol.
+ */
+ ram_control_after_iterate(f, RAM_CONTROL_ROUND);
+
+ bytes_transferred += total_sent;
+
+ /*
+ * Do not count these 8 bytes into total_sent, so that we can
+ * return 0 if no page had been dirtied.
+ */
+ qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
+ bytes_transferred += 8;
+
+ ret = qemu_file_get_error(f);
if (ret < 0) {
- bytes_transferred += total_sent;
return ret;
}
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
- total_sent += 8;
- bytes_transferred += total_sent;
-
return total_sent;
}
qemu_mutex_lock_ramlist();
migration_bitmap_sync();
+ ram_control_before_iterate(f, RAM_CONTROL_FINISH);
+
/* try transferring iterative blocks of memory */
/* flush all remaining blocks regardless of rate limiting */
}
bytes_transferred += bytes_sent;
}
+
+ ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
qemu_mutex_unlock_ramlist();
unsigned int xh_len;
int xh_flags;
- if (!XBZRLE.decoded_buf) {
- XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
+ if (!xbzrle_decoded_buf) {
+ xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
}
/* extract RLE header */
return -1;
}
/* load data and decode */
- qemu_get_buffer(f, XBZRLE.decoded_buf, xh_len);
+ qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
/* decode RLE */
- ret = xbzrle_decode_buffer(XBZRLE.decoded_buf, xh_len, host,
+ ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
TARGET_PAGE_SIZE);
if (ret == -1) {
fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
return NULL;
}
+/*
+ * If a page (or a whole RDMA chunk) has been
+ * determined to be zero, then zap it.
+ */
+void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
+{
+ if (ch != 0 || !is_zero_range(host, size)) {
+ memset(host, ch, size);
+ }
+}
+
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
ram_addr_t addr;
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
if (block->length != length) {
+ fprintf(stderr,
+ "Length mismatch: %s: " RAM_ADDR_FMT
+ " in != " RAM_ADDR_FMT "\n", id, length,
+ block->length);
ret = -EINVAL;
goto done;
}
}
ch = qemu_get_byte(f);
- memset(host, ch, TARGET_PAGE_SIZE);
-#ifndef _WIN32
- if (ch == 0 &&
- (!kvm_enabled() || kvm_has_sync_mmu()) &&
- getpagesize() <= TARGET_PAGE_SIZE) {
- qemu_madvise(host, TARGET_PAGE_SIZE, QEMU_MADV_DONTNEED);
- }
-#endif
+ ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
} else if (flags & RAM_SAVE_FLAG_PAGE) {
void *host;
ret = -EINVAL;
goto done;
}
+ } else if (flags & RAM_SAVE_FLAG_HOOK) {
+ ram_control_load_hook(f, flags);
}
error = qemu_file_get_error(f);
if (error) {
if (ret != 16) {
return -1;
}
-#ifdef TARGET_I386
- smbios_add_field(1, offsetof(struct smbios_type_1, uuid), uuid, 16);
-#endif
return 0;
}
acpi_table_add(opts, &err);
if (err) {
- fprintf(stderr, "Wrong acpi table provided: %s\n",
- error_get_pretty(err));
+ error_report("Wrong acpi table provided: %s",
+ error_get_pretty(err));
error_free(err);
exit(1);
}
#endif
}
-void do_smbios_option(const char *optarg)
+void do_smbios_option(QemuOpts *opts)
{
#ifdef TARGET_I386
- if (smbios_entry_add(optarg) < 0) {
- exit(1);
- }
+ smbios_entry_add(opts);
#endif
}
return info;
}
+
+/* Stub function that gets run on the vcpu when it is brought out of the
+   VM to run inside qemu via async_run_on_cpu() */
+static void mig_sleep_cpu(void *opq)
+{
+ qemu_mutex_unlock_iothread();
+ g_usleep(30*1000);
+ qemu_mutex_lock_iothread();
+}
+
+/* To reduce the dirty rate, explicitly disallow the VCPUs from spending
+   much time in the VM. The migration thread will try to catch up.
+   The workload will experience a performance drop.
+*/
+static void mig_throttle_guest_down(void)
+{
+ CPUState *cpu;
+
+ qemu_mutex_lock_iothread();
+ CPU_FOREACH(cpu) {
+ async_run_on_cpu(cpu, mig_sleep_cpu, NULL);
+ }
+ qemu_mutex_unlock_iothread();
+}
+
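+/* Called from the RAM save loop: while throttling is on, put the VCPUs to
+ * sleep (via mig_throttle_guest_down()) at most once every 40 ms. */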
+static void check_guest_throttling(void)
+{
+ static int64_t t0;
+ int64_t t1;
+
+ if (!mig_throttle_on) {
+ return;
+ }
+
+ if (!t0) {
+ t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+ return;
+ }
+
+ t1 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
+
+ /* If it has been more than 40 ms since the last time the guest
+ * was throttled then do it again.
+ */
+ if (40 < (t1-t0)/1000000) {
+ mig_throttle_guest_down();
+ t0 = t1;
+ }
+}