#include "exec/ram_addr.h"
#include "hw/acpi/acpi.h"
#include "qemu/host-utils.h"
+#include "qemu/rcu_queue.h"
#ifdef DEBUG_ARCH_INIT
#define DPRINTF(fmt, ...) \
#define QEMU_ARCH QEMU_ARCH_XTENSA
#elif defined(TARGET_UNICORE32)
#define QEMU_ARCH QEMU_ARCH_UNICORE32
+#elif defined(TARGET_TRICORE)
+#define QEMU_ARCH QEMU_ARCH_TRICORE
#endif
const uint32_t arch_type = QEMU_ARCH;
/* We don't care if this fails to allocate a new cache page
 * as long as it updates an old one */
- cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE);
+ cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
+ bitmap_sync_count);
}
#define ENCODING_FLAG_XBZRLE 0x1
int encoded_len = 0, bytes_sent = -1;
uint8_t *prev_cached_page;
- if (!cache_is_cached(XBZRLE.cache, current_addr)) {
+ if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
acct_info.xbzrle_cache_miss++;
if (!last_stage) {
- if (cache_insert(XBZRLE.cache, current_addr, *current_data) == -1) {
+ if (cache_insert(XBZRLE.cache, current_addr, *current_data,
+ bitmap_sync_count) == -1) {
return -1;
} else {
/* update *current_data when the page has been
}
-/* Needs iothread lock! */
+/* FIXME: too many global variables are used in the migration process. */
+static int64_t start_time;
+static int64_t bytes_xfer_prev;
+static int64_t num_dirty_pages_period;
+static void migration_bitmap_sync_init(void)
+{
+ start_time = 0;
+ bytes_xfer_prev = 0;
+ num_dirty_pages_period = 0;
+}
+
+/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
RAMBlock *block;
uint64_t num_dirty_pages_init = migration_dirty_pages;
MigrationState *s = migrate_get_current();
- static int64_t start_time;
- static int64_t bytes_xfer_prev;
- static int64_t num_dirty_pages_period;
int64_t end_time;
int64_t bytes_xfer_now;
static uint64_t xbzrle_cache_miss_prev;
trace_migration_bitmap_sync_start();
address_space_sync_dirty_bitmap(&address_space_memory);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- migration_bitmap_sync_range(block->mr->ram_addr, block->length);
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ migration_bitmap_sync_range(block->mr->ram_addr, block->used_length);
}
+ rcu_read_unlock();
+
trace_migration_bitmap_sync_end(migration_dirty_pages
- num_dirty_pages_init);
num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
}
/*
- * ram_save_block: Writes a page of memory to the stream f
+ * ram_save_page: Send the given page to the stream
+ *
+ * Returns: Number of bytes written.
+ */
+static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
+ bool last_stage)
+{
+ int bytes_sent;
+ int cont;
+ ram_addr_t current_addr;
+ MemoryRegion *mr = block->mr;
+ uint8_t *p;
+ int ret;
+ bool send_async = true;
+
+ cont = (block == last_sent_block) ? RAM_SAVE_FLAG_CONTINUE : 0;
+
+ p = memory_region_get_ram_ptr(mr) + offset;
+
+ /* When in doubt, send the page as a normal page */
+ bytes_sent = -1;
+ ret = ram_control_save_page(f, block->offset,
+ offset, TARGET_PAGE_SIZE, &bytes_sent);
+
+ XBZRLE_cache_lock();
+
+ current_addr = block->offset + offset;
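+ /* If a transport hook (e.g. RDMA) handled the page above, account for
+ * it here unless the hook deferred the send (RAM_SAVE_CONTROL_DELAYED).
+ */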
+ if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
+ if (ret != RAM_SAVE_CONTROL_DELAYED) {
+ if (bytes_sent > 0) {
+ acct_info.norm_pages++;
+ } else if (bytes_sent == 0) {
+ acct_info.dup_pages++;
+ }
+ }
+ } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
+ acct_info.dup_pages++;
+ bytes_sent = save_block_hdr(f, block, offset, cont,
+ RAM_SAVE_FLAG_COMPRESS);
+ qemu_put_byte(f, 0);
+ bytes_sent++;
+ /* Must let xbzrle know, otherwise a previous (now 0'd) cached
+ * page would be stale
+ */
+ xbzrle_cache_zero_page(current_addr);
+ } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
+ bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
+ offset, cont, last_stage);
+ if (!last_stage) {
+ /* Can't send this cached data async, since the cache page
+ * might get updated before it gets to the wire
+ */
+ send_async = false;
+ }
+ }
+
+ /* XBZRLE overflow or normal page */
+ if (bytes_sent == -1) {
+ bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
+ if (send_async) {
+ qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
+ } else {
+ qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
+ }
+ bytes_sent += TARGET_PAGE_SIZE;
+ acct_info.norm_pages++;
+ }
+
+ XBZRLE_cache_unlock();
+
+ return bytes_sent;
+}
+
+/*
+ * ram_find_and_save_block: Finds a page to send and sends it to f
+ *
+ * Called within an RCU critical section.
*
* Returns: The number of bytes written.
* 0 means no dirty pages
*/
-static int ram_save_block(QEMUFile *f, bool last_stage)
+static int ram_find_and_save_block(QEMUFile *f, bool last_stage)
{
RAMBlock *block = last_seen_block;
ram_addr_t offset = last_offset;
bool complete_round = false;
int bytes_sent = 0;
MemoryRegion *mr;
- ram_addr_t current_addr;
if (!block)
- block = QTAILQ_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
while (true) {
mr = block->mr;
offset >= last_offset) {
break;
}
- if (offset >= block->length) {
+ if (offset >= block->used_length) {
offset = 0;
- block = QTAILQ_NEXT(block, next);
+ block = QLIST_NEXT_RCU(block, next);
if (!block) {
- block = QTAILQ_FIRST(&ram_list.blocks);
+ block = QLIST_FIRST_RCU(&ram_list.blocks);
complete_round = true;
ram_bulk_stage = false;
}
} else {
- int ret;
- uint8_t *p;
- bool send_async = true;
- int cont = (block == last_sent_block) ?
- RAM_SAVE_FLAG_CONTINUE : 0;
-
- p = memory_region_get_ram_ptr(mr) + offset;
-
- /* In doubt sent page as normal */
- bytes_sent = -1;
- ret = ram_control_save_page(f, block->offset,
- offset, TARGET_PAGE_SIZE, &bytes_sent);
-
- XBZRLE_cache_lock();
-
- current_addr = block->offset + offset;
- if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
- if (ret != RAM_SAVE_CONTROL_DELAYED) {
- if (bytes_sent > 0) {
- acct_info.norm_pages++;
- } else if (bytes_sent == 0) {
- acct_info.dup_pages++;
- }
- }
- } else if (is_zero_range(p, TARGET_PAGE_SIZE)) {
- acct_info.dup_pages++;
- bytes_sent = save_block_hdr(f, block, offset, cont,
- RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, 0);
- bytes_sent++;
- /* Must let xbzrle know, otherwise a previous (now 0'd) cached
- * page would be stale
- */
- xbzrle_cache_zero_page(current_addr);
- } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
- bytes_sent = save_xbzrle_page(f, &p, current_addr, block,
- offset, cont, last_stage);
- if (!last_stage) {
- /* Can't send this cached data async, since the cache page
- * might get updated before it gets to the wire
- */
- send_async = false;
- }
- }
-
- /* XBZRLE overflow or normal page */
- if (bytes_sent == -1) {
- bytes_sent = save_block_hdr(f, block, offset, cont, RAM_SAVE_FLAG_PAGE);
- if (send_async) {
- qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
- } else {
- qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
- }
- bytes_sent += TARGET_PAGE_SIZE;
- acct_info.norm_pages++;
- }
+ bytes_sent = ram_save_page(f, block, offset, last_stage);
- XBZRLE_cache_unlock();
/* if page is unmodified, continue to the next */
if (bytes_sent > 0) {
last_sent_block = block;
}
}
}
+
last_seen_block = block;
last_offset = offset;
-
return bytes_sent;
}
RAMBlock *block;
uint64_t total = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next)
- total += block->length;
-
+ rcu_read_lock();
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
+ total += block->used_length;
+ rcu_read_unlock();
return total;
}
XBZRLE_cache_lock();
if (XBZRLE.cache) {
cache_fini(XBZRLE.cache);
- g_free(XBZRLE.cache);
g_free(XBZRLE.encoded_buf);
g_free(XBZRLE.current_buf);
XBZRLE.cache = NULL;
#define MAX_WAIT 50 /* ms, half buffered_file limit */
+
+/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
+ * a long-running RCU critical section. When RCU reclaims in the code
+ * start to become numerous, it will be necessary to reduce the
+ * granularity of these critical sections.
+ */
+
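For reference, the RCU read-side pattern all three of these sections (and ram_bytes_total above) rely on boils down to the sketch below. It is illustrative only and not part of the patch: rcu_read_lock()/rcu_read_unlock() come from "qemu/rcu.h", QLIST_FOREACH_RCU from "qemu/rcu_queue.h", and visit_block() is a hypothetical stand-in for the per-block work.

    RAMBlock *block;

    rcu_read_lock();                          /* enter read-side critical section */
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        /* A block unlinked by a concurrent writer remains valid until the
         * next grace period, so its fields can still be read safely here. */
        visit_block(block);                   /* hypothetical per-block work */
    }
    rcu_read_unlock();                        /* allow deferred reclamation */
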
static int ram_save_setup(QEMUFile *f, void *opaque)
{
RAMBlock *block;
mig_throttle_on = false;
dirty_rate_high_cnt = 0;
bitmap_sync_count = 0;
+ migration_bitmap_sync_init();
if (migrate_use_xbzrle()) {
XBZRLE_cache_lock();
acct_clear();
}
+ /* iothread lock needed for ram_list.dirty_memory[] */
qemu_mutex_lock_iothread();
qemu_mutex_lock_ramlist();
+ rcu_read_lock();
bytes_transferred = 0;
reset_ram_globals();
* gaps due to alignment or unplugs.
*/
migration_dirty_pages = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
uint64_t block_pages;
- block_pages = block->length >> TARGET_PAGE_BITS;
+ block_pages = block->used_length >> TARGET_PAGE_BITS;
migration_dirty_pages += block_pages;
}
memory_global_dirty_log_start();
migration_bitmap_sync();
+ qemu_mutex_unlock_ramlist();
qemu_mutex_unlock_iothread();
qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
qemu_put_byte(f, strlen(block->idstr));
qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
- qemu_put_be64(f, block->length);
+ qemu_put_be64(f, block->used_length);
}
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
ram_control_before_iterate(f, RAM_CONTROL_SETUP);
ram_control_after_iterate(f, RAM_CONTROL_SETUP);
int64_t t0;
int total_sent = 0;
- qemu_mutex_lock_ramlist();
-
+ rcu_read_lock();
if (ram_list.version != last_version) {
reset_ram_globals();
}
+ /* Read version before ram_list.blocks */
+ smp_rmb();
+
ram_control_before_iterate(f, RAM_CONTROL_ROUND);
t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
while ((ret = qemu_file_rate_limit(f)) == 0) {
int bytes_sent;
- bytes_sent = ram_save_block(f, false);
+ bytes_sent = ram_find_and_save_block(f, false);
/* no more blocks to send */
if (bytes_sent == 0) {
break;
}
i++;
}
-
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
/*
* Must occur before EOS (or any QEMUFile operation)
return total_sent;
}
+/* Called with iothread lock held */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
- qemu_mutex_lock_ramlist();
+ rcu_read_lock();
+
migration_bitmap_sync();
ram_control_before_iterate(f, RAM_CONTROL_FINISH);
while (true) {
int bytes_sent;
- bytes_sent = ram_save_block(f, true);
+ bytes_sent = ram_find_and_save_block(f, true);
/* no more blocks to send */
if (bytes_sent == 0) {
break;
ram_control_after_iterate(f, RAM_CONTROL_FINISH);
migration_end();
- qemu_mutex_unlock_ramlist();
+ rcu_read_unlock();
qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
return 0;
if (remaining_size < max_size) {
qemu_mutex_lock_iothread();
+ rcu_read_lock();
migration_bitmap_sync();
+ rcu_read_unlock();
qemu_mutex_unlock_iothread();
remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
}
static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
- int ret, rc = 0;
unsigned int xh_len;
int xh_flags;
xh_len = qemu_get_be16(f);
if (xh_flags != ENCODING_FLAG_XBZRLE) {
- fprintf(stderr, "Failed to load XBZRLE page - wrong compression!\n");
+ error_report("Failed to load XBZRLE page - wrong compression!");
return -1;
}
if (xh_len > TARGET_PAGE_SIZE) {
- fprintf(stderr, "Failed to load XBZRLE page - len overflow!\n");
+ error_report("Failed to load XBZRLE page - len overflow!");
return -1;
}
/* load data and decode */
qemu_get_buffer(f, xbzrle_decoded_buf, xh_len);
/* decode RLE */
- ret = xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
- TARGET_PAGE_SIZE);
- if (ret == -1) {
- fprintf(stderr, "Failed to load XBZRLE page - decode error!\n");
- rc = -1;
- } else if (ret > TARGET_PAGE_SIZE) {
- fprintf(stderr, "Failed to load XBZRLE page - size %d exceeds %d!\n",
- ret, TARGET_PAGE_SIZE);
- abort();
+ if (xbzrle_decode_buffer(xbzrle_decoded_buf, xh_len, host,
+ TARGET_PAGE_SIZE) == -1) {
+ error_report("Failed to load XBZRLE page - decode error!");
+ return -1;
}
- return rc;
+ return 0;
}
+/* Must be called from within an RCU critical section.
+ * Returns a pointer from within the RCU-protected ram_list.
+ */
static inline void *host_from_stream_offset(QEMUFile *f,
ram_addr_t offset,
int flags)
uint8_t len;
if (flags & RAM_SAVE_FLAG_CONTINUE) {
- if (!block) {
- fprintf(stderr, "Ack, bad migration stream!\n");
+ if (!block || block->max_length <= offset) {
+ error_report("Ack, bad migration stream!");
return NULL;
}
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
- if (!strncmp(id, block->idstr, sizeof(id)))
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
+ if (!strncmp(id, block->idstr, sizeof(id)) &&
+ block->max_length > offset) {
return memory_region_get_ram_ptr(block->mr) + offset;
+ }
}
- fprintf(stderr, "Can't find block %s!\n", id);
+ error_report("Can't find block %s!", id);
return NULL;
}
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
- ram_addr_t addr;
- int flags, ret = 0;
- int error;
+ int flags = 0, ret = 0;
static uint64_t seq_iter;
seq_iter++;
if (version_id != 4) {
ret = -EINVAL;
- goto done;
}
- do {
- addr = qemu_get_be64(f);
+ /* This RCU critical section can be very long-running.
+ * When RCU reclaims in the code start to become numerous,
+ * it will be necessary to reduce the granularity of this
+ * critical section.
+ */
+ rcu_read_lock();
+ while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
+ ram_addr_t addr, total_ram_bytes;
+ void *host;
+ uint8_t ch;
+ addr = qemu_get_be64(f);
flags = addr & ~TARGET_PAGE_MASK;
addr &= TARGET_PAGE_MASK;
- if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
+ switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
+ case RAM_SAVE_FLAG_MEM_SIZE:
/* Synchronize RAM block list */
- char id[256];
- ram_addr_t length;
- ram_addr_t total_ram_bytes = addr;
-
- while (total_ram_bytes) {
+ total_ram_bytes = addr;
+ while (!ret && total_ram_bytes) {
RAMBlock *block;
uint8_t len;
+ char id[256];
+ ram_addr_t length;
len = qemu_get_byte(f);
qemu_get_buffer(f, (uint8_t *)id, len);
id[len] = 0;
length = qemu_get_be64(f);
- QTAILQ_FOREACH(block, &ram_list.blocks, next) {
+ QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
if (!strncmp(id, block->idstr, sizeof(id))) {
- if (block->length != length) {
- fprintf(stderr,
- "Length mismatch: %s: " RAM_ADDR_FMT
- " in != " RAM_ADDR_FMT "\n", id, length,
- block->length);
- ret = -EINVAL;
- goto done;
+ if (length != block->used_length) {
+ Error *local_err = NULL;
+
+ ret = qemu_ram_resize(block->offset, length, &local_err);
+ if (local_err) {
+ error_report_err(local_err);
+ }
}
break;
}
}
if (!block) {
- fprintf(stderr, "Unknown ramblock \"%s\", cannot "
- "accept migration\n", id);
+ error_report("Unknown ramblock \"%s\", cannot "
+ "accept migration", id);
ret = -EINVAL;
- goto done;
}
total_ram_bytes -= length;
}
- }
-
- if (flags & RAM_SAVE_FLAG_COMPRESS) {
- void *host;
- uint8_t ch;
-
+ break;
+ case RAM_SAVE_FLAG_COMPRESS:
host = host_from_stream_offset(f, addr, flags);
if (!host) {
+ error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
- goto done;
+ break;
}
-
ch = qemu_get_byte(f);
ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
- } else if (flags & RAM_SAVE_FLAG_PAGE) {
- void *host;
-
+ break;
+ case RAM_SAVE_FLAG_PAGE:
host = host_from_stream_offset(f, addr, flags);
if (!host) {
+ error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
- goto done;
+ break;
}
-
qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
- } else if (flags & RAM_SAVE_FLAG_XBZRLE) {
- void *host = host_from_stream_offset(f, addr, flags);
+ break;
+ case RAM_SAVE_FLAG_XBZRLE:
+ host = host_from_stream_offset(f, addr, flags);
if (!host) {
+ error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
ret = -EINVAL;
- goto done;
+ break;
}
-
if (load_xbzrle(f, addr, host) < 0) {
+ error_report("Failed to decompress XBZRLE page at "
+ RAM_ADDR_FMT, addr);
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case RAM_SAVE_FLAG_EOS:
+ /* normal exit */
+ break;
+ default:
+ if (flags & RAM_SAVE_FLAG_HOOK) {
+ ram_control_load_hook(f, flags);
+ } else {
+ error_report("Unknown combination of migration flags: %#x",
+ flags);
ret = -EINVAL;
- goto done;
}
- } else if (flags & RAM_SAVE_FLAG_HOOK) {
- ram_control_load_hook(f, flags);
}
- error = qemu_file_get_error(f);
- if (error) {
- ret = error;
- goto done;
+ if (!ret) {
+ ret = qemu_file_get_error(f);
}
- } while (!(flags & RAM_SAVE_FLAG_EOS));
+ }
-done:
+ rcu_read_unlock();
DPRINTF("Completed load of VM with exit code %d seq iteration "
"%" PRIu64 "\n", ret, seq_iter);
return ret;
if (!c->name) {
if (l > 80) {
- fprintf(stderr,
- "Unknown sound card name (too big to show)\n");
+ error_report("Unknown sound card name (too big to show)");
}
else {
- fprintf(stderr, "Unknown sound card name `%.*s'\n",
- (int) l, p);
+ error_report("Unknown sound card name `%.*s'",
+ (int) l, p);
}
bad_card = 1;
}
if (c->enabled) {
if (c->isa) {
if (!isa_bus) {
- fprintf(stderr, "ISA bus not available for %s\n", c->name);
+ error_report("ISA bus not available for %s", c->name);
exit(1);
}
c->init.init_isa(isa_bus);
} else {
if (!pci_bus) {
- fprintf(stderr, "PCI bus not available for %s\n", c->name);
+ error_report("PCI bus not available for %s", c->name);
exit(1);
}
c->init.init_pci(pci_bus);
#endif
}
-int tcg_available(void)
-{
- return 1;
-}
-
int kvm_available(void)
{
#ifdef CONFIG_KVM