return ctx;
}
+static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
+{
+ uint32List *list = NULL, *entry = NULL;
+ int i;
+
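+ /* Build the list back-to-front so it ends up ordered by vCPU index */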
+ for (i = smp_cpus - 1; i >= 0; i--) {
+ entry = g_new0(uint32List, 1);
+ entry->value = ctx->vcpu_blocktime[i];
+ entry->next = list;
+ list = entry;
+ }
+
+ return list;
+}
+
+/*
+ * Populate MigrationInfo from postcopy's blocktime context.  The
+ * blocktime fields are filled in only when the postcopy-blocktime
+ * capability was set on the incoming side.
+ *
+ * @info: pointer to MigrationInfo to populate
+ */
+void fill_destination_postcopy_migration_info(MigrationInfo *info)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
+
+ if (!bc) {
+ return;
+ }
+
+ info->has_postcopy_blocktime = true;
+ info->postcopy_blocktime = bc->total_blocktime;
+ info->has_postcopy_vcpu_blocktime = true;
+ info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
+}
+
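+/*
+ * Return the accumulated postcopy blocktime recorded on the
+ * destination, or 0 when blocktime tracking is disabled.
+ */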
+static uint32_t get_postcopy_total_blocktime(void)
+{
+ MigrationIncomingState *mis = migration_incoming_get_current();
+ PostcopyBlocktimeContext *bc = mis->blocktime_ctx;
+
+ if (!bc) {
+ return 0;
+ }
+
+ return bc->total_blocktime;
+}
+
/**
* receive_ufd_features: check userfaultfd features, so that only
* supported features are requested in the future.
}
/* We don't support postcopy with shared RAM yet */
- if (qemu_ram_foreach_block(test_ramblock_postcopiable, NULL)) {
+ if (qemu_ram_foreach_migratable_block(test_ramblock_postcopiable, NULL)) {
goto out;
}
* postcopy later; must be called prior to any precopy.
* called from arch_init's similarly named ram_postcopy_incoming_init
*/
-int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
+int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(init_range, NULL)) {
+ if (qemu_ram_foreach_migratable_block(init_range, NULL)) {
return -1;
}
return -1;
}
- if (qemu_ram_foreach_block(cleanup_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(cleanup_range, mis)) {
return -1;
}
/* Let the fault thread quit */
munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
mis->postcopy_tmp_zero_page = NULL;
}
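+ /* Report the accumulated blocktime through tracing during cleanup */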
+ trace_postcopy_ram_incoming_cleanup_blocktime(
+ get_postcopy_total_blocktime());
+
trace_postcopy_ram_incoming_cleanup_exit();
return 0;
}
*/
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
- if (qemu_ram_foreach_block(nhp_range, mis)) {
+ if (qemu_ram_foreach_migratable_block(nhp_range, mis)) {
return -1;
}
/*
* Mark the given area of RAM as requiring notification on accesses to unwritten areas
- * Used as a callback on qemu_ram_foreach_block.
+ * Used as a callback on qemu_ram_foreach_migratable_block.
* host_addr: Base of area to mark
* offset: Offset in the whole ram arena
* length: Length of the section
affected_cpu);
}
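+/*
+ * Block the fault thread on postcopy_pause_sem_fault until the
+ * migration is resumed; returns true once we are woken up to
+ * continue servicing faults.
+ */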
+static bool postcopy_pause_fault_thread(MigrationIncomingState *mis)
+{
+ trace_postcopy_pause_fault_thread();
+
+ qemu_sem_wait(&mis->postcopy_pause_sem_fault);
+
+ trace_postcopy_pause_fault_thread_continued();
+
+ return true;
+}
+
/*
* Handle faults detected by the USERFAULT markings
*/
break;
}
+ if (!mis->to_src_file) {
+ /*
+ * The return path may already have been flagged as broken
+ * (e.g. we were told via an event).  Hold here until the
+ * channel is rebuilt.
+ */
+ if (postcopy_pause_fault_thread(mis)) {
+ mis->last_rb = NULL;
+ /* Continue to read the userfaultfd */
+ } else {
+ error_report("%s: paused but don't allow to continue",
+ __func__);
+ break;
+ }
+ }
+
if (pfd[1].revents) {
uint64_t tmp64 = 0;
(uintptr_t)(msg.arg.pagefault.address),
msg.arg.pagefault.feat.ptid, rb);
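+ /*
+ * We jump back here to re-send the page request after the
+ * channel has been rebuilt following a network failure.
+ */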
+retry:
/*
* Send the request to the source - we want to request one
* of our host page sizes (which is >= TPS)
*/
if (rb != mis->last_rb) {
mis->last_rb = rb;
- migrate_send_rp_req_pages(mis, qemu_ram_get_idstr(rb),
- rb_offset, qemu_ram_pagesize(rb));
+ ret = migrate_send_rp_req_pages(mis,
+ qemu_ram_get_idstr(rb),
+ rb_offset,
+ qemu_ram_pagesize(rb));
} else {
/* Save some space */
- migrate_send_rp_req_pages(mis, NULL,
- rb_offset, qemu_ram_pagesize(rb));
+ ret = migrate_send_rp_req_pages(mis,
+ NULL,
+ rb_offset,
+ qemu_ram_pagesize(rb));
+ }
+
+ if (ret) {
+ /* Possibly a network failure; wait for recovery */
+ if (ret == -EIO && postcopy_pause_fault_thread(mis)) {
+ /* We got reconnected somehow, try to continue */
+ mis->last_rb = NULL;
+ goto retry;
+ } else {
+ /* This is an unavoidable fault */
+ error_report("%s: migrate_send_rp_req_pages() returned %d",
+ __func__, ret);
+ break;
+ }
}
}
mis->have_fault_thread = true;
/* Mark so that we get notified of accesses to unwritten areas */
- if (qemu_ram_foreach_block(ram_block_enable_notify, mis)) {
+ if (qemu_ram_foreach_migratable_block(ram_block_enable_notify, mis)) {
return -1;
}
#else
/* No target OS support, stubs just fail */
+void fill_destination_postcopy_migration_info(MigrationInfo *info)
+{
+}
+
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis)
{
error_report("%s: No OS support", __func__);
return false;
}
-int postcopy_ram_incoming_init(MigrationIncomingState *mis, size_t ram_pages)
+int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
error_report("postcopy_ram_incoming_init: No OS support");
return -1;