.state = MIG_STATE_SETUP,
.bandwidth_limit = MAX_THROTTLE,
.xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
+ .mbps = -1, /* no throughput measured yet */
};
return &current_migration;
qemu_fclose(f);
if (ret < 0) {
fprintf(stderr, "load of migration failed\n");
- exit(0);
+ exit(EXIT_FAILURE);
}
qemu_announce_self();
DPRINTF("successfully loaded vm state\n");
int fd = qemu_get_fd(f);
assert(fd != -1);
- socket_set_nonblock(fd);
+ qemu_set_nonblock(fd);
qemu_coroutine_enter(co, f);
}
info->ram->normal = norm_mig_pages_transferred();
info->ram->normal_bytes = norm_mig_bytes_transferred();
info->ram->dirty_pages_rate = s->dirty_pages_rate;
+ info->ram->mbps = s->mbps;
if (blk_mig_active()) {
info->has_disk = true;
info->ram->skipped = skipped_mig_pages_transferred();
info->ram->normal = norm_mig_pages_transferred();
info->ram->normal_bytes = norm_mig_bytes_transferred();
+ info->ram->mbps = s->mbps;
break;
case MIG_STATE_ERROR:
info->has_status = true;
static void migrate_finish_set_state(MigrationState *s, int new_state)
{
- if (__sync_val_compare_and_swap(&s->state, MIG_STATE_ACTIVE,
- new_state) == new_state) {
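+ /* atomic_cmpxchg() returns the previous value, matching __sync_val_compare_and_swap(). */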
+ if (atomic_cmpxchg(&s->state, MIG_STATE_ACTIVE, new_state) == new_state) {
trace_migrate_set_state(new_state);
}
}
sizeof(enabled_capabilities));
memset(s, 0, sizeof(*s));
- s->bandwidth_limit = bandwidth_limit;
s->params = *params;
memcpy(s->enabled_capabilities, enabled_capabilities,
sizeof(enabled_capabilities));
max_downtime = (uint64_t)value;
}
+bool migrate_rdma_pin_all(void)
+{
+ MigrationState *s;
+
+ s = migrate_get_current();
+
+ return s->enabled_capabilities[MIGRATION_CAPABILITY_X_RDMA_PIN_ALL];
+}
+
+bool migrate_auto_converge(void)
+{
+ MigrationState *s;
+
+ s = migrate_get_current();
+
+ return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
+}
+
+bool migrate_zero_blocks(void)
+{
+ MigrationState *s;
+
+ s = migrate_get_current();
+
+ return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
+}
+
int migrate_use_xbzrle(void)
{
MigrationState *s;
{
MigrationState *s = opaque;
int64_t initial_time = qemu_get_clock_ms(rt_clock);
- int64_t sleep_time = 0;
int64_t initial_bytes = 0;
int64_t max_size = 0;
int64_t start_time = initial_time;
if (pending_size && pending_size >= max_size) {
qemu_savevm_state_iterate(s->file);
} else {
+ int ret;
+
DPRINTF("done iterating\n");
qemu_mutex_lock_iothread();
start_time = qemu_get_clock_ms(rt_clock);
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
old_vm_running = runstate_is_running();
- vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
- qemu_file_set_rate_limit(s->file, INT_MAX);
- qemu_savevm_state_complete(s->file);
+
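+ /* Only send the final device state if the guest stopped cleanly. */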
+ ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
+ if (ret >= 0) {
+ qemu_file_set_rate_limit(s->file, INT_MAX);
+ qemu_savevm_state_complete(s->file);
+ }
qemu_mutex_unlock_iothread();
+
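+ /* Stopping the guest failed: mark the migration as failed and stop iterating. */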
+ if (ret < 0) {
+ migrate_finish_set_state(s, MIG_STATE_ERROR);
+ break;
+ }
+
if (!qemu_file_get_error(s->file)) {
migrate_finish_set_state(s, MIG_STATE_COMPLETED);
break;
current_time = qemu_get_clock_ms(rt_clock);
if (current_time >= initial_time + BUFFER_DELAY) {
uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
- uint64_t time_spent = current_time - initial_time - sleep_time;
+ uint64_t time_spent = current_time - initial_time;
double bandwidth = transferred_bytes / time_spent;
max_size = bandwidth * migrate_max_downtime() / 1000000;
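+ /* Throughput in Mbps: bytes -> bits (*8), ms -> s (/1000), then scale to mega. */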
+ s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
+ ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;
+
DPRINTF("transferred %" PRIu64 " time_spent %" PRIu64
" bandwidth %g max_size %" PRId64 "\n",
transferred_bytes, time_spent, bandwidth, max_size);
}
qemu_file_reset_rate_limit(s->file);
- sleep_time = 0;
initial_time = current_time;
initial_bytes = qemu_ftell(s->file);
}
if (qemu_file_rate_limit(s->file)) {
/* usleep expects microseconds */
g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
- sleep_time += qemu_get_clock_ms(rt_clock) - current_time;
}
}