 * Copyright IBM, Corp. 2008
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qemu/sockets.h"
#include "migration/block.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "qapi/util.h"
#include "qapi-event.h"
#define MAX_THROTTLE (32 << 20) /* Default migration speed throttle: 32 MiB/s */

/* Amount of time (in milliseconds) to allocate to each "chunk" of
 * bandwidth-throttled data */
#define BUFFER_DELAY 100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
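/* With the default MAX_THROTTLE of 32 MiB/s, for example, each BUFFER_DELAY
 * window of 100 ms may carry at most 32 MiB / XFER_LIMIT_RATIO = 3.2 MiB;
 * see the qemu_file_set_rate_limit() calls below, which divide
 * bandwidth_limit by XFER_LIMIT_RATIO. */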
/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count; decompression is usually at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ... 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
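/* The XBZRLE cache can be resized at run time through
 * qmp_migrate_set_cache_size() below, which rejects sizes larger than guest
 * RAM or smaller than the page size. */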
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;
/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need dynamic
   creation of migration state. */
MigrationState *migrate_get_current(void)
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,

    return &current_migration;
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)

MigrationIncomingState *migration_incoming_state_new(QEMUFile *f)
    mis_current = g_malloc0(sizeof(MigrationIncomingState));
    mis_current->file = f;
    QLIST_INIT(&mis_current->loadvm_handlers);

void migration_incoming_state_destroy(void)
    loadvm_free_handlers(mis_current);

    uint8_t runstate[100];

static GlobalState global_state;
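/* global_state carries the source's run state to the destination as a
 * NUL-terminated name string: global_state_store() fills it in on the source
 * and global_state_post_load() parses it back with qapi_enum_parse(). */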
static int global_state_store(void)
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();

static char *global_state_get_runstate(void)
    return (char *)global_state.runstate;

void global_state_set_optional(void)
    global_state.optional = true;

static bool global_state_needed(void *opaque)
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */
    if (s->optional == false) {

    /* If state is running or paused, it is not needed */
    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {

    /* for any other state it is needed */

static int global_state_post_load(void *opaque, int version_id)
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    trace_migrate_global_state_post_load(runstate);

    if (strcmp(runstate, "running") != 0) {
        Error *local_err = NULL;
        int r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
            error_report_err(local_err);
        ret = vm_stop_force_state(r);

static void global_state_pre_save(void *opaque)
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
void register_global_state(void)
    /* We use it regardless of whether we receive it over the wire */
    strcpy((char *)&global_state.runstate, "");
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
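/* The section is registered unconditionally; whether it is actually sent for
 * a given migration is decided by global_state_needed() above. */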
 * Called on -incoming with the "defer" URI.
 * The migration can be started later after any parameters have been
 * changed.
static void deferred_incoming_migration(Error **errp)
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    deferred_incoming = true;
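/* Start the incoming side of a migration.  The URI selects the transport:
 * "tcp:", "rdma:", "exec:", "unix:" and "fd:" prefixes are dispatched to the
 * matching back end below, and the literal "defer" postpones the start until
 * a later migrate_incoming command. */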
void qemu_start_incoming_migration(const char *uri, Error **errp)
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
        error_setg(errp, "unknown migration protocol: %s", uri);

static void process_incoming_migration_co(void *opaque)
    QEMUFile *f = opaque;
    Error *local_err = NULL;

    migration_incoming_state_new(f);
    ret = qemu_loadvm_state(f);

    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();

    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
        error_report_err(local_err);
        migrate_decompress_threads_join();
    /* runstate == "" means that we haven't received it through the
     * wire, so we obey autostart.  runstate == "running" means that we
     * need to resume the guest, and we must make sure we do that after
     * everything else has finished.  Every other state change is done
     * in the post_load function. */
    if (strcmp(global_state_get_runstate(), "running") == 0) {
    } else if (strcmp(global_state_get_runstate(), "") == 0) {
        runstate_set(RUN_STATE_PAUSED);

    migrate_decompress_threads_join();
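/* Entry point for the incoming side: the stream is loaded inside a coroutine
 * on a non-blocking fd, so that the main loop can keep running while
 * process_incoming_migration_co() waits for more data. */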
void process_incoming_migration(QEMUFile *f)
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);

/* Maximum downtime, in nanoseconds, that we are willing to incur while the
 * migration completes.  Nanoseconds are used because that is the highest
 * resolution get_clock() can achieve.  It is an internal measure; all
 * user-visible units must be in seconds. */
static uint64_t max_downtime = 300000000;
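/* 300000000 ns = 300 ms of tolerated downtime by default. */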
uint64_t migrate_max_downtime(void)

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
            head = g_malloc0(sizeof(*caps));
            caps->next = g_malloc0(sizeof(*caps));
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

static void get_xbzrle_cache_stats(MigrationInfo *info)
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();

MigrationInfo *qmp_query_migrate(Error **errp)
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();

        get_xbzrle_cache_stats(info);
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;

    info->status = s->state;

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads, Error **errp)
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");

    if (has_compress_threads &&
        (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "is invalid, it should be in the range of 1 to 255");

    if (has_decompress_threads &&
        (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =

/* shared migration helpers */
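/* migrate_set_state() uses an atomic compare-and-swap so that only the thread
 * that actually performs the old_state -> new_state transition emits the
 * MIGRATION event and trace point; if the state changed under us, nothing
 * happens. */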
static void migrate_set_state(MigrationState *s, int old_state, int new_state)
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        qapi_event_send_migration(new_state, &error_abort);
        trace_migrate_set_state(new_state);

static void migrate_fd_cleanup(void *opaque)
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);

    assert(s->state != MIGRATION_STATUS_ACTIVE);

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIGRATION_STATUS_CANCELLING) {
            migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                              MIGRATION_STATUS_CANCELLED);

    notifier_list_notify(&migration_state_notifiers, s);

void migrate_fd_error(MigrationState *s)
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);

static void migrate_fd_cancel(MigrationState *s)
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

        old_state = s->state;
        if (old_state != MIGRATION_STATUS_SETUP &&
            old_state != MIGRATION_STATUS_ACTIVE) {
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);

void add_migration_state_change_notifier(Notifier *notify)
    notifier_list_add(&migration_state_notifiers, notify);

void remove_migration_state_change_notifier(Notifier *notify)
    notifier_remove(notify);

bool migration_in_setup(MigrationState *s)
    return s->state == MIGRATION_STATUS_SETUP;

bool migration_has_finished(MigrationState *s)
    return s->state == MIGRATION_STATUS_COMPLETED;

bool migration_has_failed(MigrationState *s)
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);

static MigrationState *migrate_init(const MigrationParams *params)
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->bandwidth_limit = bandwidth_limit;
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

static GSList *migration_blockers;
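/* Each entry is an Error * describing why migration is currently not
 * possible; qmp_migrate() refuses to start while the list is non-empty and
 * reports the first blocker to the caller. */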
void migrate_add_blocker(Error *reason)
    migration_blockers = g_slist_prepend(migration_blockers, reason);

void migrate_del_blocker(Error *reason)
    migration_blockers = g_slist_remove(migration_blockers, reason);

void qmp_migrate_incoming(const char *uri, Error **errp)
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        error_setg(errp, "The incoming migration has already been started");

    qemu_start_incoming_migration(uri, &local_err);
        error_propagate(errp, local_err);

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");

    if (qemu_savevm_state_blocked(errp)) {

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);

    /* We are starting a new migration, so we want to start in a clean
       state.  This change is only needed if previous migration
       failed/was cancelled.  We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;
    s = migrate_init(&params);
    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);

        error_propagate(errp, local_err);

void qmp_migrate_cancel(Error **errp)
    migrate_fd_cancel(migrate_get_current());

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
    MigrationState *s = migrate_get_current();

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
767 "exceeds guest ram size ");
    new_size = xbzrle_cache_resize(value);
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");

    s->xbzrle_cache_size = new_size;

int64_t qmp_query_migrate_cache_size(Error **errp)
    return migrate_xbzrle_cache_size();

void qmp_migrate_set_speed(int64_t value, Error **errp)
    if (value > SIZE_MAX) {

    s = migrate_get_current();
    s->bandwidth_limit = value;
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);

void qmp_migrate_set_downtime(double value, Error **errp)
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;

bool migrate_auto_converge(void)
    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];

bool migrate_zero_blocks(void)
    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];

bool migrate_use_compression(void)
    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];

int migrate_compress_level(void)
    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];

int migrate_compress_threads(void)
    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];

int migrate_decompress_threads(void)
    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

int migrate_use_xbzrle(void)
    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];

int64_t migrate_xbzrle_cache_size(void)
    s = migrate_get_current();

    return s->xbzrle_cache_size;

/* migration thread support */

static void *migration_thread(void *opaque)
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    bool old_vm_running = false;

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                qemu_savevm_state_iterate(s->file);

                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = global_state_store();
                    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                        qemu_file_set_rate_limit(s->file, INT64_MAX);
                        qemu_savevm_state_complete(s->file);
                qemu_mutex_unlock_iothread();

                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_FAILED);

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_COMPLETED);

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);

        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            /* Cast to double so the division does not truncate */
            double bandwidth = (double)transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;
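            /* bandwidth is in bytes per millisecond here (time_spent is in
             * ms) and migrate_max_downtime() is in ns, so dividing by 1000000
             * converts it to ms: max_size is how many bytes we can still send
             * within the allowed downtime. */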
            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;
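            /* transferred_bytes * 8 / (time_spent / 1000) is bits per second;
             * the two further divisions by 1000 turn that into megabits per
             * second. */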
            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* If we haven't sent much of anything, don't recalculate the
               expected downtime; 10000 bytes is a small enough threshold
               for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);

        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
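            /* (initial_time + BUFFER_DELAY - current_time) is whatever is
             * left of the current 100 ms window; sleeping it off enforces
             * the per-window bandwidth limit. */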
    qemu_mutex_lock_iothread();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;

        s->mbps = (((double) transferred_bytes * 8.0) /
                   ((double) s->total_time)) / 1000;
        runstate_set(RUN_STATE_POSTMIGRATE);
        if (old_vm_running) {

    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();
void migrate_fd_connect(MigrationState *s)
    /* A first, rough approximation of the expected downtime; convert ns to ms */
    s->expected_downtime = max_downtime / 1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);