/*
 * migration/migration.c — QEMU live migration core
 * (revision: "migration: create migration event")
 */
1 /*
2  * QEMU live migration
3  *
4  * Copyright IBM, Corp. 2008
5  *
6  * Authors:
7  *  Anthony Liguori   <[email protected]>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15
16 #include "qemu-common.h"
17 #include "qemu/error-report.h"
18 #include "qemu/main-loop.h"
19 #include "migration/migration.h"
20 #include "migration/qemu-file.h"
21 #include "sysemu/sysemu.h"
22 #include "block/block.h"
23 #include "qapi/qmp/qerror.h"
24 #include "qemu/sockets.h"
25 #include "migration/block.h"
26 #include "qemu/thread.h"
27 #include "qmp-commands.h"
28 #include "trace.h"
29 #include "qapi/util.h"
30 #include "qapi-event.h"
31
#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/*0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* Observers notified on every migration state change
 * (see add_migration_state_change_notifier) */
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Set by "-incoming defer"; required by qmp_migrate_incoming() */
static bool deferred_incoming;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */
/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* Singleton: exactly one outgoing migration state exists */
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
    };

    return &current_migration;
}
77
/* For incoming */
static MigrationIncomingState *mis_current;

/* Returns the current incoming-migration state, or NULL when no
 * incoming migration is in progress */
MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}
85
86 MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
87 {
88     mis_current = g_malloc0(sizeof(MigrationIncomingState));
89     mis_current->file = f;
90     QLIST_INIT(&mis_current->loadvm_handlers);
91
92     return mis_current;
93 }
94
/* Free the state created by migration_incoming_state_new() and clear
 * the singleton pointer.  The QEMUFile is NOT closed here; the caller
 * owns it. */
void migration_incoming_state_destroy(void)
{
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}
101
102
/* Runstate forwarded from source to destination as its own vmstate
 * section ("globalstate"), so the destination can end up in the same
 * run state as the source. */
typedef struct {
    bool optional;           /* if true, section may be skipped; see
                                global_state_needed() */
    uint32_t size;           /* strlen(runstate) + 1, set in pre_save */
    uint8_t runstate[100];   /* NUL-terminated runstate name */
} GlobalState;

static GlobalState global_state;
110
/* Capture the current runstate name into global_state for sending.
 * Returns 0 on success, -EINVAL if the name does not fit the buffer. */
static int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}
121
122 static char *global_state_get_runstate(void)
123 {
124     return (char *)global_state.runstate;
125 }
126
/* Mark the globalstate section as optional; whether it is actually
 * sent is then decided at save time by global_state_needed() */
void global_state_set_optional(void)
{
    global_state.optional = true;
}
131
132 static bool global_state_needed(void *opaque)
133 {
134     GlobalState *s = opaque;
135     char *runstate = (char *)s->runstate;
136
137     /* If it is not optional, it is mandatory */
138
139     if (s->optional == false) {
140         return true;
141     }
142
143     /* If state is running or paused, it is not needed */
144
145     if (strcmp(runstate, "running") == 0 ||
146         strcmp(runstate, "paused") == 0) {
147         return false;
148     }
149
150     /* for any other state it is needed */
151     return true;
152 }
153
/* .post_load callback: on the destination, parse the runstate name
 * sent by the source and force the VM into that state.
 * ("running" itself is applied later, in
 * process_incoming_migration_co(), once everything has loaded.)
 * Returns 0 on success, -EINVAL for an unknown runstate name, or the
 * error from vm_stop_force_state(). */
static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    int ret = 0;
    char *runstate = (char *)s->runstate;

    trace_migrate_global_state_post_load(runstate);

    if (strcmp(runstate, "running") != 0) {
        Error *local_err = NULL;
        int r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
                                -1, &local_err);

        if (r == -1) {
            if (local_err) {
                error_report_err(local_err);
            }
            return -EINVAL;
        }
        ret = vm_stop_force_state(r);
    }

   return ret;
}
178
/* .pre_save callback: record the on-the-wire size of the runstate
 * string (including its NUL terminator) */
static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}
186
/* Wire description of GlobalState: the "globalstate" section */
static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};
200
201 void register_global_state(void)
202 {
203     /* We would use it independently that we receive it */
204     strcpy((char *)&global_state.runstate, "");
205     vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
206 }
207
208 /*
209  * Called on -incoming with a defer: uri.
210  * The migration can be started later after any parameters have been
211  * changed.
212  */
213 static void deferred_incoming_migration(Error **errp)
214 {
215     if (deferred_incoming) {
216         error_setg(errp, "Incoming migration already deferred");
217     }
218     deferred_incoming = true;
219 }
220
/* Start listening for an incoming migration on @uri, dispatching on
 * the transport prefix ("tcp:", "rdma:", "exec:", "unix:", "fd:"),
 * or arm deferred mode for "defer".  Errors go to @errp. */
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
245
/* Coroutine that reads and applies the whole incoming migration
 * stream, then either starts the guest or leaves it paused.
 * Exits the process on a load failure or a failed block-cache
 * invalidation. */
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    migration_incoming_state_new(f);

    ret = qemu_loadvm_state(f);

    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    migration_incoming_state_destroy();

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /* runstate == "" means that we haven't received it through the
     * wire, so we obey autostart.  runstate == "running" means that we
     * need to run it, we need to make sure that we do it after
     * everything else has finished.  Every other state change is done
     * at the post_load function */

    if (strcmp(global_state_get_runstate(), "running") == 0) {
        vm_start();
    } else if (strcmp(global_state_get_runstate(), "") == 0) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    }
    migrate_decompress_threads_join();
}
292
/* Entry point for an established incoming connection: switch @f's fd
 * to non-blocking and run the actual load in a coroutine so the main
 * loop stays responsive */
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}
303
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

/* Maximum tolerated downtime in nanoseconds (user-settable through
 * qmp_migrate_set_downtime, which takes seconds) */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
314
/* QMP query-migrate-capabilities: build a list holding the current
 * on/off state of every migration capability.  Caller owns the
 * returned list. */
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}
339
340 MigrationParameters *qmp_query_migrate_parameters(Error **errp)
341 {
342     MigrationParameters *params;
343     MigrationState *s = migrate_get_current();
344
345     params = g_malloc0(sizeof(*params));
346     params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
347     params->compress_threads =
348             s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
349     params->decompress_threads =
350             s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
351
352     return params;
353 }
354
/* Fill the xbzrle section of @info; no-op unless the xbzrle
 * capability is enabled */
static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}
368
/* QMP query-migrate: snapshot of migration status and statistics.
 * What is filled in depends on s->state; the caller owns the
 * returned structure. */
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        /* in flight: elapsed time plus live RAM/disk counters */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        /* finished: final totals, actual downtime, nothing remaining */
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}
450
/* QMP migrate-set-capabilities: enable/disable the capabilities
 * listed in @params.  Rejected while a migration is being set up or
 * is running. */
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (s->state == MIGRATION_STATUS_ACTIVE ||
        s->state == MIGRATION_STATUS_SETUP) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }
}
467
/* QMP migrate-set-parameters: validate and store compression
 * parameters.  Valid ranges: compress_level 0..9, thread counts
 * 1..255.  A parameter is only touched when its has_* flag is set;
 * on any validation failure nothing is modified. */
void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
}
508
/* shared migration helpers */

/* Atomically move s->state from @old_state to @new_state; on success,
 * emit the MIGRATION QMP event and trace point.  Losing the race
 * (another thread changed the state first) is silently ignored —
 * callers that care re-check s->state. */
static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) {
        qapi_event_send_migration(new_state, &error_abort);
        trace_migrate_set_state(new_state);
    }
}
518
/* Bottom half run in the main loop after the migration thread ends
 * (or after a failed connect): join the thread, close the file,
 * complete the CANCELLING->CANCELLED transition and notify observers.
 * Entered with the iothread lock held. */
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        /* Drop the lock around the join: the migration thread takes
         * it for its final bookkeeping before exiting */
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    assert(s->state != MIGRATION_STATUS_ACTIVE);

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        qemu_savevm_state_cancel();
        if (s->state == MIGRATION_STATUS_CANCELLING) {
            migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                              MIGRATION_STATUS_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
549
/* Mark a migration that failed before any file/thread existed
 * (connect failure) as FAILED and notify observers */
void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}
557
/* Request cancellation of the current outgoing migration.  The loop
 * retries because migrate_set_state()'s cmpxchg can lose a race with
 * a concurrent state change; it exits once the state is neither
 * SETUP nor ACTIVE. */
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state ;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    do {
        old_state = s->state;
        if (old_state != MIGRATION_STATUS_SETUP &&
            old_state != MIGRATION_STATUS_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}
584
/* Subscribe @notify to migration state-change notifications */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
589
/* Unsubscribe a notifier added with
 * add_migration_state_change_notifier() */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
594
/* True while migration @s is in its SETUP phase */
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}
599
/* True once migration @s completed successfully */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}
604
605 bool migration_has_failed(MigrationState *s)
606 {
607     return (s->state == MIGRATION_STATUS_CANCELLED ||
608             s->state == MIGRATION_STATUS_FAILED);
609 }
610
/* Reset the migration state for a fresh outgoing migration while
 * preserving the user-configured knobs (bandwidth limit,
 * capabilities, xbzrle cache size, compression parameters), then
 * enter the SETUP state.  The knobs are copied out before the
 * memset and restored afterwards — keep that ordering. */
static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;
    int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    int compress_thread_count =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    int decompress_thread_count =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
               compress_thread_count;
    s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
               decompress_thread_count;
    s->bandwidth_limit = bandwidth_limit;
    migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    /* total_time holds the start timestamp until completion, when it
     * is converted to a duration (see migration_thread) */
    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}
643
/* Errors registered by subsystems that currently forbid migration */
static GSList *migration_blockers;

/* Forbid outgoing migration; @reason is reported to the user when a
 * migration is attempted.  Pair with migrate_del_blocker(). */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}
650
/* Remove a blocker previously added with migrate_add_blocker() */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
655
656 void qmp_migrate_incoming(const char *uri, Error **errp)
657 {
658     Error *local_err = NULL;
659     static bool once = true;
660
661     if (!deferred_incoming) {
662         error_setg(errp, "For use with '-incoming defer'");
663         return;
664     }
665     if (!once) {
666         error_setg(errp, "The incoming migration has already been started");
667     }
668
669     qemu_start_incoming_migration(uri, &local_err);
670
671     if (local_err) {
672         error_propagate(errp, local_err);
673         return;
674     }
675
676     once = false;
677 }
678
679 void qmp_migrate(const char *uri, bool has_blk, bool blk,
680                  bool has_inc, bool inc, bool has_detach, bool detach,
681                  Error **errp)
682 {
683     Error *local_err = NULL;
684     MigrationState *s = migrate_get_current();
685     MigrationParams params;
686     const char *p;
687
688     params.blk = has_blk && blk;
689     params.shared = has_inc && inc;
690
691     if (s->state == MIGRATION_STATUS_ACTIVE ||
692         s->state == MIGRATION_STATUS_SETUP ||
693         s->state == MIGRATION_STATUS_CANCELLING) {
694         error_setg(errp, QERR_MIGRATION_ACTIVE);
695         return;
696     }
697     if (runstate_check(RUN_STATE_INMIGRATE)) {
698         error_setg(errp, "Guest is waiting for an incoming migration");
699         return;
700     }
701
702     if (qemu_savevm_state_blocked(errp)) {
703         return;
704     }
705
706     if (migration_blockers) {
707         *errp = error_copy(migration_blockers->data);
708         return;
709     }
710
711     /* We are starting a new migration, so we want to start in a clean
712        state.  This change is only needed if previous migration
713        failed/was cancelled.  We don't use migrate_set_state() because
714        we are setting the initial state, not changing it. */
715     s->state = MIGRATION_STATUS_NONE;
716
717     s = migrate_init(&params);
718
719     if (strstart(uri, "tcp:", &p)) {
720         tcp_start_outgoing_migration(s, p, &local_err);
721 #ifdef CONFIG_RDMA
722     } else if (strstart(uri, "rdma:", &p)) {
723         rdma_start_outgoing_migration(s, p, &local_err);
724 #endif
725 #if !defined(WIN32)
726     } else if (strstart(uri, "exec:", &p)) {
727         exec_start_outgoing_migration(s, p, &local_err);
728     } else if (strstart(uri, "unix:", &p)) {
729         unix_start_outgoing_migration(s, p, &local_err);
730     } else if (strstart(uri, "fd:", &p)) {
731         fd_start_outgoing_migration(s, p, &local_err);
732 #endif
733     } else {
734         error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
735                    "a valid migration protocol");
736         migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
737         return;
738     }
739
740     if (local_err) {
741         migrate_fd_error(s);
742         error_propagate(errp, local_err);
743         return;
744     }
745 }
746
/* QMP migrate_cancel: abort the current outgoing migration, if any */
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}
751
/* QMP migrate-set-cache-size: resize the xbzrle page cache to @value
 * bytes.  Rejects values that truncate in size_t on this host, exceed
 * guest RAM, or are smaller than the page size. */
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size ");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}
780
/* QMP query-migrate-cache-size: current xbzrle cache size in bytes */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
785
/* QMP migrate_set_speed: set the bandwidth limit, clamped to
 * [0, SIZE_MAX]; takes effect immediately when a migration file is
 * already open */
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}
803
/* QMP migrate_set_downtime: @value is in seconds; stored internally
 * in nanoseconds, clamped to [0, UINT64_MAX] */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}
810
811 bool migrate_auto_converge(void)
812 {
813     MigrationState *s;
814
815     s = migrate_get_current();
816
817     return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
818 }
819
820 bool migrate_zero_blocks(void)
821 {
822     MigrationState *s;
823
824     s = migrate_get_current();
825
826     return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
827 }
828
829 bool migrate_use_compression(void)
830 {
831     MigrationState *s;
832
833     s = migrate_get_current();
834
835     return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
836 }
837
838 int migrate_compress_level(void)
839 {
840     MigrationState *s;
841
842     s = migrate_get_current();
843
844     return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
845 }
846
847 int migrate_compress_threads(void)
848 {
849     MigrationState *s;
850
851     s = migrate_get_current();
852
853     return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
854 }
855
856 int migrate_decompress_threads(void)
857 {
858     MigrationState *s;
859
860     s = migrate_get_current();
861
862     return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
863 }
864
865 int migrate_use_xbzrle(void)
866 {
867     MigrationState *s;
868
869     s = migrate_get_current();
870
871     return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
872 }
873
874 int64_t migrate_xbzrle_cache_size(void)
875 {
876     MigrationState *s;
877
878     s = migrate_get_current();
879
880     return s->xbzrle_cache_size;
881 }
882
/* migration thread support */

/* Body of the outgoing migration thread.
 *
 * Iterates the savevm state while throttling to the configured
 * bandwidth, completes the migration once the remaining dirty data
 * fits in the allowed downtime, and finally schedules
 * migrate_fd_cleanup() in the main loop.  The iothread lock is taken
 * only around VM stop/completion and the final bookkeeping. */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /* pending-data threshold below which we attempt completion */
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    /* remember whether to restart the guest on failure/cancel */
    bool old_vm_running = false;

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                /* still too much dirty data: do another iteration */
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                /* remaining data fits in the downtime budget: stop
                 * the guest and send the final device state */
                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = global_state_store();
                if (!ret) {
                    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                    if (ret >= 0) {
                        /* no throttling during the final pass */
                        qemu_file_set_rate_limit(s->file, INT64_MAX);
                        qemu_savevm_state_complete(s->file);
                    }
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_FAILED);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                                      MIGRATION_STATUS_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            /* once per BUFFER_DELAY ms: refresh the bandwidth
             * estimate and the completion threshold derived from it */
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        /* convert total_time from start timestamp to a duration */
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        /* failed or cancelled: resume the guest if it was running */
        if (old_vm_running) {
            vm_start();
        }
    }
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    return NULL;
}
996
/* Transport connected: finish setup and spawn the migration thread.
 * s->file must already be open. */
void migrate_fd_connect(MigrationState *s)
{
    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}
/* end of migration/migration.c */