]> Git Repo - qemu.git/blob - migration/migration.c
migration: Avoid qerror_report_err() outside QMP command handlers
[qemu.git] / migration / migration.c
1 /*
2  * QEMU live migration
3  *
4  * Copyright IBM, Corp. 2008
5  *
6  * Authors:
7  *  Anthony Liguori   <[email protected]>
8  *
9  * This work is licensed under the terms of the GNU GPL, version 2.  See
10  * the COPYING file in the top-level directory.
11  *
12  * Contributions after 2012-01-13 are licensed under the terms of the
13  * GNU GPL, version 2 or (at your option) any later version.
14  */
15
16 #include "qemu-common.h"
17 #include "qemu/main-loop.h"
18 #include "migration/migration.h"
19 #include "monitor/monitor.h"
20 #include "migration/qemu-file.h"
21 #include "sysemu/sysemu.h"
22 #include "block/block.h"
23 #include "qemu/sockets.h"
24 #include "migration/block.h"
25 #include "qemu/thread.h"
26 #include "qmp-commands.h"
27 #include "trace.h"
28
/* Outgoing/incoming migration state machine.  MIG_STATE_ERROR is negative
 * so it can never collide with the forward-progress states below. */
enum {
    MIG_STATE_ERROR = -1,
    MIG_STATE_NONE,        /* no migration has ever been started */
    MIG_STATE_SETUP,       /* preparing; migration thread not yet in main loop */
    MIG_STATE_CANCELLING,  /* cancel requested, waiting for thread to stop */
    MIG_STATE_CANCELLED,
    MIG_STATE_ACTIVE,      /* migration thread iterating state to the target */
    MIG_STATE_COMPLETED,
};

#define MAX_THROTTLE  (32 << 20)      /* Migration speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

/* Parties interested in migration state changes register here; notified
 * from the migrate_fd_* transitions and migrate_fd_connect() below. */
static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

/* Set by "-incoming defer"; cleared by qmp_migrate_incoming(). */
static bool deferred_incoming;
53
54 /* When we add fault tolerance, we could have several
55    migrations at once.  For now we don't need to add
56    dynamic creation of migration */
57
58 MigrationState *migrate_get_current(void)
59 {
60     static MigrationState current_migration = {
61         .state = MIG_STATE_NONE,
62         .bandwidth_limit = MAX_THROTTLE,
63         .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
64         .mbps = -1,
65     };
66
67     return &current_migration;
68 }
69
70 /*
71  * Called on -incoming with a defer: uri.
72  * The migration can be started later after any parameters have been
73  * changed.
74  */
75 static void deferred_incoming_migration(Error **errp)
76 {
77     if (deferred_incoming) {
78         error_setg(errp, "Incoming migration already deferred");
79     }
80     deferred_incoming = true;
81 }
82
/*
 * Dispatch an incoming-migration URI to the matching transport backend.
 * "defer" only records that the migration will be started later (see
 * qmp_migrate_incoming()); an unrecognized scheme sets an error.
 */
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;  /* on a match, points just past the "scheme:" prefix */

    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
107
/*
 * Incoming side: coroutine body that loads the whole machine state from
 * the migration stream (entered from process_incoming_migration()).
 * Any load failure exits the process: a partially-loaded machine cannot
 * be recovered.
 */
static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    Error *local_err = NULL;
    int ret;

    ret = qemu_loadvm_state(f);
    qemu_fclose(f);
    free_xbzrle_decoded_buf();
    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        exit(EXIT_FAILURE);
    }
    /* Advertise our (possibly new) network location to the LAN. */
    qemu_announce_self();

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        error_report_err(local_err);
        exit(EXIT_FAILURE);
    }

    /* Honour -S: only run the guest automatically if autostart is set. */
    if (autostart) {
        vm_start();
    } else {
        runstate_set(RUN_STATE_PAUSED);
    }
}
136
137 void process_incoming_migration(QEMUFile *f)
138 {
139     Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
140     int fd = qemu_get_fd(f);
141
142     assert(fd != -1);
143     qemu_set_nonblock(fd);
144     qemu_coroutine_enter(co, f);
145 }
146
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

/* Accessor for the downtime budget (ns); used by migration_thread() to
 * size the final stop-and-copy phase. */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
157
158 MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
159 {
160     MigrationCapabilityStatusList *head = NULL;
161     MigrationCapabilityStatusList *caps;
162     MigrationState *s = migrate_get_current();
163     int i;
164
165     caps = NULL; /* silence compiler warning */
166     for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
167         if (head == NULL) {
168             head = g_malloc0(sizeof(*caps));
169             caps = head;
170         } else {
171             caps->next = g_malloc0(sizeof(*caps));
172             caps = caps->next;
173         }
174         caps->value =
175             g_malloc(sizeof(*caps->value));
176         caps->value->capability = i;
177         caps->value->state = s->enabled_capabilities[i];
178     }
179
180     return head;
181 }
182
183 static void get_xbzrle_cache_stats(MigrationInfo *info)
184 {
185     if (migrate_use_xbzrle()) {
186         info->has_xbzrle_cache = true;
187         info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
188         info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
189         info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
190         info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
191         info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
192         info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
193         info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
194     }
195 }
196
/*
 * QMP: "query-migrate".  Builds a newly allocated MigrationInfo snapshot
 * from the current MigrationState; which optional fields are populated
 * depends on the state.  Ownership transfers to the caller.
 */
MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIG_STATE_NONE:
        /* no migration has happened ever */
        break;
    case MIG_STATE_SETUP:
        info->has_status = true;
        info->status = g_strdup("setup");
        info->has_total_time = false;
        break;
    case MIG_STATE_ACTIVE:
    case MIG_STATE_CANCELLING:
        /* A cancelling migration still reports as "active". */
        info->has_status = true;
        info->status = g_strdup("active");
        /* s->total_time holds the start timestamp while migration runs
         * (set in migrate_init()), so subtract to get the elapsed time. */
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        /* Block (storage) migration stats, only while it is running. */
        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIG_STATE_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->status = g_strdup("completed");
        /* After completion s->total_time is a duration, not a timestamp
         * (converted at the end of migration_thread()). */
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;  /* everything was sent */
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIG_STATE_ERROR:
        info->has_status = true;
        info->status = g_strdup("failed");
        break;
    case MIG_STATE_CANCELLED:
        info->has_status = true;
        info->status = g_strdup("cancelled");
        break;
    }

    return info;
}
282
283 void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
284                                   Error **errp)
285 {
286     MigrationState *s = migrate_get_current();
287     MigrationCapabilityStatusList *cap;
288
289     if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP) {
290         error_set(errp, QERR_MIGRATION_ACTIVE);
291         return;
292     }
293
294     for (cap = params; cap; cap = cap->next) {
295         s->enabled_capabilities[cap->value->capability] = cap->value->state;
296     }
297 }
298
299 /* shared migration helpers */
300
301 static void migrate_set_state(MigrationState *s, int old_state, int new_state)
302 {
303     if (atomic_cmpxchg(&s->state, old_state, new_state) == new_state) {
304         trace_migrate_set_state(new_state);
305     }
306 }
307
/*
 * Bottom half run in the main loop once the migration thread finishes
 * (scheduled at the end of migration_thread()) or after a setup failure.
 * Joins the thread, closes the outgoing file, finalizes a CANCELLING
 * state and notifies registered listeners.  s->cleanup_bh was created
 * in migrate_fd_connect().
 */
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        /* Drop the iothread lock around the join so the migration
         * thread can still acquire it while finishing up. */
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        qemu_fclose(s->file);
        s->file = NULL;
    }

    /* The thread must have left ACTIVE for one of the terminal states. */
    assert(s->state != MIG_STATE_ACTIVE);

    if (s->state != MIG_STATE_COMPLETED) {
        /* Let savevm handlers undo any partial-migration bookkeeping. */
        qemu_savevm_state_cancel();
        if (s->state == MIG_STATE_CANCELLING) {
            migrate_set_state(s, MIG_STATE_CANCELLING, MIG_STATE_CANCELLED);
        }
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
336
/*
 * Mark the outgoing migration as failed before any migration file was
 * attached (connection setup failed, enforced by the assert below).
 */
void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    assert(s->file == NULL);
    /* No migration thread can be running yet, so a plain store rather
     * than a migrate_set_state() cmpxchg is sufficient here. */
    s->state = MIG_STATE_ERROR;
    trace_migrate_set_state(MIG_STATE_ERROR);
    notifier_list_notify(&migration_state_notifiers, s);
}
345
/*
 * Request cancellation of the outgoing migration.  The CAS loop retries
 * because the migration thread may concurrently move the state (e.g.
 * SETUP -> ACTIVE); it gives up as soon as the state is neither SETUP
 * nor ACTIVE (migration already finished, failed or cancelled).
 */
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state ;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    do {
        old_state = s->state;
        if (old_state != MIG_STATE_SETUP && old_state != MIG_STATE_ACTIVE) {
            break;
        }
        migrate_set_state(s, old_state, MIG_STATE_CANCELLING);
    } while (s->state != MIG_STATE_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIG_STATE_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}
371
/* Register @notify to be called on migration state changes. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
376
/* Unregister a notifier previously added with
 * add_migration_state_change_notifier(). */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
381
382 bool migration_in_setup(MigrationState *s)
383 {
384     return s->state == MIG_STATE_SETUP;
385 }
386
387 bool migration_has_finished(MigrationState *s)
388 {
389     return s->state == MIG_STATE_COMPLETED;
390 }
391
392 bool migration_has_failed(MigrationState *s)
393 {
394     return (s->state == MIG_STATE_CANCELLED ||
395             s->state == MIG_STATE_ERROR);
396 }
397
/*
 * Reset the global MigrationState for a fresh outgoing migration.
 * The whole struct is zeroed except the user-tunable knobs (bandwidth
 * limit, capabilities, XBZRLE cache size), which must survive across
 * migrations and are therefore saved before the memset and restored
 * afterwards.  Returns the (global) state, in MIG_STATE_SETUP.
 */
static MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();
    int64_t bandwidth_limit = s->bandwidth_limit;
    bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
    int64_t xbzrle_cache_size = s->xbzrle_cache_size;

    memcpy(enabled_capabilities, s->enabled_capabilities,
           sizeof(enabled_capabilities));

    memset(s, 0, sizeof(*s));
    s->params = *params;
    memcpy(s->enabled_capabilities, enabled_capabilities,
           sizeof(enabled_capabilities));
    s->xbzrle_cache_size = xbzrle_cache_size;

    s->bandwidth_limit = bandwidth_limit;
    /* Plain store: no migration thread is running at this point. */
    s->state = MIG_STATE_SETUP;
    trace_migrate_set_state(MIG_STATE_SETUP);

    /* Record the start timestamp; migration_thread() converts it to a
     * duration on completion. */
    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}
421
/* Reasons that currently forbid starting an outgoing migration;
 * consulted in qmp_migrate(). */
static GSList *migration_blockers;

/*
 * Register @reason as blocking migration.  The list does not take
 * ownership of @reason; pair with migrate_del_blocker() to remove it.
 */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}
428
/* Remove @reason (registered via migrate_add_blocker()) from the
 * blocker list; @reason itself is not freed. */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
433
434 void qmp_migrate_incoming(const char *uri, Error **errp)
435 {
436     Error *local_err = NULL;
437
438     if (!deferred_incoming) {
439         error_setg(errp, "'-incoming defer' is required for migrate_incoming");
440         return;
441     }
442
443     qemu_start_incoming_migration(uri, &local_err);
444
445     if (local_err) {
446         error_propagate(errp, local_err);
447         return;
448     }
449
450     deferred_incoming = false;
451 }
452
453 void qmp_migrate(const char *uri, bool has_blk, bool blk,
454                  bool has_inc, bool inc, bool has_detach, bool detach,
455                  Error **errp)
456 {
457     Error *local_err = NULL;
458     MigrationState *s = migrate_get_current();
459     MigrationParams params;
460     const char *p;
461
462     params.blk = has_blk && blk;
463     params.shared = has_inc && inc;
464
465     if (s->state == MIG_STATE_ACTIVE || s->state == MIG_STATE_SETUP ||
466         s->state == MIG_STATE_CANCELLING) {
467         error_set(errp, QERR_MIGRATION_ACTIVE);
468         return;
469     }
470
471     if (runstate_check(RUN_STATE_INMIGRATE)) {
472         error_setg(errp, "Guest is waiting for an incoming migration");
473         return;
474     }
475
476     if (qemu_savevm_state_blocked(errp)) {
477         return;
478     }
479
480     if (migration_blockers) {
481         *errp = error_copy(migration_blockers->data);
482         return;
483     }
484
485     s = migrate_init(&params);
486
487     if (strstart(uri, "tcp:", &p)) {
488         tcp_start_outgoing_migration(s, p, &local_err);
489 #ifdef CONFIG_RDMA
490     } else if (strstart(uri, "rdma:", &p)) {
491         rdma_start_outgoing_migration(s, p, &local_err);
492 #endif
493 #if !defined(WIN32)
494     } else if (strstart(uri, "exec:", &p)) {
495         exec_start_outgoing_migration(s, p, &local_err);
496     } else if (strstart(uri, "unix:", &p)) {
497         unix_start_outgoing_migration(s, p, &local_err);
498     } else if (strstart(uri, "fd:", &p)) {
499         fd_start_outgoing_migration(s, p, &local_err);
500 #endif
501     } else {
502         error_set(errp, QERR_INVALID_PARAMETER_VALUE, "uri", "a valid migration protocol");
503         s->state = MIG_STATE_ERROR;
504         return;
505     }
506
507     if (local_err) {
508         migrate_fd_error(s);
509         error_propagate(errp, local_err);
510         return;
511     }
512 }
513
514 void qmp_migrate_cancel(Error **errp)
515 {
516     migrate_fd_cancel(migrate_get_current());
517 }
518
519 void qmp_migrate_set_cache_size(int64_t value, Error **errp)
520 {
521     MigrationState *s = migrate_get_current();
522     int64_t new_size;
523
524     /* Check for truncation */
525     if (value != (size_t)value) {
526         error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
527                   "exceeding address space");
528         return;
529     }
530
531     /* Cache should not be larger than guest ram size */
532     if (value > ram_bytes_total()) {
533         error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
534                   "exceeds guest ram size ");
535         return;
536     }
537
538     new_size = xbzrle_cache_resize(value);
539     if (new_size < 0) {
540         error_set(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
541                   "is smaller than page size");
542         return;
543     }
544
545     s->xbzrle_cache_size = new_size;
546 }
547
/* QMP: "query-migrate-cache-size".  Returns the current XBZRLE cache
 * size in bytes. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
552
553 void qmp_migrate_set_speed(int64_t value, Error **errp)
554 {
555     MigrationState *s;
556
557     if (value < 0) {
558         value = 0;
559     }
560     if (value > SIZE_MAX) {
561         value = SIZE_MAX;
562     }
563
564     s = migrate_get_current();
565     s->bandwidth_limit = value;
566     if (s->file) {
567         qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
568     }
569 }
570
/*
 * QMP: "migrate_set_downtime".  @value is in seconds (a double);
 * stored internally in nanoseconds (see max_downtime above).
 */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    value *= 1e9;  /* seconds -> nanoseconds */
    /* Clamp to the representable range before the integer conversion. */
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}
577
578 bool migrate_rdma_pin_all(void)
579 {
580     MigrationState *s;
581
582     s = migrate_get_current();
583
584     return s->enabled_capabilities[MIGRATION_CAPABILITY_RDMA_PIN_ALL];
585 }
586
587 bool migrate_auto_converge(void)
588 {
589     MigrationState *s;
590
591     s = migrate_get_current();
592
593     return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
594 }
595
596 bool migrate_zero_blocks(void)
597 {
598     MigrationState *s;
599
600     s = migrate_get_current();
601
602     return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
603 }
604
605 int migrate_use_xbzrle(void)
606 {
607     MigrationState *s;
608
609     s = migrate_get_current();
610
611     return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
612 }
613
614 int64_t migrate_xbzrle_cache_size(void)
615 {
616     MigrationState *s;
617
618     s = migrate_get_current();
619
620     return s->xbzrle_cache_size;
621 }
622
/* migration thread support */

/*
 * Main loop of the outgoing migration, run in a dedicated thread
 * created by migrate_fd_connect().  Iterates dirty state to s->file
 * under the bandwidth limit and, once the estimated remaining data fits
 * into the downtime budget, stops the guest and sends the rest.
 *
 * Runs without the iothread lock; takes it only around the stop/
 * complete phase and the final accounting.
 */
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;    /* qemu_ftell() at last bandwidth sample */
    int64_t max_size = 0;         /* bytes sendable within max_downtime */
    int64_t start_time = initial_time;
    bool old_vm_running = false;  /* was the guest running when stopped? */

    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIG_STATE_SETUP, MIG_STATE_ACTIVE);

    while (s->state == MIG_STATE_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                /* Too much left: keep iterating while the guest runs. */
                qemu_savevm_state_iterate(s->file);
            } else {
                int ret;

                /* Remaining data fits the downtime budget: stop the
                 * guest and send the rest under the iothread lock. */
                qemu_mutex_lock_iothread();
                start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
                old_vm_running = runstate_is_running();

                ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
                if (ret >= 0) {
                    /* Guest is stopped; throttling no longer matters. */
                    qemu_file_set_rate_limit(s->file, INT64_MAX);
                    qemu_savevm_state_complete(s->file);
                }
                qemu_mutex_unlock_iothread();

                if (ret < 0) {
                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
                    break;
                }

                if (!qemu_file_get_error(s->file)) {
                    migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_COMPLETED);
                    break;
                }
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIG_STATE_ACTIVE, MIG_STATE_ERROR);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            /* Once per BUFFER_DELAY ms: re-estimate bandwidth and how
             * many bytes fit into the max_downtime window. */
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent; /* bytes/ms */
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            /* Throughput in Mbit/s; -1 if no time elapsed. */
            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            /* Open a fresh rate-limit window for the next interval. */
            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    qemu_mutex_lock_iothread();
    if (s->state == MIG_STATE_COMPLETED) {
        int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        uint64_t transferred_bytes = qemu_ftell(s->file);
        /* total_time held the start timestamp until now (migrate_init). */
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        /* Failed or cancelled: resume the guest if we had stopped it. */
        if (old_vm_running) {
            vm_start();
        }
    }
    /* Teardown (thread join, file close) happens in migrate_fd_cleanup(). */
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    return NULL;
}
729
/*
 * Called by a transport backend once its connection is established:
 * prime the state, cleanup bottom half and rate limit, notify
 * listeners, and spawn migration_thread() to do the actual work.
 */
void migrate_fd_connect(MigrationState *s)
{
    /* Plain store: the migration thread does not exist yet. */
    s->state = MIG_STATE_SETUP;
    trace_migrate_set_state(MIG_STATE_SETUP);

    /* This is a best 1st approximation. ns to ms */
    s->expected_downtime = max_downtime/1000000;
    /* Scheduled by migration_thread() when it finishes. */
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file,
                             s->bandwidth_limit / XFER_LIMIT_RATIO);

    /* Notify before starting migration thread */
    notifier_list_notify(&migration_state_notifiers, s);

    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}
This page took 0.066362 seconds and 4 git commands to generate.