/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <[email protected]>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/qemu-file.h"
#include "sysemu/sysemu.h"
#include "block/block.h"
#include "qapi/qmp/qerror.h"
#include "qapi/util.h"
#include "qemu/sockets.h"
#include "qemu/rcu.h"
#include "migration/block.h"
#include "migration/postcopy-ram.h"
#include "qemu/thread.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qapi-event.h"
#include "qom/cpu.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "io/channel-buffer.h"

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
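
/* For example, at the default MAX_THROTTLE of 32 MiB/s each 100 ms
 * chunk permits MAX_THROTTLE / XFER_LIMIT_RATIO = (32 << 20) / 10
 * bytes, i.e. roughly 3.2 MiB per BUFFER_DELAY interval. */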

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression. */
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: no compression, 1: best speed, ..., 9: best compression ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Default autoconverge CPU throttle migration parameters */
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/*
 * Current state of incoming postcopy; note this is not part of
 * MigrationIncomingState since its state is used during cleanup
 * at the end as MIS is being freed.
 */
static PostcopyState incoming_postcopy_state;

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need dynamic
   creation of migration state. */

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    static bool once;
    static MigrationState current_migration = {
        .state = MIGRATION_STATUS_NONE,
        .bandwidth_limit = MAX_THROTTLE,
        .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
        .mbps = -1,
        .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
                DEFAULT_MIGRATE_COMPRESS_LEVEL,
        .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
                DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
        .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
                DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL,
        .parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
                DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT,
    };

    if (!once) {
        qemu_mutex_init(&current_migration.src_page_req_mutex);
        once = true;
    }
    return &current_migration;
}

/* For incoming */
static MigrationIncomingState *mis_current;

MigrationIncomingState *migration_incoming_get_current(void)
{
    return mis_current;
}

MigrationIncomingState *migration_incoming_state_new(QEMUFile *f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    mis_current->state = MIGRATION_STATUS_NONE;
    QLIST_INIT(&mis_current->loadvm_handlers);
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}

void migration_incoming_state_destroy(void)
{
    qemu_event_destroy(&mis_current->main_thread_load_event);
    loadvm_free_handlers(mis_current);
    g_free(mis_current);
    mis_current = NULL;
}


typedef struct {
    bool optional;
    uint32_t size;
    uint8_t runstate[100];
    RunState state;
    bool received;
} GlobalState;

static GlobalState global_state;

int global_state_store(void)
{
    if (!runstate_store((char *)global_state.runstate,
                        sizeof(global_state.runstate))) {
        error_report("runstate name too big: %s", global_state.runstate);
        trace_migrate_state_too_big();
        return -EINVAL;
    }
    return 0;
}

void global_state_store_running(void)
{
    const char *state = RunState_lookup[RUN_STATE_RUNNING];
    strncpy((char *)global_state.runstate,
            state, sizeof(global_state.runstate));
}

static bool global_state_received(void)
{
    return global_state.received;
}

static RunState global_state_get_runstate(void)
{
    return global_state.state;
}

void global_state_set_optional(void)
{
    global_state.optional = true;
}

static bool global_state_needed(void *opaque)
{
    GlobalState *s = opaque;
    char *runstate = (char *)s->runstate;

    /* If it is not optional, it is mandatory */

    if (s->optional == false) {
        return true;
    }

    /* If state is running or paused, it is not needed */

    if (strcmp(runstate, "running") == 0 ||
        strcmp(runstate, "paused") == 0) {
        return false;
    }

    /* for any other state it is needed */
    return true;
}

static int global_state_post_load(void *opaque, int version_id)
{
    GlobalState *s = opaque;
    Error *local_err = NULL;
    int r;
    char *runstate = (char *)s->runstate;

    s->received = true;
    trace_migrate_global_state_post_load(runstate);

    r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE__MAX,
                        -1, &local_err);

    if (r == -1) {
        if (local_err) {
            error_report_err(local_err);
        }
        return -EINVAL;
    }
    s->state = r;

    return 0;
}

static void global_state_pre_save(void *opaque)
{
    GlobalState *s = opaque;

    trace_migrate_global_state_pre_save((char *)s->runstate);
    s->size = strlen((char *)s->runstate) + 1;
}

static const VMStateDescription vmstate_globalstate = {
    .name = "globalstate",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = global_state_post_load,
    .pre_save = global_state_pre_save,
    .needed = global_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(size, GlobalState),
        VMSTATE_BUFFER(runstate, GlobalState),
        VMSTATE_END_OF_LIST()
    },
};

void register_global_state(void)
{
    /* We will use it regardless of whether we receive it */
    strcpy((char *)&global_state.runstate, "");
    global_state.received = false;
    vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state, &error_abort);
    }
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/* Request a range of pages from the source VM at the given
 * start address.
 *   rbname: Name of the RAMBlock to request the page in, if NULL it's the same
 *           as the last request (a name must have been given previously)
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
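/* Wire format (big-endian): 8 bytes of start, 4 bytes of len; the
 * REQ_PAGES_ID variant then appends one length byte followed by the
 * rbname characters (not NUL-terminated). */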
void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname,
                               ram_addr_t start, size_t len)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname len (1), rbname (up to 255) */
    size_t msglen = 12; /* start + len */

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    if (rbname) {
        int rbname_len = strlen(rbname);
        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc);
    } else {
        migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc);
    }
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* Make sure all file formats flush their mutable metadata */
    bdrv_invalidate_cache_all(&local_err);
    if (local_err) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report_err(local_err);
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self();

    /* If global state section was not received or we are in running
       state, we need to obey autostart. Any other state is set with
       runstate_set. */

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else {
        runstate_set(global_state_get_runstate());
    }
    migrate_decompress_threads_join();
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    QEMUFile *f = opaque;
    MigrationIncomingState *mis;
    PostcopyState ps;
    int ret;

    mis = migration_incoming_state_new(f);
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(f);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    qemu_fclose(f);
    free_xbzrle_decoded_buf();

    if (ret < 0) {
        migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_FAILED);
        error_report("load of migration failed: %s", strerror(-ret));
        migrate_decompress_threads_join();
        exit(EXIT_FAILURE);
    }

    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
}

void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);

    migrate_decompress_threads_create();
    qemu_file_set_blocking(f, false);
    qemu_coroutine_enter(co, f);
}


void migration_set_incoming_channel(MigrationState *s,
                                    QIOChannel *ioc)
{
    QEMUFile *f = qemu_fopen_channel_input(ioc);

    process_incoming_migration(f);
}


void migration_set_outgoing_channel(MigrationState *s,
                                    QIOChannel *ioc)
{
    QEMUFile *f = qemu_fopen_channel_output(ioc);

    s->to_dst_file = f;

    migrate_fd_connect(s);
}


/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
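/* Each message is framed as a big-endian 16-bit message type, a
 * big-endian 16-bit payload length, then 'len' bytes of payload. */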
void migrate_send_rp_message(MigrationIncomingState *mis,
                             enum mig_rp_message_type message_type,
                             uint16_t len, void *data)
{
    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);
    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);
    qemu_mutex_unlock(&mis->rp_mutex);
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

/* Amount of time, in nanoseconds, that we are willing to tolerate as
 * migration downtime.  Nanoseconds are used because that is the maximum
 * resolution get_clock() can achieve; it is an internal measure, and all
 * user-visible units must be in seconds.  The default is 300 ms. */
static uint64_t max_downtime = 300000000;

uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    params = g_malloc0(sizeof(*params));
    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL];
    params->cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT];

    return params;
}

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
static bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_SETUP:
        return true;

    default:
        return false;
    }
}

static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
        info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
        info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
        info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
        info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
    }
}

MigrationInfo *qmp_query_migrate(Error **errp)
{
    MigrationInfo *info = g_malloc0(sizeof(*info));
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        break;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        if (cpu_throttle_active()) {
            info->has_cpu_throttle_percentage = true;
            info->cpu_throttle_percentage = cpu_throttle_get_percentage();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
        /* Mostly the same as active; TODO add some postcopy stats */
        info->has_status = true;
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
            - s->total_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = ram_bytes_remaining();
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->dirty_pages_rate = s->dirty_pages_rate;
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;

        if (blk_mig_active()) {
            info->has_disk = true;
            info->disk = g_malloc0(sizeof(*info->disk));
            info->disk->transferred = blk_mig_bytes_transferred();
            info->disk->remaining = blk_mig_bytes_remaining();
            info->disk->total = blk_mig_bytes_total();
        }

        get_xbzrle_cache_stats(info);
        break;
    case MIGRATION_STATUS_COMPLETED:
        get_xbzrle_cache_stats(info);

        info->has_status = true;
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
        info->has_setup_time = true;
        info->setup_time = s->setup_time;

        info->has_ram = true;
        info->ram = g_malloc0(sizeof(*info->ram));
        info->ram->transferred = ram_bytes_transferred();
        info->ram->remaining = 0;
        info->ram->total = ram_bytes_total();
        info->ram->duplicate = dup_mig_pages_transferred();
        info->ram->skipped = skipped_mig_pages_transferred();
        info->ram->normal = norm_mig_pages_transferred();
        info->ram->normal_bytes = norm_mig_bytes_transferred();
        info->ram->mbps = s->mbps;
        info->ram->dirty_sync_count = s->dirty_sync_count;
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    }
    info->status = s->state;

    return info;
}

void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
                                  Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationCapabilityStatusList *cap;

    if (migration_is_setup_or_active(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }

    for (cap = params; cap; cap = cap->next) {
        s->enabled_capabilities[cap->value->capability] = cap->value->state;
    }

    if (migrate_postcopy_ram()) {
        if (migrate_use_compression()) {
            /* The decompression threads asynchronously write into RAM
             * rather than use the atomic copies needed to avoid
             * userfaulting.  It should be possible to fix the decompression
             * threads for compatibility in future.
             */
            error_report("Postcopy is not currently compatible with "
                         "compression");
            s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM] =
                false;
        }
    }
}

void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_cpu_throttle_initial,
                                int64_t cpu_throttle_initial,
                                bool has_cpu_throttle_increment,
                                int64_t cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_cpu_throttle_initial &&
            (cpu_throttle_initial < 1 || cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (has_cpu_throttle_increment &&
            (cpu_throttle_increment < 1 || cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
    if (has_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INITIAL] =
                                                    cpu_throttle_initial;
    }

    if (has_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_CPU_THROTTLE_INCREMENT] =
                                                    cpu_throttle_increment;
    }
}

void qmp_migrate_start_postcopy(Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (!migrate_postcopy_ram()) {
        error_setg(errp, "Enable postcopy with migrate_set_capability before"
                         " the start of migration");
        return;
    }

    if (s->state == MIGRATION_STATUS_NONE) {
        error_setg(errp, "Postcopy must be started after migration has been"
                         " started");
        return;
    }
    /*
     * we don't error if migration has finished since that would be racy
     * with issuing this command.
     */
    atomic_set(&s->start_postcopy, true);
}

/* shared migration helpers */

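/*
 * Atomically transition *state from old_state to new_state.  The
 * compare-and-swap means a racing transition (e.g. a concurrent cancel)
 * simply wins, and the trace point and QMP MIGRATION event are only
 * emitted when our transition actually took effect.
 */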
void migrate_set_state(int *state, int old_state, int new_state)
{
    if (atomic_cmpxchg(state, old_state, new_state) == old_state) {
        trace_migrate_set_state(new_state);
        migrate_generate_event(new_state);
    }
}

static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    flush_page_queue(s);

    if (s->to_dst_file) {
        trace_migrate_fd_cleanup();
        qemu_mutex_unlock_iothread();
        if (s->migration_thread_running) {
            qemu_thread_join(&s->thread);
            s->migration_thread_running = false;
        }
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->to_dst_file);
        s->to_dst_file = NULL;
    }

    assert((s->state != MIGRATION_STATUS_ACTIVE) &&
           (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE));

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}

void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error ? error_get_pretty(error) : "");
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    if (!s->error) {
        s->error = error_copy(error);
    }
    notifier_list_notify(&migration_state_notifiers, s);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shut down the rp socket to cause the rp thread to exit */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy(s) && s->postcopy_after_devices;
}

MigrationState *migrate_init(const MigrationParams *params)
{
    MigrationState *s = migrate_get_current();

    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->bytes_xfer = 0;
    s->xfer_limit = 0;
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->params = *params;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->dirty_pages_rate = 0;
    s->dirty_bytes_rate = 0;
    s->setup_time = 0;
    s->dirty_sync_count = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    s->last_req_rb = NULL;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    QSIMPLEQ_INIT(&s->src_page_requests);

    s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    return s;
}

static GSList *migration_blockers;

void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        *errp = error_copy(migration_blockers->data);
        return true;
    }

    return false;
}

void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (migration_is_blocked(errp)) {
        return;
    }

    s = migrate_init(&params);

    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s;

    if (value < 0) {
        value = 0;
    }
    if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s = migrate_get_current();
    s->bandwidth_limit = value;
    if (s->to_dst_file) {
        qemu_file_set_rate_limit(s->to_dst_file,
                                 s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
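    /* 'value' arrives in seconds; convert it to nanoseconds and clamp
     * it to the range of the uint64_t max_downtime before storing. */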
    value *= 1e9;
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->xbzrle_cache_size;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error.
 * The caller shall print or trace something to indicate why.
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

static struct rp_cmd_args {
    ssize_t     len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path.  We're
 * allowed to send more than requested (e.g. to round to our page size)
 * and we don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = getpagesize();

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(ms, rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         " incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = be64_to_cpup((uint64_t *)buf);
            len = be32_to_cpup((uint32_t *)(buf + 8));
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = be64_to_cpup((uint64_t *)buf);
                len = be32_to_cpup((uint32_t *)(buf + 8));
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        default:
            break;
        }
    }
    if (qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();
    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms, bool *old_vm_running)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    *old_vm_running = runstate_is_running();
    global_state_store();
    ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
    if (ret < 0) {
        goto fail;
    }

    ret = bdrv_inactivate_all();
    if (ret < 0) {
        goto fail;
    }

    /*
     * Cause any non-postcopiable, but iterative devices to
     * send out their final data.
     */
    qemu_savevm_state_complete_precopy(ms->to_dst_file, true);

    /*
     * In the 'finish migrate' state, and with the io-lock held, everything
     * should be quiet, but we've potentially still got dirty pages and we
     * need to tell the destination to throw any pages it's already received
     * that are dirty
     */
    if (ram_postcopy_send_discard_bitmap(ms)) {
        error_report("postcopy send discard bitmap failed");
        goto fail;
    }

    /*
     * send rest of state - note things that are doing postcopy
     * will notice we're in POSTCOPY_ACTIVE and not actually
     * wrap their state up here
     */
    qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
    /* Ping just for debugging, helps line traces up */
    qemu_savevm_send_ping(ms->to_dst_file, 2);

    /*
     * While loading the device state we may trigger page transfer
     * requests and the fd must be free to process those, and thus
     * the destination must read the whole device state off the fd before
     * it starts processing it.  Unfortunately the ad-hoc migration format
     * doesn't allow the destination to know the size to read without fully
     * parsing it through each device's load-state code (especially the open
     * coded devices that use get/put).
     * So we wrap the device state up in a package with a length at the start;
     * to do this we use a QIOChannelBuffer to hold the whole of the device
     * state.
     */
    bioc = qio_channel_buffer_new(4096);
    fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
    object_unref(OBJECT(bioc));
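
    /*
     * Everything written to 'fb' from here accumulates in the memory
     * buffer: the LISTEN command, the remaining precopy device state, a
     * PING (3) and the RUN command.  qemu_savevm_send_packaged() below
     * then sends the whole buffer, preceded by its length, so the
     * destination knows exactly how much to read before processing it.
     */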

    /*
     * Make sure the receiver can get incoming pages before we send the rest
     * of the state
     */
    qemu_savevm_send_postcopy_listen(fb);

    qemu_savevm_state_complete_precopy(fb, false);
    qemu_savevm_send_ping(fb, 3);

    qemu_savevm_send_postcopy_run(fb);

    /* <><> end of stuff going into the package */

    /* Now send that blob */
    if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
        goto fail_closefb;
    }
    qemu_fclose(fb);

    /* Send a notify to give a chance for anything that needs to happen
     * at the transition to postcopy and after the device state; in particular
     * spice needs to trigger a transition now
     */
    ms->postcopy_after_devices = true;
    notifier_list_notify(&migration_state_notifiers, ms);

    ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;

    qemu_mutex_unlock_iothread();

    /*
     * Although this ping is just for debug, it could potentially be
     * used for getting a better measurement of downtime at the source.
     */
    qemu_savevm_send_ping(ms->to_dst_file, 4);

    ret = qemu_file_get_error(ms->to_dst_file);
    if (ret) {
        error_report("postcopy_start: Migration stream errored");
        migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                          MIGRATION_STATUS_FAILED);
    }

    return ret;

fail_closefb:
    qemu_fclose(fb);
fail:
    migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_mutex_unlock_iothread();
    return -1;
}

/**
 * migration_completion: Used by migration_thread when there's not much left.
 *   The caller 'breaks' the loop when this returns.
 *
 * @s: Current migration state
 * @current_active_state: The migration state we expect to be in
 * @*old_vm_running: Pointer to old_vm_running flag
 * @*start_time: Pointer to time to update
 */
static void migration_completion(MigrationState *s, int current_active_state,
                                 bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    if (s->state == MIGRATION_STATUS_ACTIVE) {
        qemu_mutex_lock_iothread();
        *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
        *old_vm_running = runstate_is_running();
        ret = global_state_store();

        if (!ret) {
            ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
            if (ret >= 0) {
                ret = bdrv_inactivate_all();
            }
            if (ret >= 0) {
                qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
                qemu_savevm_state_complete_precopy(s->to_dst_file, false);
            }
        }
        qemu_mutex_unlock_iothread();

        if (ret < 0) {
            goto fail;
        }
    } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        trace_migration_completion_postcopy_end();

        qemu_savevm_state_complete_postcopy(s->to_dst_file);
        trace_migration_completion_postcopy_end_after_complete();
    }

    /*
     * If rp was opened we must clean up the thread before
     * cleaning everything else up (since if there are no failures
     * it will wait for the destination to send its status in
     * a SHUT command).
     * Postcopy opens rp if enabled (even if it's not activated)
     */
    if (migrate_postcopy_ram()) {
        int rp_error;
        trace_migration_completion_postcopy_end_before_rp();
        rp_error = await_return_path_close_on_source(s);
        trace_migration_completion_postcopy_end_after_rp(rp_error);
        if (rp_error) {
            goto fail_invalidate;
        }
    }

    if (qemu_file_get_error(s->to_dst_file)) {
        trace_migration_completion_file_err();
        goto fail_invalidate;
    }

    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_COMPLETED);
    return;

fail_invalidate:
    /* If not doing postcopy, vm_start() will be called: let's regain
     * control on images.
     */
    if (s->state == MIGRATION_STATUS_ACTIVE) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        }
    }

fail:
    migrate_set_state(&s->state, current_active_state,
                      MIGRATION_STATUS_FAILED);
}
1666
1667 /*
1668  * Master migration thread on the source VM.
1669  * It drives the migration and pumps the data down the outgoing channel.
1670  */
1671 static void *migration_thread(void *opaque)
1672 {
1673     MigrationState *s = opaque;
1674     /* Used by the bandwidth calcs, updated later */
1675     int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1676     int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
1677     int64_t initial_bytes = 0;
1678     int64_t max_size = 0;
1679     int64_t start_time = initial_time;
1680     int64_t end_time;
1681     bool old_vm_running = false;
1682     bool entered_postcopy = false;
1683     /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */
1684     enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE;
1685
1686     rcu_register_thread();
1687
1688     qemu_savevm_state_header(s->to_dst_file);
1689
1690     if (migrate_postcopy_ram()) {
1691         /* Now tell the dest that it should open its end so it can reply */
1692         qemu_savevm_send_open_return_path(s->to_dst_file);
1693
1694         /* And send a ping that will make debugging easier */
1695         qemu_savevm_send_ping(s->to_dst_file, 1);
1696
1697         /*
1698          * Tell the destination that we *might* want to do postcopy later;
1699          * if the other end can't do postcopy it should fail now, nice and
1700          * early.
1701          */
1702         qemu_savevm_send_postcopy_advise(s->to_dst_file);
1703     }
1704
1705     qemu_savevm_state_begin(s->to_dst_file, &s->params);
1706
1707     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
1708     current_active_state = MIGRATION_STATUS_ACTIVE;
1709     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1710                       MIGRATION_STATUS_ACTIVE);
1711
1712     trace_migration_thread_setup_complete();
1713
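     /*
      * Main iteration loop: while under the rate limit for the current
      * window, send another chunk of state; once the amount left to send
      * fits in the allowed downtime (max_size), complete the migration,
      * possibly via a postcopy transition first.
      */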
1714     while (s->state == MIGRATION_STATUS_ACTIVE ||
1715            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
1716         int64_t current_time;
1717         uint64_t pending_size;
1718
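         /* Only generate more data if we are under this window's budget */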
1719         if (!qemu_file_rate_limit(s->to_dst_file)) {
1720             uint64_t pend_post, pend_nonpost;
1721
1722             qemu_savevm_state_pending(s->to_dst_file, max_size, &pend_nonpost,
1723                                       &pend_post);
1724             pending_size = pend_nonpost + pend_post;
1725             trace_migrate_pending(pending_size, max_size,
1726                                   pend_post, pend_nonpost);
1727             if (pending_size && pending_size >= max_size) {
1728                 /* Still a significant amount to transfer */
1729
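                 /*
                  * Switch to postcopy only if it is enabled, we are not
                  * already in it, the non-postcopiable (device) state now
                  * fits within the downtime budget, and the user has
                  * issued migrate-start-postcopy.
                  */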
1730                 if (migrate_postcopy_ram() &&
1731                     s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE &&
1732                     pend_nonpost <= max_size &&
1733                     atomic_read(&s->start_postcopy)) {
1734
1735                     if (!postcopy_start(s, &old_vm_running)) {
1736                         current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE;
1737                         entered_postcopy = true;
1738                     }
1739
1740                     continue;
1741                 }
1742                 /* Just another iteration step */
1743                 qemu_savevm_state_iterate(s->to_dst_file, entered_postcopy);
1744             } else {
1745                 trace_migration_thread_low_pending(pending_size);
1746                 migration_completion(s, current_active_state,
1747                                      &old_vm_running, &start_time);
1748                 break;
1749             }
1750         }
1751
1752         if (qemu_file_get_error(s->to_dst_file)) {
1753             migrate_set_state(&s->state, current_active_state,
1754                               MIGRATION_STATUS_FAILED);
1755             trace_migration_thread_file_err();
1756             break;
1757         }
1758         current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
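         /*
          * Once per BUFFER_DELAY window, recompute the observed bandwidth
          * (bytes per ms) and derive max_size: the number of bytes that
          * can still be outstanding while completing within
          * migrate_max_downtime(), which is in ns (hence the / 1000000).
          */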
1759         if (current_time >= initial_time + BUFFER_DELAY) {
1760             uint64_t transferred_bytes = qemu_ftell(s->to_dst_file) -
1761                                          initial_bytes;
1762             uint64_t time_spent = current_time - initial_time;
1763             double bandwidth = (double)transferred_bytes / time_spent;
1764             max_size = bandwidth * migrate_max_downtime() / 1000000;
1765
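             /* Mbit/s: bytes to bits (*8), ms to s (/1000), to mega (/1000/1000) */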
1766             s->mbps = (((double) transferred_bytes * 8.0) /
1767                     ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
1768
1769             trace_migrate_transferred(transferred_bytes, time_spent,
1770                                       bandwidth, max_size);
1771             /* If we haven't sent anything, we don't want to recalculate;
1772                10000 is a small enough number for our purposes */
1773             if (s->dirty_bytes_rate && transferred_bytes > 10000) {
1774                 s->expected_downtime = s->dirty_bytes_rate / bandwidth;
1775             }
1776
1777             qemu_file_reset_rate_limit(s->to_dst_file);
1778             initial_time = current_time;
1779             initial_bytes = qemu_ftell(s->to_dst_file);
1780         }
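         /*
          * If this window's budget is exhausted, sleep out the remainder
          * of the BUFFER_DELAY window before producing more data.
          */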
1781         if (qemu_file_rate_limit(s->to_dst_file)) {
1782             /* usleep expects microseconds */
1783             g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
1784         }
1785     }
1786
1787     trace_migration_thread_after_loop();
1788     /* If we enabled cpu throttling for auto-converge, turn it off. */
1789     cpu_throttle_stop();
1790     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1791
1792     qemu_mutex_lock_iothread();
1793     qemu_savevm_state_cleanup();
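     /*
      * On success, record the final statistics; downtime was already set
      * at the postcopy transition if postcopy was entered. On failure,
      * restart the source VM only if it was running and postcopy was
      * never entered (after that the destination may own the guest).
      */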
1794     if (s->state == MIGRATION_STATUS_COMPLETED) {
1795         uint64_t transferred_bytes = qemu_ftell(s->to_dst_file);
1796         s->total_time = end_time - s->total_time;
1797         if (!entered_postcopy) {
1798             s->downtime = end_time - start_time;
1799         }
1800         if (s->total_time) {
1801             s->mbps = (((double) transferred_bytes * 8.0) /
1802                        ((double) s->total_time)) / 1000;
1803         }
1804         runstate_set(RUN_STATE_POSTMIGRATE);
1805     } else {
1806         if (old_vm_running && !entered_postcopy) {
1807             vm_start();
1808         }
1809     }
1810     qemu_bh_schedule(s->cleanup_bh);
1811     qemu_mutex_unlock_iothread();
1812
1813     rcu_unregister_thread();
1814     return NULL;
1815 }
1816
1817 void migrate_fd_connect(MigrationState *s)
1818 {
1819     /* This is a best first approximation; convert max_downtime from ns to ms */
1820     s->expected_downtime = max_downtime/1000000;
1821     s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);
1822
1823     qemu_file_set_blocking(s->to_dst_file, true);
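     /*
      * bandwidth_limit is in bytes/s; the limiter is applied per
      * BUFFER_DELAY chunk, so divide by XFER_LIMIT_RATIO
      * (1000 / BUFFER_DELAY) for the per-chunk budget.
      */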
1824     qemu_file_set_rate_limit(s->to_dst_file,
1825                              s->bandwidth_limit / XFER_LIMIT_RATIO);
1826
1827     /* Notify before starting migration thread */
1828     notifier_list_notify(&migration_state_notifiers, s);
1829
1830     /*
1831      * Open the return path; currently for postcopy but other things might
1832      * also want it.
1833      */
1834     if (migrate_postcopy_ram()) {
1835         if (open_return_path_on_source(s)) {
1836             error_report("Unable to open return-path for postcopy");
1837             migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
1838                               MIGRATION_STATUS_FAILED);
1839             migrate_fd_cleanup(s);
1840             return;
1841         }
1842     }
1843
1844     migrate_compress_threads_create();
1845     qemu_thread_create(&s->thread, "migration", migration_thread, s,
1846                        QEMU_THREAD_JOINABLE);
1847     s->migration_thread_running = true;
1848 }
1849
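 /*
  * incoming_postcopy_state can be touched from more than one thread during
  * an incoming postcopy migration, so it is accessed only via the
  * barrier/atomic helpers below.
  */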
1850 PostcopyState postcopy_state_get(void)
1851 {
1852     return atomic_mb_read(&incoming_postcopy_state);
1853 }
1854
1855 /* Set the state and return the old state */
1856 PostcopyState postcopy_state_set(PostcopyState new_state)
1857 {
1858     return atomic_xchg(&incoming_postcopy_state, new_state);
1859 }
1860