1 | /* | |
2 | * QEMU live migration | |
3 | * | |
4 | * Copyright IBM, Corp. 2008 | |
5 | * | |
6 | * Authors: | |
7 | * Anthony Liguori <[email protected]> | |
8 | * | |
9 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
10 | * the COPYING file in the top-level directory. | |
11 | * | |
12 | * Contributions after 2012-01-13 are licensed under the terms of the | |
13 | * GNU GPL, version 2 or (at your option) any later version. | |
14 | */ | |
15 | ||
16 | #include "qemu-common.h" | |
17 | #include "qemu/error-report.h" | |
18 | #include "qemu/main-loop.h" | |
19 | #include "migration/migration.h" | |
20 | #include "migration/qemu-file.h" | |
21 | #include "sysemu/sysemu.h" | |
22 | #include "block/block.h" | |
23 | #include "qapi/qmp/qerror.h" | |
24 | #include "qapi/util.h" | |
25 | #include "qemu/sockets.h" | |
26 | #include "qemu/rcu.h" | |
27 | #include "migration/block.h" | |
28 | #include "migration/postcopy-ram.h" | |
29 | #include "qemu/thread.h" | |
30 | #include "qmp-commands.h" | |
31 | #include "trace.h" | |
32 | #include "qapi-event.h" | |
33 | #include "qom/cpu.h" | |
34 | #include "exec/memory.h" | |
35 | #include "exec/address-spaces.h" | |
36 | ||
37 | #define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */ | |
38 | ||
39 | /* Amount of time to allocate to each "chunk" of bandwidth-throttled | |
40 | * data. */ | |
41 | #define BUFFER_DELAY 100 | |
42 | #define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY) | |
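/*
 * Worked example of how these two macros combine (values from this file):
 * the rate limiter is given bandwidth_limit / XFER_LIMIT_RATIO, i.e. the
 * number of bytes allowed per BUFFER_DELAY-sized chunk.
 *
 *   BUFFER_DELAY     = 100 ms
 *   XFER_LIMIT_RATIO = 1000 / 100 = 10 chunks per second
 *   MAX_THROTTLE     = 32 << 20   = 33554432 bytes/s (32 MiB/s)
 *
 *   per-chunk limit  = 33554432 / 10 = 3355443 bytes (~3.2 MiB per 100 ms)
 *
 * See qmp_migrate_set_speed(), which performs exactly this division before
 * calling qemu_file_set_rate_limit().
 */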
43 | ||
44 | /* Default compression thread count */ | |
45 | #define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8 | |
46 | /* Default decompression thread count; decompression is usually at | |
47 | * least 4 times as fast as compression, so fewer threads are needed. */ | |
48 | #define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2 | |
49 | /* 0: no compression, 1: best speed, ..., 9: best compression ratio */ | |
50 | #define DEFAULT_MIGRATE_COMPRESS_LEVEL 1 | |
51 | /* Define default autoconverge cpu throttle migration parameters */ | |
52 | #define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20 | |
53 | #define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10 | |
54 | ||
55 | /* Migration XBZRLE default cache size */ | |
56 | #define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024) | |
57 | ||
58 | static NotifierList migration_state_notifiers = | |
59 | NOTIFIER_LIST_INITIALIZER(migration_state_notifiers); | |
60 | ||
61 | static bool deferred_incoming; | |
62 | ||
63 | /* | |
64 | * Current state of incoming postcopy; note this is not part of | |
65 | * MigrationIncomingState since its state is used during cleanup | |
66 | * at the end, while the MIS itself is being freed. | |
67 | */ | |
68 | static PostcopyState incoming_postcopy_state; | |
69 | ||
70 | /* When we add fault tolerance, we could have several | |
71 | migrations at once. For now we don't need to support | |
72 | dynamic creation of migration state. */ | |
73 | ||
74 | /* For outgoing */ | |
75 | MigrationState *migrate_get_current(void) | |
76 | { | |
77 | static bool once; | |
78 | static MigrationState current_migration = { | |
79 | .state = MIGRATION_STATUS_NONE, | |
80 | .bandwidth_limit = MAX_THROTTLE, | |
81 | .xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE, | |
82 | .mbps = -1, | |
83 | .parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = | |
84 | DEFAULT_MIGRATE_COMPRESS_LEVEL, | |
85 | .parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = | |
86 | DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT, | |
87 | .parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = | |
88 | DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT, | |
89 | .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] = | |
90 | DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL, | |
91 | .parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] = | |
92 | DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT, | |
93 | }; | |
94 | ||
95 | if (!once) { | |
96 | qemu_mutex_init(&current_migration.src_page_req_mutex); | |
97 | once = true; | |
98 | } | |
99 | return &current_migration; | |
100 | } | |
101 | ||
102 | /* For incoming */ | |
103 | static MigrationIncomingState *mis_current; | |
104 | ||
105 | MigrationIncomingState *migration_incoming_get_current(void) | |
106 | { | |
107 | return mis_current; | |
108 | } | |
109 | ||
110 | MigrationIncomingState *migration_incoming_state_new(QEMUFile* f) | |
111 | { | |
112 | mis_current = g_new0(MigrationIncomingState, 1); | |
113 | mis_current->from_src_file = f; | |
114 | QLIST_INIT(&mis_current->loadvm_handlers); | |
115 | qemu_mutex_init(&mis_current->rp_mutex); | |
116 | qemu_event_init(&mis_current->main_thread_load_event, false); | |
117 | ||
118 | return mis_current; | |
119 | } | |
120 | ||
121 | void migration_incoming_state_destroy(void) | |
122 | { | |
123 | qemu_event_destroy(&mis_current->main_thread_load_event); | |
124 | loadvm_free_handlers(mis_current); | |
125 | g_free(mis_current); | |
126 | mis_current = NULL; | |
127 | } | |
128 | ||
129 | ||
130 | typedef struct { | |
131 | bool optional; | |
132 | uint32_t size; | |
133 | uint8_t runstate[100]; | |
134 | RunState state; | |
135 | bool received; | |
136 | } GlobalState; | |
137 | ||
138 | static GlobalState global_state; | |
139 | ||
140 | int global_state_store(void) | |
141 | { | |
142 | if (!runstate_store((char *)global_state.runstate, | |
143 | sizeof(global_state.runstate))) { | |
144 | error_report("runstate name too big: %s", global_state.runstate); | |
145 | trace_migrate_state_too_big(); | |
146 | return -EINVAL; | |
147 | } | |
148 | return 0; | |
149 | } | |
150 | ||
151 | void global_state_store_running(void) | |
152 | { | |
153 | const char *state = RunState_lookup[RUN_STATE_RUNNING]; | |
154 | strncpy((char *)global_state.runstate, | |
155 | state, sizeof(global_state.runstate)); | |
156 | } | |
157 | ||
158 | static bool global_state_received(void) | |
159 | { | |
160 | return global_state.received; | |
161 | } | |
162 | ||
163 | static RunState global_state_get_runstate(void) | |
164 | { | |
165 | return global_state.state; | |
166 | } | |
167 | ||
168 | void global_state_set_optional(void) | |
169 | { | |
170 | global_state.optional = true; | |
171 | } | |
172 | ||
173 | static bool global_state_needed(void *opaque) | |
174 | { | |
175 | GlobalState *s = opaque; | |
176 | char *runstate = (char *)s->runstate; | |
177 | ||
178 | /* If it is not optional, it is mandatory */ | |
179 | ||
180 | if (s->optional == false) { | |
181 | return true; | |
182 | } | |
183 | ||
184 | /* If state is running or paused, it is not needed */ | |
185 | ||
186 | if (strcmp(runstate, "running") == 0 || | |
187 | strcmp(runstate, "paused") == 0) { | |
188 | return false; | |
189 | } | |
190 | ||
191 | /* for any other state it is needed */ | |
192 | return true; | |
193 | } | |
194 | ||
195 | static int global_state_post_load(void *opaque, int version_id) | |
196 | { | |
197 | GlobalState *s = opaque; | |
198 | Error *local_err = NULL; | |
199 | int r; | |
200 | char *runstate = (char *)s->runstate; | |
201 | ||
202 | s->received = true; | |
203 | trace_migrate_global_state_post_load(runstate); | |
204 | ||
205 | r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX, | |
206 | -1, &local_err); | |
207 | ||
208 | if (r == -1) { | |
209 | if (local_err) { | |
210 | error_report_err(local_err); | |
211 | } | |
212 | return -EINVAL; | |
213 | } | |
214 | s->state = r; | |
215 | ||
216 | return 0; | |
217 | } | |
218 | ||
219 | static void global_state_pre_save(void *opaque) | |
220 | { | |
221 | GlobalState *s = opaque; | |
222 | ||
223 | trace_migrate_global_state_pre_save((char *)s->runstate); | |
224 | s->size = strlen((char *)s->runstate) + 1; | |
225 | } | |
226 | ||
227 | static const VMStateDescription vmstate_globalstate = { | |
228 | .name = "globalstate", | |
229 | .version_id = 1, | |
230 | .minimum_version_id = 1, | |
231 | .post_load = global_state_post_load, | |
232 | .pre_save = global_state_pre_save, | |
233 | .needed = global_state_needed, | |
234 | .fields = (VMStateField[]) { | |
235 | VMSTATE_UINT32(size, GlobalState), | |
236 | VMSTATE_BUFFER(runstate, GlobalState), | |
237 | VMSTATE_END_OF_LIST() | |
238 | }, | |
239 | }; | |
240 | ||
241 | void register_global_state(void) | |
242 | { | |
243 | /* We use this state regardless of whether we receive it from the source */ | |
244 | strcpy((char *)&global_state.runstate, ""); | |
245 | global_state.received = false; | |
246 | vmstate_register(NULL, 0, &vmstate_globalstate, &global_state); | |
247 | } | |
248 | ||
249 | static void migrate_generate_event(int new_state) | |
250 | { | |
251 | if (migrate_use_events()) { | |
252 | qapi_event_send_migration(new_state, &error_abort); | |
253 | } | |
254 | } | |
255 | ||
256 | /* | |
257 | * Called on -incoming with a defer: uri. | |
258 | * The migration can be started later after any parameters have been | |
259 | * changed. | |
260 | */ | |
261 | static void deferred_incoming_migration(Error **errp) | |
262 | { | |
263 | if (deferred_incoming) { | |
264 | error_setg(errp, "Incoming migration already deferred"); | |
265 | } | |
266 | deferred_incoming = true; | |
267 | } | |
268 | ||
269 | /* Request a range of pages from the source VM at the given | |
270 | * start address. | |
271 | * rbname: Name of the RAMBlock to request the page in; if NULL it's the same | |
272 | * as the last request (a name must have been given previously) | |
273 | * start: Address offset within the RB | |
274 | * len: Length in bytes required - must be a multiple of pagesize | |
275 | */ | |
276 | void migrate_send_rp_req_pages(MigrationIncomingState *mis, const char *rbname, | |
277 | ram_addr_t start, size_t len) | |
278 | { | |
279 | uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), 1 byte name length, rbname up to 255 */ | |
280 | size_t msglen = 12; /* start + len */ | |
281 | ||
282 | *(uint64_t *)bufc = cpu_to_be64((uint64_t)start); | |
283 | *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len); | |
284 | ||
285 | if (rbname) { | |
286 | int rbname_len = strlen(rbname); | |
287 | assert(rbname_len < 256); | |
288 | ||
289 | bufc[msglen++] = rbname_len; | |
290 | memcpy(bufc + msglen, rbname, rbname_len); | |
291 | msglen += rbname_len; | |
292 | migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES_ID, msglen, bufc); | |
293 | } else { | |
294 | migrate_send_rp_message(mis, MIG_RP_MSG_REQ_PAGES, msglen, bufc); | |
295 | } | |
296 | } | |
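/*
 * Illustrative example (not part of the original code): a request for one
 * 4 KiB page at offset 0x200000 of a RAMBlock named "pc.ram" would be
 * packed into bufc as follows before being handed to
 * migrate_send_rp_message(..., MIG_RP_MSG_REQ_PAGES_ID, ...):
 *
 *   bytes  0-7   0x0000000000200000   start, big-endian
 *   bytes  8-11  0x00001000           len, big-endian
 *   byte   12    6                    strlen("pc.ram")
 *   bytes 13-18  "pc.ram"             RAMBlock name, not NUL-terminated
 *
 * msglen would therefore be 12 + 1 + 6 = 19 bytes.  The block name and the
 * offsets are hypothetical values chosen only for the example.
 */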
297 | ||
298 | void qemu_start_incoming_migration(const char *uri, Error **errp) | |
299 | { | |
300 | const char *p; | |
301 | ||
302 | qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort); | |
303 | if (!strcmp(uri, "defer")) { | |
304 | deferred_incoming_migration(errp); | |
305 | } else if (strstart(uri, "tcp:", &p)) { | |
306 | tcp_start_incoming_migration(p, errp); | |
307 | #ifdef CONFIG_RDMA | |
308 | } else if (strstart(uri, "rdma:", &p)) { | |
309 | rdma_start_incoming_migration(p, errp); | |
310 | #endif | |
311 | #if !defined(WIN32) | |
312 | } else if (strstart(uri, "exec:", &p)) { | |
313 | exec_start_incoming_migration(p, errp); | |
314 | } else if (strstart(uri, "unix:", &p)) { | |
315 | unix_start_incoming_migration(p, errp); | |
316 | } else if (strstart(uri, "fd:", &p)) { | |
317 | fd_start_incoming_migration(p, errp); | |
318 | #endif | |
319 | } else { | |
320 | error_setg(errp, "unknown migration protocol: %s", uri); | |
321 | } | |
322 | } | |
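/*
 * Examples of URIs accepted above (hosts, ports and paths are placeholders):
 * "tcp:0:4444", "unix:/tmp/migrate.sock", "fd:42", "exec:cat state.bin",
 * "rdma:host:port" (only when built with CONFIG_RDMA), and the special
 * "defer", which postpones setup until a later migrate-incoming command
 * (see qmp_migrate_incoming() below).
 */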
323 | ||
324 | static void process_incoming_migration_co(void *opaque) | |
325 | { | |
326 | QEMUFile *f = opaque; | |
327 | Error *local_err = NULL; | |
328 | MigrationIncomingState *mis; | |
329 | PostcopyState ps; | |
330 | int ret; | |
331 | ||
332 | mis = migration_incoming_state_new(f); | |
333 | postcopy_state_set(POSTCOPY_INCOMING_NONE); | |
334 | migrate_generate_event(MIGRATION_STATUS_ACTIVE); | |
335 | ||
336 | ret = qemu_loadvm_state(f); | |
337 | ||
338 | ps = postcopy_state_get(); | |
339 | trace_process_incoming_migration_co_end(ret, ps); | |
340 | if (ps != POSTCOPY_INCOMING_NONE) { | |
341 | if (ps == POSTCOPY_INCOMING_ADVISE) { | |
342 | /* | |
343 | * Where a migration had postcopy enabled (and thus went to advise) | |
344 | * but managed to complete within the precopy period, we can use | |
345 | * the normal exit. | |
346 | */ | |
347 | postcopy_ram_incoming_cleanup(mis); | |
348 | } else if (ret >= 0) { | |
349 | /* | |
350 | * Postcopy was started, cleanup should happen at the end of the | |
351 | * postcopy thread. | |
352 | */ | |
353 | trace_process_incoming_migration_co_postcopy_end_main(); | |
354 | return; | |
355 | } | |
356 | /* Else if something went wrong then just fall out of the normal exit */ | |
357 | } | |
358 | ||
359 | qemu_fclose(f); | |
360 | free_xbzrle_decoded_buf(); | |
361 | migration_incoming_state_destroy(); | |
362 | ||
363 | if (ret < 0) { | |
364 | migrate_generate_event(MIGRATION_STATUS_FAILED); | |
365 | error_report("load of migration failed: %s", strerror(-ret)); | |
366 | migrate_decompress_threads_join(); | |
367 | exit(EXIT_FAILURE); | |
368 | } | |
369 | ||
370 | /* Make sure all file formats flush their mutable metadata */ | |
371 | bdrv_invalidate_cache_all(&local_err); | |
372 | if (local_err) { | |
373 | migrate_generate_event(MIGRATION_STATUS_FAILED); | |
374 | error_report_err(local_err); | |
375 | migrate_decompress_threads_join(); | |
376 | exit(EXIT_FAILURE); | |
377 | } | |
378 | ||
379 | /* | |
380 | * This must happen after all error conditions are dealt with and | |
381 | * we're sure the VM is going to be running on this host. | |
382 | */ | |
383 | qemu_announce_self(); | |
384 | ||
385 | /* If the global state section was not received, or the received state is | |
386 | "running", we need to obey autostart. Any other received state is applied | |
387 | with runstate_set(). */ | |
388 | ||
389 | if (!global_state_received() || | |
390 | global_state_get_runstate() == RUN_STATE_RUNNING) { | |
391 | if (autostart) { | |
392 | vm_start(); | |
393 | } else { | |
394 | runstate_set(RUN_STATE_PAUSED); | |
395 | } | |
396 | } else { | |
397 | runstate_set(global_state_get_runstate()); | |
398 | } | |
399 | migrate_decompress_threads_join(); | |
400 | /* | |
401 | * This must happen after any state changes since as soon as an external | |
402 | * observer sees this event they might start to prod at the VM assuming | |
403 | * it's ready to use. | |
404 | */ | |
405 | migrate_generate_event(MIGRATION_STATUS_COMPLETED); | |
406 | } | |
407 | ||
408 | void process_incoming_migration(QEMUFile *f) | |
409 | { | |
410 | Coroutine *co = qemu_coroutine_create(process_incoming_migration_co); | |
411 | int fd = qemu_get_fd(f); | |
412 | ||
413 | assert(fd != -1); | |
414 | migrate_decompress_threads_create(); | |
415 | qemu_set_nonblock(fd); | |
416 | qemu_coroutine_enter(co, f); | |
417 | } | |
418 | ||
419 | /* | |
420 | * Send a message on the return channel back to the source | |
421 | * of the migration. | |
422 | */ | |
423 | void migrate_send_rp_message(MigrationIncomingState *mis, | |
424 | enum mig_rp_message_type message_type, | |
425 | uint16_t len, void *data) | |
426 | { | |
427 | trace_migrate_send_rp_message((int)message_type, len); | |
428 | qemu_mutex_lock(&mis->rp_mutex); | |
429 | qemu_put_be16(mis->to_src_file, (unsigned int)message_type); | |
430 | qemu_put_be16(mis->to_src_file, len); | |
431 | qemu_put_buffer(mis->to_src_file, data, len); | |
432 | qemu_fflush(mis->to_src_file); | |
433 | qemu_mutex_unlock(&mis->rp_mutex); | |
434 | } | |
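/*
 * On the wire, each return-path message written here is framed as:
 *
 *   2 bytes  message type (big-endian, one of enum mig_rp_message_type)
 *   2 bytes  payload length in bytes (big-endian)
 *   N bytes  payload
 *
 * e.g. a PONG carries a 4-byte big-endian value, so its whole frame is
 * 8 bytes.  This is just a restatement of the qemu_put_be16()/
 * qemu_put_buffer() calls above, for readers matching them against the
 * reads in source_return_path_thread().
 */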
435 | ||
436 | /* | |
437 | * Send a 'SHUT' message on the return channel with the given value | |
438 | * to indicate that we've finished with the RP. Non-0 value indicates | |
439 | * error. | |
440 | */ | |
441 | void migrate_send_rp_shut(MigrationIncomingState *mis, | |
442 | uint32_t value) | |
443 | { | |
444 | uint32_t buf; | |
445 | ||
446 | buf = cpu_to_be32(value); | |
447 | migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf); | |
448 | } | |
449 | ||
450 | /* | |
451 | * Send a 'PONG' message on the return channel with the given value | |
452 | * (normally in response to a 'PING') | |
453 | */ | |
454 | void migrate_send_rp_pong(MigrationIncomingState *mis, | |
455 | uint32_t value) | |
456 | { | |
457 | uint32_t buf; | |
458 | ||
459 | buf = cpu_to_be32(value); | |
460 | migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf); | |
461 | } | |
462 | ||
463 | /* Maximum amount of time, in nanoseconds, the guest is allowed to be down | |
464 | * during migration (default 300000000 ns = 300 ms). Nanoseconds are used | |
465 | * because that is the finest resolution get_clock() can achieve. This is an | |
466 | * internal measure; all user-visible units must be in seconds. */ | |
467 | static uint64_t max_downtime = 300000000; | |
468 | ||
469 | uint64_t migrate_max_downtime(void) | |
470 | { | |
471 | return max_downtime; | |
472 | } | |
473 | ||
474 | MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp) | |
475 | { | |
476 | MigrationCapabilityStatusList *head = NULL; | |
477 | MigrationCapabilityStatusList *caps; | |
478 | MigrationState *s = migrate_get_current(); | |
479 | int i; | |
480 | ||
481 | caps = NULL; /* silence compiler warning */ | |
482 | for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) { | |
483 | if (head == NULL) { | |
484 | head = g_malloc0(sizeof(*caps)); | |
485 | caps = head; | |
486 | } else { | |
487 | caps->next = g_malloc0(sizeof(*caps)); | |
488 | caps = caps->next; | |
489 | } | |
490 | caps->value = | |
491 | g_malloc(sizeof(*caps->value)); | |
492 | caps->value->capability = i; | |
493 | caps->value->state = s->enabled_capabilities[i]; | |
494 | } | |
495 | ||
496 | return head; | |
497 | } | |
498 | ||
499 | MigrationParameters *qmp_query_migrate_parameters(Error **errp) | |
500 | { | |
501 | MigrationParameters *params; | |
502 | MigrationState *s = migrate_get_current(); | |
503 | ||
504 | params = g_malloc0(sizeof(*params)); | |
505 | params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; | |
506 | params->compress_threads = | |
507 | s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; | |
508 | params->decompress_threads = | |
509 | s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; | |
510 | params->x_cpu_throttle_initial = | |
511 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL]; | |
512 | params->x_cpu_throttle_increment = | |
513 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT]; | |
514 | ||
515 | return params; | |
516 | } | |
517 | ||
518 | /* | |
519 | * Return true if we're already in the middle of a migration | |
520 | * (i.e. any of the active or setup states) | |
521 | */ | |
522 | static bool migration_is_setup_or_active(int state) | |
523 | { | |
524 | switch (state) { | |
525 | case MIGRATION_STATUS_ACTIVE: | |
526 | case MIGRATION_STATUS_POSTCOPY_ACTIVE: | |
527 | case MIGRATION_STATUS_SETUP: | |
528 | return true; | |
529 | ||
530 | default: | |
531 | return false; | |
532 | ||
533 | } | |
534 | } | |
535 | ||
536 | static void get_xbzrle_cache_stats(MigrationInfo *info) | |
537 | { | |
538 | if (migrate_use_xbzrle()) { | |
539 | info->has_xbzrle_cache = true; | |
540 | info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache)); | |
541 | info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size(); | |
542 | info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred(); | |
543 | info->xbzrle_cache->pages = xbzrle_mig_pages_transferred(); | |
544 | info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss(); | |
545 | info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate(); | |
546 | info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow(); | |
547 | } | |
548 | } | |
549 | ||
550 | MigrationInfo *qmp_query_migrate(Error **errp) | |
551 | { | |
552 | MigrationInfo *info = g_malloc0(sizeof(*info)); | |
553 | MigrationState *s = migrate_get_current(); | |
554 | ||
555 | switch (s->state) { | |
556 | case MIGRATION_STATUS_NONE: | |
557 | /* no migration has happened ever */ | |
558 | break; | |
559 | case MIGRATION_STATUS_SETUP: | |
560 | info->has_status = true; | |
561 | info->has_total_time = false; | |
562 | break; | |
563 | case MIGRATION_STATUS_ACTIVE: | |
564 | case MIGRATION_STATUS_CANCELLING: | |
565 | info->has_status = true; | |
566 | info->has_total_time = true; | |
567 | info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) | |
568 | - s->total_time; | |
569 | info->has_expected_downtime = true; | |
570 | info->expected_downtime = s->expected_downtime; | |
571 | info->has_setup_time = true; | |
572 | info->setup_time = s->setup_time; | |
573 | ||
574 | info->has_ram = true; | |
575 | info->ram = g_malloc0(sizeof(*info->ram)); | |
576 | info->ram->transferred = ram_bytes_transferred(); | |
577 | info->ram->remaining = ram_bytes_remaining(); | |
578 | info->ram->total = ram_bytes_total(); | |
579 | info->ram->duplicate = dup_mig_pages_transferred(); | |
580 | info->ram->skipped = skipped_mig_pages_transferred(); | |
581 | info->ram->normal = norm_mig_pages_transferred(); | |
582 | info->ram->normal_bytes = norm_mig_bytes_transferred(); | |
583 | info->ram->dirty_pages_rate = s->dirty_pages_rate; | |
584 | info->ram->mbps = s->mbps; | |
585 | info->ram->dirty_sync_count = s->dirty_sync_count; | |
586 | ||
587 | if (blk_mig_active()) { | |
588 | info->has_disk = true; | |
589 | info->disk = g_malloc0(sizeof(*info->disk)); | |
590 | info->disk->transferred = blk_mig_bytes_transferred(); | |
591 | info->disk->remaining = blk_mig_bytes_remaining(); | |
592 | info->disk->total = blk_mig_bytes_total(); | |
593 | } | |
594 | ||
595 | if (cpu_throttle_active()) { | |
596 | info->has_x_cpu_throttle_percentage = true; | |
597 | info->x_cpu_throttle_percentage = cpu_throttle_get_percentage(); | |
598 | } | |
599 | ||
600 | get_xbzrle_cache_stats(info); | |
601 | break; | |
602 | case MIGRATION_STATUS_POSTCOPY_ACTIVE: | |
603 | /* Mostly the same as active; TODO add some postcopy stats */ | |
604 | info->has_status = true; | |
605 | info->has_total_time = true; | |
606 | info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) | |
607 | - s->total_time; | |
608 | info->has_expected_downtime = true; | |
609 | info->expected_downtime = s->expected_downtime; | |
610 | info->has_setup_time = true; | |
611 | info->setup_time = s->setup_time; | |
612 | ||
613 | info->has_ram = true; | |
614 | info->ram = g_malloc0(sizeof(*info->ram)); | |
615 | info->ram->transferred = ram_bytes_transferred(); | |
616 | info->ram->remaining = ram_bytes_remaining(); | |
617 | info->ram->total = ram_bytes_total(); | |
618 | info->ram->duplicate = dup_mig_pages_transferred(); | |
619 | info->ram->skipped = skipped_mig_pages_transferred(); | |
620 | info->ram->normal = norm_mig_pages_transferred(); | |
621 | info->ram->normal_bytes = norm_mig_bytes_transferred(); | |
622 | info->ram->dirty_pages_rate = s->dirty_pages_rate; | |
623 | info->ram->mbps = s->mbps; | |
624 | ||
625 | if (blk_mig_active()) { | |
626 | info->has_disk = true; | |
627 | info->disk = g_malloc0(sizeof(*info->disk)); | |
628 | info->disk->transferred = blk_mig_bytes_transferred(); | |
629 | info->disk->remaining = blk_mig_bytes_remaining(); | |
630 | info->disk->total = blk_mig_bytes_total(); | |
631 | } | |
632 | ||
633 | get_xbzrle_cache_stats(info); | |
634 | break; | |
635 | case MIGRATION_STATUS_COMPLETED: | |
636 | get_xbzrle_cache_stats(info); | |
637 | ||
638 | info->has_status = true; | |
639 | info->has_total_time = true; | |
640 | info->total_time = s->total_time; | |
641 | info->has_downtime = true; | |
642 | info->downtime = s->downtime; | |
643 | info->has_setup_time = true; | |
644 | info->setup_time = s->setup_time; | |
645 | ||
646 | info->has_ram = true; | |
647 | info->ram = g_malloc0(sizeof(*info->ram)); | |
648 | info->ram->transferred = ram_bytes_transferred(); | |
649 | info->ram->remaining = 0; | |
650 | info->ram->total = ram_bytes_total(); | |
651 | info->ram->duplicate = dup_mig_pages_transferred(); | |
652 | info->ram->skipped = skipped_mig_pages_transferred(); | |
653 | info->ram->normal = norm_mig_pages_transferred(); | |
654 | info->ram->normal_bytes = norm_mig_bytes_transferred(); | |
655 | info->ram->mbps = s->mbps; | |
656 | info->ram->dirty_sync_count = s->dirty_sync_count; | |
657 | break; | |
658 | case MIGRATION_STATUS_FAILED: | |
659 | info->has_status = true; | |
660 | break; | |
661 | case MIGRATION_STATUS_CANCELLED: | |
662 | info->has_status = true; | |
663 | break; | |
664 | } | |
665 | info->status = s->state; | |
666 | ||
667 | return info; | |
668 | } | |
669 | ||
670 | void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params, | |
671 | Error **errp) | |
672 | { | |
673 | MigrationState *s = migrate_get_current(); | |
674 | MigrationCapabilityStatusList *cap; | |
675 | ||
676 | if (migration_is_setup_or_active(s->state)) { | |
677 | error_setg(errp, QERR_MIGRATION_ACTIVE); | |
678 | return; | |
679 | } | |
680 | ||
681 | for (cap = params; cap; cap = cap->next) { | |
682 | s->enabled_capabilities[cap->value->capability] = cap->value->state; | |
683 | } | |
684 | ||
685 | if (migrate_postcopy_ram()) { | |
686 | if (migrate_use_compression()) { | |
687 | /* The decompression threads asynchronously write into RAM | |
688 | * rather than use the atomic copies needed to avoid | |
689 | * userfaulting. It should be possible to fix the decompression | |
690 | * threads for compatibility in future. | |
691 | */ | |
692 | error_report("Postcopy is not currently compatible with " | |
693 | "compression"); | |
694 | s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM] = | |
695 | false; | |
696 | } | |
697 | } | |
698 | } | |
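/*
 * This function backs the QMP migrate-set-capabilities command.  An
 * illustrative invocation (capability spellings come from the QAPI schema)
 * that enables the experimental postcopy capability:
 *
 *   { "execute": "migrate-set-capabilities",
 *     "arguments": { "capabilities": [
 *         { "capability": "x-postcopy-ram", "state": true } ] } }
 *
 * Note the check above: postcopy and compression are currently mutually
 * exclusive, so enabling both leaves postcopy switched back off.
 */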
699 | ||
700 | void qmp_migrate_set_parameters(bool has_compress_level, | |
701 | int64_t compress_level, | |
702 | bool has_compress_threads, | |
703 | int64_t compress_threads, | |
704 | bool has_decompress_threads, | |
705 | int64_t decompress_threads, | |
706 | bool has_x_cpu_throttle_initial, | |
707 | int64_t x_cpu_throttle_initial, | |
708 | bool has_x_cpu_throttle_increment, | |
709 | int64_t x_cpu_throttle_increment, Error **errp) | |
710 | { | |
711 | MigrationState *s = migrate_get_current(); | |
712 | ||
713 | if (has_compress_level && (compress_level < 0 || compress_level > 9)) { | |
714 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level", | |
715 | "is invalid, it should be in the range of 0 to 9"); | |
716 | return; | |
717 | } | |
718 | if (has_compress_threads && | |
719 | (compress_threads < 1 || compress_threads > 255)) { | |
720 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, | |
721 | "compress_threads", | |
722 | "is invalid, it should be in the range of 1 to 255"); | |
723 | return; | |
724 | } | |
725 | if (has_decompress_threads && | |
726 | (decompress_threads < 1 || decompress_threads > 255)) { | |
727 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, | |
728 | "decompress_threads", | |
729 | "is invalid, it should be in the range of 1 to 255"); | |
730 | return; | |
731 | } | |
732 | if (has_x_cpu_throttle_initial && | |
733 | (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) { | |
734 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, | |
735 | "x_cpu_throttle_initial", | |
736 | "an integer in the range of 1 to 99"); | |
| return; | |
737 | } | |
738 | if (has_x_cpu_throttle_increment && | |
739 | (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) { | |
740 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, | |
741 | "x_cpu_throttle_increment", | |
742 | "an integer in the range of 1 to 99"); | |
| return; | |
743 | } | |
744 | ||
745 | if (has_compress_level) { | |
746 | s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level; | |
747 | } | |
748 | if (has_compress_threads) { | |
749 | s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads; | |
750 | } | |
751 | if (has_decompress_threads) { | |
752 | s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = | |
753 | decompress_threads; | |
754 | } | |
755 | if (has_x_cpu_throttle_initial) { | |
756 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] = | |
757 | x_cpu_throttle_initial; | |
758 | } | |
759 | ||
760 | if (has_x_cpu_throttle_increment) { | |
761 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] = | |
762 | x_cpu_throttle_increment; | |
763 | } | |
764 | } | |
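/*
 * This backs the QMP migrate-set-parameters command.  An illustrative
 * invocation (parameter spellings per the QAPI schema of this release):
 *
 *   { "execute": "migrate-set-parameters",
 *     "arguments": { "compress-level": 1, "compress-threads": 8 } }
 *
 * Every parameter is optional; only those supplied (signalled by the has_*
 * flags above) are validated and stored.
 */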
765 | ||
766 | void qmp_migrate_start_postcopy(Error **errp) | |
767 | { | |
768 | MigrationState *s = migrate_get_current(); | |
769 | ||
770 | if (!migrate_postcopy_ram()) { | |
771 | error_setg(errp, "Enable postcopy with migration_set_capability before" | |
772 | " the start of migration"); | |
773 | return; | |
774 | } | |
775 | ||
776 | if (s->state == MIGRATION_STATUS_NONE) { | |
777 | error_setg(errp, "Postcopy must be started after migration has been" | |
778 | " started"); | |
779 | return; | |
780 | } | |
781 | /* | |
782 | * we don't error if migration has finished since that would be racy | |
783 | * with issuing this command. | |
784 | */ | |
785 | atomic_set(&s->start_postcopy, true); | |
786 | } | |
787 | ||
788 | /* shared migration helpers */ | |
789 | ||
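/*
 * migrate_set_state() only performs the transition if the state is still
 * old_state at that instant (atomic_cmpxchg), which guards against races
 * with concurrent transitions - e.g. migrate_fd_cancel() moving the state
 * to CANCELLING from another thread while the migration thread is trying
 * to advance it.
 */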
790 | static void migrate_set_state(MigrationState *s, int old_state, int new_state) | |
791 | { | |
792 | if (atomic_cmpxchg(&s->state, old_state, new_state) == old_state) { | |
793 | trace_migrate_set_state(new_state); | |
794 | migrate_generate_event(new_state); | |
795 | } | |
796 | } | |
797 | ||
798 | static void migrate_fd_cleanup(void *opaque) | |
799 | { | |
800 | MigrationState *s = opaque; | |
801 | ||
802 | qemu_bh_delete(s->cleanup_bh); | |
803 | s->cleanup_bh = NULL; | |
804 | ||
805 | flush_page_queue(s); | |
806 | ||
807 | if (s->file) { | |
808 | trace_migrate_fd_cleanup(); | |
809 | qemu_mutex_unlock_iothread(); | |
810 | if (s->migration_thread_running) { | |
811 | qemu_thread_join(&s->thread); | |
812 | s->migration_thread_running = false; | |
813 | } | |
814 | qemu_mutex_lock_iothread(); | |
815 | ||
816 | migrate_compress_threads_join(); | |
817 | qemu_fclose(s->file); | |
818 | s->file = NULL; | |
819 | } | |
820 | ||
821 | assert((s->state != MIGRATION_STATUS_ACTIVE) && | |
822 | (s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE)); | |
823 | ||
824 | if (s->state == MIGRATION_STATUS_CANCELLING) { | |
825 | migrate_set_state(s, MIGRATION_STATUS_CANCELLING, | |
826 | MIGRATION_STATUS_CANCELLED); | |
827 | } | |
828 | ||
829 | notifier_list_notify(&migration_state_notifiers, s); | |
830 | } | |
831 | ||
832 | void migrate_fd_error(MigrationState *s) | |
833 | { | |
834 | trace_migrate_fd_error(); | |
835 | assert(s->file == NULL); | |
836 | migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED); | |
837 | notifier_list_notify(&migration_state_notifiers, s); | |
838 | } | |
839 | ||
840 | static void migrate_fd_cancel(MigrationState *s) | |
841 | { | |
842 | int old_state; | |
843 | QEMUFile *f = migrate_get_current()->file; | |
844 | trace_migrate_fd_cancel(); | |
845 | ||
846 | if (s->rp_state.from_dst_file) { | |
847 | /* shutdown the rp socket, causing the rp thread to shut down */ | |
848 | qemu_file_shutdown(s->rp_state.from_dst_file); | |
849 | } | |
850 | ||
851 | do { | |
852 | old_state = s->state; | |
853 | if (!migration_is_setup_or_active(old_state)) { | |
854 | break; | |
855 | } | |
856 | migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING); | |
857 | } while (s->state != MIGRATION_STATUS_CANCELLING); | |
858 | ||
859 | /* | |
860 | * If we're unlucky the migration code might be stuck somewhere in a | |
861 | * send/write while the network has failed and is waiting to timeout; | |
862 | * if we've got shutdown(2) available then we can force it to quit. | |
863 | * The outgoing qemu file gets closed in migrate_fd_cleanup that is | |
864 | * called in a bh, so there is no race against this cancel. | |
865 | */ | |
866 | if (s->state == MIGRATION_STATUS_CANCELLING && f) { | |
867 | qemu_file_shutdown(f); | |
868 | } | |
869 | } | |
870 | ||
871 | void add_migration_state_change_notifier(Notifier *notify) | |
872 | { | |
873 | notifier_list_add(&migration_state_notifiers, notify); | |
874 | } | |
875 | ||
876 | void remove_migration_state_change_notifier(Notifier *notify) | |
877 | { | |
878 | notifier_remove(notify); | |
879 | } | |
880 | ||
881 | bool migration_in_setup(MigrationState *s) | |
882 | { | |
883 | return s->state == MIGRATION_STATUS_SETUP; | |
884 | } | |
885 | ||
886 | bool migration_has_finished(MigrationState *s) | |
887 | { | |
888 | return s->state == MIGRATION_STATUS_COMPLETED; | |
889 | } | |
890 | ||
891 | bool migration_has_failed(MigrationState *s) | |
892 | { | |
893 | return (s->state == MIGRATION_STATUS_CANCELLED || | |
894 | s->state == MIGRATION_STATUS_FAILED); | |
895 | } | |
896 | ||
897 | bool migration_in_postcopy(MigrationState *s) | |
898 | { | |
899 | return (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE); | |
900 | } | |
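/*
 * Sketch of how a subsystem might consume the notifier list and the state
 * helpers above (hypothetical callback, not part of this file).  The
 * notifier is invoked with the MigrationState as its data argument, as done
 * by notifier_list_notify() in migrate_fd_cleanup() and migrate_fd_error():
 *
 *   static void my_migration_state_cb(Notifier *notifier, void *data)
 *   {
 *       MigrationState *s = data;
 *
 *       if (migration_in_setup(s)) {
 *           // migration is starting: get the device ready
 *       } else if (migration_has_finished(s) || migration_has_failed(s)) {
 *           // migration is over, one way or the other: resume normal work
 *       }
 *   }
 *
 *   static Notifier my_notifier = { .notify = my_migration_state_cb };
 *   add_migration_state_change_notifier(&my_notifier);
 */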
901 | ||
902 | MigrationState *migrate_init(const MigrationParams *params) | |
903 | { | |
904 | MigrationState *s = migrate_get_current(); | |
905 | int64_t bandwidth_limit = s->bandwidth_limit; | |
906 | bool enabled_capabilities[MIGRATION_CAPABILITY_MAX]; | |
907 | int64_t xbzrle_cache_size = s->xbzrle_cache_size; | |
908 | int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; | |
909 | int compress_thread_count = | |
910 | s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; | |
911 | int decompress_thread_count = | |
912 | s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; | |
913 | int x_cpu_throttle_initial = | |
914 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL]; | |
915 | int x_cpu_throttle_increment = | |
916 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT]; | |
917 | ||
918 | memcpy(enabled_capabilities, s->enabled_capabilities, | |
919 | sizeof(enabled_capabilities)); | |
920 | ||
921 | memset(s, 0, sizeof(*s)); | |
922 | s->params = *params; | |
923 | memcpy(s->enabled_capabilities, enabled_capabilities, | |
924 | sizeof(enabled_capabilities)); | |
925 | s->xbzrle_cache_size = xbzrle_cache_size; | |
926 | ||
927 | s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level; | |
928 | s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = | |
929 | compress_thread_count; | |
930 | s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] = | |
931 | decompress_thread_count; | |
932 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] = | |
933 | x_cpu_throttle_initial; | |
934 | s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] = | |
935 | x_cpu_throttle_increment; | |
936 | s->bandwidth_limit = bandwidth_limit; | |
937 | migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP); | |
938 | ||
939 | QSIMPLEQ_INIT(&s->src_page_requests); | |
940 | ||
941 | s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
942 | return s; | |
943 | } | |
944 | ||
945 | static GSList *migration_blockers; | |
946 | ||
947 | void migrate_add_blocker(Error *reason) | |
948 | { | |
949 | migration_blockers = g_slist_prepend(migration_blockers, reason); | |
950 | } | |
951 | ||
952 | void migrate_del_blocker(Error *reason) | |
953 | { | |
954 | migration_blockers = g_slist_remove(migration_blockers, reason); | |
955 | } | |
956 | ||
957 | void qmp_migrate_incoming(const char *uri, Error **errp) | |
958 | { | |
959 | Error *local_err = NULL; | |
960 | static bool once = true; | |
961 | ||
962 | if (!deferred_incoming) { | |
963 | error_setg(errp, "For use with '-incoming defer'"); | |
964 | return; | |
965 | } | |
966 | if (!once) { | |
967 | error_setg(errp, "The incoming migration has already been started"); | |
| return; | |
968 | } | |
969 | ||
970 | qemu_start_incoming_migration(uri, &local_err); | |
971 | ||
972 | if (local_err) { | |
973 | error_propagate(errp, local_err); | |
974 | return; | |
975 | } | |
976 | ||
977 | once = false; | |
978 | } | |
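/*
 * Typical deferred-incoming workflow (illustrative values): the destination
 * QEMU is launched with "-incoming defer", capabilities and parameters are
 * adjusted, and only then is the listen address supplied via QMP:
 *
 *   { "execute": "migrate-incoming",
 *     "arguments": { "uri": "tcp:0:4444" } }
 *
 * Issuing the command without "-incoming defer", or issuing it a second
 * time, fails with the errors set above.
 */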
979 | ||
980 | void qmp_migrate(const char *uri, bool has_blk, bool blk, | |
981 | bool has_inc, bool inc, bool has_detach, bool detach, | |
982 | Error **errp) | |
983 | { | |
984 | Error *local_err = NULL; | |
985 | MigrationState *s = migrate_get_current(); | |
986 | MigrationParams params; | |
987 | const char *p; | |
988 | ||
989 | params.blk = has_blk && blk; | |
990 | params.shared = has_inc && inc; | |
991 | ||
992 | if (migration_is_setup_or_active(s->state) || | |
993 | s->state == MIGRATION_STATUS_CANCELLING) { | |
994 | error_setg(errp, QERR_MIGRATION_ACTIVE); | |
995 | return; | |
996 | } | |
997 | if (runstate_check(RUN_STATE_INMIGRATE)) { | |
998 | error_setg(errp, "Guest is waiting for an incoming migration"); | |
999 | return; | |
1000 | } | |
1001 | ||
1002 | if (qemu_savevm_state_blocked(errp)) { | |
1003 | return; | |
1004 | } | |
1005 | ||
1006 | if (migration_blockers) { | |
1007 | *errp = error_copy(migration_blockers->data); | |
1008 | return; | |
1009 | } | |
1010 | ||
1011 | /* We are starting a new migration, so we want to start in a clean | |
1012 | state. This change is only needed if the previous migration | |
1013 | failed or was cancelled. We don't use migrate_set_state() because | |
1014 | we are setting the initial state, not changing it. */ | |
1015 | s->state = MIGRATION_STATUS_NONE; | |
1016 | ||
1017 | s = migrate_init(¶ms); | |
1018 | ||
1019 | if (strstart(uri, "tcp:", &p)) { | |
1020 | tcp_start_outgoing_migration(s, p, &local_err); | |
1021 | #ifdef CONFIG_RDMA | |
1022 | } else if (strstart(uri, "rdma:", &p)) { | |
1023 | rdma_start_outgoing_migration(s, p, &local_err); | |
1024 | #endif | |
1025 | #if !defined(WIN32) | |
1026 | } else if (strstart(uri, "exec:", &p)) { | |
1027 | exec_start_outgoing_migration(s, p, &local_err); | |
1028 | } else if (strstart(uri, "unix:", &p)) { | |
1029 | unix_start_outgoing_migration(s, p, &local_err); | |
1030 | } else if (strstart(uri, "fd:", &p)) { | |
1031 | fd_start_outgoing_migration(s, p, &local_err); | |
1032 | #endif | |
1033 | } else { | |
1034 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri", | |
1035 | "a valid migration protocol"); | |
1036 | migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED); | |
1037 | return; | |
1038 | } | |
1039 | ||
1040 | if (local_err) { | |
1041 | migrate_fd_error(s); | |
1042 | error_propagate(errp, local_err); | |
1043 | return; | |
1044 | } | |
1045 | } | |
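/*
 * Example of the corresponding QMP command on the source side (the
 * destination address is a placeholder):
 *
 *   { "execute": "migrate", "arguments": { "uri": "tcp:dest-host:4444" } }
 *
 * The same tcp:/rdma:/exec:/unix:/fd: URI schemes parsed above are accepted.
 */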
1046 | ||
1047 | void qmp_migrate_cancel(Error **errp) | |
1048 | { | |
1049 | migrate_fd_cancel(migrate_get_current()); | |
1050 | } | |
1051 | ||
1052 | void qmp_migrate_set_cache_size(int64_t value, Error **errp) | |
1053 | { | |
1054 | MigrationState *s = migrate_get_current(); | |
1055 | int64_t new_size; | |
1056 | ||
1057 | /* Check for truncation */ | |
1058 | if (value != (size_t)value) { | |
1059 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", | |
1060 | "exceeding address space"); | |
1061 | return; | |
1062 | } | |
1063 | ||
1064 | /* Cache should not be larger than guest ram size */ | |
1065 | if (value > ram_bytes_total()) { | |
1066 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", | |
1067 | "exceeds guest ram size "); | |
1068 | return; | |
1069 | } | |
1070 | ||
1071 | new_size = xbzrle_cache_resize(value); | |
1072 | if (new_size < 0) { | |
1073 | error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size", | |
1074 | "is smaller than page size"); | |
1075 | return; | |
1076 | } | |
1077 | ||
1078 | s->xbzrle_cache_size = new_size; | |
1079 | } | |
1080 | ||
1081 | int64_t qmp_query_migrate_cache_size(Error **errp) | |
1082 | { | |
1083 | return migrate_xbzrle_cache_size(); | |
1084 | } | |
1085 | ||
1086 | void qmp_migrate_set_speed(int64_t value, Error **errp) | |
1087 | { | |
1088 | MigrationState *s; | |
1089 | ||
1090 | if (value < 0) { | |
1091 | value = 0; | |
1092 | } | |
1093 | if (value > SIZE_MAX) { | |
1094 | value = SIZE_MAX; | |
1095 | } | |
1096 | ||
1097 | s = migrate_get_current(); | |
1098 | s->bandwidth_limit = value; | |
1099 | if (s->file) { | |
1100 | qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO); | |
1101 | } | |
1102 | } | |
1103 | ||
1104 | void qmp_migrate_set_downtime(double value, Error **errp) | |
1105 | { | |
1106 | value *= 1e9; | |
1107 | value = MAX(0, MIN(UINT64_MAX, value)); | |
1108 | max_downtime = (uint64_t)value; | |
1109 | } | |
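/*
 * Worked example: a request to set the downtime to 0.5 arrives here as
 * value = 0.5, is scaled to 0.5 * 1e9 = 500000000 ns, clamped into
 * [0, UINT64_MAX] and stored in max_downtime (whose default above
 * corresponds to 0.3 s).
 */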
1110 | ||
1111 | bool migrate_postcopy_ram(void) | |
1112 | { | |
1113 | MigrationState *s; | |
1114 | ||
1115 | s = migrate_get_current(); | |
1116 | ||
1117 | return s->enabled_capabilities[MIGRATION_CAPABILITY_X_POSTCOPY_RAM]; | |
1118 | } | |
1119 | ||
1120 | bool migrate_auto_converge(void) | |
1121 | { | |
1122 | MigrationState *s; | |
1123 | ||
1124 | s = migrate_get_current(); | |
1125 | ||
1126 | return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE]; | |
1127 | } | |
1128 | ||
1129 | bool migrate_zero_blocks(void) | |
1130 | { | |
1131 | MigrationState *s; | |
1132 | ||
1133 | s = migrate_get_current(); | |
1134 | ||
1135 | return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS]; | |
1136 | } | |
1137 | ||
1138 | bool migrate_use_compression(void) | |
1139 | { | |
1140 | MigrationState *s; | |
1141 | ||
1142 | s = migrate_get_current(); | |
1143 | ||
1144 | return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS]; | |
1145 | } | |
1146 | ||
1147 | int migrate_compress_level(void) | |
1148 | { | |
1149 | MigrationState *s; | |
1150 | ||
1151 | s = migrate_get_current(); | |
1152 | ||
1153 | return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL]; | |
1154 | } | |
1155 | ||
1156 | int migrate_compress_threads(void) | |
1157 | { | |
1158 | MigrationState *s; | |
1159 | ||
1160 | s = migrate_get_current(); | |
1161 | ||
1162 | return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS]; | |
1163 | } | |
1164 | ||
1165 | int migrate_decompress_threads(void) | |
1166 | { | |
1167 | MigrationState *s; | |
1168 | ||
1169 | s = migrate_get_current(); | |
1170 | ||
1171 | return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS]; | |
1172 | } | |
1173 | ||
1174 | bool migrate_use_events(void) | |
1175 | { | |
1176 | MigrationState *s; | |
1177 | ||
1178 | s = migrate_get_current(); | |
1179 | ||
1180 | return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS]; | |
1181 | } | |
1182 | ||
1183 | int migrate_use_xbzrle(void) | |
1184 | { | |
1185 | MigrationState *s; | |
1186 | ||
1187 | s = migrate_get_current(); | |
1188 | ||
1189 | return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE]; | |
1190 | } | |
1191 | ||
1192 | int64_t migrate_xbzrle_cache_size(void) | |
1193 | { | |
1194 | MigrationState *s; | |
1195 | ||
1196 | s = migrate_get_current(); | |
1197 | ||
1198 | return s->xbzrle_cache_size; | |
1199 | } | |
1200 | ||
1201 | /* migration thread support */ | |
1202 | /* | |
1203 | * Something bad happened to the RP stream; mark an error. | |
1204 | * The caller shall print or trace something to indicate why. | |
1205 | */ | |
1206 | static void mark_source_rp_bad(MigrationState *s) | |
1207 | { | |
1208 | s->rp_state.error = true; | |
1209 | } | |
1210 | ||
1211 | static struct rp_cmd_args { | |
1212 | ssize_t len; /* -1 = variable */ | |
1213 | const char *name; | |
1214 | } rp_cmd_args[] = { | |
1215 | [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" }, | |
1216 | [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" }, | |
1217 | [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" }, | |
1218 | [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" }, | |
1219 | [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" }, | |
1220 | [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" }, | |
1221 | }; | |
1222 | ||
1223 | /* | |
1224 | * Process a request for pages received on the return path. | |
1225 | * We're allowed to send more than requested (e.g. to round to our page size) | |
1226 | * and we don't need to send pages that have already been sent. | |
1227 | */ | |
1228 | static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname, | |
1229 | ram_addr_t start, size_t len) | |
1230 | { | |
1231 | long our_host_ps = getpagesize(); | |
1232 | ||
1233 | trace_migrate_handle_rp_req_pages(rbname, start, len); | |
1234 | ||
1235 | /* | |
1236 | * Since we currently insist on matching page sizes, just sanity check | |
1237 | * we're being asked for whole host pages. | |
1238 | */ | |
1239 | if (start & (our_host_ps-1) || | |
1240 | (len & (our_host_ps-1))) { | |
1241 | error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT | |
1242 | " len: %zd", __func__, start, len); | |
1243 | mark_source_rp_bad(ms); | |
1244 | return; | |
1245 | } | |
1246 | ||
1247 | if (ram_save_queue_pages(ms, rbname, start, len)) { | |
1248 | mark_source_rp_bad(ms); | |
1249 | } | |
1250 | } | |
1251 | ||
1252 | /* | |
1253 | * Handles messages sent on the return path towards the source VM | |
1254 | * | |
1255 | */ | |
1256 | static void *source_return_path_thread(void *opaque) | |
1257 | { | |
1258 | MigrationState *ms = opaque; | |
1259 | QEMUFile *rp = ms->rp_state.from_dst_file; | |
1260 | uint16_t header_len, header_type; | |
1261 | const int max_len = 512; | |
1262 | uint8_t buf[max_len]; | |
1263 | uint32_t tmp32, sibling_error; | |
1264 | ram_addr_t start = 0; /* =0 to silence warning */ | |
1265 | size_t len = 0, expected_len; | |
1266 | int res; | |
1267 | ||
1268 | trace_source_return_path_thread_entry(); | |
1269 | while (!ms->rp_state.error && !qemu_file_get_error(rp) && | |
1270 | migration_is_setup_or_active(ms->state)) { | |
1271 | trace_source_return_path_thread_loop_top(); | |
1272 | header_type = qemu_get_be16(rp); | |
1273 | header_len = qemu_get_be16(rp); | |
1274 | ||
1275 | if (header_type >= MIG_RP_MSG_MAX || | |
1276 | header_type == MIG_RP_MSG_INVALID) { | |
1277 | error_report("RP: Received invalid message 0x%04x length 0x%04x", | |
1278 | header_type, header_len); | |
1279 | mark_source_rp_bad(ms); | |
1280 | goto out; | |
1281 | } | |
1282 | ||
1283 | if ((rp_cmd_args[header_type].len != -1 && | |
1284 | header_len != rp_cmd_args[header_type].len) || | |
1285 | header_len > max_len) { | |
1286 | error_report("RP: Received '%s' message (0x%04x) with" | |
1287 | "incorrect length %d expecting %zu", | |
1288 | rp_cmd_args[header_type].name, header_type, header_len, | |
1289 | (size_t)rp_cmd_args[header_type].len); | |
1290 | mark_source_rp_bad(ms); | |
1291 | goto out; | |
1292 | } | |
1293 | ||
1294 | /* We know we've got a valid header by this point */ | |
1295 | res = qemu_get_buffer(rp, buf, header_len); | |
1296 | if (res != header_len) { | |
1297 | error_report("RP: Failed reading data for message 0x%04x" | |
1298 | " read %d expected %d", | |
1299 | header_type, res, header_len); | |
1300 | mark_source_rp_bad(ms); | |
1301 | goto out; | |
1302 | } | |
1303 | ||
1304 | /* OK, we have the message and the data */ | |
1305 | switch (header_type) { | |
1306 | case MIG_RP_MSG_SHUT: | |
1307 | sibling_error = be32_to_cpup((uint32_t *)buf); | |
1308 | trace_source_return_path_thread_shut(sibling_error); | |
1309 | if (sibling_error) { | |
1310 | error_report("RP: Sibling indicated error %d", sibling_error); | |
1311 | mark_source_rp_bad(ms); | |
1312 | } | |
1313 | /* | |
1314 | * We'll let the main thread deal with closing the RP | |
1315 | * we could do a shutdown(2) on it, but we're the only user | |
1316 | * anyway, so there's nothing gained. | |
1317 | */ | |
1318 | goto out; | |
1319 | ||
1320 | case MIG_RP_MSG_PONG: | |
1321 | tmp32 = be32_to_cpup((uint32_t *)buf); | |
1322 | trace_source_return_path_thread_pong(tmp32); | |
1323 | break; | |
1324 | ||
1325 | case MIG_RP_MSG_REQ_PAGES: | |
1326 | start = be64_to_cpup((uint64_t *)buf); | |
1327 | len = be32_to_cpup((uint32_t *)(buf + 8)); | |
1328 | migrate_handle_rp_req_pages(ms, NULL, start, len); | |
1329 | break; | |
1330 | ||
1331 | case MIG_RP_MSG_REQ_PAGES_ID: | |
1332 | expected_len = 12 + 1; /* header + termination */ | |
1333 | ||
1334 | if (header_len >= expected_len) { | |
1335 | start = be64_to_cpup((uint64_t *)buf); | |
1336 | len = be32_to_cpup((uint32_t *)(buf + 8)); | |
1337 | /* Now we expect an idstr */ | |
1338 | tmp32 = buf[12]; /* Length of the following idstr */ | |
1339 | buf[13 + tmp32] = '\0'; | |
1340 | expected_len += tmp32; | |
1341 | } | |
1342 | if (header_len != expected_len) { | |
1343 | error_report("RP: Req_Page_id with length %d expecting %zd", | |
1344 | header_len, expected_len); | |
1345 | mark_source_rp_bad(ms); | |
1346 | goto out; | |
1347 | } | |
1348 | migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len); | |
1349 | break; | |
1350 | ||
1351 | default: | |
1352 | break; | |
1353 | } | |
1354 | } | |
1355 | if (rp && qemu_file_get_error(rp)) { | |
1356 | trace_source_return_path_thread_bad_end(); | |
1357 | mark_source_rp_bad(ms); | |
1358 | } | |
1359 | ||
1360 | trace_source_return_path_thread_end(); | |
1361 | out: | |
1362 | ms->rp_state.from_dst_file = NULL; | |
1363 | qemu_fclose(rp); | |
1364 | return NULL; | |
1365 | } | |
1366 | ||
1367 | static int open_return_path_on_source(MigrationState *ms) | |
1368 | { | |
1369 | ||
1370 | ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->file); | |
1371 | if (!ms->rp_state.from_dst_file) { | |
1372 | return -1; | |
1373 | } | |
1374 | ||
1375 | trace_open_return_path_on_source(); | |
1376 | qemu_thread_create(&ms->rp_state.rp_thread, "return path", | |
1377 | source_return_path_thread, ms, QEMU_THREAD_JOINABLE); | |
1378 | ||
1379 | trace_open_return_path_on_source_continue(); | |
1380 | ||
1381 | return 0; | |
1382 | } | |
1383 | ||
1384 | /* Returns 0 if the RP was ok, otherwise there was an error on the RP */ | |
1385 | static int await_return_path_close_on_source(MigrationState *ms) | |
1386 | { | |
1387 | /* | |
1388 | * If this is a normal exit then the destination will send a SHUT and the | |
1389 | * rp_thread will exit, however if there's an error we need to cause | |
1390 | * it to exit. | |
1391 | */ | |
1392 | if (qemu_file_get_error(ms->file) && ms->rp_state.from_dst_file) { | |
1393 | /* | |
1394 | * shutdown(2), if we have it, will cause it to unblock if it's stuck | |
1395 | * waiting for the destination. | |
1396 | */ | |
1397 | qemu_file_shutdown(ms->rp_state.from_dst_file); | |
1398 | mark_source_rp_bad(ms); | |
1399 | } | |
1400 | trace_await_return_path_close_on_source_joining(); | |
1401 | qemu_thread_join(&ms->rp_state.rp_thread); | |
1402 | trace_await_return_path_close_on_source_close(); | |
1403 | return ms->rp_state.error; | |
1404 | } | |
1405 | ||
1406 | /* | |
1407 | * Switch from normal iteration to postcopy | |
1408 | * Returns non-0 on error | |
1409 | */ | |
1410 | static int postcopy_start(MigrationState *ms, bool *old_vm_running) | |
1411 | { | |
1412 | int ret; | |
1413 | const QEMUSizedBuffer *qsb; | |
1414 | int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
1415 | migrate_set_state(ms, MIGRATION_STATUS_ACTIVE, | |
1416 | MIGRATION_STATUS_POSTCOPY_ACTIVE); | |
1417 | ||
1418 | trace_postcopy_start(); | |
1419 | qemu_mutex_lock_iothread(); | |
1420 | trace_postcopy_start_set_run(); | |
1421 | ||
1422 | qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); | |
1423 | *old_vm_running = runstate_is_running(); | |
1424 | global_state_store(); | |
1425 | ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); | |
1426 | ||
1427 | if (ret < 0) { | |
1428 | goto fail; | |
1429 | } | |
1430 | ||
1431 | /* | |
1432 | * Cause any non-postcopiable, but iterative devices to | |
1433 | * send out their final data. | |
1434 | */ | |
1435 | qemu_savevm_state_complete_precopy(ms->file, true); | |
1436 | ||
1437 | /* | |
1438 | * In the finish-migrate state and with the io-lock held everything should | |
1439 | * be quiet, but we've potentially still got dirty pages and we | |
1440 | * need to tell the destination to throw away any pages it's already | |
1441 | * received that are now dirty. | |
1442 | */ | |
1443 | if (ram_postcopy_send_discard_bitmap(ms)) { | |
1444 | error_report("postcopy send discard bitmap failed"); | |
1445 | goto fail; | |
1446 | } | |
1447 | ||
1448 | /* | |
1449 | * send rest of state - note things that are doing postcopy | |
1450 | * will notice we're in POSTCOPY_ACTIVE and not actually | |
1451 | * wrap their state up here | |
1452 | */ | |
1453 | qemu_file_set_rate_limit(ms->file, INT64_MAX); | |
1454 | /* Ping just for debugging, helps line traces up */ | |
1455 | qemu_savevm_send_ping(ms->file, 2); | |
1456 | ||
1457 | /* | |
1458 | * While loading the device state we may trigger page transfer | |
1459 | * requests and the fd must be free to process those, and thus | |
1460 | * the destination must read the whole device state off the fd before | |
1461 | * it starts processing it. Unfortunately the ad-hoc migration format | |
1462 | * doesn't allow the destination to know the size to read without fully | |
1463 | * parsing it through each device's load-state code (especially the open | |
1464 | * coded devices that use get/put). | |
1465 | * So we wrap the device state up in a package with a length at the start; | |
1466 | * to do this we use a qemu_buf to hold the whole of the device state. | |
1467 | */ | |
1468 | QEMUFile *fb = qemu_bufopen("w", NULL); | |
1469 | if (!fb) { | |
1470 | error_report("Failed to create buffered file"); | |
1471 | goto fail; | |
1472 | } | |
1473 | ||
1474 | /* | |
1475 | * Make sure the receiver can get incoming pages before we send the rest | |
1476 | * of the state | |
1477 | */ | |
1478 | qemu_savevm_send_postcopy_listen(fb); | |
1479 | ||
1480 | qemu_savevm_state_complete_precopy(fb, false); | |
1481 | qemu_savevm_send_ping(fb, 3); | |
1482 | ||
1483 | qemu_savevm_send_postcopy_run(fb); | |
1484 | ||
1485 | /* <><> end of stuff going into the package */ | |
1486 | qsb = qemu_buf_get(fb); | |
1487 | ||
1488 | /* Now send that blob */ | |
1489 | if (qemu_savevm_send_packaged(ms->file, qsb)) { | |
1490 | goto fail_closefb; | |
1491 | } | |
1492 | qemu_fclose(fb); | |
1493 | ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop; | |
1494 | ||
1495 | qemu_mutex_unlock_iothread(); | |
1496 | ||
1497 | /* | |
1498 | * Although this ping is just for debug, it could potentially be | |
1499 | * used for getting a better measurement of downtime at the source. | |
1500 | */ | |
1501 | qemu_savevm_send_ping(ms->file, 4); | |
1502 | ||
1503 | ret = qemu_file_get_error(ms->file); | |
1504 | if (ret) { | |
1505 | error_report("postcopy_start: Migration stream errored"); | |
1506 | migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE, | |
1507 | MIGRATION_STATUS_FAILED); | |
1508 | } | |
1509 | ||
1510 | return ret; | |
1511 | ||
1512 | fail_closefb: | |
1513 | qemu_fclose(fb); | |
1514 | fail: | |
1515 | migrate_set_state(ms, MIGRATION_STATUS_POSTCOPY_ACTIVE, | |
1516 | MIGRATION_STATUS_FAILED); | |
1517 | qemu_mutex_unlock_iothread(); | |
1518 | return -1; | |
1519 | } | |
1520 | ||
1521 | /** | |
1522 | * migration_completion: Used by migration_thread when there's not much left. | |
1523 | * The caller 'breaks' the loop when this returns. | |
1524 | * | |
1525 | * @s: Current migration state | |
1526 | * @current_active_state: The migration state we expect to be in | |
1527 | * @*old_vm_running: Pointer to old_vm_running flag | |
1528 | * @*start_time: Pointer to time to update | |
1529 | */ | |
1530 | static void migration_completion(MigrationState *s, int current_active_state, | |
1531 | bool *old_vm_running, | |
1532 | int64_t *start_time) | |
1533 | { | |
1534 | int ret; | |
1535 | ||
1536 | if (s->state == MIGRATION_STATUS_ACTIVE) { | |
1537 | qemu_mutex_lock_iothread(); | |
1538 | *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
1539 | qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER); | |
1540 | *old_vm_running = runstate_is_running(); | |
1541 | ret = global_state_store(); | |
1542 | ||
1543 | if (!ret) { | |
1544 | ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE); | |
1545 | if (ret >= 0) { | |
1546 | qemu_file_set_rate_limit(s->file, INT64_MAX); | |
1547 | qemu_savevm_state_complete_precopy(s->file, false); | |
1548 | } | |
1549 | } | |
1550 | qemu_mutex_unlock_iothread(); | |
1551 | ||
1552 | if (ret < 0) { | |
1553 | goto fail; | |
1554 | } | |
1555 | } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { | |
1556 | trace_migration_completion_postcopy_end(); | |
1557 | ||
1558 | qemu_savevm_state_complete_postcopy(s->file); | |
1559 | trace_migration_completion_postcopy_end_after_complete(); | |
1560 | } | |
1561 | ||
1562 | /* | |
1563 | * If rp was opened we must clean up the thread before | |
1564 | * cleaning everything else up (since if there are no failures | |
1565 | * it will wait for the destination to send its status in | |
1566 | * a SHUT command). | |
1567 | * Postcopy opens rp if enabled (even if it's not activated). | |
1568 | */ | |
1569 | if (migrate_postcopy_ram()) { | |
1570 | int rp_error; | |
1571 | trace_migration_completion_postcopy_end_before_rp(); | |
1572 | rp_error = await_return_path_close_on_source(s); | |
1573 | trace_migration_completion_postcopy_end_after_rp(rp_error); | |
1574 | if (rp_error) { | |
1575 | goto fail; | |
1576 | } | |
1577 | } | |
1578 | ||
1579 | if (qemu_file_get_error(s->file)) { | |
1580 | trace_migration_completion_file_err(); | |
1581 | goto fail; | |
1582 | } | |
1583 | ||
1584 | migrate_set_state(s, current_active_state, MIGRATION_STATUS_COMPLETED); | |
1585 | return; | |
1586 | ||
1587 | fail: | |
1588 | migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED); | |
1589 | } | |
1590 | ||
1591 | /* | |
1592 | * Master migration thread on the source VM. | |
1593 | * It drives the migration and pumps the data down the outgoing channel. | |
1594 | */ | |
1595 | static void *migration_thread(void *opaque) | |
1596 | { | |
1597 | MigrationState *s = opaque; | |
1598 | /* Used by the bandwidth calcs, updated later */ | |
1599 | int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
1600 | int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST); | |
1601 | int64_t initial_bytes = 0; | |
1602 | int64_t max_size = 0; | |
1603 | int64_t start_time = initial_time; | |
1604 | int64_t end_time; | |
1605 | bool old_vm_running = false; | |
1606 | bool entered_postcopy = false; | |
1607 | /* The active state we expect to be in; ACTIVE or POSTCOPY_ACTIVE */ | |
1608 | enum MigrationStatus current_active_state = MIGRATION_STATUS_ACTIVE; | |
1609 | ||
1610 | rcu_register_thread(); | |
1611 | ||
1612 | qemu_savevm_state_header(s->file); | |
1613 | ||
1614 | if (migrate_postcopy_ram()) { | |
1615 | /* Now tell the dest that it should open its end so it can reply */ | |
1616 | qemu_savevm_send_open_return_path(s->file); | |
1617 | ||
1618 | /* And send a ping that will make debugging easier */ | |
1619 | qemu_savevm_send_ping(s->file, 1); | |
1620 | ||
1621 | /* | |
1622 | * Tell the destination that we *might* want to do postcopy later; | |
1623 | * if the other end can't do postcopy it should fail now, nice and | |
1624 | * early. | |
1625 | */ | |
1626 | qemu_savevm_send_postcopy_advise(s->file); | |
1627 | } | |
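| /* Pings such as the one above (and 3 and 4 in postcopy_start) are | |
|  * intended to be answered by the destination over the return path, | |
|  * giving cheap round-trip markers when debugging the stream. | |
|  */ | |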
1628 | ||
1629 | qemu_savevm_state_begin(s->file, &s->params); | |
1630 | ||
1631 | s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start; | |
1632 | current_active_state = MIGRATION_STATUS_ACTIVE; | |
1633 | migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE); | |
1634 | ||
1635 | trace_migration_thread_setup_complete(); | |
1636 | ||
1637 | while (s->state == MIGRATION_STATUS_ACTIVE || | |
1638 | s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) { | |
1639 | int64_t current_time; | |
1640 | uint64_t pending_size; | |
1641 | ||
1642 | if (!qemu_file_rate_limit(s->file)) { | |
1643 | uint64_t pend_post, pend_nonpost; | |
1644 | ||
1645 | qemu_savevm_state_pending(s->file, max_size, &pend_nonpost, | |
1646 | &pend_post); | |
1647 | pending_size = pend_nonpost + pend_post; | |
1648 | trace_migrate_pending(pending_size, max_size, | |
1649 | pend_post, pend_nonpost); | |
1650 | if (pending_size && pending_size >= max_size) { | |
1651 | /* Still a significant amount to transfer */ | |
1652 | ||
1653 | current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
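| /* Switch to postcopy only when: postcopy is enabled, we are not already | |
|  * in the postcopy phase, the non-postcopiable (device) state fits within | |
|  * max_size, and the user has asked for the switch (start_postcopy, | |
|  * typically set by the migrate-start-postcopy command). | |
|  */ | |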
1654 | if (migrate_postcopy_ram() && | |
1655 | s->state != MIGRATION_STATUS_POSTCOPY_ACTIVE && | |
1656 | pend_nonpost <= max_size && | |
1657 | atomic_read(&s->start_postcopy)) { | |
1658 | ||
1659 | if (!postcopy_start(s, &old_vm_running)) { | |
1660 | current_active_state = MIGRATION_STATUS_POSTCOPY_ACTIVE; | |
1661 | entered_postcopy = true; | |
1662 | } | |
1663 | ||
1664 | continue; | |
1665 | } | |
1666 | /* Just another iteration step */ | |
1667 | qemu_savevm_state_iterate(s->file, entered_postcopy); | |
1668 | } else { | |
1669 | trace_migration_thread_low_pending(pending_size); | |
1670 | migration_completion(s, current_active_state, | |
1671 | &old_vm_running, &start_time); | |
1672 | break; | |
1673 | } | |
1674 | } | |
1675 | ||
1676 | if (qemu_file_get_error(s->file)) { | |
1677 | migrate_set_state(s, current_active_state, MIGRATION_STATUS_FAILED); | |
1678 | trace_migration_thread_file_err(); | |
1679 | break; | |
1680 | } | |
1681 | current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
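| /* Bandwidth below is in bytes per millisecond (time_spent is in ms). | |
|  * migrate_max_downtime() returns nanoseconds, so dividing by 1000000 | |
|  * converts it to ms; max_size is then the number of bytes we can expect | |
|  * to transfer within the allowed downtime. s->mbps is megabits/second. | |
|  */ | |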
1682 | if (current_time >= initial_time + BUFFER_DELAY) { | |
1683 | uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes; | |
1684 | uint64_t time_spent = current_time - initial_time; | |
1685 | double bandwidth = transferred_bytes / time_spent; | |
1686 | max_size = bandwidth * migrate_max_downtime() / 1000000; | |
1687 | ||
1688 | s->mbps = time_spent ? (((double) transferred_bytes * 8.0) / | |
1689 | ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1; | |
1690 | ||
1691 | trace_migrate_transferred(transferred_bytes, time_spent, | |
1692 | bandwidth, max_size); | |
1693 | /* If we haven't sent anything, we don't want to recalculate the | |
1694 | expected downtime; 10000 bytes is a small enough threshold. */ | |
1695 | if (s->dirty_bytes_rate && transferred_bytes > 10000) { | |
1696 | s->expected_downtime = s->dirty_bytes_rate / bandwidth; | |
1697 | } | |
1698 | ||
1699 | qemu_file_reset_rate_limit(s->file); | |
1700 | initial_time = current_time; | |
1701 | initial_bytes = qemu_ftell(s->file); | |
1702 | } | |
1703 | if (qemu_file_rate_limit(s->file)) { | |
1704 | /* usleep expects microseconds */ | |
1705 | g_usleep((initial_time + BUFFER_DELAY - current_time)*1000); | |
1706 | } | |
1707 | } | |
1708 | ||
1709 | trace_migration_thread_after_loop(); | |
1710 | /* If we enabled cpu throttling for auto-converge, turn it off. */ | |
1711 | cpu_throttle_stop(); | |
1712 | end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME); | |
1713 | ||
1714 | qemu_mutex_lock_iothread(); | |
1715 | qemu_savevm_state_cleanup(); | |
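| /* Epilogue: on COMPLETED, record the final statistics and move the | |
|  * runstate to POSTMIGRATE; otherwise restart the guest only if it was | |
|  * running before and execution was never handed to the destination | |
|  * (i.e. postcopy was never entered). | |
|  */ | |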
1716 | if (s->state == MIGRATION_STATUS_COMPLETED) { | |
1717 | uint64_t transferred_bytes = qemu_ftell(s->file); | |
1718 | s->total_time = end_time - s->total_time; | |
1719 | if (!entered_postcopy) { | |
1720 | s->downtime = end_time - start_time; | |
1721 | } | |
1722 | if (s->total_time) { | |
1723 | s->mbps = (((double) transferred_bytes * 8.0) / | |
1724 | ((double) s->total_time)) / 1000; | |
1725 | } | |
1726 | runstate_set(RUN_STATE_POSTMIGRATE); | |
1727 | } else { | |
1728 | if (old_vm_running && !entered_postcopy) { | |
1729 | vm_start(); | |
1730 | } | |
1731 | } | |
1732 | qemu_bh_schedule(s->cleanup_bh); | |
1733 | qemu_mutex_unlock_iothread(); | |
1734 | ||
1735 | rcu_unregister_thread(); | |
1736 | return NULL; | |
1737 | } | |
1738 | ||
1739 | void migrate_fd_connect(MigrationState *s) | |
1740 | { | |
1741 | /* This is a best first approximation; convert max_downtime from ns to ms. */ | |
1742 | s->expected_downtime = max_downtime/1000000; | |
1743 | s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s); | |
1744 | ||
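| /* The rate limit is a per-window byte budget: bandwidth_limit (bytes per | |
|  * second) divided by XFER_LIMIT_RATIO gives the bytes allowed in each | |
|  * BUFFER_DELAY window of the migration_thread loop. | |
|  */ | |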
1745 | qemu_file_set_rate_limit(s->file, | |
1746 | s->bandwidth_limit / XFER_LIMIT_RATIO); | |
1747 | ||
1748 | /* Notify before starting migration thread */ | |
1749 | notifier_list_notify(&migration_state_notifiers, s); | |
1750 | ||
1751 | /* | |
1752 | * Open the return path; currently for postcopy but other things might | |
1753 | * also want it. | |
1754 | */ | |
1755 | if (migrate_postcopy_ram()) { | |
1756 | if (open_return_path_on_source(s)) { | |
1757 | error_report("Unable to open return-path for postcopy"); | |
1758 | migrate_set_state(s, MIGRATION_STATUS_SETUP, | |
1759 | MIGRATION_STATUS_FAILED); | |
1760 | migrate_fd_cleanup(s); | |
1761 | return; | |
1762 | } | |
1763 | } | |
1764 | ||
1765 | migrate_compress_threads_create(); | |
1766 | qemu_thread_create(&s->thread, "migration", migration_thread, s, | |
1767 | QEMU_THREAD_JOINABLE); | |
1768 | s->migration_thread_running = true; | |
1769 | } | |
1770 | ||
1771 | PostcopyState postcopy_state_get(void) | |
1772 | { | |
1773 | return atomic_mb_read(&incoming_postcopy_state); | |
1774 | } | |
1775 | ||
1776 | /* Set the state and return the old state */ | |
1777 | PostcopyState postcopy_state_set(PostcopyState new_state) | |
1778 | { | |
1779 | return atomic_xchg(&incoming_postcopy_state, new_state); | |
1780 | } | |
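| /* | |
|  * Illustrative sketch (not part of the original file): callers on the | |
|  * incoming side can use the pair above to advance the postcopy state | |
|  * machine atomically and spot unexpected transitions, e.g.: | |
|  * | |
|  *     PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_ADVISE); | |
|  *     if (old != POSTCOPY_INCOMING_NONE) { | |
|  *         error_report("postcopy advise received in unexpected state %d", old); | |
|  *     } | |
|  * | |
|  * The POSTCOPY_INCOMING_* values are assumed to be the enum defined in | |
|  * migration/migration.h. | |
|  */ | |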
1781 |