Git Repo - qemu.git/blame - migration/ram.c
migration/throttle: Add cpu-throttle-tailslow migration parameter
56e93d26
JQ
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
76cc7b58
JQ
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <[email protected]>
56e93d26
JQ
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
e688df6b 28
1393a485 29#include "qemu/osdep.h"
33c11879 30#include "cpu.h"
f348b6d1 31#include "qemu/cutils.h"
56e93d26
JQ
32#include "qemu/bitops.h"
33#include "qemu/bitmap.h"
7205c9ec 34#include "qemu/main-loop.h"
709e3fe8 35#include "xbzrle.h"
7b1e1a22 36#include "ram.h"
6666c96a 37#include "migration.h"
f2a8f0a6 38#include "migration/register.h"
7b1e1a22 39#include "migration/misc.h"
08a0aee1 40#include "qemu-file.h"
be07b0ac 41#include "postcopy-ram.h"
53d37d36 42#include "page_cache.h"
56e93d26 43#include "qemu/error-report.h"
e688df6b 44#include "qapi/error.h"
ab7cbb0b 45#include "qapi/qapi-types-migration.h"
9af23989 46#include "qapi/qapi-events-migration.h"
8acabf69 47#include "qapi/qmp/qerror.h"
56e93d26 48#include "trace.h"
56e93d26 49#include "exec/ram_addr.h"
f9494614 50#include "exec/target_page.h"
56e93d26 51#include "qemu/rcu_queue.h"
a91246c9 52#include "migration/colo.h"
53d37d36 53#include "block.h"
af8b7d2b 54#include "sysemu/sysemu.h"
edd090c7 55#include "savevm.h"
b9ee2f7d 56#include "qemu/iov.h"
d32ca5ad 57#include "multifd.h"
56e93d26 58
56e93d26
JQ
59/***********************************************************/
60/* ram save/restore */
61
bb890ed5
JQ
62/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
63 * worked for pages that were filled with the same char. We switched
64 * it to only search for the zero value. And to avoid confusion with
65 * RAM_SAVE_FLAG_COMPRESS_PAGE we renamed it.
66 */
67
56e93d26 68#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
bb890ed5 69#define RAM_SAVE_FLAG_ZERO 0x02
56e93d26
JQ
70#define RAM_SAVE_FLAG_MEM_SIZE 0x04
71#define RAM_SAVE_FLAG_PAGE 0x08
72#define RAM_SAVE_FLAG_EOS 0x10
73#define RAM_SAVE_FLAG_CONTINUE 0x20
74#define RAM_SAVE_FLAG_XBZRLE 0x40
75/* 0x80 is reserved in migration.h start with 0x100 next */
76#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
77
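/*
 * Illustration (a sketch, not part of the original file): the flags above
 * travel in the low bits of the be64 "offset" word that precedes each page
 * on the wire, so a reader can split that word back into a page-aligned
 * address and the flag set.  Only the RAM_SAVE_FLAG_* macros and
 * TARGET_PAGE_MASK come from this file; the function name is hypothetical.
 */
static void example_split_page_header(uint64_t header_word)
{
    ram_addr_t addr = header_word & TARGET_PAGE_MASK;    /* page address */
    uint64_t flags = header_word & ~TARGET_PAGE_MASK;    /* RAM_SAVE_FLAG_* */

    if (flags & RAM_SAVE_FLAG_ZERO) {
        /* a single fill byte follows instead of full page data */
    } else if (flags & RAM_SAVE_FLAG_PAGE) {
        /* TARGET_PAGE_SIZE bytes of raw page data follow */
    }
    (void)addr;
}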
56e93d26
JQ
78static inline bool is_zero_range(uint8_t *p, uint64_t size)
79{
a1febc49 80 return buffer_is_zero(p, size);
56e93d26
JQ
81}
82
9360447d
JQ
83XBZRLECacheStats xbzrle_counters;
84
56e93d26
JQ
85/* struct contains XBZRLE cache and a static page
86 used by the compression */
87static struct {
88 /* buffer used for XBZRLE encoding */
89 uint8_t *encoded_buf;
90 /* buffer for storing page content */
91 uint8_t *current_buf;
92 /* Cache for XBZRLE, Protected by lock. */
93 PageCache *cache;
94 QemuMutex lock;
c00e0928
JQ
95 /* it will store a page full of zeros */
96 uint8_t *zero_target_page;
f265e0e4
JQ
97 /* buffer used for XBZRLE decoding */
98 uint8_t *decoded_buf;
56e93d26
JQ
99} XBZRLE;
100
56e93d26
JQ
101static void XBZRLE_cache_lock(void)
102{
103 if (migrate_use_xbzrle())
104 qemu_mutex_lock(&XBZRLE.lock);
105}
106
107static void XBZRLE_cache_unlock(void)
108{
109 if (migrate_use_xbzrle())
110 qemu_mutex_unlock(&XBZRLE.lock);
111}
112
3d0684b2
JQ
113/**
114 * xbzrle_cache_resize: resize the xbzrle cache
115 *
116 * This function is called from qmp_migrate_set_cache_size in the main
117 * thread, possibly while a migration is in progress. A running
118 * migration may be using the cache and might finish during this call,
119 * hence changes to the cache are protected by XBZRLE.lock().
120 *
c9dede2d 121 * Returns 0 for success or -1 for error
3d0684b2
JQ
122 *
123 * @new_size: new cache size
8acabf69 124 * @errp: set *errp if the check failed, with reason
56e93d26 125 */
c9dede2d 126int xbzrle_cache_resize(int64_t new_size, Error **errp)
56e93d26
JQ
127{
128 PageCache *new_cache;
c9dede2d 129 int64_t ret = 0;
56e93d26 130
8acabf69
JQ
131 /* Check for truncation */
132 if (new_size != (size_t)new_size) {
133 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
134 "exceeding address space");
135 return -1;
136 }
137
2a313e5c
JQ
138 if (new_size == migrate_xbzrle_cache_size()) {
139 /* nothing to do */
c9dede2d 140 return 0;
2a313e5c
JQ
141 }
142
56e93d26
JQ
143 XBZRLE_cache_lock();
144
145 if (XBZRLE.cache != NULL) {
80f8dfde 146 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
56e93d26 147 if (!new_cache) {
56e93d26
JQ
148 ret = -1;
149 goto out;
150 }
151
152 cache_fini(XBZRLE.cache);
153 XBZRLE.cache = new_cache;
154 }
56e93d26
JQ
155out:
156 XBZRLE_cache_unlock();
157 return ret;
158}
159
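/*
 * Usage sketch (not part of the original file): how a caller such as the
 * cache-size parameter handler could invoke the resize.  The 512 MB value
 * and the function name are arbitrary examples.
 */
static void example_resize_xbzrle_cache(void)
{
    Error *err = NULL;

    if (xbzrle_cache_resize(512LL * 1024 * 1024, &err) < 0) {
        error_report_err(err);
    }
}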
fbd162e6
YK
160static bool ramblock_is_ignored(RAMBlock *block)
161{
162 return !qemu_ram_is_migratable(block) ||
163 (migrate_ignore_shared() && qemu_ram_is_shared(block));
164}
165
b895de50 166/* Should be holding either ram_list.mutex, or the RCU lock. */
fbd162e6
YK
167#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
168 INTERNAL_RAMBLOCK_FOREACH(block) \
169 if (ramblock_is_ignored(block)) {} else
170
b895de50 171#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
343f632c 172 INTERNAL_RAMBLOCK_FOREACH(block) \
b895de50
CLG
173 if (!qemu_ram_is_migratable(block)) {} else
174
343f632c
DDAG
175#undef RAMBLOCK_FOREACH
176
fbd162e6
YK
177int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
178{
179 RAMBlock *block;
180 int ret = 0;
181
89ac5a1d
DDAG
182 RCU_READ_LOCK_GUARD();
183
fbd162e6
YK
184 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
185 ret = func(block, opaque);
186 if (ret) {
187 break;
188 }
189 }
fbd162e6
YK
190 return ret;
191}
192
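/*
 * Usage sketch (not part of the original file): foreach_not_ignored_block()
 * visits every migratable, non-ignored RAMBlock and stops early if the
 * callback returns non-zero.  The helpers below are hypothetical and simply
 * sum up used_length.
 */
static int example_add_block_size(RAMBlock *rb, void *opaque)
{
    *(uint64_t *)opaque += rb->used_length;
    return 0;
}

static uint64_t example_total_not_ignored_bytes(void)
{
    uint64_t total = 0;

    foreach_not_ignored_block(example_add_block_size, &total);
    return total;
}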
f9494614
AP
193static void ramblock_recv_map_init(void)
194{
195 RAMBlock *rb;
196
fbd162e6 197 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
198 assert(!rb->receivedmap);
199 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
200 }
201}
202
203int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
204{
205 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
206 rb->receivedmap);
207}
208
1cba9f6e
DDAG
209bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
210{
211 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
212}
213
f9494614
AP
214void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
215{
216 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
217}
218
219void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
220 size_t nr)
221{
222 bitmap_set_atomic(rb->receivedmap,
223 ramblock_recv_bitmap_offset(host_addr, rb),
224 nr);
225}
226
a335debb
PX
227#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
228
229/*
230 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
231 *
232 * Returns the number of bytes sent (>0) on success, or <0 on error.
233 */
234int64_t ramblock_recv_bitmap_send(QEMUFile *file,
235 const char *block_name)
236{
237 RAMBlock *block = qemu_ram_block_by_name(block_name);
238 unsigned long *le_bitmap, nbits;
239 uint64_t size;
240
241 if (!block) {
242 error_report("%s: invalid block name: %s", __func__, block_name);
243 return -1;
244 }
245
246 nbits = block->used_length >> TARGET_PAGE_BITS;
247
248 /*
249 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
250 * machines we may need 4 more bytes for padding (see below
251 * comment). So extend it a bit before hand.
252 */
253 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
254
255 /*
256 * Always use little endian when sending the bitmap. This is
257 * required so that the stream stays valid when source and destination
258 * VMs do not share the same endianness. (Note: big endian won't work.)
259 */
260 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
261
262 /* Size of the bitmap, in bytes */
a725ef9f 263 size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
264
265 /*
266 * size is always aligned to 8 bytes for 64bit machines, but it
267 * may not be true for 32bit machines. We need this padding to
268 * make sure the migration can survive even between 32bit and
269 * 64bit machines.
270 */
271 size = ROUND_UP(size, 8);
272
273 qemu_put_be64(file, size);
274 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
275 /*
276 * Mark as an end, in case the middle part is screwed up due to
277 * some "mysterious" reason.
278 */
279 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
280 qemu_fflush(file);
281
bf269906 282 g_free(le_bitmap);
a335debb
PX
283
284 if (qemu_file_get_error(file)) {
285 return qemu_file_get_error(file);
286 }
287
288 return size + sizeof(size);
289}
290
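/*
 * Sketch of a matching reader (not part of the original file): it consumes
 * the be64 size, the little-endian bitmap padded to 8 bytes, and the ending
 * marker written above.  Error handling is reduced to the minimum and the
 * function name is hypothetical.
 */
static int example_recv_bitmap(QEMUFile *file, RAMBlock *block)
{
    unsigned long nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = ROUND_UP(DIV_ROUND_UP(nbits, 8), 8);
    unsigned long *le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
    uint64_t size = qemu_get_be64(file);
    int ret = 0;

    if (size != local_size) {
        ret = -EINVAL;
    } else if (qemu_get_buffer(file, (uint8_t *)le_bitmap, size) != size) {
        ret = -EIO;
    } else if (qemu_get_be64(file) != RAMBLOCK_RECV_BITMAP_ENDING) {
        ret = -EINVAL;
    }
    /* a real reader would now convert le_bitmap back with bitmap_from_le() */
    g_free(le_bitmap);
    return ret;
}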
ec481c6c
JQ
291/*
292 * An outstanding page request, on the source, having been received
293 * and queued
294 */
295struct RAMSrcPageRequest {
296 RAMBlock *rb;
297 hwaddr offset;
298 hwaddr len;
299
300 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
301};
302
6f37bb8b
JQ
303/* State of RAM for migration */
304struct RAMState {
204b88b8
JQ
305 /* QEMUFile used for this migration */
306 QEMUFile *f;
6f37bb8b
JQ
307 /* Last block that we have visited searching for dirty pages */
308 RAMBlock *last_seen_block;
309 /* Last block from where we have sent data */
310 RAMBlock *last_sent_block;
269ace29
JQ
311 /* Last dirty target page we have sent */
312 ram_addr_t last_page;
6f37bb8b
JQ
313 /* last ram version we have seen */
314 uint32_t last_version;
315 /* We are in the first round */
316 bool ram_bulk_stage;
6eeb63f7
WW
317 /* The free page optimization is enabled */
318 bool fpo_enabled;
8d820d6f
JQ
319 /* How many times we have dirty too many pages */
320 int dirty_rate_high_cnt;
f664da80
JQ
321 /* these variables are used for bitmap sync */
322 /* last time we did a full bitmap_sync */
323 int64_t time_last_bitmap_sync;
eac74159 324 /* bytes transferred at start_time */
c4bdf0cf 325 uint64_t bytes_xfer_prev;
a66cd90c 326 /* number of dirty pages since start_time */
68908ed6 327 uint64_t num_dirty_pages_period;
b5833fde
JQ
328 /* xbzrle misses since the beginning of the period */
329 uint64_t xbzrle_cache_miss_prev;
76e03000
XG
330
331 /* compression statistics since the beginning of the period */
332 /* amount of count that no free thread to compress data */
333 uint64_t compress_thread_busy_prev;
334 /* amount bytes after compression */
335 uint64_t compressed_size_prev;
336 /* amount of compressed pages */
337 uint64_t compress_pages_prev;
338
be8b02ed
XG
339 /* total handled target pages at the beginning of period */
340 uint64_t target_page_count_prev;
341 /* total handled target pages since start */
342 uint64_t target_page_count;
9360447d 343 /* number of dirty bits in the bitmap */
2dfaf12e 344 uint64_t migration_dirty_pages;
386a907b 345 /* Protects modification of the bitmap and migration dirty pages */
108cfae0 346 QemuMutex bitmap_mutex;
68a098f3
JQ
347 /* The RAMBlock used in the last src_page_requests */
348 RAMBlock *last_req_rb;
ec481c6c
JQ
349 /* Queue of outstanding page requests from the destination */
350 QemuMutex src_page_req_mutex;
b58deb34 351 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
6f37bb8b
JQ
352};
353typedef struct RAMState RAMState;
354
53518d94 355static RAMState *ram_state;
6f37bb8b 356
bd227060
WW
357static NotifierWithReturnList precopy_notifier_list;
358
359void precopy_infrastructure_init(void)
360{
361 notifier_with_return_list_init(&precopy_notifier_list);
362}
363
364void precopy_add_notifier(NotifierWithReturn *n)
365{
366 notifier_with_return_list_add(&precopy_notifier_list, n);
367}
368
369void precopy_remove_notifier(NotifierWithReturn *n)
370{
371 notifier_with_return_remove(n);
372}
373
374int precopy_notify(PrecopyNotifyReason reason, Error **errp)
375{
376 PrecopyNotifyData pnd;
377 pnd.reason = reason;
378 pnd.errp = errp;
379
380 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
381}
382
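/*
 * Usage sketch (not part of the original file): a subsystem that wants to
 * react to precopy events registers a NotifierWithReturn; virtio-balloon's
 * free page hinting is an in-tree user of this hook.  The names below are
 * hypothetical.
 */
static int example_precopy_notify(NotifierWithReturn *n, void *data)
{
    PrecopyNotifyData *pnd = data;

    if (pnd->reason == PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC) {
        /* e.g. pause feeding free-page hints while the bitmap is synced */
    }
    return 0;   /* a non-zero return is reported back to precopy_notify() */
}

static NotifierWithReturn example_precopy_notifier = {
    .notify = example_precopy_notify,
};

/* during setup:  precopy_add_notifier(&example_precopy_notifier); */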
6eeb63f7
WW
383void precopy_enable_free_page_optimization(void)
384{
385 if (!ram_state) {
386 return;
387 }
388
389 ram_state->fpo_enabled = true;
390}
391
9edabd4d 392uint64_t ram_bytes_remaining(void)
2f4fde93 393{
bae416e5
DDAG
394 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
395 0;
2f4fde93
JQ
396}
397
9360447d 398MigrationStats ram_counters;
96506894 399
b8fb8cb7
DDAG
400/* used by the search for pages to send */
401struct PageSearchStatus {
402 /* Current block being searched */
403 RAMBlock *block;
a935e30f
JQ
404 /* Current page to search from */
405 unsigned long page;
b8fb8cb7
DDAG
406 /* Set once we wrap around */
407 bool complete_round;
408};
409typedef struct PageSearchStatus PageSearchStatus;
410
76e03000
XG
411CompressionStats compression_counters;
412
56e93d26 413struct CompressParam {
56e93d26 414 bool done;
90e56fb4 415 bool quit;
5e5fdcff 416 bool zero_page;
56e93d26
JQ
417 QEMUFile *file;
418 QemuMutex mutex;
419 QemuCond cond;
420 RAMBlock *block;
421 ram_addr_t offset;
34ab9e97
XG
422
423 /* internally used fields */
dcaf446e 424 z_stream stream;
34ab9e97 425 uint8_t *originbuf;
56e93d26
JQ
426};
427typedef struct CompressParam CompressParam;
428
429struct DecompressParam {
73a8912b 430 bool done;
90e56fb4 431 bool quit;
56e93d26
JQ
432 QemuMutex mutex;
433 QemuCond cond;
434 void *des;
d341d9f3 435 uint8_t *compbuf;
56e93d26 436 int len;
797ca154 437 z_stream stream;
56e93d26
JQ
438};
439typedef struct DecompressParam DecompressParam;
440
441static CompressParam *comp_param;
442static QemuThread *compress_threads;
443/* comp_done_cond is used to wake up the migration thread when
444 * one of the compression threads has finished the compression.
445 * comp_done_lock is used to co-work with comp_done_cond.
446 */
0d9f9a5c
LL
447static QemuMutex comp_done_lock;
448static QemuCond comp_done_cond;
56e93d26
JQ
449/* The empty QEMUFileOps will be used by file in CompressParam */
450static const QEMUFileOps empty_ops = { };
451
34ab9e97 452static QEMUFile *decomp_file;
56e93d26
JQ
453static DecompressParam *decomp_param;
454static QemuThread *decompress_threads;
73a8912b
LL
455static QemuMutex decomp_done_lock;
456static QemuCond decomp_done_cond;
56e93d26 457
5e5fdcff 458static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 459 ram_addr_t offset, uint8_t *source_buf);
56e93d26
JQ
460
461static void *do_data_compress(void *opaque)
462{
463 CompressParam *param = opaque;
a7a9a88f
LL
464 RAMBlock *block;
465 ram_addr_t offset;
5e5fdcff 466 bool zero_page;
56e93d26 467
a7a9a88f 468 qemu_mutex_lock(&param->mutex);
90e56fb4 469 while (!param->quit) {
a7a9a88f
LL
470 if (param->block) {
471 block = param->block;
472 offset = param->offset;
473 param->block = NULL;
474 qemu_mutex_unlock(&param->mutex);
475
5e5fdcff
XG
476 zero_page = do_compress_ram_page(param->file, &param->stream,
477 block, offset, param->originbuf);
a7a9a88f 478
0d9f9a5c 479 qemu_mutex_lock(&comp_done_lock);
a7a9a88f 480 param->done = true;
5e5fdcff 481 param->zero_page = zero_page;
0d9f9a5c
LL
482 qemu_cond_signal(&comp_done_cond);
483 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
484
485 qemu_mutex_lock(&param->mutex);
486 } else {
56e93d26
JQ
487 qemu_cond_wait(&param->cond, &param->mutex);
488 }
56e93d26 489 }
a7a9a88f 490 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
491
492 return NULL;
493}
494
f0afa331 495static void compress_threads_save_cleanup(void)
56e93d26
JQ
496{
497 int i, thread_count;
498
05306935 499 if (!migrate_use_compression() || !comp_param) {
56e93d26
JQ
500 return;
501 }
05306935 502
56e93d26
JQ
503 thread_count = migrate_compress_threads();
504 for (i = 0; i < thread_count; i++) {
dcaf446e
XG
505 /*
506 * we use it as an indicator which shows if the thread is
507 * properly initialized or not
508 */
509 if (!comp_param[i].file) {
510 break;
511 }
05306935
FL
512
513 qemu_mutex_lock(&comp_param[i].mutex);
514 comp_param[i].quit = true;
515 qemu_cond_signal(&comp_param[i].cond);
516 qemu_mutex_unlock(&comp_param[i].mutex);
517
56e93d26 518 qemu_thread_join(compress_threads + i);
56e93d26
JQ
519 qemu_mutex_destroy(&comp_param[i].mutex);
520 qemu_cond_destroy(&comp_param[i].cond);
dcaf446e 521 deflateEnd(&comp_param[i].stream);
34ab9e97 522 g_free(comp_param[i].originbuf);
dcaf446e
XG
523 qemu_fclose(comp_param[i].file);
524 comp_param[i].file = NULL;
56e93d26 525 }
0d9f9a5c
LL
526 qemu_mutex_destroy(&comp_done_lock);
527 qemu_cond_destroy(&comp_done_cond);
56e93d26
JQ
528 g_free(compress_threads);
529 g_free(comp_param);
56e93d26
JQ
530 compress_threads = NULL;
531 comp_param = NULL;
56e93d26
JQ
532}
533
dcaf446e 534static int compress_threads_save_setup(void)
56e93d26
JQ
535{
536 int i, thread_count;
537
538 if (!migrate_use_compression()) {
dcaf446e 539 return 0;
56e93d26 540 }
56e93d26
JQ
541 thread_count = migrate_compress_threads();
542 compress_threads = g_new0(QemuThread, thread_count);
543 comp_param = g_new0(CompressParam, thread_count);
0d9f9a5c
LL
544 qemu_cond_init(&comp_done_cond);
545 qemu_mutex_init(&comp_done_lock);
56e93d26 546 for (i = 0; i < thread_count; i++) {
34ab9e97
XG
547 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
548 if (!comp_param[i].originbuf) {
549 goto exit;
550 }
551
dcaf446e
XG
552 if (deflateInit(&comp_param[i].stream,
553 migrate_compress_level()) != Z_OK) {
34ab9e97 554 g_free(comp_param[i].originbuf);
dcaf446e
XG
555 goto exit;
556 }
557
e110aa91
C
558 /* comp_param[i].file is just used as a dummy buffer to save data,
559 * set its ops to empty.
56e93d26
JQ
560 */
561 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
562 comp_param[i].done = true;
90e56fb4 563 comp_param[i].quit = false;
56e93d26
JQ
564 qemu_mutex_init(&comp_param[i].mutex);
565 qemu_cond_init(&comp_param[i].cond);
566 qemu_thread_create(compress_threads + i, "compress",
567 do_data_compress, comp_param + i,
568 QEMU_THREAD_JOINABLE);
569 }
dcaf446e
XG
570 return 0;
571
572exit:
573 compress_threads_save_cleanup();
574 return -1;
56e93d26
JQ
575}
576
577/**
3d0684b2 578 * save_page_header: write page header to wire
56e93d26
JQ
579 *
580 * If this is the 1st block, it also writes the block identification
581 *
3d0684b2 582 * Returns the number of bytes written
56e93d26
JQ
583 *
584 * @f: QEMUFile where to send the data
585 * @block: block that contains the page we want to send
586 * @offset: offset inside the block for the page
587 * in the lower bits, it contains flags
588 */
2bf3aa85
JQ
589static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
590 ram_addr_t offset)
56e93d26 591{
9f5f380b 592 size_t size, len;
56e93d26 593
24795694
JQ
594 if (block == rs->last_sent_block) {
595 offset |= RAM_SAVE_FLAG_CONTINUE;
596 }
2bf3aa85 597 qemu_put_be64(f, offset);
56e93d26
JQ
598 size = 8;
599
600 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
9f5f380b 601 len = strlen(block->idstr);
2bf3aa85
JQ
602 qemu_put_byte(f, len);
603 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
9f5f380b 604 size += 1 + len;
24795694 605 rs->last_sent_block = block;
56e93d26
JQ
606 }
607 return size;
608}
609
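/*
 * Resulting wire layout (an illustration of the function above, not part of
 * the original file):
 *
 *   first page of a block:  be64(offset | flags)  byte(len)  idstr[len]
 *   subsequent pages:       be64(offset | flags | RAM_SAVE_FLAG_CONTINUE)
 *
 * i.e. the block name travels only when the block changes, which is what
 * the returned size (8 versus 8 + 1 + len) reflects.
 */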
3d0684b2
JQ
610/**
611 * mig_throttle_guest_down: throttle down the guest
612 *
613 * Reduce amount of guest cpu execution to hopefully slow down memory
614 * writes. If guest dirty memory rate is reduced below the rate at
615 * which we can transfer pages to the destination then we should be
616 * able to complete migration. Some workloads dirty memory way too
617 * fast and will not effectively converge, even with auto-converge.
070afca2 618 */
cbbf8182
KZ
619static void mig_throttle_guest_down(uint64_t bytes_dirty_period,
620 uint64_t bytes_dirty_threshold)
070afca2
JH
621{
622 MigrationState *s = migrate_get_current();
2594f56d 623 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
cbbf8182
KZ
624 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
625 bool pct_tailslow = s->parameters.cpu_throttle_tailslow;
4cbc9c7f 626 int pct_max = s->parameters.max_cpu_throttle;
070afca2 627
cbbf8182
KZ
628 uint64_t throttle_now = cpu_throttle_get_percentage();
629 uint64_t cpu_now, cpu_ideal, throttle_inc;
630
070afca2
JH
631 /* We have not started throttling yet. Let's start it. */
632 if (!cpu_throttle_active()) {
633 cpu_throttle_set(pct_initial);
634 } else {
635 /* Throttling already on, just increase the rate */
cbbf8182
KZ
636 if (!pct_tailslow) {
637 throttle_inc = pct_increment;
638 } else {
639 /* Compute the ideal CPU percentage used by the guest, which may
640 * make the dirty rate match the dirty rate threshold. */
641 cpu_now = 100 - throttle_now;
642 cpu_ideal = cpu_now * (bytes_dirty_threshold * 1.0 /
643 bytes_dirty_period);
644 throttle_inc = MIN(cpu_now - cpu_ideal, pct_increment);
645 }
646 cpu_throttle_set(MIN(throttle_now + throttle_inc, pct_max));
070afca2
JH
647 }
648}
649
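/*
 * Worked example of the tailslow path above (illustrative numbers, not part
 * of the original file): with throttle_now = 30 the guest still gets
 * cpu_now = 70% of its CPU.  If it dirtied only ~11% more than we could
 * transfer (bytes_dirty_threshold / bytes_dirty_period ~= 0.9), then
 * cpu_ideal = 70 * 0.9 = 63 and throttle_inc = MIN(70 - 63, pct_increment)
 * = 7 with the default increment of 10, so the throttle creeps up to 37%
 * instead of jumping to 40%.
 */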
3d0684b2
JQ
650/**
651 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
652 *
6f37bb8b 653 * @rs: current RAM state
3d0684b2
JQ
654 * @current_addr: address for the zero page
655 *
656 * Update the xbzrle cache to reflect a page that's been sent as all 0.
56e93d26
JQ
657 * The important thing is that a stale (not-yet-0'd) page be replaced
658 * by the new data.
659 * As a bonus, if the page wasn't in the cache it gets added so that
3d0684b2 660 * when a small write is made into the 0'd page it gets XBZRLE sent.
56e93d26 661 */
6f37bb8b 662static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
56e93d26 663{
6f37bb8b 664 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
56e93d26
JQ
665 return;
666 }
667
668 /* We don't care if this fails to allocate a new cache page
669 * as long as it updated an old one */
c00e0928 670 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
9360447d 671 ram_counters.dirty_sync_count);
56e93d26
JQ
672}
673
674#define ENCODING_FLAG_XBZRLE 0x1
675
676/**
677 * save_xbzrle_page: compress and send current page
678 *
679 * Returns: 1 means that we wrote the page
680 * 0 means that page is identical to the one already sent
681 * -1 means that xbzrle would be longer than normal
682 *
5a987738 683 * @rs: current RAM state
3d0684b2
JQ
684 * @current_data: pointer to the address of the page contents
685 * @current_addr: addr of the page
56e93d26
JQ
686 * @block: block that contains the page we want to send
687 * @offset: offset inside the block for the page
688 * @last_stage: if we are at the completion stage
56e93d26 689 */
204b88b8 690static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
56e93d26 691 ram_addr_t current_addr, RAMBlock *block,
072c2511 692 ram_addr_t offset, bool last_stage)
56e93d26
JQ
693{
694 int encoded_len = 0, bytes_xbzrle;
695 uint8_t *prev_cached_page;
696
9360447d
JQ
697 if (!cache_is_cached(XBZRLE.cache, current_addr,
698 ram_counters.dirty_sync_count)) {
699 xbzrle_counters.cache_miss++;
56e93d26
JQ
700 if (!last_stage) {
701 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
9360447d 702 ram_counters.dirty_sync_count) == -1) {
56e93d26
JQ
703 return -1;
704 } else {
705 /* update *current_data when the page has been
706 inserted into cache */
707 *current_data = get_cached_data(XBZRLE.cache, current_addr);
708 }
709 }
710 return -1;
711 }
712
713 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
714
715 /* save current buffer into memory */
716 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
717
718 /* XBZRLE encoding (if there is no overflow) */
719 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
720 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
721 TARGET_PAGE_SIZE);
ca353803
WY
722
723 /*
724 * Update the cache contents, so that it corresponds to the data
725 * sent, in all cases except where we skip the page.
726 */
727 if (!last_stage && encoded_len != 0) {
728 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
729 /*
730 * In the case where we couldn't compress, ensure that the caller
731 * sends the data from the cache, since the guest might have
732 * changed the RAM since we copied it.
733 */
734 *current_data = prev_cached_page;
735 }
736
56e93d26 737 if (encoded_len == 0) {
55c4446b 738 trace_save_xbzrle_page_skipping();
56e93d26
JQ
739 return 0;
740 } else if (encoded_len == -1) {
55c4446b 741 trace_save_xbzrle_page_overflow();
9360447d 742 xbzrle_counters.overflow++;
56e93d26
JQ
743 return -1;
744 }
745
56e93d26 746 /* Send XBZRLE based compressed page */
2bf3aa85 747 bytes_xbzrle = save_page_header(rs, rs->f, block,
204b88b8
JQ
748 offset | RAM_SAVE_FLAG_XBZRLE);
749 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
750 qemu_put_be16(rs->f, encoded_len);
751 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
56e93d26 752 bytes_xbzrle += encoded_len + 1 + 2;
9360447d
JQ
753 xbzrle_counters.pages++;
754 xbzrle_counters.bytes += bytes_xbzrle;
755 ram_counters.transferred += bytes_xbzrle;
56e93d26
JQ
756
757 return 1;
758}
759
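/*
 * Wire layout of one XBZRLE page as sent above (an illustration, not part of
 * the original file):
 *
 *   page header with RAM_SAVE_FLAG_XBZRLE set
 *   byte   ENCODING_FLAG_XBZRLE
 *   be16   encoded_len
 *   encoded_len bytes of XBZRLE-encoded delta against the cached copy
 *
 * which is where the "encoded_len + 1 + 2" accounting comes from.
 */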
3d0684b2
JQ
760/**
761 * migration_bitmap_find_dirty: find the next dirty page from start
f3f491fc 762 *
a5f7b1a6 763 * Returns the page offset within memory region of the start of a dirty page
3d0684b2 764 *
6f37bb8b 765 * @rs: current RAM state
3d0684b2 766 * @rb: RAMBlock where to search for dirty pages
a935e30f 767 * @start: page where we start the search
f3f491fc 768 */
56e93d26 769static inline
a935e30f 770unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
f20e2865 771 unsigned long start)
56e93d26 772{
6b6712ef
JQ
773 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
774 unsigned long *bitmap = rb->bmap;
56e93d26
JQ
775 unsigned long next;
776
fbd162e6 777 if (ramblock_is_ignored(rb)) {
b895de50
CLG
778 return size;
779 }
780
6eeb63f7
WW
781 /*
782 * When the free page optimization is enabled, we need to check the bitmap
783 * to send the non-free pages rather than all the pages in the bulk stage.
784 */
785 if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
6b6712ef 786 next = start + 1;
56e93d26 787 } else {
6b6712ef 788 next = find_next_bit(bitmap, size, start);
56e93d26
JQ
789 }
790
6b6712ef 791 return next;
56e93d26
JQ
792}
793
06b10688 794static inline bool migration_bitmap_clear_dirty(RAMState *rs,
f20e2865
JQ
795 RAMBlock *rb,
796 unsigned long page)
a82d593b
DDAG
797{
798 bool ret;
a82d593b 799
386a907b 800 qemu_mutex_lock(&rs->bitmap_mutex);
002cad6b
PX
801
802 /*
803 * Clear dirty bitmap if needed. This _must_ be called before we
804 * send any of the page in the chunk because we need to make sure
805 * we can capture further page content changes when we sync dirty
806 * log the next time. So as long as we are going to send any of
807 * the page in the chunk we clear the remote dirty bitmap for all.
808 * Clearing it earlier won't be a problem, but too late will.
809 */
810 if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
811 uint8_t shift = rb->clear_bmap_shift;
812 hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
8bba004c 813 hwaddr start = (((ram_addr_t)page) << TARGET_PAGE_BITS) & (-size);
002cad6b
PX
814
815 /*
816 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
817 * can make things easier sometimes since then start address
818 * of the small chunk will always be 64 pages aligned so the
819 * bitmap will always be aligned to unsigned long. We should
820 * even be able to remove this restriction but I'm simply
821 * keeping it.
822 */
823 assert(shift >= 6);
824 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
825 memory_region_clear_dirty_bitmap(rb->mr, start, size);
826 }
827
6b6712ef 828 ret = test_and_clear_bit(page, rb->bmap);
a82d593b
DDAG
829
830 if (ret) {
0d8ec885 831 rs->migration_dirty_pages--;
a82d593b 832 }
386a907b
WW
833 qemu_mutex_unlock(&rs->bitmap_mutex);
834
a82d593b
DDAG
835 return ret;
836}
837
267691b6 838/* Called with RCU critical section */
7a3e9571 839static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
56e93d26 840{
0d8ec885 841 rs->migration_dirty_pages +=
5d0980a4 842 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
0d8ec885 843 &rs->num_dirty_pages_period);
56e93d26
JQ
844}
845
3d0684b2
JQ
846/**
847 * ram_pagesize_summary: calculate all the pagesizes of a VM
848 *
849 * Returns a summary bitmap of the page sizes of all RAMBlocks
850 *
851 * For VMs with just normal pages this is equivalent to the host page
852 * size. If it's got some huge pages then it's the OR of all the
853 * different page sizes.
e8ca1db2
DDAG
854 */
855uint64_t ram_pagesize_summary(void)
856{
857 RAMBlock *block;
858 uint64_t summary = 0;
859
fbd162e6 860 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
e8ca1db2
DDAG
861 summary |= block->page_size;
862 }
863
864 return summary;
865}
866
aecbfe9c
XG
867uint64_t ram_get_total_transferred_pages(void)
868{
869 return ram_counters.normal + ram_counters.duplicate +
870 compression_counters.pages + xbzrle_counters.pages;
871}
872
b734035b
XG
873static void migration_update_rates(RAMState *rs, int64_t end_time)
874{
be8b02ed 875 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
76e03000 876 double compressed_size;
b734035b
XG
877
878 /* calculate period counters */
879 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
880 / (end_time - rs->time_last_bitmap_sync);
881
be8b02ed 882 if (!page_count) {
b734035b
XG
883 return;
884 }
885
886 if (migrate_use_xbzrle()) {
887 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
be8b02ed 888 rs->xbzrle_cache_miss_prev) / page_count;
b734035b
XG
889 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
890 }
76e03000
XG
891
892 if (migrate_use_compression()) {
893 compression_counters.busy_rate = (double)(compression_counters.busy -
894 rs->compress_thread_busy_prev) / page_count;
895 rs->compress_thread_busy_prev = compression_counters.busy;
896
897 compressed_size = compression_counters.compressed_size -
898 rs->compressed_size_prev;
899 if (compressed_size) {
900 double uncompressed_size = (compression_counters.pages -
901 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
902
903 /* Compression-Ratio = Uncompressed-size / Compressed-size */
904 compression_counters.compression_rate =
905 uncompressed_size / compressed_size;
906
907 rs->compress_pages_prev = compression_counters.pages;
908 rs->compressed_size_prev = compression_counters.compressed_size;
909 }
910 }
b734035b
XG
911}
912
dc14a470
KZ
913static void migration_trigger_throttle(RAMState *rs)
914{
915 MigrationState *s = migrate_get_current();
916 uint64_t threshold = s->parameters.throttle_trigger_threshold;
917
918 uint64_t bytes_xfer_period = ram_counters.transferred - rs->bytes_xfer_prev;
919 uint64_t bytes_dirty_period = rs->num_dirty_pages_period * TARGET_PAGE_SIZE;
920 uint64_t bytes_dirty_threshold = bytes_xfer_period * threshold / 100;
921
922 /* During block migration the auto-converge logic incorrectly detects
923 * that ram migration makes no progress. Avoid this by disabling the
924 * throttling logic during the bulk phase of block migration. */
925 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
926 /* The following detection logic can be refined later. For now:
927 Check to see if the ratio between dirtied bytes and the approx.
928 amount of bytes that just got transferred since the last time
929 we were in this routine reaches the threshold. If that happens
930 twice, start or increase throttling. */
931
932 if ((bytes_dirty_period > bytes_dirty_threshold) &&
933 (++rs->dirty_rate_high_cnt >= 2)) {
934 trace_migration_throttle();
935 rs->dirty_rate_high_cnt = 0;
cbbf8182
KZ
936 mig_throttle_guest_down(bytes_dirty_period,
937 bytes_dirty_threshold);
dc14a470
KZ
938 }
939 }
940}
941
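/*
 * Worked example (illustrative numbers, not part of the original file):
 * with throttle_trigger_threshold at its default of 50 and 1 GiB
 * transferred during the last sync period, bytes_dirty_threshold =
 * 1 GiB * 50 / 100 = 512 MiB.  Only when the guest dirties more than that
 * in two such periods does mig_throttle_guest_down() get called.
 */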
8d820d6f 942static void migration_bitmap_sync(RAMState *rs)
56e93d26
JQ
943{
944 RAMBlock *block;
56e93d26 945 int64_t end_time;
56e93d26 946
9360447d 947 ram_counters.dirty_sync_count++;
56e93d26 948
f664da80
JQ
949 if (!rs->time_last_bitmap_sync) {
950 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56e93d26
JQ
951 }
952
953 trace_migration_bitmap_sync_start();
9c1f8f44 954 memory_global_dirty_log_sync();
56e93d26 955
108cfae0 956 qemu_mutex_lock(&rs->bitmap_mutex);
89ac5a1d
DDAG
957 WITH_RCU_READ_LOCK_GUARD() {
958 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
959 ramblock_sync_dirty_bitmap(rs, block);
960 }
961 ram_counters.remaining = ram_bytes_remaining();
56e93d26 962 }
108cfae0 963 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 964
9458a9a1 965 memory_global_after_dirty_log_sync();
a66cd90c 966 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1ffb5dfd 967
56e93d26
JQ
968 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
969
970 /* more than 1 second = 1000 milliseconds */
f664da80 971 if (end_time > rs->time_last_bitmap_sync + 1000) {
dc14a470 972 migration_trigger_throttle(rs);
070afca2 973
b734035b
XG
974 migration_update_rates(rs, end_time);
975
be8b02ed 976 rs->target_page_count_prev = rs->target_page_count;
d693c6f1
FF
977
978 /* reset period counters */
f664da80 979 rs->time_last_bitmap_sync = end_time;
a66cd90c 980 rs->num_dirty_pages_period = 0;
dc14a470 981 rs->bytes_xfer_prev = ram_counters.transferred;
56e93d26 982 }
4addcd4f 983 if (migrate_use_events()) {
3ab72385 984 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
4addcd4f 985 }
56e93d26
JQ
986}
987
bd227060
WW
988static void migration_bitmap_sync_precopy(RAMState *rs)
989{
990 Error *local_err = NULL;
991
992 /*
993 * The current notifier usage is just an optimization to migration, so we
994 * don't stop the normal migration process in the error case.
995 */
996 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
997 error_report_err(local_err);
b4a1733c 998 local_err = NULL;
bd227060
WW
999 }
1000
1001 migration_bitmap_sync(rs);
1002
1003 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1004 error_report_err(local_err);
1005 }
1006}
1007
6c97ec5f
XG
1008/**
1009 * save_zero_page_to_file: send the zero page to the file
1010 *
1011 * Returns the size of data written to the file, 0 means the page is not
1012 * a zero page
1013 *
1014 * @rs: current RAM state
1015 * @file: the file where the data is saved
1016 * @block: block that contains the page we want to send
1017 * @offset: offset inside the block for the page
1018 */
1019static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1020 RAMBlock *block, ram_addr_t offset)
1021{
1022 uint8_t *p = block->host + offset;
1023 int len = 0;
1024
1025 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1026 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1027 qemu_put_byte(file, 0);
1028 len += 1;
1029 }
1030 return len;
1031}
1032
56e93d26 1033/**
3d0684b2 1034 * save_zero_page: send the zero page to the stream
56e93d26 1035 *
3d0684b2 1036 * Returns the number of pages written.
56e93d26 1037 *
f7ccd61b 1038 * @rs: current RAM state
56e93d26
JQ
1039 * @block: block that contains the page we want to send
1040 * @offset: offset inside the block for the page
56e93d26 1041 */
7faccdc3 1042static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
56e93d26 1043{
6c97ec5f 1044 int len = save_zero_page_to_file(rs, rs->f, block, offset);
56e93d26 1045
6c97ec5f 1046 if (len) {
9360447d 1047 ram_counters.duplicate++;
6c97ec5f
XG
1048 ram_counters.transferred += len;
1049 return 1;
56e93d26 1050 }
6c97ec5f 1051 return -1;
56e93d26
JQ
1052}
1053
5727309d 1054static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
53f09a10 1055{
5727309d 1056 if (!migrate_release_ram() || !migration_in_postcopy()) {
53f09a10
PB
1057 return;
1058 }
1059
8bba004c 1060 ram_discard_range(rbname, offset, ((ram_addr_t)pages) << TARGET_PAGE_BITS);
53f09a10
PB
1061}
1062
059ff0fb
XG
1063/*
1064 * @pages: the number of pages written by the control path,
1065 * < 0 - error
1066 * > 0 - number of pages written
1067 *
1068 * Return true if the page has been saved, otherwise false is returned.
1069 */
1070static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1071 int *pages)
1072{
1073 uint64_t bytes_xmit = 0;
1074 int ret;
1075
1076 *pages = -1;
1077 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1078 &bytes_xmit);
1079 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1080 return false;
1081 }
1082
1083 if (bytes_xmit) {
1084 ram_counters.transferred += bytes_xmit;
1085 *pages = 1;
1086 }
1087
1088 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1089 return true;
1090 }
1091
1092 if (bytes_xmit > 0) {
1093 ram_counters.normal++;
1094 } else if (bytes_xmit == 0) {
1095 ram_counters.duplicate++;
1096 }
1097
1098 return true;
1099}
1100
65dacaa0
XG
1101/*
1102 * directly send the page to the stream
1103 *
1104 * Returns the number of pages written.
1105 *
1106 * @rs: current RAM state
1107 * @block: block that contains the page we want to send
1108 * @offset: offset inside the block for the page
1109 * @buf: the page to be sent
1111 * @async: send the page asynchronously
1111 */
1112static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1113 uint8_t *buf, bool async)
1114{
1115 ram_counters.transferred += save_page_header(rs, rs->f, block,
1116 offset | RAM_SAVE_FLAG_PAGE);
1117 if (async) {
1118 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1119 migrate_release_ram() &
1120 migration_in_postcopy());
1121 } else {
1122 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1123 }
1124 ram_counters.transferred += TARGET_PAGE_SIZE;
1125 ram_counters.normal++;
1126 return 1;
1127}
1128
56e93d26 1129/**
3d0684b2 1130 * ram_save_page: send the given page to the stream
56e93d26 1131 *
3d0684b2 1132 * Returns the number of pages written.
3fd3c4b3
DDAG
1133 * < 0 - error
1134 * >=0 - Number of pages written - this might legally be 0
1135 * if xbzrle noticed the page was the same.
56e93d26 1136 *
6f37bb8b 1137 * @rs: current RAM state
56e93d26
JQ
1138 * @block: block that contains the page we want to send
1139 * @offset: offset inside the block for the page
1140 * @last_stage: if we are at the completion stage
56e93d26 1141 */
a0a8aa14 1142static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
1143{
1144 int pages = -1;
56e93d26 1145 uint8_t *p;
56e93d26 1146 bool send_async = true;
a08f6890 1147 RAMBlock *block = pss->block;
8bba004c 1148 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
059ff0fb 1149 ram_addr_t current_addr = block->offset + offset;
56e93d26 1150
2f68e399 1151 p = block->host + offset;
1db9d8e5 1152 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1153
56e93d26 1154 XBZRLE_cache_lock();
d7400a34
XG
1155 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1156 migrate_use_xbzrle()) {
059ff0fb
XG
1157 pages = save_xbzrle_page(rs, &p, current_addr, block,
1158 offset, last_stage);
1159 if (!last_stage) {
1160 /* Can't send this cached data async, since the cache page
1161 * might get updated before it gets to the wire
56e93d26 1162 */
059ff0fb 1163 send_async = false;
56e93d26
JQ
1164 }
1165 }
1166
1167 /* XBZRLE overflow or normal page */
1168 if (pages == -1) {
65dacaa0 1169 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
1170 }
1171
1172 XBZRLE_cache_unlock();
1173
1174 return pages;
1175}
1176
b9ee2f7d
JQ
1177static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
1178 ram_addr_t offset)
1179{
67a4c891 1180 if (multifd_queue_page(rs->f, block, offset) < 0) {
713f762a
IR
1181 return -1;
1182 }
b9ee2f7d
JQ
1183 ram_counters.normal++;
1184
1185 return 1;
1186}
1187
5e5fdcff 1188static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 1189 ram_addr_t offset, uint8_t *source_buf)
56e93d26 1190{
53518d94 1191 RAMState *rs = ram_state;
a7a9a88f 1192 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
5e5fdcff 1193 bool zero_page = false;
6ef3771c 1194 int ret;
56e93d26 1195
5e5fdcff
XG
1196 if (save_zero_page_to_file(rs, f, block, offset)) {
1197 zero_page = true;
1198 goto exit;
1199 }
1200
6ef3771c 1201 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
1202
1203 /*
1204 * copy it to an internal buffer to avoid it being modified by the VM
1205 * so that we can catch the error during compression and
1206 * decompression
1207 */
1208 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
1209 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
1210 if (ret < 0) {
1211 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 1212 error_report("compressed data failed!");
5e5fdcff 1213 return false;
b3be2896 1214 }
56e93d26 1215
5e5fdcff 1216exit:
6ef3771c 1217 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
5e5fdcff
XG
1218 return zero_page;
1219}
1220
1221static void
1222update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
1223{
76e03000
XG
1224 ram_counters.transferred += bytes_xmit;
1225
5e5fdcff
XG
1226 if (param->zero_page) {
1227 ram_counters.duplicate++;
76e03000 1228 return;
5e5fdcff 1229 }
76e03000
XG
1230
1231 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
1232 compression_counters.compressed_size += bytes_xmit - 8;
1233 compression_counters.pages++;
56e93d26
JQ
1234}
1235
32b05495
XG
1236static bool save_page_use_compression(RAMState *rs);
1237
ce25d337 1238static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
1239{
1240 int idx, len, thread_count;
1241
32b05495 1242 if (!save_page_use_compression(rs)) {
56e93d26
JQ
1243 return;
1244 }
1245 thread_count = migrate_compress_threads();
a7a9a88f 1246
0d9f9a5c 1247 qemu_mutex_lock(&comp_done_lock);
56e93d26 1248 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 1249 while (!comp_param[idx].done) {
0d9f9a5c 1250 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 1251 }
a7a9a88f 1252 }
0d9f9a5c 1253 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
1254
1255 for (idx = 0; idx < thread_count; idx++) {
1256 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 1257 if (!comp_param[idx].quit) {
ce25d337 1258 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
5e5fdcff
XG
1259 /*
1260 * it's safe to fetch zero_page without holding comp_done_lock
1261 * as there is no further request submitted to the thread,
1262 * i.e, the thread should be waiting for a request at this point.
1263 */
1264 update_compress_thread_counts(&comp_param[idx], len);
56e93d26 1265 }
a7a9a88f 1266 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
1267 }
1268}
1269
1270static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1271 ram_addr_t offset)
1272{
1273 param->block = block;
1274 param->offset = offset;
1275}
1276
ce25d337
JQ
1277static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1278 ram_addr_t offset)
56e93d26
JQ
1279{
1280 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 1281 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
1282
1283 thread_count = migrate_compress_threads();
0d9f9a5c 1284 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
1285retry:
1286 for (idx = 0; idx < thread_count; idx++) {
1287 if (comp_param[idx].done) {
1288 comp_param[idx].done = false;
1289 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
1290 qemu_mutex_lock(&comp_param[idx].mutex);
1291 set_compress_params(&comp_param[idx], block, offset);
1292 qemu_cond_signal(&comp_param[idx].cond);
1293 qemu_mutex_unlock(&comp_param[idx].mutex);
1294 pages = 1;
5e5fdcff 1295 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
56e93d26 1296 break;
56e93d26
JQ
1297 }
1298 }
1d58872a
XG
1299
1300 /*
1301 * wait for the free thread if the user specifies 'compress-wait-thread',
1302 * otherwise we will post the page out in the main thread as a normal page.
1303 */
1304 if (pages < 0 && wait) {
1305 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
1306 goto retry;
1307 }
0d9f9a5c 1308 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
1309
1310 return pages;
1311}
1312
3d0684b2
JQ
1313/**
1314 * find_dirty_block: find the next dirty page and update any state
1315 * associated with the search process.
b9e60928 1316 *
a5f7b1a6 1317 * Returns true if a page is found
b9e60928 1318 *
6f37bb8b 1319 * @rs: current RAM state
3d0684b2
JQ
1320 * @pss: data about the state of the current dirty page scan
1321 * @again: set to false if the search has scanned the whole of RAM
b9e60928 1322 */
f20e2865 1323static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 1324{
f20e2865 1325 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 1326 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 1327 pss->page >= rs->last_page) {
b9e60928
DDAG
1328 /*
1329 * We've been once around the RAM and haven't found anything.
1330 * Give up.
1331 */
1332 *again = false;
1333 return false;
1334 }
8bba004c
AR
1335 if ((((ram_addr_t)pss->page) << TARGET_PAGE_BITS)
1336 >= pss->block->used_length) {
b9e60928 1337 /* Didn't find anything in this RAM Block */
a935e30f 1338 pss->page = 0;
b9e60928
DDAG
1339 pss->block = QLIST_NEXT_RCU(pss->block, next);
1340 if (!pss->block) {
48df9d80
XG
1341 /*
1342 * If memory migration starts over, we will meet a dirtied page
1343 * which may still exist in the compression threads' ring, so we
1344 * should flush the compressed data to make sure the new page
1345 * is not overwritten by the old one in the destination.
1346 *
1347 * Also, if xbzrle is on, stop using the data compression at this
1348 * point. In theory, xbzrle can do better than compression.
1349 */
1350 flush_compressed_data(rs);
1351
b9e60928
DDAG
1352 /* Hit the end of the list */
1353 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1354 /* Flag that we've looped */
1355 pss->complete_round = true;
6f37bb8b 1356 rs->ram_bulk_stage = false;
b9e60928
DDAG
1357 }
1358 /* Didn't find anything this time, but try again on the new block */
1359 *again = true;
1360 return false;
1361 } else {
1362 /* Can go around again, but... */
1363 *again = true;
1364 /* We've found something so probably don't need to */
1365 return true;
1366 }
1367}
1368
3d0684b2
JQ
1369/**
1370 * unqueue_page: gets a page of the queue
1371 *
a82d593b 1372 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 1373 *
3d0684b2
JQ
1374 * Returns the block of the page (or NULL if none available)
1375 *
ec481c6c 1376 * @rs: current RAM state
3d0684b2 1377 * @offset: used to return the offset within the RAMBlock
a82d593b 1378 */
f20e2865 1379static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
1380{
1381 RAMBlock *block = NULL;
1382
ae526e32
XG
1383 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
1384 return NULL;
1385 }
1386
6e8a355d 1387 QEMU_LOCK_GUARD(&rs->src_page_req_mutex);
ec481c6c
JQ
1388 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1389 struct RAMSrcPageRequest *entry =
1390 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
1391 block = entry->rb;
1392 *offset = entry->offset;
a82d593b
DDAG
1393
1394 if (entry->len > TARGET_PAGE_SIZE) {
1395 entry->len -= TARGET_PAGE_SIZE;
1396 entry->offset += TARGET_PAGE_SIZE;
1397 } else {
1398 memory_region_unref(block->mr);
ec481c6c 1399 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 1400 g_free(entry);
e03a34f8 1401 migration_consume_urgent_request();
a82d593b
DDAG
1402 }
1403 }
a82d593b
DDAG
1404
1405 return block;
1406}
1407
3d0684b2 1408/**
ff1543af 1409 * get_queued_page: unqueue a page from the postcopy requests
3d0684b2
JQ
1410 *
1411 * Skips pages that are already sent (!dirty)
a82d593b 1412 *
a5f7b1a6 1413 * Returns true if a queued page is found
a82d593b 1414 *
6f37bb8b 1415 * @rs: current RAM state
3d0684b2 1416 * @pss: data about the state of the current dirty page scan
a82d593b 1417 */
f20e2865 1418static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
1419{
1420 RAMBlock *block;
1421 ram_addr_t offset;
1422 bool dirty;
1423
1424 do {
f20e2865 1425 block = unqueue_page(rs, &offset);
a82d593b
DDAG
1426 /*
1427 * We're sending this page, and since it's postcopy nothing else
1428 * will dirty it, and we must make sure it doesn't get sent again
1429 * even if this queue request was received after the background
1430 * search already sent it.
1431 */
1432 if (block) {
f20e2865
JQ
1433 unsigned long page;
1434
6b6712ef
JQ
1435 page = offset >> TARGET_PAGE_BITS;
1436 dirty = test_bit(page, block->bmap);
a82d593b 1437 if (!dirty) {
06b10688 1438 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
64737606 1439 page);
a82d593b 1440 } else {
f20e2865 1441 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
1442 }
1443 }
1444
1445 } while (block && !dirty);
1446
1447 if (block) {
1448 /*
1449 * As soon as we start servicing pages out of order, we have
1450 * to kill the bulk stage, since the bulk stage assumes
1451 * (in migration_bitmap_find_and_reset_dirty) that every page is
1452 * dirty, and that's no longer true.
1453 */
6f37bb8b 1454 rs->ram_bulk_stage = false;
a82d593b
DDAG
1455
1456 /*
1457 * We want the background search to continue from the queued page
1458 * since the guest is likely to want other pages near to the page
1459 * it just requested.
1460 */
1461 pss->block = block;
a935e30f 1462 pss->page = offset >> TARGET_PAGE_BITS;
422314e7
WY
1463
1464 /*
1465 * This unqueued page would break the "one round" check, even if it
1466 * is really rare.
1467 */
1468 pss->complete_round = false;
a82d593b
DDAG
1469 }
1470
1471 return !!block;
1472}
1473
6c595cde 1474/**
5e58f968
JQ
1475 * migration_page_queue_free: drop any remaining pages in the ram
1476 * request queue
6c595cde 1477 *
3d0684b2
JQ
1478 * It should be empty at the end anyway, but in error cases there may
1479 * be some left. In case there is any page left, we drop it.
1480 *
6c595cde 1481 */
83c13382 1482static void migration_page_queue_free(RAMState *rs)
6c595cde 1483{
ec481c6c 1484 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
1485 /* This queue generally should be empty - but in the case of a failed
1486 * migration it might have some droppings in.
1487 */
89ac5a1d 1488 RCU_READ_LOCK_GUARD();
ec481c6c 1489 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 1490 memory_region_unref(mspr->rb->mr);
ec481c6c 1491 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
1492 g_free(mspr);
1493 }
6c595cde
DDAG
1494}
1495
1496/**
3d0684b2
JQ
1497 * ram_save_queue_pages: queue the page for transmission
1498 *
1499 * A request from postcopy destination for example.
1500 *
1501 * Returns zero on success or negative on error
1502 *
3d0684b2
JQ
1503 * @rbname: Name of the RAMBlock of the request. NULL means the
1504 * same as the last one.
1505 * @start: starting address from the start of the RAMBlock
1506 * @len: length (in bytes) to send
6c595cde 1507 */
96506894 1508int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
1509{
1510 RAMBlock *ramblock;
53518d94 1511 RAMState *rs = ram_state;
6c595cde 1512
9360447d 1513 ram_counters.postcopy_requests++;
89ac5a1d
DDAG
1514 RCU_READ_LOCK_GUARD();
1515
6c595cde
DDAG
1516 if (!rbname) {
1517 /* Reuse last RAMBlock */
68a098f3 1518 ramblock = rs->last_req_rb;
6c595cde
DDAG
1519
1520 if (!ramblock) {
1521 /*
1522 * Shouldn't happen, we can't reuse the last RAMBlock if
1523 * it's the 1st request.
1524 */
1525 error_report("ram_save_queue_pages no previous block");
03acb4e9 1526 return -1;
6c595cde
DDAG
1527 }
1528 } else {
1529 ramblock = qemu_ram_block_by_name(rbname);
1530
1531 if (!ramblock) {
1532 /* We shouldn't be asked for a non-existent RAMBlock */
1533 error_report("ram_save_queue_pages no block '%s'", rbname);
03acb4e9 1534 return -1;
6c595cde 1535 }
68a098f3 1536 rs->last_req_rb = ramblock;
6c595cde
DDAG
1537 }
1538 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1539 if (start+len > ramblock->used_length) {
9458ad6b
JQ
1540 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1541 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde 1542 __func__, start, len, ramblock->used_length);
03acb4e9 1543 return -1;
6c595cde
DDAG
1544 }
1545
ec481c6c
JQ
1546 struct RAMSrcPageRequest *new_entry =
1547 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
1548 new_entry->rb = ramblock;
1549 new_entry->offset = start;
1550 new_entry->len = len;
1551
1552 memory_region_ref(ramblock->mr);
ec481c6c
JQ
1553 qemu_mutex_lock(&rs->src_page_req_mutex);
1554 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 1555 migration_make_urgent_request();
ec481c6c 1556 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
1557
1558 return 0;
6c595cde
DDAG
1559}
1560
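/*
 * Usage sketch (not part of the original file): on the source side the
 * return-path thread calls this when the postcopy destination faults on a
 * page, e.g. for one target page of the block named "pc.ram" (the name and
 * offset are example values):
 *
 *     if (ram_save_queue_pages("pc.ram", faulting_offset, TARGET_PAGE_SIZE)) {
 *         // treat it as a fatal return-path error
 *     }
 */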
d7400a34
XG
1561static bool save_page_use_compression(RAMState *rs)
1562{
1563 if (!migrate_use_compression()) {
1564 return false;
1565 }
1566
1567 /*
1568 * If xbzrle is on, stop using the data compression after the first
1569 * round of migration even if compression is enabled. In theory,
1570 * xbzrle can do better than compression.
1571 */
1572 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1573 return true;
1574 }
1575
1576 return false;
1577}
1578
5e5fdcff
XG
1579/*
1580 * try to compress the page before posting it out, return true if the page
1581 * has been properly handled by compression, otherwise it needs other
1582 * paths to handle it
1583 */
1584static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
1585{
1586 if (!save_page_use_compression(rs)) {
1587 return false;
1588 }
1589
1590 /*
1591 * When starting the process of a new block, the first page of
1592 * the block should be sent out before other pages in the same
1593 * block, and all the pages in last block should have been sent
1594 * out, keeping this order is important, because the 'cont' flag
1595 * is used to avoid resending the block name.
1596 *
1597 * We post the fist page as normal page as compression will take
1598 * much CPU resource.
1599 */
1600 if (block != rs->last_sent_block) {
1601 flush_compressed_data(rs);
1602 return false;
1603 }
1604
1605 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
1606 return true;
1607 }
1608
76e03000 1609 compression_counters.busy++;
5e5fdcff
XG
1610 return false;
1611}
1612
a82d593b 1613/**
3d0684b2 1614 * ram_save_target_page: save one target page
a82d593b 1615 *
3d0684b2 1616 * Returns the number of pages written
a82d593b 1617 *
6f37bb8b 1618 * @rs: current RAM state
3d0684b2 1619 * @pss: data about the page we want to send
a82d593b 1620 * @last_stage: if we are at the completion stage
a82d593b 1621 */
a0a8aa14 1622static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 1623 bool last_stage)
a82d593b 1624{
a8ec91f9 1625 RAMBlock *block = pss->block;
8bba004c 1626 ram_addr_t offset = ((ram_addr_t)pss->page) << TARGET_PAGE_BITS;
a8ec91f9
XG
1627 int res;
1628
1629 if (control_save_page(rs, block, offset, &res)) {
1630 return res;
1631 }
1632
5e5fdcff
XG
1633 if (save_compress_page(rs, block, offset)) {
1634 return 1;
d7400a34
XG
1635 }
1636
1637 res = save_zero_page(rs, block, offset);
1638 if (res > 0) {
1639 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
1640 * page would be stale
1641 */
1642 if (!save_page_use_compression(rs)) {
1643 XBZRLE_cache_lock();
1644 xbzrle_cache_zero_page(rs, block->offset + offset);
1645 XBZRLE_cache_unlock();
1646 }
1647 ram_release_pages(block->idstr, offset, res);
1648 return res;
1649 }
1650
da3f56cb 1651 /*
c6b3a2e0
WY
1652 * Do not use multifd for:
1653 * 1. Compression as the first page in the new block should be posted out
1654 * before sending the compressed page
1655 * 2. In postcopy as one whole host page should be placed
da3f56cb 1656 */
c6b3a2e0
WY
1657 if (!save_page_use_compression(rs) && migrate_use_multifd()
1658 && !migration_in_postcopy()) {
b9ee2f7d 1659 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
1660 }
1661
1faa5665 1662 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
1663}
1664
1665/**
3d0684b2 1666 * ram_save_host_page: save a whole host page
a82d593b 1667 *
3d0684b2
JQ
1668 * Starting at *offset, send pages up to the end of the current host
1669 * page. It's valid for the initial offset to point into the middle of
1670 * a host page, in which case the remainder of the host page is sent.
1671 * Only dirty target pages are sent. Note that the host page size may
1672 * be a huge page for this block.
1eb3fc0a
DDAG
1673 * The saving stops at the boundary of the used_length of the block
1674 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 1675 *
3d0684b2
JQ
1676 * Returns the number of pages written or negative on error
1677 *
6f37bb8b 1678 * @rs: current RAM state
3d0684b2 1679 * @ms: current migration state
3d0684b2 1680 * @pss: data about the page we want to send
a82d593b 1681 * @last_stage: if we are at the completion stage
a82d593b 1682 */
a0a8aa14 1683static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 1684 bool last_stage)
a82d593b
DDAG
1685{
1686 int tmppages, pages = 0;
a935e30f
JQ
1687 size_t pagesize_bits =
1688 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 1689
fbd162e6 1690 if (ramblock_is_ignored(pss->block)) {
b895de50
CLG
1691 error_report("block %s should not be migrated !", pss->block->idstr);
1692 return 0;
1693 }
1694
a82d593b 1695 do {
1faa5665
XG
1696 /* Check if the page is dirty and if it is, send it */
1697 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
1698 pss->page++;
1699 continue;
1700 }
1701
f20e2865 1702 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
1703 if (tmppages < 0) {
1704 return tmppages;
1705 }
1706
1707 pages += tmppages;
a935e30f 1708 pss->page++;
97e1e067
DDAG
1709 /* Allow rate limiting to happen in the middle of huge pages */
1710 migration_rate_limit();
1eb3fc0a 1711 } while ((pss->page & (pagesize_bits - 1)) &&
8bba004c
AR
1712 offset_in_ramblock(pss->block,
1713 ((ram_addr_t)pss->page) << TARGET_PAGE_BITS));
a82d593b
DDAG
1714
1715 /* The offset we leave with is the last one we looked at */
a935e30f 1716 pss->page--;
a82d593b
DDAG
1717 return pages;
1718}
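
/*
 * A minimal sketch (not part of the original file) of the boundary test
 * used by the loop above: with 2 MiB huge pages and 4 KiB target pages,
 * pagesize_bits is 512, so target pages keep being sent until pss->page
 * wraps around to a multiple of 512. The helper name is hypothetical.
 */
static inline bool sketch_reached_host_page_boundary(unsigned long next_page,
                                                     size_t pagesize_bits)
{
    /* pagesize_bits is assumed to be a power of two, as in the loop above */
    return (next_page & (pagesize_bits - 1)) == 0;
}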
6c595cde 1719
56e93d26 1720/**
3d0684b2 1721 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
1722 *
1723 * Called within an RCU critical section.
1724 *
e8f3735f
XG
1725 * Returns the number of pages written where zero means no dirty pages,
1726 * or negative on error
56e93d26 1727 *
6f37bb8b 1728 * @rs: current RAM state
56e93d26 1729 * @last_stage: if we are at the completion stage
a82d593b
DDAG
1730 *
1731 * On systems where host-page-size > target-page-size it will send all the
1732 * pages in a host page that are dirty.
56e93d26
JQ
1733 */
1734
ce25d337 1735static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 1736{
b8fb8cb7 1737 PageSearchStatus pss;
56e93d26 1738 int pages = 0;
b9e60928 1739 bool again, found;
56e93d26 1740
0827b9e9
AA
1741 /* No dirty page as there is zero RAM */
1742 if (!ram_bytes_total()) {
1743 return pages;
1744 }
1745
6f37bb8b 1746 pss.block = rs->last_seen_block;
a935e30f 1747 pss.page = rs->last_page;
b8fb8cb7
DDAG
1748 pss.complete_round = false;
1749
1750 if (!pss.block) {
1751 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1752 }
56e93d26 1753
b9e60928 1754 do {
a82d593b 1755 again = true;
f20e2865 1756 found = get_queued_page(rs, &pss);
b9e60928 1757
a82d593b
DDAG
1758 if (!found) {
1759 /* priority queue empty, so just search for something dirty */
f20e2865 1760 found = find_dirty_block(rs, &pss, &again);
a82d593b 1761 }
f3f491fc 1762
a82d593b 1763 if (found) {
f20e2865 1764 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 1765 }
b9e60928 1766 } while (!pages && again);
56e93d26 1767
6f37bb8b 1768 rs->last_seen_block = pss.block;
a935e30f 1769 rs->last_page = pss.page;
56e93d26
JQ
1770
1771 return pages;
1772}
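
/*
 * Search-order note for the function above: pages explicitly requested by
 * the destination (get_queued_page(), fed from the rs->src_page_requests
 * queue) are serviced before the linear find_dirty_block() scan, and the
 * last block/page looked at is cached in rs->last_seen_block/last_page so
 * the next call resumes where this one stopped.
 */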
1773
1774void acct_update_position(QEMUFile *f, size_t size, bool zero)
1775{
1776 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 1777
56e93d26 1778 if (zero) {
9360447d 1779 ram_counters.duplicate += pages;
56e93d26 1780 } else {
9360447d
JQ
1781 ram_counters.normal += pages;
1782 ram_counters.transferred += size;
56e93d26
JQ
1783 qemu_update_position(f, size);
1784 }
1785}
1786
fbd162e6 1787static uint64_t ram_bytes_total_common(bool count_ignored)
56e93d26
JQ
1788{
1789 RAMBlock *block;
1790 uint64_t total = 0;
1791
89ac5a1d
DDAG
1792 RCU_READ_LOCK_GUARD();
1793
fbd162e6
YK
1794 if (count_ignored) {
1795 RAMBLOCK_FOREACH_MIGRATABLE(block) {
1796 total += block->used_length;
1797 }
1798 } else {
1799 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1800 total += block->used_length;
1801 }
99e15582 1802 }
56e93d26
JQ
1803 return total;
1804}
1805
fbd162e6
YK
1806uint64_t ram_bytes_total(void)
1807{
1808 return ram_bytes_total_common(false);
1809}
1810
f265e0e4 1811static void xbzrle_load_setup(void)
56e93d26 1812{
f265e0e4 1813 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
1814}
1815
f265e0e4
JQ
1816static void xbzrle_load_cleanup(void)
1817{
1818 g_free(XBZRLE.decoded_buf);
1819 XBZRLE.decoded_buf = NULL;
1820}
1821
7d7c96be
PX
1822static void ram_state_cleanup(RAMState **rsp)
1823{
b9ccaf6d
DDAG
1824 if (*rsp) {
1825 migration_page_queue_free(*rsp);
1826 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
1827 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
1828 g_free(*rsp);
1829 *rsp = NULL;
1830 }
7d7c96be
PX
1831}
1832
84593a08
PX
1833static void xbzrle_cleanup(void)
1834{
1835 XBZRLE_cache_lock();
1836 if (XBZRLE.cache) {
1837 cache_fini(XBZRLE.cache);
1838 g_free(XBZRLE.encoded_buf);
1839 g_free(XBZRLE.current_buf);
1840 g_free(XBZRLE.zero_target_page);
1841 XBZRLE.cache = NULL;
1842 XBZRLE.encoded_buf = NULL;
1843 XBZRLE.current_buf = NULL;
1844 XBZRLE.zero_target_page = NULL;
1845 }
1846 XBZRLE_cache_unlock();
1847}
1848
f265e0e4 1849static void ram_save_cleanup(void *opaque)
56e93d26 1850{
53518d94 1851 RAMState **rsp = opaque;
6b6712ef 1852 RAMBlock *block;
eb859c53 1853
2ff64038 1854 /* the caller must hold the iothread lock or be in a bh, so there is
4633456c 1855 * no writing race against the migration bitmap
2ff64038 1856 */
6b6712ef
JQ
1857 memory_global_dirty_log_stop();
1858
fbd162e6 1859 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
002cad6b
PX
1860 g_free(block->clear_bmap);
1861 block->clear_bmap = NULL;
6b6712ef
JQ
1862 g_free(block->bmap);
1863 block->bmap = NULL;
56e93d26
JQ
1864 }
1865
84593a08 1866 xbzrle_cleanup();
f0afa331 1867 compress_threads_save_cleanup();
7d7c96be 1868 ram_state_cleanup(rsp);
56e93d26
JQ
1869}
1870
6f37bb8b 1871static void ram_state_reset(RAMState *rs)
56e93d26 1872{
6f37bb8b
JQ
1873 rs->last_seen_block = NULL;
1874 rs->last_sent_block = NULL;
269ace29 1875 rs->last_page = 0;
6f37bb8b
JQ
1876 rs->last_version = ram_list.version;
1877 rs->ram_bulk_stage = true;
6eeb63f7 1878 rs->fpo_enabled = false;
56e93d26
JQ
1879}
1880
1881#define MAX_WAIT 50 /* ms, half buffered_file limit */
1882
4f2e4252
DDAG
1883/*
1884 * 'expected' is the value you expect the bitmap mostly to be full
1885 * of; it won't bother printing lines that are all this value.
1886 * If 'todump' is null the migration bitmap is dumped.
1887 */
6b6712ef
JQ
1888void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
1889 unsigned long pages)
4f2e4252 1890{
4f2e4252
DDAG
1891 int64_t cur;
1892 int64_t linelen = 128;
1893 char linebuf[129];
1894
6b6712ef 1895 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
1896 int64_t curb;
1897 bool found = false;
1898 /*
1899 * Last line; catch the case where the line length
1900 * is longer than remaining ram
1901 */
6b6712ef
JQ
1902 if (cur + linelen > pages) {
1903 linelen = pages - cur;
4f2e4252
DDAG
1904 }
1905 for (curb = 0; curb < linelen; curb++) {
1906 bool thisbit = test_bit(cur + curb, todump);
1907 linebuf[curb] = thisbit ? '1' : '.';
1908 found = found || (thisbit != expected);
1909 }
1910 if (found) {
1911 linebuf[curb] = '\0';
1912 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1913 }
1914 }
1915}
1916
e0b266f0
DDAG
1917/* **** functions for postcopy ***** */
1918
ced1c616
PB
1919void ram_postcopy_migrated_memory_release(MigrationState *ms)
1920{
1921 struct RAMBlock *block;
ced1c616 1922
fbd162e6 1923 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
1924 unsigned long *bitmap = block->bmap;
1925 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
1926 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
1927
1928 while (run_start < range) {
1929 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
8bba004c
AR
1930 ram_discard_range(block->idstr,
1931 ((ram_addr_t)run_start) << TARGET_PAGE_BITS,
1932 ((ram_addr_t)(run_end - run_start))
1933 << TARGET_PAGE_BITS);
ced1c616
PB
1934 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
1935 }
1936 }
1937}
1938
3d0684b2
JQ
1939/**
1940 * postcopy_send_discard_bm_ram: discard a RAMBlock
1941 *
1942 * Returns zero on success
1943 *
e0b266f0 1944 * Callback from postcopy_each_ram_send_discard for each RAMBlock
3d0684b2
JQ
1945 *
1946 * @ms: current migration state
89dab31b 1947 * @block: RAMBlock to discard
e0b266f0 1948 */
810cf2bb 1949static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
e0b266f0 1950{
6b6712ef 1951 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 1952 unsigned long current;
1e7cf8c3 1953 unsigned long *bitmap = block->bmap;
e0b266f0 1954
6b6712ef 1955 for (current = 0; current < end; ) {
1e7cf8c3 1956 unsigned long one = find_next_bit(bitmap, end, current);
33a5cb62 1957 unsigned long zero, discard_length;
e0b266f0 1958
33a5cb62
WY
1959 if (one >= end) {
1960 break;
1961 }
e0b266f0 1962
1e7cf8c3 1963 zero = find_next_zero_bit(bitmap, end, one + 1);
33a5cb62
WY
1964
1965 if (zero >= end) {
1966 discard_length = end - one;
e0b266f0 1967 } else {
33a5cb62
WY
1968 discard_length = zero - one;
1969 }
810cf2bb 1970 postcopy_discard_send_range(ms, one, discard_length);
33a5cb62 1971 current = one + discard_length;
e0b266f0
DDAG
1972 }
1973
1974 return 0;
1975}
1976
3d0684b2
JQ
1977/**
1978 * postcopy_each_ram_send_discard: discard all RAMBlocks
1979 *
1980 * Returns 0 for success or negative for error
1981 *
e0b266f0
DDAG
1982 * Utility for the outgoing postcopy code.
1983 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1984 * passing it bitmap indexes and name.
e0b266f0
DDAG
1985 * (qemu_ram_foreach_block ends up passing unscaled lengths
1986 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
1987 *
1988 * @ms: current migration state
e0b266f0
DDAG
1989 */
1990static int postcopy_each_ram_send_discard(MigrationState *ms)
1991{
1992 struct RAMBlock *block;
1993 int ret;
1994
fbd162e6 1995 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
810cf2bb 1996 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
1997
1998 /*
1999 * Postcopy sends chunks of bitmap over the wire, but it
2000 * just needs indexes at this point, avoids it having
2001 * target page specific code.
2002 */
810cf2bb
WY
2003 ret = postcopy_send_discard_bm_ram(ms, block);
2004 postcopy_discard_send_finish(ms);
e0b266f0
DDAG
2005 if (ret) {
2006 return ret;
2007 }
2008 }
2009
2010 return 0;
2011}
2012
3d0684b2 2013/**
8324ef86 2014 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
3d0684b2
JQ
2015 *
2016 * Helper for postcopy_chunk_hostpages; it canonicalizes the block's
2017 * dirty bitmap so that host-page-sized chunks are either fully dirty
2018 * or fully clean.
99e314eb 2019 *
3d0684b2
JQ
2020 * Postcopy requires that all target pages in a host page are dirty or
2021 * clean, not a mix. This function canonicalizes the bitmap.
99e314eb 2022 *
3d0684b2 2023 * @ms: current migration state
3d0684b2 2024 * @block: block that contains the page we want to canonicalize
99e314eb 2025 */
1e7cf8c3 2026static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
99e314eb 2027{
53518d94 2028 RAMState *rs = ram_state;
6b6712ef 2029 unsigned long *bitmap = block->bmap;
29c59172 2030 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2031 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2032 unsigned long run_start;
2033
29c59172
DDAG
2034 if (block->page_size == TARGET_PAGE_SIZE) {
2035 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2036 return;
2037 }
2038
1e7cf8c3
WY
2039 /* Find a dirty page */
2040 run_start = find_next_bit(bitmap, pages, 0);
99e314eb 2041
6b6712ef 2042 while (run_start < pages) {
99e314eb
DDAG
2043
2044 /*
2045 * If the start of this run of pages is in the middle of a host
2046 * page, then we need to fixup this host page.
2047 */
9dec3cc3 2048 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2049 /* Find the end of this run */
1e7cf8c3 2050 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2051 /*
2052 * If the end isn't at the start of a host page, then the
2053 * run doesn't finish at the end of a host page
2054 * and we need to discard.
2055 */
99e314eb
DDAG
2056 }
2057
9dec3cc3 2058 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2059 unsigned long page;
dad45ab2
WY
2060 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2061 host_ratio);
2062 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
99e314eb 2063
99e314eb
DDAG
2064 /* Clean up the bitmap */
2065 for (page = fixup_start_addr;
2066 page < fixup_start_addr + host_ratio; page++) {
99e314eb
DDAG
2067 /*
2068 * Remark them as dirty, updating the count for any pages
2069 * that weren't previously dirty.
2070 */
0d8ec885 2071 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2072 }
2073 }
2074
1e7cf8c3
WY
2075 /* Find the next dirty page for the next iteration */
2076 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2077 }
2078}
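
/*
 * Worked example for the canonicalization above (an illustration, not part
 * of the original file): with a 2 MiB host page and 4 KiB target pages,
 * host_ratio is 512. If a dirty run ends at target page 700, the end is
 * not host-page aligned, so fixup_start_addr = ALIGN_DOWN(700, 512) = 512,
 * run_start becomes ALIGN_UP(700, 512) = 1024, and target pages 512..1023
 * are re-marked dirty so the whole host page is sent again. The helpers
 * below are hypothetical power-of-two equivalents of QEMU_ALIGN_DOWN/UP.
 */
static inline unsigned long sketch_align_down(unsigned long v, unsigned long r)
{
    return v & ~(r - 1);
}

static inline unsigned long sketch_align_up(unsigned long v, unsigned long r)
{
    return (v + r - 1) & ~(r - 1);
}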
2079
3d0684b2 2080/**
89dab31b 2081 * postcopy_chunk_hostpages: discard any partially sent host page
3d0684b2 2082 *
99e314eb
DDAG
2083 * Utility for the outgoing postcopy code.
2084 *
2085 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2086 * dirty host-page size chunks as all dirty. In this case the host-page
2087 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2088 *
3d0684b2
JQ
2089 * Returns zero on success
2090 *
2091 * @ms: current migration state
6b6712ef 2092 * @block: block we want to work with
99e314eb 2093 */
6b6712ef 2094static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 2095{
810cf2bb 2096 postcopy_discard_send_init(ms, block->idstr);
99e314eb 2097
6b6712ef 2098 /*
1e7cf8c3 2099 * Ensure that all partially dirty host pages are made fully dirty.
6b6712ef 2100 */
1e7cf8c3 2101 postcopy_chunk_hostpages_pass(ms, block);
99e314eb 2102
810cf2bb 2103 postcopy_discard_send_finish(ms);
99e314eb
DDAG
2104 return 0;
2105}
2106
3d0684b2
JQ
2107/**
2108 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2109 *
2110 * Returns zero on success
2111 *
e0b266f0
DDAG
2112 * Transmit the set of pages to be discarded after precopy to the target;
2113 * these are pages that:
2114 * a) Have been previously transmitted but are now dirty again
2115 * b) Pages that have never been transmitted; this ensures that
2116 * any pages on the destination that have been mapped by background
2117 * tasks get discarded (transparent huge pages are the specific concern)
2118 * Hopefully this is pretty sparse
3d0684b2
JQ
2119 *
2120 * @ms: current migration state
e0b266f0
DDAG
2121 */
2122int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2123{
53518d94 2124 RAMState *rs = ram_state;
6b6712ef 2125 RAMBlock *block;
e0b266f0 2126 int ret;
e0b266f0 2127
89ac5a1d 2128 RCU_READ_LOCK_GUARD();
e0b266f0
DDAG
2129
2130 /* This should be our last sync, the src is now paused */
eb859c53 2131 migration_bitmap_sync(rs);
e0b266f0 2132
6b6712ef
JQ
2133 /* Easiest way to make sure we don't resume in the middle of a host-page */
2134 rs->last_seen_block = NULL;
2135 rs->last_sent_block = NULL;
2136 rs->last_page = 0;
e0b266f0 2137
fbd162e6 2138 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2139 /* Deal with TPS != HPS and huge pages */
2140 ret = postcopy_chunk_hostpages(ms, block);
2141 if (ret) {
6b6712ef
JQ
2142 return ret;
2143 }
e0b266f0 2144
e0b266f0 2145#ifdef DEBUG_POSTCOPY
1e7cf8c3
WY
2146 ram_debug_dump_bitmap(block->bmap, true,
2147 block->used_length >> TARGET_PAGE_BITS);
e0b266f0 2148#endif
6b6712ef
JQ
2149 }
2150 trace_ram_postcopy_send_discard_bitmap();
e0b266f0 2151
b3ac2b94 2152 return postcopy_each_ram_send_discard(ms);
e0b266f0
DDAG
2153}
2154
3d0684b2
JQ
2155/**
2156 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 2157 *
3d0684b2 2158 * Returns zero on success
e0b266f0 2159 *
36449157
JQ
2160 * @rbname: name of the RAMBlock of the request. NULL means the
2161 * same that last one.
3d0684b2
JQ
2162 * @start: RAMBlock starting page
2163 * @length: RAMBlock size
e0b266f0 2164 */
aaa2064c 2165int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0 2166{
36449157 2167 trace_ram_discard_range(rbname, start, length);
d3a5038c 2168
89ac5a1d 2169 RCU_READ_LOCK_GUARD();
36449157 2170 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
2171
2172 if (!rb) {
36449157 2173 error_report("ram_discard_range: Failed to find block '%s'", rbname);
03acb4e9 2174 return -1;
e0b266f0
DDAG
2175 }
2176
814bb08f
PX
2177 /*
2178 * On source VM, we don't need to update the received bitmap since
2179 * we don't even have one.
2180 */
2181 if (rb->receivedmap) {
2182 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2183 length >> qemu_target_page_bits());
2184 }
2185
03acb4e9 2186 return ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
2187}
2188
84593a08
PX
2189/*
2190 * For every allocation, we will try not to crash the VM if the
2191 * allocation failed.
2192 */
2193static int xbzrle_init(void)
2194{
2195 Error *local_err = NULL;
2196
2197 if (!migrate_use_xbzrle()) {
2198 return 0;
2199 }
2200
2201 XBZRLE_cache_lock();
2202
2203 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2204 if (!XBZRLE.zero_target_page) {
2205 error_report("%s: Error allocating zero page", __func__);
2206 goto err_out;
2207 }
2208
2209 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2210 TARGET_PAGE_SIZE, &local_err);
2211 if (!XBZRLE.cache) {
2212 error_report_err(local_err);
2213 goto free_zero_page;
2214 }
2215
2216 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2217 if (!XBZRLE.encoded_buf) {
2218 error_report("%s: Error allocating encoded_buf", __func__);
2219 goto free_cache;
2220 }
2221
2222 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2223 if (!XBZRLE.current_buf) {
2224 error_report("%s: Error allocating current_buf", __func__);
2225 goto free_encoded_buf;
2226 }
2227
2228 /* We are all good */
2229 XBZRLE_cache_unlock();
2230 return 0;
2231
2232free_encoded_buf:
2233 g_free(XBZRLE.encoded_buf);
2234 XBZRLE.encoded_buf = NULL;
2235free_cache:
2236 cache_fini(XBZRLE.cache);
2237 XBZRLE.cache = NULL;
2238free_zero_page:
2239 g_free(XBZRLE.zero_target_page);
2240 XBZRLE.zero_target_page = NULL;
2241err_out:
2242 XBZRLE_cache_unlock();
2243 return -ENOMEM;
2244}
2245
53518d94 2246static int ram_state_init(RAMState **rsp)
56e93d26 2247{
7d00ee6a
PX
2248 *rsp = g_try_new0(RAMState, 1);
2249
2250 if (!*rsp) {
2251 error_report("%s: Init ramstate fail", __func__);
2252 return -1;
2253 }
53518d94
JQ
2254
2255 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2256 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2257 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 2258
7d00ee6a 2259 /*
40c4d4a8
IR
2260 * Count the total number of pages used by ram blocks not including any
2261 * gaps due to alignment or unplugs.
03158519 2262 * This must match the initial values of the dirty bitmap.
7d00ee6a 2263 */
40c4d4a8 2264 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
7d00ee6a
PX
2265 ram_state_reset(*rsp);
2266
2267 return 0;
2268}
2269
d6eff5d7 2270static void ram_list_init_bitmaps(void)
7d00ee6a 2271{
002cad6b 2272 MigrationState *ms = migrate_get_current();
d6eff5d7
PX
2273 RAMBlock *block;
2274 unsigned long pages;
002cad6b 2275 uint8_t shift;
56e93d26 2276
0827b9e9
AA
2277 /* Skip setting bitmap if there is no RAM */
2278 if (ram_bytes_total()) {
002cad6b
PX
2279 shift = ms->clear_bitmap_shift;
2280 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
2281 error_report("clear_bitmap_shift (%u) too big, using "
2282 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
2283 shift = CLEAR_BITMAP_SHIFT_MAX;
2284 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
2285 error_report("clear_bitmap_shift (%u) too small, using "
2286 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
2287 shift = CLEAR_BITMAP_SHIFT_MIN;
2288 }
2289
fbd162e6 2290 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d6eff5d7 2291 pages = block->max_length >> TARGET_PAGE_BITS;
03158519
WY
2292 /*
2293 * The initial dirty bitmap for migration must be set with all
2294 * ones to make sure we'll migrate every guest RAM page to
2295 * destination.
40c4d4a8
IR
2296 * Here we set RAMBlock.bmap all to 1 because when restarting a
2297 * new migration after a failed migration, ram_list.
2298 * dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't include the whole
2299 * guest memory.
03158519 2300 */
6b6712ef 2301 block->bmap = bitmap_new(pages);
40c4d4a8 2302 bitmap_set(block->bmap, 0, pages);
002cad6b
PX
2303 block->clear_bmap_shift = shift;
2304 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
0827b9e9 2305 }
f3f491fc 2306 }
d6eff5d7
PX
2307}
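
/*
 * Worked example for the clear_bmap sizing above, assuming each clear_bmap
 * bit covers 1 << clear_bmap_shift target pages: with 4 KiB target pages
 * and a shift of 18, one bit covers 2^18 * 4 KiB = 1 GiB of guest RAM, so
 * a 16 GiB RAMBlock needs only 16 bits in its clear_bmap.
 */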
2308
2309static void ram_init_bitmaps(RAMState *rs)
2310{
2311 /* For memory_global_dirty_log_start below. */
2312 qemu_mutex_lock_iothread();
2313 qemu_mutex_lock_ramlist();
f3f491fc 2314
89ac5a1d
DDAG
2315 WITH_RCU_READ_LOCK_GUARD() {
2316 ram_list_init_bitmaps();
2317 memory_global_dirty_log_start();
2318 migration_bitmap_sync_precopy(rs);
2319 }
56e93d26 2320 qemu_mutex_unlock_ramlist();
49877834 2321 qemu_mutex_unlock_iothread();
d6eff5d7
PX
2322}
2323
2324static int ram_init_all(RAMState **rsp)
2325{
2326 if (ram_state_init(rsp)) {
2327 return -1;
2328 }
2329
2330 if (xbzrle_init()) {
2331 ram_state_cleanup(rsp);
2332 return -1;
2333 }
2334
2335 ram_init_bitmaps(*rsp);
a91246c9
HZ
2336
2337 return 0;
2338}
2339
08614f34
PX
2340static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2341{
2342 RAMBlock *block;
2343 uint64_t pages = 0;
2344
2345 /*
2346 * Postcopy is not using xbzrle/compression, so no need for that.
2347 * Also, since the source is already halted, we don't need to care
2348 * about dirty page logging either.
2349 */
2350
fbd162e6 2351 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
08614f34
PX
2352 pages += bitmap_count_one(block->bmap,
2353 block->used_length >> TARGET_PAGE_BITS);
2354 }
2355
2356 /* This may not be aligned with current bitmaps. Recalculate. */
2357 rs->migration_dirty_pages = pages;
2358
2359 rs->last_seen_block = NULL;
2360 rs->last_sent_block = NULL;
2361 rs->last_page = 0;
2362 rs->last_version = ram_list.version;
2363 /*
2364 * Disable the bulk stage, otherwise we'll resend the whole RAM no
2365 * matter what we have sent.
2366 */
2367 rs->ram_bulk_stage = false;
2368
2369 /* Update RAMState cache of output QEMUFile */
2370 rs->f = out;
2371
2372 trace_ram_state_resume_prepare(pages);
2373}
2374
6bcb05fc
WW
2375/*
2376 * This function clears bits of the free pages reported by the caller from the
2377 * migration dirty bitmap. @addr is the host address corresponding to the
2378 * start of the continuous guest free pages, and @len is the total bytes of
2379 * those pages.
2380 */
2381void qemu_guest_free_page_hint(void *addr, size_t len)
2382{
2383 RAMBlock *block;
2384 ram_addr_t offset;
2385 size_t used_len, start, npages;
2386 MigrationState *s = migrate_get_current();
2387
2388 /* This function is currently expected to be used during live migration */
2389 if (!migration_is_setup_or_active(s->state)) {
2390 return;
2391 }
2392
2393 for (; len > 0; len -= used_len, addr += used_len) {
2394 block = qemu_ram_block_from_host(addr, false, &offset);
2395 if (unlikely(!block || offset >= block->used_length)) {
2396 /*
2397 * The implementation might not support RAMBlock resize during
2398 * live migration, but it could happen in theory with future
2399 * updates. So we add a check here to capture that case.
2400 */
2401 error_report_once("%s unexpected error", __func__);
2402 return;
2403 }
2404
2405 if (len <= block->used_length - offset) {
2406 used_len = len;
2407 } else {
2408 used_len = block->used_length - offset;
2409 }
2410
2411 start = offset >> TARGET_PAGE_BITS;
2412 npages = used_len >> TARGET_PAGE_BITS;
2413
2414 qemu_mutex_lock(&ram_state->bitmap_mutex);
2415 ram_state->migration_dirty_pages -=
2416 bitmap_count_one_with_offset(block->bmap, start, npages);
2417 bitmap_clear(block->bmap, start, npages);
2418 qemu_mutex_unlock(&ram_state->bitmap_mutex);
2419 }
2420}
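
/*
 * Illustration of the arithmetic above (not part of the original file):
 * with 4 KiB target pages, a free-page hint at addr = block->host + 0x8000
 * with len = 0x4000 that fits inside one RAMBlock gives offset = 0x8000,
 * start = 0x8000 >> 12 = 8 and npages = 0x4000 >> 12 = 4, so dirty bits
 * 8..11 of that block are cleared and migration_dirty_pages drops by the
 * number of those bits that were actually set.
 */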
2421
3d0684b2
JQ
2422/*
2423 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
2424 * a long-running RCU critical section. When rcu-reclaims in the code
2425 * start to become numerous it will be necessary to reduce the
2426 * granularity of these critical sections.
2427 */
2428
3d0684b2
JQ
2429/**
2430 * ram_save_setup: Setup RAM for migration
2431 *
2432 * Returns zero to indicate success and negative for error
2433 *
2434 * @f: QEMUFile where to send the data
2435 * @opaque: RAMState pointer
2436 */
a91246c9
HZ
2437static int ram_save_setup(QEMUFile *f, void *opaque)
2438{
53518d94 2439 RAMState **rsp = opaque;
a91246c9
HZ
2440 RAMBlock *block;
2441
dcaf446e
XG
2442 if (compress_threads_save_setup()) {
2443 return -1;
2444 }
2445
a91246c9
HZ
2446 /* migration has already setup the bitmap, reuse it. */
2447 if (!migration_in_colo_state()) {
7d00ee6a 2448 if (ram_init_all(rsp) != 0) {
dcaf446e 2449 compress_threads_save_cleanup();
a91246c9 2450 return -1;
53518d94 2451 }
a91246c9 2452 }
53518d94 2453 (*rsp)->f = f;
a91246c9 2454
0e6ebd48
DDAG
2455 WITH_RCU_READ_LOCK_GUARD() {
2456 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
56e93d26 2457
0e6ebd48
DDAG
2458 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2459 qemu_put_byte(f, strlen(block->idstr));
2460 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2461 qemu_put_be64(f, block->used_length);
2462 if (migrate_postcopy_ram() && block->page_size !=
2463 qemu_host_page_size) {
2464 qemu_put_be64(f, block->page_size);
2465 }
2466 if (migrate_ignore_shared()) {
2467 qemu_put_be64(f, block->mr->addr);
2468 }
fbd162e6 2469 }
56e93d26
JQ
2470 }
2471
56e93d26
JQ
2472 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2473 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2474
99f2c6fb 2475 multifd_send_sync_main(f);
56e93d26 2476 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 2477 qemu_fflush(f);
56e93d26
JQ
2478
2479 return 0;
2480}
2481
3d0684b2
JQ
2482/**
2483 * ram_save_iterate: iterative stage for migration
2484 *
2485 * Returns zero to indicate success and negative for error
2486 *
2487 * @f: QEMUFile where to send the data
2488 * @opaque: RAMState pointer
2489 */
56e93d26
JQ
2490static int ram_save_iterate(QEMUFile *f, void *opaque)
2491{
53518d94
JQ
2492 RAMState **temp = opaque;
2493 RAMState *rs = *temp;
3d4095b2 2494 int ret = 0;
56e93d26
JQ
2495 int i;
2496 int64_t t0;
5c90308f 2497 int done = 0;
56e93d26 2498
b2557345
PL
2499 if (blk_mig_bulk_active()) {
2500 /* Avoid transferring ram during bulk phase of block migration as
2501 * the bulk phase will usually take a long time and transferring
2502 * ram updates during that time is pointless. */
2503 goto out;
2504 }
2505
89ac5a1d
DDAG
2506 WITH_RCU_READ_LOCK_GUARD() {
2507 if (ram_list.version != rs->last_version) {
2508 ram_state_reset(rs);
2509 }
56e93d26 2510
89ac5a1d
DDAG
2511 /* Read version before ram_list.blocks */
2512 smp_rmb();
56e93d26 2513
89ac5a1d 2514 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
56e93d26 2515
89ac5a1d
DDAG
2516 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2517 i = 0;
2518 while ((ret = qemu_file_rate_limit(f)) == 0 ||
2519 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2520 int pages;
e03a34f8 2521
89ac5a1d
DDAG
2522 if (qemu_file_get_error(f)) {
2523 break;
2524 }
e8f3735f 2525
89ac5a1d
DDAG
2526 pages = ram_find_and_save_block(rs, false);
2527 /* no more pages to send */
2528 if (pages == 0) {
2529 done = 1;
2530 break;
2531 }
e8f3735f 2532
89ac5a1d
DDAG
2533 if (pages < 0) {
2534 qemu_file_set_error(f, pages);
56e93d26
JQ
2535 break;
2536 }
89ac5a1d
DDAG
2537
2538 rs->target_page_count += pages;
2539
644acf99
WY
2540 /*
2541 * During postcopy, it is necessary to make sure one whole host
2542 * page is sent in one chunk.
2543 */
2544 if (migrate_postcopy_ram()) {
2545 flush_compressed_data(rs);
2546 }
2547
89ac5a1d
DDAG
2548 /*
2549 * we want to check in the 1st loop, just in case it was the 1st
2550 * time and we had to sync the dirty bitmap.
2551 * qemu_clock_get_ns() is a bit expensive, so we only check each
2552 * some iterations
2553 */
2554 if ((i & 63) == 0) {
2555 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
2556 1000000;
2557 if (t1 > MAX_WAIT) {
2558 trace_ram_save_iterate_big_wait(t1, i);
2559 break;
2560 }
2561 }
2562 i++;
56e93d26 2563 }
56e93d26 2564 }
56e93d26
JQ
2565
2566 /*
2567 * Must occur before EOS (or any QEMUFile operation)
2568 * because of RDMA protocol.
2569 */
2570 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2571
b2557345 2572out:
b69a0227
JQ
2573 if (ret >= 0
2574 && migration_is_setup_or_active(migrate_get_current()->state)) {
99f2c6fb 2575 multifd_send_sync_main(rs->f);
3d4095b2
JQ
2576 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2577 qemu_fflush(f);
2578 ram_counters.transferred += 8;
56e93d26 2579
3d4095b2
JQ
2580 ret = qemu_file_get_error(f);
2581 }
56e93d26
JQ
2582 if (ret < 0) {
2583 return ret;
2584 }
2585
5c90308f 2586 return done;
56e93d26
JQ
2587}
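
/*
 * Timing detail for the loop above: the elapsed time is only sampled once
 * every 64 iterations ((i & 63) == 0) because qemu_clock_get_ns() is
 * relatively expensive, and a single call bails out once more than
 * MAX_WAIT (50 ms) has been spent, so one invocation never runs much
 * longer than that plus the cost of the final burst of pages.
 */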
2588
3d0684b2
JQ
2589/**
2590 * ram_save_complete: function called to send the remaining amount of ram
2591 *
e8f3735f 2592 * Returns zero to indicate success or negative on error
3d0684b2
JQ
2593 *
2594 * Called with iothread lock
2595 *
2596 * @f: QEMUFile where to send the data
2597 * @opaque: RAMState pointer
2598 */
56e93d26
JQ
2599static int ram_save_complete(QEMUFile *f, void *opaque)
2600{
53518d94
JQ
2601 RAMState **temp = opaque;
2602 RAMState *rs = *temp;
e8f3735f 2603 int ret = 0;
6f37bb8b 2604
89ac5a1d
DDAG
2605 WITH_RCU_READ_LOCK_GUARD() {
2606 if (!migration_in_postcopy()) {
2607 migration_bitmap_sync_precopy(rs);
2608 }
56e93d26 2609
89ac5a1d 2610 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
56e93d26 2611
89ac5a1d 2612 /* try transferring iterative blocks of memory */
56e93d26 2613
89ac5a1d
DDAG
2614 /* flush all remaining blocks regardless of rate limiting */
2615 while (true) {
2616 int pages;
56e93d26 2617
89ac5a1d
DDAG
2618 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
2619 /* no more blocks to send */
2620 if (pages == 0) {
2621 break;
2622 }
2623 if (pages < 0) {
2624 ret = pages;
2625 break;
2626 }
e8f3735f 2627 }
56e93d26 2628
89ac5a1d
DDAG
2629 flush_compressed_data(rs);
2630 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2631 }
d09a6fde 2632
3d4095b2 2633 if (ret >= 0) {
99f2c6fb 2634 multifd_send_sync_main(rs->f);
3d4095b2
JQ
2635 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2636 qemu_fflush(f);
2637 }
56e93d26 2638
e8f3735f 2639 return ret;
56e93d26
JQ
2640}
2641
c31b098f 2642static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
47995026
VSO
2643 uint64_t *res_precopy_only,
2644 uint64_t *res_compatible,
2645 uint64_t *res_postcopy_only)
56e93d26 2646{
53518d94
JQ
2647 RAMState **temp = opaque;
2648 RAMState *rs = *temp;
56e93d26
JQ
2649 uint64_t remaining_size;
2650
9edabd4d 2651 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 2652
5727309d 2653 if (!migration_in_postcopy() &&
663e6c1d 2654 remaining_size < max_size) {
56e93d26 2655 qemu_mutex_lock_iothread();
89ac5a1d
DDAG
2656 WITH_RCU_READ_LOCK_GUARD() {
2657 migration_bitmap_sync_precopy(rs);
2658 }
56e93d26 2659 qemu_mutex_unlock_iothread();
9edabd4d 2660 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 2661 }
c31b098f 2662
86e1167e
VSO
2663 if (migrate_postcopy_ram()) {
2664 /* We can do postcopy, and all the data is postcopiable */
47995026 2665 *res_compatible += remaining_size;
86e1167e 2666 } else {
47995026 2667 *res_precopy_only += remaining_size;
86e1167e 2668 }
56e93d26
JQ
2669}
2670
2671static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2672{
2673 unsigned int xh_len;
2674 int xh_flags;
063e760a 2675 uint8_t *loaded_data;
56e93d26 2676
56e93d26
JQ
2677 /* extract RLE header */
2678 xh_flags = qemu_get_byte(f);
2679 xh_len = qemu_get_be16(f);
2680
2681 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2682 error_report("Failed to load XBZRLE page - wrong compression!");
2683 return -1;
2684 }
2685
2686 if (xh_len > TARGET_PAGE_SIZE) {
2687 error_report("Failed to load XBZRLE page - len overflow!");
2688 return -1;
2689 }
f265e0e4 2690 loaded_data = XBZRLE.decoded_buf;
56e93d26 2691 /* load data and decode */
f265e0e4 2692 /* it can change loaded_data to point to an internal buffer */
063e760a 2693 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
2694
2695 /* decode RLE */
063e760a 2696 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
2697 TARGET_PAGE_SIZE) == -1) {
2698 error_report("Failed to load XBZRLE page - decode error!");
2699 return -1;
2700 }
2701
2702 return 0;
2703}
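
/*
 * The on-the-wire layout consumed above, as read by the qemu_get_* calls:
 * one flags byte that must equal ENCODING_FLAG_XBZRLE, a big-endian 16-bit
 * encoded length capped at TARGET_PAGE_SIZE, then that many bytes of
 * XBZRLE-encoded data, which are applied as a delta on top of the previous
 * contents of the page at 'host'.
 */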
2704
3d0684b2
JQ
2705/**
2706 * ram_block_from_stream: read a RAMBlock id from the migration stream
2707 *
2708 * Must be called from within a rcu critical section.
2709 *
56e93d26 2710 * Returns a pointer from within the RCU-protected ram_list.
a7180877 2711 *
3d0684b2
JQ
2712 * @f: QEMUFile where to read the data from
2713 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 2714 */
3d0684b2 2715static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
2716{
2717 static RAMBlock *block = NULL;
2718 char id[256];
2719 uint8_t len;
2720
2721 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 2722 if (!block) {
56e93d26
JQ
2723 error_report("Ack, bad migration stream!");
2724 return NULL;
2725 }
4c4bad48 2726 return block;
56e93d26
JQ
2727 }
2728
2729 len = qemu_get_byte(f);
2730 qemu_get_buffer(f, (uint8_t *)id, len);
2731 id[len] = 0;
2732
e3dd7493 2733 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
2734 if (!block) {
2735 error_report("Can't find block %s", id);
2736 return NULL;
56e93d26
JQ
2737 }
2738
fbd162e6 2739 if (ramblock_is_ignored(block)) {
b895de50
CLG
2740 error_report("block %s should not be migrated !", id);
2741 return NULL;
2742 }
2743
4c4bad48
HZ
2744 return block;
2745}
2746
2747static inline void *host_from_ram_block_offset(RAMBlock *block,
2748 ram_addr_t offset)
2749{
2750 if (!offset_in_ramblock(block, offset)) {
2751 return NULL;
2752 }
2753
2754 return block->host + offset;
56e93d26
JQ
2755}
2756
13af18f2 2757static inline void *colo_cache_from_block_offset(RAMBlock *block,
8af66371 2758 ram_addr_t offset, bool record_bitmap)
13af18f2
ZC
2759{
2760 if (!offset_in_ramblock(block, offset)) {
2761 return NULL;
2762 }
2763 if (!block->colo_cache) {
2764 error_report("%s: colo_cache is NULL in block :%s",
2765 __func__, block->idstr);
2766 return NULL;
2767 }
7d9acafa
ZC
2768
2769 /*
2770 * During a COLO checkpoint, we need a bitmap of these migrated pages.
2771 * It helps us to decide which pages in the ram cache should be flushed
2772 * into VM's RAM later.
2773 */
8af66371
HZ
2774 if (record_bitmap &&
2775 !test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
7d9acafa
ZC
2776 ram_state->migration_dirty_pages++;
2777 }
13af18f2
ZC
2778 return block->colo_cache + offset;
2779}
2780
3d0684b2
JQ
2781/**
2782 * ram_handle_compressed: handle the zero page case
2783 *
56e93d26
JQ
2784 * If a page (or a whole RDMA chunk) has been
2785 * determined to be zero, then zap it.
3d0684b2
JQ
2786 *
2787 * @host: host address for the zero page
2788 * @ch: what the page is filled from. We only support zero
2789 * @size: size of the zero page
56e93d26
JQ
2790 */
2791void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2792{
2793 if (ch != 0 || !is_zero_range(host, size)) {
2794 memset(host, ch, size);
2795 }
2796}
2797
797ca154
XG
2798/* return the size after decompression, or negative value on error */
2799static int
2800qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
2801 const uint8_t *source, size_t source_len)
2802{
2803 int err;
2804
2805 err = inflateReset(stream);
2806 if (err != Z_OK) {
2807 return -1;
2808 }
2809
2810 stream->avail_in = source_len;
2811 stream->next_in = (uint8_t *)source;
2812 stream->avail_out = dest_len;
2813 stream->next_out = dest;
2814
2815 err = inflate(stream, Z_NO_FLUSH);
2816 if (err != Z_STREAM_END) {
2817 return -1;
2818 }
2819
2820 return stream->total_out;
2821}
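
/*
 * Note on the zlib stream lifecycle: each per-thread z_stream is set up
 * once with inflateInit() in compress_threads_load_setup() below and torn
 * down with inflateEnd() in compress_threads_load_cleanup(); the helper
 * above only calls inflateReset() per page, which avoids re-allocating the
 * inflate state for every compressed page.
 */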
2822
56e93d26
JQ
2823static void *do_data_decompress(void *opaque)
2824{
2825 DecompressParam *param = opaque;
2826 unsigned long pagesize;
33d151f4 2827 uint8_t *des;
34ab9e97 2828 int len, ret;
56e93d26 2829
33d151f4 2830 qemu_mutex_lock(&param->mutex);
90e56fb4 2831 while (!param->quit) {
33d151f4
LL
2832 if (param->des) {
2833 des = param->des;
2834 len = param->len;
2835 param->des = 0;
2836 qemu_mutex_unlock(&param->mutex);
2837
56e93d26 2838 pagesize = TARGET_PAGE_SIZE;
34ab9e97
XG
2839
2840 ret = qemu_uncompress_data(&param->stream, des, pagesize,
2841 param->compbuf, len);
f548222c 2842 if (ret < 0 && migrate_get_current()->decompress_error_check) {
34ab9e97
XG
2843 error_report("decompress data failed");
2844 qemu_file_set_error(decomp_file, ret);
2845 }
73a8912b 2846
33d151f4
LL
2847 qemu_mutex_lock(&decomp_done_lock);
2848 param->done = true;
2849 qemu_cond_signal(&decomp_done_cond);
2850 qemu_mutex_unlock(&decomp_done_lock);
2851
2852 qemu_mutex_lock(&param->mutex);
2853 } else {
2854 qemu_cond_wait(&param->cond, &param->mutex);
2855 }
56e93d26 2856 }
33d151f4 2857 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
2858
2859 return NULL;
2860}
2861
34ab9e97 2862static int wait_for_decompress_done(void)
5533b2e9
LL
2863{
2864 int idx, thread_count;
2865
2866 if (!migrate_use_compression()) {
34ab9e97 2867 return 0;
5533b2e9
LL
2868 }
2869
2870 thread_count = migrate_decompress_threads();
2871 qemu_mutex_lock(&decomp_done_lock);
2872 for (idx = 0; idx < thread_count; idx++) {
2873 while (!decomp_param[idx].done) {
2874 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2875 }
2876 }
2877 qemu_mutex_unlock(&decomp_done_lock);
34ab9e97 2878 return qemu_file_get_error(decomp_file);
5533b2e9
LL
2879}
2880
f0afa331 2881static void compress_threads_load_cleanup(void)
56e93d26
JQ
2882{
2883 int i, thread_count;
2884
3416ab5b
JQ
2885 if (!migrate_use_compression()) {
2886 return;
2887 }
56e93d26
JQ
2888 thread_count = migrate_decompress_threads();
2889 for (i = 0; i < thread_count; i++) {
797ca154
XG
2890 /*
2891 * we use it as an indicator which shows if the thread is
2892 * properly init'd or not
2893 */
2894 if (!decomp_param[i].compbuf) {
2895 break;
2896 }
2897
56e93d26 2898 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 2899 decomp_param[i].quit = true;
56e93d26
JQ
2900 qemu_cond_signal(&decomp_param[i].cond);
2901 qemu_mutex_unlock(&decomp_param[i].mutex);
2902 }
2903 for (i = 0; i < thread_count; i++) {
797ca154
XG
2904 if (!decomp_param[i].compbuf) {
2905 break;
2906 }
2907
56e93d26
JQ
2908 qemu_thread_join(decompress_threads + i);
2909 qemu_mutex_destroy(&decomp_param[i].mutex);
2910 qemu_cond_destroy(&decomp_param[i].cond);
797ca154 2911 inflateEnd(&decomp_param[i].stream);
56e93d26 2912 g_free(decomp_param[i].compbuf);
797ca154 2913 decomp_param[i].compbuf = NULL;
56e93d26
JQ
2914 }
2915 g_free(decompress_threads);
2916 g_free(decomp_param);
56e93d26
JQ
2917 decompress_threads = NULL;
2918 decomp_param = NULL;
34ab9e97 2919 decomp_file = NULL;
56e93d26
JQ
2920}
2921
34ab9e97 2922static int compress_threads_load_setup(QEMUFile *f)
797ca154
XG
2923{
2924 int i, thread_count;
2925
2926 if (!migrate_use_compression()) {
2927 return 0;
2928 }
2929
2930 thread_count = migrate_decompress_threads();
2931 decompress_threads = g_new0(QemuThread, thread_count);
2932 decomp_param = g_new0(DecompressParam, thread_count);
2933 qemu_mutex_init(&decomp_done_lock);
2934 qemu_cond_init(&decomp_done_cond);
34ab9e97 2935 decomp_file = f;
797ca154
XG
2936 for (i = 0; i < thread_count; i++) {
2937 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
2938 goto exit;
2939 }
2940
2941 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2942 qemu_mutex_init(&decomp_param[i].mutex);
2943 qemu_cond_init(&decomp_param[i].cond);
2944 decomp_param[i].done = true;
2945 decomp_param[i].quit = false;
2946 qemu_thread_create(decompress_threads + i, "decompress",
2947 do_data_decompress, decomp_param + i,
2948 QEMU_THREAD_JOINABLE);
2949 }
2950 return 0;
2951exit:
2952 compress_threads_load_cleanup();
2953 return -1;
2954}
2955
c1bc6626 2956static void decompress_data_with_multi_threads(QEMUFile *f,
56e93d26
JQ
2957 void *host, int len)
2958{
2959 int idx, thread_count;
2960
2961 thread_count = migrate_decompress_threads();
73a8912b 2962 qemu_mutex_lock(&decomp_done_lock);
56e93d26
JQ
2963 while (true) {
2964 for (idx = 0; idx < thread_count; idx++) {
73a8912b 2965 if (decomp_param[idx].done) {
33d151f4
LL
2966 decomp_param[idx].done = false;
2967 qemu_mutex_lock(&decomp_param[idx].mutex);
c1bc6626 2968 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
56e93d26
JQ
2969 decomp_param[idx].des = host;
2970 decomp_param[idx].len = len;
33d151f4
LL
2971 qemu_cond_signal(&decomp_param[idx].cond);
2972 qemu_mutex_unlock(&decomp_param[idx].mutex);
56e93d26
JQ
2973 break;
2974 }
2975 }
2976 if (idx < thread_count) {
2977 break;
73a8912b
LL
2978 } else {
2979 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
56e93d26
JQ
2980 }
2981 }
73a8912b 2982 qemu_mutex_unlock(&decomp_done_lock);
56e93d26
JQ
2983}
2984
13af18f2
ZC
2985/*
2986 * colo cache: this is for the secondary VM, we cache the whole
2987 * memory of the secondary VM, and the global lock needs to be held
2988 * to call this helper.
2989 */
2990int colo_init_ram_cache(void)
2991{
2992 RAMBlock *block;
2993
44901b5a
PB
2994 WITH_RCU_READ_LOCK_GUARD() {
2995 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2996 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
2997 NULL,
2998 false);
2999 if (!block->colo_cache) {
3000 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3001 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3002 block->used_length);
3003 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3004 if (block->colo_cache) {
3005 qemu_anon_ram_free(block->colo_cache, block->used_length);
3006 block->colo_cache = NULL;
3007 }
89ac5a1d 3008 }
44901b5a 3009 return -errno;
89ac5a1d 3010 }
13af18f2 3011 }
13af18f2 3012 }
44901b5a 3013
7d9acafa
ZC
3014 /*
3015 * Record the dirty pages that were sent by PVM; we use this dirty bitmap
3016 * to decide which pages in the cache should be flushed into SVM's RAM. Here
3017 * we use the same name 'ram_bitmap' as for migration.
3018 */
3019 if (ram_bytes_total()) {
3020 RAMBlock *block;
3021
fbd162e6 3022 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa 3023 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
7d9acafa 3024 block->bmap = bitmap_new(pages);
7d9acafa
ZC
3025 }
3026 }
7d9acafa 3027
0393031a 3028 ram_state_init(&ram_state);
13af18f2 3029 return 0;
13af18f2
ZC
3030}
3031
0393031a
HZ
3032/* TODO: duplicated with ram_init_bitmaps */
3033void colo_incoming_start_dirty_log(void)
3034{
3035 RAMBlock *block = NULL;
3036 /* For memory_global_dirty_log_start below. */
3037 qemu_mutex_lock_iothread();
3038 qemu_mutex_lock_ramlist();
3039
3040 memory_global_dirty_log_sync();
3041 WITH_RCU_READ_LOCK_GUARD() {
3042 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3043 ramblock_sync_dirty_bitmap(ram_state, block);
3044 /* Discard this dirty bitmap record */
3045 bitmap_zero(block->bmap, block->max_length >> TARGET_PAGE_BITS);
3046 }
3047 memory_global_dirty_log_start();
3048 }
3049 ram_state->migration_dirty_pages = 0;
3050 qemu_mutex_unlock_ramlist();
3051 qemu_mutex_unlock_iothread();
3052}
3053
13af18f2
ZC
3054/* The global lock needs to be held to call this helper */
3055void colo_release_ram_cache(void)
3056{
3057 RAMBlock *block;
3058
d1955d22 3059 memory_global_dirty_log_stop();
fbd162e6 3060 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3061 g_free(block->bmap);
3062 block->bmap = NULL;
3063 }
3064
89ac5a1d
DDAG
3065 WITH_RCU_READ_LOCK_GUARD() {
3066 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3067 if (block->colo_cache) {
3068 qemu_anon_ram_free(block->colo_cache, block->used_length);
3069 block->colo_cache = NULL;
3070 }
13af18f2
ZC
3071 }
3072 }
0393031a 3073 ram_state_cleanup(&ram_state);
13af18f2
ZC
3074}
3075
f265e0e4
JQ
3076/**
3077 * ram_load_setup: Setup RAM for migration incoming side
3078 *
3079 * Returns zero to indicate success and negative for error
3080 *
3081 * @f: QEMUFile where to receive the data
3082 * @opaque: RAMState pointer
3083 */
3084static int ram_load_setup(QEMUFile *f, void *opaque)
3085{
34ab9e97 3086 if (compress_threads_load_setup(f)) {
797ca154
XG
3087 return -1;
3088 }
3089
f265e0e4 3090 xbzrle_load_setup();
f9494614 3091 ramblock_recv_map_init();
13af18f2 3092
f265e0e4
JQ
3093 return 0;
3094}
3095
3096static int ram_load_cleanup(void *opaque)
3097{
f9494614 3098 RAMBlock *rb;
56eb90af 3099
fbd162e6 3100 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
bd108a44 3101 qemu_ram_block_writeback(rb);
56eb90af
JH
3102 }
3103
f265e0e4 3104 xbzrle_load_cleanup();
f0afa331 3105 compress_threads_load_cleanup();
f9494614 3106
fbd162e6 3107 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
3108 g_free(rb->receivedmap);
3109 rb->receivedmap = NULL;
3110 }
13af18f2 3111
f265e0e4
JQ
3112 return 0;
3113}
3114
3d0684b2
JQ
3115/**
3116 * ram_postcopy_incoming_init: allocate postcopy data structures
3117 *
3118 * Returns 0 for success and negative if there was one error
3119 *
3120 * @mis: current migration incoming state
3121 *
3122 * Allocate data structures etc needed by incoming migration with
3123 * postcopy-ram. postcopy-ram's similarly names
3124 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
3125 */
3126int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3127{
c136180c 3128 return postcopy_ram_incoming_init(mis);
1caddf8a
DDAG
3129}
3130
3d0684b2
JQ
3131/**
3132 * ram_load_postcopy: load a page in postcopy case
3133 *
3134 * Returns 0 for success or -errno in case of error
3135 *
a7180877
DDAG
3136 * Called in postcopy mode by ram_load().
3137 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
3138 *
3139 * @f: QEMUFile where to send the data
a7180877
DDAG
3140 */
3141static int ram_load_postcopy(QEMUFile *f)
3142{
3143 int flags = 0, ret = 0;
3144 bool place_needed = false;
1aa83678 3145 bool matches_target_page_size = false;
a7180877
DDAG
3146 MigrationIncomingState *mis = migration_incoming_get_current();
3147 /* Temporary page that is later 'placed' */
3414322a 3148 void *postcopy_host_page = mis->postcopy_tmp_page;
91ba442f 3149 void *this_host = NULL;
a3b6ff6d 3150 bool all_zero = false;
4cbb3c63 3151 int target_pages = 0;
a7180877
DDAG
3152
3153 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3154 ram_addr_t addr;
3155 void *host = NULL;
3156 void *page_buffer = NULL;
3157 void *place_source = NULL;
df9ff5e1 3158 RAMBlock *block = NULL;
a7180877 3159 uint8_t ch;
644acf99 3160 int len;
a7180877
DDAG
3161
3162 addr = qemu_get_be64(f);
7a9ddfbf
PX
3163
3164 /*
3165 * If qemu file error, we should stop here, and then "addr"
3166 * may be invalid
3167 */
3168 ret = qemu_file_get_error(f);
3169 if (ret) {
3170 break;
3171 }
3172
a7180877
DDAG
3173 flags = addr & ~TARGET_PAGE_MASK;
3174 addr &= TARGET_PAGE_MASK;
3175
3176 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3177 place_needed = false;
644acf99
WY
3178 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
3179 RAM_SAVE_FLAG_COMPRESS_PAGE)) {
df9ff5e1 3180 block = ram_block_from_stream(f, flags);
4c4bad48
HZ
3181
3182 host = host_from_ram_block_offset(block, addr);
a7180877
DDAG
3183 if (!host) {
3184 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3185 ret = -EINVAL;
3186 break;
3187 }
4cbb3c63 3188 target_pages++;
1aa83678 3189 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
a7180877 3190 /*
28abd200
DDAG
3191 * Postcopy requires that we place whole host pages atomically;
3192 * these may be huge pages for RAMBlocks that are backed by
3193 * hugetlbfs.
a7180877
DDAG
3194 * To make it atomic, the data is read into a temporary page
3195 * that's moved into place later.
3196 * The migration protocol uses, possibly smaller, target-pages;
3197 * however, the source ensures it always sends all the components
91ba442f 3198 * of a host page in one chunk.
a7180877
DDAG
3199 */
3200 page_buffer = postcopy_host_page +
28abd200 3201 ((uintptr_t)host & (block->page_size - 1));
a7180877 3202 /* If all TP are zero then we can optimise the place */
e5e73b0f 3203 if (target_pages == 1) {
a7180877 3204 all_zero = true;
91ba442f
WY
3205 this_host = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
3206 block->page_size);
c53b7ddc
DDAG
3207 } else {
3208 /* not the 1st TP within the HP */
91ba442f
WY
3209 if (QEMU_ALIGN_DOWN((uintptr_t)host, block->page_size) !=
3210 (uintptr_t)this_host) {
3211 error_report("Non-same host page %p/%p",
3212 host, this_host);
c53b7ddc
DDAG
3213 ret = -EINVAL;
3214 break;
3215 }
a7180877
DDAG
3216 }
3217
3218 /*
3219 * If it's the last part of a host page then we place the host
3220 * page
3221 */
4cbb3c63
WY
3222 if (target_pages == (block->page_size / TARGET_PAGE_SIZE)) {
3223 place_needed = true;
3224 target_pages = 0;
3225 }
a7180877
DDAG
3226 place_source = postcopy_host_page;
3227 }
3228
3229 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 3230 case RAM_SAVE_FLAG_ZERO:
a7180877 3231 ch = qemu_get_byte(f);
2e36bc1b
WY
3232 /*
3233 * We can skip setting page_buffer when
3234 * this is a zero page and (block->page_size == TARGET_PAGE_SIZE).
3235 */
3236 if (ch || !matches_target_page_size) {
3237 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3238 }
a7180877
DDAG
3239 if (ch) {
3240 all_zero = false;
3241 }
3242 break;
3243
3244 case RAM_SAVE_FLAG_PAGE:
3245 all_zero = false;
1aa83678
PX
3246 if (!matches_target_page_size) {
3247 /* For huge pages, we always use temporary buffer */
a7180877
DDAG
3248 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3249 } else {
1aa83678
PX
3250 /*
3251 * For small pages that matches target page size, we
3252 * avoid the qemu_file copy. Instead we directly use
3253 * the buffer of QEMUFile to place the page. Note: we
3254 * cannot do any QEMUFile operation before using that
3255 * buffer to make sure the buffer is valid when
3256 * placing the page.
a7180877
DDAG
3257 */
3258 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3259 TARGET_PAGE_SIZE);
3260 }
3261 break;
644acf99
WY
3262 case RAM_SAVE_FLAG_COMPRESS_PAGE:
3263 all_zero = false;
3264 len = qemu_get_be32(f);
3265 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3266 error_report("Invalid compressed data length: %d", len);
3267 ret = -EINVAL;
3268 break;
3269 }
3270 decompress_data_with_multi_threads(f, page_buffer, len);
3271 break;
3272
a7180877
DDAG
3273 case RAM_SAVE_FLAG_EOS:
3274 /* normal exit */
6df264ac 3275 multifd_recv_sync_main();
a7180877
DDAG
3276 break;
3277 default:
3278 error_report("Unknown combination of migration flags: %#x"
3279 " (postcopy mode)", flags);
3280 ret = -EINVAL;
7a9ddfbf
PX
3281 break;
3282 }
3283
644acf99
WY
3284 /* Got the whole host page, wait for decompress before placing. */
3285 if (place_needed) {
3286 ret |= wait_for_decompress_done();
3287 }
3288
7a9ddfbf
PX
3289 /* Detect for any possible file errors */
3290 if (!ret && qemu_file_get_error(f)) {
3291 ret = qemu_file_get_error(f);
a7180877
DDAG
3292 }
3293
7a9ddfbf 3294 if (!ret && place_needed) {
a7180877 3295 /* This gets called at the last target page in the host page */
91ba442f
WY
3296 void *place_dest = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
3297 block->page_size);
df9ff5e1 3298
a7180877 3299 if (all_zero) {
df9ff5e1 3300 ret = postcopy_place_page_zero(mis, place_dest,
8be4620b 3301 block);
a7180877 3302 } else {
df9ff5e1 3303 ret = postcopy_place_page(mis, place_dest,
8be4620b 3304 place_source, block);
a7180877
DDAG
3305 }
3306 }
a7180877
DDAG
3307 }
3308
3309 return ret;
3310}
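
/*
 * Summary of the host-page assembly above: incoming target pages are
 * normally staged in mis->postcopy_tmp_page (pages that already match the
 * target page size can be placed straight from the QEMUFile buffer), and
 * only once target_pages reaches block->page_size / TARGET_PAGE_SIZE is
 * the whole host page placed, via postcopy_place_page_zero() when every
 * component was zero and postcopy_place_page() otherwise, so that host
 * pages always appear atomically on the destination.
 */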
3311
acab30b8
DHB
3312static bool postcopy_is_advised(void)
3313{
3314 PostcopyState ps = postcopy_state_get();
3315 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3316}
3317
3318static bool postcopy_is_running(void)
3319{
3320 PostcopyState ps = postcopy_state_get();
3321 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3322}
3323
e6f4aa18
ZC
3324/*
3325 * Flush content of RAM cache into SVM's memory.
3326 * Only flush the pages that have been dirtied by PVM or SVM or both.
3327 */
3328static void colo_flush_ram_cache(void)
3329{
3330 RAMBlock *block = NULL;
3331 void *dst_host;
3332 void *src_host;
3333 unsigned long offset = 0;
3334
d1955d22 3335 memory_global_dirty_log_sync();
89ac5a1d
DDAG
3336 WITH_RCU_READ_LOCK_GUARD() {
3337 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3338 ramblock_sync_dirty_bitmap(ram_state, block);
3339 }
d1955d22 3340 }
d1955d22 3341
e6f4aa18 3342 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
89ac5a1d
DDAG
3343 WITH_RCU_READ_LOCK_GUARD() {
3344 block = QLIST_FIRST_RCU(&ram_list.blocks);
e6f4aa18 3345
89ac5a1d
DDAG
3346 while (block) {
3347 offset = migration_bitmap_find_dirty(ram_state, block, offset);
e6f4aa18 3348
8bba004c
AR
3349 if (((ram_addr_t)offset) << TARGET_PAGE_BITS
3350 >= block->used_length) {
89ac5a1d
DDAG
3351 offset = 0;
3352 block = QLIST_NEXT_RCU(block, next);
3353 } else {
3354 migration_bitmap_clear_dirty(ram_state, block, offset);
8bba004c
AR
3355 dst_host = block->host
3356 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
3357 src_host = block->colo_cache
3358 + (((ram_addr_t)offset) << TARGET_PAGE_BITS);
89ac5a1d
DDAG
3359 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
3360 }
e6f4aa18
ZC
3361 }
3362 }
e6f4aa18
ZC
3363 trace_colo_flush_ram_cache_end();
3364}
3365
10da4a36
WY
3366/**
3367 * ram_load_precopy: load pages in precopy case
3368 *
3369 * Returns 0 for success or -errno in case of error
3370 *
3371 * Called in precopy mode by ram_load().
3372 * rcu_read_lock is taken prior to this being called.
3373 *
3374 * @f: QEMUFile where to send the data
3375 */
3376static int ram_load_precopy(QEMUFile *f)
56e93d26 3377{
e65cec5e 3378 int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
ef08fb38 3379 /* ADVISE is earlier, it shows the source has the postcopy capability on */
acab30b8 3380 bool postcopy_advised = postcopy_is_advised();
edc60127
JQ
3381 if (!migrate_use_compression()) {
3382 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3383 }
a7180877 3384
10da4a36 3385 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
56e93d26 3386 ram_addr_t addr, total_ram_bytes;
0393031a 3387 void *host = NULL, *host_bak = NULL;
56e93d26
JQ
3388 uint8_t ch;
3389
e65cec5e
YK
3390 /*
3391 * Yield periodically to let main loop run, but an iteration of
3392 * the main loop is expensive, so do it each some iterations
3393 */
3394 if ((i & 32767) == 0 && qemu_in_coroutine()) {
3395 aio_co_schedule(qemu_get_current_aio_context(),
3396 qemu_coroutine_self());
3397 qemu_coroutine_yield();
3398 }
3399 i++;
3400
56e93d26
JQ
3401 addr = qemu_get_be64(f);
3402 flags = addr & ~TARGET_PAGE_MASK;
3403 addr &= TARGET_PAGE_MASK;
3404
edc60127
JQ
3405 if (flags & invalid_flags) {
3406 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3407 error_report("Received an unexpected compressed page");
3408 }
3409
3410 ret = -EINVAL;
3411 break;
3412 }
3413
bb890ed5 3414 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 3415 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4c4bad48
HZ
3416 RAMBlock *block = ram_block_from_stream(f, flags);
3417
0393031a 3418 host = host_from_ram_block_offset(block, addr);
13af18f2 3419 /*
0393031a
HZ
3420 * After going into COLO stage, we should not load the page
3421 * into SVM's memory directly, we put them into colo_cache firstly.
3422 * NOTE: We need to keep a copy of SVM's ram in colo_cache.
3423 * Previously, we copied all these memory in preparing stage of COLO
3424 * while we need to stop VM, which is a time-consuming process.
3425 * Here we optimize it by a trick, back-up every page while in
3426 * migration process while COLO is enabled, though it affects the
3427 * speed of the migration, but it obviously reduce the downtime of
3428 * back-up all SVM'S memory in COLO preparing stage.
13af18f2 3429 */
0393031a
HZ
3430 if (migration_incoming_colo_enabled()) {
3431 if (migration_incoming_in_colo_state()) {
3432 /* In COLO stage, put all pages into cache temporarily */
8af66371 3433 host = colo_cache_from_block_offset(block, addr, true);
0393031a
HZ
3434 } else {
3435 /*
 3436 * In the migration stage but before the COLO stage,
 3437 * put all pages into both the cache and the SVM's memory.
3438 */
8af66371 3439 host_bak = colo_cache_from_block_offset(block, addr, false);
0393031a 3440 }
13af18f2 3441 }
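            /*
             * At this point "host" is either the SVM's RAM or the colo
             * cache; "host_bak", when set, is the cache slot that receives
             * a backup copy of the page at the end of this iteration.
             */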
a776aa15
DDAG
3442 if (!host) {
3443 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3444 ret = -EINVAL;
3445 break;
3446 }
13af18f2
ZC
3447 if (!migration_incoming_in_colo_state()) {
3448 ramblock_recv_bitmap_set(block, host);
3449 }
3450
1db9d8e5 3451 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
3452 }
3453
56e93d26
JQ
3454 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3455 case RAM_SAVE_FLAG_MEM_SIZE:
3456 /* Synchronize RAM block list */
3457 total_ram_bytes = addr;
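            /*
             * Each block entry on the wire is: a 1-byte idstr length, the
             * idstr bytes and a be64 used_length, optionally followed by a
             * be64 page size (postcopy advised, non-default page size) and
             * a be64 GPA (when ignore-shared is in use).
             */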
3458 while (!ret && total_ram_bytes) {
3459 RAMBlock *block;
56e93d26
JQ
3460 char id[256];
3461 ram_addr_t length;
3462
3463 len = qemu_get_byte(f);
3464 qemu_get_buffer(f, (uint8_t *)id, len);
3465 id[len] = 0;
3466 length = qemu_get_be64(f);
3467
e3dd7493 3468 block = qemu_ram_block_by_name(id);
b895de50
CLG
3469 if (block && !qemu_ram_is_migratable(block)) {
3470 error_report("block %s should not be migrated !", id);
3471 ret = -EINVAL;
3472 } else if (block) {
e3dd7493
DDAG
3473 if (length != block->used_length) {
3474 Error *local_err = NULL;
56e93d26 3475
fa53a0e5 3476 ret = qemu_ram_resize(block, length,
e3dd7493
DDAG
3477 &local_err);
3478 if (local_err) {
3479 error_report_err(local_err);
56e93d26 3480 }
56e93d26 3481 }
ef08fb38
DDAG
 3482 /* For postcopy we need to check that hugepage sizes match */
3483 if (postcopy_advised &&
3484 block->page_size != qemu_host_page_size) {
3485 uint64_t remote_page_size = qemu_get_be64(f);
3486 if (remote_page_size != block->page_size) {
3487 error_report("Mismatched RAM page size %s "
3488 "(local) %zd != %" PRId64,
3489 id, block->page_size,
3490 remote_page_size);
3491 ret = -EINVAL;
3492 }
3493 }
fbd162e6
YK
3494 if (migrate_ignore_shared()) {
3495 hwaddr addr = qemu_get_be64(f);
fbd162e6
YK
3496 if (ramblock_is_ignored(block) &&
3497 block->mr->addr != addr) {
3498 error_report("Mismatched GPAs for block %s "
3499 "%" PRId64 "!= %" PRId64,
3500 id, (uint64_t)addr,
3501 (uint64_t)block->mr->addr);
3502 ret = -EINVAL;
3503 }
3504 }
e3dd7493
DDAG
3505 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
3506 block->idstr);
3507 } else {
56e93d26
JQ
3508 error_report("Unknown ramblock \"%s\", cannot "
3509 "accept migration", id);
3510 ret = -EINVAL;
3511 }
3512
3513 total_ram_bytes -= length;
3514 }
3515 break;
a776aa15 3516
bb890ed5 3517 case RAM_SAVE_FLAG_ZERO:
56e93d26
JQ
3518 ch = qemu_get_byte(f);
3519 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
3520 break;
a776aa15 3521
56e93d26 3522 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
3523 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
3524 break;
56e93d26 3525
a776aa15 3526 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
3527 len = qemu_get_be32(f);
3528 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3529 error_report("Invalid compressed data length: %d", len);
3530 ret = -EINVAL;
3531 break;
3532 }
c1bc6626 3533 decompress_data_with_multi_threads(f, host, len);
56e93d26 3534 break;
a776aa15 3535
56e93d26 3536 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
3537 if (load_xbzrle(f, addr, host) < 0) {
3538 error_report("Failed to decompress XBZRLE page at "
3539 RAM_ADDR_FMT, addr);
3540 ret = -EINVAL;
3541 break;
3542 }
3543 break;
3544 case RAM_SAVE_FLAG_EOS:
3545 /* normal exit */
6df264ac 3546 multifd_recv_sync_main();
56e93d26
JQ
3547 break;
3548 default:
3549 if (flags & RAM_SAVE_FLAG_HOOK) {
632e3a5c 3550 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
56e93d26
JQ
3551 } else {
3552 error_report("Unknown combination of migration flags: %#x",
3553 flags);
3554 ret = -EINVAL;
3555 }
3556 }
3557 if (!ret) {
3558 ret = qemu_file_get_error(f);
3559 }
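        /*
         * COLO, before entering the COLO stage: mirror the page we just
         * loaded into guest RAM into the colo_cache backup (host_bak) as
         * well, so the cache stays a faithful copy of the SVM's memory.
         */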
0393031a
HZ
3560 if (!ret && host_bak) {
3561 memcpy(host_bak, host, TARGET_PAGE_SIZE);
3562 }
56e93d26
JQ
3563 }
3564
ca1a6b70 3565 ret |= wait_for_decompress_done();
10da4a36
WY
3566 return ret;
3567}
3568
3569static int ram_load(QEMUFile *f, void *opaque, int version_id)
3570{
3571 int ret = 0;
3572 static uint64_t seq_iter;
3573 /*
 3574 * If the system is running in postcopy mode, page inserts into host memory
 3575 * must be atomic
3576 */
3577 bool postcopy_running = postcopy_is_running();
3578
3579 seq_iter++;
3580
3581 if (version_id != 4) {
3582 return -EINVAL;
3583 }
3584
3585 /*
3586 * This RCU critical section can be very long running.
 3587 * If RCU reclamation in this code becomes frequent, it will be
 3588 * necessary to reduce the granularity of this
 3589 * critical section.
3590 */
89ac5a1d
DDAG
3591 WITH_RCU_READ_LOCK_GUARD() {
3592 if (postcopy_running) {
3593 ret = ram_load_postcopy(f);
3594 } else {
3595 ret = ram_load_precopy(f);
3596 }
10da4a36 3597 }
55c4446b 3598 trace_ram_load_complete(ret, seq_iter);
e6f4aa18
ZC
3599
3600 if (!ret && migration_incoming_in_colo_state()) {
3601 colo_flush_ram_cache();
3602 }
56e93d26
JQ
3603 return ret;
3604}
3605
c6467627
VSO
3606static bool ram_has_postcopy(void *opaque)
3607{
469dd51b 3608 RAMBlock *rb;
fbd162e6 3609 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
469dd51b
JH
3610 if (ramblock_is_pmem(rb)) {
 3611 info_report("Block: %s, host: %p is an nvdimm memory region, postcopy "
 3612 "is not supported yet!", rb->idstr, rb->host);
3613 return false;
3614 }
3615 }
3616
c6467627
VSO
3617 return migrate_postcopy_ram();
3618}
3619
edd090c7
PX
 3620/* Sync all the dirty bitmaps with the destination VM. */
3621static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
3622{
3623 RAMBlock *block;
3624 QEMUFile *file = s->to_dst_file;
3625 int ramblock_count = 0;
3626
3627 trace_ram_dirty_bitmap_sync_start();
3628
fbd162e6 3629 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
edd090c7
PX
3630 qemu_savevm_send_recv_bitmap(file, block->idstr);
3631 trace_ram_dirty_bitmap_request(block->idstr);
3632 ramblock_count++;
3633 }
3634
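    /*
     * Each request above is answered asynchronously via the return path:
     * ram_dirty_bitmap_reload() posts rp_sem once per block, so waiting
     * ramblock_count times below means every bitmap has arrived.
     */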
3635 trace_ram_dirty_bitmap_sync_wait();
3636
 3637 /* Wait until all the ramblocks' dirty bitmaps are synced */
3638 while (ramblock_count--) {
3639 qemu_sem_wait(&s->rp_state.rp_sem);
3640 }
3641
3642 trace_ram_dirty_bitmap_sync_complete();
3643
3644 return 0;
3645}
3646
3647static void ram_dirty_bitmap_reload_notify(MigrationState *s)
3648{
3649 qemu_sem_post(&s->rp_state.rp_sem);
3650}
3651
a335debb
PX
3652/*
 3653 * Read the received bitmap and invert it to form the initial dirty bitmap.
 3654 * This is only used when a postcopy migration has been paused and we
 3655 * want to resume it from the point where it stopped.
3656 */
3657int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
3658{
3659 int ret = -EINVAL;
3660 QEMUFile *file = s->rp_state.from_dst_file;
3661 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
a725ef9f 3662 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
3663 uint64_t size, end_mark;
3664
3665 trace_ram_dirty_bitmap_reload_begin(block->idstr);
3666
3667 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
3668 error_report("%s: incorrect state %s", __func__,
3669 MigrationStatus_str(s->state));
3670 return -EINVAL;
3671 }
3672
3673 /*
3674 * Note: see comments in ramblock_recv_bitmap_send() on why we
 3675 * need the endianness conversion and the padding.
3676 */
3677 local_size = ROUND_UP(local_size, 8);
3678
 3679 /* Add padding */
3680 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
3681
3682 size = qemu_get_be64(file);
3683
 3684 /* The size of the bitmap should match our ramblock */
3685 if (size != local_size) {
3686 error_report("%s: ramblock '%s' bitmap size mismatch "
3687 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
3688 block->idstr, size, local_size);
3689 ret = -EINVAL;
3690 goto out;
3691 }
3692
3693 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
3694 end_mark = qemu_get_be64(file);
3695
3696 ret = qemu_file_get_error(file);
3697 if (ret || size != local_size) {
3698 error_report("%s: read bitmap failed for ramblock '%s': %d"
3699 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
3700 __func__, block->idstr, ret, local_size, size);
3701 ret = -EIO;
3702 goto out;
3703 }
3704
3705 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
3706 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
3707 __func__, block->idstr, end_mark);
3708 ret = -EINVAL;
3709 goto out;
3710 }
3711
3712 /*
 3713 * Endianness conversion. We are in postcopy (though paused).
3714 * The dirty bitmap won't change. We can directly modify it.
3715 */
3716 bitmap_from_le(block->bmap, le_bitmap, nbits);
3717
3718 /*
 3719 * What we received is the "received bitmap". Invert it to form the
 3720 * initial dirty bitmap for this ramblock.
3721 */
3722 bitmap_complement(block->bmap, block->bmap, nbits);
3723
3724 trace_ram_dirty_bitmap_reload_complete(block->idstr);
3725
edd090c7
PX
3726 /*
 3727 * We succeeded in syncing the bitmap for the current ramblock. If this
 3728 * is the last one to sync, we need to notify the main send thread.
3729 */
3730 ram_dirty_bitmap_reload_notify(s);
3731
a335debb
PX
3732 ret = 0;
3733out:
bf269906 3734 g_free(le_bitmap);
a335debb
PX
3735 return ret;
3736}
3737
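/*
 * Resume hook for a paused postcopy migration: re-sync the dirty bitmaps
 * from the destination, then reset RAMState so the RAM save iteration can
 * continue from where it stopped.
 */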
edd090c7
PX
3738static int ram_resume_prepare(MigrationState *s, void *opaque)
3739{
3740 RAMState *rs = *(RAMState **)opaque;
08614f34 3741 int ret;
edd090c7 3742
08614f34
PX
3743 ret = ram_dirty_bitmap_sync_all(s, rs);
3744 if (ret) {
3745 return ret;
3746 }
3747
3748 ram_state_resume_prepare(rs, s->to_dst_file);
3749
3750 return 0;
edd090c7
PX
3751}
3752
56e93d26 3753static SaveVMHandlers savevm_ram_handlers = {
9907e842 3754 .save_setup = ram_save_setup,
56e93d26 3755 .save_live_iterate = ram_save_iterate,
763c906b 3756 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 3757 .save_live_complete_precopy = ram_save_complete,
c6467627 3758 .has_postcopy = ram_has_postcopy,
56e93d26
JQ
3759 .save_live_pending = ram_save_pending,
3760 .load_state = ram_load,
f265e0e4
JQ
3761 .save_cleanup = ram_save_cleanup,
3762 .load_setup = ram_load_setup,
3763 .load_cleanup = ram_load_cleanup,
edd090c7 3764 .resume_prepare = ram_resume_prepare,
56e93d26
JQ
3765};
3766
3767void ram_mig_init(void)
3768{
3769 qemu_mutex_init(&XBZRLE.lock);
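    /* The version number 4 here must match the version_id check in ram_load(). */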
ce62df53 3770 register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
56e93d26 3771}