56e93d26
JQ
1/*
2 * QEMU System Emulator
3 *
4 * Copyright (c) 2003-2008 Fabrice Bellard
76cc7b58
JQ
5 * Copyright (c) 2011-2015 Red Hat Inc
6 *
7 * Authors:
8 * Juan Quintela <[email protected]>
56e93d26
JQ
9 *
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
16 *
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
19 *
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26 * THE SOFTWARE.
27 */
e688df6b 28
1393a485 29#include "qemu/osdep.h"
33c11879 30#include "cpu.h"
56e93d26 31#include <zlib.h>
f348b6d1 32#include "qemu/cutils.h"
56e93d26
JQ
33#include "qemu/bitops.h"
34#include "qemu/bitmap.h"
7205c9ec 35#include "qemu/main-loop.h"
56eb90af 36#include "qemu/pmem.h"
709e3fe8 37#include "xbzrle.h"
7b1e1a22 38#include "ram.h"
6666c96a 39#include "migration.h"
71bb07db 40#include "socket.h"
f2a8f0a6 41#include "migration/register.h"
7b1e1a22 42#include "migration/misc.h"
08a0aee1 43#include "qemu-file.h"
be07b0ac 44#include "postcopy-ram.h"
53d37d36 45#include "page_cache.h"
56e93d26 46#include "qemu/error-report.h"
e688df6b 47#include "qapi/error.h"
9af23989 48#include "qapi/qapi-events-migration.h"
8acabf69 49#include "qapi/qmp/qerror.h"
56e93d26 50#include "trace.h"
56e93d26 51#include "exec/ram_addr.h"
f9494614 52#include "exec/target_page.h"
56e93d26 53#include "qemu/rcu_queue.h"
a91246c9 54#include "migration/colo.h"
53d37d36 55#include "block.h"
af8b7d2b
JQ
56#include "sysemu/sysemu.h"
57#include "qemu/uuid.h"
edd090c7 58#include "savevm.h"
b9ee2f7d 59#include "qemu/iov.h"
56e93d26 60
56e93d26
JQ
61/***********************************************************/
62/* ram save/restore */
63
bb890ed5
JQ
64/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
65 * worked for pages that were filled with the same char. We switched
66 * it to only search for the zero value. And to avoid confusion with
67 * RAM_SAVE_FLAG_COMPRESS_PAGE it was renamed.
68 */
69
56e93d26 70#define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
bb890ed5 71#define RAM_SAVE_FLAG_ZERO 0x02
56e93d26
JQ
72#define RAM_SAVE_FLAG_MEM_SIZE 0x04
73#define RAM_SAVE_FLAG_PAGE 0x08
74#define RAM_SAVE_FLAG_EOS 0x10
75#define RAM_SAVE_FLAG_CONTINUE 0x20
76#define RAM_SAVE_FLAG_XBZRLE 0x40
77/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
78#define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
79
56e93d26
JQ
80static inline bool is_zero_range(uint8_t *p, uint64_t size)
81{
a1febc49 82 return buffer_is_zero(p, size);
56e93d26
JQ
83}
84
9360447d
JQ
85XBZRLECacheStats xbzrle_counters;
86
56e93d26
JQ
87/* struct containing the XBZRLE cache and a static page
88 used by the compression */
89static struct {
90 /* buffer used for XBZRLE encoding */
91 uint8_t *encoded_buf;
92 /* buffer for storing page content */
93 uint8_t *current_buf;
94 /* Cache for XBZRLE, Protected by lock. */
95 PageCache *cache;
96 QemuMutex lock;
c00e0928
JQ
97 /* it will store a page full of zeros */
98 uint8_t *zero_target_page;
f265e0e4
JQ
99 /* buffer used for XBZRLE decoding */
100 uint8_t *decoded_buf;
56e93d26
JQ
101} XBZRLE;
102
56e93d26
JQ
103static void XBZRLE_cache_lock(void)
104{
105 if (migrate_use_xbzrle())
106 qemu_mutex_lock(&XBZRLE.lock);
107}
108
109static void XBZRLE_cache_unlock(void)
110{
111 if (migrate_use_xbzrle())
112 qemu_mutex_unlock(&XBZRLE.lock);
113}
114
3d0684b2
JQ
115/**
116 * xbzrle_cache_resize: resize the xbzrle cache
117 *
118 * This function is called from qmp_migrate_set_cache_size in the main
119 * thread, possibly while a migration is in progress. A running
120 * migration may be using the cache and might finish during this call,
121 * hence changes to the cache are protected by the XBZRLE.lock mutex.
122 *
c9dede2d 123 * Returns 0 for success or -1 for error
3d0684b2
JQ
124 *
125 * @new_size: new cache size
8acabf69 126 * @errp: set to the reason if the check fails
56e93d26 127 */
c9dede2d 128int xbzrle_cache_resize(int64_t new_size, Error **errp)
56e93d26
JQ
129{
130 PageCache *new_cache;
c9dede2d 131 int64_t ret = 0;
56e93d26 132
8acabf69
JQ
133 /* Check for truncation */
134 if (new_size != (size_t)new_size) {
135 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
136 "exceeding address space");
137 return -1;
138 }
139
2a313e5c
JQ
140 if (new_size == migrate_xbzrle_cache_size()) {
141 /* nothing to do */
c9dede2d 142 return 0;
2a313e5c
JQ
143 }
144
56e93d26
JQ
145 XBZRLE_cache_lock();
146
147 if (XBZRLE.cache != NULL) {
80f8dfde 148 new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
56e93d26 149 if (!new_cache) {
56e93d26
JQ
150 ret = -1;
151 goto out;
152 }
153
154 cache_fini(XBZRLE.cache);
155 XBZRLE.cache = new_cache;
156 }
56e93d26
JQ
157out:
158 XBZRLE_cache_unlock();
159 return ret;
160}
161
fbd162e6
YK
162static bool ramblock_is_ignored(RAMBlock *block)
163{
164 return !qemu_ram_is_migratable(block) ||
165 (migrate_ignore_shared() && qemu_ram_is_shared(block));
166}
167
b895de50 168/* Should be holding either ram_list.mutex, or the RCU lock. */
fbd162e6
YK
169#define RAMBLOCK_FOREACH_NOT_IGNORED(block) \
170 INTERNAL_RAMBLOCK_FOREACH(block) \
171 if (ramblock_is_ignored(block)) {} else
172
b895de50 173#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
343f632c 174 INTERNAL_RAMBLOCK_FOREACH(block) \
b895de50
CLG
175 if (!qemu_ram_is_migratable(block)) {} else
176
343f632c
DDAG
177#undef RAMBLOCK_FOREACH
178
fbd162e6
YK
179int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
180{
181 RAMBlock *block;
182 int ret = 0;
183
184 rcu_read_lock();
185 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
186 ret = func(block, opaque);
187 if (ret) {
188 break;
189 }
190 }
191 rcu_read_unlock();
192 return ret;
193}
194
f9494614
AP
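/*
 * Allocate the per-RAMBlock "receivedmap" bitmaps used on the destination
 * to track which target pages have already been received.
 */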
195static void ramblock_recv_map_init(void)
196{
197 RAMBlock *rb;
198
fbd162e6 199 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
200 assert(!rb->receivedmap);
201 rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
202 }
203}
204
205int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
206{
207 return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
208 rb->receivedmap);
209}
210
1cba9f6e
DDAG
211bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
212{
213 return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
214}
215
f9494614
AP
216void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
217{
218 set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
219}
220
221void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
222 size_t nr)
223{
224 bitmap_set_atomic(rb->receivedmap,
225 ramblock_recv_bitmap_offset(host_addr, rb),
226 nr);
227}
228
a335debb
PX
229#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)
230
231/*
232 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
233 *
234 * Returns >0 if success with sent bytes, or <0 if error.
235 */
236int64_t ramblock_recv_bitmap_send(QEMUFile *file,
237 const char *block_name)
238{
239 RAMBlock *block = qemu_ram_block_by_name(block_name);
240 unsigned long *le_bitmap, nbits;
241 uint64_t size;
242
243 if (!block) {
244 error_report("%s: invalid block name: %s", __func__, block_name);
245 return -1;
246 }
247
248 nbits = block->used_length >> TARGET_PAGE_BITS;
249
250 /*
251 * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
252 * machines we may need 4 more bytes for padding (see below
253 * comment). So extend it a bit beforehand.
254 */
255 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
256
257 /*
258 * Always use little endian when sending the bitmap. This is
259 * required so that it still works when the source and destination VMs
260 * are not using the same endianness. (Note: big endian won't work.)
261 */
262 bitmap_to_le(le_bitmap, block->receivedmap, nbits);
263
264 /* Size of the bitmap, in bytes */
a725ef9f 265 size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
266
267 /*
268 * size is always aligned to 8 bytes on 64bit machines, but that
269 * may not be the case on 32bit machines. We need this padding to
270 * make sure the migration can survive even between 32bit and
271 * 64bit machines.
272 */
273 size = ROUND_UP(size, 8);
274
275 qemu_put_be64(file, size);
276 qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
277 /*
278 * Mark the end, in case the middle part got corrupted for some
279 * mysterious reason.
280 */
281 qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
282 qemu_fflush(file);
283
bf269906 284 g_free(le_bitmap);
a335debb
PX
285
286 if (qemu_file_get_error(file)) {
287 return qemu_file_get_error(file);
288 }
289
290 return size + sizeof(size);
291}
292
ec481c6c
JQ
293/*
294 * An outstanding page request, on the source, having been received
295 * and queued
296 */
297struct RAMSrcPageRequest {
298 RAMBlock *rb;
299 hwaddr offset;
300 hwaddr len;
301
302 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
303};
304
6f37bb8b
JQ
305/* State of RAM for migration */
306struct RAMState {
204b88b8
JQ
307 /* QEMUFile used for this migration */
308 QEMUFile *f;
6f37bb8b
JQ
309 /* Last block that we have visited searching for dirty pages */
310 RAMBlock *last_seen_block;
311 /* Last block from where we have sent data */
312 RAMBlock *last_sent_block;
269ace29
JQ
313 /* Last dirty target page we have sent */
314 ram_addr_t last_page;
6f37bb8b
JQ
315 /* last ram version we have seen */
316 uint32_t last_version;
317 /* We are in the first round */
318 bool ram_bulk_stage;
6eeb63f7
WW
319 /* The free page optimization is enabled */
320 bool fpo_enabled;
8d820d6f
JQ
321 /* How many times we have dirty too many pages */
322 int dirty_rate_high_cnt;
f664da80
JQ
323 /* these variables are used for bitmap sync */
324 /* last time we did a full bitmap_sync */
325 int64_t time_last_bitmap_sync;
eac74159 326 /* bytes transferred at start_time */
c4bdf0cf 327 uint64_t bytes_xfer_prev;
a66cd90c 328 /* number of dirty pages since start_time */
68908ed6 329 uint64_t num_dirty_pages_period;
b5833fde
JQ
330 /* xbzrle misses since the beginning of the period */
331 uint64_t xbzrle_cache_miss_prev;
76e03000
XG
332
333 /* compression statistics since the beginning of the period */
334 /* number of times no free thread was available to compress data */
335 uint64_t compress_thread_busy_prev;
336 /* number of bytes after compression */
337 uint64_t compressed_size_prev;
338 /* number of compressed pages */
339 uint64_t compress_pages_prev;
340
be8b02ed
XG
341 /* total handled target pages at the beginning of period */
342 uint64_t target_page_count_prev;
343 /* total handled target pages since start */
344 uint64_t target_page_count;
9360447d 345 /* number of dirty bits in the bitmap */
2dfaf12e 346 uint64_t migration_dirty_pages;
386a907b 347 /* Protects modification of the bitmap and migration dirty pages */
108cfae0 348 QemuMutex bitmap_mutex;
68a098f3
JQ
349 /* The RAMBlock used in the last src_page_requests */
350 RAMBlock *last_req_rb;
ec481c6c
JQ
351 /* Queue of outstanding page requests from the destination */
352 QemuMutex src_page_req_mutex;
b58deb34 353 QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
6f37bb8b
JQ
354};
355typedef struct RAMState RAMState;
356
53518d94 357static RAMState *ram_state;
6f37bb8b 358
bd227060
WW
359static NotifierWithReturnList precopy_notifier_list;
360
361void precopy_infrastructure_init(void)
362{
363 notifier_with_return_list_init(&precopy_notifier_list);
364}
365
366void precopy_add_notifier(NotifierWithReturn *n)
367{
368 notifier_with_return_list_add(&precopy_notifier_list, n);
369}
370
371void precopy_remove_notifier(NotifierWithReturn *n)
372{
373 notifier_with_return_remove(n);
374}
375
376int precopy_notify(PrecopyNotifyReason reason, Error **errp)
377{
378 PrecopyNotifyData pnd;
379 pnd.reason = reason;
380 pnd.errp = errp;
381
382 return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
383}
384
6eeb63f7
WW
385void precopy_enable_free_page_optimization(void)
386{
387 if (!ram_state) {
388 return;
389 }
390
391 ram_state->fpo_enabled = true;
392}
393
9edabd4d 394uint64_t ram_bytes_remaining(void)
2f4fde93 395{
bae416e5
DDAG
396 return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
397 0;
2f4fde93
JQ
398}
399
9360447d 400MigrationStats ram_counters;
96506894 401
b8fb8cb7
DDAG
402/* used by the search for pages to send */
403struct PageSearchStatus {
404 /* Current block being searched */
405 RAMBlock *block;
a935e30f
JQ
406 /* Current page to search from */
407 unsigned long page;
b8fb8cb7
DDAG
408 /* Set once we wrap around */
409 bool complete_round;
410};
411typedef struct PageSearchStatus PageSearchStatus;
412
76e03000
XG
413CompressionStats compression_counters;
414
56e93d26 415struct CompressParam {
56e93d26 416 bool done;
90e56fb4 417 bool quit;
5e5fdcff 418 bool zero_page;
56e93d26
JQ
419 QEMUFile *file;
420 QemuMutex mutex;
421 QemuCond cond;
422 RAMBlock *block;
423 ram_addr_t offset;
34ab9e97
XG
424
425 /* internally used fields */
dcaf446e 426 z_stream stream;
34ab9e97 427 uint8_t *originbuf;
56e93d26
JQ
428};
429typedef struct CompressParam CompressParam;
430
431struct DecompressParam {
73a8912b 432 bool done;
90e56fb4 433 bool quit;
56e93d26
JQ
434 QemuMutex mutex;
435 QemuCond cond;
436 void *des;
d341d9f3 437 uint8_t *compbuf;
56e93d26 438 int len;
797ca154 439 z_stream stream;
56e93d26
JQ
440};
441typedef struct DecompressParam DecompressParam;
442
443static CompressParam *comp_param;
444static QemuThread *compress_threads;
445/* comp_done_cond is used to wake up the migration thread when
446 * one of the compression threads has finished the compression.
447 * comp_done_lock is used in conjunction with comp_done_cond.
448 */
0d9f9a5c
LL
449static QemuMutex comp_done_lock;
450static QemuCond comp_done_cond;
56e93d26
JQ
451/* The empty QEMUFileOps will be used by file in CompressParam */
452static const QEMUFileOps empty_ops = { };
453
34ab9e97 454static QEMUFile *decomp_file;
56e93d26
JQ
455static DecompressParam *decomp_param;
456static QemuThread *decompress_threads;
73a8912b
LL
457static QemuMutex decomp_done_lock;
458static QemuCond decomp_done_cond;
56e93d26 459
5e5fdcff 460static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 461 ram_addr_t offset, uint8_t *source_buf);
56e93d26
JQ
462
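/*
 * Compression worker thread: waits for a (block, offset) job, compresses
 * that page into its private QEMUFile buffer and signals comp_done_cond.
 */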
463static void *do_data_compress(void *opaque)
464{
465 CompressParam *param = opaque;
a7a9a88f
LL
466 RAMBlock *block;
467 ram_addr_t offset;
5e5fdcff 468 bool zero_page;
56e93d26 469
a7a9a88f 470 qemu_mutex_lock(&param->mutex);
90e56fb4 471 while (!param->quit) {
a7a9a88f
LL
472 if (param->block) {
473 block = param->block;
474 offset = param->offset;
475 param->block = NULL;
476 qemu_mutex_unlock(&param->mutex);
477
5e5fdcff
XG
478 zero_page = do_compress_ram_page(param->file, &param->stream,
479 block, offset, param->originbuf);
a7a9a88f 480
0d9f9a5c 481 qemu_mutex_lock(&comp_done_lock);
a7a9a88f 482 param->done = true;
5e5fdcff 483 param->zero_page = zero_page;
0d9f9a5c
LL
484 qemu_cond_signal(&comp_done_cond);
485 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
486
487 qemu_mutex_lock(&param->mutex);
488 } else {
56e93d26
JQ
489 qemu_cond_wait(&param->cond, &param->mutex);
490 }
56e93d26 491 }
a7a9a88f 492 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
493
494 return NULL;
495}
496
f0afa331 497static void compress_threads_save_cleanup(void)
56e93d26
JQ
498{
499 int i, thread_count;
500
05306935 501 if (!migrate_use_compression() || !comp_param) {
56e93d26
JQ
502 return;
503 }
05306935 504
56e93d26
JQ
505 thread_count = migrate_compress_threads();
506 for (i = 0; i < thread_count; i++) {
dcaf446e
XG
507 /*
508 * we use it as an indicator of whether the thread is
509 * properly initialized or not
510 */
511 if (!comp_param[i].file) {
512 break;
513 }
05306935
FL
514
515 qemu_mutex_lock(&comp_param[i].mutex);
516 comp_param[i].quit = true;
517 qemu_cond_signal(&comp_param[i].cond);
518 qemu_mutex_unlock(&comp_param[i].mutex);
519
56e93d26 520 qemu_thread_join(compress_threads + i);
56e93d26
JQ
521 qemu_mutex_destroy(&comp_param[i].mutex);
522 qemu_cond_destroy(&comp_param[i].cond);
dcaf446e 523 deflateEnd(&comp_param[i].stream);
34ab9e97 524 g_free(comp_param[i].originbuf);
dcaf446e
XG
525 qemu_fclose(comp_param[i].file);
526 comp_param[i].file = NULL;
56e93d26 527 }
0d9f9a5c
LL
528 qemu_mutex_destroy(&comp_done_lock);
529 qemu_cond_destroy(&comp_done_cond);
56e93d26
JQ
530 g_free(compress_threads);
531 g_free(comp_param);
56e93d26
JQ
532 compress_threads = NULL;
533 comp_param = NULL;
56e93d26
JQ
534}
535
dcaf446e 536static int compress_threads_save_setup(void)
56e93d26
JQ
537{
538 int i, thread_count;
539
540 if (!migrate_use_compression()) {
dcaf446e 541 return 0;
56e93d26 542 }
56e93d26
JQ
543 thread_count = migrate_compress_threads();
544 compress_threads = g_new0(QemuThread, thread_count);
545 comp_param = g_new0(CompressParam, thread_count);
0d9f9a5c
LL
546 qemu_cond_init(&comp_done_cond);
547 qemu_mutex_init(&comp_done_lock);
56e93d26 548 for (i = 0; i < thread_count; i++) {
34ab9e97
XG
549 comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
550 if (!comp_param[i].originbuf) {
551 goto exit;
552 }
553
dcaf446e
XG
554 if (deflateInit(&comp_param[i].stream,
555 migrate_compress_level()) != Z_OK) {
34ab9e97 556 g_free(comp_param[i].originbuf);
dcaf446e
XG
557 goto exit;
558 }
559
e110aa91
C
560 /* comp_param[i].file is just used as a dummy buffer to save data,
561 * set its ops to empty.
56e93d26
JQ
562 */
563 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
564 comp_param[i].done = true;
90e56fb4 565 comp_param[i].quit = false;
56e93d26
JQ
566 qemu_mutex_init(&comp_param[i].mutex);
567 qemu_cond_init(&comp_param[i].cond);
568 qemu_thread_create(compress_threads + i, "compress",
569 do_data_compress, comp_param + i,
570 QEMU_THREAD_JOINABLE);
571 }
dcaf446e
XG
572 return 0;
573
574exit:
575 compress_threads_save_cleanup();
576 return -1;
56e93d26
JQ
577}
578
f986c3d2
JQ
579/* Multiple fd's */
580
af8b7d2b
JQ
581#define MULTIFD_MAGIC 0x11223344U
582#define MULTIFD_VERSION 1
583
6df264ac
JQ
584#define MULTIFD_FLAG_SYNC (1 << 0)
585
efd1a1d6 586/* This value needs to be a multiple of qemu_target_page_size() */
4b0c7264 587#define MULTIFD_PACKET_SIZE (512 * 1024)
efd1a1d6 588
af8b7d2b
JQ
589typedef struct {
590 uint32_t magic;
591 uint32_t version;
592 unsigned char uuid[16]; /* QemuUUID */
593 uint8_t id;
5fbd8b4b
JQ
594 uint8_t unused1[7]; /* Reserved for future use */
595 uint64_t unused2[4]; /* Reserved for future use */
af8b7d2b
JQ
596} __attribute__((packed)) MultiFDInit_t;
597
2a26c979
JQ
598typedef struct {
599 uint32_t magic;
600 uint32_t version;
601 uint32_t flags;
6f862692
JQ
602 /* maximum number of allocated pages */
603 uint32_t pages_alloc;
604 uint32_t pages_used;
2a34ee59
JQ
605 /* size of the next packet that contains pages */
606 uint32_t next_packet_size;
2a26c979 607 uint64_t packet_num;
5fbd8b4b 608 uint64_t unused[4]; /* Reserved for future use */
2a26c979
JQ
609 char ramblock[256];
610 uint64_t offset[];
611} __attribute__((packed)) MultiFDPacket_t;
612
34c55a94
JQ
613typedef struct {
614 /* number of used pages */
615 uint32_t used;
616 /* number of allocated pages */
617 uint32_t allocated;
618 /* global number of generated multifd packets */
619 uint64_t packet_num;
620 /* offset of each page */
621 ram_addr_t *offset;
622 /* pointer to each page */
623 struct iovec *iov;
624 RAMBlock *block;
625} MultiFDPages_t;
626
8c4598f2
JQ
627typedef struct {
628 /* these fields are not changed once the thread is created */
629 /* channel number */
f986c3d2 630 uint8_t id;
8c4598f2 631 /* channel thread name */
f986c3d2 632 char *name;
8c4598f2 633 /* channel thread id */
f986c3d2 634 QemuThread thread;
8c4598f2 635 /* communication channel */
60df2d4a 636 QIOChannel *c;
8c4598f2 637 /* sem where to wait for more work */
f986c3d2 638 QemuSemaphore sem;
8c4598f2 639 /* this mutex protects the following parameters */
f986c3d2 640 QemuMutex mutex;
8c4598f2 641 /* is this channel thread running */
66770707 642 bool running;
8c4598f2 643 /* should this thread finish */
f986c3d2 644 bool quit;
0beb5ed3
JQ
645 /* thread has work to do */
646 int pending_job;
34c55a94
JQ
647 /* array of pages to send */
648 MultiFDPages_t *pages;
2a26c979
JQ
649 /* packet allocated len */
650 uint32_t packet_len;
651 /* pointer to the packet */
652 MultiFDPacket_t *packet;
653 /* multifd flags for each packet */
654 uint32_t flags;
2a34ee59
JQ
655 /* size of the next packet that contains pages */
656 uint32_t next_packet_size;
2a26c979
JQ
657 /* global number of generated multifd packets */
658 uint64_t packet_num;
408ea6ae
JQ
659 /* thread local variables */
660 /* packets sent through this channel */
661 uint64_t num_packets;
662 /* pages sent through this channel */
663 uint64_t num_pages;
6df264ac
JQ
664 /* syncs main thread and channels */
665 QemuSemaphore sem_sync;
8c4598f2
JQ
666} MultiFDSendParams;
667
668typedef struct {
669 /* these fields are not changed once the thread is created */
670 /* channel number */
671 uint8_t id;
672 /* channel thread name */
673 char *name;
674 /* channel thread id */
675 QemuThread thread;
676 /* communication channel */
677 QIOChannel *c;
8c4598f2
JQ
678 /* this mutex protects the following parameters */
679 QemuMutex mutex;
680 /* is this channel thread running */
681 bool running;
34c55a94
JQ
682 /* array of pages to receive */
683 MultiFDPages_t *pages;
2a26c979
JQ
684 /* packet allocated len */
685 uint32_t packet_len;
686 /* pointer to the packet */
687 MultiFDPacket_t *packet;
688 /* multifd flags for each packet */
689 uint32_t flags;
690 /* global number of generated multifd packets */
691 uint64_t packet_num;
408ea6ae 692 /* thread local variables */
2a34ee59
JQ
693 /* size of the next packet that contains pages */
694 uint32_t next_packet_size;
408ea6ae
JQ
695 /* packets received through this channel */
696 uint64_t num_packets;
697 /* pages received through this channel */
698 uint64_t num_pages;
6df264ac
JQ
699 /* syncs main thread and channels */
700 QemuSemaphore sem_sync;
8c4598f2 701} MultiFDRecvParams;
f986c3d2 702
af8b7d2b
JQ
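/*
 * Send the initial handshake packet (magic, version, QEMU UUID and channel
 * id) over this channel. Returns 0 on success, -1 on error with @errp set.
 */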
703static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
704{
705 MultiFDInit_t msg;
706 int ret;
707
708 msg.magic = cpu_to_be32(MULTIFD_MAGIC);
709 msg.version = cpu_to_be32(MULTIFD_VERSION);
710 msg.id = p->id;
711 memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));
712
713 ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
714 if (ret != 0) {
715 return -1;
716 }
717 return 0;
718}
719
720static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
721{
722 MultiFDInit_t msg;
723 int ret;
724
725 ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
726 if (ret != 0) {
727 return -1;
728 }
729
341ba0df
PM
730 msg.magic = be32_to_cpu(msg.magic);
731 msg.version = be32_to_cpu(msg.version);
af8b7d2b
JQ
732
733 if (msg.magic != MULTIFD_MAGIC) {
734 error_setg(errp, "multifd: received packet magic %x "
735 "expected %x", msg.magic, MULTIFD_MAGIC);
736 return -1;
737 }
738
739 if (msg.version != MULTIFD_VERSION) {
740 error_setg(errp, "multifd: received packet version %d "
741 "expected %d", msg.version, MULTIFD_VERSION);
742 return -1;
743 }
744
745 if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
746 char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
747 char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);
748
749 error_setg(errp, "multifd: received uuid '%s' and expected "
750 "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
751 g_free(uuid);
752 g_free(msg_uuid);
753 return -1;
754 }
755
756 if (msg.id > migrate_multifd_channels()) {
757 error_setg(errp, "multifd: received channel version %d "
758 "expected %d", msg.version, MULTIFD_VERSION);
759 return -1;
760 }
761
762 return msg.id;
763}
764
34c55a94
JQ
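/*
 * Allocate a MultiFDPages_t that can hold up to @size pages, together with
 * its offset and iovec arrays.
 */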
765static MultiFDPages_t *multifd_pages_init(size_t size)
766{
767 MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);
768
769 pages->allocated = size;
770 pages->iov = g_new0(struct iovec, size);
771 pages->offset = g_new0(ram_addr_t, size);
772
773 return pages;
774}
775
776static void multifd_pages_clear(MultiFDPages_t *pages)
777{
778 pages->used = 0;
779 pages->allocated = 0;
780 pages->packet_num = 0;
781 pages->block = NULL;
782 g_free(pages->iov);
783 pages->iov = NULL;
784 g_free(pages->offset);
785 pages->offset = NULL;
786 g_free(pages);
787}
788
2a26c979
JQ
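/*
 * Fill the channel's MultiFDPacket_t header from p->pages, converting all
 * fields to big endian for the wire.
 */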
789static void multifd_send_fill_packet(MultiFDSendParams *p)
790{
791 MultiFDPacket_t *packet = p->packet;
7ed379b2 792 uint32_t page_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
2a26c979
JQ
793 int i;
794
795 packet->magic = cpu_to_be32(MULTIFD_MAGIC);
796 packet->version = cpu_to_be32(MULTIFD_VERSION);
797 packet->flags = cpu_to_be32(p->flags);
7ed379b2 798 packet->pages_alloc = cpu_to_be32(page_max);
6f862692 799 packet->pages_used = cpu_to_be32(p->pages->used);
2a34ee59 800 packet->next_packet_size = cpu_to_be32(p->next_packet_size);
2a26c979
JQ
801 packet->packet_num = cpu_to_be64(p->packet_num);
802
803 if (p->pages->block) {
804 strncpy(packet->ramblock, p->pages->block->idstr, 256);
805 }
806
807 for (i = 0; i < p->pages->used; i++) {
808 packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
809 }
810}
811
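/*
 * Validate a received MultiFDPacket_t, convert it back to host byte order,
 * grow p->pages if needed and fill the iovec with the destination host
 * addresses. Returns 0 on success, -1 on error with @errp set.
 */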
812static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
813{
814 MultiFDPacket_t *packet = p->packet;
7ed379b2 815 uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
2a26c979
JQ
816 RAMBlock *block;
817 int i;
818
341ba0df 819 packet->magic = be32_to_cpu(packet->magic);
2a26c979
JQ
820 if (packet->magic != MULTIFD_MAGIC) {
821 error_setg(errp, "multifd: received packet "
822 "magic %x and expected magic %x",
823 packet->magic, MULTIFD_MAGIC);
824 return -1;
825 }
826
341ba0df 827 packet->version = be32_to_cpu(packet->version);
2a26c979
JQ
828 if (packet->version != MULTIFD_VERSION) {
829 error_setg(errp, "multifd: received packet "
830 "version %d and expected version %d",
831 packet->version, MULTIFD_VERSION);
832 return -1;
833 }
834
835 p->flags = be32_to_cpu(packet->flags);
836
6f862692 837 packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
7ed379b2
JQ
838 /*
839 * If we received a packet that is 100 times bigger than expected,
840 * just stop the migration. 100 is an arbitrary (magic) number.
841 */
842 if (packet->pages_alloc > pages_max * 100) {
2a26c979 843 error_setg(errp, "multifd: received packet "
7ed379b2
JQ
844 "with size %d and expected a maximum size of %d",
845 packet->pages_alloc, pages_max * 100) ;
2a26c979
JQ
846 return -1;
847 }
7ed379b2
JQ
848 /*
849 * We received a packet that is bigger than expected but inside
850 * reasonable limits (see previous comment). Just reallocate.
851 */
852 if (packet->pages_alloc > p->pages->allocated) {
853 multifd_pages_clear(p->pages);
f151f8ac 854 p->pages = multifd_pages_init(packet->pages_alloc);
7ed379b2 855 }
2a26c979 856
6f862692
JQ
857 p->pages->used = be32_to_cpu(packet->pages_used);
858 if (p->pages->used > packet->pages_alloc) {
2a26c979 859 error_setg(errp, "multifd: received packet "
6f862692
JQ
860 "with %d pages and expected maximum pages are %d",
861 p->pages->used, packet->pages_alloc) ;
2a26c979
JQ
862 return -1;
863 }
864
2a34ee59 865 p->next_packet_size = be32_to_cpu(packet->next_packet_size);
2a26c979
JQ
866 p->packet_num = be64_to_cpu(packet->packet_num);
867
868 if (p->pages->used) {
869 /* make sure that ramblock is 0 terminated */
870 packet->ramblock[255] = 0;
871 block = qemu_ram_block_by_name(packet->ramblock);
872 if (!block) {
873 error_setg(errp, "multifd: unknown ram block %s",
874 packet->ramblock);
875 return -1;
876 }
877 }
878
879 for (i = 0; i < p->pages->used; i++) {
880 ram_addr_t offset = be64_to_cpu(packet->offset[i]);
881
882 if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
883 error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
884 " (max " RAM_ADDR_FMT ")",
885 offset, block->max_length);
886 return -1;
887 }
888 p->pages->iov[i].iov_base = block->host + offset;
889 p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
890 }
891
892 return 0;
893}
894
f986c3d2
JQ
895struct {
896 MultiFDSendParams *params;
897 /* number of created threads */
898 int count;
34c55a94
JQ
899 /* array of pages to send */
900 MultiFDPages_t *pages;
6df264ac
JQ
901 /* syncs main thread and channels */
902 QemuSemaphore sem_sync;
903 /* global number of generated multifd packets */
904 uint64_t packet_num;
b9ee2f7d
JQ
905 /* send channels ready */
906 QemuSemaphore channels_ready;
f986c3d2
JQ
907} *multifd_send_state;
908
b9ee2f7d
JQ
909/*
910 * How do we use multifd_send_state->pages and channel->pages?
911 *
912 * We create a "pages" array for each channel, plus a main one. Each time
913 * we need to send a batch of pages we exchange the one in
914 * multifd_send_state with the one of the channel that is sending it.
915 * There are two reasons for that:
916 * - to avoid doing so many mallocs during migration
917 * - to make it easier to know what to free at the end of migration
918 *
919 * This way we always know who is the owner of each "pages" struct,
920 * and we don't need any locking. It belongs either to the migration thread
921 * or to the channel thread. Switching is safe because the migration
922 * thread uses the channel mutex when changing it, and the channel
923 * must have finished with its own copy, otherwise pending_job can't be
924 * false.
925 */
926
927static void multifd_send_pages(void)
928{
929 int i;
930 static int next_channel;
931 MultiFDSendParams *p = NULL; /* make happy gcc */
932 MultiFDPages_t *pages = multifd_send_state->pages;
933 uint64_t transferred;
934
935 qemu_sem_wait(&multifd_send_state->channels_ready);
936 for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
937 p = &multifd_send_state->params[i];
938
939 qemu_mutex_lock(&p->mutex);
940 if (!p->pending_job) {
941 p->pending_job++;
942 next_channel = (i + 1) % migrate_multifd_channels();
943 break;
944 }
945 qemu_mutex_unlock(&p->mutex);
946 }
947 p->pages->used = 0;
948
949 p->packet_num = multifd_send_state->packet_num++;
950 p->pages->block = NULL;
951 multifd_send_state->pages = p->pages;
952 p->pages = pages;
4fcefd44 953 transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
b9ee2f7d
JQ
954 ram_counters.multifd_bytes += transferred;
955 ram_counters.transferred += transferred;
956 qemu_mutex_unlock(&p->mutex);
957 qemu_sem_post(&p->sem);
958}
959
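/*
 * Queue one target page for multifd transmission. The current batch is
 * flushed via multifd_send_pages() when the pages array is full or when
 * the RAMBlock changes, in which case the page is re-queued.
 */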
960static void multifd_queue_page(RAMBlock *block, ram_addr_t offset)
961{
962 MultiFDPages_t *pages = multifd_send_state->pages;
963
964 if (!pages->block) {
965 pages->block = block;
966 }
967
968 if (pages->block == block) {
969 pages->offset[pages->used] = offset;
970 pages->iov[pages->used].iov_base = block->host + offset;
971 pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
972 pages->used++;
973
974 if (pages->used < pages->allocated) {
975 return;
976 }
977 }
978
979 multifd_send_pages();
980
981 if (pages->block != block) {
982 multifd_queue_page(block, offset);
983 }
984}
985
66770707 986static void multifd_send_terminate_threads(Error *err)
f986c3d2
JQ
987{
988 int i;
989
7a169d74
JQ
990 if (err) {
991 MigrationState *s = migrate_get_current();
992 migrate_set_error(s, err);
993 if (s->state == MIGRATION_STATUS_SETUP ||
994 s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
995 s->state == MIGRATION_STATUS_DEVICE ||
996 s->state == MIGRATION_STATUS_ACTIVE) {
997 migrate_set_state(&s->state, s->state,
998 MIGRATION_STATUS_FAILED);
999 }
1000 }
1001
66770707 1002 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1003 MultiFDSendParams *p = &multifd_send_state->params[i];
1004
1005 qemu_mutex_lock(&p->mutex);
1006 p->quit = true;
1007 qemu_sem_post(&p->sem);
1008 qemu_mutex_unlock(&p->mutex);
1009 }
1010}
1011
1398b2e3 1012void multifd_save_cleanup(void)
f986c3d2
JQ
1013{
1014 int i;
f986c3d2
JQ
1015
1016 if (!migrate_use_multifd()) {
1398b2e3 1017 return;
f986c3d2 1018 }
66770707
JQ
1019 multifd_send_terminate_threads(NULL);
1020 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1021 MultiFDSendParams *p = &multifd_send_state->params[i];
1022
66770707
JQ
1023 if (p->running) {
1024 qemu_thread_join(&p->thread);
1025 }
60df2d4a
JQ
1026 socket_send_channel_destroy(p->c);
1027 p->c = NULL;
f986c3d2
JQ
1028 qemu_mutex_destroy(&p->mutex);
1029 qemu_sem_destroy(&p->sem);
6df264ac 1030 qemu_sem_destroy(&p->sem_sync);
f986c3d2
JQ
1031 g_free(p->name);
1032 p->name = NULL;
34c55a94
JQ
1033 multifd_pages_clear(p->pages);
1034 p->pages = NULL;
2a26c979
JQ
1035 p->packet_len = 0;
1036 g_free(p->packet);
1037 p->packet = NULL;
f986c3d2 1038 }
b9ee2f7d 1039 qemu_sem_destroy(&multifd_send_state->channels_ready);
6df264ac 1040 qemu_sem_destroy(&multifd_send_state->sem_sync);
f986c3d2
JQ
1041 g_free(multifd_send_state->params);
1042 multifd_send_state->params = NULL;
34c55a94
JQ
1043 multifd_pages_clear(multifd_send_state->pages);
1044 multifd_send_state->pages = NULL;
f986c3d2
JQ
1045 g_free(multifd_send_state);
1046 multifd_send_state = NULL;
f986c3d2
JQ
1047}
1048
6df264ac
JQ
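/*
 * Flush any pending pages and ask every send channel to emit a packet with
 * MULTIFD_FLAG_SYNC, then wait until all channels have done so.
 */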
1049static void multifd_send_sync_main(void)
1050{
1051 int i;
1052
1053 if (!migrate_use_multifd()) {
1054 return;
1055 }
b9ee2f7d
JQ
1056 if (multifd_send_state->pages->used) {
1057 multifd_send_pages();
1058 }
6df264ac
JQ
1059 for (i = 0; i < migrate_multifd_channels(); i++) {
1060 MultiFDSendParams *p = &multifd_send_state->params[i];
1061
1062 trace_multifd_send_sync_main_signal(p->id);
1063
1064 qemu_mutex_lock(&p->mutex);
b9ee2f7d
JQ
1065
1066 p->packet_num = multifd_send_state->packet_num++;
6df264ac
JQ
1067 p->flags |= MULTIFD_FLAG_SYNC;
1068 p->pending_job++;
1069 qemu_mutex_unlock(&p->mutex);
1070 qemu_sem_post(&p->sem);
1071 }
1072 for (i = 0; i < migrate_multifd_channels(); i++) {
1073 MultiFDSendParams *p = &multifd_send_state->params[i];
1074
1075 trace_multifd_send_sync_main_wait(p->id);
1076 qemu_sem_wait(&multifd_send_state->sem_sync);
1077 }
1078 trace_multifd_send_sync_main(multifd_send_state->packet_num);
1079}
1080
f986c3d2
JQ
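/*
 * Per-channel sender thread: waits for work, fills and writes the packet
 * header plus the queued pages to its QIOChannel until asked to quit.
 */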
1081static void *multifd_send_thread(void *opaque)
1082{
1083 MultiFDSendParams *p = opaque;
af8b7d2b 1084 Error *local_err = NULL;
8b2db7f5 1085 int ret;
af8b7d2b 1086
408ea6ae 1087 trace_multifd_send_thread_start(p->id);
74637e6f 1088 rcu_register_thread();
408ea6ae 1089
af8b7d2b
JQ
1090 if (multifd_send_initial_packet(p, &local_err) < 0) {
1091 goto out;
1092 }
408ea6ae
JQ
1093 /* initial packet */
1094 p->num_packets = 1;
f986c3d2
JQ
1095
1096 while (true) {
d82628e4 1097 qemu_sem_wait(&p->sem);
f986c3d2 1098 qemu_mutex_lock(&p->mutex);
0beb5ed3
JQ
1099
1100 if (p->pending_job) {
1101 uint32_t used = p->pages->used;
1102 uint64_t packet_num = p->packet_num;
1103 uint32_t flags = p->flags;
1104
2a34ee59 1105 p->next_packet_size = used * qemu_target_page_size();
0beb5ed3
JQ
1106 multifd_send_fill_packet(p);
1107 p->flags = 0;
1108 p->num_packets++;
1109 p->num_pages += used;
1110 p->pages->used = 0;
1111 qemu_mutex_unlock(&p->mutex);
1112
2a34ee59
JQ
1113 trace_multifd_send(p->id, packet_num, used, flags,
1114 p->next_packet_size);
0beb5ed3 1115
8b2db7f5
JQ
1116 ret = qio_channel_write_all(p->c, (void *)p->packet,
1117 p->packet_len, &local_err);
1118 if (ret != 0) {
1119 break;
1120 }
1121
ad24c7cb
JQ
1122 if (used) {
1123 ret = qio_channel_writev_all(p->c, p->pages->iov,
1124 used, &local_err);
1125 if (ret != 0) {
1126 break;
1127 }
8b2db7f5 1128 }
0beb5ed3
JQ
1129
1130 qemu_mutex_lock(&p->mutex);
1131 p->pending_job--;
1132 qemu_mutex_unlock(&p->mutex);
6df264ac
JQ
1133
1134 if (flags & MULTIFD_FLAG_SYNC) {
1135 qemu_sem_post(&multifd_send_state->sem_sync);
1136 }
b9ee2f7d 1137 qemu_sem_post(&multifd_send_state->channels_ready);
0beb5ed3 1138 } else if (p->quit) {
f986c3d2
JQ
1139 qemu_mutex_unlock(&p->mutex);
1140 break;
6df264ac
JQ
1141 } else {
1142 qemu_mutex_unlock(&p->mutex);
1143 /* sometimes there are spurious wakeups */
f986c3d2 1144 }
f986c3d2
JQ
1145 }
1146
af8b7d2b
JQ
1147out:
1148 if (local_err) {
1149 multifd_send_terminate_threads(local_err);
1150 }
1151
66770707
JQ
1152 qemu_mutex_lock(&p->mutex);
1153 p->running = false;
1154 qemu_mutex_unlock(&p->mutex);
1155
74637e6f 1156 rcu_unregister_thread();
408ea6ae
JQ
1157 trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1158
f986c3d2
JQ
1159 return NULL;
1160}
1161
60df2d4a
JQ
1162static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1163{
1164 MultiFDSendParams *p = opaque;
1165 QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1166 Error *local_err = NULL;
1167
1168 if (qio_task_propagate_error(task, &local_err)) {
1398b2e3
FL
1169 migrate_set_error(migrate_get_current(), local_err);
1170 multifd_save_cleanup();
60df2d4a
JQ
1171 } else {
1172 p->c = QIO_CHANNEL(sioc);
1173 qio_channel_set_delay(p->c, false);
1174 p->running = true;
1175 qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1176 QEMU_THREAD_JOINABLE);
1177
1178 atomic_inc(&multifd_send_state->count);
1179 }
1180}
1181
f986c3d2
JQ
1182int multifd_save_setup(void)
1183{
1184 int thread_count;
efd1a1d6 1185 uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
f986c3d2
JQ
1186 uint8_t i;
1187
1188 if (!migrate_use_multifd()) {
1189 return 0;
1190 }
1191 thread_count = migrate_multifd_channels();
1192 multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1193 multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
66770707 1194 atomic_set(&multifd_send_state->count, 0);
34c55a94 1195 multifd_send_state->pages = multifd_pages_init(page_count);
6df264ac 1196 qemu_sem_init(&multifd_send_state->sem_sync, 0);
b9ee2f7d 1197 qemu_sem_init(&multifd_send_state->channels_ready, 0);
34c55a94 1198
f986c3d2
JQ
1199 for (i = 0; i < thread_count; i++) {
1200 MultiFDSendParams *p = &multifd_send_state->params[i];
1201
1202 qemu_mutex_init(&p->mutex);
1203 qemu_sem_init(&p->sem, 0);
6df264ac 1204 qemu_sem_init(&p->sem_sync, 0);
f986c3d2 1205 p->quit = false;
0beb5ed3 1206 p->pending_job = 0;
f986c3d2 1207 p->id = i;
34c55a94 1208 p->pages = multifd_pages_init(page_count);
2a26c979
JQ
1209 p->packet_len = sizeof(MultiFDPacket_t)
1210 + sizeof(ram_addr_t) * page_count;
1211 p->packet = g_malloc0(p->packet_len);
f986c3d2 1212 p->name = g_strdup_printf("multifdsend_%d", i);
60df2d4a 1213 socket_send_channel_create(multifd_new_send_channel_async, p);
f986c3d2
JQ
1214 }
1215 return 0;
1216}
1217
f986c3d2
JQ
1218struct {
1219 MultiFDRecvParams *params;
1220 /* number of created threads */
1221 int count;
6df264ac
JQ
1222 /* syncs main thread and channels */
1223 QemuSemaphore sem_sync;
1224 /* global number of generated multifd packets */
1225 uint64_t packet_num;
f986c3d2
JQ
1226} *multifd_recv_state;
1227
66770707 1228static void multifd_recv_terminate_threads(Error *err)
f986c3d2
JQ
1229{
1230 int i;
1231
7a169d74
JQ
1232 if (err) {
1233 MigrationState *s = migrate_get_current();
1234 migrate_set_error(s, err);
1235 if (s->state == MIGRATION_STATUS_SETUP ||
1236 s->state == MIGRATION_STATUS_ACTIVE) {
1237 migrate_set_state(&s->state, s->state,
1238 MIGRATION_STATUS_FAILED);
1239 }
1240 }
1241
66770707 1242 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1243 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1244
1245 qemu_mutex_lock(&p->mutex);
7a5cc33c
JQ
1246 /* We could arrive here for two reasons:
1247 - normal quit, i.e. everything went fine, just finished
1248 - error quit: We close the channels so the channel threads
1249 finish the qio_channel_read_all_eof() */
1250 qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
f986c3d2
JQ
1251 qemu_mutex_unlock(&p->mutex);
1252 }
1253}
1254
1255int multifd_load_cleanup(Error **errp)
1256{
1257 int i;
1258 int ret = 0;
1259
1260 if (!migrate_use_multifd()) {
1261 return 0;
1262 }
66770707
JQ
1263 multifd_recv_terminate_threads(NULL);
1264 for (i = 0; i < migrate_multifd_channels(); i++) {
f986c3d2
JQ
1265 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1266
66770707
JQ
1267 if (p->running) {
1268 qemu_thread_join(&p->thread);
1269 }
60df2d4a
JQ
1270 object_unref(OBJECT(p->c));
1271 p->c = NULL;
f986c3d2 1272 qemu_mutex_destroy(&p->mutex);
6df264ac 1273 qemu_sem_destroy(&p->sem_sync);
f986c3d2
JQ
1274 g_free(p->name);
1275 p->name = NULL;
34c55a94
JQ
1276 multifd_pages_clear(p->pages);
1277 p->pages = NULL;
2a26c979
JQ
1278 p->packet_len = 0;
1279 g_free(p->packet);
1280 p->packet = NULL;
f986c3d2 1281 }
6df264ac 1282 qemu_sem_destroy(&multifd_recv_state->sem_sync);
f986c3d2
JQ
1283 g_free(multifd_recv_state->params);
1284 multifd_recv_state->params = NULL;
1285 g_free(multifd_recv_state);
1286 multifd_recv_state = NULL;
1287
1288 return ret;
1289}
1290
6df264ac
JQ
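/*
 * Wait for every receive channel to reach a MULTIFD_FLAG_SYNC packet,
 * record the highest packet number seen, then let the channels continue.
 */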
1291static void multifd_recv_sync_main(void)
1292{
1293 int i;
1294
1295 if (!migrate_use_multifd()) {
1296 return;
1297 }
1298 for (i = 0; i < migrate_multifd_channels(); i++) {
1299 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1300
6df264ac
JQ
1301 trace_multifd_recv_sync_main_wait(p->id);
1302 qemu_sem_wait(&multifd_recv_state->sem_sync);
1303 qemu_mutex_lock(&p->mutex);
1304 if (multifd_recv_state->packet_num < p->packet_num) {
1305 multifd_recv_state->packet_num = p->packet_num;
1306 }
1307 qemu_mutex_unlock(&p->mutex);
1308 }
1309 for (i = 0; i < migrate_multifd_channels(); i++) {
1310 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1311
1312 trace_multifd_recv_sync_main_signal(p->id);
6df264ac
JQ
1313 qemu_sem_post(&p->sem_sync);
1314 }
1315 trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1316}
1317
f986c3d2
JQ
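/*
 * Per-channel receiver thread: reads packet headers and page data from its
 * QIOChannel until EOF or error, honouring MULTIFD_FLAG_SYNC requests.
 */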
1318static void *multifd_recv_thread(void *opaque)
1319{
1320 MultiFDRecvParams *p = opaque;
2a26c979
JQ
1321 Error *local_err = NULL;
1322 int ret;
f986c3d2 1323
408ea6ae 1324 trace_multifd_recv_thread_start(p->id);
74637e6f 1325 rcu_register_thread();
408ea6ae 1326
f986c3d2 1327 while (true) {
6df264ac
JQ
1328 uint32_t used;
1329 uint32_t flags;
0beb5ed3 1330
8b2db7f5
JQ
1331 ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1332 p->packet_len, &local_err);
1333 if (ret == 0) { /* EOF */
1334 break;
1335 }
1336 if (ret == -1) { /* Error */
1337 break;
1338 }
2a26c979 1339
6df264ac
JQ
1340 qemu_mutex_lock(&p->mutex);
1341 ret = multifd_recv_unfill_packet(p, &local_err);
1342 if (ret) {
f986c3d2
JQ
1343 qemu_mutex_unlock(&p->mutex);
1344 break;
1345 }
6df264ac
JQ
1346
1347 used = p->pages->used;
1348 flags = p->flags;
2a34ee59
JQ
1349 trace_multifd_recv(p->id, p->packet_num, used, flags,
1350 p->next_packet_size);
6df264ac
JQ
1351 p->num_packets++;
1352 p->num_pages += used;
f986c3d2 1353 qemu_mutex_unlock(&p->mutex);
6df264ac 1354
ad24c7cb
JQ
1355 if (used) {
1356 ret = qio_channel_readv_all(p->c, p->pages->iov,
1357 used, &local_err);
1358 if (ret != 0) {
1359 break;
1360 }
8b2db7f5
JQ
1361 }
1362
6df264ac
JQ
1363 if (flags & MULTIFD_FLAG_SYNC) {
1364 qemu_sem_post(&multifd_recv_state->sem_sync);
1365 qemu_sem_wait(&p->sem_sync);
1366 }
f986c3d2
JQ
1367 }
1368
d82628e4
JQ
1369 if (local_err) {
1370 multifd_recv_terminate_threads(local_err);
1371 }
66770707
JQ
1372 qemu_mutex_lock(&p->mutex);
1373 p->running = false;
1374 qemu_mutex_unlock(&p->mutex);
1375
74637e6f 1376 rcu_unregister_thread();
408ea6ae
JQ
1377 trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1378
f986c3d2
JQ
1379 return NULL;
1380}
1381
1382int multifd_load_setup(void)
1383{
1384 int thread_count;
efd1a1d6 1385 uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
f986c3d2
JQ
1386 uint8_t i;
1387
1388 if (!migrate_use_multifd()) {
1389 return 0;
1390 }
1391 thread_count = migrate_multifd_channels();
1392 multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1393 multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
66770707 1394 atomic_set(&multifd_recv_state->count, 0);
6df264ac 1395 qemu_sem_init(&multifd_recv_state->sem_sync, 0);
34c55a94 1396
f986c3d2
JQ
1397 for (i = 0; i < thread_count; i++) {
1398 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1399
1400 qemu_mutex_init(&p->mutex);
6df264ac 1401 qemu_sem_init(&p->sem_sync, 0);
f986c3d2 1402 p->id = i;
34c55a94 1403 p->pages = multifd_pages_init(page_count);
2a26c979
JQ
1404 p->packet_len = sizeof(MultiFDPacket_t)
1405 + sizeof(ram_addr_t) * page_count;
1406 p->packet = g_malloc0(p->packet_len);
f986c3d2 1407 p->name = g_strdup_printf("multifdrecv_%d", i);
f986c3d2
JQ
1408 }
1409 return 0;
1410}
1411
62c1e0ca
JQ
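/* Return true once every expected multifd receive channel has been created. */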
1412bool multifd_recv_all_channels_created(void)
1413{
1414 int thread_count = migrate_multifd_channels();
1415
1416 if (!migrate_use_multifd()) {
1417 return true;
1418 }
1419
1420 return thread_count == atomic_read(&multifd_recv_state->count);
1421}
1422
49ed0d24
FL
1423/*
1424 * Try to receive all multifd channels to get ready for the migration.
1425 * - Return true and do not set @errp when correctly receiving all channels;
1426 * - Return false and do not set @errp when correctly receiving the current one;
1427 * - Return false and set @errp when failing to receive the current channel.
1428 */
1429bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
71bb07db 1430{
60df2d4a 1431 MultiFDRecvParams *p;
af8b7d2b
JQ
1432 Error *local_err = NULL;
1433 int id;
60df2d4a 1434
af8b7d2b
JQ
1435 id = multifd_recv_initial_packet(ioc, &local_err);
1436 if (id < 0) {
1437 multifd_recv_terminate_threads(local_err);
49ed0d24
FL
1438 error_propagate_prepend(errp, local_err,
1439 "failed to receive packet"
1440 " via multifd channel %d: ",
1441 atomic_read(&multifd_recv_state->count));
81e62053 1442 return false;
af8b7d2b
JQ
1443 }
1444
1445 p = &multifd_recv_state->params[id];
1446 if (p->c != NULL) {
1447 error_setg(&local_err, "multifd: received id '%d' already setup'",
1448 id);
1449 multifd_recv_terminate_threads(local_err);
49ed0d24 1450 error_propagate(errp, local_err);
81e62053 1451 return false;
af8b7d2b 1452 }
60df2d4a
JQ
1453 p->c = ioc;
1454 object_ref(OBJECT(ioc));
408ea6ae
JQ
1455 /* initial packet */
1456 p->num_packets = 1;
60df2d4a
JQ
1457
1458 p->running = true;
1459 qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1460 QEMU_THREAD_JOINABLE);
1461 atomic_inc(&multifd_recv_state->count);
49ed0d24
FL
1462 return atomic_read(&multifd_recv_state->count) ==
1463 migrate_multifd_channels();
71bb07db
JQ
1464}
1465
56e93d26 1466/**
3d0684b2 1467 * save_page_header: write page header to wire
56e93d26
JQ
1468 *
1469 * If this is the 1st block, it also writes the block identification
1470 *
3d0684b2 1471 * Returns the number of bytes written
56e93d26
JQ
1472 *
1473 * @f: QEMUFile where to send the data
1474 * @block: block that contains the page we want to send
1475 * @offset: offset inside the block for the page;
1476 * the lower bits contain flags
1477 */
2bf3aa85
JQ
1478static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
1479 ram_addr_t offset)
56e93d26 1480{
9f5f380b 1481 size_t size, len;
56e93d26 1482
24795694
JQ
1483 if (block == rs->last_sent_block) {
1484 offset |= RAM_SAVE_FLAG_CONTINUE;
1485 }
2bf3aa85 1486 qemu_put_be64(f, offset);
56e93d26
JQ
1487 size = 8;
1488
1489 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
9f5f380b 1490 len = strlen(block->idstr);
2bf3aa85
JQ
1491 qemu_put_byte(f, len);
1492 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
9f5f380b 1493 size += 1 + len;
24795694 1494 rs->last_sent_block = block;
56e93d26
JQ
1495 }
1496 return size;
1497}
1498
3d0684b2
JQ
1499/**
1500 * mig_throttle_guest_down: throttle down the guest
1501 *
1502 * Reduce the amount of guest CPU execution to hopefully slow down memory
1503 * writes. If guest dirty memory rate is reduced below the rate at
1504 * which we can transfer pages to the destination then we should be
1505 * able to complete migration. Some workloads dirty memory way too
1506 * fast and will not effectively converge, even with auto-converge.
070afca2
JH
1507 */
1508static void mig_throttle_guest_down(void)
1509{
1510 MigrationState *s = migrate_get_current();
2594f56d
DB
1511 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1512 uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
4cbc9c7f 1513 int pct_max = s->parameters.max_cpu_throttle;
070afca2
JH
1514
1515 /* We have not started throttling yet. Let's start it. */
1516 if (!cpu_throttle_active()) {
1517 cpu_throttle_set(pct_initial);
1518 } else {
1519 /* Throttling already on, just increase the rate */
4cbc9c7f
LQ
1520 cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1521 pct_max));
070afca2
JH
1522 }
1523}
1524
3d0684b2
JQ
1525/**
1526 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1527 *
6f37bb8b 1528 * @rs: current RAM state
3d0684b2
JQ
1529 * @current_addr: address for the zero page
1530 *
1531 * Update the xbzrle cache to reflect a page that's been sent as all 0.
56e93d26
JQ
1532 * The important thing is that a stale (not-yet-0'd) page be replaced
1533 * by the new data.
1534 * As a bonus, if the page wasn't in the cache it gets added so that
3d0684b2 1535 * when a small write is made into the 0'd page it gets XBZRLE sent.
56e93d26 1536 */
6f37bb8b 1537static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
56e93d26 1538{
6f37bb8b 1539 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
56e93d26
JQ
1540 return;
1541 }
1542
1543 /* We don't care if this fails to allocate a new cache page
1544 * as long as it updated an old one */
c00e0928 1545 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
9360447d 1546 ram_counters.dirty_sync_count);
56e93d26
JQ
1547}
1548
1549#define ENCODING_FLAG_XBZRLE 0x1
1550
1551/**
1552 * save_xbzrle_page: compress and send current page
1553 *
1554 * Returns: 1 means that we wrote the page
1555 * 0 means that page is identical to the one already sent
1556 * -1 means that xbzrle would be longer than normal
1557 *
5a987738 1558 * @rs: current RAM state
3d0684b2
JQ
1559 * @current_data: pointer to the address of the page contents
1560 * @current_addr: addr of the page
56e93d26
JQ
1561 * @block: block that contains the page we want to send
1562 * @offset: offset inside the block for the page
1563 * @last_stage: if we are at the completion stage
56e93d26 1564 */
204b88b8 1565static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
56e93d26 1566 ram_addr_t current_addr, RAMBlock *block,
072c2511 1567 ram_addr_t offset, bool last_stage)
56e93d26
JQ
1568{
1569 int encoded_len = 0, bytes_xbzrle;
1570 uint8_t *prev_cached_page;
1571
9360447d
JQ
1572 if (!cache_is_cached(XBZRLE.cache, current_addr,
1573 ram_counters.dirty_sync_count)) {
1574 xbzrle_counters.cache_miss++;
56e93d26
JQ
1575 if (!last_stage) {
1576 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
9360447d 1577 ram_counters.dirty_sync_count) == -1) {
56e93d26
JQ
1578 return -1;
1579 } else {
1580 /* update *current_data when the page has been
1581 inserted into cache */
1582 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1583 }
1584 }
1585 return -1;
1586 }
1587
1588 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1589
1590 /* save current buffer into memory */
1591 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1592
1593 /* XBZRLE encoding (if there is no overflow) */
1594 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1595 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1596 TARGET_PAGE_SIZE);
1597 if (encoded_len == 0) {
55c4446b 1598 trace_save_xbzrle_page_skipping();
56e93d26
JQ
1599 return 0;
1600 } else if (encoded_len == -1) {
55c4446b 1601 trace_save_xbzrle_page_overflow();
9360447d 1602 xbzrle_counters.overflow++;
56e93d26
JQ
1603 /* update data in the cache */
1604 if (!last_stage) {
1605 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
1606 *current_data = prev_cached_page;
1607 }
1608 return -1;
1609 }
1610
1611 /* we need to update the data in the cache, in order to get the same data */
1612 if (!last_stage) {
1613 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1614 }
1615
1616 /* Send XBZRLE based compressed page */
2bf3aa85 1617 bytes_xbzrle = save_page_header(rs, rs->f, block,
204b88b8
JQ
1618 offset | RAM_SAVE_FLAG_XBZRLE);
1619 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1620 qemu_put_be16(rs->f, encoded_len);
1621 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
56e93d26 1622 bytes_xbzrle += encoded_len + 1 + 2;
9360447d
JQ
1623 xbzrle_counters.pages++;
1624 xbzrle_counters.bytes += bytes_xbzrle;
1625 ram_counters.transferred += bytes_xbzrle;
56e93d26
JQ
1626
1627 return 1;
1628}
1629
3d0684b2
JQ
1630/**
1631 * migration_bitmap_find_dirty: find the next dirty page from start
f3f491fc 1632 *
a5f7b1a6 1633 * Returns the page offset within the memory region of the start of a dirty page
3d0684b2 1634 *
6f37bb8b 1635 * @rs: current RAM state
3d0684b2 1636 * @rb: RAMBlock where to search for dirty pages
a935e30f 1637 * @start: page where we start the search
f3f491fc 1638 */
56e93d26 1639static inline
a935e30f 1640unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
f20e2865 1641 unsigned long start)
56e93d26 1642{
6b6712ef
JQ
1643 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1644 unsigned long *bitmap = rb->bmap;
56e93d26
JQ
1645 unsigned long next;
1646
fbd162e6 1647 if (ramblock_is_ignored(rb)) {
b895de50
CLG
1648 return size;
1649 }
1650
6eeb63f7
WW
1651 /*
1652 * When the free page optimization is enabled, we need to check the bitmap
1653 * to send the non-free pages rather than all the pages in the bulk stage.
1654 */
1655 if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
6b6712ef 1656 next = start + 1;
56e93d26 1657 } else {
6b6712ef 1658 next = find_next_bit(bitmap, size, start);
56e93d26
JQ
1659 }
1660
6b6712ef 1661 return next;
56e93d26
JQ
1662}
1663
06b10688 1664static inline bool migration_bitmap_clear_dirty(RAMState *rs,
f20e2865
JQ
1665 RAMBlock *rb,
1666 unsigned long page)
a82d593b
DDAG
1667{
1668 bool ret;
a82d593b 1669
386a907b 1670 qemu_mutex_lock(&rs->bitmap_mutex);
6b6712ef 1671 ret = test_and_clear_bit(page, rb->bmap);
a82d593b
DDAG
1672
1673 if (ret) {
0d8ec885 1674 rs->migration_dirty_pages--;
a82d593b 1675 }
386a907b
WW
1676 qemu_mutex_unlock(&rs->bitmap_mutex);
1677
a82d593b
DDAG
1678 return ret;
1679}
1680
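/*
 * Pull the dirty log for the first @length bytes of @rb into the migration
 * bitmap and account the newly dirtied pages for this period.
 */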
15440dd5 1681static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
bf212979 1682 ram_addr_t length)
56e93d26 1683{
0d8ec885 1684 rs->migration_dirty_pages +=
bf212979 1685 cpu_physical_memory_sync_dirty_bitmap(rb, 0, length,
0d8ec885 1686 &rs->num_dirty_pages_period);
56e93d26
JQ
1687}
1688
3d0684b2
JQ
1689/**
1690 * ram_pagesize_summary: calculate all the pagesizes of a VM
1691 *
1692 * Returns a summary bitmap of the page sizes of all RAMBlocks
1693 *
1694 * For VMs with just normal pages this is equivalent to the host page
1695 * size. If it's got some huge pages then it's the OR of all the
1696 * different page sizes.
e8ca1db2
DDAG
1697 */
1698uint64_t ram_pagesize_summary(void)
1699{
1700 RAMBlock *block;
1701 uint64_t summary = 0;
1702
fbd162e6 1703 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
e8ca1db2
DDAG
1704 summary |= block->page_size;
1705 }
1706
1707 return summary;
1708}
1709
aecbfe9c
XG
1710uint64_t ram_get_total_transferred_pages(void)
1711{
1712 return ram_counters.normal + ram_counters.duplicate +
1713 compression_counters.pages + xbzrle_counters.pages;
1714}
1715
b734035b
XG
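/*
 * Recompute the per-period statistics: dirty page rate, xbzrle cache miss
 * rate, and compression busy rate / compression ratio.
 */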
1716static void migration_update_rates(RAMState *rs, int64_t end_time)
1717{
be8b02ed 1718 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
76e03000 1719 double compressed_size;
b734035b
XG
1720
1721 /* calculate period counters */
1722 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1723 / (end_time - rs->time_last_bitmap_sync);
1724
be8b02ed 1725 if (!page_count) {
b734035b
XG
1726 return;
1727 }
1728
1729 if (migrate_use_xbzrle()) {
1730 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
be8b02ed 1731 rs->xbzrle_cache_miss_prev) / page_count;
b734035b
XG
1732 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1733 }
76e03000
XG
1734
1735 if (migrate_use_compression()) {
1736 compression_counters.busy_rate = (double)(compression_counters.busy -
1737 rs->compress_thread_busy_prev) / page_count;
1738 rs->compress_thread_busy_prev = compression_counters.busy;
1739
1740 compressed_size = compression_counters.compressed_size -
1741 rs->compressed_size_prev;
1742 if (compressed_size) {
1743 double uncompressed_size = (compression_counters.pages -
1744 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1745
1746 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1747 compression_counters.compression_rate =
1748 uncompressed_size / compressed_size;
1749
1750 rs->compress_pages_prev = compression_counters.pages;
1751 rs->compressed_size_prev = compression_counters.compressed_size;
1752 }
1753 }
b734035b
XG
1754}
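
/*
 * Worked example for the compression ratio above (made-up numbers): if the
 * last period compressed 1000 pages with a TARGET_PAGE_SIZE of 4096 and the
 * compressed output grew by 1 MiB, then
 *
 *     uncompressed_size = 1000 * 4096       = 4096000 bytes
 *     compression_rate  = 4096000 / 1048576 ~= 3.9
 */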
1755
8d820d6f 1756static void migration_bitmap_sync(RAMState *rs)
56e93d26
JQ
1757{
1758 RAMBlock *block;
56e93d26 1759 int64_t end_time;
c4bdf0cf 1760 uint64_t bytes_xfer_now;
56e93d26 1761
9360447d 1762 ram_counters.dirty_sync_count++;
56e93d26 1763
f664da80
JQ
1764 if (!rs->time_last_bitmap_sync) {
1765 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56e93d26
JQ
1766 }
1767
1768 trace_migration_bitmap_sync_start();
9c1f8f44 1769 memory_global_dirty_log_sync();
56e93d26 1770
108cfae0 1771 qemu_mutex_lock(&rs->bitmap_mutex);
56e93d26 1772 rcu_read_lock();
fbd162e6 1773 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
bf212979 1774 migration_bitmap_sync_range(rs, block, block->used_length);
56e93d26 1775 }
650af890 1776 ram_counters.remaining = ram_bytes_remaining();
56e93d26 1777 rcu_read_unlock();
108cfae0 1778 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 1779
a66cd90c 1780 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1ffb5dfd 1781
56e93d26
JQ
1782 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1783
1784 /* more than 1 second = 1000 milliseconds */
f664da80 1785 if (end_time > rs->time_last_bitmap_sync + 1000) {
9360447d 1786 bytes_xfer_now = ram_counters.transferred;
d693c6f1 1787
9ac78b61
PL
1788 /* During block migration the auto-converge logic incorrectly detects
1789 * that ram migration makes no progress. Avoid this by disabling the
1790 * throttling logic during the bulk phase of block migration. */
1791 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
56e93d26
JQ
1792 /* The following detection logic can be refined later. For now:
1793 Check to see if the dirtied bytes are 50% more than the approx.
1794 amount of bytes that just got transferred since the last time we
070afca2
JH
1795 were in this routine. If that happens twice, start or increase
1796 throttling */
070afca2 1797
d693c6f1 1798 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
eac74159 1799 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
b4a3c64b 1800 (++rs->dirty_rate_high_cnt >= 2)) {
56e93d26 1801 trace_migration_throttle();
8d820d6f 1802 rs->dirty_rate_high_cnt = 0;
070afca2 1803 mig_throttle_guest_down();
d693c6f1 1804 }
56e93d26 1805 }
070afca2 1806
b734035b
XG
1807 migration_update_rates(rs, end_time);
1808
be8b02ed 1809 rs->target_page_count_prev = rs->target_page_count;
d693c6f1
FF
1810
1811 /* reset period counters */
f664da80 1812 rs->time_last_bitmap_sync = end_time;
a66cd90c 1813 rs->num_dirty_pages_period = 0;
d2a4d85a 1814 rs->bytes_xfer_prev = bytes_xfer_now;
56e93d26 1815 }
4addcd4f 1816 if (migrate_use_events()) {
3ab72385 1817 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
4addcd4f 1818 }
56e93d26
JQ
1819}
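
/*
 * Illustrative trigger for the auto-converge check above (made-up numbers):
 * with 300000 pages dirtied in the period and 4 KiB target pages the guest
 * dirtied ~1.23 GB; if only 2 GB were transferred since the last sync,
 * 1.23 GB > 2 GB / 2, so dirty_rate_high_cnt is bumped, and on the second
 * consecutive period like this mig_throttle_guest_down() slows the guest.
 */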
1820
bd227060
WW
1821static void migration_bitmap_sync_precopy(RAMState *rs)
1822{
1823 Error *local_err = NULL;
1824
1825 /*
1826 * The current notifier usage is just an optimization for migration, so we
1827 * don't stop the normal migration process in the error case.
1828 */
1829 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1830 error_report_err(local_err);
1831 }
1832
1833 migration_bitmap_sync(rs);
1834
1835 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1836 error_report_err(local_err);
1837 }
1838}
1839
6c97ec5f
XG
1840/**
1841 * save_zero_page_to_file: send the zero page to the file
1842 *
1843 * Returns the size of data written to the file, 0 means the page is not
1844 * a zero page
1845 *
1846 * @rs: current RAM state
1847 * @file: the file where the data is saved
1848 * @block: block that contains the page we want to send
1849 * @offset: offset inside the block for the page
1850 */
1851static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1852 RAMBlock *block, ram_addr_t offset)
1853{
1854 uint8_t *p = block->host + offset;
1855 int len = 0;
1856
1857 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1858 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1859 qemu_put_byte(file, 0);
1860 len += 1;
1861 }
1862 return len;
1863}
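
/*
 * Informal sketch of the resulting wire format: a zero page is just the
 * page header with RAM_SAVE_FLAG_ZERO set, followed by one filler byte.
 * The header size varies: the block name is only included when
 * RAM_SAVE_FLAG_CONTINUE is not set.
 *
 *     <be64 offset|flags> [<u8 idstr len> <idstr>] <u8 0x00>
 */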
1864
56e93d26 1865/**
3d0684b2 1866 * save_zero_page: send the zero page to the stream
56e93d26 1867 *
3d0684b2 1868 * Returns the number of pages written.
56e93d26 1869 *
f7ccd61b 1870 * @rs: current RAM state
56e93d26
JQ
1871 * @block: block that contains the page we want to send
1872 * @offset: offset inside the block for the page
56e93d26 1873 */
7faccdc3 1874static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
56e93d26 1875{
6c97ec5f 1876 int len = save_zero_page_to_file(rs, rs->f, block, offset);
56e93d26 1877
6c97ec5f 1878 if (len) {
9360447d 1879 ram_counters.duplicate++;
6c97ec5f
XG
1880 ram_counters.transferred += len;
1881 return 1;
56e93d26 1882 }
6c97ec5f 1883 return -1;
56e93d26
JQ
1884}
1885
5727309d 1886static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
53f09a10 1887{
5727309d 1888 if (!migrate_release_ram() || !migration_in_postcopy()) {
53f09a10
PB
1889 return;
1890 }
1891
aaa2064c 1892 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
53f09a10
PB
1893}
1894
059ff0fb
XG
1895/*
1896 * @pages: the number of pages written by the control path,
1897 * < 0 - error
1898 * > 0 - number of pages written
1899 *
1900 * Return true if the page has been saved, otherwise false is returned.
1901 */
1902static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1903 int *pages)
1904{
1905 uint64_t bytes_xmit = 0;
1906 int ret;
1907
1908 *pages = -1;
1909 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1910 &bytes_xmit);
1911 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1912 return false;
1913 }
1914
1915 if (bytes_xmit) {
1916 ram_counters.transferred += bytes_xmit;
1917 *pages = 1;
1918 }
1919
1920 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1921 return true;
1922 }
1923
1924 if (bytes_xmit > 0) {
1925 ram_counters.normal++;
1926 } else if (bytes_xmit == 0) {
1927 ram_counters.duplicate++;
1928 }
1929
1930 return true;
1931}
1932
65dacaa0
XG
1933/*
1934 * directly send the page to the stream
1935 *
1936 * Returns the number of pages written.
1937 *
1938 * @rs: current RAM state
1939 * @block: block that contains the page we want to send
1940 * @offset: offset inside the block for the page
1941 * @buf: the page to be sent
1942 * @async: send the page asynchronously
1943 */
1944static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1945 uint8_t *buf, bool async)
1946{
1947 ram_counters.transferred += save_page_header(rs, rs->f, block,
1948 offset | RAM_SAVE_FLAG_PAGE);
1949 if (async) {
1950 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
1951 migrate_release_ram() &
1952 migration_in_postcopy());
1953 } else {
1954 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
1955 }
1956 ram_counters.transferred += TARGET_PAGE_SIZE;
1957 ram_counters.normal++;
1958 return 1;
1959}
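
/*
 * Informal sketch of the wire format for a normal page (assuming 4 KiB
 * target pages): the page header with RAM_SAVE_FLAG_PAGE set, then the raw
 * page contents, i.e. 8 + 4096 bytes plus the block name on the first page
 * sent from a given block.
 */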
1960
56e93d26 1961/**
3d0684b2 1962 * ram_save_page: send the given page to the stream
56e93d26 1963 *
3d0684b2 1964 * Returns the number of pages written.
3fd3c4b3
DDAG
1965 * < 0 - error
1966 * >=0 - Number of pages written - this might legally be 0
1967 * if xbzrle noticed the page was the same.
56e93d26 1968 *
6f37bb8b 1969 * @rs: current RAM state
56e93d26
JQ
1970 * @block: block that contains the page we want to send
1971 * @offset: offset inside the block for the page
1972 * @last_stage: if we are at the completion stage
56e93d26 1973 */
a0a8aa14 1974static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
1975{
1976 int pages = -1;
56e93d26 1977 uint8_t *p;
56e93d26 1978 bool send_async = true;
a08f6890 1979 RAMBlock *block = pss->block;
a935e30f 1980 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
059ff0fb 1981 ram_addr_t current_addr = block->offset + offset;
56e93d26 1982
2f68e399 1983 p = block->host + offset;
1db9d8e5 1984 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 1985
56e93d26 1986 XBZRLE_cache_lock();
d7400a34
XG
1987 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1988 migrate_use_xbzrle()) {
059ff0fb
XG
1989 pages = save_xbzrle_page(rs, &p, current_addr, block,
1990 offset, last_stage);
1991 if (!last_stage) {
1992 /* Can't send this cached data async, since the cache page
1993 * might get updated before it gets to the wire
56e93d26 1994 */
059ff0fb 1995 send_async = false;
56e93d26
JQ
1996 }
1997 }
1998
1999 /* XBZRLE overflow or normal page */
2000 if (pages == -1) {
65dacaa0 2001 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
2002 }
2003
2004 XBZRLE_cache_unlock();
2005
2006 return pages;
2007}
2008
b9ee2f7d
JQ
2009static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2010 ram_addr_t offset)
2011{
b9ee2f7d 2012 multifd_queue_page(block, offset);
b9ee2f7d
JQ
2013 ram_counters.normal++;
2014
2015 return 1;
2016}
2017
5e5fdcff 2018static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 2019 ram_addr_t offset, uint8_t *source_buf)
56e93d26 2020{
53518d94 2021 RAMState *rs = ram_state;
a7a9a88f 2022 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
5e5fdcff 2023 bool zero_page = false;
6ef3771c 2024 int ret;
56e93d26 2025
5e5fdcff
XG
2026 if (save_zero_page_to_file(rs, f, block, offset)) {
2027 zero_page = true;
2028 goto exit;
2029 }
2030
6ef3771c 2031 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
2032
2033 /*
2034 * copy it to an internal buffer to avoid it being modified by the VM,
2035 * so that we can catch any error during compression and
2036 * decompression
2037 */
2038 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
2039 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2040 if (ret < 0) {
2041 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 2042 error_report("compressed data failed!");
5e5fdcff 2043 return false;
b3be2896 2044 }
56e93d26 2045
5e5fdcff 2046exit:
6ef3771c 2047 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
5e5fdcff
XG
2048 return zero_page;
2049}
2050
2051static void
2052update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2053{
76e03000
XG
2054 ram_counters.transferred += bytes_xmit;
2055
5e5fdcff
XG
2056 if (param->zero_page) {
2057 ram_counters.duplicate++;
76e03000 2058 return;
5e5fdcff 2059 }
76e03000
XG
2060
2061 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2062 compression_counters.compressed_size += bytes_xmit - 8;
2063 compression_counters.pages++;
56e93d26
JQ
2064}
2065
32b05495
XG
2066static bool save_page_use_compression(RAMState *rs);
2067
ce25d337 2068static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
2069{
2070 int idx, len, thread_count;
2071
32b05495 2072 if (!save_page_use_compression(rs)) {
56e93d26
JQ
2073 return;
2074 }
2075 thread_count = migrate_compress_threads();
a7a9a88f 2076
0d9f9a5c 2077 qemu_mutex_lock(&comp_done_lock);
56e93d26 2078 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 2079 while (!comp_param[idx].done) {
0d9f9a5c 2080 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 2081 }
a7a9a88f 2082 }
0d9f9a5c 2083 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
2084
2085 for (idx = 0; idx < thread_count; idx++) {
2086 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 2087 if (!comp_param[idx].quit) {
ce25d337 2088 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
5e5fdcff
XG
2089 /*
2090 * it's safe to fetch zero_page without holding comp_done_lock
2091 * as there is no further request submitted to the thread,
2092 * i.e., the thread should be waiting for a request at this point.
2093 */
2094 update_compress_thread_counts(&comp_param[idx], len);
56e93d26 2095 }
a7a9a88f 2096 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
2097 }
2098}
2099
2100static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2101 ram_addr_t offset)
2102{
2103 param->block = block;
2104 param->offset = offset;
2105}
2106
ce25d337
JQ
2107static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2108 ram_addr_t offset)
56e93d26
JQ
2109{
2110 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 2111 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
2112
2113 thread_count = migrate_compress_threads();
0d9f9a5c 2114 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
2115retry:
2116 for (idx = 0; idx < thread_count; idx++) {
2117 if (comp_param[idx].done) {
2118 comp_param[idx].done = false;
2119 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2120 qemu_mutex_lock(&comp_param[idx].mutex);
2121 set_compress_params(&comp_param[idx], block, offset);
2122 qemu_cond_signal(&comp_param[idx].cond);
2123 qemu_mutex_unlock(&comp_param[idx].mutex);
2124 pages = 1;
5e5fdcff 2125 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
56e93d26 2126 break;
56e93d26
JQ
2127 }
2128 }
1d58872a
XG
2129
2130 /*
2131 * wait for the free thread if the user specifies 'compress-wait-thread',
2132 * otherwise we will post the page out in the main thread as a normal page.
2133 */
2134 if (pages < 0 && wait) {
2135 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2136 goto retry;
2137 }
0d9f9a5c 2138 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
2139
2140 return pages;
2141}
2142
3d0684b2
JQ
2143/**
2144 * find_dirty_block: find the next dirty page and update any state
2145 * associated with the search process.
b9e60928 2146 *
a5f7b1a6 2147 * Returns true if a page is found
b9e60928 2148 *
6f37bb8b 2149 * @rs: current RAM state
3d0684b2
JQ
2150 * @pss: data about the state of the current dirty page scan
2151 * @again: set to false if the search has scanned the whole of RAM
b9e60928 2152 */
f20e2865 2153static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 2154{
f20e2865 2155 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 2156 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 2157 pss->page >= rs->last_page) {
b9e60928
DDAG
2158 /*
2159 * We've been once around the RAM and haven't found anything.
2160 * Give up.
2161 */
2162 *again = false;
2163 return false;
2164 }
a935e30f 2165 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
b9e60928 2166 /* Didn't find anything in this RAM Block */
a935e30f 2167 pss->page = 0;
b9e60928
DDAG
2168 pss->block = QLIST_NEXT_RCU(pss->block, next);
2169 if (!pss->block) {
48df9d80
XG
2170 /*
2171 * If memory migration starts over, we will meet a dirtied page
2172 * which may still exist in the compression threads' ring, so we
2173 * should flush the compressed data to make sure the new page
2174 * is not overwritten by the old one in the destination.
2175 *
2176 * Also, if xbzrle is on, stop using the data compression at this
2177 * point. In theory, xbzrle can do better than compression.
2178 */
2179 flush_compressed_data(rs);
2180
b9e60928
DDAG
2181 /* Hit the end of the list */
2182 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2183 /* Flag that we've looped */
2184 pss->complete_round = true;
6f37bb8b 2185 rs->ram_bulk_stage = false;
b9e60928
DDAG
2186 }
2187 /* Didn't find anything this time, but try again on the new block */
2188 *again = true;
2189 return false;
2190 } else {
2191 /* Can go around again, but... */
2192 *again = true;
2193 /* We've found something so probably don't need to */
2194 return true;
2195 }
2196}
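
/*
 * Informal summary of the (found, *again) combinations returned above, as
 * consumed by the loop in ram_find_and_save_block():
 *
 *     found=true,  again=true    a dirty page was located at pss->page
 *     found=false, again=true    end of this block (or a wrap), try again
 *     found=false, again=false   a whole pass found nothing, stop searching
 */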
2197
3d0684b2
JQ
2198/**
2199 * unqueue_page: gets a page off the queue
2200 *
a82d593b 2201 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 2202 *
3d0684b2
JQ
2203 * Returns the block of the page (or NULL if none available)
2204 *
ec481c6c 2205 * @rs: current RAM state
3d0684b2 2206 * @offset: used to return the offset within the RAMBlock
a82d593b 2207 */
f20e2865 2208static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
2209{
2210 RAMBlock *block = NULL;
2211
ae526e32
XG
2212 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2213 return NULL;
2214 }
2215
ec481c6c
JQ
2216 qemu_mutex_lock(&rs->src_page_req_mutex);
2217 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2218 struct RAMSrcPageRequest *entry =
2219 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
2220 block = entry->rb;
2221 *offset = entry->offset;
a82d593b
DDAG
2222
2223 if (entry->len > TARGET_PAGE_SIZE) {
2224 entry->len -= TARGET_PAGE_SIZE;
2225 entry->offset += TARGET_PAGE_SIZE;
2226 } else {
2227 memory_region_unref(block->mr);
ec481c6c 2228 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 2229 g_free(entry);
e03a34f8 2230 migration_consume_urgent_request();
a82d593b
DDAG
2231 }
2232 }
ec481c6c 2233 qemu_mutex_unlock(&rs->src_page_req_mutex);
a82d593b
DDAG
2234
2235 return block;
2236}
2237
3d0684b2
JQ
2238/**
2239 * get_queued_page: unqueue a page from the postcopy requests
2240 *
2241 * Skips pages that are already sent (!dirty)
a82d593b 2242 *
a5f7b1a6 2243 * Returns true if a queued page is found
a82d593b 2244 *
6f37bb8b 2245 * @rs: current RAM state
3d0684b2 2246 * @pss: data about the state of the current dirty page scan
a82d593b 2247 */
f20e2865 2248static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
2249{
2250 RAMBlock *block;
2251 ram_addr_t offset;
2252 bool dirty;
2253
2254 do {
f20e2865 2255 block = unqueue_page(rs, &offset);
a82d593b
DDAG
2256 /*
2257 * We're sending this page, and since it's postcopy nothing else
2258 * will dirty it, and we must make sure it doesn't get sent again
2259 * even if this queue request was received after the background
2260 * search already sent it.
2261 */
2262 if (block) {
f20e2865
JQ
2263 unsigned long page;
2264
6b6712ef
JQ
2265 page = offset >> TARGET_PAGE_BITS;
2266 dirty = test_bit(page, block->bmap);
a82d593b 2267 if (!dirty) {
06b10688 2268 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
6b6712ef 2269 page, test_bit(page, block->unsentmap));
a82d593b 2270 } else {
f20e2865 2271 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
2272 }
2273 }
2274
2275 } while (block && !dirty);
2276
2277 if (block) {
2278 /*
2279 * As soon as we start servicing pages out of order, we have
2280 * to kill the bulk stage, since the bulk stage assumes
2281 * in (migration_bitmap_find_and_reset_dirty) that every page is
2282 * dirty, that's no longer true.
2283 */
6f37bb8b 2284 rs->ram_bulk_stage = false;
a82d593b
DDAG
2285
2286 /*
2287 * We want the background search to continue from the queued page
2288 * since the guest is likely to want other pages near to the page
2289 * it just requested.
2290 */
2291 pss->block = block;
a935e30f 2292 pss->page = offset >> TARGET_PAGE_BITS;
a82d593b
DDAG
2293 }
2294
2295 return !!block;
2296}
2297
6c595cde 2298/**
5e58f968
JQ
2299 * migration_page_queue_free: drop any remaining pages in the ram
2300 * request queue
6c595cde 2301 *
3d0684b2
JQ
2302 * It should be empty at the end anyway, but in error cases there may
2303 * be some left. If any pages are left, we drop them.
2304 *
6c595cde 2305 */
83c13382 2306static void migration_page_queue_free(RAMState *rs)
6c595cde 2307{
ec481c6c 2308 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
2309 /* This queue generally should be empty - but in the case of a failed
2310 * migration might have some droppings in.
2311 */
2312 rcu_read_lock();
ec481c6c 2313 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 2314 memory_region_unref(mspr->rb->mr);
ec481c6c 2315 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
2316 g_free(mspr);
2317 }
2318 rcu_read_unlock();
2319}
2320
2321/**
3d0684b2
JQ
2322 * ram_save_queue_pages: queue the page for transmission
2323 *
2324 * A request from postcopy destination for example.
2325 *
2326 * Returns zero on success or negative on error
2327 *
3d0684b2
JQ
2328 * @rbname: Name of the RAMBlock of the request. NULL means the
2329 * same as the last one.
2330 * @start: starting address from the start of the RAMBlock
2331 * @len: length (in bytes) to send
6c595cde 2332 */
96506894 2333int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
2334{
2335 RAMBlock *ramblock;
53518d94 2336 RAMState *rs = ram_state;
6c595cde 2337
9360447d 2338 ram_counters.postcopy_requests++;
6c595cde
DDAG
2339 rcu_read_lock();
2340 if (!rbname) {
2341 /* Reuse last RAMBlock */
68a098f3 2342 ramblock = rs->last_req_rb;
6c595cde
DDAG
2343
2344 if (!ramblock) {
2345 /*
2346 * Shouldn't happen, we can't reuse the last RAMBlock if
2347 * it's the 1st request.
2348 */
2349 error_report("ram_save_queue_pages no previous block");
2350 goto err;
2351 }
2352 } else {
2353 ramblock = qemu_ram_block_by_name(rbname);
2354
2355 if (!ramblock) {
2356 /* We shouldn't be asked for a non-existent RAMBlock */
2357 error_report("ram_save_queue_pages no block '%s'", rbname);
2358 goto err;
2359 }
68a098f3 2360 rs->last_req_rb = ramblock;
6c595cde
DDAG
2361 }
2362 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2363 if (start+len > ramblock->used_length) {
9458ad6b
JQ
2364 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2365 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde
DDAG
2366 __func__, start, len, ramblock->used_length);
2367 goto err;
2368 }
2369
ec481c6c
JQ
2370 struct RAMSrcPageRequest *new_entry =
2371 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
2372 new_entry->rb = ramblock;
2373 new_entry->offset = start;
2374 new_entry->len = len;
2375
2376 memory_region_ref(ramblock->mr);
ec481c6c
JQ
2377 qemu_mutex_lock(&rs->src_page_req_mutex);
2378 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2379 migration_make_urgent_request();
ec481c6c 2380 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2381 rcu_read_unlock();
2382
2383 return 0;
2384
2385err:
2386 rcu_read_unlock();
2387 return -1;
2388}
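
/*
 * Typical (simplified) caller sketch: when the destination faults on a page
 * during postcopy it sends a MIG_RP_MSG_REQ_PAGES message back, and the
 * return-path handling ends up doing something like
 *
 *     ram_save_queue_pages("pc.ram", 0x7f0000, TARGET_PAGE_SIZE);
 *
 * where "pc.ram" and the offset are illustrative values only.
 */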
2389
d7400a34
XG
2390static bool save_page_use_compression(RAMState *rs)
2391{
2392 if (!migrate_use_compression()) {
2393 return false;
2394 }
2395
2396 /*
2397 * If xbzrle is on, stop using the data compression after the first
2398 * round of migration even if compression is enabled. In theory,
2399 * xbzrle can do better than compression.
2400 */
2401 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2402 return true;
2403 }
2404
2405 return false;
2406}
2407
5e5fdcff
XG
2408/*
2409 * try to compress the page before posting it out, return true if the page
2410 * has been properly handled by compression, otherwise it needs other
2411 * paths to handle it
2412 */
2413static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2414{
2415 if (!save_page_use_compression(rs)) {
2416 return false;
2417 }
2418
2419 /*
2420 * When starting the process of a new block, the first page of
2421 * the block should be sent out before other pages in the same
2422 * block, and all the pages in the last block should have been sent
2423 * out. Keeping this order is important, because the 'cont' flag
2424 * is used to avoid resending the block name.
2425 *
2426 * We post the first page as a normal page because compression takes
2427 * a lot of CPU resource.
2428 */
2429 if (block != rs->last_sent_block) {
2430 flush_compressed_data(rs);
2431 return false;
2432 }
2433
2434 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2435 return true;
2436 }
2437
76e03000 2438 compression_counters.busy++;
5e5fdcff
XG
2439 return false;
2440}
2441
a82d593b 2442/**
3d0684b2 2443 * ram_save_target_page: save one target page
a82d593b 2444 *
3d0684b2 2445 * Returns the number of pages written
a82d593b 2446 *
6f37bb8b 2447 * @rs: current RAM state
3d0684b2 2448 * @pss: data about the page we want to send
a82d593b 2449 * @last_stage: if we are at the completion stage
a82d593b 2450 */
a0a8aa14 2451static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2452 bool last_stage)
a82d593b 2453{
a8ec91f9
XG
2454 RAMBlock *block = pss->block;
2455 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2456 int res;
2457
2458 if (control_save_page(rs, block, offset, &res)) {
2459 return res;
2460 }
2461
5e5fdcff
XG
2462 if (save_compress_page(rs, block, offset)) {
2463 return 1;
d7400a34
XG
2464 }
2465
2466 res = save_zero_page(rs, block, offset);
2467 if (res > 0) {
2468 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2469 * page would be stale
2470 */
2471 if (!save_page_use_compression(rs)) {
2472 XBZRLE_cache_lock();
2473 xbzrle_cache_zero_page(rs, block->offset + offset);
2474 XBZRLE_cache_unlock();
2475 }
2476 ram_release_pages(block->idstr, offset, res);
2477 return res;
2478 }
2479
da3f56cb 2480 /*
5e5fdcff
XG
2481 * do not use multifd for compression as the first page in the new
2482 * block should be posted out before sending the compressed page
da3f56cb 2483 */
5e5fdcff 2484 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
b9ee2f7d 2485 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
2486 }
2487
1faa5665 2488 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
2489}
2490
2491/**
3d0684b2 2492 * ram_save_host_page: save a whole host page
a82d593b 2493 *
3d0684b2
JQ
2494 * Starting at *offset send pages up to the end of the current host
2495 * page. It's valid for the initial offset to point into the middle of
2496 * a host page in which case the remainder of the hostpage is sent.
2497 * Only dirty target pages are sent. Note that the host page size may
2498 * be a huge page for this block.
1eb3fc0a
DDAG
2499 * The saving stops at the boundary of the used_length of the block
2500 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2501 *
3d0684b2
JQ
2502 * Returns the number of pages written or negative on error
2503 *
6f37bb8b 2504 * @rs: current RAM state
3d0684b2 2505 * @ms: current migration state
3d0684b2 2506 * @pss: data about the page we want to send
a82d593b 2507 * @last_stage: if we are at the completion stage
a82d593b 2508 */
a0a8aa14 2509static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2510 bool last_stage)
a82d593b
DDAG
2511{
2512 int tmppages, pages = 0;
a935e30f
JQ
2513 size_t pagesize_bits =
2514 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 2515
fbd162e6 2516 if (ramblock_is_ignored(pss->block)) {
b895de50
CLG
2517 error_report("block %s should not be migrated !", pss->block->idstr);
2518 return 0;
2519 }
2520
a82d593b 2521 do {
1faa5665
XG
2522 /* Check whether the page is dirty and, if it is, send it */
2523 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2524 pss->page++;
2525 continue;
2526 }
2527
f20e2865 2528 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
2529 if (tmppages < 0) {
2530 return tmppages;
2531 }
2532
2533 pages += tmppages;
1faa5665
XG
2534 if (pss->block->unsentmap) {
2535 clear_bit(pss->page, pss->block->unsentmap);
2536 }
2537
a935e30f 2538 pss->page++;
1eb3fc0a
DDAG
2539 } while ((pss->page & (pagesize_bits - 1)) &&
2540 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
a82d593b
DDAG
2541
2542 /* The offset we leave with is the last one we looked at */
a935e30f 2543 pss->page--;
a82d593b
DDAG
2544 return pages;
2545}
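
/*
 * Example (illustrative): for a hugetlbfs-backed block with 2 MiB host
 * pages and 4 KiB target pages, pagesize_bits is 512, so one call sends
 * every dirty target page up to the next 512-page boundary, keeping whole
 * host pages together, which is what postcopy needs.
 */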
6c595cde 2546
56e93d26 2547/**
3d0684b2 2548 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2549 *
2550 * Called within an RCU critical section.
2551 *
e8f3735f
XG
2552 * Returns the number of pages written where zero means no dirty pages,
2553 * or negative on error
56e93d26 2554 *
6f37bb8b 2555 * @rs: current RAM state
56e93d26 2556 * @last_stage: if we are at the completion stage
a82d593b
DDAG
2557 *
2558 * On systems where host-page-size > target-page-size it will send all the
2559 * pages in a host page that are dirty.
56e93d26
JQ
2560 */
2561
ce25d337 2562static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 2563{
b8fb8cb7 2564 PageSearchStatus pss;
56e93d26 2565 int pages = 0;
b9e60928 2566 bool again, found;
56e93d26 2567
0827b9e9
AA
2568 /* No dirty page as there is zero RAM */
2569 if (!ram_bytes_total()) {
2570 return pages;
2571 }
2572
6f37bb8b 2573 pss.block = rs->last_seen_block;
a935e30f 2574 pss.page = rs->last_page;
b8fb8cb7
DDAG
2575 pss.complete_round = false;
2576
2577 if (!pss.block) {
2578 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2579 }
56e93d26 2580
b9e60928 2581 do {
a82d593b 2582 again = true;
f20e2865 2583 found = get_queued_page(rs, &pss);
b9e60928 2584
a82d593b
DDAG
2585 if (!found) {
2586 /* priority queue empty, so just search for something dirty */
f20e2865 2587 found = find_dirty_block(rs, &pss, &again);
a82d593b 2588 }
f3f491fc 2589
a82d593b 2590 if (found) {
f20e2865 2591 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 2592 }
b9e60928 2593 } while (!pages && again);
56e93d26 2594
6f37bb8b 2595 rs->last_seen_block = pss.block;
a935e30f 2596 rs->last_page = pss.page;
56e93d26
JQ
2597
2598 return pages;
2599}
2600
2601void acct_update_position(QEMUFile *f, size_t size, bool zero)
2602{
2603 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 2604
56e93d26 2605 if (zero) {
9360447d 2606 ram_counters.duplicate += pages;
56e93d26 2607 } else {
9360447d
JQ
2608 ram_counters.normal += pages;
2609 ram_counters.transferred += size;
56e93d26
JQ
2610 qemu_update_position(f, size);
2611 }
2612}
2613
fbd162e6 2614static uint64_t ram_bytes_total_common(bool count_ignored)
56e93d26
JQ
2615{
2616 RAMBlock *block;
2617 uint64_t total = 0;
2618
2619 rcu_read_lock();
fbd162e6
YK
2620 if (count_ignored) {
2621 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2622 total += block->used_length;
2623 }
2624 } else {
2625 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2626 total += block->used_length;
2627 }
99e15582 2628 }
56e93d26
JQ
2629 rcu_read_unlock();
2630 return total;
2631}
2632
fbd162e6
YK
2633uint64_t ram_bytes_total(void)
2634{
2635 return ram_bytes_total_common(false);
2636}
2637
f265e0e4 2638static void xbzrle_load_setup(void)
56e93d26 2639{
f265e0e4 2640 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2641}
2642
f265e0e4
JQ
2643static void xbzrle_load_cleanup(void)
2644{
2645 g_free(XBZRLE.decoded_buf);
2646 XBZRLE.decoded_buf = NULL;
2647}
2648
7d7c96be
PX
2649static void ram_state_cleanup(RAMState **rsp)
2650{
b9ccaf6d
DDAG
2651 if (*rsp) {
2652 migration_page_queue_free(*rsp);
2653 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2654 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2655 g_free(*rsp);
2656 *rsp = NULL;
2657 }
7d7c96be
PX
2658}
2659
84593a08
PX
2660static void xbzrle_cleanup(void)
2661{
2662 XBZRLE_cache_lock();
2663 if (XBZRLE.cache) {
2664 cache_fini(XBZRLE.cache);
2665 g_free(XBZRLE.encoded_buf);
2666 g_free(XBZRLE.current_buf);
2667 g_free(XBZRLE.zero_target_page);
2668 XBZRLE.cache = NULL;
2669 XBZRLE.encoded_buf = NULL;
2670 XBZRLE.current_buf = NULL;
2671 XBZRLE.zero_target_page = NULL;
2672 }
2673 XBZRLE_cache_unlock();
2674}
2675
f265e0e4 2676static void ram_save_cleanup(void *opaque)
56e93d26 2677{
53518d94 2678 RAMState **rsp = opaque;
6b6712ef 2679 RAMBlock *block;
eb859c53 2680
2ff64038 2681 /* caller have hold iothread lock or is in a bh, so there is
4633456c 2682 * no writing race against the migration bitmap
2ff64038 2683 */
6b6712ef
JQ
2684 memory_global_dirty_log_stop();
2685
fbd162e6 2686 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2687 g_free(block->bmap);
2688 block->bmap = NULL;
2689 g_free(block->unsentmap);
2690 block->unsentmap = NULL;
56e93d26
JQ
2691 }
2692
84593a08 2693 xbzrle_cleanup();
f0afa331 2694 compress_threads_save_cleanup();
7d7c96be 2695 ram_state_cleanup(rsp);
56e93d26
JQ
2696}
2697
6f37bb8b 2698static void ram_state_reset(RAMState *rs)
56e93d26 2699{
6f37bb8b
JQ
2700 rs->last_seen_block = NULL;
2701 rs->last_sent_block = NULL;
269ace29 2702 rs->last_page = 0;
6f37bb8b
JQ
2703 rs->last_version = ram_list.version;
2704 rs->ram_bulk_stage = true;
6eeb63f7 2705 rs->fpo_enabled = false;
56e93d26
JQ
2706}
2707
2708#define MAX_WAIT 50 /* ms, half buffered_file limit */
2709
4f2e4252
DDAG
2710/*
2711 * 'expected' is the value you expect the bitmap mostly to be full
2712 * of; it won't bother printing lines that are all this value.
2713 * If 'todump' is null the migration bitmap is dumped.
2714 */
6b6712ef
JQ
2715void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2716 unsigned long pages)
4f2e4252 2717{
4f2e4252
DDAG
2718 int64_t cur;
2719 int64_t linelen = 128;
2720 char linebuf[129];
2721
6b6712ef 2722 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
2723 int64_t curb;
2724 bool found = false;
2725 /*
2726 * Last line; catch the case where the line length
2727 * is longer than remaining ram
2728 */
6b6712ef
JQ
2729 if (cur + linelen > pages) {
2730 linelen = pages - cur;
4f2e4252
DDAG
2731 }
2732 for (curb = 0; curb < linelen; curb++) {
2733 bool thisbit = test_bit(cur + curb, todump);
2734 linebuf[curb] = thisbit ? '1' : '.';
2735 found = found || (thisbit != expected);
2736 }
2737 if (found) {
2738 linebuf[curb] = '\0';
2739 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2740 }
2741 }
2742}
2743
e0b266f0
DDAG
2744/* **** functions for postcopy ***** */
2745
ced1c616
PB
2746void ram_postcopy_migrated_memory_release(MigrationState *ms)
2747{
2748 struct RAMBlock *block;
ced1c616 2749
fbd162e6 2750 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2751 unsigned long *bitmap = block->bmap;
2752 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2753 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2754
2755 while (run_start < range) {
2756 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
aaa2064c 2757 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
ced1c616
PB
2758 (run_end - run_start) << TARGET_PAGE_BITS);
2759 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2760 }
2761 }
2762}
2763
3d0684b2
JQ
2764/**
2765 * postcopy_send_discard_bm_ram: discard a RAMBlock
2766 *
2767 * Returns zero on success
2768 *
e0b266f0
DDAG
2769 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2770 * Note: At this point the 'unsentmap' is the processed bitmap combined
2771 * with the dirtymap; so a '1' means it's either dirty or unsent.
3d0684b2
JQ
2772 *
2773 * @ms: current migration state
2774 * @pds: state for postcopy
2775 * @start: RAMBlock starting page
2776 * @length: RAMBlock size
e0b266f0
DDAG
2777 */
2778static int postcopy_send_discard_bm_ram(MigrationState *ms,
2779 PostcopyDiscardState *pds,
6b6712ef 2780 RAMBlock *block)
e0b266f0 2781{
6b6712ef 2782 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2783 unsigned long current;
6b6712ef 2784 unsigned long *unsentmap = block->unsentmap;
e0b266f0 2785
6b6712ef 2786 for (current = 0; current < end; ) {
e0b266f0
DDAG
2787 unsigned long one = find_next_bit(unsentmap, end, current);
2788
2789 if (one <= end) {
2790 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2791 unsigned long discard_length;
2792
2793 if (zero >= end) {
2794 discard_length = end - one;
2795 } else {
2796 discard_length = zero - one;
2797 }
d688c62d
DDAG
2798 if (discard_length) {
2799 postcopy_discard_send_range(ms, pds, one, discard_length);
2800 }
e0b266f0
DDAG
2801 current = one + discard_length;
2802 } else {
2803 current = one;
2804 }
2805 }
2806
2807 return 0;
2808}
2809
3d0684b2
JQ
2810/**
2811 * postcopy_each_ram_send_discard: discard all RAMBlocks
2812 *
2813 * Returns 0 for success or negative for error
2814 *
e0b266f0
DDAG
2815 * Utility for the outgoing postcopy code.
2816 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2817 * passing it bitmap indexes and name.
e0b266f0
DDAG
2818 * (qemu_ram_foreach_block ends up passing unscaled lengths
2819 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2820 *
2821 * @ms: current migration state
e0b266f0
DDAG
2822 */
2823static int postcopy_each_ram_send_discard(MigrationState *ms)
2824{
2825 struct RAMBlock *block;
2826 int ret;
2827
fbd162e6 2828 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2829 PostcopyDiscardState *pds =
2830 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
2831
2832 /*
2833 * Postcopy sends chunks of bitmap over the wire, but it
2834 * just needs indexes at this point, avoids it having
2835 * target page specific code.
2836 */
6b6712ef 2837 ret = postcopy_send_discard_bm_ram(ms, pds, block);
e0b266f0
DDAG
2838 postcopy_discard_send_finish(ms, pds);
2839 if (ret) {
2840 return ret;
2841 }
2842 }
2843
2844 return 0;
2845}
2846
3d0684b2
JQ
2847/**
2848 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
2849 *
2850 * Helper for postcopy_chunk_hostpages; it's called twice to
2851 * canonicalize the two bitmaps, that are similar, but one is
2852 * inverted.
99e314eb 2853 *
3d0684b2
JQ
2854 * Postcopy requires that all target pages in a hostpage are dirty or
2855 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2856 *
3d0684b2
JQ
2857 * @ms: current migration state
2858 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2859 * otherwise we need to canonicalize partially dirty host pages
2860 * @block: block that contains the page we want to canonicalize
2861 * @pds: state for postcopy
99e314eb
DDAG
2862 */
2863static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2864 RAMBlock *block,
2865 PostcopyDiscardState *pds)
2866{
53518d94 2867 RAMState *rs = ram_state;
6b6712ef
JQ
2868 unsigned long *bitmap = block->bmap;
2869 unsigned long *unsentmap = block->unsentmap;
29c59172 2870 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2871 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2872 unsigned long run_start;
2873
29c59172
DDAG
2874 if (block->page_size == TARGET_PAGE_SIZE) {
2875 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2876 return;
2877 }
2878
99e314eb
DDAG
2879 if (unsent_pass) {
2880 /* Find a sent page */
6b6712ef 2881 run_start = find_next_zero_bit(unsentmap, pages, 0);
99e314eb
DDAG
2882 } else {
2883 /* Find a dirty page */
6b6712ef 2884 run_start = find_next_bit(bitmap, pages, 0);
99e314eb
DDAG
2885 }
2886
6b6712ef 2887 while (run_start < pages) {
99e314eb
DDAG
2888 bool do_fixup = false;
2889 unsigned long fixup_start_addr;
2890 unsigned long host_offset;
2891
2892 /*
2893 * If the start of this run of pages is in the middle of a host
2894 * page, then we need to fixup this host page.
2895 */
2896 host_offset = run_start % host_ratio;
2897 if (host_offset) {
2898 do_fixup = true;
2899 run_start -= host_offset;
2900 fixup_start_addr = run_start;
2901 /* For the next pass */
2902 run_start = run_start + host_ratio;
2903 } else {
2904 /* Find the end of this run */
2905 unsigned long run_end;
2906 if (unsent_pass) {
6b6712ef 2907 run_end = find_next_bit(unsentmap, pages, run_start + 1);
99e314eb 2908 } else {
6b6712ef 2909 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2910 }
2911 /*
2912 * If the end isn't at the start of a host page, then the
2913 * run doesn't finish at the end of a host page
2914 * and we need to discard.
2915 */
2916 host_offset = run_end % host_ratio;
2917 if (host_offset) {
2918 do_fixup = true;
2919 fixup_start_addr = run_end - host_offset;
2920 /*
2921 * This host page has gone, the next loop iteration starts
2922 * from after the fixup
2923 */
2924 run_start = fixup_start_addr + host_ratio;
2925 } else {
2926 /*
2927 * No discards on this iteration, next loop starts from
2928 * next sent/dirty page
2929 */
2930 run_start = run_end + 1;
2931 }
2932 }
2933
2934 if (do_fixup) {
2935 unsigned long page;
2936
2937 /* Tell the destination to discard this page */
2938 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2939 /* For the unsent_pass we:
2940 * discard partially sent pages
2941 * For the !unsent_pass (dirty) we:
2942 * discard partially dirty pages that were sent
2943 * (any partially sent pages were already discarded
2944 * by the previous unsent_pass)
2945 */
2946 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2947 host_ratio);
2948 }
2949
2950 /* Clean up the bitmap */
2951 for (page = fixup_start_addr;
2952 page < fixup_start_addr + host_ratio; page++) {
2953 /* All pages in this host page are now not sent */
2954 set_bit(page, unsentmap);
2955
2956 /*
2957 * Remark them as dirty, updating the count for any pages
2958 * that weren't previously dirty.
2959 */
0d8ec885 2960 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2961 }
2962 }
2963
2964 if (unsent_pass) {
2965 /* Find the next sent page for the next iteration */
6b6712ef 2966 run_start = find_next_zero_bit(unsentmap, pages, run_start);
99e314eb
DDAG
2967 } else {
2968 /* Find the next dirty page for the next iteration */
6b6712ef 2969 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2970 }
2971 }
2972}
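
/*
 * Worked example (illustrative): with host_ratio = 512 (2 MiB host pages,
 * 4 KiB target pages), a dirty run starting at target page 1000 begins in
 * the middle of a host page: host_offset = 1000 % 512 = 488, so the whole
 * host page covering target pages 512..1023 is discarded and re-marked
 * dirty and unsent, and the scan resumes at page 1024.
 */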
2973
3d0684b2
JQ
2974/**
2975 * postcopy_chunk_hostpages: discard any partially sent host page
2976 *
99e314eb
DDAG
2977 * Utility for the outgoing postcopy code.
2978 *
2979 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2980 * dirty host-page size chunks as all dirty. In this case the host-page
2981 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2982 *
3d0684b2
JQ
2983 * Returns zero on success
2984 *
2985 * @ms: current migration state
6b6712ef 2986 * @block: block we want to work with
99e314eb 2987 */
6b6712ef 2988static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 2989{
6b6712ef
JQ
2990 PostcopyDiscardState *pds =
2991 postcopy_discard_send_init(ms, block->idstr);
99e314eb 2992
6b6712ef
JQ
2993 /* First pass: Discard all partially sent host pages */
2994 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2995 /*
2996 * Second pass: Ensure that all partially dirty host pages are made
2997 * fully dirty.
2998 */
2999 postcopy_chunk_hostpages_pass(ms, false, block, pds);
99e314eb 3000
6b6712ef 3001 postcopy_discard_send_finish(ms, pds);
99e314eb
DDAG
3002 return 0;
3003}
3004
3d0684b2
JQ
3005/**
3006 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3007 *
3008 * Returns zero on success
3009 *
e0b266f0
DDAG
3010 * Transmit the set of pages to be discarded after precopy to the target
3011 * these are pages that:
3012 * a) Have been previously transmitted but are now dirty again
3013 * b) Pages that have never been transmitted, this ensures that
3014 * any pages on the destination that have been mapped by background
3015 * tasks get discarded (transparent huge pages is the specific concern)
3016 * Hopefully this is pretty sparse
3d0684b2
JQ
3017 *
3018 * @ms: current migration state
e0b266f0
DDAG
3019 */
3020int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3021{
53518d94 3022 RAMState *rs = ram_state;
6b6712ef 3023 RAMBlock *block;
e0b266f0 3024 int ret;
e0b266f0
DDAG
3025
3026 rcu_read_lock();
3027
3028 /* This should be our last sync, the src is now paused */
eb859c53 3029 migration_bitmap_sync(rs);
e0b266f0 3030
6b6712ef
JQ
3031 /* Easiest way to make sure we don't resume in the middle of a host-page */
3032 rs->last_seen_block = NULL;
3033 rs->last_sent_block = NULL;
3034 rs->last_page = 0;
e0b266f0 3035
fbd162e6 3036 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
3037 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
3038 unsigned long *bitmap = block->bmap;
3039 unsigned long *unsentmap = block->unsentmap;
3040
3041 if (!unsentmap) {
3042 /* We don't have a safe way to resize the sentmap, so
3043 * if the bitmap was resized it will be NULL at this
3044 * point.
3045 */
3046 error_report("migration ram resized during precopy phase");
3047 rcu_read_unlock();
3048 return -EINVAL;
3049 }
3050 /* Deal with TPS != HPS and huge pages */
3051 ret = postcopy_chunk_hostpages(ms, block);
3052 if (ret) {
3053 rcu_read_unlock();
3054 return ret;
3055 }
e0b266f0 3056
6b6712ef
JQ
3057 /*
3058 * Update the unsentmap to be unsentmap = unsentmap | dirty
3059 */
3060 bitmap_or(unsentmap, unsentmap, bitmap, pages);
e0b266f0 3061#ifdef DEBUG_POSTCOPY
6b6712ef 3062 ram_debug_dump_bitmap(unsentmap, true, pages);
e0b266f0 3063#endif
6b6712ef
JQ
3064 }
3065 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
3066
3067 ret = postcopy_each_ram_send_discard(ms);
3068 rcu_read_unlock();
3069
3070 return ret;
3071}
3072
3d0684b2
JQ
3073/**
3074 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 3075 *
3d0684b2 3076 * Returns zero on success
e0b266f0 3077 *
36449157
JQ
3078 * @rbname: name of the RAMBlock of the request. NULL means the
3079 * same as the last one.
3d0684b2
JQ
3080 * @start: RAMBlock starting page
3081 * @length: RAMBlock size
e0b266f0 3082 */
aaa2064c 3083int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
3084{
3085 int ret = -1;
3086
36449157 3087 trace_ram_discard_range(rbname, start, length);
d3a5038c 3088
e0b266f0 3089 rcu_read_lock();
36449157 3090 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
3091
3092 if (!rb) {
36449157 3093 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
3094 goto err;
3095 }
3096
814bb08f
PX
3097 /*
3098 * On source VM, we don't need to update the received bitmap since
3099 * we don't even have one.
3100 */
3101 if (rb->receivedmap) {
3102 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3103 length >> qemu_target_page_bits());
3104 }
3105
d3a5038c 3106 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
3107
3108err:
3109 rcu_read_unlock();
3110
3111 return ret;
3112}
3113
84593a08
PX
3114/*
3115 * For every allocation, we will try not to crash the VM if the
3116 * allocation failed.
3117 */
3118static int xbzrle_init(void)
3119{
3120 Error *local_err = NULL;
3121
3122 if (!migrate_use_xbzrle()) {
3123 return 0;
3124 }
3125
3126 XBZRLE_cache_lock();
3127
3128 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3129 if (!XBZRLE.zero_target_page) {
3130 error_report("%s: Error allocating zero page", __func__);
3131 goto err_out;
3132 }
3133
3134 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3135 TARGET_PAGE_SIZE, &local_err);
3136 if (!XBZRLE.cache) {
3137 error_report_err(local_err);
3138 goto free_zero_page;
3139 }
3140
3141 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3142 if (!XBZRLE.encoded_buf) {
3143 error_report("%s: Error allocating encoded_buf", __func__);
3144 goto free_cache;
3145 }
3146
3147 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3148 if (!XBZRLE.current_buf) {
3149 error_report("%s: Error allocating current_buf", __func__);
3150 goto free_encoded_buf;
3151 }
3152
3153 /* We are all good */
3154 XBZRLE_cache_unlock();
3155 return 0;
3156
3157free_encoded_buf:
3158 g_free(XBZRLE.encoded_buf);
3159 XBZRLE.encoded_buf = NULL;
3160free_cache:
3161 cache_fini(XBZRLE.cache);
3162 XBZRLE.cache = NULL;
3163free_zero_page:
3164 g_free(XBZRLE.zero_target_page);
3165 XBZRLE.zero_target_page = NULL;
3166err_out:
3167 XBZRLE_cache_unlock();
3168 return -ENOMEM;
3169}
3170
53518d94 3171static int ram_state_init(RAMState **rsp)
56e93d26 3172{
7d00ee6a
PX
3173 *rsp = g_try_new0(RAMState, 1);
3174
3175 if (!*rsp) {
3176 error_report("%s: Init ramstate fail", __func__);
3177 return -1;
3178 }
53518d94
JQ
3179
3180 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3181 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3182 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 3183
7d00ee6a
PX
3184 /*
3185 * Count the total number of pages used by ram blocks not including any
3186 * gaps due to alignment or unplugs.
3187 */
3188 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
3189
3190 ram_state_reset(*rsp);
3191
3192 return 0;
3193}
3194
d6eff5d7 3195static void ram_list_init_bitmaps(void)
7d00ee6a 3196{
d6eff5d7
PX
3197 RAMBlock *block;
3198 unsigned long pages;
56e93d26 3199
0827b9e9
AA
3200 /* Skip setting bitmap if there is no RAM */
3201 if (ram_bytes_total()) {
fbd162e6 3202 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d6eff5d7 3203 pages = block->max_length >> TARGET_PAGE_BITS;
6b6712ef
JQ
3204 block->bmap = bitmap_new(pages);
3205 bitmap_set(block->bmap, 0, pages);
3206 if (migrate_postcopy_ram()) {
3207 block->unsentmap = bitmap_new(pages);
3208 bitmap_set(block->unsentmap, 0, pages);
3209 }
0827b9e9 3210 }
f3f491fc 3211 }
d6eff5d7
PX
3212}
3213
3214static void ram_init_bitmaps(RAMState *rs)
3215{
3216 /* For memory_global_dirty_log_start below. */
3217 qemu_mutex_lock_iothread();
3218 qemu_mutex_lock_ramlist();
3219 rcu_read_lock();
f3f491fc 3220
d6eff5d7 3221 ram_list_init_bitmaps();
56e93d26 3222 memory_global_dirty_log_start();
bd227060 3223 migration_bitmap_sync_precopy(rs);
d6eff5d7
PX
3224
3225 rcu_read_unlock();
56e93d26 3226 qemu_mutex_unlock_ramlist();
49877834 3227 qemu_mutex_unlock_iothread();
d6eff5d7
PX
3228}
3229
3230static int ram_init_all(RAMState **rsp)
3231{
3232 if (ram_state_init(rsp)) {
3233 return -1;
3234 }
3235
3236 if (xbzrle_init()) {
3237 ram_state_cleanup(rsp);
3238 return -1;
3239 }
3240
3241 ram_init_bitmaps(*rsp);
a91246c9
HZ
3242
3243 return 0;
3244}
3245
08614f34
PX
3246static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3247{
3248 RAMBlock *block;
3249 uint64_t pages = 0;
3250
3251 /*
3252 * Postcopy is not using xbzrle/compression, so no need for that.
3253 * Also, since the source is already halted, we don't need to care
3254 * about dirty page logging either.
3255 */
3256
fbd162e6 3257 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
08614f34
PX
3258 pages += bitmap_count_one(block->bmap,
3259 block->used_length >> TARGET_PAGE_BITS);
3260 }
3261
3262 /* This may not be aligned with current bitmaps. Recalculate. */
3263 rs->migration_dirty_pages = pages;
3264
3265 rs->last_seen_block = NULL;
3266 rs->last_sent_block = NULL;
3267 rs->last_page = 0;
3268 rs->last_version = ram_list.version;
3269 /*
3270 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3271 * matter what we have sent.
3272 */
3273 rs->ram_bulk_stage = false;
3274
3275 /* Update RAMState cache of output QEMUFile */
3276 rs->f = out;
3277
3278 trace_ram_state_resume_prepare(pages);
3279}
3280
6bcb05fc
WW
3281/*
3282 * This function clears bits of the free pages reported by the caller from the
3283 * migration dirty bitmap. @addr is the host address corresponding to the
3284 * start of the continuous guest free pages, and @len is the total bytes of
3285 * those pages.
3286 */
3287void qemu_guest_free_page_hint(void *addr, size_t len)
3288{
3289 RAMBlock *block;
3290 ram_addr_t offset;
3291 size_t used_len, start, npages;
3292 MigrationState *s = migrate_get_current();
3293
3294 /* This function is currently expected to be used during live migration */
3295 if (!migration_is_setup_or_active(s->state)) {
3296 return;
3297 }
3298
3299 for (; len > 0; len -= used_len, addr += used_len) {
3300 block = qemu_ram_block_from_host(addr, false, &offset);
3301 if (unlikely(!block || offset >= block->used_length)) {
3302 /*
3303 * The implementation might not support RAMBlock resize during
3304 * live migration, but it could happen in theory with future
3305 * updates. So we add a check here to capture that case.
3306 */
3307 error_report_once("%s unexpected error", __func__);
3308 return;
3309 }
3310
3311 if (len <= block->used_length - offset) {
3312 used_len = len;
3313 } else {
3314 used_len = block->used_length - offset;
3315 }
3316
3317 start = offset >> TARGET_PAGE_BITS;
3318 npages = used_len >> TARGET_PAGE_BITS;
3319
3320 qemu_mutex_lock(&ram_state->bitmap_mutex);
3321 ram_state->migration_dirty_pages -=
3322 bitmap_count_one_with_offset(block->bmap, start, npages);
3323 bitmap_clear(block->bmap, start, npages);
3324 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3325 }
3326}
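
/*
 * Hypothetical caller sketch: the virtio-balloon free-page-hint path hands
 * over a run of guest-free memory so migration can skip it, e.g.
 *
 *     qemu_guest_free_page_hint(host_addr, 64 * TARGET_PAGE_SIZE);
 *
 * where host_addr is assumed to be a host pointer into guest RAM obtained
 * elsewhere; the exact call site and length are illustrative only.
 */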
3327
3d0684b2
JQ
3328/*
3329 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
3330 * a long-running RCU critical section. When RCU reclaims in the code
3331 * start to become numerous it will be necessary to reduce the
3332 * granularity of these critical sections.
3333 */
3334
3d0684b2
JQ
3335/**
3336 * ram_save_setup: Setup RAM for migration
3337 *
3338 * Returns zero to indicate success and negative for error
3339 *
3340 * @f: QEMUFile where to send the data
3341 * @opaque: RAMState pointer
3342 */
a91246c9
HZ
3343static int ram_save_setup(QEMUFile *f, void *opaque)
3344{
53518d94 3345 RAMState **rsp = opaque;
a91246c9
HZ
3346 RAMBlock *block;
3347
dcaf446e
XG
3348 if (compress_threads_save_setup()) {
3349 return -1;
3350 }
3351
a91246c9
HZ
3352 /* migration has already setup the bitmap, reuse it. */
3353 if (!migration_in_colo_state()) {
7d00ee6a 3354 if (ram_init_all(rsp) != 0) {
dcaf446e 3355 compress_threads_save_cleanup();
a91246c9 3356 return -1;
53518d94 3357 }
a91246c9 3358 }
53518d94 3359 (*rsp)->f = f;
a91246c9
HZ
3360
3361 rcu_read_lock();
56e93d26 3362
fbd162e6 3363 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
56e93d26 3364
b895de50 3365 RAMBLOCK_FOREACH_MIGRATABLE(block) {
56e93d26
JQ
3366 qemu_put_byte(f, strlen(block->idstr));
3367 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3368 qemu_put_be64(f, block->used_length);
ef08fb38
DDAG
3369 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
3370 qemu_put_be64(f, block->page_size);
3371 }
fbd162e6
YK
3372 if (migrate_ignore_shared()) {
3373 qemu_put_be64(f, block->mr->addr);
3374 qemu_put_byte(f, ramblock_is_ignored(block) ? 1 : 0);
3375 }
56e93d26
JQ
3376 }
3377
3378 rcu_read_unlock();
3379
3380 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3381 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3382
6df264ac 3383 multifd_send_sync_main();
56e93d26 3384 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3385 qemu_fflush(f);
56e93d26
JQ
3386
3387 return 0;
3388}
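
/*
 * Informal sketch of the setup section this function emits:
 *
 *     be64  total ram size | RAM_SAVE_FLAG_MEM_SIZE
 *     per migratable block:
 *         u8 idstr length, idstr bytes, be64 used_length
 *         [be64 page_size]              (postcopy, non-host-sized pages)
 *         [be64 mr->addr, u8 ignored]   (ignore-shared capability)
 *     be64  RAM_SAVE_FLAG_EOS
 */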
3389
3d0684b2
JQ
3390/**
3391 * ram_save_iterate: iterative stage for migration
3392 *
3393 * Returns zero to indicate success and negative for error
3394 *
3395 * @f: QEMUFile where to send the data
3396 * @opaque: RAMState pointer
3397 */
56e93d26
JQ
3398static int ram_save_iterate(QEMUFile *f, void *opaque)
3399{
53518d94
JQ
3400 RAMState **temp = opaque;
3401 RAMState *rs = *temp;
56e93d26
JQ
3402 int ret;
3403 int i;
3404 int64_t t0;
5c90308f 3405 int done = 0;
56e93d26 3406
b2557345
PL
3407 if (blk_mig_bulk_active()) {
3408 /* Avoid transferring ram during bulk phase of block migration as
3409 * the bulk phase will usually take a long time and transferring
3410 * ram updates during that time is pointless. */
3411 goto out;
3412 }
3413
56e93d26 3414 rcu_read_lock();
6f37bb8b
JQ
3415 if (ram_list.version != rs->last_version) {
3416 ram_state_reset(rs);
56e93d26
JQ
3417 }
3418
3419 /* Read version before ram_list.blocks */
3420 smp_rmb();
3421
3422 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
3423
3424 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3425 i = 0;
e03a34f8
DDAG
3426 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3427 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
56e93d26
JQ
3428 int pages;
3429
e03a34f8
DDAG
3430 if (qemu_file_get_error(f)) {
3431 break;
3432 }
3433
ce25d337 3434 pages = ram_find_and_save_block(rs, false);
56e93d26
JQ
3435 /* no more pages to send */
3436 if (pages == 0) {
5c90308f 3437 done = 1;
56e93d26
JQ
3438 break;
3439 }
e8f3735f
XG
3440
3441 if (pages < 0) {
3442 qemu_file_set_error(f, pages);
3443 break;
3444 }
3445
be8b02ed 3446 rs->target_page_count += pages;
070afca2 3447
56e93d26
JQ
3448 /* we want to check in the 1st loop, just in case it was the 1st time
3449 and we had to sync the dirty bitmap.
a5f7b1a6 3450 qemu_clock_get_ns() is a bit expensive, so we only check every few
56e93d26
JQ
3451 iterations
3452 */
3453 if ((i & 63) == 0) {
3454 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
3455 if (t1 > MAX_WAIT) {
55c4446b 3456 trace_ram_save_iterate_big_wait(t1, i);
56e93d26
JQ
3457 break;
3458 }
3459 }
3460 i++;
3461 }
56e93d26
JQ
3462 rcu_read_unlock();
3463
3464 /*
3465 * Must occur before EOS (or any QEMUFile operation)
3466 * because of RDMA protocol.
3467 */
3468 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3469
6df264ac 3470 multifd_send_sync_main();
b2557345 3471out:
56e93d26 3472 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3473 qemu_fflush(f);
9360447d 3474 ram_counters.transferred += 8;
56e93d26
JQ
3475
3476 ret = qemu_file_get_error(f);
3477 if (ret < 0) {
3478 return ret;
3479 }
3480
5c90308f 3481 return done;
56e93d26
JQ
3482}
3483
3d0684b2
JQ
3484/**
3485 * ram_save_complete: function called to send the remaining amount of ram
3486 *
e8f3735f 3487 * Returns zero to indicate success or negative on error
3d0684b2
JQ
3488 *
3489 * Called with iothread lock
3490 *
3491 * @f: QEMUFile where to send the data
3492 * @opaque: RAMState pointer
3493 */
56e93d26
JQ
3494static int ram_save_complete(QEMUFile *f, void *opaque)
3495{
53518d94
JQ
3496 RAMState **temp = opaque;
3497 RAMState *rs = *temp;
e8f3735f 3498 int ret = 0;
6f37bb8b 3499
56e93d26
JQ
3500 rcu_read_lock();
3501
5727309d 3502 if (!migration_in_postcopy()) {
bd227060 3503 migration_bitmap_sync_precopy(rs);
663e6c1d 3504 }
56e93d26
JQ
3505
3506 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3507
3508 /* try transferring iterative blocks of memory */
3509
3510 /* flush all remaining blocks regardless of rate limiting */
3511 while (true) {
3512 int pages;
3513
ce25d337 3514 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
56e93d26
JQ
 3515 /* no more blocks to send */
3516 if (pages == 0) {
3517 break;
3518 }
e8f3735f
XG
3519 if (pages < 0) {
3520 ret = pages;
3521 break;
3522 }
56e93d26
JQ
3523 }
3524
ce25d337 3525 flush_compressed_data(rs);
56e93d26 3526 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
56e93d26
JQ
3527
3528 rcu_read_unlock();
d09a6fde 3529
6df264ac 3530 multifd_send_sync_main();
56e93d26 3531 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3532 qemu_fflush(f);
56e93d26 3533
e8f3735f 3534 return ret;
56e93d26
JQ
3535}
3536
c31b098f 3537static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
47995026
VSO
3538 uint64_t *res_precopy_only,
3539 uint64_t *res_compatible,
3540 uint64_t *res_postcopy_only)
56e93d26 3541{
53518d94
JQ
3542 RAMState **temp = opaque;
3543 RAMState *rs = *temp;
56e93d26
JQ
3544 uint64_t remaining_size;
3545
9edabd4d 3546 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3547
5727309d 3548 if (!migration_in_postcopy() &&
663e6c1d 3549 remaining_size < max_size) {
56e93d26
JQ
3550 qemu_mutex_lock_iothread();
3551 rcu_read_lock();
bd227060 3552 migration_bitmap_sync_precopy(rs);
56e93d26
JQ
3553 rcu_read_unlock();
3554 qemu_mutex_unlock_iothread();
9edabd4d 3555 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
56e93d26 3556 }
c31b098f 3557
86e1167e
VSO
3558 if (migrate_postcopy_ram()) {
3559 /* We can do postcopy, and all the data is postcopiable */
47995026 3560 *res_compatible += remaining_size;
86e1167e 3561 } else {
47995026 3562 *res_precopy_only += remaining_size;
86e1167e 3563 }
56e93d26
JQ
3564}
3565
3566static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3567{
3568 unsigned int xh_len;
3569 int xh_flags;
063e760a 3570 uint8_t *loaded_data;
56e93d26 3571
56e93d26
JQ
3572 /* extract RLE header */
3573 xh_flags = qemu_get_byte(f);
3574 xh_len = qemu_get_be16(f);
3575
3576 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3577 error_report("Failed to load XBZRLE page - wrong compression!");
3578 return -1;
3579 }
3580
3581 if (xh_len > TARGET_PAGE_SIZE) {
3582 error_report("Failed to load XBZRLE page - len overflow!");
3583 return -1;
3584 }
f265e0e4 3585 loaded_data = XBZRLE.decoded_buf;
56e93d26 3586 /* load data and decode */
f265e0e4 3587 /* it can change loaded_data to point to an internal buffer */
063e760a 3588 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
56e93d26
JQ
3589
3590 /* decode RLE */
063e760a 3591 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
56e93d26
JQ
3592 TARGET_PAGE_SIZE) == -1) {
3593 error_report("Failed to load XBZRLE page - decode error!");
3594 return -1;
3595 }
3596
3597 return 0;
3598}
3599
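/*
 * For orientation, the wire format consumed by load_xbzrle() above is,
 * roughly:
 *
 *   byte:  ENCODING_FLAG_XBZRLE
 *   be16:  xh_len, the encoded length (must be <= TARGET_PAGE_SIZE)
 *   bytes: xh_len bytes of XBZRLE-encoded data
 *
 * The addr/flags word and the RAMBlock id have already been read by the
 * caller (ram_load()).  XBZRLE is a delta encoding, so the current contents
 * of @host serve as the base that the decoded runs are applied to.
 */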
3d0684b2
JQ
3600/**
3601 * ram_block_from_stream: read a RAMBlock id from the migration stream
3602 *
3603 * Must be called from within a rcu critical section.
3604 *
56e93d26 3605 * Returns a pointer from within the RCU-protected ram_list.
a7180877 3606 *
3d0684b2
JQ
3607 * @f: QEMUFile where to read the data from
3608 * @flags: Page flags (mostly to see if it's a continuation of previous block)
a7180877 3609 */
3d0684b2 3610static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
56e93d26
JQ
3611{
3612 static RAMBlock *block = NULL;
3613 char id[256];
3614 uint8_t len;
3615
3616 if (flags & RAM_SAVE_FLAG_CONTINUE) {
4c4bad48 3617 if (!block) {
56e93d26
JQ
3618 error_report("Ack, bad migration stream!");
3619 return NULL;
3620 }
4c4bad48 3621 return block;
56e93d26
JQ
3622 }
3623
3624 len = qemu_get_byte(f);
3625 qemu_get_buffer(f, (uint8_t *)id, len);
3626 id[len] = 0;
3627
e3dd7493 3628 block = qemu_ram_block_by_name(id);
4c4bad48
HZ
3629 if (!block) {
3630 error_report("Can't find block %s", id);
3631 return NULL;
56e93d26
JQ
3632 }
3633
fbd162e6 3634 if (ramblock_is_ignored(block)) {
b895de50
CLG
3635 error_report("block %s should not be migrated !", id);
3636 return NULL;
3637 }
3638
4c4bad48
HZ
3639 return block;
3640}
3641
3642static inline void *host_from_ram_block_offset(RAMBlock *block,
3643 ram_addr_t offset)
3644{
3645 if (!offset_in_ramblock(block, offset)) {
3646 return NULL;
3647 }
3648
3649 return block->host + offset;
56e93d26
JQ
3650}
3651
13af18f2
ZC
3652static inline void *colo_cache_from_block_offset(RAMBlock *block,
3653 ram_addr_t offset)
3654{
3655 if (!offset_in_ramblock(block, offset)) {
3656 return NULL;
3657 }
3658 if (!block->colo_cache) {
3659 error_report("%s: colo_cache is NULL in block :%s",
3660 __func__, block->idstr);
3661 return NULL;
3662 }
7d9acafa
ZC
3663
3664 /*
 3665 * During a colo checkpoint, we need a bitmap of the migrated pages.
 3666 * It helps us decide which pages in the ram cache should be flushed
 3667 * into the VM's RAM later.
3668 */
3669 if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3670 ram_state->migration_dirty_pages++;
3671 }
13af18f2
ZC
3672 return block->colo_cache + offset;
3673}
3674
3d0684b2
JQ
3675/**
3676 * ram_handle_compressed: handle the zero page case
3677 *
56e93d26
JQ
3678 * If a page (or a whole RDMA chunk) has been
3679 * determined to be zero, then zap it.
3d0684b2
JQ
3680 *
3681 * @host: host address for the zero page
3682 * @ch: what the page is filled from. We only support zero
3683 * @size: size of the zero page
56e93d26
JQ
3684 */
3685void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3686{
3687 if (ch != 0 || !is_zero_range(host, size)) {
3688 memset(host, ch, size);
3689 }
3690}
3691
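/*
 * Note on the helper above: the memset() is skipped when the page is already
 * zero-filled, so an already-zero page is never written to needlessly; for
 * anonymous destination memory this also avoids faulting in (and therefore
 * allocating) pages that can stay untouched.
 */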
797ca154
XG
3692/* return the size after decompression, or negative value on error */
3693static int
3694qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3695 const uint8_t *source, size_t source_len)
3696{
3697 int err;
3698
3699 err = inflateReset(stream);
3700 if (err != Z_OK) {
3701 return -1;
3702 }
3703
3704 stream->avail_in = source_len;
3705 stream->next_in = (uint8_t *)source;
3706 stream->avail_out = dest_len;
3707 stream->next_out = dest;
3708
3709 err = inflate(stream, Z_NO_FLUSH);
3710 if (err != Z_STREAM_END) {
3711 return -1;
3712 }
3713
3714 return stream->total_out;
3715}
3716
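/*
 * Sketch (not part of the original file): the sender side uses the same
 * one-shot zlib pattern, with deflate instead of inflate.  The helper name
 * below is made up for illustration and assumes @stream was initialised with
 * deflateInit(); the real compression-side helpers (cf.
 * compress_threads_save_setup() used in ram_save_setup()) live earlier in
 * this file.
 */
static int example_compress_page(z_stream *stream, uint8_t *dest,
                                 size_t dest_len, const uint8_t *source,
                                 size_t source_len)
{
    /* reuse the stream across pages rather than re-initialising it */
    if (deflateReset(stream) != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    /* Z_FINISH: the whole page is handed over in a single call */
    if (deflate(stream, Z_FINISH) != Z_STREAM_END) {
        return -1;
    }

    return stream->total_out;
}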
56e93d26
JQ
3717static void *do_data_decompress(void *opaque)
3718{
3719 DecompressParam *param = opaque;
3720 unsigned long pagesize;
33d151f4 3721 uint8_t *des;
34ab9e97 3722 int len, ret;
56e93d26 3723
33d151f4 3724 qemu_mutex_lock(&param->mutex);
90e56fb4 3725 while (!param->quit) {
33d151f4
LL
3726 if (param->des) {
3727 des = param->des;
3728 len = param->len;
3729 param->des = 0;
3730 qemu_mutex_unlock(&param->mutex);
3731
56e93d26 3732 pagesize = TARGET_PAGE_SIZE;
34ab9e97
XG
3733
3734 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3735 param->compbuf, len);
f548222c 3736 if (ret < 0 && migrate_get_current()->decompress_error_check) {
34ab9e97
XG
3737 error_report("decompress data failed");
3738 qemu_file_set_error(decomp_file, ret);
3739 }
73a8912b 3740
33d151f4
LL
3741 qemu_mutex_lock(&decomp_done_lock);
3742 param->done = true;
3743 qemu_cond_signal(&decomp_done_cond);
3744 qemu_mutex_unlock(&decomp_done_lock);
3745
3746 qemu_mutex_lock(&param->mutex);
3747 } else {
3748 qemu_cond_wait(&param->cond, &param->mutex);
3749 }
56e93d26 3750 }
33d151f4 3751 qemu_mutex_unlock(&param->mutex);
56e93d26
JQ
3752
3753 return NULL;
3754}
3755
34ab9e97 3756static int wait_for_decompress_done(void)
5533b2e9
LL
3757{
3758 int idx, thread_count;
3759
3760 if (!migrate_use_compression()) {
34ab9e97 3761 return 0;
5533b2e9
LL
3762 }
3763
3764 thread_count = migrate_decompress_threads();
3765 qemu_mutex_lock(&decomp_done_lock);
3766 for (idx = 0; idx < thread_count; idx++) {
3767 while (!decomp_param[idx].done) {
3768 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3769 }
3770 }
3771 qemu_mutex_unlock(&decomp_done_lock);
34ab9e97 3772 return qemu_file_get_error(decomp_file);
5533b2e9
LL
3773}
3774
f0afa331 3775static void compress_threads_load_cleanup(void)
56e93d26
JQ
3776{
3777 int i, thread_count;
3778
3416ab5b
JQ
3779 if (!migrate_use_compression()) {
3780 return;
3781 }
56e93d26
JQ
3782 thread_count = migrate_decompress_threads();
3783 for (i = 0; i < thread_count; i++) {
797ca154
XG
3784 /*
 3785 * we use it as an indicator of whether the thread was
 3786 * properly initialized or not
3787 */
3788 if (!decomp_param[i].compbuf) {
3789 break;
3790 }
3791
56e93d26 3792 qemu_mutex_lock(&decomp_param[i].mutex);
90e56fb4 3793 decomp_param[i].quit = true;
56e93d26
JQ
3794 qemu_cond_signal(&decomp_param[i].cond);
3795 qemu_mutex_unlock(&decomp_param[i].mutex);
3796 }
3797 for (i = 0; i < thread_count; i++) {
797ca154
XG
3798 if (!decomp_param[i].compbuf) {
3799 break;
3800 }
3801
56e93d26
JQ
3802 qemu_thread_join(decompress_threads + i);
3803 qemu_mutex_destroy(&decomp_param[i].mutex);
3804 qemu_cond_destroy(&decomp_param[i].cond);
797ca154 3805 inflateEnd(&decomp_param[i].stream);
56e93d26 3806 g_free(decomp_param[i].compbuf);
797ca154 3807 decomp_param[i].compbuf = NULL;
56e93d26
JQ
3808 }
3809 g_free(decompress_threads);
3810 g_free(decomp_param);
56e93d26
JQ
3811 decompress_threads = NULL;
3812 decomp_param = NULL;
34ab9e97 3813 decomp_file = NULL;
56e93d26
JQ
3814}
3815
34ab9e97 3816static int compress_threads_load_setup(QEMUFile *f)
797ca154
XG
3817{
3818 int i, thread_count;
3819
3820 if (!migrate_use_compression()) {
3821 return 0;
3822 }
3823
3824 thread_count = migrate_decompress_threads();
3825 decompress_threads = g_new0(QemuThread, thread_count);
3826 decomp_param = g_new0(DecompressParam, thread_count);
3827 qemu_mutex_init(&decomp_done_lock);
3828 qemu_cond_init(&decomp_done_cond);
34ab9e97 3829 decomp_file = f;
797ca154
XG
3830 for (i = 0; i < thread_count; i++) {
3831 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3832 goto exit;
3833 }
3834
3835 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3836 qemu_mutex_init(&decomp_param[i].mutex);
3837 qemu_cond_init(&decomp_param[i].cond);
3838 decomp_param[i].done = true;
3839 decomp_param[i].quit = false;
3840 qemu_thread_create(decompress_threads + i, "decompress",
3841 do_data_decompress, decomp_param + i,
3842 QEMU_THREAD_JOINABLE);
3843 }
3844 return 0;
3845exit:
3846 compress_threads_load_cleanup();
3847 return -1;
3848}
3849
c1bc6626 3850static void decompress_data_with_multi_threads(QEMUFile *f,
56e93d26
JQ
3851 void *host, int len)
3852{
3853 int idx, thread_count;
3854
3855 thread_count = migrate_decompress_threads();
73a8912b 3856 qemu_mutex_lock(&decomp_done_lock);
56e93d26
JQ
3857 while (true) {
3858 for (idx = 0; idx < thread_count; idx++) {
73a8912b 3859 if (decomp_param[idx].done) {
33d151f4
LL
3860 decomp_param[idx].done = false;
3861 qemu_mutex_lock(&decomp_param[idx].mutex);
c1bc6626 3862 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
56e93d26
JQ
3863 decomp_param[idx].des = host;
3864 decomp_param[idx].len = len;
33d151f4
LL
3865 qemu_cond_signal(&decomp_param[idx].cond);
3866 qemu_mutex_unlock(&decomp_param[idx].mutex);
56e93d26
JQ
3867 break;
3868 }
3869 }
3870 if (idx < thread_count) {
3871 break;
73a8912b
LL
3872 } else {
3873 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
56e93d26
JQ
3874 }
3875 }
73a8912b 3876 qemu_mutex_unlock(&decomp_done_lock);
56e93d26
JQ
3877}
3878
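/*
 * Hand-off protocol between the function above and do_data_decompress(),
 * summarised: the main thread waits (on decomp_done_cond) for a worker whose
 * ->done flag is set, copies the compressed bytes into that worker's compbuf,
 * publishes ->des/->len under the worker's mutex and signals its ->cond; the
 * worker decompresses into ->des, flags any error on decomp_file, then sets
 * ->done again and signals decomp_done_cond.  wait_for_decompress_done()
 * simply waits until every worker is back in the ->done state.
 */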
13af18f2
ZC
3879/*
 3880 * colo cache: this is for the secondary VM; we cache the whole
 3881 * memory of the secondary VM. The global lock must be held when
 3882 * calling this helper.
3883 */
3884int colo_init_ram_cache(void)
3885{
3886 RAMBlock *block;
3887
3888 rcu_read_lock();
fbd162e6 3889 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3890 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3891 NULL,
3892 false);
3893 if (!block->colo_cache) {
3894 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3895 "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3896 block->used_length);
3897 goto out_locked;
3898 }
3899 memcpy(block->colo_cache, block->host, block->used_length);
3900 }
3901 rcu_read_unlock();
7d9acafa
ZC
3902 /*
 3903 * Record the dirty pages sent by the PVM; we use this dirty bitmap to
 3904 * decide which pages in the cache should be flushed into the SVM's RAM.
 3905 * Here we use the same name 'ram_bitmap' as for migration.
3906 */
3907 if (ram_bytes_total()) {
3908 RAMBlock *block;
3909
fbd162e6 3910 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3911 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3912
3913 block->bmap = bitmap_new(pages);
3914 bitmap_set(block->bmap, 0, pages);
3915 }
3916 }
3917 ram_state = g_new0(RAMState, 1);
3918 ram_state->migration_dirty_pages = 0;
c6e5bafb 3919 qemu_mutex_init(&ram_state->bitmap_mutex);
d1955d22 3920 memory_global_dirty_log_start();
7d9acafa 3921
13af18f2
ZC
3922 return 0;
3923
3924out_locked:
7d9acafa 3925
fbd162e6 3926 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3927 if (block->colo_cache) {
3928 qemu_anon_ram_free(block->colo_cache, block->used_length);
3929 block->colo_cache = NULL;
3930 }
3931 }
3932
3933 rcu_read_unlock();
3934 return -errno;
3935}
3936
 3937/* The global lock must be held when calling this helper */
3938void colo_release_ram_cache(void)
3939{
3940 RAMBlock *block;
3941
d1955d22 3942 memory_global_dirty_log_stop();
fbd162e6 3943 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
7d9acafa
ZC
3944 g_free(block->bmap);
3945 block->bmap = NULL;
3946 }
3947
13af18f2 3948 rcu_read_lock();
7d9acafa 3949
fbd162e6 3950 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
13af18f2
ZC
3951 if (block->colo_cache) {
3952 qemu_anon_ram_free(block->colo_cache, block->used_length);
3953 block->colo_cache = NULL;
3954 }
3955 }
7d9acafa 3956
13af18f2 3957 rcu_read_unlock();
c6e5bafb 3958 qemu_mutex_destroy(&ram_state->bitmap_mutex);
7d9acafa
ZC
3959 g_free(ram_state);
3960 ram_state = NULL;
13af18f2
ZC
3961}
3962
f265e0e4
JQ
3963/**
3964 * ram_load_setup: Setup RAM for migration incoming side
3965 *
3966 * Returns zero to indicate success and negative for error
3967 *
3968 * @f: QEMUFile where to receive the data
3969 * @opaque: RAMState pointer
3970 */
3971static int ram_load_setup(QEMUFile *f, void *opaque)
3972{
34ab9e97 3973 if (compress_threads_load_setup(f)) {
797ca154
XG
3974 return -1;
3975 }
3976
f265e0e4 3977 xbzrle_load_setup();
f9494614 3978 ramblock_recv_map_init();
13af18f2 3979
f265e0e4
JQ
3980 return 0;
3981}
3982
3983static int ram_load_cleanup(void *opaque)
3984{
f9494614 3985 RAMBlock *rb;
56eb90af 3986
fbd162e6 3987 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
56eb90af
JH
3988 if (ramblock_is_pmem(rb)) {
3989 pmem_persist(rb->host, rb->used_length);
3990 }
3991 }
3992
f265e0e4 3993 xbzrle_load_cleanup();
f0afa331 3994 compress_threads_load_cleanup();
f9494614 3995
fbd162e6 3996 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
f9494614
AP
3997 g_free(rb->receivedmap);
3998 rb->receivedmap = NULL;
3999 }
13af18f2 4000
f265e0e4
JQ
4001 return 0;
4002}
4003
3d0684b2
JQ
4004/**
4005 * ram_postcopy_incoming_init: allocate postcopy data structures
4006 *
 4007 * Returns 0 for success and negative if there was an error
4008 *
4009 * @mis: current migration incoming state
4010 *
4011 * Allocate data structures etc needed by incoming migration with
 4012 * postcopy-ram. postcopy-ram's similarly named
4013 * postcopy_ram_incoming_init does the work.
1caddf8a
DDAG
4014 */
4015int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4016{
c136180c 4017 return postcopy_ram_incoming_init(mis);
1caddf8a
DDAG
4018}
4019
3d0684b2
JQ
4020/**
4021 * ram_load_postcopy: load a page in postcopy case
4022 *
4023 * Returns 0 for success or -errno in case of error
4024 *
a7180877
DDAG
4025 * Called in postcopy mode by ram_load().
4026 * rcu_read_lock is taken prior to this being called.
3d0684b2
JQ
4027 *
4028 * @f: QEMUFile where to send the data
a7180877
DDAG
4029 */
4030static int ram_load_postcopy(QEMUFile *f)
4031{
4032 int flags = 0, ret = 0;
4033 bool place_needed = false;
1aa83678 4034 bool matches_target_page_size = false;
a7180877
DDAG
4035 MigrationIncomingState *mis = migration_incoming_get_current();
4036 /* Temporary page that is later 'placed' */
4037 void *postcopy_host_page = postcopy_get_tmp_page(mis);
c53b7ddc 4038 void *last_host = NULL;
a3b6ff6d 4039 bool all_zero = false;
a7180877
DDAG
4040
4041 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4042 ram_addr_t addr;
4043 void *host = NULL;
4044 void *page_buffer = NULL;
4045 void *place_source = NULL;
df9ff5e1 4046 RAMBlock *block = NULL;
a7180877 4047 uint8_t ch;
a7180877
DDAG
4048
4049 addr = qemu_get_be64(f);
7a9ddfbf
PX
4050
4051 /*
4052 * If qemu file error, we should stop here, and then "addr"
4053 * may be invalid
4054 */
4055 ret = qemu_file_get_error(f);
4056 if (ret) {
4057 break;
4058 }
4059
a7180877
DDAG
4060 flags = addr & ~TARGET_PAGE_MASK;
4061 addr &= TARGET_PAGE_MASK;
4062
4063 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4064 place_needed = false;
bb890ed5 4065 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
df9ff5e1 4066 block = ram_block_from_stream(f, flags);
4c4bad48
HZ
4067
4068 host = host_from_ram_block_offset(block, addr);
a7180877
DDAG
4069 if (!host) {
4070 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4071 ret = -EINVAL;
4072 break;
4073 }
1aa83678 4074 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
a7180877 4075 /*
28abd200
DDAG
4076 * Postcopy requires that we place whole host pages atomically;
4077 * these may be huge pages for RAMBlocks that are backed by
4078 * hugetlbfs.
a7180877
DDAG
4079 * To make it atomic, the data is read into a temporary page
4080 * that's moved into place later.
4081 * The migration protocol uses, possibly smaller, target-pages
4082 * however the source ensures it always sends all the components
4083 * of a host page in order.
4084 */
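 /*
  * Worked example (illustrative numbers): with a 2 MiB hugetlbfs-backed
  * block and 4 KiB target pages, 512 consecutive target pages are
  * accumulated in postcopy_host_page; page_buffer below points at offset
  * N * 4 KiB inside it for the Nth target page, and place_needed only
  * becomes true for the 512th, at which point place_dest (computed further
  * down) points back at the start of the 2 MiB host page.
  */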
4085 page_buffer = postcopy_host_page +
28abd200 4086 ((uintptr_t)host & (block->page_size - 1));
a7180877 4087 /* If all TP are zero then we can optimise the place */
28abd200 4088 if (!((uintptr_t)host & (block->page_size - 1))) {
a7180877 4089 all_zero = true;
c53b7ddc
DDAG
4090 } else {
4091 /* not the 1st TP within the HP */
4092 if (host != (last_host + TARGET_PAGE_SIZE)) {
9af9e0fe 4093 error_report("Non-sequential target page %p/%p",
c53b7ddc
DDAG
4094 host, last_host);
4095 ret = -EINVAL;
4096 break;
4097 }
a7180877
DDAG
4098 }
4099
c53b7ddc 4100
a7180877
DDAG
4101 /*
4102 * If it's the last part of a host page then we place the host
4103 * page
4104 */
4105 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
28abd200 4106 (block->page_size - 1)) == 0;
a7180877
DDAG
4107 place_source = postcopy_host_page;
4108 }
c53b7ddc 4109 last_host = host;
a7180877
DDAG
4110
4111 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
bb890ed5 4112 case RAM_SAVE_FLAG_ZERO:
a7180877
DDAG
4113 ch = qemu_get_byte(f);
4114 memset(page_buffer, ch, TARGET_PAGE_SIZE);
4115 if (ch) {
4116 all_zero = false;
4117 }
4118 break;
4119
4120 case RAM_SAVE_FLAG_PAGE:
4121 all_zero = false;
1aa83678
PX
4122 if (!matches_target_page_size) {
 4123 /* For huge pages, we always use a temporary buffer */
a7180877
DDAG
4124 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4125 } else {
1aa83678
PX
4126 /*
4127 * For small pages that matches target page size, we
4128 * avoid the qemu_file copy. Instead we directly use
4129 * the buffer of QEMUFile to place the page. Note: we
4130 * cannot do any QEMUFile operation before using that
4131 * buffer to make sure the buffer is valid when
4132 * placing the page.
a7180877
DDAG
4133 */
4134 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4135 TARGET_PAGE_SIZE);
4136 }
4137 break;
4138 case RAM_SAVE_FLAG_EOS:
4139 /* normal exit */
6df264ac 4140 multifd_recv_sync_main();
a7180877
DDAG
4141 break;
4142 default:
4143 error_report("Unknown combination of migration flags: %#x"
4144 " (postcopy mode)", flags);
4145 ret = -EINVAL;
7a9ddfbf
PX
4146 break;
4147 }
4148
4149 /* Detect for any possible file errors */
4150 if (!ret && qemu_file_get_error(f)) {
4151 ret = qemu_file_get_error(f);
a7180877
DDAG
4152 }
4153
7a9ddfbf 4154 if (!ret && place_needed) {
a7180877 4155 /* This gets called at the last target page in the host page */
df9ff5e1
DDAG
4156 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
4157
a7180877 4158 if (all_zero) {
df9ff5e1 4159 ret = postcopy_place_page_zero(mis, place_dest,
8be4620b 4160 block);
a7180877 4161 } else {
df9ff5e1 4162 ret = postcopy_place_page(mis, place_dest,
8be4620b 4163 place_source, block);
a7180877
DDAG
4164 }
4165 }
a7180877
DDAG
4166 }
4167
4168 return ret;
4169}
4170
acab30b8
DHB
4171static bool postcopy_is_advised(void)
4172{
4173 PostcopyState ps = postcopy_state_get();
4174 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4175}
4176
4177static bool postcopy_is_running(void)
4178{
4179 PostcopyState ps = postcopy_state_get();
4180 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4181}
4182
e6f4aa18
ZC
4183/*
4184 * Flush content of RAM cache into SVM's memory.
 4185 * Only flush the pages that have been dirtied by the PVM, the SVM, or both.
4186 */
4187static void colo_flush_ram_cache(void)
4188{
4189 RAMBlock *block = NULL;
4190 void *dst_host;
4191 void *src_host;
4192 unsigned long offset = 0;
4193
d1955d22
HZ
4194 memory_global_dirty_log_sync();
4195 rcu_read_lock();
fbd162e6 4196 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
bf212979 4197 migration_bitmap_sync_range(ram_state, block, block->used_length);
d1955d22
HZ
4198 }
4199 rcu_read_unlock();
4200
e6f4aa18
ZC
4201 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4202 rcu_read_lock();
4203 block = QLIST_FIRST_RCU(&ram_list.blocks);
4204
4205 while (block) {
4206 offset = migration_bitmap_find_dirty(ram_state, block, offset);
4207
4208 if (offset << TARGET_PAGE_BITS >= block->used_length) {
4209 offset = 0;
4210 block = QLIST_NEXT_RCU(block, next);
4211 } else {
4212 migration_bitmap_clear_dirty(ram_state, block, offset);
4213 dst_host = block->host + (offset << TARGET_PAGE_BITS);
4214 src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4215 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4216 }
4217 }
4218
4219 rcu_read_unlock();
4220 trace_colo_flush_ram_cache_end();
4221}
4222
56e93d26
JQ
4223static int ram_load(QEMUFile *f, void *opaque, int version_id)
4224{
edc60127 4225 int flags = 0, ret = 0, invalid_flags = 0;
56e93d26
JQ
4226 static uint64_t seq_iter;
4227 int len = 0;
a7180877
DDAG
4228 /*
4229 * If system is running in postcopy mode, page inserts to host memory must
4230 * be atomic
4231 */
acab30b8 4232 bool postcopy_running = postcopy_is_running();
ef08fb38 4233 /* ADVISE is earlier, it shows the source has the postcopy capability on */
acab30b8 4234 bool postcopy_advised = postcopy_is_advised();
56e93d26
JQ
4235
4236 seq_iter++;
4237
4238 if (version_id != 4) {
4239 ret = -EINVAL;
4240 }
4241
edc60127
JQ
4242 if (!migrate_use_compression()) {
4243 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4244 }
56e93d26
JQ
4245 /* This RCU critical section can be very long running.
4246 * When RCU reclaims in the code start to become numerous,
4247 * it will be necessary to reduce the granularity of this
4248 * critical section.
4249 */
4250 rcu_read_lock();
a7180877
DDAG
4251
4252 if (postcopy_running) {
4253 ret = ram_load_postcopy(f);
4254 }
4255
4256 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
56e93d26 4257 ram_addr_t addr, total_ram_bytes;
a776aa15 4258 void *host = NULL;
56e93d26
JQ
4259 uint8_t ch;
4260
4261 addr = qemu_get_be64(f);
4262 flags = addr & ~TARGET_PAGE_MASK;
4263 addr &= TARGET_PAGE_MASK;
4264
edc60127
JQ
4265 if (flags & invalid_flags) {
4266 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4267 error_report("Received an unexpected compressed page");
4268 }
4269
4270 ret = -EINVAL;
4271 break;
4272 }
4273
bb890ed5 4274 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
a776aa15 4275 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
4c4bad48
HZ
4276 RAMBlock *block = ram_block_from_stream(f, flags);
4277
13af18f2
ZC
4278 /*
4279 * After going into COLO, we should load the Page into colo_cache.
4280 */
4281 if (migration_incoming_in_colo_state()) {
4282 host = colo_cache_from_block_offset(block, addr);
4283 } else {
4284 host = host_from_ram_block_offset(block, addr);
4285 }
a776aa15
DDAG
4286 if (!host) {
4287 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4288 ret = -EINVAL;
4289 break;
4290 }
13af18f2
ZC
4291
4292 if (!migration_incoming_in_colo_state()) {
4293 ramblock_recv_bitmap_set(block, host);
4294 }
4295
1db9d8e5 4296 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
a776aa15
DDAG
4297 }
4298
56e93d26
JQ
4299 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4300 case RAM_SAVE_FLAG_MEM_SIZE:
4301 /* Synchronize RAM block list */
4302 total_ram_bytes = addr;
4303 while (!ret && total_ram_bytes) {
4304 RAMBlock *block;
56e93d26
JQ
4305 char id[256];
4306 ram_addr_t length;
4307
4308 len = qemu_get_byte(f);
4309 qemu_get_buffer(f, (uint8_t *)id, len);
4310 id[len] = 0;
4311 length = qemu_get_be64(f);
4312
e3dd7493 4313 block = qemu_ram_block_by_name(id);
b895de50
CLG
4314 if (block && !qemu_ram_is_migratable(block)) {
4315 error_report("block %s should not be migrated !", id);
4316 ret = -EINVAL;
4317 } else if (block) {
e3dd7493
DDAG
4318 if (length != block->used_length) {
4319 Error *local_err = NULL;
56e93d26 4320
fa53a0e5 4321 ret = qemu_ram_resize(block, length,
e3dd7493
DDAG
4322 &local_err);
4323 if (local_err) {
4324 error_report_err(local_err);
56e93d26 4325 }
56e93d26 4326 }
ef08fb38
DDAG
4327 /* For postcopy we need to check hugepage sizes match */
4328 if (postcopy_advised &&
4329 block->page_size != qemu_host_page_size) {
4330 uint64_t remote_page_size = qemu_get_be64(f);
4331 if (remote_page_size != block->page_size) {
4332 error_report("Mismatched RAM page size %s "
4333 "(local) %zd != %" PRId64,
4334 id, block->page_size,
4335 remote_page_size);
4336 ret = -EINVAL;
4337 }
4338 }
fbd162e6
YK
4339 if (migrate_ignore_shared()) {
4340 hwaddr addr = qemu_get_be64(f);
4341 bool ignored = qemu_get_byte(f);
4342 if (ignored != ramblock_is_ignored(block)) {
4343 error_report("RAM block %s should %s be migrated",
4344 id, ignored ? "" : "not");
4345 ret = -EINVAL;
4346 }
4347 if (ramblock_is_ignored(block) &&
4348 block->mr->addr != addr) {
4349 error_report("Mismatched GPAs for block %s "
4350 "%" PRId64 "!= %" PRId64,
4351 id, (uint64_t)addr,
4352 (uint64_t)block->mr->addr);
4353 ret = -EINVAL;
4354 }
4355 }
e3dd7493
DDAG
4356 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4357 block->idstr);
4358 } else {
56e93d26
JQ
4359 error_report("Unknown ramblock \"%s\", cannot "
4360 "accept migration", id);
4361 ret = -EINVAL;
4362 }
4363
4364 total_ram_bytes -= length;
4365 }
4366 break;
a776aa15 4367
bb890ed5 4368 case RAM_SAVE_FLAG_ZERO:
56e93d26
JQ
4369 ch = qemu_get_byte(f);
4370 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4371 break;
a776aa15 4372
56e93d26 4373 case RAM_SAVE_FLAG_PAGE:
56e93d26
JQ
4374 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4375 break;
56e93d26 4376
a776aa15 4377 case RAM_SAVE_FLAG_COMPRESS_PAGE:
56e93d26
JQ
4378 len = qemu_get_be32(f);
4379 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4380 error_report("Invalid compressed data length: %d", len);
4381 ret = -EINVAL;
4382 break;
4383 }
c1bc6626 4384 decompress_data_with_multi_threads(f, host, len);
56e93d26 4385 break;
a776aa15 4386
56e93d26 4387 case RAM_SAVE_FLAG_XBZRLE:
56e93d26
JQ
4388 if (load_xbzrle(f, addr, host) < 0) {
4389 error_report("Failed to decompress XBZRLE page at "
4390 RAM_ADDR_FMT, addr);
4391 ret = -EINVAL;
4392 break;
4393 }
4394 break;
4395 case RAM_SAVE_FLAG_EOS:
4396 /* normal exit */
6df264ac 4397 multifd_recv_sync_main();
56e93d26
JQ
4398 break;
4399 default:
4400 if (flags & RAM_SAVE_FLAG_HOOK) {
632e3a5c 4401 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
56e93d26
JQ
4402 } else {
4403 error_report("Unknown combination of migration flags: %#x",
4404 flags);
4405 ret = -EINVAL;
4406 }
4407 }
4408 if (!ret) {
4409 ret = qemu_file_get_error(f);
4410 }
4411 }
4412
34ab9e97 4413 ret |= wait_for_decompress_done();
56e93d26 4414 rcu_read_unlock();
55c4446b 4415 trace_ram_load_complete(ret, seq_iter);
e6f4aa18
ZC
4416
4417 if (!ret && migration_incoming_in_colo_state()) {
4418 colo_flush_ram_cache();
4419 }
56e93d26
JQ
4420 return ret;
4421}
4422
c6467627
VSO
4423static bool ram_has_postcopy(void *opaque)
4424{
469dd51b 4425 RAMBlock *rb;
fbd162e6 4426 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
469dd51b
JH
4427 if (ramblock_is_pmem(rb)) {
 4428 info_report("Block: %s, host: %p is a nvdimm memory, postcopy "
4429 "is not supported now!", rb->idstr, rb->host);
4430 return false;
4431 }
4432 }
4433
c6467627
VSO
4434 return migrate_postcopy_ram();
4435}
4436
edd090c7
PX
4437/* Sync all the dirty bitmap with destination VM. */
4438static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4439{
4440 RAMBlock *block;
4441 QEMUFile *file = s->to_dst_file;
4442 int ramblock_count = 0;
4443
4444 trace_ram_dirty_bitmap_sync_start();
4445
fbd162e6 4446 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
edd090c7
PX
4447 qemu_savevm_send_recv_bitmap(file, block->idstr);
4448 trace_ram_dirty_bitmap_request(block->idstr);
4449 ramblock_count++;
4450 }
4451
4452 trace_ram_dirty_bitmap_sync_wait();
4453
4454 /* Wait until all the ramblocks' dirty bitmap synced */
4455 while (ramblock_count--) {
4456 qemu_sem_wait(&s->rp_state.rp_sem);
4457 }
4458
4459 trace_ram_dirty_bitmap_sync_complete();
4460
4461 return 0;
4462}
4463
4464static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4465{
4466 qemu_sem_post(&s->rp_state.rp_sem);
4467}
4468
a335debb
PX
4469/*
4470 * Read the received bitmap, revert it as the initial dirty bitmap.
4471 * This is only used when the postcopy migration is paused but wants
4472 * to resume from a middle point.
4473 */
4474int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4475{
4476 int ret = -EINVAL;
4477 QEMUFile *file = s->rp_state.from_dst_file;
4478 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
a725ef9f 4479 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
a335debb
PX
4480 uint64_t size, end_mark;
4481
4482 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4483
4484 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4485 error_report("%s: incorrect state %s", __func__,
4486 MigrationStatus_str(s->state));
4487 return -EINVAL;
4488 }
4489
4490 /*
4491 * Note: see comments in ramblock_recv_bitmap_send() on why we
 4492 * need the endianness conversion, and the padding.
4493 */
4494 local_size = ROUND_UP(local_size, 8);
4495
4496 /* Add paddings */
4497 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
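 /*
  * Worked example (illustrative numbers): a 1 GiB RAMBlock with 4 KiB
  * target pages has nbits = 262144, so local_size = DIV_ROUND_UP(262144, 8)
  * = 32768 bytes, already a multiple of 8, and the ROUND_UP() above leaves
  * it unchanged; the extra BITS_PER_LONG bits in the allocation presumably
  * give the buffer headroom for the sender-side padding when nbits is not
  * a multiple of 64.
  */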
4498
4499 size = qemu_get_be64(file);
4500
4501 /* The size of the bitmap should match with our ramblock */
4502 if (size != local_size) {
4503 error_report("%s: ramblock '%s' bitmap size mismatch "
4504 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4505 block->idstr, size, local_size);
4506 ret = -EINVAL;
4507 goto out;
4508 }
4509
4510 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4511 end_mark = qemu_get_be64(file);
4512
4513 ret = qemu_file_get_error(file);
4514 if (ret || size != local_size) {
4515 error_report("%s: read bitmap failed for ramblock '%s': %d"
4516 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4517 __func__, block->idstr, ret, local_size, size);
4518 ret = -EIO;
4519 goto out;
4520 }
4521
4522 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4523 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4524 __func__, block->idstr, end_mark);
4525 ret = -EINVAL;
4526 goto out;
4527 }
4528
4529 /*
 4530 * Endianness conversion. We are in postcopy (though paused).
4531 * The dirty bitmap won't change. We can directly modify it.
4532 */
4533 bitmap_from_le(block->bmap, le_bitmap, nbits);
4534
4535 /*
4536 * What we received is "received bitmap". Revert it as the initial
4537 * dirty bitmap for this ramblock.
4538 */
4539 bitmap_complement(block->bmap, block->bmap, nbits);
4540
4541 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4542
edd090c7
PX
4543 /*
 4544 * We succeeded in syncing the bitmap for the current ramblock. If this is
4545 * the last one to sync, we need to notify the main send thread.
4546 */
4547 ram_dirty_bitmap_reload_notify(s);
4548
a335debb
PX
4549 ret = 0;
4550out:
bf269906 4551 g_free(le_bitmap);
a335debb
PX
4552 return ret;
4553}
4554
edd090c7
PX
4555static int ram_resume_prepare(MigrationState *s, void *opaque)
4556{
4557 RAMState *rs = *(RAMState **)opaque;
08614f34 4558 int ret;
edd090c7 4559
08614f34
PX
4560 ret = ram_dirty_bitmap_sync_all(s, rs);
4561 if (ret) {
4562 return ret;
4563 }
4564
4565 ram_state_resume_prepare(rs, s->to_dst_file);
4566
4567 return 0;
edd090c7
PX
4568}
4569
56e93d26 4570static SaveVMHandlers savevm_ram_handlers = {
9907e842 4571 .save_setup = ram_save_setup,
56e93d26 4572 .save_live_iterate = ram_save_iterate,
763c906b 4573 .save_live_complete_postcopy = ram_save_complete,
a3e06c3d 4574 .save_live_complete_precopy = ram_save_complete,
c6467627 4575 .has_postcopy = ram_has_postcopy,
56e93d26
JQ
4576 .save_live_pending = ram_save_pending,
4577 .load_state = ram_load,
f265e0e4
JQ
4578 .save_cleanup = ram_save_cleanup,
4579 .load_setup = ram_load_setup,
4580 .load_cleanup = ram_load_cleanup,
edd090c7 4581 .resume_prepare = ram_resume_prepare,
56e93d26
JQ
4582};
4583
4584void ram_mig_init(void)
4585{
4586 qemu_mutex_init(&XBZRLE.lock);
6f37bb8b 4587 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
56e93d26 4588}
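/*
 * Note: the "4" passed to register_savevm_live() above is the section version
 * number; it matches the "version_id != 4" check at the top of ram_load().
 */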