/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "qemu/pmem.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "socket.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "block.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
#include "savevm.h"
#include "qemu/iov.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS; it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  It was also renamed to avoid
 * confusion with RAM_SAVE_FLAG_COMPRESS_PAGE.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
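
/*
 * Note: these flags travel in the low bits of the 64-bit page offset
 * that save_page_header() puts on the wire.  Page offsets are always
 * target-page aligned, so the low bits are free to carry metadata.
 */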

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

static bool ramblock_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block));
}

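/*
 * Both macros below end in "if (...) {} else" so that the caller's loop
 * body attaches to the else branch: filtered blocks are skipped, yet
 * the whole expansion still parses as a single statement.
 */
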
/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (ramblock_is_ignored(block)) {} else

#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define  RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* The free page optimization is enabled */
    bool fpo_enabled;
    /* How many times we have found too many dirty pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;

    /* compression statistics since the beginning of the period */
    /* number of times no free thread was available to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* number of compressed pages */
    uint64_t compress_pages_prev;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* Protects modification of the bitmap and migration dirty pages */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}

void precopy_enable_free_page_optimization(void)
{
    if (!ram_state) {
        return;
    }

    ram_state->fpo_enabled = true;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

CompressionStats compression_counters;

struct CompressParam {
    bool done;
    bool quit;
    bool zero_page;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used together with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);

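/*
 * Compression worker loop: each thread sleeps on param->cond until the
 * migration thread posts a (block, offset) pair, compresses that page
 * into param->file, then marks itself done and signals comp_done_cond
 * so the migration thread can collect the result.  Setting param->quit
 * asks the thread to exit.
 */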
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;
    bool zero_page;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            zero_page = do_compress_ram_page(param->file, &param->stream,
                                             block, offset, param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            param->zero_page = zero_page;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression() || !comp_param) {
        return;
    }

    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator of whether the thread was
         * properly initialized or not
         */
        if (!comp_param[i].file) {
            break;
        }

        qemu_mutex_lock(&comp_param[i].mutex);
        comp_param[i].quit = true;
        qemu_cond_signal(&comp_param[i].cond);
        qemu_mutex_unlock(&comp_param[i].mutex);

        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}

/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

/* This value needs to be a multiple of qemu_target_page_size() */
#define MULTIFD_PACKET_SIZE (512 * 1024)

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
    uint8_t unused1[7];     /* Reserved for future use */
    uint64_t unused2[4];    /* Reserved for future use */
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    /* maximum number of allocated pages */
    uint32_t pages_alloc;
    uint32_t pages_used;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    uint64_t packet_num;
    uint64_t unused[4];    /* Reserved for future use */
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;

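/*
 * Both packed structs above are wire formats: every multi-byte field is
 * sent big-endian, converted with cpu_to_be*() when a packet is filled
 * and be*_to_cpu() when it is unfilled.
 */
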
typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* packets received through this channel */
    uint64_t num_packets;
    /* pages received through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;

static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}

static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d "
                   "is greater than number of channels %d",
                   msg.id, migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}

static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}

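/*
 * Serialize the mutable header fields of p->packet into wire (big
 * endian) byte order.  The magic and version fields were already
 * filled in once at setup time.
 */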
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->flags = cpu_to_be32(p->flags);
    packet->pages_alloc = cpu_to_be32(p->pages->allocated);
    packet->pages_used = cpu_to_be32(p->pages->used);
    packet->next_packet_size = cpu_to_be32(p->next_packet_size);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}

static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    RAMBlock *block;
    int i;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
    /*
     * If we received a packet that is 100 times bigger than expected,
     * just stop migration.  The multiplier is an arbitrary safety
     * limit.
     */
    if (packet->pages_alloc > pages_max * 100) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected a maximum size of %d",
                   packet->pages_alloc, pages_max * 100);
        return -1;
    }
    /*
     * We received a packet that is bigger than expected but inside
     * reasonable limits (see previous comment).  Just reallocate.
     */
    if (packet->pages_alloc > p->pages->allocated) {
        multifd_pages_clear(p->pages);
        p->pages = multifd_pages_init(packet->pages_alloc);
    }

    p->pages->used = be32_to_cpu(packet->pages_used);
    if (p->pages->used > packet->pages_alloc) {
        error_setg(errp, "multifd: received packet "
                   "with %d pages and expected maximum pages are %d",
                   p->pages->used, packet->pages_alloc);
        return -1;
    }

    p->next_packet_size = be32_to_cpu(packet->next_packet_size);
    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used) {
        /* make sure that ramblock is 0 terminated */
        packet->ramblock[255] = 0;
        block = qemu_ram_block_by_name(packet->ramblock);
        if (!block) {
            error_setg(errp, "multifd: unknown ram block %s",
                       packet->ramblock);
            return -1;
        }
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->used_length - TARGET_PAGE_SIZE);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}

struct {
    MultiFDSendParams *params;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
} *multifd_send_state;

/*
 * How do we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages struct for each channel, and a main one.  Each time
 * that we need to send a batch of pages we interchange the ones between
 * multifd_send_state and the channel that is sending it.  There are
 * two reasons for that:
 *    - to not have to do so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who is the owner of each "pages" struct,
 * and we don't need any locking.  It belongs to the migration thread
 * or to the channel thread.  Switching is safe because the migration
 * thread is using the channel mutex when changing it, and the channel
 * thread has to have finished with its own, otherwise pending_job
 * can't be false.
 */

static int multifd_send_pages(RAMState *rs)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make gcc happy */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            error_report("%s: channel %d has already quit!", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return -1;
        }
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;

    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    qemu_file_update_transfer(rs->f, transferred);
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);

    return 1;
}

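/*
 * Queue one page into the current batch.  When the batch fills up, or
 * the page belongs to a different RAMBlock than the batch, the batch
 * is handed off with multifd_send_pages() and the page is re-queued
 * into the fresh batch.
 */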
static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return 1;
        }
    }

    if (multifd_send_pages(rs) < 0) {
        return -1;
    }

    if (pages->block != block) {
        return multifd_queue_page(rs, block, offset);
    }

    return 1;
}

static void multifd_send_terminate_threads(Error *err)
{
    int i;

    trace_multifd_send_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

void multifd_save_cleanup(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
}

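/*
 * Flush the pending batch, then have every channel emit a packet
 * flagged MULTIFD_FLAG_SYNC and wait on each channel's sem_sync until
 * that packet is on the wire.  The destination pairs this with
 * multifd_recv_sync_main() to obtain a full synchronization point.
 */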
static void multifd_send_sync_main(RAMState *rs)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        if (multifd_send_pages(rs) < 0) {
            error_report("%s: multifd_send_pages fail", __func__);
            return;
        }
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        if (p->quit) {
            error_report("%s: channel %d has already quit", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return;
        }

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_file_update_transfer(rs->f, p->packet_len);
        ram_counters.multifd_bytes += p->packet_len;
        ram_counters.transferred += p->packet_len;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&p->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}

static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret = 0;
    uint32_t flags = 0;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        ret = -1;
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            flags = p->flags;

            p->next_packet_size = used * qemu_target_page_size();
            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags,
                               p->next_packet_size);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            if (used) {
                ret = qio_channel_writev_all(p->c, p->pages->iov,
                                             used, &local_err);
                if (ret != 0) {
                    break;
                }
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&p->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        trace_multifd_send_error(p->id);
        multifd_send_terminate_threads(local_err);
    }

    /*
     * If an error happened, exit, but first wake up whoever is waiting
     * on our semaphores so they don't block forever.
     */
    if (ret != 0) {
        qemu_sem_post(&p->sem_sync);
        qemu_sem_post(&multifd_send_state->channels_ready);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    trace_multifd_new_send_channel_async(p->id);
    if (qio_task_propagate_error(task, &local_err)) {
        migrate_set_error(migrate_get_current(), local_err);
        multifd_save_cleanup();
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);
    }
}

int multifd_save_setup(void)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    multifd_send_state->pages = multifd_pages_init(page_count);
    qemu_sem_init(&multifd_send_state->channels_ready, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->pending_job = 0;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
        p->packet->version = cpu_to_be32(MULTIFD_VERSION);
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}

struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
    /* global number of generated multifd packets */
    uint64_t packet_num;
} *multifd_recv_state;

static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    trace_multifd_recv_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        /* We could arrive here for two reasons:
           - normal quit, i.e. everything went fine, just finished
           - error quit: we close the channels so the channel threads
             finish the qio_channel_read_all_eof() */
        qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            p->quit = true;
            /*
             * multifd_recv_thread may be blocked in the
             * MULTIFD_FLAG_SYNC handling code; waking it up is
             * harmless in the cleanup phase.
             */
            qemu_sem_post(&p->sem_sync);
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_recv_state->sem_sync);
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}

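/*
 * Destination-side counterpart of multifd_send_sync_main(): wait until
 * every channel has seen a MULTIFD_FLAG_SYNC packet, publish the
 * highest packet_num observed, then release the channel threads again.
 */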
static void multifd_recv_sync_main(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        trace_multifd_recv_sync_main_wait(p->id);
        qemu_sem_wait(&multifd_recv_state->sem_sync);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (multifd_recv_state->packet_num < p->packet_num) {
            multifd_recv_state->packet_num = p->packet_num;
        }
        qemu_mutex_unlock(&p->mutex);
        trace_multifd_recv_sync_main_signal(p->id);
        qemu_sem_post(&p->sem_sync);
    }
    trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
}

static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;
    Error *local_err = NULL;
    int ret;

    trace_multifd_recv_thread_start(p->id);
    rcu_register_thread();

    while (true) {
        uint32_t used;
        uint32_t flags;

        if (p->quit) {
            break;
        }

        ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
                                       p->packet_len, &local_err);
        if (ret == 0) {   /* EOF */
            break;
        }
        if (ret == -1) {   /* Error */
            break;
        }

        qemu_mutex_lock(&p->mutex);
        ret = multifd_recv_unfill_packet(p, &local_err);
        if (ret) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }

        used = p->pages->used;
        flags = p->flags;
        trace_multifd_recv(p->id, p->packet_num, used, flags,
                           p->next_packet_size);
        p->num_packets++;
        p->num_pages += used;
        qemu_mutex_unlock(&p->mutex);

        if (used) {
            ret = qio_channel_readv_all(p->c, p->pages->iov,
                                        used, &local_err);
            if (ret != 0) {
                break;
            }
        }

        if (flags & MULTIFD_FLAG_SYNC) {
            qemu_sem_post(&multifd_recv_state->sem_sync);
            qemu_sem_wait(&p->sem_sync);
        }
    }

    if (local_err) {
        multifd_recv_terminate_threads(local_err);
    }
    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    rcu_unregister_thread();
    trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);

    return NULL;
}

int multifd_load_setup(void)
{
    int thread_count;
    uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    qemu_sem_init(&multifd_recv_state->sem_sync, 0);

    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem_sync, 0);
        p->quit = false;
        p->id = i;
        p->pages = multifd_pages_init(page_count);
        p->packet_len = sizeof(MultiFDPacket_t)
                      + sizeof(ram_addr_t) * page_count;
        p->packet = g_malloc0(p->packet_len);
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}

bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}

/*
 * Try to receive all multifd channels to get ready for the migration.
 * - Return true and do not set @errp when correctly receiving all channels;
 * - Return false and do not set @errp when correctly receiving the current one;
 * - Return false and set @errp when failing to receive the current channel.
 */
bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        error_propagate_prepend(errp, local_err,
                                "failed to receive packet"
                                " via multifd channel %d: ",
                                atomic_read(&multifd_recv_state->count));
        return false;
    }
    trace_multifd_recv_new_channel(id);

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already set up",
                   id);
        multifd_recv_terminate_threads(local_err);
        error_propagate(errp, local_err);
        return false;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));
    /* initial packet */
    p->num_packets = 1;

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    return atomic_read(&multifd_recv_state->count) ==
           migrate_multifd_channels();
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;
    int pct_max = s->parameters.max_cpu_throttle;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_increment,
                             pct_max));
    }
}

/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);

    /*
     * Update the cache contents, so that it corresponds to the data
     * sent, in all cases except where we skip the page.
     */
    if (!last_stage && encoded_len != 0) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
        /*
         * In the case where we couldn't compress, ensure that the caller
         * sends the data from the cache, since the guest might have
         * changed the RAM since we copied it.
         */
        *current_data = prev_cached_page;
    }

    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        return -1;
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}

/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Returns the page offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (ramblock_is_ignored(rb)) {
        return size;
    }

    /*
     * When the free page optimization is enabled, we need to check the bitmap
     * to send the non-free pages rather than all the pages in the bulk stage.
     */
    if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

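/*
 * Test-and-clear the dirty bit of @page in @rb's migration bitmap,
 * decrementing rs->migration_dirty_pages when the bit was set.  It also
 * lazily clears the corresponding chunk of the remote dirty bitmap
 * (clear_bmap) before any page of that chunk is sent.
 */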
06b10688 1714static inline bool migration_bitmap_clear_dirty(RAMState *rs,
f20e2865
JQ
1715 RAMBlock *rb,
1716 unsigned long page)
a82d593b
DDAG
1717{
1718 bool ret;
a82d593b 1719
386a907b 1720 qemu_mutex_lock(&rs->bitmap_mutex);
002cad6b
PX
1721
1722 /*
1723 * Clear the dirty bitmap if needed. This _must_ be called before we
1724 * send any of the pages in the chunk, because we need to make sure
1725 * we can capture further page content changes when we sync the dirty
1726 * log the next time. So as long as we are going to send any of
1727 * the pages in the chunk, we clear the remote dirty bitmap for all.
1728 * Clearing it earlier won't be a problem, but clearing it too late will.
1729 */
1730 if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1731 uint8_t shift = rb->clear_bmap_shift;
1732 hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1733 hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
1734
1735 /*
1736 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
1737 * can make things easier sometimes since the start address
1738 * of the small chunk will always be aligned to 64 pages, so the
1739 * bitmap will always be aligned to unsigned long. We should
1740 * even be able to remove this restriction but I'm simply
1741 * keeping it.
1742 */
1743 assert(shift >= 6);
1744 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1745 memory_region_clear_dirty_bitmap(rb->mr, start, size);
1746 }
1747
6b6712ef 1748 ret = test_and_clear_bit(page, rb->bmap);
a82d593b
DDAG
1749
1750 if (ret) {
0d8ec885 1751 rs->migration_dirty_pages--;
a82d593b 1752 }
386a907b
WW
1753 qemu_mutex_unlock(&rs->bitmap_mutex);
1754
a82d593b
DDAG
1755 return ret;
1756}
1757
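/*
 * Editor's note (not part of ram.c): the clear_bmap chunk arithmetic used
 * above, as a standalone sketch. One clear_bmap bit covers 2^shift target
 * pages, and "start" is the page's byte address rounded down to that
 * chunk. The 4K target page size here is an assumption for illustration.
 */
#include <stdint.h>

#define EX_TARGET_PAGE_BITS 12   /* assumed 4K target pages */

static void ex_clear_chunk_bounds(unsigned long page, uint8_t shift,
                                  uint64_t *start, uint64_t *size)
{
    *size = 1ULL << (EX_TARGET_PAGE_BITS + shift);
    /* "& ~(size - 1)" equals the "& (-size)" above for a power of two */
    *start = ((uint64_t)page << EX_TARGET_PAGE_BITS) & ~(*size - 1);
}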
267691b6 1758/* Called with RCU critical section */
7a3e9571 1759static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
56e93d26 1760{
0d8ec885 1761 rs->migration_dirty_pages +=
5d0980a4 1762 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
0d8ec885 1763 &rs->num_dirty_pages_period);
56e93d26
JQ
1764}
1765
3d0684b2
JQ
1766/**
1767 * ram_pagesize_summary: calculate all the pagesizes of a VM
1768 *
1769 * Returns a summary bitmap of the page sizes of all RAMBlocks
1770 *
1771 * For VMs with just normal pages this is equivalent to the host page
1772 * size. If it's got some huge pages then it's the OR of all the
1773 * different page sizes.
e8ca1db2
DDAG
1774 */
1775uint64_t ram_pagesize_summary(void)
1776{
1777 RAMBlock *block;
1778 uint64_t summary = 0;
1779
fbd162e6 1780 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
e8ca1db2
DDAG
1781 summary |= block->page_size;
1782 }
1783
1784 return summary;
1785}
1786
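/*
 * Editor's note (not part of ram.c): page sizes are powers of two, so
 * OR-ing them keeps one bit per distinct size. A standalone example: a
 * guest with 4K normal RAM plus 2M hugepage-backed RAM yields a summary
 * of 0x201000, from which both sizes can be read back bit by bit.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t summary = 0x1000 | 0x200000;   /* 4K | 2M */
    for (int bit = 0; bit < 64; bit++) {
        if (summary & (1ULL << bit)) {
            printf("page size present: %llu\n",
                   (unsigned long long)(1ULL << bit));
        }
    }
    return 0;
}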
aecbfe9c
XG
1787uint64_t ram_get_total_transferred_pages(void)
1788{
1789 return ram_counters.normal + ram_counters.duplicate +
1790 compression_counters.pages + xbzrle_counters.pages;
1791}
1792
b734035b
XG
1793static void migration_update_rates(RAMState *rs, int64_t end_time)
1794{
be8b02ed 1795 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
76e03000 1796 double compressed_size;
b734035b
XG
1797
1798 /* calculate period counters */
1799 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1800 / (end_time - rs->time_last_bitmap_sync);
1801
be8b02ed 1802 if (!page_count) {
b734035b
XG
1803 return;
1804 }
1805
1806 if (migrate_use_xbzrle()) {
1807 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
be8b02ed 1808 rs->xbzrle_cache_miss_prev) / page_count;
b734035b
XG
1809 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1810 }
76e03000
XG
1811
1812 if (migrate_use_compression()) {
1813 compression_counters.busy_rate = (double)(compression_counters.busy -
1814 rs->compress_thread_busy_prev) / page_count;
1815 rs->compress_thread_busy_prev = compression_counters.busy;
1816
1817 compressed_size = compression_counters.compressed_size -
1818 rs->compressed_size_prev;
1819 if (compressed_size) {
1820 double uncompressed_size = (compression_counters.pages -
1821 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1822
1823 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1824 compression_counters.compression_rate =
1825 uncompressed_size / compressed_size;
1826
1827 rs->compress_pages_prev = compression_counters.pages;
1828 rs->compressed_size_prev = compression_counters.compressed_size;
1829 }
1830 }
b734035b
XG
1831}
1832
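/*
 * Editor's note (not part of ram.c): the period arithmetic above on
 * concrete numbers, as a standalone sketch. 2000 pages dirtied over a
 * 500ms window gives 2000 * 1000 / 500 = 4000 pages/s; a cache-miss
 * delta of 300 over 1200 processed pages gives a miss rate of 0.25.
 */
#include <stdint.h>

static uint64_t ex_dirty_pages_rate(uint64_t pages_in_period,
                                    int64_t period_ms)
{
    return pages_in_period * 1000 / period_ms;  /* pages per second */
}

static double ex_cache_miss_rate(uint64_t miss_delta, uint64_t page_count)
{
    return (double)miss_delta / page_count;     /* e.g. 300/1200 = 0.25 */
}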
8d820d6f 1833static void migration_bitmap_sync(RAMState *rs)
56e93d26
JQ
1834{
1835 RAMBlock *block;
56e93d26 1836 int64_t end_time;
c4bdf0cf 1837 uint64_t bytes_xfer_now;
56e93d26 1838
9360447d 1839 ram_counters.dirty_sync_count++;
56e93d26 1840
f664da80
JQ
1841 if (!rs->time_last_bitmap_sync) {
1842 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
56e93d26
JQ
1843 }
1844
1845 trace_migration_bitmap_sync_start();
9c1f8f44 1846 memory_global_dirty_log_sync();
56e93d26 1847
108cfae0 1848 qemu_mutex_lock(&rs->bitmap_mutex);
89ac5a1d
DDAG
1849 WITH_RCU_READ_LOCK_GUARD() {
1850 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1851 ramblock_sync_dirty_bitmap(rs, block);
1852 }
1853 ram_counters.remaining = ram_bytes_remaining();
56e93d26 1854 }
108cfae0 1855 qemu_mutex_unlock(&rs->bitmap_mutex);
56e93d26 1856
9458a9a1 1857 memory_global_after_dirty_log_sync();
a66cd90c 1858 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
1ffb5dfd 1859
56e93d26
JQ
1860 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1861
1862 /* more than 1 second = 1000 milliseconds */
f664da80 1863 if (end_time > rs->time_last_bitmap_sync + 1000) {
9360447d 1864 bytes_xfer_now = ram_counters.transferred;
d693c6f1 1865
9ac78b61
PL
1866 /* During block migration the auto-converge logic incorrectly detects
1867 * that ram migration makes no progress. Avoid this by disabling the
1868 * throttling logic during the bulk phase of block migration. */
1869 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
56e93d26
JQ
1870 /* The following detection logic can be refined later. For now:
1871 Check to see if the dirtied bytes are 50% more than the approx.
1872 amount of bytes that just got transferred since the last time we
070afca2
JH
1873 were in this routine. If that happens twice, start or increase
1874 throttling */
070afca2 1875
d693c6f1 1876 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
eac74159 1877 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
b4a3c64b 1878 (++rs->dirty_rate_high_cnt >= 2)) {
56e93d26 1879 trace_migration_throttle();
8d820d6f 1880 rs->dirty_rate_high_cnt = 0;
070afca2 1881 mig_throttle_guest_down();
d693c6f1 1882 }
56e93d26 1883 }
070afca2 1884
b734035b
XG
1885 migration_update_rates(rs, end_time);
1886
be8b02ed 1887 rs->target_page_count_prev = rs->target_page_count;
d693c6f1
FF
1888
1889 /* reset period counters */
f664da80 1890 rs->time_last_bitmap_sync = end_time;
a66cd90c 1891 rs->num_dirty_pages_period = 0;
d2a4d85a 1892 rs->bytes_xfer_prev = bytes_xfer_now;
56e93d26 1893 }
4addcd4f 1894 if (migrate_use_events()) {
3ab72385 1895 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
4addcd4f 1896 }
56e93d26
JQ
1897}
1898
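/*
 * Editor's note (not part of ram.c): the auto-converge trigger above as a
 * standalone predicate. Throttling kicks in once the bytes dirtied in the
 * last period exceed half of the bytes actually transferred in that
 * period, two periods in a row. Names are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>

static bool ex_should_throttle(uint64_t dirty_pages, uint64_t page_size,
                               uint64_t xfer_now, uint64_t xfer_prev,
                               int *high_cnt)
{
    if (dirty_pages * page_size > (xfer_now - xfer_prev) / 2 &&
        ++*high_cnt >= 2) {
        *high_cnt = 0;
        return true;    /* ram.c calls mig_throttle_guest_down() here */
    }
    return false;
}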
bd227060
WW
1899static void migration_bitmap_sync_precopy(RAMState *rs)
1900{
1901 Error *local_err = NULL;
1902
1903 /*
1904 * The current notifier usage is just an optimization for migration, so we
1905 * don't stop the normal migration process in the error case.
1906 */
1907 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1908 error_report_err(local_err);
1909 }
1910
1911 migration_bitmap_sync(rs);
1912
1913 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1914 error_report_err(local_err);
1915 }
1916}
1917
6c97ec5f
XG
1918/**
1919 * save_zero_page_to_file: send the zero page to the file
1920 *
1921 * Returns the size of data written to the file; 0 means the page is not
1922 * a zero page
1923 *
1924 * @rs: current RAM state
1925 * @file: the file where the data is saved
1926 * @block: block that contains the page we want to send
1927 * @offset: offset inside the block for the page
1928 */
1929static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1930 RAMBlock *block, ram_addr_t offset)
1931{
1932 uint8_t *p = block->host + offset;
1933 int len = 0;
1934
1935 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1936 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1937 qemu_put_byte(file, 0);
1938 len += 1;
1939 }
1940 return len;
1941}
1942
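/*
 * Editor's note (not part of ram.c): buffer_is_zero() is an optimized
 * qemu utility; this standalone sketch is a naive but equivalent model of
 * the test above. When it holds, the whole page is represented on the
 * wire by its header plus a single zero byte.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool ex_page_is_zero(const uint8_t *p, size_t size)
{
    for (size_t i = 0; i < size; i++) {
        if (p[i]) {
            return false;   /* first non-zero byte disqualifies the page */
        }
    }
    return true;
}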
56e93d26 1943/**
3d0684b2 1944 * save_zero_page: send the zero page to the stream
56e93d26 1945 *
3d0684b2 1946 * Returns the number of pages written.
56e93d26 1947 *
f7ccd61b 1948 * @rs: current RAM state
56e93d26
JQ
1949 * @block: block that contains the page we want to send
1950 * @offset: offset inside the block for the page
56e93d26 1951 */
7faccdc3 1952static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
56e93d26 1953{
6c97ec5f 1954 int len = save_zero_page_to_file(rs, rs->f, block, offset);
56e93d26 1955
6c97ec5f 1956 if (len) {
9360447d 1957 ram_counters.duplicate++;
6c97ec5f
XG
1958 ram_counters.transferred += len;
1959 return 1;
56e93d26 1960 }
6c97ec5f 1961 return -1;
56e93d26
JQ
1962}
1963
5727309d 1964static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
53f09a10 1965{
5727309d 1966 if (!migrate_release_ram() || !migration_in_postcopy()) {
53f09a10
PB
1967 return;
1968 }
1969
aaa2064c 1970 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
53f09a10
PB
1971}
1972
059ff0fb
XG
1973/*
1974 * @pages: the number of pages written by the control path,
1975 * < 0 - error
1976 * > 0 - number of pages written
1977 *
1978 * Returns true if the page has been saved, otherwise returns false.
1979 */
1980static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
1981 int *pages)
1982{
1983 uint64_t bytes_xmit = 0;
1984 int ret;
1985
1986 *pages = -1;
1987 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
1988 &bytes_xmit);
1989 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
1990 return false;
1991 }
1992
1993 if (bytes_xmit) {
1994 ram_counters.transferred += bytes_xmit;
1995 *pages = 1;
1996 }
1997
1998 if (ret == RAM_SAVE_CONTROL_DELAYED) {
1999 return true;
2000 }
2001
2002 if (bytes_xmit > 0) {
2003 ram_counters.normal++;
2004 } else if (bytes_xmit == 0) {
2005 ram_counters.duplicate++;
2006 }
2007
2008 return true;
2009}
2010
65dacaa0
XG
2011/*
2012 * directly send the page to the stream
2013 *
2014 * Returns the number of pages written.
2015 *
2016 * @rs: current RAM state
2017 * @block: block that contains the page we want to send
2018 * @offset: offset inside the block for the page
2019 * @buf: the page to be sent
2021 * @async: send the page asynchronously
2021 */
2022static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2023 uint8_t *buf, bool async)
2024{
2025 ram_counters.transferred += save_page_header(rs, rs->f, block,
2026 offset | RAM_SAVE_FLAG_PAGE);
2027 if (async) {
2028 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2029 migrate_release_ram() &&
2030 migration_in_postcopy());
2031 } else {
2032 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2033 }
2034 ram_counters.transferred += TARGET_PAGE_SIZE;
2035 ram_counters.normal++;
2036 return 1;
2037}
2038
56e93d26 2039/**
3d0684b2 2040 * ram_save_page: send the given page to the stream
56e93d26 2041 *
3d0684b2 2042 * Returns the number of pages written.
3fd3c4b3
DDAG
2043 * < 0 - error
2044 * >=0 - Number of pages written - this might legally be 0
2045 * if xbzrle noticed the page was the same.
56e93d26 2046 *
6f37bb8b 2047 * @rs: current RAM state
56e93d26
JQ
2048 * @block: block that contains the page we want to send
2049 * @offset: offset inside the block for the page
2050 * @last_stage: if we are at the completion stage
56e93d26 2051 */
a0a8aa14 2052static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
56e93d26
JQ
2053{
2054 int pages = -1;
56e93d26 2055 uint8_t *p;
56e93d26 2056 bool send_async = true;
a08f6890 2057 RAMBlock *block = pss->block;
a935e30f 2058 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
059ff0fb 2059 ram_addr_t current_addr = block->offset + offset;
56e93d26 2060
2f68e399 2061 p = block->host + offset;
1db9d8e5 2062 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
56e93d26 2063
56e93d26 2064 XBZRLE_cache_lock();
d7400a34
XG
2065 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2066 migrate_use_xbzrle()) {
059ff0fb
XG
2067 pages = save_xbzrle_page(rs, &p, current_addr, block,
2068 offset, last_stage);
2069 if (!last_stage) {
2070 /* Can't send this cached data async, since the cache page
2071 * might get updated before it gets to the wire
56e93d26 2072 */
059ff0fb 2073 send_async = false;
56e93d26
JQ
2074 }
2075 }
2076
2077 /* XBZRLE overflow or normal page */
2078 if (pages == -1) {
65dacaa0 2079 pages = save_normal_page(rs, block, offset, p, send_async);
56e93d26
JQ
2080 }
2081
2082 XBZRLE_cache_unlock();
2083
2084 return pages;
2085}
2086
b9ee2f7d
JQ
2087static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2088 ram_addr_t offset)
2089{
1b81c974 2090 if (multifd_queue_page(rs, block, offset) < 0) {
713f762a
IR
2091 return -1;
2092 }
b9ee2f7d
JQ
2093 ram_counters.normal++;
2094
2095 return 1;
2096}
2097
5e5fdcff 2098static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
6ef3771c 2099 ram_addr_t offset, uint8_t *source_buf)
56e93d26 2100{
53518d94 2101 RAMState *rs = ram_state;
a7a9a88f 2102 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
5e5fdcff 2103 bool zero_page = false;
6ef3771c 2104 int ret;
56e93d26 2105
5e5fdcff
XG
2106 if (save_zero_page_to_file(rs, f, block, offset)) {
2107 zero_page = true;
2108 goto exit;
2109 }
2110
6ef3771c 2111 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
34ab9e97
XG
2112
2113 /*
2114 * copy it to an internal buffer to avoid it being modified by the VM
2115 * so that we can catch errors during compression and
2116 * decompression
2117 */
2118 memcpy(source_buf, p, TARGET_PAGE_SIZE);
6ef3771c
XG
2119 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2120 if (ret < 0) {
2121 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
b3be2896 2122 error_report("compressed data failed!");
5e5fdcff 2123 return false;
b3be2896 2124 }
56e93d26 2125
5e5fdcff 2126exit:
6ef3771c 2127 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
5e5fdcff
XG
2128 return zero_page;
2129}
2130
2131static void
2132update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2133{
76e03000
XG
2134 ram_counters.transferred += bytes_xmit;
2135
5e5fdcff
XG
2136 if (param->zero_page) {
2137 ram_counters.duplicate++;
76e03000 2138 return;
5e5fdcff 2139 }
76e03000
XG
2140
2141 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2142 compression_counters.compressed_size += bytes_xmit - 8;
2143 compression_counters.pages++;
56e93d26
JQ
2144}
2145
32b05495
XG
2146static bool save_page_use_compression(RAMState *rs);
2147
ce25d337 2148static void flush_compressed_data(RAMState *rs)
56e93d26
JQ
2149{
2150 int idx, len, thread_count;
2151
32b05495 2152 if (!save_page_use_compression(rs)) {
56e93d26
JQ
2153 return;
2154 }
2155 thread_count = migrate_compress_threads();
a7a9a88f 2156
0d9f9a5c 2157 qemu_mutex_lock(&comp_done_lock);
56e93d26 2158 for (idx = 0; idx < thread_count; idx++) {
a7a9a88f 2159 while (!comp_param[idx].done) {
0d9f9a5c 2160 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
56e93d26 2161 }
a7a9a88f 2162 }
0d9f9a5c 2163 qemu_mutex_unlock(&comp_done_lock);
a7a9a88f
LL
2164
2165 for (idx = 0; idx < thread_count; idx++) {
2166 qemu_mutex_lock(&comp_param[idx].mutex);
90e56fb4 2167 if (!comp_param[idx].quit) {
ce25d337 2168 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
5e5fdcff
XG
2169 /*
2170 * it's safe to fetch zero_page without holding comp_done_lock
2171 * as there is no further request submitted to the thread,
2172 * i.e., the thread should be waiting for a request at this point.
2173 */
2174 update_compress_thread_counts(&comp_param[idx], len);
56e93d26 2175 }
a7a9a88f 2176 qemu_mutex_unlock(&comp_param[idx].mutex);
56e93d26
JQ
2177 }
2178}
2179
2180static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2181 ram_addr_t offset)
2182{
2183 param->block = block;
2184 param->offset = offset;
2185}
2186
ce25d337
JQ
2187static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2188 ram_addr_t offset)
56e93d26
JQ
2189{
2190 int idx, thread_count, bytes_xmit = -1, pages = -1;
1d58872a 2191 bool wait = migrate_compress_wait_thread();
56e93d26
JQ
2192
2193 thread_count = migrate_compress_threads();
0d9f9a5c 2194 qemu_mutex_lock(&comp_done_lock);
1d58872a
XG
2195retry:
2196 for (idx = 0; idx < thread_count; idx++) {
2197 if (comp_param[idx].done) {
2198 comp_param[idx].done = false;
2199 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2200 qemu_mutex_lock(&comp_param[idx].mutex);
2201 set_compress_params(&comp_param[idx], block, offset);
2202 qemu_cond_signal(&comp_param[idx].cond);
2203 qemu_mutex_unlock(&comp_param[idx].mutex);
2204 pages = 1;
5e5fdcff 2205 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
56e93d26 2206 break;
56e93d26
JQ
2207 }
2208 }
1d58872a
XG
2209
2210 /*
2211 * wait for the free thread if the user specifies 'compress-wait-thread',
2212 * otherwise we will post the page out in the main thread as normal page.
2213 */
2214 if (pages < 0 && wait) {
2215 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2216 goto retry;
2217 }
0d9f9a5c 2218 qemu_mutex_unlock(&comp_done_lock);
56e93d26
JQ
2219
2220 return pages;
2221}
2222
3d0684b2
JQ
2223/**
2224 * find_dirty_block: find the next dirty page and update any state
2225 * associated with the search process.
b9e60928 2226 *
a5f7b1a6 2227 * Returns true if a page is found
b9e60928 2228 *
6f37bb8b 2229 * @rs: current RAM state
3d0684b2
JQ
2230 * @pss: data about the state of the current dirty page scan
2231 * @again: set to false if the search has scanned the whole of RAM
b9e60928 2232 */
f20e2865 2233static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
b9e60928 2234{
f20e2865 2235 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
6f37bb8b 2236 if (pss->complete_round && pss->block == rs->last_seen_block &&
a935e30f 2237 pss->page >= rs->last_page) {
b9e60928
DDAG
2238 /*
2239 * We've been once around the RAM and haven't found anything.
2240 * Give up.
2241 */
2242 *again = false;
2243 return false;
2244 }
a935e30f 2245 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
b9e60928 2246 /* Didn't find anything in this RAM Block */
a935e30f 2247 pss->page = 0;
b9e60928
DDAG
2248 pss->block = QLIST_NEXT_RCU(pss->block, next);
2249 if (!pss->block) {
48df9d80
XG
2250 /*
2251 * If memory migration starts over, we will meet a dirtied page
2252 * which may still exist in the compression threads' ring, so we
2253 * should flush the compressed data to make sure the new page
2254 * is not overwritten by the old one in the destination.
2255 *
2256 * Also, if xbzrle is on, stop using the data compression at this
2257 * point. In theory, xbzrle can do better than compression.
2258 */
2259 flush_compressed_data(rs);
2260
b9e60928
DDAG
2261 /* Hit the end of the list */
2262 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2263 /* Flag that we've looped */
2264 pss->complete_round = true;
6f37bb8b 2265 rs->ram_bulk_stage = false;
b9e60928
DDAG
2266 }
2267 /* Didn't find anything this time, but try again on the new block */
2268 *again = true;
2269 return false;
2270 } else {
2271 /* Can go around again, but... */
2272 *again = true;
2273 /* We've found something so probably don't need to */
2274 return true;
2275 }
2276}
2277
3d0684b2
JQ
2278/**
2279 * unqueue_page: gets a page of the queue
2280 *
a82d593b 2281 * Helper for 'get_queued_page' - gets a page off the queue
a82d593b 2282 *
3d0684b2
JQ
2283 * Returns the block of the page (or NULL if none available)
2284 *
ec481c6c 2285 * @rs: current RAM state
3d0684b2 2286 * @offset: used to return the offset within the RAMBlock
a82d593b 2287 */
f20e2865 2288static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
a82d593b
DDAG
2289{
2290 RAMBlock *block = NULL;
2291
ae526e32
XG
2292 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2293 return NULL;
2294 }
2295
ec481c6c
JQ
2296 qemu_mutex_lock(&rs->src_page_req_mutex);
2297 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2298 struct RAMSrcPageRequest *entry =
2299 QSIMPLEQ_FIRST(&rs->src_page_requests);
a82d593b
DDAG
2300 block = entry->rb;
2301 *offset = entry->offset;
a82d593b
DDAG
2302
2303 if (entry->len > TARGET_PAGE_SIZE) {
2304 entry->len -= TARGET_PAGE_SIZE;
2305 entry->offset += TARGET_PAGE_SIZE;
2306 } else {
2307 memory_region_unref(block->mr);
ec481c6c 2308 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
a82d593b 2309 g_free(entry);
e03a34f8 2310 migration_consume_urgent_request();
a82d593b
DDAG
2311 }
2312 }
ec481c6c 2313 qemu_mutex_unlock(&rs->src_page_req_mutex);
a82d593b
DDAG
2314
2315 return block;
2316}
2317
3d0684b2 2318/**
ff1543af 2319 * get_queued_page: unqueue a page from the postcopy requests
3d0684b2
JQ
2320 *
2321 * Skips pages that are already sent (!dirty)
a82d593b 2322 *
a5f7b1a6 2323 * Returns true if a queued page is found
a82d593b 2324 *
6f37bb8b 2325 * @rs: current RAM state
3d0684b2 2326 * @pss: data about the state of the current dirty page scan
a82d593b 2327 */
f20e2865 2328static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
a82d593b
DDAG
2329{
2330 RAMBlock *block;
2331 ram_addr_t offset;
2332 bool dirty;
2333
2334 do {
f20e2865 2335 block = unqueue_page(rs, &offset);
a82d593b
DDAG
2336 /*
2337 * We're sending this page, and since it's postcopy nothing else
2338 * will dirty it, and we must make sure it doesn't get sent again
2339 * even if this queue request was received after the background
2340 * search already sent it.
2341 */
2342 if (block) {
f20e2865
JQ
2343 unsigned long page;
2344
6b6712ef
JQ
2345 page = offset >> TARGET_PAGE_BITS;
2346 dirty = test_bit(page, block->bmap);
a82d593b 2347 if (!dirty) {
06b10688 2348 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
64737606 2349 page);
a82d593b 2350 } else {
f20e2865 2351 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
a82d593b
DDAG
2352 }
2353 }
2354
2355 } while (block && !dirty);
2356
2357 if (block) {
2358 /*
2359 * As soon as we start servicing pages out of order, we have
2360 * to kill the bulk stage, since the bulk stage assumes
2361 * in (migration_bitmap_find_and_reset_dirty) that every page is
2362 * dirty, which is no longer true.
2363 */
6f37bb8b 2364 rs->ram_bulk_stage = false;
a82d593b
DDAG
2365
2366 /*
2367 * We want the background search to continue from the queued page
2368 * since the guest is likely to want other pages near to the page
2369 * it just requested.
2370 */
2371 pss->block = block;
a935e30f 2372 pss->page = offset >> TARGET_PAGE_BITS;
422314e7
WY
2373
2374 /*
2375 * This unqueued page would break the "one round" check, even if
2376 * it is really rare.
2377 */
2378 pss->complete_round = false;
a82d593b
DDAG
2379 }
2380
2381 return !!block;
2382}
2383
6c595cde 2384/**
5e58f968
JQ
2385 * migration_page_queue_free: drop any remaining pages in the ram
2386 * request queue
6c595cde 2387 *
3d0684b2
JQ
2388 * It should be empty at the end anyway, but in error cases there may
2389 * be some left. If any pages are left, we drop them.
2390 *
6c595cde 2391 */
83c13382 2392static void migration_page_queue_free(RAMState *rs)
6c595cde 2393{
ec481c6c 2394 struct RAMSrcPageRequest *mspr, *next_mspr;
6c595cde
DDAG
2395 /* This queue generally should be empty - but in the case of a failed
2396 * migration it might still contain some entries.
2397 */
89ac5a1d 2398 RCU_READ_LOCK_GUARD();
ec481c6c 2399 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
6c595cde 2400 memory_region_unref(mspr->rb->mr);
ec481c6c 2401 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
6c595cde
DDAG
2402 g_free(mspr);
2403 }
6c595cde
DDAG
2404}
2405
2406/**
3d0684b2
JQ
2407 * ram_save_queue_pages: queue the page for transmission
2408 *
2409 * A request from postcopy destination for example.
2410 *
2411 * Returns zero on success or negative on error
2412 *
3d0684b2
JQ
2413 * @rbname: Name of the RAMBlock of the request. NULL means the
2414 * same as the last one.
2415 * @start: starting address from the start of the RAMBlock
2416 * @len: length (in bytes) to send
6c595cde 2417 */
96506894 2418int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
6c595cde
DDAG
2419{
2420 RAMBlock *ramblock;
53518d94 2421 RAMState *rs = ram_state;
6c595cde 2422
9360447d 2423 ram_counters.postcopy_requests++;
89ac5a1d
DDAG
2424 RCU_READ_LOCK_GUARD();
2425
6c595cde
DDAG
2426 if (!rbname) {
2427 /* Reuse last RAMBlock */
68a098f3 2428 ramblock = rs->last_req_rb;
6c595cde
DDAG
2429
2430 if (!ramblock) {
2431 /*
2432 * Shouldn't happen, we can't reuse the last RAMBlock if
2433 * it's the 1st request.
2434 */
2435 error_report("ram_save_queue_pages no previous block");
2436 goto err;
2437 }
2438 } else {
2439 ramblock = qemu_ram_block_by_name(rbname);
2440
2441 if (!ramblock) {
2442 /* We shouldn't be asked for a non-existent RAMBlock */
2443 error_report("ram_save_queue_pages no block '%s'", rbname);
2444 goto err;
2445 }
68a098f3 2446 rs->last_req_rb = ramblock;
6c595cde
DDAG
2447 }
2448 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2449 if (start + len > ramblock->used_length) {
9458ad6b
JQ
2450 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2451 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
6c595cde
DDAG
2452 __func__, start, len, ramblock->used_length);
2453 goto err;
2454 }
2455
ec481c6c
JQ
2456 struct RAMSrcPageRequest *new_entry =
2457 g_malloc0(sizeof(struct RAMSrcPageRequest));
6c595cde
DDAG
2458 new_entry->rb = ramblock;
2459 new_entry->offset = start;
2460 new_entry->len = len;
2461
2462 memory_region_ref(ramblock->mr);
ec481c6c
JQ
2463 qemu_mutex_lock(&rs->src_page_req_mutex);
2464 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
e03a34f8 2465 migration_make_urgent_request();
ec481c6c 2466 qemu_mutex_unlock(&rs->src_page_req_mutex);
6c595cde
DDAG
2467
2468 return 0;
2469
2470err:
6c595cde
DDAG
2471 return -1;
2472}
2473
d7400a34
XG
2474static bool save_page_use_compression(RAMState *rs)
2475{
2476 if (!migrate_use_compression()) {
2477 return false;
2478 }
2479
2480 /*
2481 * If xbzrle is on, stop using the data compression after the first
2482 * round of migration even if compression is enabled. In theory,
2483 * xbzrle can do better than compression.
2484 */
2485 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2486 return true;
2487 }
2488
2489 return false;
2490}
2491
5e5fdcff
XG
2492/*
2493 * try to compress the page before posting it out; return true if the page
2494 * has been properly handled by compression, otherwise it needs other
2495 * paths to handle it
2496 */
2497static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2498{
2499 if (!save_page_use_compression(rs)) {
2500 return false;
2501 }
2502
2503 /*
2504 * When starting the process of a new block, the first page of
2505 * the block should be sent out before other pages in the same
2506 * block, and all the pages in the last block should have been sent
2507 * out. Keeping this order is important, because the 'cont' flag
2508 * is used to avoid resending the block name.
2509 *
2510 * We post the first page as a normal page as compression will take
2511 * a lot of CPU resources.
2512 */
2513 if (block != rs->last_sent_block) {
2514 flush_compressed_data(rs);
2515 return false;
2516 }
2517
2518 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2519 return true;
2520 }
2521
76e03000 2522 compression_counters.busy++;
5e5fdcff
XG
2523 return false;
2524}
2525
a82d593b 2526/**
3d0684b2 2527 * ram_save_target_page: save one target page
a82d593b 2528 *
3d0684b2 2529 * Returns the number of pages written
a82d593b 2530 *
6f37bb8b 2531 * @rs: current RAM state
3d0684b2 2532 * @pss: data about the page we want to send
a82d593b 2533 * @last_stage: if we are at the completion stage
a82d593b 2534 */
a0a8aa14 2535static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2536 bool last_stage)
a82d593b 2537{
a8ec91f9
XG
2538 RAMBlock *block = pss->block;
2539 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2540 int res;
2541
2542 if (control_save_page(rs, block, offset, &res)) {
2543 return res;
2544 }
2545
5e5fdcff
XG
2546 if (save_compress_page(rs, block, offset)) {
2547 return 1;
d7400a34
XG
2548 }
2549
2550 res = save_zero_page(rs, block, offset);
2551 if (res > 0) {
2552 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2553 * page would be stale
2554 */
2555 if (!save_page_use_compression(rs)) {
2556 XBZRLE_cache_lock();
2557 xbzrle_cache_zero_page(rs, block->offset + offset);
2558 XBZRLE_cache_unlock();
2559 }
2560 ram_release_pages(block->idstr, offset, res);
2561 return res;
2562 }
2563
da3f56cb 2564 /*
5e5fdcff
XG
2565 * do not use multifd for compression as the first page in the new
2566 * block should be posted out before sending the compressed page
da3f56cb 2567 */
5e5fdcff 2568 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
b9ee2f7d 2569 return ram_save_multifd_page(rs, block, offset);
a82d593b
DDAG
2570 }
2571
1faa5665 2572 return ram_save_page(rs, pss, last_stage);
a82d593b
DDAG
2573}
2574
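/*
 * Editor's note (not part of ram.c): the dispatch order implemented
 * above, condensed into a standalone selector. Each stage either fully
 * handles the page or defers to the next; the enum and flags here are
 * illustrative, not qemu API.
 */
#include <stdbool.h>

enum ex_save_path {
    EX_PATH_CONTROL,    /* e.g. RDMA took the page */
    EX_PATH_COMPRESS,   /* queued to a compression thread */
    EX_PATH_ZERO,       /* one-byte record for an all-zero page */
    EX_PATH_MULTIFD,    /* spread across multifd channels */
    EX_PATH_NORMAL      /* plain or xbzrle page on the main channel */
};

static enum ex_save_path ex_pick_save_path(bool control_ok, bool compress_ok,
                                           bool is_zero, bool multifd_on)
{
    if (control_ok)  return EX_PATH_CONTROL;
    if (compress_ok) return EX_PATH_COMPRESS;
    if (is_zero)     return EX_PATH_ZERO;
    if (multifd_on)  return EX_PATH_MULTIFD;
    return EX_PATH_NORMAL;
}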
2575/**
3d0684b2 2576 * ram_save_host_page: save a whole host page
a82d593b 2577 *
3d0684b2
JQ
2578 * Starting at *offset send pages up to the end of the current host
2579 * page. It's valid for the initial offset to point into the middle of
2580 * a host page, in which case the remainder of the host page is sent.
2581 * Only dirty target pages are sent. Note that the host page size may
2582 * be a huge page for this block.
1eb3fc0a
DDAG
2583 * The saving stops at the boundary of the used_length of the block
2584 * if the RAMBlock isn't a multiple of the host page size.
a82d593b 2585 *
3d0684b2
JQ
2586 * Returns the number of pages written or negative on error
2587 *
6f37bb8b 2588 * @rs: current RAM state
3d0684b2 2589 * @ms: current migration state
3d0684b2 2590 * @pss: data about the page we want to send
a82d593b 2591 * @last_stage: if we are at the completion stage
a82d593b 2592 */
a0a8aa14 2593static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
f20e2865 2594 bool last_stage)
a82d593b
DDAG
2595{
2596 int tmppages, pages = 0;
a935e30f
JQ
2597 size_t pagesize_bits =
2598 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
4c011c37 2599
fbd162e6 2600 if (ramblock_is_ignored(pss->block)) {
b895de50
CLG
2601 error_report("block %s should not be migrated !", pss->block->idstr);
2602 return 0;
2603 }
2604
a82d593b 2605 do {
1faa5665
XG
2606 /* Check if the page is dirty, and send it if it is */
2607 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2608 pss->page++;
2609 continue;
2610 }
2611
f20e2865 2612 tmppages = ram_save_target_page(rs, pss, last_stage);
a82d593b
DDAG
2613 if (tmppages < 0) {
2614 return tmppages;
2615 }
2616
2617 pages += tmppages;
a935e30f 2618 pss->page++;
1eb3fc0a
DDAG
2619 } while ((pss->page & (pagesize_bits - 1)) &&
2620 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
a82d593b
DDAG
2621
2622 /* The offset we leave with is the last one we looked at */
a935e30f 2623 pss->page--;
a82d593b
DDAG
2624 return pages;
2625}
6c595cde 2626
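/*
 * Editor's note (not part of ram.c): the loop bound above, standalone.
 * With 2M host pages and 4K target pages, pagesize_bits is 512, so
 * saving continues until pss->page crosses a 512-page boundary and a
 * whole host page is always completed. Helper name is hypothetical.
 */
static unsigned long ex_pages_left_in_host_page(unsigned long page,
                                                unsigned long pagesize_bits)
{
    /* target pages remaining before the next host-page boundary */
    return pagesize_bits - (page & (pagesize_bits - 1));
}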
56e93d26 2627/**
3d0684b2 2628 * ram_find_and_save_block: finds a dirty page and sends it to f
56e93d26
JQ
2629 *
2630 * Called within an RCU critical section.
2631 *
e8f3735f
XG
2632 * Returns the number of pages written where zero means no dirty pages,
2633 * or negative on error
56e93d26 2634 *
6f37bb8b 2635 * @rs: current RAM state
56e93d26 2636 * @last_stage: if we are at the completion stage
a82d593b
DDAG
2637 *
2638 * On systems where host-page-size > target-page-size it will send all the
2639 * pages in a host page that are dirty.
56e93d26
JQ
2640 */
2641
ce25d337 2642static int ram_find_and_save_block(RAMState *rs, bool last_stage)
56e93d26 2643{
b8fb8cb7 2644 PageSearchStatus pss;
56e93d26 2645 int pages = 0;
b9e60928 2646 bool again, found;
56e93d26 2647
0827b9e9
AA
2648 /* No dirty page as there is zero RAM */
2649 if (!ram_bytes_total()) {
2650 return pages;
2651 }
2652
6f37bb8b 2653 pss.block = rs->last_seen_block;
a935e30f 2654 pss.page = rs->last_page;
b8fb8cb7
DDAG
2655 pss.complete_round = false;
2656
2657 if (!pss.block) {
2658 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2659 }
56e93d26 2660
b9e60928 2661 do {
a82d593b 2662 again = true;
f20e2865 2663 found = get_queued_page(rs, &pss);
b9e60928 2664
a82d593b
DDAG
2665 if (!found) {
2666 /* priority queue empty, so just search for something dirty */
f20e2865 2667 found = find_dirty_block(rs, &pss, &again);
a82d593b 2668 }
f3f491fc 2669
a82d593b 2670 if (found) {
f20e2865 2671 pages = ram_save_host_page(rs, &pss, last_stage);
56e93d26 2672 }
b9e60928 2673 } while (!pages && again);
56e93d26 2674
6f37bb8b 2675 rs->last_seen_block = pss.block;
a935e30f 2676 rs->last_page = pss.page;
56e93d26
JQ
2677
2678 return pages;
2679}
2680
2681void acct_update_position(QEMUFile *f, size_t size, bool zero)
2682{
2683 uint64_t pages = size / TARGET_PAGE_SIZE;
f7ccd61b 2684
56e93d26 2685 if (zero) {
9360447d 2686 ram_counters.duplicate += pages;
56e93d26 2687 } else {
9360447d
JQ
2688 ram_counters.normal += pages;
2689 ram_counters.transferred += size;
56e93d26
JQ
2690 qemu_update_position(f, size);
2691 }
2692}
2693
fbd162e6 2694static uint64_t ram_bytes_total_common(bool count_ignored)
56e93d26
JQ
2695{
2696 RAMBlock *block;
2697 uint64_t total = 0;
2698
89ac5a1d
DDAG
2699 RCU_READ_LOCK_GUARD();
2700
fbd162e6
YK
2701 if (count_ignored) {
2702 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2703 total += block->used_length;
2704 }
2705 } else {
2706 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2707 total += block->used_length;
2708 }
99e15582 2709 }
56e93d26
JQ
2710 return total;
2711}
2712
fbd162e6
YK
2713uint64_t ram_bytes_total(void)
2714{
2715 return ram_bytes_total_common(false);
2716}
2717
f265e0e4 2718static void xbzrle_load_setup(void)
56e93d26 2719{
f265e0e4 2720 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
56e93d26
JQ
2721}
2722
f265e0e4
JQ
2723static void xbzrle_load_cleanup(void)
2724{
2725 g_free(XBZRLE.decoded_buf);
2726 XBZRLE.decoded_buf = NULL;
2727}
2728
7d7c96be
PX
2729static void ram_state_cleanup(RAMState **rsp)
2730{
b9ccaf6d
DDAG
2731 if (*rsp) {
2732 migration_page_queue_free(*rsp);
2733 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2734 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2735 g_free(*rsp);
2736 *rsp = NULL;
2737 }
7d7c96be
PX
2738}
2739
84593a08
PX
2740static void xbzrle_cleanup(void)
2741{
2742 XBZRLE_cache_lock();
2743 if (XBZRLE.cache) {
2744 cache_fini(XBZRLE.cache);
2745 g_free(XBZRLE.encoded_buf);
2746 g_free(XBZRLE.current_buf);
2747 g_free(XBZRLE.zero_target_page);
2748 XBZRLE.cache = NULL;
2749 XBZRLE.encoded_buf = NULL;
2750 XBZRLE.current_buf = NULL;
2751 XBZRLE.zero_target_page = NULL;
2752 }
2753 XBZRLE_cache_unlock();
2754}
2755
f265e0e4 2756static void ram_save_cleanup(void *opaque)
56e93d26 2757{
53518d94 2758 RAMState **rsp = opaque;
6b6712ef 2759 RAMBlock *block;
eb859c53 2760
2ff64038 2761 /* caller have hold iothread lock or is in a bh, so there is
4633456c 2762 * no writing race against the migration bitmap
2ff64038 2763 */
6b6712ef
JQ
2764 memory_global_dirty_log_stop();
2765
fbd162e6 2766 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
002cad6b
PX
2767 g_free(block->clear_bmap);
2768 block->clear_bmap = NULL;
6b6712ef
JQ
2769 g_free(block->bmap);
2770 block->bmap = NULL;
56e93d26
JQ
2771 }
2772
84593a08 2773 xbzrle_cleanup();
f0afa331 2774 compress_threads_save_cleanup();
7d7c96be 2775 ram_state_cleanup(rsp);
56e93d26
JQ
2776}
2777
6f37bb8b 2778static void ram_state_reset(RAMState *rs)
56e93d26 2779{
6f37bb8b
JQ
2780 rs->last_seen_block = NULL;
2781 rs->last_sent_block = NULL;
269ace29 2782 rs->last_page = 0;
6f37bb8b
JQ
2783 rs->last_version = ram_list.version;
2784 rs->ram_bulk_stage = true;
6eeb63f7 2785 rs->fpo_enabled = false;
56e93d26
JQ
2786}
2787
2788#define MAX_WAIT 50 /* ms, half buffered_file limit */
2789
4f2e4252
DDAG
2790/*
2791 * 'expected' is the value you expect the bitmap mostly to be full
2792 * of; it won't bother printing lines that are all this value.
2793 * If 'todump' is null the migration bitmap is dumped.
2794 */
6b6712ef
JQ
2795void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2796 unsigned long pages)
4f2e4252 2797{
4f2e4252
DDAG
2798 int64_t cur;
2799 int64_t linelen = 128;
2800 char linebuf[129];
2801
6b6712ef 2802 for (cur = 0; cur < pages; cur += linelen) {
4f2e4252
DDAG
2803 int64_t curb;
2804 bool found = false;
2805 /*
2806 * Last line; catch the case where the line length
2807 * is longer than remaining ram
2808 */
6b6712ef
JQ
2809 if (cur + linelen > pages) {
2810 linelen = pages - cur;
4f2e4252
DDAG
2811 }
2812 for (curb = 0; curb < linelen; curb++) {
2813 bool thisbit = test_bit(cur + curb, todump);
2814 linebuf[curb] = thisbit ? '1' : '.';
2815 found = found || (thisbit != expected);
2816 }
2817 if (found) {
2818 linebuf[curb] = '\0';
2819 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2820 }
2821 }
2822}
2823
e0b266f0
DDAG
2824/* **** functions for postcopy ***** */
2825
ced1c616
PB
2826void ram_postcopy_migrated_memory_release(MigrationState *ms)
2827{
2828 struct RAMBlock *block;
ced1c616 2829
fbd162e6 2830 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
2831 unsigned long *bitmap = block->bmap;
2832 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2833 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
ced1c616
PB
2834
2835 while (run_start < range) {
2836 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
aaa2064c 2837 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
ced1c616
PB
2838 (run_end - run_start) << TARGET_PAGE_BITS);
2839 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2840 }
2841 }
2842}
2843
3d0684b2
JQ
2844/**
2845 * postcopy_send_discard_bm_ram: discard a RAMBlock
2846 *
2847 * Returns zero on success
2848 *
e0b266f0 2849 * Callback from postcopy_each_ram_send_discard for each RAMBlock
3d0684b2
JQ
2850 *
2851 * @ms: current migration state
89dab31b 2852 * @block: RAMBlock to discard
e0b266f0 2853 */
810cf2bb 2854static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
e0b266f0 2855{
6b6712ef 2856 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
e0b266f0 2857 unsigned long current;
1e7cf8c3 2858 unsigned long *bitmap = block->bmap;
e0b266f0 2859
6b6712ef 2860 for (current = 0; current < end; ) {
1e7cf8c3 2861 unsigned long one = find_next_bit(bitmap, end, current);
33a5cb62 2862 unsigned long zero, discard_length;
e0b266f0 2863
33a5cb62
WY
2864 if (one >= end) {
2865 break;
2866 }
e0b266f0 2867
1e7cf8c3 2868 zero = find_next_zero_bit(bitmap, end, one + 1);
33a5cb62
WY
2869
2870 if (zero >= end) {
2871 discard_length = end - one;
e0b266f0 2872 } else {
33a5cb62
WY
2873 discard_length = zero - one;
2874 }
810cf2bb 2875 postcopy_discard_send_range(ms, one, discard_length);
33a5cb62 2876 current = one + discard_length;
e0b266f0
DDAG
2877 }
2878
2879 return 0;
2880}
2881
3d0684b2
JQ
2882/**
2883 * postcopy_each_ram_send_discard: discard all RAMBlocks
2884 *
2885 * Returns 0 for success or negative for error
2886 *
e0b266f0
DDAG
2887 * Utility for the outgoing postcopy code.
2888 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2889 * passing it bitmap indexes and name.
e0b266f0
DDAG
2890 * (qemu_ram_foreach_block ends up passing unscaled lengths
2891 * which would mean postcopy code would have to deal with target page)
3d0684b2
JQ
2892 *
2893 * @ms: current migration state
e0b266f0
DDAG
2894 */
2895static int postcopy_each_ram_send_discard(MigrationState *ms)
2896{
2897 struct RAMBlock *block;
2898 int ret;
2899
fbd162e6 2900 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
810cf2bb 2901 postcopy_discard_send_init(ms, block->idstr);
e0b266f0
DDAG
2902
2903 /*
2904 * Postcopy sends chunks of bitmap over the wire, but it
2905 * just needs indexes at this point, avoids it having
2906 * target page specific code.
2907 */
810cf2bb
WY
2908 ret = postcopy_send_discard_bm_ram(ms, block);
2909 postcopy_discard_send_finish(ms);
e0b266f0
DDAG
2910 if (ret) {
2911 return ret;
2912 }
2913 }
2914
2915 return 0;
2916}
2917
3d0684b2 2918/**
8324ef86 2919 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
3d0684b2
JQ
2920 *
2921 * Helper for postcopy_chunk_hostpages; it's called twice to
2922 * canonicalize the two bitmaps, which are similar, but one is
2923 * inverted.
99e314eb 2924 *
3d0684b2
JQ
2925 * Postcopy requires that all target pages in a hostpage are dirty or
2926 * clean, not a mix. This function canonicalizes the bitmaps.
99e314eb 2927 *
3d0684b2 2928 * @ms: current migration state
3d0684b2 2929 * @block: block that contains the page we want to canonicalize
99e314eb 2930 */
1e7cf8c3 2931static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
99e314eb 2932{
53518d94 2933 RAMState *rs = ram_state;
6b6712ef 2934 unsigned long *bitmap = block->bmap;
29c59172 2935 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
6b6712ef 2936 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
99e314eb
DDAG
2937 unsigned long run_start;
2938
29c59172
DDAG
2939 if (block->page_size == TARGET_PAGE_SIZE) {
2940 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2941 return;
2942 }
2943
1e7cf8c3
WY
2944 /* Find a dirty page */
2945 run_start = find_next_bit(bitmap, pages, 0);
99e314eb 2946
6b6712ef 2947 while (run_start < pages) {
99e314eb
DDAG
2948
2949 /*
2950 * If the start of this run of pages is in the middle of a host
2951 * page, then we need to fixup this host page.
2952 */
9dec3cc3 2953 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2954 /* Find the end of this run */
1e7cf8c3 2955 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
99e314eb
DDAG
2956 /*
2957 * If the end isn't at the start of a host page, then the
2958 * run doesn't finish at the end of a host page
2959 * and we need to discard.
2960 */
99e314eb
DDAG
2961 }
2962
9dec3cc3 2963 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
99e314eb 2964 unsigned long page;
dad45ab2
WY
2965 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2966 host_ratio);
2967 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
99e314eb 2968
99e314eb
DDAG
2969 /* Clean up the bitmap */
2970 for (page = fixup_start_addr;
2971 page < fixup_start_addr + host_ratio; page++) {
99e314eb
DDAG
2972 /*
2973 * Remark them as dirty, updating the count for any pages
2974 * that weren't previously dirty.
2975 */
0d8ec885 2976 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
99e314eb
DDAG
2977 }
2978 }
2979
1e7cf8c3
WY
2980 /* Find the next dirty page for the next iteration */
2981 run_start = find_next_bit(bitmap, pages, run_start);
99e314eb
DDAG
2982 }
2983}
2984
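/*
 * Editor's note (not part of ram.c): the canonicalization above on
 * concrete numbers, standalone. With host_ratio = 512 (2M host pages,
 * 4K target pages), a dirty run starting at target page 1000 is widened
 * to the whole host page [512, 1024) so no host page stays partially
 * dirty.
 */
static void ex_widen_to_host_page(unsigned long run_start,
                                  unsigned long host_ratio,
                                  unsigned long *fixup_start,
                                  unsigned long *fixup_end)
{
    *fixup_start = run_start / host_ratio * host_ratio; /* QEMU_ALIGN_DOWN */
    *fixup_end = *fixup_start + host_ratio;             /* one host page */
}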
3d0684b2 2985/**
89dab31b 2986 * postcopy_chunk_hostpages: discard any partially sent host page
3d0684b2 2987 *
99e314eb
DDAG
2988 * Utility for the outgoing postcopy code.
2989 *
2990 * Discard any partially sent host-page size chunks, mark any partially
29c59172
DDAG
2991 * dirty host-page size chunks as all dirty. In this case the host-page
2992 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
99e314eb 2993 *
3d0684b2
JQ
2994 * Returns zero on success
2995 *
2996 * @ms: current migration state
6b6712ef 2997 * @block: block we want to work with
99e314eb 2998 */
6b6712ef 2999static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
99e314eb 3000{
810cf2bb 3001 postcopy_discard_send_init(ms, block->idstr);
99e314eb 3002
6b6712ef 3003 /*
1e7cf8c3 3004 * Ensure that all partially dirty host pages are made fully dirty.
6b6712ef 3005 */
1e7cf8c3 3006 postcopy_chunk_hostpages_pass(ms, block);
99e314eb 3007
810cf2bb 3008 postcopy_discard_send_finish(ms);
99e314eb
DDAG
3009 return 0;
3010}
3011
3d0684b2
JQ
3012/**
3013 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3014 *
3015 * Returns zero on success
3016 *
e0b266f0
DDAG
3017 * Transmit the set of pages to be discarded after precopy to the target;
3018 * these are pages that:
3019 * a) Have been previously transmitted but are now dirty again
3020 * b) Have never been transmitted; this ensures that
3021 * any pages on the destination that have been mapped by background
3022 * tasks get discarded (transparent huge pages are the specific concern)
3023 * Hopefully this is pretty sparse
3d0684b2
JQ
3024 *
3025 * @ms: current migration state
e0b266f0
DDAG
3026 */
3027int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3028{
53518d94 3029 RAMState *rs = ram_state;
6b6712ef 3030 RAMBlock *block;
e0b266f0 3031 int ret;
e0b266f0 3032
89ac5a1d 3033 RCU_READ_LOCK_GUARD();
e0b266f0
DDAG
3034
3035 /* This should be our last sync, the src is now paused */
eb859c53 3036 migration_bitmap_sync(rs);
e0b266f0 3037
6b6712ef
JQ
3038 /* Easiest way to make sure we don't resume in the middle of a host-page */
3039 rs->last_seen_block = NULL;
3040 rs->last_sent_block = NULL;
3041 rs->last_page = 0;
e0b266f0 3042
fbd162e6 3043 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
6b6712ef
JQ
3044 /* Deal with TPS != HPS and huge pages */
3045 ret = postcopy_chunk_hostpages(ms, block);
3046 if (ret) {
6b6712ef
JQ
3047 return ret;
3048 }
e0b266f0 3049
e0b266f0 3050#ifdef DEBUG_POSTCOPY
1e7cf8c3
WY
3051 ram_debug_dump_bitmap(block->bmap, true,
3052 block->used_length >> TARGET_PAGE_BITS);
e0b266f0 3053#endif
6b6712ef
JQ
3054 }
3055 trace_ram_postcopy_send_discard_bitmap();
e0b266f0
DDAG
3056
3057 ret = postcopy_each_ram_send_discard(ms);
e0b266f0
DDAG
3058
3059 return ret;
3060}
3061
3d0684b2
JQ
3062/**
3063 * ram_discard_range: discard dirtied pages at the beginning of postcopy
e0b266f0 3064 *
3d0684b2 3065 * Returns zero on success
e0b266f0 3066 *
36449157
JQ
3067 * @rbname: name of the RAMBlock of the request. NULL means the
3068 * same that last one.
3d0684b2
JQ
3069 * @start: RAMBlock starting page
3070 * @length: RAMBlock size
e0b266f0 3071 */
aaa2064c 3072int ram_discard_range(const char *rbname, uint64_t start, size_t length)
e0b266f0
DDAG
3073{
3074 int ret = -1;
3075
36449157 3076 trace_ram_discard_range(rbname, start, length);
d3a5038c 3077
89ac5a1d 3078 RCU_READ_LOCK_GUARD();
36449157 3079 RAMBlock *rb = qemu_ram_block_by_name(rbname);
e0b266f0
DDAG
3080
3081 if (!rb) {
36449157 3082 error_report("ram_discard_range: Failed to find block '%s'", rbname);
e0b266f0
DDAG
3083 goto err;
3084 }
3085
814bb08f
PX
3086 /*
3087 * On source VM, we don't need to update the received bitmap since
3088 * we don't even have one.
3089 */
3090 if (rb->receivedmap) {
3091 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3092 length >> qemu_target_page_bits());
3093 }
3094
d3a5038c 3095 ret = ram_block_discard_range(rb, start, length);
e0b266f0
DDAG
3096
3097err:
e0b266f0
DDAG
3098 return ret;
3099}
3100
84593a08
PX
3101/*
3102 * For every allocation, we will try not to crash the VM if the
3103 * allocation fails.
3104 */
3105static int xbzrle_init(void)
3106{
3107 Error *local_err = NULL;
3108
3109 if (!migrate_use_xbzrle()) {
3110 return 0;
3111 }
3112
3113 XBZRLE_cache_lock();
3114
3115 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3116 if (!XBZRLE.zero_target_page) {
3117 error_report("%s: Error allocating zero page", __func__);
3118 goto err_out;
3119 }
3120
3121 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3122 TARGET_PAGE_SIZE, &local_err);
3123 if (!XBZRLE.cache) {
3124 error_report_err(local_err);
3125 goto free_zero_page;
3126 }
3127
3128 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3129 if (!XBZRLE.encoded_buf) {
3130 error_report("%s: Error allocating encoded_buf", __func__);
3131 goto free_cache;
3132 }
3133
3134 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3135 if (!XBZRLE.current_buf) {
3136 error_report("%s: Error allocating current_buf", __func__);
3137 goto free_encoded_buf;
3138 }
3139
3140 /* We are all good */
3141 XBZRLE_cache_unlock();
3142 return 0;
3143
3144free_encoded_buf:
3145 g_free(XBZRLE.encoded_buf);
3146 XBZRLE.encoded_buf = NULL;
3147free_cache:
3148 cache_fini(XBZRLE.cache);
3149 XBZRLE.cache = NULL;
3150free_zero_page:
3151 g_free(XBZRLE.zero_target_page);
3152 XBZRLE.zero_target_page = NULL;
3153err_out:
3154 XBZRLE_cache_unlock();
3155 return -ENOMEM;
3156}
3157
53518d94 3158static int ram_state_init(RAMState **rsp)
56e93d26 3159{
7d00ee6a
PX
3160 *rsp = g_try_new0(RAMState, 1);
3161
3162 if (!*rsp) {
3163 error_report("%s: Init ramstate fail", __func__);
3164 return -1;
3165 }
53518d94
JQ
3166
3167 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3168 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3169 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
56e93d26 3170
7d00ee6a 3171 /*
40c4d4a8
IR
3172 * Count the total number of pages used by ram blocks not including any
3173 * gaps due to alignment or unplugs.
03158519 3174 * This must match the initial values of the dirty bitmap.
7d00ee6a 3175 */
40c4d4a8 3176 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
7d00ee6a
PX
3177 ram_state_reset(*rsp);
3178
3179 return 0;
3180}
3181
d6eff5d7 3182static void ram_list_init_bitmaps(void)
7d00ee6a 3183{
002cad6b 3184 MigrationState *ms = migrate_get_current();
d6eff5d7
PX
3185 RAMBlock *block;
3186 unsigned long pages;
002cad6b 3187 uint8_t shift;
56e93d26 3188
0827b9e9
AA
3189 /* Skip setting bitmap if there is no RAM */
3190 if (ram_bytes_total()) {
002cad6b
PX
3191 shift = ms->clear_bitmap_shift;
3192 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3193 error_report("clear_bitmap_shift (%u) too big, using "
3194 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3195 shift = CLEAR_BITMAP_SHIFT_MAX;
3196 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3197 error_report("clear_bitmap_shift (%u) too small, using "
3198 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3199 shift = CLEAR_BITMAP_SHIFT_MIN;
3200 }
3201
fbd162e6 3202 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
d6eff5d7 3203 pages = block->max_length >> TARGET_PAGE_BITS;
03158519
WY
3204 /*
3205 * The initial dirty bitmap for migration must be set with all
3206 * ones to make sure we'll migrate every guest RAM page to
3207 * destination.
40c4d4a8
IR
3208 * Here we set RAMBlock.bmap all to 1 because when restarting a
3209 * new migration after a failed migration, ram_list.
3210 * dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't include the whole
3211 * guest memory.
03158519 3212 */
6b6712ef 3213 block->bmap = bitmap_new(pages);
40c4d4a8 3214 bitmap_set(block->bmap, 0, pages);
002cad6b
PX
3215 block->clear_bmap_shift = shift;
3216 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
0827b9e9 3217 }
f3f491fc 3218 }
d6eff5d7
PX
3219}
3220
3221static void ram_init_bitmaps(RAMState *rs)
3222{
3223 /* For memory_global_dirty_log_start below. */
3224 qemu_mutex_lock_iothread();
3225 qemu_mutex_lock_ramlist();
f3f491fc 3226
89ac5a1d
DDAG
3227 WITH_RCU_READ_LOCK_GUARD() {
3228 ram_list_init_bitmaps();
3229 memory_global_dirty_log_start();
3230 migration_bitmap_sync_precopy(rs);
3231 }
56e93d26 3232 qemu_mutex_unlock_ramlist();
49877834 3233 qemu_mutex_unlock_iothread();
d6eff5d7
PX
3234}
3235
3236static int ram_init_all(RAMState **rsp)
3237{
3238 if (ram_state_init(rsp)) {
3239 return -1;
3240 }
3241
3242 if (xbzrle_init()) {
3243 ram_state_cleanup(rsp);
3244 return -1;
3245 }
3246
3247 ram_init_bitmaps(*rsp);
a91246c9
HZ
3248
3249 return 0;
3250}
3251
08614f34
PX
3252static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3253{
3254 RAMBlock *block;
3255 uint64_t pages = 0;
3256
3257 /*
3258 * Postcopy is not using xbzrle/compression, so no need for that.
3259 * Also, since the source is already halted, we don't need to care
3260 * about dirty page logging either.
3261 */
3262
fbd162e6 3263 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
08614f34
PX
3264 pages += bitmap_count_one(block->bmap,
3265 block->used_length >> TARGET_PAGE_BITS);
3266 }
3267
3268 /* This may not be aligned with current bitmaps. Recalculate. */
3269 rs->migration_dirty_pages = pages;
3270
3271 rs->last_seen_block = NULL;
3272 rs->last_sent_block = NULL;
3273 rs->last_page = 0;
3274 rs->last_version = ram_list.version;
3275 /*
3276 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3277 * matter what we have sent.
3278 */
3279 rs->ram_bulk_stage = false;
3280
3281 /* Update RAMState cache of output QEMUFile */
3282 rs->f = out;
3283
3284 trace_ram_state_resume_prepare(pages);
3285}
3286
6bcb05fc
WW
3287/*
3288 * This function clears bits of the free pages reported by the caller from the
3289 * migration dirty bitmap. @addr is the host address corresponding to the
3290 * start of the continuous guest free pages, and @len is the total bytes of
3291 * those pages.
3292 */
3293void qemu_guest_free_page_hint(void *addr, size_t len)
3294{
3295 RAMBlock *block;
3296 ram_addr_t offset;
3297 size_t used_len, start, npages;
3298 MigrationState *s = migrate_get_current();
3299
3300 /* This function is currently expected to be used during live migration */
3301 if (!migration_is_setup_or_active(s->state)) {
3302 return;
3303 }
3304
3305 for (; len > 0; len -= used_len, addr += used_len) {
3306 block = qemu_ram_block_from_host(addr, false, &offset);
3307 if (unlikely(!block || offset >= block->used_length)) {
3308 /*
3309 * The implementation might not support RAMBlock resize during
3310 * live migration, but it could happen in theory with future
3311 * updates. So we add a check here to capture that case.
3312 */
3313 error_report_once("%s unexpected error", __func__);
3314 return;
3315 }
3316
3317 if (len <= block->used_length - offset) {
3318 used_len = len;
3319 } else {
3320 used_len = block->used_length - offset;
3321 }
3322
3323 start = offset >> TARGET_PAGE_BITS;
3324 npages = used_len >> TARGET_PAGE_BITS;
3325
3326 qemu_mutex_lock(&ram_state->bitmap_mutex);
3327 ram_state->migration_dirty_pages -=
3328 bitmap_count_one_with_offset(block->bmap, start, npages);
3329 bitmap_clear(block->bmap, start, npages);
3330 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3331 }
3332}
3333
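/*
 * Editor's note (not part of ram.c): the clamping step above, standalone.
 * A hint that spans two RAMBlocks is consumed in per-block slices; each
 * iteration clears at most the bytes remaining in the current block.
 */
#include <stddef.h>
#include <stdint.h>

static size_t ex_hint_slice(size_t len, uint64_t block_used_length,
                            uint64_t offset)
{
    uint64_t left = block_used_length - offset; /* bytes left in block */
    return len <= left ? len : (size_t)left;
}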
3d0684b2
JQ
3334/*
3335 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
a91246c9
HZ
3336 * a long-running RCU critical section. When rcu-reclaims in the code
3337 * start to become numerous it will be necessary to reduce the
3338 * granularity of these critical sections.
3339 */
3340
3d0684b2
JQ
3341/**
3342 * ram_save_setup: Setup RAM for migration
3343 *
3344 * Returns zero to indicate success and negative for error
3345 *
3346 * @f: QEMUFile where to send the data
3347 * @opaque: RAMState pointer
3348 */
a91246c9
HZ
3349static int ram_save_setup(QEMUFile *f, void *opaque)
3350{
53518d94 3351 RAMState **rsp = opaque;
a91246c9
HZ
3352 RAMBlock *block;
3353
dcaf446e
XG
3354 if (compress_threads_save_setup()) {
3355 return -1;
3356 }
3357
a91246c9
HZ
3358 /* migration has already set up the bitmap, reuse it. */
3359 if (!migration_in_colo_state()) {
7d00ee6a 3360 if (ram_init_all(rsp) != 0) {
dcaf446e 3361 compress_threads_save_cleanup();
a91246c9 3362 return -1;
53518d94 3363 }
a91246c9 3364 }
53518d94 3365 (*rsp)->f = f;
a91246c9 3366
0e6ebd48
DDAG
3367 WITH_RCU_READ_LOCK_GUARD() {
3368 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
56e93d26 3369
0e6ebd48
DDAG
3370 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3371 qemu_put_byte(f, strlen(block->idstr));
3372 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3373 qemu_put_be64(f, block->used_length);
3374 if (migrate_postcopy_ram() && block->page_size !=
3375 qemu_host_page_size) {
3376 qemu_put_be64(f, block->page_size);
3377 }
3378 if (migrate_ignore_shared()) {
3379 qemu_put_be64(f, block->mr->addr);
3380 }
fbd162e6 3381 }
56e93d26
JQ
3382 }
3383
56e93d26
JQ
3384 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3385 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3386
1b81c974 3387 multifd_send_sync_main(*rsp);
56e93d26 3388 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
35374cbd 3389 qemu_fflush(f);
56e93d26
JQ
3390
3391 return 0;
3392}
3393
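/*
 * Editor's note (not part of ram.c): a standalone sketch of the per-block
 * setup record written above: a one-byte idstr length, the idstr bytes,
 * then the big-endian 64-bit used_length (page_size and mr->addr follow
 * only when the respective capabilities are on). The emit helpers are
 * hypothetical stand-ins for qemu_put_byte()/qemu_put_be64().
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void ex_emit_u8(FILE *f, uint8_t v)
{
    fwrite(&v, 1, 1, f);
}

static void ex_emit_be64(FILE *f, uint64_t v)
{
    for (int i = 7; i >= 0; i--) {
        ex_emit_u8(f, (uint8_t)(v >> (i * 8)));   /* big-endian order */
    }
}

static void ex_emit_block_header(FILE *f, const char *idstr,
                                 uint64_t used_length)
{
    ex_emit_u8(f, (uint8_t)strlen(idstr));        /* idstr length */
    fwrite(idstr, 1, strlen(idstr), f);           /* idstr bytes */
    ex_emit_be64(f, used_length);                 /* block size */
}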
3d0684b2
JQ
3394/**
3395 * ram_save_iterate: iterative stage for migration
3396 *
3397 * Returns zero to indicate success and negative for error
3398 *
3399 * @f: QEMUFile where to send the data
3400 * @opaque: RAMState pointer
3401 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    if (blk_mig_bulk_active()) {
        /* Avoid transferring ram during bulk phase of block migration as
         * the bulk phase will usually take a long time and transferring
         * ram updates during that time is pointless. */
        goto out;
    }

    WITH_RCU_READ_LOCK_GUARD() {
        if (ram_list.version != rs->last_version) {
            ram_state_reset(rs);
        }

        /* Read version before ram_list.blocks */
        smp_rmb();

        ram_control_before_iterate(f, RAM_CONTROL_ROUND);

        t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
        i = 0;
        while ((ret = qemu_file_rate_limit(f)) == 0 ||
               !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
            int pages;

            if (qemu_file_get_error(f)) {
                break;
            }

            pages = ram_find_and_save_block(rs, false);
            /* no more pages to send */
            if (pages == 0) {
                done = 1;
                break;
            }

            if (pages < 0) {
                qemu_file_set_error(f, pages);
                break;
            }

            rs->target_page_count += pages;

            /*
             * We want to check in the 1st loop, just in case it was the 1st
             * time and we had to sync the dirty bitmap.
             * qemu_clock_get_ns() is a bit expensive, so we only check once
             * every few iterations.
             */
            if ((i & 63) == 0) {
                uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
                              1000000;
                if (t1 > MAX_WAIT) {
                    trace_ram_save_iterate_big_wait(t1, i);
                    break;
                }
            }
            i++;
        }
    }

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

out:
    multifd_send_sync_main(rs);
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);
    ram_counters.transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}
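
/*
 * Notes on the pacing above: the rate limit is re-checked on every loop
 * pass, but queued page requests (rs->src_page_requests) are still
 * serviced even when the limit is hit; the wall-clock check runs only once
 * every 64 iterations because qemu_clock_get_ns() is comparatively
 * expensive; and "ram_counters.transferred += 8" accounts for the 8-byte
 * RAM_SAVE_FLAG_EOS marker written just above.
 */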

/**
 * ram_save_complete: function called to send the remaining amount of ram
 *
 * Returns zero to indicate success or negative on error
 *
 * Called with the iothread lock held
 *
 * @f: QEMUFile where to send the data
 * @opaque: RAMState pointer
 */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    int ret = 0;

    WITH_RCU_READ_LOCK_GUARD() {
        if (!migration_in_postcopy()) {
            migration_bitmap_sync_precopy(rs);
        }

        ram_control_before_iterate(f, RAM_CONTROL_FINISH);

        /* try transferring iterative blocks of memory */

        /* flush all remaining blocks regardless of rate limiting */
        while (true) {
            int pages;

            pages = ram_find_and_save_block(rs, !migration_in_colo_state());
            /* no more blocks to send */
            if (pages == 0) {
                break;
            }
            if (pages < 0) {
                ret = pages;
                break;
            }
        }

        flush_compressed_data(rs);
        ram_control_after_iterate(f, RAM_CONTROL_FINISH);
    }

    multifd_send_sync_main(rs);
    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    qemu_fflush(f);

    return ret;
}

static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *res_precopy_only,
                             uint64_t *res_compatible,
                             uint64_t *res_postcopy_only)
{
    RAMState **temp = opaque;
    RAMState *rs = *temp;
    uint64_t remaining_size;

    remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy() &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        WITH_RCU_READ_LOCK_GUARD() {
            migration_bitmap_sync_precopy(rs);
        }
        qemu_mutex_unlock_iothread();
        remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
    }

    if (migrate_postcopy_ram()) {
        /* We can do postcopy, and all the data is postcopiable */
        *res_compatible += remaining_size;
    } else {
        *res_precopy_only += remaining_size;
    }
}
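
/*
 * The split above is what steers the postcopy decision: with postcopy-ram
 * enabled, every remaining dirty page can still be sent after the
 * switchover, so the whole estimate goes into *res_compatible; otherwise
 * it must be sent during precopy and is reported in *res_precopy_only.
 */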

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    loaded_data = XBZRLE.decoded_buf;
    /* load data and decode */
    /* it can change loaded_data to point to an internal buffer */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
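
/*
 * The XBZRLE record parsed above is simply:
 *
 *   byte   ENCODING_FLAG_XBZRLE
 *   be16   length of the encoded data
 *   bytes  encoded data (never more than TARGET_PAGE_SIZE)
 *
 * The delta is decoded against the previous contents of the page already
 * present at @host.
 */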

/**
 * ram_block_from_stream: read a RAMBlock id from the migration stream
 *
 * Must be called from within an RCU critical section.
 *
 * Returns a pointer from within the RCU-protected ram_list.
 *
 * @f: QEMUFile where to read the data from
 * @flags: Page flags (mostly to see if it's a continuation of previous block)
 */
static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }
        return block;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (!block) {
        error_report("Can't find block %s", id);
        return NULL;
    }

    if (ramblock_is_ignored(block)) {
        error_report("block %s should not be migrated !", id);
        return NULL;
    }

    return block;
}
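
/*
 * RAM_SAVE_FLAG_CONTINUE is a stream-size optimisation: when a page
 * belongs to the same RAMBlock as the previous one, the source sets the
 * flag and omits the block id, and the load side reuses the static 'block'
 * cached above.  Illustratively (block name shown for a typical "pc.ram"
 * block):
 *
 *   [addr | FLAG_PAGE]            len idstr data   <- first page of block
 *   [addr | FLAG_PAGE | CONTINUE] data             <- subsequent pages
 */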

static inline void *host_from_ram_block_offset(RAMBlock *block,
                                               ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }

    return block->host + offset;
}

static inline void *colo_cache_from_block_offset(RAMBlock *block,
                                                 ram_addr_t offset)
{
    if (!offset_in_ramblock(block, offset)) {
        return NULL;
    }
    if (!block->colo_cache) {
        error_report("%s: colo_cache is NULL in block :%s",
                     __func__, block->idstr);
        return NULL;
    }

    /*
     * During a COLO checkpoint we need a bitmap of these migrated pages.
     * It helps us decide which pages in the ram cache should be flushed
     * into the VM's RAM later.
     */
    if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
        ram_state->migration_dirty_pages++;
    }
    return block->colo_cache + offset;
}

/**
 * ram_handle_compressed: handle the zero page case
 *
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 *
 * @host: host address for the zero page
 * @ch: what the page is filled from.  We only support zero
 * @size: size of the zero page
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
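
/*
 * The guard above means a zero page that is already zero on the
 * destination is never written to; skipping the memset avoids touching
 * (and thus allocating) pages that the destination has not faulted in yet.
 */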

/* return the size after decompression, or negative value on error */
static int
qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
                     const uint8_t *source, size_t source_len)
{
    int err;

    err = inflateReset(stream);
    if (err != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    err = inflate(stream, Z_NO_FLUSH);
    if (err != Z_STREAM_END) {
        return -1;
    }

    return stream->total_out;
}
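
/*
 * inflateReset() merely rewinds the z_stream state rather than tearing it
 * down, so each decompression thread reuses a single stream (set up once
 * with inflateInit() in compress_threads_load_setup()) for every page it
 * handles.
 */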

static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len, ret;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;

            ret = qemu_uncompress_data(&param->stream, des, pagesize,
                                       param->compbuf, len);
            if (ret < 0 && migrate_get_current()->decompress_error_check) {
                error_report("decompress data failed");
                qemu_file_set_error(decomp_file, ret);
            }

            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static int wait_for_decompress_done(void)
{
    int idx, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!decomp_param[idx].done) {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
    return qemu_file_get_error(decomp_file);
}

static void compress_threads_load_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly initialized or not
         */
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        if (!decomp_param[i].compbuf) {
            break;
        }

        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        inflateEnd(&decomp_param[i].stream);
        g_free(decomp_param[i].compbuf);
        decomp_param[i].compbuf = NULL;
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
    decomp_file = NULL;
}

static int compress_threads_load_setup(QEMUFile *f)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    decomp_file = f;
    for (i = 0; i < thread_count; i++) {
        if (inflateInit(&decomp_param[i].stream) != Z_OK) {
            goto exit;
        }

        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;
exit:
    compress_threads_load_cleanup();
    return -1;
}
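
/*
 * compressBound(TARGET_PAGE_SIZE) is zlib's worst-case output size for a
 * TARGET_PAGE_SIZE input, so compbuf can hold any compressed page a
 * well-formed source can send; ram_load_precopy() rejects longer lengths
 * before they ever reach this buffer.
 */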

static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
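
/*
 * Handoff protocol above: the receiving thread claims an idle worker by
 * clearing its 'done' flag under decomp_done_lock, copies the compressed
 * bytes into that worker's compbuf and signals its condvar; if no worker
 * is idle, it sleeps on decomp_done_cond until one finishes.
 */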

/*
 * colo cache: this is for the secondary VM.  We cache the whole memory of
 * the secondary VM; the global lock must be held when calling this helper.
 */
int colo_init_ram_cache(void)
{
    RAMBlock *block;

    rcu_read_lock();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        block->colo_cache = qemu_anon_ram_alloc(block->used_length,
                                                NULL,
                                                false);
        if (!block->colo_cache) {
            error_report("%s: Can't alloc memory for COLO cache of block %s, "
                         "size 0x" RAM_ADDR_FMT, __func__, block->idstr,
                         block->used_length);
            RAMBLOCK_FOREACH_NOT_IGNORED(block) {
                if (block->colo_cache) {
                    qemu_anon_ram_free(block->colo_cache, block->used_length);
                    block->colo_cache = NULL;
                }
            }
            return -errno;
        }
        memcpy(block->colo_cache, block->host, block->used_length);
    }
    rcu_read_unlock();
    /*
     * Record the dirty pages sent by the PVM; we use this dirty bitmap to
     * decide which pages in the cache should be flushed into the SVM's RAM.
     * Here we use the same name 'ram_bitmap' as for migration.
     */
    if (ram_bytes_total()) {
        RAMBlock *block;

        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            unsigned long pages = block->max_length >> TARGET_PAGE_BITS;

            block->bmap = bitmap_new(pages);
            bitmap_set(block->bmap, 0, pages);
        }
    }
    ram_state = g_new0(RAMState, 1);
    ram_state->migration_dirty_pages = 0;
    qemu_mutex_init(&ram_state->bitmap_mutex);
    memory_global_dirty_log_start();

    return 0;
}
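
/*
 * From this point on, the secondary VM keeps running out of block->host
 * while incoming checkpoint data lands in block->colo_cache; starting with
 * every bit set in the bitmap above means the first colo_flush_ram_cache()
 * will copy every page from the cache into the SVM's RAM.
 */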

/* The global lock must be held when calling this helper */
void colo_release_ram_cache(void)
{
    RAMBlock *block;

    memory_global_dirty_log_stop();
    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        g_free(block->bmap);
        block->bmap = NULL;
    }

    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            if (block->colo_cache) {
                qemu_anon_ram_free(block->colo_cache, block->used_length);
                block->colo_cache = NULL;
            }
        }
    }
    qemu_mutex_destroy(&ram_state->bitmap_mutex);
    g_free(ram_state);
    ram_state = NULL;
}

/**
 * ram_load_setup: Setup RAM for migration incoming side
 *
 * Returns zero to indicate success and negative for error
 *
 * @f: QEMUFile where to receive the data
 * @opaque: RAMState pointer
 */
static int ram_load_setup(QEMUFile *f, void *opaque)
{
    if (compress_threads_load_setup(f)) {
        return -1;
    }

    xbzrle_load_setup();
    ramblock_recv_map_init();

    return 0;
}

static int ram_load_cleanup(void *opaque)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            pmem_persist(rb->host, rb->used_length);
        }
    }

    xbzrle_load_cleanup();
    compress_threads_load_cleanup();

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        g_free(rb->receivedmap);
        rb->receivedmap = NULL;
    }

    return 0;
}

/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was one error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram.  postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    return postcopy_ram_incoming_init(mis);
}

/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to read the data from
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matches_target_page_size = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = mis->postcopy_tmp_page;
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);

        /*
         * If qemu file error, we should stop here, and then "addr"
         * may be invalid
         */
        ret = qemu_file_get_error(f);
        if (ret) {
            break;
        }

        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages;
             * however, the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!matches_target_page_size) {
                /* For huge pages, we always use temporary buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * For small pages that match the target page size, we
                 * avoid the qemu_file copy.  Instead we directly use
                 * the buffer of QEMUFile to place the page.  Note: we
                 * cannot do any QEMUFile operation before using that
                 * buffer to make sure the buffer is valid when
                 * placing the page.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
            break;
        }

        /* Detect for any possible file errors */
        if (!ret && qemu_file_get_error(f)) {
            ret = qemu_file_get_error(f);
        }

        if (!ret && place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block);
            }
        }
    }

    return ret;
}
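
/*
 * Worked example for the host-page assembly above: with 2MB hugetlbfs
 * host pages and 4KB target pages, 512 consecutive target pages are
 * accumulated in mis->postcopy_tmp_page, and only when the last one
 * arrives does place_needed become true, at which point the whole 2MB
 * page is placed atomically (512 * 4KB = 2MB).
 */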

static bool postcopy_is_advised(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
}

static bool postcopy_is_running(void)
{
    PostcopyState ps = postcopy_state_get();
    return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
}

/*
 * Flush the content of the RAM cache into the SVM's memory.
 * Only flush the pages that were dirtied by the PVM, the SVM, or both.
 */
static void colo_flush_ram_cache(void)
{
    RAMBlock *block = NULL;
    void *dst_host;
    void *src_host;
    unsigned long offset = 0;

    memory_global_dirty_log_sync();
    WITH_RCU_READ_LOCK_GUARD() {
        RAMBLOCK_FOREACH_NOT_IGNORED(block) {
            ramblock_sync_dirty_bitmap(ram_state, block);
        }
    }

    trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
    WITH_RCU_READ_LOCK_GUARD() {
        block = QLIST_FIRST_RCU(&ram_list.blocks);

        while (block) {
            offset = migration_bitmap_find_dirty(ram_state, block, offset);

            if (offset << TARGET_PAGE_BITS >= block->used_length) {
                offset = 0;
                block = QLIST_NEXT_RCU(block, next);
            } else {
                migration_bitmap_clear_dirty(ram_state, block, offset);
                dst_host = block->host + (offset << TARGET_PAGE_BITS);
                src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
                memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
            }
        }
    }
    trace_colo_flush_ram_cache_end();
}

/**
 * ram_load_precopy: load pages in precopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in precopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to read the data from
 */
static int ram_load_precopy(QEMUFile *f)
{
    int flags = 0, ret = 0, invalid_flags = 0, len = 0;
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_is_advised();
    if (!migrate_use_compression()) {
        invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
    }

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & invalid_flags) {
            if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
                error_report("Received an unexpected compressed page");
            }

            ret = -EINVAL;
            break;
        }

        if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            /*
             * After going into COLO, we should load the page into colo_cache.
             */
            if (migration_incoming_in_colo_state()) {
                host = colo_cache_from_block_offset(block, addr);
            } else {
                host = host_from_ram_block_offset(block, addr);
            }
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            if (!migration_incoming_in_colo_state()) {
                ramblock_recv_bitmap_set(block, host);
            }

            trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block && !qemu_ram_is_migratable(block)) {
                    error_report("block %s should not be migrated !", id);
                    ret = -EINVAL;
                } else if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    if (migrate_ignore_shared()) {
                        hwaddr addr = qemu_get_be64(f);
                        if (ramblock_is_ignored(block) &&
                            block->mr->addr != addr) {
                            error_report("Mismatched GPAs for block %s "
                                         "%" PRId64 " != %" PRId64,
                                         id, (uint64_t)addr,
                                         (uint64_t)block->mr->addr);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_ZERO:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            multifd_recv_sync_main();
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
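
/*
 * Every record consumed by the loop above begins with one be64 value that
 * packs the page address in the bits covered by TARGET_PAGE_MASK and the
 * RAM_SAVE_FLAG_* bits below it; the flag then determines the payload:
 * one byte for ZERO, TARGET_PAGE_SIZE raw bytes for PAGE, a be32 length
 * plus compressed data for COMPRESS_PAGE, and a small header plus encoded
 * data for XBZRLE.
 */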

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int ret = 0;
    static uint64_t seq_iter;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_is_running();

    seq_iter++;

    if (version_id != 4) {
        return -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        if (postcopy_running) {
            ret = ram_load_postcopy(f);
        } else {
            ret = ram_load_precopy(f);
        }

        ret |= wait_for_decompress_done();
    }
    trace_ram_load_complete(ret, seq_iter);

    if (!ret && migration_incoming_in_colo_state()) {
        colo_flush_ram_cache();
    }
    return ret;
}

static bool ram_has_postcopy(void *opaque)
{
    RAMBlock *rb;
    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        if (ramblock_is_pmem(rb)) {
            info_report("Block: %s, host: %p is a nvdimm memory, postcopy "
                        "is not supported now!", rb->idstr, rb->host);
            return false;
        }
    }

    return migrate_postcopy_ram();
}

/* Sync all the dirty bitmaps with the destination VM. */
static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
{
    RAMBlock *block;
    QEMUFile *file = s->to_dst_file;
    int ramblock_count = 0;

    trace_ram_dirty_bitmap_sync_start();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        qemu_savevm_send_recv_bitmap(file, block->idstr);
        trace_ram_dirty_bitmap_request(block->idstr);
        ramblock_count++;
    }

    trace_ram_dirty_bitmap_sync_wait();

    /* Wait until all the ramblocks' dirty bitmaps are synced */
    while (ramblock_count--) {
        qemu_sem_wait(&s->rp_state.rp_sem);
    }

    trace_ram_dirty_bitmap_sync_complete();

    return 0;
}

static void ram_dirty_bitmap_reload_notify(MigrationState *s)
{
    qemu_sem_post(&s->rp_state.rp_sem);
}

/*
 * Read the received bitmap, revert it as the initial dirty bitmap.
 * This is only used when the postcopy migration is paused but wants
 * to resume from a middle point.
 */
int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
{
    int ret = -EINVAL;
    QEMUFile *file = s->rp_state.from_dst_file;
    unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
    uint64_t local_size = DIV_ROUND_UP(nbits, 8);
    uint64_t size, end_mark;

    trace_ram_dirty_bitmap_reload_begin(block->idstr);

    if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: incorrect state %s", __func__,
                     MigrationStatus_str(s->state));
        return -EINVAL;
    }

    /*
     * Note: see comments in ramblock_recv_bitmap_send() on why we
     * need the endianness conversion, and the paddings.
     */
    local_size = ROUND_UP(local_size, 8);

    /* Add paddings */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    size = qemu_get_be64(file);

    /* The size of the bitmap should match with our ramblock */
    if (size != local_size) {
        error_report("%s: ramblock '%s' bitmap size mismatch "
                     "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
                     block->idstr, size, local_size);
        ret = -EINVAL;
        goto out;
    }

    size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
    end_mark = qemu_get_be64(file);

    ret = qemu_file_get_error(file);
    if (ret || size != local_size) {
        error_report("%s: read bitmap failed for ramblock '%s': %d"
                     " (size 0x%"PRIx64", got: 0x%"PRIx64")",
                     __func__, block->idstr, ret, local_size, size);
        ret = -EIO;
        goto out;
    }

    if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
        error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIx64,
                     __func__, block->idstr, end_mark);
        ret = -EINVAL;
        goto out;
    }

    /*
     * Endianness conversion.  We are during postcopy (though paused).
     * The dirty bitmap won't change.  We can directly modify it.
     */
    bitmap_from_le(block->bmap, le_bitmap, nbits);

    /*
     * What we received is the "received bitmap".  Revert it as the initial
     * dirty bitmap for this ramblock.
     */
    bitmap_complement(block->bmap, block->bmap, nbits);

    trace_ram_dirty_bitmap_reload_complete(block->idstr);

    /*
     * We have successfully synced the bitmap for the current ramblock.  If
     * this is the last one to sync, we need to notify the main send thread.
     */
    ram_dirty_bitmap_reload_notify(s);

    ret = 0;
out:
    g_free(le_bitmap);
    return ret;
}
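
/*
 * On the wire, the received bitmap read above is: a be64 size, 'size'
 * bytes of little-endian bitmap (padded up to a multiple of 8 bytes),
 * then a be64 RAMBLOCK_RECV_BITMAP_ENDING end mark.  All three are
 * validated before the bitmap is complemented into the dirty bitmap used
 * to resume the paused postcopy.
 */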

static int ram_resume_prepare(MigrationState *s, void *opaque)
{
    RAMState *rs = *(RAMState **)opaque;
    int ret;

    ret = ram_dirty_bitmap_sync_all(s, rs);
    if (ret) {
        return ret;
    }

    ram_state_resume_prepare(rs, s->to_dst_file);

    return 0;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .has_postcopy = ram_has_postcopy,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .save_cleanup = ram_save_cleanup,
    .load_setup = ram_load_setup,
    .load_cleanup = ram_load_cleanup,
    .resume_prepare = ram_resume_prepare,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
}
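
/*
 * The '4' passed to register_savevm_live() is the section version and has
 * to stay in sync with the "version_id != 4" check in ram_load(): the
 * destination rejects a "ram" section with any other version.
 */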