4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
28 #include "qemu/osdep.h"
29 #include "qemu-common.h"
32 #include "qapi-event.h"
33 #include "qemu/cutils.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "qemu/timer.h"
37 #include "qemu/main-loop.h"
38 #include "migration/migration.h"
39 #include "migration/postcopy-ram.h"
40 #include "exec/address-spaces.h"
41 #include "migration/page_cache.h"
42 #include "qemu/error-report.h"
44 #include "exec/ram_addr.h"
45 #include "qemu/rcu_queue.h"
47 #ifdef DEBUG_MIGRATION_RAM
48 #define DPRINTF(fmt, ...) \
49 do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
51 #define DPRINTF(fmt, ...) \
55 static int dirty_rate_high_cnt;
57 static uint64_t bitmap_sync_count;
59 /***********************************************************/
60 /* ram save/restore */
62 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
63 #define RAM_SAVE_FLAG_COMPRESS 0x02
64 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
65 #define RAM_SAVE_FLAG_PAGE 0x08
66 #define RAM_SAVE_FLAG_EOS 0x10
67 #define RAM_SAVE_FLAG_CONTINUE 0x20
68 #define RAM_SAVE_FLAG_XBZRLE 0x40
69 /* 0x80 is reserved in migration.h; start with 0x100 next */
70 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
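/*
 * Illustrative sketch, not part of the original file: page addresses on the
 * wire are TARGET_PAGE_SIZE aligned, so the low bits of the be64 address
 * field are free to carry the RAM_SAVE_FLAG_* values above.  The helper
 * names below are hypothetical and only demonstrate the packing that
 * save_page_header() and ram_load() rely on.
 */
static inline uint64_t example_pack_addr(ram_addr_t addr, int flags)
{
    /* addr is page aligned, so OR-ing flags into the low bits is lossless */
    return (uint64_t)addr | flags;
}

static inline void example_unpack_addr(uint64_t wire, ram_addr_t *addr,
                                       int *flags)
{
    *flags = wire & ~TARGET_PAGE_MASK;  /* low bits: RAM_SAVE_FLAG_* */
    *addr = wire & TARGET_PAGE_MASK;    /* high bits: page address */
}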
72 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
74 static inline bool is_zero_range(uint8_t *p, uint64_t size)
76 return buffer_find_nonzero_offset(p, size) == size;
79 /* struct contains XBZRLE cache and a static page
80 used by the compression */
82 /* buffer used for XBZRLE encoding */
84 /* buffer for storing page content */
86 /* Cache for XBZRLE, Protected by lock. */
91 /* buffer used for XBZRLE decoding */
92 static uint8_t *xbzrle_decoded_buf;
94 static void XBZRLE_cache_lock(void)
96 if (migrate_use_xbzrle())
97 qemu_mutex_lock(&XBZRLE.lock);
100 static void XBZRLE_cache_unlock(void)
102 if (migrate_use_xbzrle())
103 qemu_mutex_unlock(&XBZRLE.lock);
107 * called from qmp_migrate_set_cache_size in main thread, possibly while
108 * a migration is in progress.
109 * A running migration may be using the cache and might finish during this
110 * call, hence changes to the cache are protected by XBZRLE.lock.
112 int64_t xbzrle_cache_resize(int64_t new_size)
114 PageCache *new_cache;
117 if (new_size < TARGET_PAGE_SIZE) {
123 if (XBZRLE.cache != NULL) {
124 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
127 new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
130 error_report("Error creating cache");
135 cache_fini(XBZRLE.cache);
136 XBZRLE.cache = new_cache;
140 ret = pow2floor(new_size);
142 XBZRLE_cache_unlock();
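/*
 * Illustrative usage sketch, not part of the original file: a caller such
 * as the QMP cache-size handler would treat the return value roughly like
 * this.  The helper name is hypothetical; the rounding down to a power of
 * two follows from the pow2floor() call above.
 */
static void example_resize_xbzrle_cache(int64_t requested_bytes)
{
    int64_t actual = xbzrle_cache_resize(requested_bytes);

    if (actual < 0) {
        /* e.g. requested_bytes was smaller than TARGET_PAGE_SIZE */
        error_report("xbzrle cache resize to %" PRId64 " bytes failed",
                     requested_bytes);
    } else if (actual != requested_bytes) {
        /* the cache only supports power-of-two sizes */
        error_report("xbzrle cache size rounded down to %" PRId64 " bytes",
                     actual);
    }
}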
146 /* accounting for migration statistics */
147 typedef struct AccountingInfo {
149 uint64_t skipped_pages;
152 uint64_t xbzrle_bytes;
153 uint64_t xbzrle_pages;
154 uint64_t xbzrle_cache_miss;
155 double xbzrle_cache_miss_rate;
156 uint64_t xbzrle_overflows;
159 static AccountingInfo acct_info;
161 static void acct_clear(void)
163 memset(&acct_info, 0, sizeof(acct_info));
166 uint64_t dup_mig_bytes_transferred(void)
168 return acct_info.dup_pages * TARGET_PAGE_SIZE;
171 uint64_t dup_mig_pages_transferred(void)
173 return acct_info.dup_pages;
176 uint64_t skipped_mig_bytes_transferred(void)
178 return acct_info.skipped_pages * TARGET_PAGE_SIZE;
181 uint64_t skipped_mig_pages_transferred(void)
183 return acct_info.skipped_pages;
186 uint64_t norm_mig_bytes_transferred(void)
188 return acct_info.norm_pages * TARGET_PAGE_SIZE;
191 uint64_t norm_mig_pages_transferred(void)
193 return acct_info.norm_pages;
196 uint64_t xbzrle_mig_bytes_transferred(void)
198 return acct_info.xbzrle_bytes;
201 uint64_t xbzrle_mig_pages_transferred(void)
203 return acct_info.xbzrle_pages;
206 uint64_t xbzrle_mig_pages_cache_miss(void)
208 return acct_info.xbzrle_cache_miss;
211 double xbzrle_mig_cache_miss_rate(void)
213 return acct_info.xbzrle_cache_miss_rate;
216 uint64_t xbzrle_mig_pages_overflow(void)
218 return acct_info.xbzrle_overflows;
221 /* This is the last block that we have visited searching for dirty pages
223 static RAMBlock *last_seen_block;
224 /* This is the last block from where we have sent data */
225 static RAMBlock *last_sent_block;
226 static ram_addr_t last_offset;
227 static QemuMutex migration_bitmap_mutex;
228 static uint64_t migration_dirty_pages;
229 static uint32_t last_version;
230 static bool ram_bulk_stage;
232 /* used by the search for pages to send */
233 struct PageSearchStatus {
234 /* Current block being searched */
236 /* Current offset to search from */
238 /* Set once we wrap around */
241 typedef struct PageSearchStatus PageSearchStatus;
243 static struct BitmapRcu {
245 /* Main migration bitmap */
247 /* bitmap of pages that haven't been sent even once
248 * only maintained and used in postcopy at the moment
249 * where it's used to send the dirtymap at the start
250 * of the postcopy phase
252 unsigned long *unsentmap;
253 } *migration_bitmap_rcu;
255 struct CompressParam {
264 typedef struct CompressParam CompressParam;
266 struct DecompressParam {
274 typedef struct DecompressParam DecompressParam;
276 static CompressParam *comp_param;
277 static QemuThread *compress_threads;
278 /* comp_done_cond is used to wake up the migration thread when
279 * one of the compression threads has finished the compression.
280 * comp_done_lock is used together with comp_done_cond.
282 static QemuMutex *comp_done_lock;
283 static QemuCond *comp_done_cond;
284 /* The empty QEMUFileOps will be used by file in CompressParam */
285 static const QEMUFileOps empty_ops = { };
287 static bool compression_switch;
288 static bool quit_comp_thread;
289 static bool quit_decomp_thread;
290 static DecompressParam *decomp_param;
291 static QemuThread *decompress_threads;
293 static int do_compress_ram_page(CompressParam *param);
295 static void *do_data_compress(void *opaque)
297 CompressParam *param = opaque;
299 while (!quit_comp_thread) {
300 qemu_mutex_lock(&param->mutex);
301 /* Re-check quit_comp_thread in case
302 * terminate_compression_threads() was called just before
303 * qemu_mutex_lock(&param->mutex) and after
304 * while (!quit_comp_thread); re-checking it here makes
305 * sure the compression thread terminates as expected.
307 while (!param->start && !quit_comp_thread) {
308 qemu_cond_wait(&param->cond, &param->mutex);
310 if (!quit_comp_thread) {
311 do_compress_ram_page(param);
313 param->start = false;
314 qemu_mutex_unlock(&param->mutex);
316 qemu_mutex_lock(comp_done_lock);
318 qemu_cond_signal(comp_done_cond);
319 qemu_mutex_unlock(comp_done_lock);
325 static inline void terminate_compression_threads(void)
327 int idx, thread_count;
329 thread_count = migrate_compress_threads();
330 quit_comp_thread = true;
331 for (idx = 0; idx < thread_count; idx++) {
332 qemu_mutex_lock(&comp_param[idx].mutex);
333 qemu_cond_signal(&comp_param[idx].cond);
334 qemu_mutex_unlock(&comp_param[idx].mutex);
338 void migrate_compress_threads_join(void)
342 if (!migrate_use_compression()) {
345 terminate_compression_threads();
346 thread_count = migrate_compress_threads();
347 for (i = 0; i < thread_count; i++) {
348 qemu_thread_join(compress_threads + i);
349 qemu_fclose(comp_param[i].file);
350 qemu_mutex_destroy(&comp_param[i].mutex);
351 qemu_cond_destroy(&comp_param[i].cond);
353 qemu_mutex_destroy(comp_done_lock);
354 qemu_cond_destroy(comp_done_cond);
355 g_free(compress_threads);
357 g_free(comp_done_cond);
358 g_free(comp_done_lock);
359 compress_threads = NULL;
361 comp_done_cond = NULL;
362 comp_done_lock = NULL;
365 void migrate_compress_threads_create(void)
369 if (!migrate_use_compression()) {
372 quit_comp_thread = false;
373 compression_switch = true;
374 thread_count = migrate_compress_threads();
375 compress_threads = g_new0(QemuThread, thread_count);
376 comp_param = g_new0(CompressParam, thread_count);
377 comp_done_cond = g_new0(QemuCond, 1);
378 comp_done_lock = g_new0(QemuMutex, 1);
379 qemu_cond_init(comp_done_cond);
380 qemu_mutex_init(comp_done_lock);
381 for (i = 0; i < thread_count; i++) {
382 /* comp_param[i].file is just used as a dummy buffer to save data, set
385 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
386 comp_param[i].done = true;
387 qemu_mutex_init(&comp_param[i].mutex);
388 qemu_cond_init(&comp_param[i].cond);
389 qemu_thread_create(compress_threads + i, "compress",
390 do_data_compress, comp_param + i,
391 QEMU_THREAD_JOINABLE);
396 * save_page_header: Write page header to wire
398 * If this is the 1st block, it also writes the block identification
400 * Returns: Number of bytes written
402 * @f: QEMUFile where to send the data
403 * @block: block that contains the page we want to send
404 * @offset: offset inside the block for the page
405 * in the lower bits it also carries the RAM_SAVE_FLAG_* flags
407 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
411 qemu_put_be64(f, offset);
414 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
415 len = strlen(block->idstr);
416 qemu_put_byte(f, len);
417 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
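/*
 * Illustrative sketch, not part of the original file: the header written
 * above is a be64 (offset | flags), optionally followed by a one byte
 * idstr length plus the idstr itself when RAM_SAVE_FLAG_CONTINUE is not
 * set.  This hypothetical helper just mirrors the size accounting done by
 * save_page_header().
 */
static size_t example_page_header_len(RAMBlock *block, ram_addr_t offset)
{
    size_t len = 8;                       /* the be64 offset + flags field */

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len += 1 + strlen(block->idstr);  /* idstr length byte + idstr */
    }
    return len;
}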
423 /* Reduce amount of guest cpu execution to hopefully slow down memory writes.
424 * If guest dirty memory rate is reduced below the rate at which we can
425 * transfer pages to the destination then we should be able to complete
426 * migration. Some workloads dirty memory way too fast and will not effectively
427 * converge, even with auto-converge.
429 static void mig_throttle_guest_down(void)
431 MigrationState *s = migrate_get_current();
432 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
433 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
435 /* We have not started throttling yet. Let's start it. */
436 if (!cpu_throttle_active()) {
437 cpu_throttle_set(pct_initial);
439 /* Throttling already on, just increase the rate */
440 cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
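/*
 * Illustrative sketch, not part of the original file: assuming the usual
 * defaults of a 20% initial throttle and 10% increments, successive calls
 * to mig_throttle_guest_down() yield 20%, 30%, 40%, ... of guest CPU time
 * taken away.  The helper below is hypothetical and only restates that
 * progression.
 */
static int example_next_throttle_pct(int current_pct, int pct_initial,
                                     int pct_increment)
{
    return current_pct ? current_pct + pct_increment : pct_initial;
}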
444 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
445 * The important thing is that a stale (not-yet-0'd) page be replaced by the new data.
447 * As a bonus, if the page wasn't in the cache it gets added so that
448 * when a small write is made into the 0'd page it gets XBZRLE sent
450 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
452 if (ram_bulk_stage || !migrate_use_xbzrle()) {
456 /* We don't care if this fails to allocate a new cache page
457 * as long as it updated an old one */
458 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
462 #define ENCODING_FLAG_XBZRLE 0x1
465 * save_xbzrle_page: compress and send current page
467 * Returns: 1 means that we wrote the page
468 * 0 means that page is identical to the one already sent
469 * -1 means that xbzrle would be longer than normal
471 * @f: QEMUFile where to send the data
474 * @block: block that contains the page we want to send
475 * @offset: offset inside the block for the page
476 * @last_stage: if we are at the completion stage
477 * @bytes_transferred: increase it with the number of transferred bytes
479 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
480 ram_addr_t current_addr, RAMBlock *block,
481 ram_addr_t offset, bool last_stage,
482 uint64_t *bytes_transferred)
484 int encoded_len = 0, bytes_xbzrle;
485 uint8_t *prev_cached_page;
487 if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
488 acct_info.xbzrle_cache_miss++;
490 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
491 bitmap_sync_count) == -1) {
494 /* update *current_data when the page has been
495 inserted into cache */
496 *current_data = get_cached_data(XBZRLE.cache, current_addr);
502 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
504 /* save current buffer into memory */
505 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
507 /* XBZRLE encoding (if there is no overflow) */
508 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
509 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
511 if (encoded_len == 0) {
512 DPRINTF("Skipping unmodified page\n");
514 } else if (encoded_len == -1) {
515 DPRINTF("Overflow\n");
516 acct_info.xbzrle_overflows++;
517 /* update data in the cache */
519 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
520 *current_data = prev_cached_page;
525 /* we need to update the data in the cache, in order to get the same data */
527 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
530 /* Send XBZRLE based compressed page */
531 bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
532 qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
533 qemu_put_be16(f, encoded_len);
534 qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
535 bytes_xbzrle += encoded_len + 1 + 2;
536 acct_info.xbzrle_pages++;
537 acct_info.xbzrle_bytes += bytes_xbzrle;
538 *bytes_transferred += bytes_xbzrle;
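/*
 * Illustrative sketch, not part of the original file: an XBZRLE page on
 * the wire is the page header followed by one byte of encoding flag, a
 * be16 encoded length and encoded_len bytes of delta, which is why the
 * accounting above adds "encoded_len + 1 + 2" on top of the header.  The
 * helper name is hypothetical.
 */
static size_t example_xbzrle_wire_len(size_t header_len, size_t encoded_len)
{
    return header_len + 1   /* ENCODING_FLAG_XBZRLE byte */
                      + 2   /* be16 encoded length */
                      + encoded_len;
}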
543 /* Called with rcu_read_lock() to protect migration_bitmap
544 * rb: The RAMBlock to search for dirty pages in
545 * start: Start address (typically so we can continue from previous page)
546 * ram_addr_abs: Pointer into which to store the address of the dirty page
547 * within the global ram_addr space
549 * Returns: byte offset within memory region of the start of a dirty page
552 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
554 ram_addr_t *ram_addr_abs)
556 unsigned long base = rb->offset >> TARGET_PAGE_BITS;
557 unsigned long nr = base + (start >> TARGET_PAGE_BITS);
558 uint64_t rb_size = rb->used_length;
559 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
560 unsigned long *bitmap;
564 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
565 if (ram_bulk_stage && nr > base) {
568 next = find_next_bit(bitmap, size, nr);
571 *ram_addr_abs = next << TARGET_PAGE_BITS;
572 return (next - base) << TARGET_PAGE_BITS;
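/*
 * Illustrative sketch, not part of the original file: the migration bitmap
 * is indexed by page number within the global ram_addr_t space, so a
 * (block, offset-in-block) pair maps to a bit as shown by this
 * hypothetical helper; it is the inverse of the base/nr arithmetic above.
 */
static unsigned long example_dirty_bit_index(RAMBlock *rb, ram_addr_t offset)
{
    return (rb->offset + offset) >> TARGET_PAGE_BITS;
}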
575 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
578 int nr = addr >> TARGET_PAGE_BITS;
579 unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
581 ret = test_and_clear_bit(nr, bitmap);
584 migration_dirty_pages--;
589 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
591 unsigned long *bitmap;
592 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
593 migration_dirty_pages +=
594 cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
597 /* Fix me: there are too many global variables used in migration process. */
598 static int64_t start_time;
599 static int64_t bytes_xfer_prev;
600 static int64_t num_dirty_pages_period;
601 static uint64_t xbzrle_cache_miss_prev;
602 static uint64_t iterations_prev;
604 static void migration_bitmap_sync_init(void)
608 num_dirty_pages_period = 0;
609 xbzrle_cache_miss_prev = 0;
613 static void migration_bitmap_sync(void)
616 uint64_t num_dirty_pages_init = migration_dirty_pages;
617 MigrationState *s = migrate_get_current();
619 int64_t bytes_xfer_now;
623 if (!bytes_xfer_prev) {
624 bytes_xfer_prev = ram_bytes_transferred();
628 start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
631 trace_migration_bitmap_sync_start();
632 address_space_sync_dirty_bitmap(&address_space_memory);
634 qemu_mutex_lock(&migration_bitmap_mutex);
636 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
637 migration_bitmap_sync_range(block->offset, block->used_length);
640 qemu_mutex_unlock(&migration_bitmap_mutex);
642 trace_migration_bitmap_sync_end(migration_dirty_pages
643 - num_dirty_pages_init);
644 num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
647 /* more than 1 second = 1000 milliseconds */
648 if (end_time > start_time + 1000) {
649 if (migrate_auto_converge()) {
650 /* The following detection logic can be refined later. For now:
651 Check to see if the dirtied bytes are 50% more than the approx.
652 amount of bytes that just got transferred since the last time we
653 were in this routine. If that happens twice, start or increase
655 bytes_xfer_now = ram_bytes_transferred();
657 if (s->dirty_pages_rate &&
658 (num_dirty_pages_period * TARGET_PAGE_SIZE >
659 (bytes_xfer_now - bytes_xfer_prev)/2) &&
660 (dirty_rate_high_cnt++ >= 2)) {
661 trace_migration_throttle();
662 dirty_rate_high_cnt = 0;
663 mig_throttle_guest_down();
665 bytes_xfer_prev = bytes_xfer_now;
668 if (migrate_use_xbzrle()) {
669 if (iterations_prev != acct_info.iterations) {
670 acct_info.xbzrle_cache_miss_rate =
671 (double)(acct_info.xbzrle_cache_miss -
672 xbzrle_cache_miss_prev) /
673 (acct_info.iterations - iterations_prev);
675 iterations_prev = acct_info.iterations;
676 xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
678 s->dirty_pages_rate = num_dirty_pages_period * 1000
679 / (end_time - start_time);
680 s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681 start_time = end_time;
682 num_dirty_pages_period = 0;
684 s->dirty_sync_count = bitmap_sync_count;
685 if (migrate_use_events()) {
686 qapi_event_send_migration_pass(bitmap_sync_count, NULL);
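/*
 * Illustrative sketch, not part of the original file: the auto-converge
 * check above fires when the bytes dirtied during the last period exceed
 * half of the bytes actually transferred in that period (twice in a row).
 * Hypothetical helper restating that condition:
 */
static bool example_dirty_rate_too_high(uint64_t dirty_pages_period,
                                        uint64_t bytes_xfer_period)
{
    return dirty_pages_period * TARGET_PAGE_SIZE > bytes_xfer_period / 2;
}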
691 * save_zero_page: Send the zero page to the stream
693 * Returns: Number of pages written.
695 * @f: QEMUFile where to send the data
696 * @block: block that contains the page we want to send
697 * @offset: offset inside the block for the page
698 * @p: pointer to the page
699 * @bytes_transferred: increase it with the number of transferred bytes
701 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
702 uint8_t *p, uint64_t *bytes_transferred)
706 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
707 acct_info.dup_pages++;
708 *bytes_transferred += save_page_header(f, block,
709 offset | RAM_SAVE_FLAG_COMPRESS);
711 *bytes_transferred += 1;
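/*
 * Illustrative sketch, not part of the original file: a zero page costs
 * only the page header plus a single fill byte on the wire, hence the
 * "*bytes_transferred += 1" above; the destination later memsets the whole
 * page from that byte in ram_handle_compressed().  Hypothetical helper:
 */
static size_t example_zero_page_wire_len(size_t header_len)
{
    return header_len + 1;      /* header + the single fill byte */
}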
719 * ram_save_page: Send the given page to the stream
721 * Returns: Number of pages written.
723 * >=0 - Number of pages written - this might legally be 0
724 * if xbzrle noticed the page was the same.
726 * @f: QEMUFile where to send the data
727 * @block: block that contains the page we want to send
728 * @offset: offset inside the block for the page
729 * @last_stage: if we are at the completion stage
730 * @bytes_transferred: increase it with the number of transferred bytes
732 static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
733 bool last_stage, uint64_t *bytes_transferred)
737 ram_addr_t current_addr;
740 bool send_async = true;
741 RAMBlock *block = pss->block;
742 ram_addr_t offset = pss->offset;
744 p = block->host + offset;
746 /* When in doubt, send the page as a normal page */
748 ret = ram_control_save_page(f, block->offset,
749 offset, TARGET_PAGE_SIZE, &bytes_xmit);
751 *bytes_transferred += bytes_xmit;
757 current_addr = block->offset + offset;
759 if (block == last_sent_block) {
760 offset |= RAM_SAVE_FLAG_CONTINUE;
762 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
763 if (ret != RAM_SAVE_CONTROL_DELAYED) {
764 if (bytes_xmit > 0) {
765 acct_info.norm_pages++;
766 } else if (bytes_xmit == 0) {
767 acct_info.dup_pages++;
771 pages = save_zero_page(f, block, offset, p, bytes_transferred);
773 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
774 * page would be stale
776 xbzrle_cache_zero_page(current_addr);
777 } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
778 pages = save_xbzrle_page(f, &p, current_addr, block,
779 offset, last_stage, bytes_transferred);
781 /* Can't send this cached data async, since the cache page
782 * might get updated before it gets to the wire
789 /* XBZRLE overflow or normal page */
791 *bytes_transferred += save_page_header(f, block,
792 offset | RAM_SAVE_FLAG_PAGE);
794 qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
796 qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
798 *bytes_transferred += TARGET_PAGE_SIZE;
800 acct_info.norm_pages++;
803 XBZRLE_cache_unlock();
808 static int do_compress_ram_page(CompressParam *param)
810 int bytes_sent, blen;
812 RAMBlock *block = param->block;
813 ram_addr_t offset = param->offset;
815 p = block->host + (offset & TARGET_PAGE_MASK);
817 bytes_sent = save_page_header(param->file, block, offset |
818 RAM_SAVE_FLAG_COMPRESS_PAGE);
819 blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
820 migrate_compress_level());
826 static inline void start_compression(CompressParam *param)
829 qemu_mutex_lock(&param->mutex);
831 qemu_cond_signal(&param->cond);
832 qemu_mutex_unlock(&param->mutex);
835 static inline void start_decompression(DecompressParam *param)
837 qemu_mutex_lock(&param->mutex);
839 qemu_cond_signal(&param->cond);
840 qemu_mutex_unlock(&param->mutex);
843 static uint64_t bytes_transferred;
845 static void flush_compressed_data(QEMUFile *f)
847 int idx, len, thread_count;
849 if (!migrate_use_compression()) {
852 thread_count = migrate_compress_threads();
853 for (idx = 0; idx < thread_count; idx++) {
854 if (!comp_param[idx].done) {
855 qemu_mutex_lock(comp_done_lock);
856 while (!comp_param[idx].done && !quit_comp_thread) {
857 qemu_cond_wait(comp_done_cond, comp_done_lock);
859 qemu_mutex_unlock(comp_done_lock);
861 if (!quit_comp_thread) {
862 len = qemu_put_qemu_file(f, comp_param[idx].file);
863 bytes_transferred += len;
868 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
871 param->block = block;
872 param->offset = offset;
875 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
877 uint64_t *bytes_transferred)
879 int idx, thread_count, bytes_xmit = -1, pages = -1;
881 thread_count = migrate_compress_threads();
882 qemu_mutex_lock(comp_done_lock);
884 for (idx = 0; idx < thread_count; idx++) {
885 if (comp_param[idx].done) {
886 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
887 set_compress_params(&comp_param[idx], block, offset);
888 start_compression(&comp_param[idx]);
890 acct_info.norm_pages++;
891 *bytes_transferred += bytes_xmit;
898 qemu_cond_wait(comp_done_cond, comp_done_lock);
901 qemu_mutex_unlock(comp_done_lock);
907 * ram_save_compressed_page: compress the given page and send it to the stream
909 * Returns: Number of pages written.
911 * @f: QEMUFile where to send the data
912 * @block: block that contains the page we want to send
913 * @offset: offset inside the block for the page
914 * @last_stage: if we are at the completion stage
915 * @bytes_transferred: increase it with the number of transferred bytes
917 static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
919 uint64_t *bytes_transferred)
925 RAMBlock *block = pss->block;
926 ram_addr_t offset = pss->offset;
928 p = block->host + offset;
931 ret = ram_control_save_page(f, block->offset,
932 offset, TARGET_PAGE_SIZE, &bytes_xmit);
934 *bytes_transferred += bytes_xmit;
937 if (block == last_sent_block) {
938 offset |= RAM_SAVE_FLAG_CONTINUE;
940 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
941 if (ret != RAM_SAVE_CONTROL_DELAYED) {
942 if (bytes_xmit > 0) {
943 acct_info.norm_pages++;
944 } else if (bytes_xmit == 0) {
945 acct_info.dup_pages++;
949 /* When starting a new block, the first page of
950 * the block should be sent out before other pages in the same
951 * block, and all the pages in the last block should have been sent
952 * out already; keeping this order is important because the 'cont' flag
953 * is used to avoid resending the block name.
955 if (block != last_sent_block) {
956 flush_compressed_data(f);
957 pages = save_zero_page(f, block, offset, p, bytes_transferred);
959 set_compress_params(&comp_param[0], block, offset);
960 /* Use the qemu thread to compress the data to make sure the
961 * first page is sent out before other pages
963 bytes_xmit = do_compress_ram_page(&comp_param[0]);
964 acct_info.norm_pages++;
965 qemu_put_qemu_file(f, comp_param[0].file);
966 *bytes_transferred += bytes_xmit;
970 pages = save_zero_page(f, block, offset, p, bytes_transferred);
972 pages = compress_page_with_multi_thread(f, block, offset,
982 * Find the next dirty page and update any state associated with
983 * the search process.
985 * Returns: True if a page is found
987 * @f: Current migration stream.
988 * @pss: Data about the state of the current dirty page scan.
989 * @*again: Set to false if the search has scanned the whole of RAM
990 * *ram_addr_abs: Pointer into which to store the address of the dirty page
991 * within the global ram_addr space
993 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
994 bool *again, ram_addr_t *ram_addr_abs)
996 pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
998 if (pss->complete_round && pss->block == last_seen_block &&
999 pss->offset >= last_offset) {
1001 * We've been once around the RAM and haven't found anything.
1007 if (pss->offset >= pss->block->used_length) {
1008 /* Didn't find anything in this RAM Block */
1010 pss->block = QLIST_NEXT_RCU(pss->block, next);
1012 /* Hit the end of the list */
1013 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1014 /* Flag that we've looped */
1015 pss->complete_round = true;
1016 ram_bulk_stage = false;
1017 if (migrate_use_xbzrle()) {
1018 /* If xbzrle is on, stop using the data compression at this
1019 * point. In theory, xbzrle can do better than compression.
1021 flush_compressed_data(f);
1022 compression_switch = false;
1025 /* Didn't find anything this time, but try again on the new block */
1029 /* Can go around again, but... */
1031 /* We've found something so probably don't need to */
1037 * Helper for 'get_queued_page' - gets a page off the queue
1038 * ms: MigrationState in
1039 * *offset: Used to return the offset within the RAMBlock
1040 * ram_addr_abs: global offset in the dirty/sent bitmaps
1042 * Returns: block (or NULL if none available)
1044 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1045 ram_addr_t *ram_addr_abs)
1047 RAMBlock *block = NULL;
1049 qemu_mutex_lock(&ms->src_page_req_mutex);
1050 if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1051 struct MigrationSrcPageRequest *entry =
1052 QSIMPLEQ_FIRST(&ms->src_page_requests);
1054 *offset = entry->offset;
1055 *ram_addr_abs = (entry->offset + entry->rb->offset) &
1058 if (entry->len > TARGET_PAGE_SIZE) {
1059 entry->len -= TARGET_PAGE_SIZE;
1060 entry->offset += TARGET_PAGE_SIZE;
1062 memory_region_unref(block->mr);
1063 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1067 qemu_mutex_unlock(&ms->src_page_req_mutex);
1073 * Unqueue a page from the queue fed by postcopy page requests; skips pages
1074 * that are already sent (!dirty)
1076 * ms: MigrationState in
1077 * pss: PageSearchStatus structure updated with found block/offset
1078 * ram_addr_abs: global offset in the dirty/sent bitmaps
1080 * Returns: true if a queued page is found
1082 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1083 ram_addr_t *ram_addr_abs)
1090 block = unqueue_page(ms, &offset, ram_addr_abs);
1092 * We're sending this page, and since it's postcopy nothing else
1093 * will dirty it, and we must make sure it doesn't get sent again
1094 * even if this queue request was received after the background
1095 * search already sent it.
1098 unsigned long *bitmap;
1099 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1100 dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1102 trace_get_queued_page_not_dirty(
1103 block->idstr, (uint64_t)offset,
1104 (uint64_t)*ram_addr_abs,
1105 test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1106 atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1108 trace_get_queued_page(block->idstr,
1110 (uint64_t)*ram_addr_abs);
1114 } while (block && !dirty);
1118 * As soon as we start servicing pages out of order, then we have
1119 * to kill the bulk stage, since the bulk stage assumes
1120 * in (migration_bitmap_find_and_reset_dirty) that every page is
1121 * dirty, that's no longer true.
1123 ram_bulk_stage = false;
1126 * We want the background search to continue from the queued page
1127 * since the guest is likely to want other pages near to the page
1128 * it just requested.
1131 pss->offset = offset;
1138 * flush_page_queue: Flush any remaining pages in the ram request queue
1139 * it should be empty at the end anyway, but in error cases there may be some left over
1142 * ms: MigrationState
1144 void flush_page_queue(MigrationState *ms)
1146 struct MigrationSrcPageRequest *mspr, *next_mspr;
1147 /* This queue generally should be empty - but in the case of a failed
1148 * migration it might have some entries left in it.
1151 QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1152 memory_region_unref(mspr->rb->mr);
1153 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1160 * Queue the pages for transmission, e.g. a request from postcopy destination
1161 * ms: MigrationStatus in which the queue is held
1162 * rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1163 * start: Offset from the start of the RAMBlock
1164 * len: Length (in bytes) to send
1165 * Return: 0 on success
1167 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1168 ram_addr_t start, ram_addr_t len)
1174 /* Reuse last RAMBlock */
1175 ramblock = ms->last_req_rb;
1179 * Shouldn't happen, we can't reuse the last RAMBlock if
1180 * it's the 1st request.
1182 error_report("ram_save_queue_pages no previous block");
1186 ramblock = qemu_ram_block_by_name(rbname);
1189 /* We shouldn't be asked for a non-existent RAMBlock */
1190 error_report("ram_save_queue_pages no block '%s'", rbname);
1193 ms->last_req_rb = ramblock;
1195 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1196 if (start + len > ramblock->used_length) {
1197 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1198 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1199 __func__, start, len, ramblock->used_length);
1203 struct MigrationSrcPageRequest *new_entry =
1204 g_malloc0(sizeof(struct MigrationSrcPageRequest));
1205 new_entry->rb = ramblock;
1206 new_entry->offset = start;
1207 new_entry->len = len;
1209 memory_region_ref(ramblock->mr);
1210 qemu_mutex_lock(&ms->src_page_req_mutex);
1211 QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1212 qemu_mutex_unlock(&ms->src_page_req_mutex);
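/*
 * Illustrative usage sketch, not part of the original file: the return
 * path thread queues a faulting range roughly like this; passing
 * rbname == NULL reuses the RAMBlock of the previous request.  The block
 * name and offset used here are hypothetical.
 */
static int example_queue_faulting_range(MigrationState *ms)
{
    /* request one host page of "pc.ram" starting at offset 0x200000 */
    return ram_save_queue_pages(ms, "pc.ram", 0x200000, qemu_host_page_size);
}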
1223 * ram_save_target_page: Save one target page
1226 * @f: QEMUFile where to send the data
1227 * @block: pointer to block that contains the page we want to send
1228 * @offset: offset inside the block for the page;
1229 * @last_stage: if we are at the completion stage
1230 * @bytes_transferred: increase it with the number of transferred bytes
1231 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1233 * Returns: Number of pages written.
1235 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1236 PageSearchStatus *pss,
1238 uint64_t *bytes_transferred,
1239 ram_addr_t dirty_ram_abs)
1243 /* Check if the page is dirty and if so, send it */
1244 if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1245 unsigned long *unsentmap;
1246 if (compression_switch && migrate_use_compression()) {
1247 res = ram_save_compressed_page(f, pss,
1251 res = ram_save_page(f, pss, last_stage,
1258 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1260 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1262 /* Only update last_sent_block if a block was actually sent; xbzrle
1263 * might have decided the page was identical so didn't bother writing
1267 last_sent_block = pss->block;
1275 * ram_save_host_page: Starting at *offset send pages up to the end
1276 * of the current host page. It's valid for the initial
1277 * offset to point into the middle of a host page
1278 * in which case the remainder of the host page is sent.
1279 * Only dirty target pages are sent.
1281 * Returns: Number of pages written.
1283 * @f: QEMUFile where to send the data
1284 * @block: pointer to block that contains the page we want to send
1285 * @offset: offset inside the block for the page; updated to last target page
1287 * @last_stage: if we are at the completion stage
1288 * @bytes_transferred: increase it with the number of transferred bytes
1289 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1291 static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
1292 PageSearchStatus *pss,
1294 uint64_t *bytes_transferred,
1295 ram_addr_t dirty_ram_abs)
1297 int tmppages, pages = 0;
1299 tmppages = ram_save_target_page(ms, f, pss, last_stage,
1300 bytes_transferred, dirty_ram_abs);
1306 pss->offset += TARGET_PAGE_SIZE;
1307 dirty_ram_abs += TARGET_PAGE_SIZE;
1308 } while (pss->offset & (qemu_host_page_size - 1));
1310 /* The offset we leave with is the last one we looked at */
1311 pss->offset -= TARGET_PAGE_SIZE;
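/*
 * Illustrative sketch, not part of the original file: when the host page
 * is bigger than the target page (e.g. 64KiB host pages with 4KiB target
 * pages) the loop above sends up to host_page_size / target_page_size
 * consecutive target pages, so that postcopy can later place one whole
 * host page atomically.  Hypothetical helper:
 */
static unsigned long example_target_pages_per_host_page(void)
{
    return qemu_host_page_size / TARGET_PAGE_SIZE;
}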
1316 * ram_find_and_save_block: Finds a dirty page and sends it to f
1318 * Called within an RCU critical section.
1320 * Returns: The number of pages written
1321 * 0 means no dirty pages
1323 * @f: QEMUFile where to send the data
1324 * @last_stage: if we are at the completion stage
1325 * @bytes_transferred: increase it with the number of transferred bytes
1327 * On systems where host-page-size > target-page-size it will send all the
1328 * pages in a host page that are dirty.
1331 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1332 uint64_t *bytes_transferred)
1334 PageSearchStatus pss;
1335 MigrationState *ms = migrate_get_current();
1338 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1341 pss.block = last_seen_block;
1342 pss.offset = last_offset;
1343 pss.complete_round = false;
1346 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1351 found = get_queued_page(ms, &pss, &dirty_ram_abs);
1354 /* priority queue empty, so just search for something dirty */
1355 found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1359 pages = ram_save_host_page(ms, f, &pss,
1360 last_stage, bytes_transferred,
1363 } while (!pages && again);
1365 last_seen_block = pss.block;
1366 last_offset = pss.offset;
1371 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1373 uint64_t pages = size / TARGET_PAGE_SIZE;
1375 acct_info.dup_pages += pages;
1377 acct_info.norm_pages += pages;
1378 bytes_transferred += size;
1379 qemu_update_position(f, size);
1383 static ram_addr_t ram_save_remaining(void)
1385 return migration_dirty_pages;
1388 uint64_t ram_bytes_remaining(void)
1390 return ram_save_remaining() * TARGET_PAGE_SIZE;
1393 uint64_t ram_bytes_transferred(void)
1395 return bytes_transferred;
1398 uint64_t ram_bytes_total(void)
1404 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1405 total += block->used_length;
1410 void free_xbzrle_decoded_buf(void)
1412 g_free(xbzrle_decoded_buf);
1413 xbzrle_decoded_buf = NULL;
1416 static void migration_bitmap_free(struct BitmapRcu *bmap)
1419 g_free(bmap->unsentmap);
1423 static void ram_migration_cleanup(void *opaque)
1425 /* the caller must hold the iothread lock or be in a bottom half, so there
1426 * is no write race against this migration_bitmap
1428 struct BitmapRcu *bitmap = migration_bitmap_rcu;
1429 atomic_rcu_set(&migration_bitmap_rcu, NULL);
1431 memory_global_dirty_log_stop();
1432 call_rcu(bitmap, migration_bitmap_free, rcu);
1435 XBZRLE_cache_lock();
1437 cache_fini(XBZRLE.cache);
1438 g_free(XBZRLE.encoded_buf);
1439 g_free(XBZRLE.current_buf);
1440 XBZRLE.cache = NULL;
1441 XBZRLE.encoded_buf = NULL;
1442 XBZRLE.current_buf = NULL;
1444 XBZRLE_cache_unlock();
1447 static void reset_ram_globals(void)
1449 last_seen_block = NULL;
1450 last_sent_block = NULL;
1452 last_version = ram_list.version;
1453 ram_bulk_stage = true;
1456 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1458 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1460 /* called in qemu main thread, so there is
1461 * no write race against this migration_bitmap
1463 if (migration_bitmap_rcu) {
1464 struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1465 bitmap = g_new(struct BitmapRcu, 1);
1466 bitmap->bmap = bitmap_new(new);
1468 /* prevent bits in the migration_bitmap from being set
1469 * by migration_bitmap_sync_range() at the same time.
1470 * It is safe for migration if bits in migration_bitmap are only cleared
1473 qemu_mutex_lock(&migration_bitmap_mutex);
1474 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1475 bitmap_set(bitmap->bmap, old, new - old);
1477 /* We don't have a way to safely extend the sentmap
1478 * with RCU; so mark it as missing, entry to postcopy
1481 bitmap->unsentmap = NULL;
1483 atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1484 qemu_mutex_unlock(&migration_bitmap_mutex);
1485 migration_dirty_pages += new - old;
1486 call_rcu(old_bitmap, migration_bitmap_free, rcu);
1491 * 'expected' is the value you expect the bitmap mostly to be full
1492 * of; it won't bother printing lines that are all this value.
1493 * If 'todump' is null the migration bitmap is dumped.
1495 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1497 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1500 int64_t linelen = 128;
1504 todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1507 for (cur = 0; cur < ram_pages; cur += linelen) {
1511 * Last line; catch the case where the line length
1512 * is longer than remaining ram
1514 if (cur + linelen > ram_pages) {
1515 linelen = ram_pages - cur;
1517 for (curb = 0; curb < linelen; curb++) {
1518 bool thisbit = test_bit(cur + curb, todump);
1519 linebuf[curb] = thisbit ? '1' : '.';
1520 found = found || (thisbit != expected);
1523 linebuf[curb] = '\0';
1524 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1529 /* **** functions for postcopy ***** */
1532 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1533 * Note: At this point the 'unsentmap' is the processed bitmap combined
1534 * with the dirtymap; so a '1' means it's either dirty or unsent.
1535 * start,length: Indexes into the bitmap for the first bit
1536 * representing the named block and length in target-pages
1538 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1539 PostcopyDiscardState *pds,
1540 unsigned long start,
1541 unsigned long length)
1543 unsigned long end = start + length; /* one after the end */
1544 unsigned long current;
1545 unsigned long *unsentmap;
1547 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1548 for (current = start; current < end; ) {
1549 unsigned long one = find_next_bit(unsentmap, end, current);
1552 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1553 unsigned long discard_length;
1556 discard_length = end - one;
1558 discard_length = zero - one;
1560 if (discard_length) {
1561 postcopy_discard_send_range(ms, pds, one, discard_length);
1563 current = one + discard_length;
1573 * Utility for the outgoing postcopy code.
1574 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1575 * passing it bitmap indexes and name.
1576 * Returns: 0 on success
1577 * (qemu_ram_foreach_block ends up passing unscaled lengths
1578 * which would mean postcopy code would have to deal with target page)
1580 static int postcopy_each_ram_send_discard(MigrationState *ms)
1582 struct RAMBlock *block;
1585 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1586 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1587 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1592 * Postcopy sends chunks of bitmap over the wire, but it
1593 * just needs indexes at this point, avoids it having
1594 * target page specific code.
1596 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1597 block->used_length >> TARGET_PAGE_BITS);
1598 postcopy_discard_send_finish(ms, pds);
1608 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
1609 * the two bitmaps, that are similar, but one is inverted.
1611 * We search for runs of target-pages that don't start or end on a
1612 * host page boundary;
1613 * unsent_pass=true: Cleans up partially unsent host pages by searching the unsentmap
1615 * unsent_pass=false: Cleans up partially dirty host pages by searching
1616 * the main migration bitmap
1619 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1621 PostcopyDiscardState *pds)
1623 unsigned long *bitmap;
1624 unsigned long *unsentmap;
1625 unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1626 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1627 unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1628 unsigned long last = first + (len - 1);
1629 unsigned long run_start;
1631 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1632 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1635 /* Find a sent page */
1636 run_start = find_next_zero_bit(unsentmap, last + 1, first);
1638 /* Find a dirty page */
1639 run_start = find_next_bit(bitmap, last + 1, first);
1642 while (run_start <= last) {
1643 bool do_fixup = false;
1644 unsigned long fixup_start_addr;
1645 unsigned long host_offset;
1648 * If the start of this run of pages is in the middle of a host
1649 * page, then we need to fixup this host page.
1651 host_offset = run_start % host_ratio;
1654 run_start -= host_offset;
1655 fixup_start_addr = run_start;
1656 /* For the next pass */
1657 run_start = run_start + host_ratio;
1659 /* Find the end of this run */
1660 unsigned long run_end;
1662 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1664 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1667 * If the end isn't at the start of a host page, then the
1668 * run doesn't finish at the end of a host page
1669 * and we need to discard.
1671 host_offset = run_end % host_ratio;
1674 fixup_start_addr = run_end - host_offset;
1676 * This host page has gone, the next loop iteration starts
1677 * from after the fixup
1679 run_start = fixup_start_addr + host_ratio;
1682 * No discards on this iteration, next loop starts from
1683 * next sent/dirty page
1685 run_start = run_end + 1;
1692 /* Tell the destination to discard this page */
1693 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1694 /* For the unsent_pass we:
1695 * discard partially sent pages
1696 * For the !unsent_pass (dirty) we:
1697 * discard partially dirty pages that were sent
1698 * (any partially sent pages were already discarded
1699 * by the previous unsent_pass)
1701 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1705 /* Clean up the bitmap */
1706 for (page = fixup_start_addr;
1707 page < fixup_start_addr + host_ratio; page++) {
1708 /* All pages in this host page are now not sent */
1709 set_bit(page, unsentmap);
1712 * Remark them as dirty, updating the count for any pages
1713 * that weren't previously dirty.
1715 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1720 /* Find the next sent page for the next iteration */
1721 run_start = find_next_zero_bit(unsentmap, last + 1,
1724 /* Find the next dirty page for the next iteration */
1725 run_start = find_next_bit(bitmap, last + 1, run_start);
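/*
 * Illustrative sketch, not part of the original file: a run of target
 * pages only needs fixing up when it starts or ends part-way through a
 * host page, i.e. when its index is not a multiple of the host/target
 * page ratio used above.  Hypothetical helper:
 */
static bool example_on_host_page_boundary(unsigned long tp_index)
{
    unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;

    return (tp_index % host_ratio) == 0;
}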
1731 * Utility for the outgoing postcopy code.
1733 * Discard any partially sent host-page size chunks, mark any partially
1734 * dirty host-page size chunks as all dirty.
1736 * Returns: 0 on success
1738 static int postcopy_chunk_hostpages(MigrationState *ms)
1740 struct RAMBlock *block;
1742 if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1743 /* Easy case - TPS==HPS - nothing to be done */
1747 /* Easiest way to make sure we don't resume in the middle of a host-page */
1748 last_seen_block = NULL;
1749 last_sent_block = NULL;
1752 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1753 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1755 PostcopyDiscardState *pds =
1756 postcopy_discard_send_init(ms, first, block->idstr);
1758 /* First pass: Discard all partially sent host pages */
1759 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1761 * Second pass: Ensure that all partially dirty host pages are made
1764 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1766 postcopy_discard_send_finish(ms, pds);
1767 } /* ram_list loop */
1773 * Transmit the set of pages to be discarded after precopy to the target
1774 * these are pages that:
1775 * a) Have been previously transmitted but are now dirty again
1776 * b) Pages that have never been transmitted, this ensures that
1777 * any pages on the destination that have been mapped by background
1778 * tasks get discarded (transparent huge pages is the specific concern)
1779 * Hopefully this is pretty sparse
1781 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1784 unsigned long *bitmap, *unsentmap;
1788 /* This should be our last sync, the src is now paused */
1789 migration_bitmap_sync();
1791 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1793 /* We don't have a safe way to resize the sentmap, so
1794 * if the bitmap was resized it will be NULL at this point
1797 error_report("migration ram resized during precopy phase");
1802 /* Deal with TPS != HPS */
1803 ret = postcopy_chunk_hostpages(ms);
1810 * Update the unsentmap to be unsentmap = unsentmap | dirty
1812 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1813 bitmap_or(unsentmap, unsentmap, bitmap,
1814 last_ram_offset() >> TARGET_PAGE_BITS);
1817 trace_ram_postcopy_send_discard_bitmap();
1818 #ifdef DEBUG_POSTCOPY
1819 ram_debug_dump_bitmap(unsentmap, true);
1822 ret = postcopy_each_ram_send_discard(ms);
1829 * At the start of the postcopy phase of migration, any now-dirty
1830 * precopied pages are discarded.
1832 * start, length describe a byte address range within the RAMBlock
1834 * Returns 0 on success.
1836 int ram_discard_range(MigrationIncomingState *mis,
1837 const char *block_name,
1838 uint64_t start, size_t length)
1843 RAMBlock *rb = qemu_ram_block_by_name(block_name);
1846 error_report("ram_discard_range: Failed to find block '%s'",
1851 uint8_t *host_startaddr = rb->host + start;
1853 if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1854 error_report("ram_discard_range: Unaligned start address: %p",
1859 if ((start + length) <= rb->used_length) {
1860 uint8_t *host_endaddr = host_startaddr + length;
1861 if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1862 error_report("ram_discard_range: Unaligned end address: %p",
1866 ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1868 error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1869 "/%zx/" RAM_ADDR_FMT")",
1870 block_name, start, length, rb->used_length);
1880 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
1881 * a long-running RCU critical section. When RCU reclaims in the code
1882 * start to become numerous it will be necessary to reduce the
1883 * granularity of these critical sections.
1886 static int ram_save_setup(QEMUFile *f, void *opaque)
1889 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1891 dirty_rate_high_cnt = 0;
1892 bitmap_sync_count = 0;
1893 migration_bitmap_sync_init();
1894 qemu_mutex_init(&migration_bitmap_mutex);
1896 if (migrate_use_xbzrle()) {
1897 XBZRLE_cache_lock();
1898 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1901 if (!XBZRLE.cache) {
1902 XBZRLE_cache_unlock();
1903 error_report("Error creating cache");
1906 XBZRLE_cache_unlock();
1908 /* We prefer not to abort if there is no memory */
1909 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1910 if (!XBZRLE.encoded_buf) {
1911 error_report("Error allocating encoded_buf");
1915 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1916 if (!XBZRLE.current_buf) {
1917 error_report("Error allocating current_buf");
1918 g_free(XBZRLE.encoded_buf);
1919 XBZRLE.encoded_buf = NULL;
1926 /* For memory_global_dirty_log_start below. */
1927 qemu_mutex_lock_iothread();
1929 qemu_mutex_lock_ramlist();
1931 bytes_transferred = 0;
1932 reset_ram_globals();
1934 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1935 migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1936 migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1937 bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1939 if (migrate_postcopy_ram()) {
1940 migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1941 bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1945 * Count the total number of pages used by ram blocks not including any
1946 * gaps due to alignment or unplugs.
1948 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1950 memory_global_dirty_log_start();
1951 migration_bitmap_sync();
1952 qemu_mutex_unlock_ramlist();
1953 qemu_mutex_unlock_iothread();
1955 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1957 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1958 qemu_put_byte(f, strlen(block->idstr));
1959 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1960 qemu_put_be64(f, block->used_length);
1965 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1966 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1968 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1973 static int ram_save_iterate(QEMUFile *f, void *opaque)
1981 if (ram_list.version != last_version) {
1982 reset_ram_globals();
1985 /* Read version before ram_list.blocks */
1988 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1990 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1992 while ((ret = qemu_file_rate_limit(f)) == 0) {
1995 pages = ram_find_and_save_block(f, false, &bytes_transferred);
1996 /* no more pages to send */
2000 pages_sent += pages;
2001 acct_info.iterations++;
2003 /* we want to check in the 1st loop, just in case it was the 1st time
2004 and we had to sync the dirty bitmap.
2005 qemu_get_clock_ns() is a bit expensive, so we only check it every few iterations.
2008 if ((i & 63) == 0) {
2009 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2010 if (t1 > MAX_WAIT) {
2011 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
2018 flush_compressed_data(f);
2022 * Must occur before EOS (or any QEMUFile operation)
2023 * because of RDMA protocol.
2025 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2027 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2028 bytes_transferred += 8;
2030 ret = qemu_file_get_error(f);
2038 /* Called with iothread lock */
2039 static int ram_save_complete(QEMUFile *f, void *opaque)
2043 if (!migration_in_postcopy(migrate_get_current())) {
2044 migration_bitmap_sync();
2047 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2049 /* try transferring iterative blocks of memory */
2051 /* flush all remaining blocks regardless of rate limiting */
2055 pages = ram_find_and_save_block(f, true, &bytes_transferred);
2056 /* no more blocks to send */
2062 flush_compressed_data(f);
2063 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2067 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2072 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2073 uint64_t *non_postcopiable_pending,
2074 uint64_t *postcopiable_pending)
2076 uint64_t remaining_size;
2078 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2080 if (!migration_in_postcopy(migrate_get_current()) &&
2081 remaining_size < max_size) {
2082 qemu_mutex_lock_iothread();
2084 migration_bitmap_sync();
2086 qemu_mutex_unlock_iothread();
2087 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2090 /* We can do postcopy, and all the data is postcopiable */
2091 *postcopiable_pending += remaining_size;
2094 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2096 unsigned int xh_len;
2098 uint8_t *loaded_data;
2100 if (!xbzrle_decoded_buf) {
2101 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2103 loaded_data = xbzrle_decoded_buf;
2105 /* extract RLE header */
2106 xh_flags = qemu_get_byte(f);
2107 xh_len = qemu_get_be16(f);
2109 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2110 error_report("Failed to load XBZRLE page - wrong compression!");
2114 if (xh_len > TARGET_PAGE_SIZE) {
2115 error_report("Failed to load XBZRLE page - len overflow!");
2118 /* load data and decode */
2119 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2122 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
2123 TARGET_PAGE_SIZE) == -1) {
2124 error_report("Failed to load XBZRLE page - decode error!");
2131 /* Must be called from within an RCU critical section.
2132 * Returns a pointer from within the RCU-protected ram_list.
2135 * Read a RAMBlock ID from the stream f.
2137 * f: Stream to read from
2138 * flags: Page flags (mostly to see if it's a continuation of previous block)
2140 static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
2143 static RAMBlock *block = NULL;
2147 if (flags & RAM_SAVE_FLAG_CONTINUE) {
2149 error_report("Ack, bad migration stream!");
2155 len = qemu_get_byte(f);
2156 qemu_get_buffer(f, (uint8_t *)id, len);
2159 block = qemu_ram_block_by_name(id);
2161 error_report("Can't find block %s", id);
2168 static inline void *host_from_ram_block_offset(RAMBlock *block,
2171 if (!offset_in_ramblock(block, offset)) {
2175 return block->host + offset;
2179 * If a page (or a whole RDMA chunk) has been
2180 * determined to be zero, then zap it.
2182 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2184 if (ch != 0 || !is_zero_range(host, size)) {
2185 memset(host, ch, size);
2189 static void *do_data_decompress(void *opaque)
2191 DecompressParam *param = opaque;
2192 unsigned long pagesize;
2194 while (!quit_decomp_thread) {
2195 qemu_mutex_lock(&param->mutex);
2196 while (!param->start && !quit_decomp_thread) {
2197 qemu_cond_wait(&param->cond, &param->mutex);
2198 pagesize = TARGET_PAGE_SIZE;
2199 if (!quit_decomp_thread) {
2200 /* uncompress() may fail in some cases, especially
2201 * when the page is dirtied while it is being compressed; that's
2202 * not a problem because the dirty page will be retransmitted
2203 * and uncompress() won't corrupt the data in other pages.
2205 uncompress((Bytef *)param->des, &pagesize,
2206 (const Bytef *)param->compbuf, param->len);
2208 param->start = false;
2210 qemu_mutex_unlock(&param->mutex);
2216 void migrate_decompress_threads_create(void)
2218 int i, thread_count;
2220 thread_count = migrate_decompress_threads();
2221 decompress_threads = g_new0(QemuThread, thread_count);
2222 decomp_param = g_new0(DecompressParam, thread_count);
2223 quit_decomp_thread = false;
2224 for (i = 0; i < thread_count; i++) {
2225 qemu_mutex_init(&decomp_param[i].mutex);
2226 qemu_cond_init(&decomp_param[i].cond);
2227 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2228 qemu_thread_create(decompress_threads + i, "decompress",
2229 do_data_decompress, decomp_param + i,
2230 QEMU_THREAD_JOINABLE);
2234 void migrate_decompress_threads_join(void)
2236 int i, thread_count;
2238 quit_decomp_thread = true;
2239 thread_count = migrate_decompress_threads();
2240 for (i = 0; i < thread_count; i++) {
2241 qemu_mutex_lock(&decomp_param[i].mutex);
2242 qemu_cond_signal(&decomp_param[i].cond);
2243 qemu_mutex_unlock(&decomp_param[i].mutex);
2245 for (i = 0; i < thread_count; i++) {
2246 qemu_thread_join(decompress_threads + i);
2247 qemu_mutex_destroy(&decomp_param[i].mutex);
2248 qemu_cond_destroy(&decomp_param[i].cond);
2249 g_free(decomp_param[i].compbuf);
2251 g_free(decompress_threads);
2252 g_free(decomp_param);
2253 decompress_threads = NULL;
2254 decomp_param = NULL;
2257 static void decompress_data_with_multi_threads(QEMUFile *f,
2258 void *host, int len)
2260 int idx, thread_count;
2262 thread_count = migrate_decompress_threads();
2264 for (idx = 0; idx < thread_count; idx++) {
2265 if (!decomp_param[idx].start) {
2266 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
2267 decomp_param[idx].des = host;
2268 decomp_param[idx].len = len;
2269 start_decompression(&decomp_param[idx]);
2273 if (idx < thread_count) {
2280 * Allocate data structures etc needed by incoming migration with postcopy-ram
2281 * postcopy-ram's similarly named postcopy_ram_incoming_init does the work
2283 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2285 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2287 return postcopy_ram_incoming_init(mis, ram_pages);
2291 * Called in postcopy mode by ram_load().
2292 * rcu_read_lock is taken prior to this being called.
2294 static int ram_load_postcopy(QEMUFile *f)
2296 int flags = 0, ret = 0;
2297 bool place_needed = false;
2298 bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2299 MigrationIncomingState *mis = migration_incoming_get_current();
2300 /* Temporary page that is later 'placed' */
2301 void *postcopy_host_page = postcopy_get_tmp_page(mis);
2302 void *last_host = NULL;
2303 bool all_zero = false;
2305 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2308 void *page_buffer = NULL;
2309 void *place_source = NULL;
2312 addr = qemu_get_be64(f);
2313 flags = addr & ~TARGET_PAGE_MASK;
2314 addr &= TARGET_PAGE_MASK;
2316 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2317 place_needed = false;
2318 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2319 RAMBlock *block = ram_block_from_stream(f, flags);
2321 host = host_from_ram_block_offset(block, addr);
2323 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2329 * Postcopy requires that we place whole host pages atomically.
2330 * To make it atomic, the data is read into a temporary page
2331 * that's moved into place later.
2332 * The migration protocol uses, possibly smaller, target-pages
2333 * however the source ensures it always sends all the components
2334 * of a host page in order.
2336 page_buffer = postcopy_host_page +
2337 ((uintptr_t)host & ~qemu_host_page_mask);
2338 /* If all TP are zero then we can optimise the place */
2339 if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2342 /* not the 1st TP within the HP */
2343 if (host != (last_host + TARGET_PAGE_SIZE)) {
2344 error_report("Non-sequential target page %p/%p",
2353 * If it's the last part of a host page then we place the host
2356 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2357 ~qemu_host_page_mask) == 0;
2358 place_source = postcopy_host_page;
2362 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2363 case RAM_SAVE_FLAG_COMPRESS:
2364 ch = qemu_get_byte(f);
2365 memset(page_buffer, ch, TARGET_PAGE_SIZE);
2371 case RAM_SAVE_FLAG_PAGE:
2373 if (!place_needed || !matching_page_sizes) {
2374 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2376 /* Avoids the qemu_file copy during postcopy, which is
2377 * going to do a copy later; can only do it when we
2378 * do this read in one go (matching page sizes)
2380 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2384 case RAM_SAVE_FLAG_EOS:
2388 error_report("Unknown combination of migration flags: %#x"
2389 " (postcopy mode)", flags);
2394 /* This gets called at the last target page in the host page */
2396 ret = postcopy_place_page_zero(mis,
2397 host + TARGET_PAGE_SIZE -
2398 qemu_host_page_size);
2400 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2401 qemu_host_page_size,
2406 ret = qemu_file_get_error(f);
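/*
 * Illustrative sketch, not part of the original file: "host" above points
 * at the last target page of the host page being assembled, so the start
 * address handed to postcopy_place_page() is recovered as shown by this
 * hypothetical helper.
 */
static void *example_host_page_start(void *last_target_page)
{
    return (uint8_t *)last_target_page + TARGET_PAGE_SIZE
                                       - qemu_host_page_size;
}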
2413 static int ram_load(QEMUFile *f, void *opaque, int version_id)
2415 int flags = 0, ret = 0;
2416 static uint64_t seq_iter;
2419 * If the system is running in postcopy mode, page inserts to host memory must be atomic
2422 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2426 if (version_id != 4) {
2430 /* This RCU critical section can be very long running.
2431 * When RCU reclaims in the code start to become numerous,
2432 * it will be necessary to reduce the granularity of this critical section.
2437 if (postcopy_running) {
2438 ret = ram_load_postcopy(f);
2441 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2442 ram_addr_t addr, total_ram_bytes;
2446 addr = qemu_get_be64(f);
2447 flags = addr & ~TARGET_PAGE_MASK;
2448 addr &= TARGET_PAGE_MASK;
2450 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2451 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2452 RAMBlock *block = ram_block_from_stream(f, flags);
2454 host = host_from_ram_block_offset(block, addr);
2456 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2462 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2463 case RAM_SAVE_FLAG_MEM_SIZE:
2464 /* Synchronize RAM block list */
2465 total_ram_bytes = addr;
2466 while (!ret && total_ram_bytes) {
2471 len = qemu_get_byte(f);
2472 qemu_get_buffer(f, (uint8_t *)id, len);
2474 length = qemu_get_be64(f);
2476 block = qemu_ram_block_by_name(id);
2478 if (length != block->used_length) {
2479 Error *local_err = NULL;
2481 ret = qemu_ram_resize(block, length,
2484 error_report_err(local_err);
2487 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2490 error_report("Unknown ramblock \"%s\", cannot "
2491 "accept migration", id);
2495 total_ram_bytes -= length;
2499 case RAM_SAVE_FLAG_COMPRESS:
2500 ch = qemu_get_byte(f);
2501 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2504 case RAM_SAVE_FLAG_PAGE:
2505 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2508 case RAM_SAVE_FLAG_COMPRESS_PAGE:
2509 len = qemu_get_be32(f);
2510 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2511 error_report("Invalid compressed data length: %d", len);
2515 decompress_data_with_multi_threads(f, host, len);
2518 case RAM_SAVE_FLAG_XBZRLE:
2519 if (load_xbzrle(f, addr, host) < 0) {
2520 error_report("Failed to decompress XBZRLE page at "
2521 RAM_ADDR_FMT, addr);
2526 case RAM_SAVE_FLAG_EOS:
2530 if (flags & RAM_SAVE_FLAG_HOOK) {
2531 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2533 error_report("Unknown combination of migration flags: %#x",
2539 ret = qemu_file_get_error(f);
2544 DPRINTF("Completed load of VM with exit code %d seq iteration "
2545 "%" PRIu64 "\n", ret, seq_iter);
2549 static SaveVMHandlers savevm_ram_handlers = {
2550 .save_live_setup = ram_save_setup,
2551 .save_live_iterate = ram_save_iterate,
2552 .save_live_complete_postcopy = ram_save_complete,
2553 .save_live_complete_precopy = ram_save_complete,
2554 .save_live_pending = ram_save_pending,
2555 .load_state = ram_load,
2556 .cleanup = ram_migration_cleanup,
2559 void ram_mig_init(void)
2561 qemu_mutex_init(&XBZRLE.lock);
2562 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);