4 * Copyright (c) 2003-2008 Fabrice Bellard
5 * Copyright (c) 2011-2015 Red Hat Inc
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this software and associated documentation files (the "Software"), to deal
12 * in the Software without restriction, including without limitation the rights
13 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14 * copies of the Software, and to permit persons to whom the Software is
15 * furnished to do so, subject to the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
28 #include "qemu/osdep.h"
29 #include "qemu-common.h"
32 #include "qapi-event.h"
33 #include "qemu/cutils.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "qemu/timer.h"
37 #include "qemu/main-loop.h"
38 #include "migration/migration.h"
39 #include "migration/postcopy-ram.h"
40 #include "exec/address-spaces.h"
41 #include "migration/page_cache.h"
42 #include "qemu/error-report.h"
44 #include "exec/ram_addr.h"
45 #include "qemu/rcu_queue.h"
46 #include "migration/colo.h"
48 /***********************************************************/
49 /* ram save/restore */
51 #define RAM_SAVE_FLAG_FULL 0x01 /* Obsolete, not used anymore */
52 #define RAM_SAVE_FLAG_COMPRESS 0x02
53 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
54 #define RAM_SAVE_FLAG_PAGE 0x08
55 #define RAM_SAVE_FLAG_EOS 0x10
56 #define RAM_SAVE_FLAG_CONTINUE 0x20
57 #define RAM_SAVE_FLAG_XBZRLE 0x40
58 /* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
59 #define RAM_SAVE_FLAG_COMPRESS_PAGE 0x100
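/*
 * An illustrative sketch of how these flags travel on the wire: they are
 * OR'd into the low, sub-page bits of the be64 page offset written by
 * save_page_header(), and the loader splits them back out with
 * TARGET_PAGE_MASK (as ram_load_postcopy() does below).
 */
static inline void example_split_wire_word(uint64_t wire_word,
                                           uint64_t *addr, int *flags)
{
    *flags = wire_word & ~TARGET_PAGE_MASK; /* low bits carry RAM_SAVE_FLAG_* */
    *addr = wire_word & TARGET_PAGE_MASK;   /* high bits carry the page offset */
}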
61 static uint8_t *ZERO_TARGET_PAGE;
63 static inline bool is_zero_range(uint8_t *p, uint64_t size)
65 return buffer_is_zero(p, size);
68 /* This struct contains the XBZRLE cache and a static page
69    used by the compression */
71 /* buffer used for XBZRLE encoding */
73 /* buffer for storing page content */
75 /* Cache for XBZRLE, Protected by lock. */
80 /* buffer used for XBZRLE decoding */
81 static uint8_t *xbzrle_decoded_buf;
83 static void XBZRLE_cache_lock(void)
85 if (migrate_use_xbzrle())
86 qemu_mutex_lock(&XBZRLE.lock);
89 static void XBZRLE_cache_unlock(void)
91 if (migrate_use_xbzrle())
92 qemu_mutex_unlock(&XBZRLE.lock);
96 * xbzrle_cache_resize: resize the xbzrle cache
98 * This function is called from qmp_migrate_set_cache_size in the main
99 * thread, possibly while a migration is in progress. A running
100 * migration may be using the cache and might finish during this call,
101 * hence changes to the cache are protected by XBZRLE.lock.
103 * Returns the new_size or negative in case of error.
105 * @new_size: new cache size
107 int64_t xbzrle_cache_resize(int64_t new_size)
109 PageCache *new_cache;
112 if (new_size < TARGET_PAGE_SIZE) {
118 if (XBZRLE.cache != NULL) {
119 if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
122 new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
125 error_report("Error creating cache");
130 cache_fini(XBZRLE.cache);
131 XBZRLE.cache = new_cache;
135 ret = pow2floor(new_size);
137 XBZRLE_cache_unlock();
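/*
 * An illustrative usage sketch, assuming the caller runs in the main
 * thread as qmp_migrate_set_cache_size() does.  The size is rounded down
 * to a power of two, so callers should report the value returned here
 * rather than the value they requested.
 */
static inline int64_t example_resize_xbzrle_cache(int64_t requested_bytes)
{
    int64_t actual = xbzrle_cache_resize(requested_bytes);

    if (actual < 0) {
        error_report("xbzrle cache resize to %" PRId64 " bytes failed",
                     requested_bytes);
    }
    return actual; /* pow2floor(requested_bytes) on success */
}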
143 /* Main migration bitmap */
145 /* bitmap of pages that haven't been sent even once;
146 * currently only maintained and used in postcopy, where
147 * it is used to send the dirty bitmap at the start
148 * of the postcopy phase
150 unsigned long *unsentmap;
152 typedef struct RAMBitmap RAMBitmap;
155 * An outstanding page request, on the source, having been received and queued
158 struct RAMSrcPageRequest {
163 QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
166 /* State of RAM for migration */
168 /* QEMUFile used for this migration */
170 /* Last block that we have visited searching for dirty pages */
171 RAMBlock *last_seen_block;
172 /* Last block from where we have sent data */
173 RAMBlock *last_sent_block;
174 /* Last dirty target page we have sent */
175 ram_addr_t last_page;
176 /* last ram version we have seen */
177 uint32_t last_version;
178 /* We are in the first round */
180 /* How many times we have dirty too many pages */
181 int dirty_rate_high_cnt;
182 /* How many times we have synchronized the bitmap */
183 uint64_t bitmap_sync_count;
184 /* these variables are used for bitmap sync */
185 /* last time we did a full bitmap_sync */
186 int64_t time_last_bitmap_sync;
187 /* bytes transferred at start_time */
188 uint64_t bytes_xfer_prev;
189 /* number of dirty pages since start_time */
190 uint64_t num_dirty_pages_period;
191 /* xbzrle misses since the beginning of the period */
192 uint64_t xbzrle_cache_miss_prev;
193 /* number of iterations at the beginning of period */
194 uint64_t iterations_prev;
195 /* Accounting fields */
196 /* number of zero pages (it used to count pages filled with the same char) */
198 /* number of normal transferred pages */
200 /* Iterations since start */
202 /* xbzrle transmitted bytes. Note that these are already encoded
203 * bytes, so they can't be calculated from the page count */
204 uint64_t xbzrle_bytes;
205 /* xbzrle transmitted pages */
206 uint64_t xbzrle_pages;
207 /* number of xbzrle cache misses */
208 uint64_t xbzrle_cache_miss;
209 /* xbzrle miss rate */
210 double xbzrle_cache_miss_rate;
211 /* xbzrle number of overflows */
212 uint64_t xbzrle_overflows;
213 /* number of dirty bits in the bitmap */
214 uint64_t migration_dirty_pages;
215 /* total number of bytes transferred */
216 uint64_t bytes_transferred;
217 /* number of dirtied pages in the last second */
218 uint64_t dirty_pages_rate;
219 /* Count of requests incoming from destination */
220 uint64_t postcopy_requests;
221 /* protects modification of the bitmap */
222 QemuMutex bitmap_mutex;
223 /* Ram Bitmap protected by RCU */
224 RAMBitmap *ram_bitmap;
225 /* The RAMBlock used in the last src_page_requests */
226 RAMBlock *last_req_rb;
227 /* Queue of outstanding page requests from the destination */
228 QemuMutex src_page_req_mutex;
229 QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
231 typedef struct RAMState RAMState;
233 static RAMState ram_state;
235 uint64_t dup_mig_pages_transferred(void)
237 return ram_state.zero_pages;
240 uint64_t norm_mig_pages_transferred(void)
242 return ram_state.norm_pages;
245 uint64_t xbzrle_mig_bytes_transferred(void)
247 return ram_state.xbzrle_bytes;
250 uint64_t xbzrle_mig_pages_transferred(void)
252 return ram_state.xbzrle_pages;
255 uint64_t xbzrle_mig_pages_cache_miss(void)
257 return ram_state.xbzrle_cache_miss;
260 double xbzrle_mig_cache_miss_rate(void)
262 return ram_state.xbzrle_cache_miss_rate;
265 uint64_t xbzrle_mig_pages_overflow(void)
267 return ram_state.xbzrle_overflows;
270 uint64_t ram_bytes_transferred(void)
272 return ram_state.bytes_transferred;
275 uint64_t ram_bytes_remaining(void)
277 return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
280 uint64_t ram_dirty_sync_count(void)
282 return ram_state.bitmap_sync_count;
285 uint64_t ram_dirty_pages_rate(void)
287 return ram_state.dirty_pages_rate;
290 uint64_t ram_postcopy_requests(void)
292 return ram_state.postcopy_requests;
295 /* used by the search for pages to send */
296 struct PageSearchStatus {
297 /* Current block being searched */
299 /* Current page to search from */
301 /* Set once we wrap around */
304 typedef struct PageSearchStatus PageSearchStatus;
306 struct CompressParam {
315 typedef struct CompressParam CompressParam;
317 struct DecompressParam {
326 typedef struct DecompressParam DecompressParam;
328 static CompressParam *comp_param;
329 static QemuThread *compress_threads;
330 /* comp_done_cond is used to wake up the migration thread when
331 * one of the compression threads has finished compressing a page.
332 * comp_done_lock is the mutex paired with comp_done_cond.
334 static QemuMutex comp_done_lock;
335 static QemuCond comp_done_cond;
336 /* The empty QEMUFileOps will be used by file in CompressParam */
337 static const QEMUFileOps empty_ops = { };
339 static DecompressParam *decomp_param;
340 static QemuThread *decompress_threads;
341 static QemuMutex decomp_done_lock;
342 static QemuCond decomp_done_cond;
344 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
347 static void *do_data_compress(void *opaque)
349 CompressParam *param = opaque;
353 qemu_mutex_lock(¶m->mutex);
354 while (!param->quit) {
356 block = param->block;
357 offset = param->offset;
359 qemu_mutex_unlock(¶m->mutex);
361 do_compress_ram_page(param->file, block, offset);
363 qemu_mutex_lock(&comp_done_lock);
365 qemu_cond_signal(&comp_done_cond);
366 qemu_mutex_unlock(&comp_done_lock);
368 qemu_mutex_lock(¶m->mutex);
370 qemu_cond_wait(¶m->cond, ¶m->mutex);
373 qemu_mutex_unlock(¶m->mutex);
378 static inline void terminate_compression_threads(void)
380 int idx, thread_count;
382 thread_count = migrate_compress_threads();
384 for (idx = 0; idx < thread_count; idx++) {
385 qemu_mutex_lock(&comp_param[idx].mutex);
386 comp_param[idx].quit = true;
387 qemu_cond_signal(&comp_param[idx].cond);
388 qemu_mutex_unlock(&comp_param[idx].mutex);
392 void migrate_compress_threads_join(void)
396 if (!migrate_use_compression()) {
399 terminate_compression_threads();
400 thread_count = migrate_compress_threads();
401 for (i = 0; i < thread_count; i++) {
402 qemu_thread_join(compress_threads + i);
403 qemu_fclose(comp_param[i].file);
404 qemu_mutex_destroy(&comp_param[i].mutex);
405 qemu_cond_destroy(&comp_param[i].cond);
407 qemu_mutex_destroy(&comp_done_lock);
408 qemu_cond_destroy(&comp_done_cond);
409 g_free(compress_threads);
411 compress_threads = NULL;
415 void migrate_compress_threads_create(void)
419 if (!migrate_use_compression()) {
422 thread_count = migrate_compress_threads();
423 compress_threads = g_new0(QemuThread, thread_count);
424 comp_param = g_new0(CompressParam, thread_count);
425 qemu_cond_init(&comp_done_cond);
426 qemu_mutex_init(&comp_done_lock);
427 for (i = 0; i < thread_count; i++) {
428 /* comp_param[i].file is just used as a dummy buffer to save data,
429 * so set its ops to empty.
431 comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
432 comp_param[i].done = true;
433 comp_param[i].quit = false;
434 qemu_mutex_init(&comp_param[i].mutex);
435 qemu_cond_init(&comp_param[i].cond);
436 qemu_thread_create(compress_threads + i, "compress",
437 do_data_compress, comp_param + i,
438 QEMU_THREAD_JOINABLE);
443 * save_page_header: write page header to wire
445 * If the block differs from the last one sent, it also writes the block identification
447 * Returns the number of bytes written
449 * @rs: current RAM state
450 * @block: block that contains the page we want to send
451 * @offset: offset inside the block for the page;
452 * the lower bits of the offset carry the RAM_SAVE_FLAG_* flags
454 static size_t save_page_header(RAMState *rs, RAMBlock *block, ram_addr_t offset)
458 if (block == rs->last_sent_block) {
459 offset |= RAM_SAVE_FLAG_CONTINUE;
461 qemu_put_be64(rs->f, offset);
464 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
465 len = strlen(block->idstr);
466 qemu_put_byte(rs->f, len);
467 qemu_put_buffer(rs->f, (uint8_t *)block->idstr, len);
469 rs->last_sent_block = block;
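/*
 * A sketch of the header this writes on the wire:
 *   be64   offset | RAM_SAVE_FLAG_* [| RAM_SAVE_FLAG_CONTINUE]
 *   byte   strlen(block->idstr)  \  only when CONTINUE is not set,
 *   bytes  block->idstr          /  i.e. when the block changed
 * so the number of bytes written is 8, plus 1 + strlen(idstr) when the
 * block identification is included.
 */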
475 * mig_throttle_guest_down: throttle down the guest
477 * Reduce amount of guest cpu execution to hopefully slow down memory
478 * writes. If guest dirty memory rate is reduced below the rate at
479 * which we can transfer pages to the destination then we should be
480 * able to complete migration. Some workloads dirty memory way too
481 * fast and will not effectively converge, even with auto-converge.
483 static void mig_throttle_guest_down(void)
485 MigrationState *s = migrate_get_current();
486 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
487 uint64_t pct_increment = s->parameters.cpu_throttle_increment;
489 /* We have not started throttling yet. Let's start it. */
490 if (!cpu_throttle_active()) {
491 cpu_throttle_set(pct_initial);
493 /* Throttling already on, just increase the rate */
494 cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
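/*
 * A worked example of the throttle progression, using hypothetical
 * parameter values: with cpu_throttle_initial = 20 and
 * cpu_throttle_increment = 10, successive calls ask for 20%, 30%, 40%,
 * ... of guest CPU time to be consumed by the throttle (cpu_throttle_set()
 * itself clamps the percentage to its supported range).
 */
static inline uint64_t example_throttle_after_bumps(uint64_t pct_initial,
                                                    uint64_t pct_increment,
                                                    unsigned int bumps)
{
    /* bumps = 0, 1, 2, ... gives 20, 30, 40, ... in the example above */
    return pct_initial + (uint64_t)bumps * pct_increment;
}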
499 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
501 * @rs: current RAM state
502 * @current_addr: address for the zero page
504 * Update the xbzrle cache to reflect a page that's been sent as all 0.
505 * The important thing is that a stale (not-yet-0'd) page be replaced
507 * As a bonus, if the page wasn't in the cache it gets added so that
508 * when a small write is made into the 0'd page it gets XBZRLE sent.
510 static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
512 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
516 /* We don't care if this fails to allocate a new cache page
517 * as long as it updated an old one */
518 cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
519 rs->bitmap_sync_count);
522 #define ENCODING_FLAG_XBZRLE 0x1
525 * save_xbzrle_page: compress and send current page
527 * Returns: 1 means that we wrote the page
528 * 0 means that page is identical to the one already sent
529 * -1 means that xbzrle would be longer than normal
531 * @rs: current RAM state
532 * @current_data: pointer to the address of the page contents
533 * @current_addr: addr of the page
534 * @block: block that contains the page we want to send
535 * @offset: offset inside the block for the page
536 * @last_stage: if we are at the completion stage
538 static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
539 ram_addr_t current_addr, RAMBlock *block,
540 ram_addr_t offset, bool last_stage)
542 int encoded_len = 0, bytes_xbzrle;
543 uint8_t *prev_cached_page;
545 if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
546 rs->xbzrle_cache_miss++;
548 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
549 rs->bitmap_sync_count) == -1) {
552 /* update *current_data when the page has been
553 inserted into the cache */
554 *current_data = get_cached_data(XBZRLE.cache, current_addr);
560 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
562 /* save current buffer into memory */
563 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
565 /* XBZRLE encoding (if there is no overflow) */
566 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
567 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
569 if (encoded_len == 0) {
570 trace_save_xbzrle_page_skipping();
572 } else if (encoded_len == -1) {
573 trace_save_xbzrle_page_overflow();
574 rs->xbzrle_overflows++;
575 /* update data in the cache */
577 memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
578 *current_data = prev_cached_page;
583 /* update the data in the cache so the next encoding diffs against what the destination now has */
585 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
588 /* Send XBZRLE based compressed page */
589 bytes_xbzrle = save_page_header(rs, block,
590 offset | RAM_SAVE_FLAG_XBZRLE);
591 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
592 qemu_put_be16(rs->f, encoded_len);
593 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
594 bytes_xbzrle += encoded_len + 1 + 2;
596 rs->xbzrle_bytes += bytes_xbzrle;
597 rs->bytes_transferred += bytes_xbzrle;
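/*
 * A sketch of the XBZRLE record produced above:
 *   <page header from save_page_header(), with RAM_SAVE_FLAG_XBZRLE set>
 *   byte   ENCODING_FLAG_XBZRLE
 *   be16   encoded_len
 *   bytes  XBZRLE.encoded_buf[encoded_len]
 * which is why bytes_xbzrle adds encoded_len + 1 + 2 on top of the header.
 */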
603 * migration_bitmap_find_dirty: find the next dirty page from start
605 * Called with rcu_read_lock() to protect migration_bitmap
607 * Returns the page offset within the RAMBlock of the start of a dirty page
609 * @rs: current RAM state
610 * @rb: RAMBlock where to search for dirty pages
611 * @start: page where we start the search
614 unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
617 unsigned long base = rb->offset >> TARGET_PAGE_BITS;
618 unsigned long nr = base + start;
619 uint64_t rb_size = rb->used_length;
620 unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
621 unsigned long *bitmap;
625 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
626 if (rs->ram_bulk_stage && nr > base) {
629 next = find_next_bit(bitmap, size, nr);
635 static inline bool migration_bitmap_clear_dirty(RAMState *rs,
640 unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
641 unsigned long nr = (rb->offset >> TARGET_PAGE_BITS) + page;
643 ret = test_and_clear_bit(nr, bitmap);
646 rs->migration_dirty_pages--;
651 static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
652 ram_addr_t start, ram_addr_t length)
654 unsigned long *bitmap;
655 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
656 rs->migration_dirty_pages +=
657 cpu_physical_memory_sync_dirty_bitmap(bitmap, rb, start, length,
658 &rs->num_dirty_pages_period);
662 * ram_pagesize_summary: calculate all the pagesizes of a VM
664 * Returns a summary bitmap of the page sizes of all RAMBlocks
666 * For VMs with just normal pages this is equivalent to the host page
667 * size. If it's got some huge pages then it's the OR of all the
668 * different page sizes.
670 uint64_t ram_pagesize_summary(void)
673 uint64_t summary = 0;
675 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
676 summary |= block->page_size;
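/*
 * A worked example of the summary value, with illustrative sizes: a guest
 * whose RAMBlocks sit on normal 4 KiB host pages plus 2 MiB hugetlbfs
 * pages would report 0x1000 | 0x200000 = 0x201000.
 */
static inline uint64_t example_pagesize_summary(void)
{
    uint64_t summary = 0;

    summary |= 4 * 1024;        /* a RAMBlock backed by 4 KiB host pages */
    summary |= 2 * 1024 * 1024; /* a RAMBlock backed by 2 MiB huge pages */

    return summary;             /* 0x201000 */
}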
682 static void migration_bitmap_sync(RAMState *rs)
686 uint64_t bytes_xfer_now;
688 rs->bitmap_sync_count++;
690 if (!rs->bytes_xfer_prev) {
691 rs->bytes_xfer_prev = ram_bytes_transferred();
694 if (!rs->time_last_bitmap_sync) {
695 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
698 trace_migration_bitmap_sync_start();
699 memory_global_dirty_log_sync();
701 qemu_mutex_lock(&rs->bitmap_mutex);
703 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
704 migration_bitmap_sync_range(rs, block, 0, block->used_length);
707 qemu_mutex_unlock(&rs->bitmap_mutex);
709 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
711 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
713 /* more than 1 second = 1000 milliseconds */
714 if (end_time > rs->time_last_bitmap_sync + 1000) {
715 if (migrate_auto_converge()) {
716 /* The following detection logic can be refined later. For now:
717 check to see if the dirtied bytes exceed 50% of the approx.
718 amount of bytes that just got transferred since the last time we
719 were in this routine. If that happens twice, start or increase throttling.
721 bytes_xfer_now = ram_bytes_transferred();
723 if (rs->dirty_pages_rate &&
724 (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
725 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
726 (rs->dirty_rate_high_cnt++ >= 2)) {
727 trace_migration_throttle();
728 rs->dirty_rate_high_cnt = 0;
729 mig_throttle_guest_down();
731 rs->bytes_xfer_prev = bytes_xfer_now;
734 if (migrate_use_xbzrle()) {
735 if (rs->iterations_prev != rs->iterations) {
736 rs->xbzrle_cache_miss_rate =
737 (double)(rs->xbzrle_cache_miss -
738 rs->xbzrle_cache_miss_prev) /
739 (rs->iterations - rs->iterations_prev);
741 rs->iterations_prev = rs->iterations;
742 rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
744 rs->dirty_pages_rate = rs->num_dirty_pages_period * 1000
745 / (end_time - rs->time_last_bitmap_sync);
746 rs->time_last_bitmap_sync = end_time;
747 rs->num_dirty_pages_period = 0;
749 if (migrate_use_events()) {
750 qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
755 * save_zero_page: send the zero page to the stream
757 * Returns the number of pages written.
759 * @rs: current RAM state
760 * @block: block that contains the page we want to send
761 * @offset: offset inside the block for the page
762 * @p: pointer to the page
764 static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
769 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
771 rs->bytes_transferred +=
772 save_page_header(rs, block, offset | RAM_SAVE_FLAG_COMPRESS);
773 qemu_put_byte(rs->f, 0);
774 rs->bytes_transferred += 1;
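/*
 * The resulting zero-page record is tiny; a sketch of its layout:
 *   <page header from save_page_header(), with RAM_SAVE_FLAG_COMPRESS set>
 *   byte   0   (the fill character the destination memsets the page with)
 */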
781 static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
783 if (!migrate_release_ram() || !migration_in_postcopy()) {
787 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
791 * ram_save_page: send the given page to the stream
793 * Returns the number of pages written.
795 * >=0 - Number of pages written - this might legally be 0
796 * if xbzrle noticed the page was the same.
798 * @rs: current RAM state
799 * @block: block that contains the page we want to send
800 * @offset: offset inside the block for the page
801 * @last_stage: if we are at the completion stage
803 static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
807 ram_addr_t current_addr;
810 bool send_async = true;
811 RAMBlock *block = pss->block;
812 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
814 p = block->host + offset;
816 /* When in doubt, send the page as normal */
818 ret = ram_control_save_page(rs->f, block->offset,
819 offset, TARGET_PAGE_SIZE, &bytes_xmit);
821 rs->bytes_transferred += bytes_xmit;
827 current_addr = block->offset + offset;
829 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
830 if (ret != RAM_SAVE_CONTROL_DELAYED) {
831 if (bytes_xmit > 0) {
833 } else if (bytes_xmit == 0) {
838 pages = save_zero_page(rs, block, offset, p);
840 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
841 * page would be stale
843 xbzrle_cache_zero_page(rs, current_addr);
844 ram_release_pages(block->idstr, offset, pages);
845 } else if (!rs->ram_bulk_stage &&
846 !migration_in_postcopy() && migrate_use_xbzrle()) {
847 pages = save_xbzrle_page(rs, &p, current_addr, block,
850 /* Can't send this cached data async, since the cache page
851 * might get updated before it gets to the wire
858 /* XBZRLE overflow or normal page */
860 rs->bytes_transferred += save_page_header(rs, block,
861 offset | RAM_SAVE_FLAG_PAGE);
863 qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
864 migrate_release_ram() &
865 migration_in_postcopy());
867 qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
869 rs->bytes_transferred += TARGET_PAGE_SIZE;
874 XBZRLE_cache_unlock();
879 static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
882 RAMState *rs = &ram_state;
883 int bytes_sent, blen;
884 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
886 bytes_sent = save_page_header(rs, block, offset |
887 RAM_SAVE_FLAG_COMPRESS_PAGE);
888 blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
889 migrate_compress_level());
892 qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
893 error_report("compressed data failed!");
896 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
902 static void flush_compressed_data(RAMState *rs)
904 int idx, len, thread_count;
906 if (!migrate_use_compression()) {
909 thread_count = migrate_compress_threads();
911 qemu_mutex_lock(&comp_done_lock);
912 for (idx = 0; idx < thread_count; idx++) {
913 while (!comp_param[idx].done) {
914 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
917 qemu_mutex_unlock(&comp_done_lock);
919 for (idx = 0; idx < thread_count; idx++) {
920 qemu_mutex_lock(&comp_param[idx].mutex);
921 if (!comp_param[idx].quit) {
922 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
923 rs->bytes_transferred += len;
925 qemu_mutex_unlock(&comp_param[idx].mutex);
929 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
932 param->block = block;
933 param->offset = offset;
936 static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
939 int idx, thread_count, bytes_xmit = -1, pages = -1;
941 thread_count = migrate_compress_threads();
942 qemu_mutex_lock(&comp_done_lock);
944 for (idx = 0; idx < thread_count; idx++) {
945 if (comp_param[idx].done) {
946 comp_param[idx].done = false;
947 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
948 qemu_mutex_lock(&comp_param[idx].mutex);
949 set_compress_params(&comp_param[idx], block, offset);
950 qemu_cond_signal(&comp_param[idx].cond);
951 qemu_mutex_unlock(&comp_param[idx].mutex);
954 rs->bytes_transferred += bytes_xmit;
961 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
964 qemu_mutex_unlock(&comp_done_lock);
970 * ram_save_compressed_page: compress the given page and send it to the stream
972 * Returns the number of pages written.
974 * @rs: current RAM state
975 * @block: block that contains the page we want to send
976 * @offset: offset inside the block for the page
977 * @last_stage: if we are at the completion stage
979 static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
983 uint64_t bytes_xmit = 0;
986 RAMBlock *block = pss->block;
987 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
989 p = block->host + offset;
991 ret = ram_control_save_page(rs->f, block->offset,
992 offset, TARGET_PAGE_SIZE, &bytes_xmit);
994 rs->bytes_transferred += bytes_xmit;
997 if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
998 if (ret != RAM_SAVE_CONTROL_DELAYED) {
999 if (bytes_xmit > 0) {
1001 } else if (bytes_xmit == 0) {
1006 /* When starting to process a new block, the first page of
1007 * the block should be sent out before other pages in the same
1008 * block, and all the pages in the last block should have been sent
1009 * out already. Keeping this order is important, because the 'cont' flag
1010 * is used to avoid resending the block name.
1012 if (block != rs->last_sent_block) {
1013 flush_compressed_data(rs);
1014 pages = save_zero_page(rs, block, offset, p);
1016 /* Make sure the first page is sent out before other pages */
1017 bytes_xmit = save_page_header(rs, block, offset |
1018 RAM_SAVE_FLAG_COMPRESS_PAGE);
1019 blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
1020 migrate_compress_level());
1022 rs->bytes_transferred += bytes_xmit + blen;
1026 qemu_file_set_error(rs->f, blen);
1027 error_report("compressed data failed!");
1031 ram_release_pages(block->idstr, offset, pages);
1034 pages = save_zero_page(rs, block, offset, p);
1036 pages = compress_page_with_multi_thread(rs, block, offset);
1038 ram_release_pages(block->idstr, offset, pages);
1047 * find_dirty_block: find the next dirty page and update any state
1048 * associated with the search process.
1050 * Returns whether a dirty page was found
1052 * @rs: current RAM state
1053 * @pss: data about the state of the current dirty page scan
1054 * @again: set to false if the search has scanned the whole of RAM
1056 static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
1058 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
1059 if (pss->complete_round && pss->block == rs->last_seen_block &&
1060 pss->page >= rs->last_page) {
1062 * We've been once around the RAM and haven't found anything.
1068 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
1069 /* Didn't find anything in this RAM Block */
1071 pss->block = QLIST_NEXT_RCU(pss->block, next);
1073 /* Hit the end of the list */
1074 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1075 /* Flag that we've looped */
1076 pss->complete_round = true;
1077 rs->ram_bulk_stage = false;
1078 if (migrate_use_xbzrle()) {
1079 /* If xbzrle is on, stop using the data compression at this
1080 * point. In theory, xbzrle can do better than compression.
1082 flush_compressed_data(rs);
1085 /* Didn't find anything this time, but try again on the new block */
1089 /* Can go around again, but... */
1091 /* We've found something so probably don't need to */
1097 * unqueue_page: gets a page off the queue
1099 * Helper for 'get_queued_page' - gets a page off the queue
1101 * Returns the block of the page (or NULL if none available)
1103 * @rs: current RAM state
1104 * @offset: used to return the offset within the RAMBlock
1106 static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
1108 RAMBlock *block = NULL;
1110 qemu_mutex_lock(&rs->src_page_req_mutex);
1111 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1112 struct RAMSrcPageRequest *entry =
1113 QSIMPLEQ_FIRST(&rs->src_page_requests);
1115 *offset = entry->offset;
1117 if (entry->len > TARGET_PAGE_SIZE) {
1118 entry->len -= TARGET_PAGE_SIZE;
1119 entry->offset += TARGET_PAGE_SIZE;
1121 memory_region_unref(block->mr);
1122 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1126 qemu_mutex_unlock(&rs->src_page_req_mutex);
1132 * get_queued_page: unqueue a page from the postcopy requests
1134 * Skips pages that are already sent (!dirty)
1136 * Returns whether a queued page was found
1138 * @rs: current RAM state
1139 * @pss: data about the state of the current dirty page scan
1141 static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
1148 block = unqueue_page(rs, &offset);
1150 * We're sending this page, and since it's postcopy nothing else
1151 * will dirty it, and we must make sure it doesn't get sent again
1152 * even if this queue request was received after the background
1153 * search already sent it.
1156 unsigned long *bitmap;
1159 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1160 page = (block->offset + offset) >> TARGET_PAGE_BITS;
1161 dirty = test_bit(page, bitmap);
1163 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
1166 atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
1168 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
1172 } while (block && !dirty);
1176 * As soon as we start servicing pages out of order, we have
1177 * to kill the bulk stage, since the bulk stage assumes
1178 * (in migration_bitmap_find_and_reset_dirty) that every page is
1179 * dirty, which is no longer true.
1181 rs->ram_bulk_stage = false;
1184 * We want the background search to continue from the queued page
1185 * since the guest is likely to want other pages near to the page
1186 * it just requested.
1189 pss->page = offset >> TARGET_PAGE_BITS;
1196 * migration_page_queue_free: drop any remaining pages in the ram request queue
1199 * It should be empty at the end anyway, but in error cases there may
1200 * be some left; if any pages are left, we drop them.
1203 void migration_page_queue_free(void)
1205 struct RAMSrcPageRequest *mspr, *next_mspr;
1206 RAMState *rs = &ram_state;
1207 /* This queue should generally be empty - but in the case of a failed
1208 * migration it might have some leftover entries.
1211 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
1212 memory_region_unref(mspr->rb->mr);
1213 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
1220 * ram_save_queue_pages: queue the page for transmission
1222 * A request from postcopy destination for example.
1224 * Returns zero on success or negative on error
1226 * @rbname: Name of the RAMBlock of the request. NULL means the
1227 * same as the last one.
1228 * @start: starting address from the start of the RAMBlock
1229 * @len: length (in bytes) to send
1231 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
1234 RAMState *rs = &ram_state;
1236 rs->postcopy_requests++;
1239 /* Reuse last RAMBlock */
1240 ramblock = rs->last_req_rb;
1244 * Shouldn't happen, we can't reuse the last RAMBlock if
1245 * it's the 1st request.
1247 error_report("ram_save_queue_pages no previous block");
1251 ramblock = qemu_ram_block_by_name(rbname);
1254 /* We shouldn't be asked for a non-existent RAMBlock */
1255 error_report("ram_save_queue_pages no block '%s'", rbname);
1258 rs->last_req_rb = ramblock;
1260 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1261 if (start+len > ramblock->used_length) {
1262 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1263 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1264 __func__, start, len, ramblock->used_length);
1268 struct RAMSrcPageRequest *new_entry =
1269 g_malloc0(sizeof(struct RAMSrcPageRequest));
1270 new_entry->rb = ramblock;
1271 new_entry->offset = start;
1272 new_entry->len = len;
1274 memory_region_ref(ramblock->mr);
1275 qemu_mutex_lock(&rs->src_page_req_mutex);
1276 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
1277 qemu_mutex_unlock(&rs->src_page_req_mutex);
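/*
 * An illustrative usage sketch, with a hypothetical block name and offset:
 * this is roughly how the source side queues a single target page that the
 * destination has just faulted on during postcopy.
 */
static inline int example_queue_faulted_page(void)
{
    /* ask for one target page at offset 2 MiB of the block "pc.ram" */
    return ram_save_queue_pages("pc.ram", 2 * 1024 * 1024, TARGET_PAGE_SIZE);
}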
1288 * ram_save_target_page: save one target page
1290 * Returns the number of pages written
1292 * @rs: current RAM state
1294 * @pss: data about the page we want to send
1295 * @last_stage: if we are at the completion stage
1297 static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
1302 /* Check if the page is dirty and, if it is, send it */
1303 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
1304 unsigned long *unsentmap;
1306 * If xbzrle is on, stop using the data compression after first
1307 * round of migration even if compression is enabled. In theory,
1308 * xbzrle can do better than compression.
1310 unsigned long page =
1311 (pss->block->offset >> TARGET_PAGE_BITS) + pss->page;
1312 if (migrate_use_compression()
1313 && (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
1314 res = ram_save_compressed_page(rs, pss, last_stage);
1316 res = ram_save_page(rs, pss, last_stage);
1322 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
1324 clear_bit(page, unsentmap);
1332 * ram_save_host_page: save a whole host page
1334 * Starting at *offset, send pages up to the end of the current host
1335 * page. It's valid for the initial offset to point into the middle of
1336 * a host page in which case the remainder of the hostpage is sent.
1337 * Only dirty target pages are sent. Note that the host page size may
1338 * be a huge page for this block.
1340 * Returns the number of pages written or negative on error
1342 * @rs: current RAM state
1344 * @pss: data about the page we want to send
1345 * @last_stage: if we are at the completion stage
1347 static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
1350 int tmppages, pages = 0;
1351 size_t pagesize_bits =
1352 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
1355 tmppages = ram_save_target_page(rs, pss, last_stage);
1362 } while (pss->page & (pagesize_bits - 1));
1364 /* The offset we leave with is the last one we looked at */
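/*
 * A worked example of the loop bound above, with illustrative sizes: for a
 * RAMBlock backed by 2 MiB huge pages and a 4 KiB target page size,
 * pagesize_bits is 512, so one call covers at most 512 target pages and
 * always stops on a host-page boundary.
 */
static inline size_t example_pages_per_host_page(size_t host_page_size,
                                                 size_t target_page_size)
{
    return host_page_size / target_page_size; /* e.g. 2 MiB / 4 KiB = 512 */
}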
1370 * ram_find_and_save_block: finds a dirty page and sends it to f
1372 * Called within an RCU critical section.
1374 * Returns the number of pages written where zero means no dirty pages
1376 * @rs: current RAM state
1377 * @last_stage: if we are at the completion stage
1379 * On systems where host-page-size > target-page-size it will send all the
1380 * pages in a host page that are dirty.
1383 static int ram_find_and_save_block(RAMState *rs, bool last_stage)
1385 PageSearchStatus pss;
1389 /* No dirty page as there is zero RAM */
1390 if (!ram_bytes_total()) {
1394 pss.block = rs->last_seen_block;
1395 pss.page = rs->last_page;
1396 pss.complete_round = false;
1399 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1404 found = get_queued_page(rs, &pss);
1407 /* priority queue empty, so just search for something dirty */
1408 found = find_dirty_block(rs, &pss, &again);
1412 pages = ram_save_host_page(rs, &pss, last_stage);
1414 } while (!pages && again);
1416 rs->last_seen_block = pss.block;
1417 rs->last_page = pss.page;
1422 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1424 uint64_t pages = size / TARGET_PAGE_SIZE;
1425 RAMState *rs = &ram_state;
1428 rs->zero_pages += pages;
1430 rs->norm_pages += pages;
1431 rs->bytes_transferred += size;
1432 qemu_update_position(f, size);
1436 uint64_t ram_bytes_total(void)
1442 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1443 total += block->used_length;
1448 void free_xbzrle_decoded_buf(void)
1450 g_free(xbzrle_decoded_buf);
1451 xbzrle_decoded_buf = NULL;
1454 static void migration_bitmap_free(RAMBitmap *bmap)
1457 g_free(bmap->unsentmap);
1461 static void ram_migration_cleanup(void *opaque)
1463 RAMState *rs = opaque;
1465 /* the caller must hold the iothread lock or be in a bh, so there is
1466 * no write race against this migration_bitmap
1468 RAMBitmap *bitmap = rs->ram_bitmap;
1469 atomic_rcu_set(&rs->ram_bitmap, NULL);
1471 memory_global_dirty_log_stop();
1472 call_rcu(bitmap, migration_bitmap_free, rcu);
1475 XBZRLE_cache_lock();
1477 cache_fini(XBZRLE.cache);
1478 g_free(XBZRLE.encoded_buf);
1479 g_free(XBZRLE.current_buf);
1480 g_free(ZERO_TARGET_PAGE);
1481 XBZRLE.cache = NULL;
1482 XBZRLE.encoded_buf = NULL;
1483 XBZRLE.current_buf = NULL;
1485 XBZRLE_cache_unlock();
1488 static void ram_state_reset(RAMState *rs)
1490 rs->last_seen_block = NULL;
1491 rs->last_sent_block = NULL;
1493 rs->last_version = ram_list.version;
1494 rs->ram_bulk_stage = true;
1497 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1500 * 'expected' is the value you expect the bitmap mostly to be full
1501 * of; it won't bother printing lines that are all this value.
1502 * If 'todump' is null the migration bitmap is dumped.
1504 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1506 unsigned long ram_pages = last_ram_page();
1507 RAMState *rs = &ram_state;
1509 int64_t linelen = 128;
1513 todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1516 for (cur = 0; cur < ram_pages; cur += linelen) {
1520 * Last line; catch the case where the line length
1521 * is longer than remaining ram
1523 if (cur + linelen > ram_pages) {
1524 linelen = ram_pages - cur;
1526 for (curb = 0; curb < linelen; curb++) {
1527 bool thisbit = test_bit(cur + curb, todump);
1528 linebuf[curb] = thisbit ? '1' : '.';
1529 found = found || (thisbit != expected);
1532 linebuf[curb] = '\0';
1533 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1538 /* **** functions for postcopy ***** */
1540 void ram_postcopy_migrated_memory_release(MigrationState *ms)
1542 RAMState *rs = &ram_state;
1543 struct RAMBlock *block;
1544 unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1546 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1547 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1548 unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
1549 unsigned long run_start = find_next_zero_bit(bitmap, range, first);
1551 while (run_start < range) {
1552 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
1553 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
1554 (run_end - run_start) << TARGET_PAGE_BITS);
1555 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
1561 * postcopy_send_discard_bm_ram: discard a RAMBlock
1563 * Returns zero on success
1565 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1566 * Note: At this point the 'unsentmap' is the processed bitmap combined
1567 * with the dirtymap; so a '1' means it's either dirty or unsent.
1569 * @ms: current migration state
1570 * @pds: state for postcopy
1571 * @start: RAMBlock starting page
1572 * @length: RAMBlock size
1574 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1575 PostcopyDiscardState *pds,
1576 unsigned long start,
1577 unsigned long length)
1579 RAMState *rs = &ram_state;
1580 unsigned long end = start + length; /* one after the end */
1581 unsigned long current;
1582 unsigned long *unsentmap;
1584 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
1585 for (current = start; current < end; ) {
1586 unsigned long one = find_next_bit(unsentmap, end, current);
1589 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1590 unsigned long discard_length;
1593 discard_length = end - one;
1595 discard_length = zero - one;
1597 if (discard_length) {
1598 postcopy_discard_send_range(ms, pds, one, discard_length);
1600 current = one + discard_length;
1610 * postcopy_each_ram_send_discard: discard all RAMBlocks
1612 * Returns 0 for success or negative for error
1614 * Utility for the outgoing postcopy code.
1615 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1616 * passing it bitmap indexes and name.
1617 * (qemu_ram_foreach_block ends up passing unscaled lengths
1618 * which would mean postcopy code would have to deal with target page)
1620 * @ms: current migration state
1622 static int postcopy_each_ram_send_discard(MigrationState *ms)
1624 struct RAMBlock *block;
1627 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1628 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1629 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1634 * Postcopy sends chunks of bitmap over the wire, but it
1635 * just needs indexes at this point, which avoids it having
1636 * target page specific code.
1638 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1639 block->used_length >> TARGET_PAGE_BITS);
1640 postcopy_discard_send_finish(ms, pds);
1650 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
1652 * Helper for postcopy_chunk_hostpages; it's called twice to
1653 * canonicalize the two bitmaps (unsent and dirty), which are similar but scanned with opposite polarity.
1656 * Postcopy requires that all target pages in a hostpage are dirty or
1657 * clean, not a mix. This function canonicalizes the bitmaps.
1659 * @ms: current migration state
1660 * @unsent_pass: if true we need to canonicalize partially unsent host pages
1661 * otherwise we need to canonicalize partially dirty host pages
1662 * @block: block that contains the page we want to canonicalize
1663 * @pds: state for postcopy
1665 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1667 PostcopyDiscardState *pds)
1669 RAMState *rs = &ram_state;
1670 unsigned long *bitmap;
1671 unsigned long *unsentmap;
1672 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
1673 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1674 unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1675 unsigned long last = first + (len - 1);
1676 unsigned long run_start;
1678 if (block->page_size == TARGET_PAGE_SIZE) {
1679 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
1683 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1684 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
1687 /* Find a sent page */
1688 run_start = find_next_zero_bit(unsentmap, last + 1, first);
1690 /* Find a dirty page */
1691 run_start = find_next_bit(bitmap, last + 1, first);
1694 while (run_start <= last) {
1695 bool do_fixup = false;
1696 unsigned long fixup_start_addr;
1697 unsigned long host_offset;
1700 * If the start of this run of pages is in the middle of a host
1701 * page, then we need to fixup this host page.
1703 host_offset = run_start % host_ratio;
1706 run_start -= host_offset;
1707 fixup_start_addr = run_start;
1708 /* For the next pass */
1709 run_start = run_start + host_ratio;
1711 /* Find the end of this run */
1712 unsigned long run_end;
1714 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1716 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1719 * If the end isn't at the start of a host page, then the
1720 * run doesn't finish at the end of a host page
1721 * and we need to discard.
1723 host_offset = run_end % host_ratio;
1726 fixup_start_addr = run_end - host_offset;
1728 * This host page has gone, the next loop iteration starts
1729 * from after the fixup
1731 run_start = fixup_start_addr + host_ratio;
1734 * No discards on this iteration, next loop starts from
1735 * next sent/dirty page
1737 run_start = run_end + 1;
1744 /* Tell the destination to discard this page */
1745 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1746 /* For the unsent_pass we:
1747 * discard partially sent pages
1748 * For the !unsent_pass (dirty) we:
1749 * discard partially dirty pages that were sent
1750 * (any partially sent pages were already discarded
1751 * by the previous unsent_pass)
1753 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1757 /* Clean up the bitmap */
1758 for (page = fixup_start_addr;
1759 page < fixup_start_addr + host_ratio; page++) {
1760 /* All pages in this host page are now not sent */
1761 set_bit(page, unsentmap);
1764 * Remark them as dirty, updating the count for any pages
1765 * that weren't previously dirty.
1767 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
1772 /* Find the next sent page for the next iteration */
1773 run_start = find_next_zero_bit(unsentmap, last + 1,
1776 /* Find the next dirty page for the next iteration */
1777 run_start = find_next_bit(bitmap, last + 1, run_start);
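/*
 * A worked example of the fixup above, with illustrative numbers: with
 * 2 MiB host pages and 4 KiB target pages, host_ratio is 512.  A run
 * starting at target page 700 has host_offset = 700 % 512 = 188, so
 * run_start is pulled back to 512, the whole host page covering target
 * pages [512, 1024) is marked unsent and dirty again, and a discard for
 * it is sent to the destination when needed.
 */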
1783 * postcopy_chunk_hostpages: discard any partially sent host page
1785 * Utility for the outgoing postcopy code.
1787 * Discard any partially sent host-page size chunks, mark any partially
1788 * dirty host-page size chunks as all dirty. In this case the host-page
1789 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
1791 * Returns zero on success
1793 * @ms: current migration state
1795 static int postcopy_chunk_hostpages(MigrationState *ms)
1797 RAMState *rs = &ram_state;
1798 struct RAMBlock *block;
1800 /* Easiest way to make sure we don't resume in the middle of a host-page */
1801 rs->last_seen_block = NULL;
1802 rs->last_sent_block = NULL;
1805 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1806 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1808 PostcopyDiscardState *pds =
1809 postcopy_discard_send_init(ms, first, block->idstr);
1811 /* First pass: Discard all partially sent host pages */
1812 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1814 * Second pass: Ensure that all partially dirty host pages are made fully dirty
1817 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1819 postcopy_discard_send_finish(ms, pds);
1820 } /* ram_list loop */
1826 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
1828 * Returns zero on success
1830 * Transmit the set of pages to be discarded after precopy to the target;
1831 * these are pages that:
1832 * a) have been previously transmitted but are now dirty again
1833 * b) have never been transmitted; this ensures that
1834 * any pages on the destination that have been mapped by background
1835 * tasks get discarded (transparent huge pages are the specific concern)
1836 * Hopefully this set is pretty sparse.
1838 * @ms: current migration state
1840 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1842 RAMState *rs = &ram_state;
1844 unsigned long *bitmap, *unsentmap;
1848 /* This should be our last sync, the src is now paused */
1849 migration_bitmap_sync(rs);
1851 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
1853 /* We don't have a safe way to resize the sentmap, so
1854 * if the bitmap was resized it will be NULL at this point.
1857 error_report("migration ram resized during precopy phase");
1862 /* Deal with TPS != HPS and huge pages */
1863 ret = postcopy_chunk_hostpages(ms);
1870 * Update the unsentmap to be unsentmap = unsentmap | dirty
1872 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1873 bitmap_or(unsentmap, unsentmap, bitmap, last_ram_page());
1876 trace_ram_postcopy_send_discard_bitmap();
1877 #ifdef DEBUG_POSTCOPY
1878 ram_debug_dump_bitmap(unsentmap, true);
1881 ret = postcopy_each_ram_send_discard(ms);
1888 * ram_discard_range: discard dirtied pages at the beginning of postcopy
1890 * Returns zero on success
1892 * @rbname: name of the RAMBlock of the request. NULL means the
1893 * same as the last one.
1894 * @start: RAMBlock starting page
1895 * @length: RAMBlock size
1897 int ram_discard_range(const char *rbname, uint64_t start, size_t length)
1901 trace_ram_discard_range(rbname, start, length);
1904 RAMBlock *rb = qemu_ram_block_by_name(rbname);
1907 error_report("ram_discard_range: Failed to find block '%s'", rbname);
1911 ret = ram_block_discard_range(rb, start, length);
1919 static int ram_state_init(RAMState *rs)
1921 unsigned long ram_bitmap_pages;
1923 memset(rs, 0, sizeof(*rs));
1924 qemu_mutex_init(&rs->bitmap_mutex);
1925 qemu_mutex_init(&rs->src_page_req_mutex);
1926 QSIMPLEQ_INIT(&rs->src_page_requests);
1928 if (migrate_use_xbzrle()) {
1929 XBZRLE_cache_lock();
1930 ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
1931 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1934 if (!XBZRLE.cache) {
1935 XBZRLE_cache_unlock();
1936 error_report("Error creating cache");
1939 XBZRLE_cache_unlock();
1941 /* We prefer not to abort if there is no memory */
1942 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1943 if (!XBZRLE.encoded_buf) {
1944 error_report("Error allocating encoded_buf");
1948 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1949 if (!XBZRLE.current_buf) {
1950 error_report("Error allocating current_buf");
1951 g_free(XBZRLE.encoded_buf);
1952 XBZRLE.encoded_buf = NULL;
1957 /* For memory_global_dirty_log_start below. */
1958 qemu_mutex_lock_iothread();
1960 qemu_mutex_lock_ramlist();
1962 ram_state_reset(rs);
1964 rs->ram_bitmap = g_new0(RAMBitmap, 1);
1965 /* Skip setting bitmap if there is no RAM */
1966 if (ram_bytes_total()) {
1967 ram_bitmap_pages = last_ram_page();
1968 rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
1969 bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
1971 if (migrate_postcopy_ram()) {
1972 rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
1973 bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
1978 * Count the total number of pages used by ram blocks not including any
1979 * gaps due to alignment or unplugs.
1981 rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1983 memory_global_dirty_log_start();
1984 migration_bitmap_sync(rs);
1985 qemu_mutex_unlock_ramlist();
1986 qemu_mutex_unlock_iothread();
1993 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
1994 * a long-running RCU critical section. When rcu-reclaims in the code
1995 * start to become numerous it will be necessary to reduce the
1996 * granularity of these critical sections.
2000 * ram_save_setup: Setup RAM for migration
2002 * Returns zero to indicate success and negative for error
2004 * @f: QEMUFile where to send the data
2005 * @opaque: RAMState pointer
2007 static int ram_save_setup(QEMUFile *f, void *opaque)
2009 RAMState *rs = opaque;
2012 /* migration has already set up the bitmap, reuse it. */
2013 if (!migration_in_colo_state()) {
2014 if (ram_state_init(rs) < 0) {
2022 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2024 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2025 qemu_put_byte(f, strlen(block->idstr));
2026 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2027 qemu_put_be64(f, block->used_length);
2028 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2029 qemu_put_be64(f, block->page_size);
2035 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2036 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2038 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
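/*
 * A sketch of the setup-stage stream written above:
 *   be64   total ram size | RAM_SAVE_FLAG_MEM_SIZE
 *   for each RAMBlock:
 *     byte   strlen(idstr)
 *     bytes  idstr
 *     be64   used_length
 *     be64   page_size   (only with postcopy-ram and a non-host page size)
 *   be64   RAM_SAVE_FLAG_EOS
 */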
2044 * ram_save_iterate: iterative stage for migration
2046 * Returns zero to indicate success and negative for error
2048 * @f: QEMUFile where to send the data
2049 * @opaque: RAMState pointer
2051 static int ram_save_iterate(QEMUFile *f, void *opaque)
2053 RAMState *rs = opaque;
2060 if (ram_list.version != rs->last_version) {
2061 ram_state_reset(rs);
2064 /* Read version before ram_list.blocks */
2067 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2069 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2071 while ((ret = qemu_file_rate_limit(f)) == 0) {
2074 pages = ram_find_and_save_block(rs, false);
2075 /* no more pages to send */
2082 /* we want to check in the 1st loop, just in case it was the 1st time
2083 and we had to sync the dirty bitmap.
2084 qemu_clock_get_ns() is a bit expensive, so we only check every few iterations
2087 if ((i & 63) == 0) {
2088 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2089 if (t1 > MAX_WAIT) {
2090 trace_ram_save_iterate_big_wait(t1, i);
2096 flush_compressed_data(rs);
2100 * Must occur before EOS (or any QEMUFile operation)
2101 * because of RDMA protocol.
2103 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2105 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2106 rs->bytes_transferred += 8;
2108 ret = qemu_file_get_error(f);
2117 * ram_save_complete: function called to send the remaining amount of ram
2119 * Returns zero to indicate success
2121 * Called with iothread lock
2123 * @f: QEMUFile where to send the data
2124 * @opaque: RAMState pointer
2126 static int ram_save_complete(QEMUFile *f, void *opaque)
2128 RAMState *rs = opaque;
2132 if (!migration_in_postcopy()) {
2133 migration_bitmap_sync(rs);
2136 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2138 /* try transferring iterative blocks of memory */
2140 /* flush all remaining blocks regardless of rate limiting */
2144 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
2145 /* no more blocks to send */
2151 flush_compressed_data(rs);
2152 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2156 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2161 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2162 uint64_t *non_postcopiable_pending,
2163 uint64_t *postcopiable_pending)
2165 RAMState *rs = opaque;
2166 uint64_t remaining_size;
2168 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
2170 if (!migration_in_postcopy() &&
2171 remaining_size < max_size) {
2172 qemu_mutex_lock_iothread();
2174 migration_bitmap_sync(rs);
2176 qemu_mutex_unlock_iothread();
2177 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
2180 /* We can do postcopy, and all the data is postcopiable */
2181 *postcopiable_pending += remaining_size;
2184 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2186 unsigned int xh_len;
2188 uint8_t *loaded_data;
2190 if (!xbzrle_decoded_buf) {
2191 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2193 loaded_data = xbzrle_decoded_buf;
2195 /* extract RLE header */
2196 xh_flags = qemu_get_byte(f);
2197 xh_len = qemu_get_be16(f);
2199 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2200 error_report("Failed to load XBZRLE page - wrong compression!");
2204 if (xh_len > TARGET_PAGE_SIZE) {
2205 error_report("Failed to load XBZRLE page - len overflow!");
2208 /* load data and decode */
2209 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2212 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
2213 TARGET_PAGE_SIZE) == -1) {
2214 error_report("Failed to load XBZRLE page - decode error!");
2222 * ram_block_from_stream: read a RAMBlock id from the migration stream
2224 * Must be called from within a rcu critical section.
2226 * Returns a pointer from within the RCU-protected ram_list.
2228 * @f: QEMUFile where to read the data from
2229 * @flags: Page flags (mostly to see if it's a continuation of previous block)
2231 static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
2233 static RAMBlock *block = NULL;
2237 if (flags & RAM_SAVE_FLAG_CONTINUE) {
2239 error_report("Ack, bad migration stream!");
2245 len = qemu_get_byte(f);
2246 qemu_get_buffer(f, (uint8_t *)id, len);
2249 block = qemu_ram_block_by_name(id);
2251 error_report("Can't find block %s", id);
2258 static inline void *host_from_ram_block_offset(RAMBlock *block,
2261 if (!offset_in_ramblock(block, offset)) {
2265 return block->host + offset;
2269 * ram_handle_compressed: handle the zero page case
2271 * If a page (or a whole RDMA chunk) has been
2272 * determined to be zero, then zap it.
2274 * @host: host address for the zero page
2275 * @ch: what the page is filled from. We only support zero
2276 * @size: size of the zero page
2278 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2280 if (ch != 0 || !is_zero_range(host, size)) {
2281 memset(host, ch, size);
2285 static void *do_data_decompress(void *opaque)
2287 DecompressParam *param = opaque;
2288 unsigned long pagesize;
2292 qemu_mutex_lock(¶m->mutex);
2293 while (!param->quit) {
2298 qemu_mutex_unlock(¶m->mutex);
2300 pagesize = TARGET_PAGE_SIZE;
2301 /* uncompress() can fail in some cases, especially
2302 * when the page is dirtied while it is being compressed. That's
2303 * not a problem because the dirty page will be retransferred
2304 * and uncompress() won't break the data in other pages.
2306 uncompress((Bytef *)des, &pagesize,
2307 (const Bytef *)param->compbuf, len);
2309 qemu_mutex_lock(&decomp_done_lock);
2311 qemu_cond_signal(&decomp_done_cond);
2312 qemu_mutex_unlock(&decomp_done_lock);
2314 qemu_mutex_lock(¶m->mutex);
2316 qemu_cond_wait(¶m->cond, ¶m->mutex);
2319 qemu_mutex_unlock(¶m->mutex);
2324 static void wait_for_decompress_done(void)
2326 int idx, thread_count;
2328 if (!migrate_use_compression()) {
2332 thread_count = migrate_decompress_threads();
2333 qemu_mutex_lock(&decomp_done_lock);
2334 for (idx = 0; idx < thread_count; idx++) {
2335 while (!decomp_param[idx].done) {
2336 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2339 qemu_mutex_unlock(&decomp_done_lock);
void migrate_decompress_threads_create(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    qemu_mutex_init(&decomp_done_lock);
    qemu_cond_init(&decomp_done_cond);
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        decomp_param[i].done = true;
        decomp_param[i].quit = false;
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
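
/*
 * Reader's note: each worker starts with done = true, i.e. "idle", so the
 * very first call to decompress_data_with_multi_threads() below can hand a
 * page to thread 0 without ever waiting on decomp_done_cond.
 */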
void migrate_decompress_threads_join(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_lock(&decomp_param[i].mutex);
        decomp_param[i].quit = true;
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        g_free(decomp_param[i].compbuf);
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
}
static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                decomp_param[idx].done = false;
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        } else {
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
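
/*
 * Dispatch sketch (restating the loop above): scan the workers for one with
 * done == true, hand it the compressed buffer and target address under its
 * mutex, and signal its condition variable; if every worker is busy
 * (idx == thread_count after the scan), sleep on decomp_done_cond until one
 * finishes, then retry.  decomp_done_lock is held across the whole search so
 * a worker's 'done' transition cannot be missed.
 */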
/**
 * ram_postcopy_incoming_init: allocate postcopy data structures
 *
 * Returns 0 for success and negative if there was an error
 *
 * @mis: current migration incoming state
 *
 * Allocate data structures etc needed by incoming migration with
 * postcopy-ram. postcopy-ram's similarly named
 * postcopy_ram_incoming_init does the work.
 */
int ram_postcopy_incoming_init(MigrationIncomingState *mis)
{
    unsigned long ram_pages = last_ram_page();

    return postcopy_ram_incoming_init(mis, ram_pages);
}
/**
 * ram_load_postcopy: load a page in postcopy case
 *
 * Returns 0 for success or -errno in case of error
 *
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 *
 * @f: QEMUFile to receive the data from
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matching_page_sizes = false;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages;
             * however, the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
            /* If all TP are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page.
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /* Avoids the qemu_file copy during postcopy, which is
                 * going to do a copy later; can only do it when we
                 * do this read in one go (matching page sizes)
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block->page_size);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block->page_size);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
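
/*
 * Worked example (assuming a 2 MiB hugetlbfs-backed RAMBlock and 4 KiB
 * target pages; these sizes are illustrative, not fixed by the code above):
 * each host page is made of 512 target pages.  The first one, for which
 * (host & (block->page_size - 1)) == 0, starts a fresh temporary page and
 * sets all_zero; the following 511 must arrive at host == last_host + 4 KiB;
 * place_needed only becomes true for the 512th, at which point the whole
 * 2 MiB is placed atomically with postcopy_place_page() (or the zero-page
 * variant).  With matching page sizes, every target page is also the last
 * one in its host page, so placement happens on every iteration.
 */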
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If the system is running in postcopy mode, page inserts to host memory
     * must be atomic.
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
    /* ADVISE is earlier, it shows the source has the postcopy capability on */
    bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;

    seq_iter++;
    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();
    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);
            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zd != %" PRId64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }
                total_ram_bytes -= length;
            }
            break;
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;
        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
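
/*
 * Reader's note on the stream format handled above: every chunk starts with
 * a big-endian 64-bit word whose low bits (below TARGET_PAGE_MASK) carry the
 * RAM_SAVE_FLAG_* bits and whose upper bits are the page-aligned offset
 * within the current RAMBlock, which is why the loader splits it as
 *
 *     addr  = qemu_get_be64(f);
 *     flags = addr & ~TARGET_PAGE_MASK;
 *     addr &= TARGET_PAGE_MASK;
 *
 * RAM_SAVE_FLAG_CONTINUE simply means "same RAMBlock as the previous page",
 * which lets ram_block_from_stream() skip re-reading the block id.
 */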
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}
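
/*
 * Reader's note: the version number 4 passed to register_savevm_live() here
 * is the same value ram_load() insists on above (version_id != 4 fails the
 * load), so both sides of a migration must agree on the "ram" section
 * version.
 */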