[qemu.git] / migration / ram.c
1 /*
2  * QEMU System Emulator
3  *
4  * Copyright (c) 2003-2008 Fabrice Bellard
5  * Copyright (c) 2011-2015 Red Hat Inc
6  *
7  * Authors:
8  *  Juan Quintela <[email protected]>
9  *
10  * Permission is hereby granted, free of charge, to any person obtaining a copy
11  * of this software and associated documentation files (the "Software"), to deal
12  * in the Software without restriction, including without limitation the rights
13  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
14  * copies of the Software, and to permit persons to whom the Software is
15  * furnished to do so, subject to the following conditions:
16  *
17  * The above copyright notice and this permission notice shall be included in
18  * all copies or substantial portions of the Software.
19  *
20  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
23  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
25  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
26  * THE SOFTWARE.
27  */
28 #include "qemu/osdep.h"
29 #include "qemu-common.h"
30 #include "cpu.h"
31 #include <zlib.h>
32 #include "qapi-event.h"
33 #include "qemu/cutils.h"
34 #include "qemu/bitops.h"
35 #include "qemu/bitmap.h"
36 #include "qemu/timer.h"
37 #include "qemu/main-loop.h"
38 #include "migration/migration.h"
39 #include "migration/postcopy-ram.h"
40 #include "exec/address-spaces.h"
41 #include "migration/page_cache.h"
42 #include "qemu/error-report.h"
43 #include "trace.h"
44 #include "exec/ram_addr.h"
45 #include "qemu/rcu_queue.h"
46
47 #ifdef DEBUG_MIGRATION_RAM
48 #define DPRINTF(fmt, ...) \
49     do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
50 #else
51 #define DPRINTF(fmt, ...) \
52     do { } while (0)
53 #endif
54
55 static int dirty_rate_high_cnt;
56
57 static uint64_t bitmap_sync_count;
58
59 /***********************************************************/
60 /* ram save/restore */
61
62 #define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
63 #define RAM_SAVE_FLAG_COMPRESS 0x02
64 #define RAM_SAVE_FLAG_MEM_SIZE 0x04
65 #define RAM_SAVE_FLAG_PAGE     0x08
66 #define RAM_SAVE_FLAG_EOS      0x10
67 #define RAM_SAVE_FLAG_CONTINUE 0x20
68 #define RAM_SAVE_FLAG_XBZRLE   0x40
69 /* 0x80 is reserved in migration.h; start with 0x100 next */
70 #define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100
71
72 static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];
73
74 static inline bool is_zero_range(uint8_t *p, uint64_t size)
75 {
76     return buffer_find_nonzero_offset(p, size) == size;
77 }
78
79 /* This struct contains the XBZRLE cache and a static page
80    used by the compression */
81 static struct {
82     /* buffer used for XBZRLE encoding */
83     uint8_t *encoded_buf;
84     /* buffer for storing page content */
85     uint8_t *current_buf;
86     /* Cache for XBZRLE, Protected by lock. */
87     PageCache *cache;
88     QemuMutex lock;
89 } XBZRLE;
90
91 /* buffer used for XBZRLE decoding */
92 static uint8_t *xbzrle_decoded_buf;
93
94 static void XBZRLE_cache_lock(void)
95 {
96     if (migrate_use_xbzrle())
97         qemu_mutex_lock(&XBZRLE.lock);
98 }
99
100 static void XBZRLE_cache_unlock(void)
101 {
102     if (migrate_use_xbzrle())
103         qemu_mutex_unlock(&XBZRLE.lock);
104 }
105
106 /*
107  * Called from qmp_migrate_set_cache_size in the main thread, possibly while
108  * a migration is in progress.
109  * A running migration may be using the cache and might finish during this
110  * call, hence changes to the cache are protected by XBZRLE.lock.
111  */
112 int64_t xbzrle_cache_resize(int64_t new_size)
113 {
114     PageCache *new_cache;
115     int64_t ret;
116
117     if (new_size < TARGET_PAGE_SIZE) {
118         return -1;
119     }
120
121     XBZRLE_cache_lock();
122
123     if (XBZRLE.cache != NULL) {
124         if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
125             goto out_new_size;
126         }
127         new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
128                                         TARGET_PAGE_SIZE);
129         if (!new_cache) {
130             error_report("Error creating cache");
131             ret = -1;
132             goto out;
133         }
134
135         cache_fini(XBZRLE.cache);
136         XBZRLE.cache = new_cache;
137     }
138
139 out_new_size:
140     ret = pow2floor(new_size);
141 out:
142     XBZRLE_cache_unlock();
143     return ret;
144 }
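/* Illustrative usage sketch (not part of the original source): a QMP handler
 * such as qmp_migrate_set_cache_size() is assumed to call this roughly as
 * follows; the return value is the actual cache size in bytes (floored to a
 * power of two), or -1 if the requested size is too small or allocation fails:
 *
 *     int64_t actual = xbzrle_cache_resize(512 * 1024 * 1024);
 *     if (actual < 0) {
 *         error_setg(errp, "xbzrle cache resize failed");
 *     }
 */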
145
146 /* accounting for migration statistics */
147 typedef struct AccountingInfo {
148     uint64_t dup_pages;
149     uint64_t skipped_pages;
150     uint64_t norm_pages;
151     uint64_t iterations;
152     uint64_t xbzrle_bytes;
153     uint64_t xbzrle_pages;
154     uint64_t xbzrle_cache_miss;
155     double xbzrle_cache_miss_rate;
156     uint64_t xbzrle_overflows;
157 } AccountingInfo;
158
159 static AccountingInfo acct_info;
160
161 static void acct_clear(void)
162 {
163     memset(&acct_info, 0, sizeof(acct_info));
164 }
165
166 uint64_t dup_mig_bytes_transferred(void)
167 {
168     return acct_info.dup_pages * TARGET_PAGE_SIZE;
169 }
170
171 uint64_t dup_mig_pages_transferred(void)
172 {
173     return acct_info.dup_pages;
174 }
175
176 uint64_t skipped_mig_bytes_transferred(void)
177 {
178     return acct_info.skipped_pages * TARGET_PAGE_SIZE;
179 }
180
181 uint64_t skipped_mig_pages_transferred(void)
182 {
183     return acct_info.skipped_pages;
184 }
185
186 uint64_t norm_mig_bytes_transferred(void)
187 {
188     return acct_info.norm_pages * TARGET_PAGE_SIZE;
189 }
190
191 uint64_t norm_mig_pages_transferred(void)
192 {
193     return acct_info.norm_pages;
194 }
195
196 uint64_t xbzrle_mig_bytes_transferred(void)
197 {
198     return acct_info.xbzrle_bytes;
199 }
200
201 uint64_t xbzrle_mig_pages_transferred(void)
202 {
203     return acct_info.xbzrle_pages;
204 }
205
206 uint64_t xbzrle_mig_pages_cache_miss(void)
207 {
208     return acct_info.xbzrle_cache_miss;
209 }
210
211 double xbzrle_mig_cache_miss_rate(void)
212 {
213     return acct_info.xbzrle_cache_miss_rate;
214 }
215
216 uint64_t xbzrle_mig_pages_overflow(void)
217 {
218     return acct_info.xbzrle_overflows;
219 }
220
221 /* This is the last block that we have visited searching for dirty pages
222  */
223 static RAMBlock *last_seen_block;
224 /* This is the last block from where we have sent data */
225 static RAMBlock *last_sent_block;
226 static ram_addr_t last_offset;
227 static QemuMutex migration_bitmap_mutex;
228 static uint64_t migration_dirty_pages;
229 static uint32_t last_version;
230 static bool ram_bulk_stage;
231
232 /* used by the search for pages to send */
233 struct PageSearchStatus {
234     /* Current block being searched */
235     RAMBlock    *block;
236     /* Current offset to search from */
237     ram_addr_t   offset;
238     /* Set once we wrap around */
239     bool         complete_round;
240 };
241 typedef struct PageSearchStatus PageSearchStatus;
242
243 static struct BitmapRcu {
244     struct rcu_head rcu;
245     /* Main migration bitmap */
246     unsigned long *bmap;
247     /* bitmap of pages that haven't been sent even once
248      * only maintained and used in postcopy at the moment
249      * where it's used to send the dirtymap at the start
250      * of the postcopy phase
251      */
252     unsigned long *unsentmap;
253 } *migration_bitmap_rcu;
254
255 struct CompressParam {
256     bool start;
257     bool done;
258     QEMUFile *file;
259     QemuMutex mutex;
260     QemuCond cond;
261     RAMBlock *block;
262     ram_addr_t offset;
263 };
264 typedef struct CompressParam CompressParam;
265
266 struct DecompressParam {
267     bool start;
268     QemuMutex mutex;
269     QemuCond cond;
270     void *des;
271     uint8_t *compbuf;
272     int len;
273 };
274 typedef struct DecompressParam DecompressParam;
275
276 static CompressParam *comp_param;
277 static QemuThread *compress_threads;
278 /* comp_done_cond is used to wake up the migration thread when
279  * one of the compression threads has finished the compression.
280  * comp_done_lock is used together with comp_done_cond.
281  */
282 static QemuMutex *comp_done_lock;
283 static QemuCond *comp_done_cond;
284 /* The empty QEMUFileOps will be used by file in CompressParam */
285 static const QEMUFileOps empty_ops = { };
286
287 static bool compression_switch;
288 static bool quit_comp_thread;
289 static bool quit_decomp_thread;
290 static DecompressParam *decomp_param;
291 static QemuThread *decompress_threads;
292
293 static int do_compress_ram_page(CompressParam *param);
294
295 static void *do_data_compress(void *opaque)
296 {
297     CompressParam *param = opaque;
298
299     while (!quit_comp_thread) {
300         qemu_mutex_lock(&param->mutex);
301         /* Re-check quit_comp_thread in case
302          * terminate_compression_threads() was called just before
303          * qemu_mutex_lock(&param->mutex) and after
304          * while (!quit_comp_thread); re-checking it here makes
305          * sure the compression thread terminates as expected.
306          */
307         while (!param->start && !quit_comp_thread) {
308             qemu_cond_wait(&param->cond, &param->mutex);
309         }
310         if (!quit_comp_thread) {
311             do_compress_ram_page(param);
312         }
313         param->start = false;
314         qemu_mutex_unlock(&param->mutex);
315
316         qemu_mutex_lock(comp_done_lock);
317         param->done = true;
318         qemu_cond_signal(comp_done_cond);
319         qemu_mutex_unlock(comp_done_lock);
320     }
321
322     return NULL;
323 }
324
325 static inline void terminate_compression_threads(void)
326 {
327     int idx, thread_count;
328
329     thread_count = migrate_compress_threads();
330     quit_comp_thread = true;
331     for (idx = 0; idx < thread_count; idx++) {
332         qemu_mutex_lock(&comp_param[idx].mutex);
333         qemu_cond_signal(&comp_param[idx].cond);
334         qemu_mutex_unlock(&comp_param[idx].mutex);
335     }
336 }
337
338 void migrate_compress_threads_join(void)
339 {
340     int i, thread_count;
341
342     if (!migrate_use_compression()) {
343         return;
344     }
345     terminate_compression_threads();
346     thread_count = migrate_compress_threads();
347     for (i = 0; i < thread_count; i++) {
348         qemu_thread_join(compress_threads + i);
349         qemu_fclose(comp_param[i].file);
350         qemu_mutex_destroy(&comp_param[i].mutex);
351         qemu_cond_destroy(&comp_param[i].cond);
352     }
353     qemu_mutex_destroy(comp_done_lock);
354     qemu_cond_destroy(comp_done_cond);
355     g_free(compress_threads);
356     g_free(comp_param);
357     g_free(comp_done_cond);
358     g_free(comp_done_lock);
359     compress_threads = NULL;
360     comp_param = NULL;
361     comp_done_cond = NULL;
362     comp_done_lock = NULL;
363 }
364
365 void migrate_compress_threads_create(void)
366 {
367     int i, thread_count;
368
369     if (!migrate_use_compression()) {
370         return;
371     }
372     quit_comp_thread = false;
373     compression_switch = true;
374     thread_count = migrate_compress_threads();
375     compress_threads = g_new0(QemuThread, thread_count);
376     comp_param = g_new0(CompressParam, thread_count);
377     comp_done_cond = g_new0(QemuCond, 1);
378     comp_done_lock = g_new0(QemuMutex, 1);
379     qemu_cond_init(comp_done_cond);
380     qemu_mutex_init(comp_done_lock);
381     for (i = 0; i < thread_count; i++) {
382         /* comp_param[i].file is just used as a dummy buffer to save data; set
383          * its ops to empty.
384          */
385         comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
386         comp_param[i].done = true;
387         qemu_mutex_init(&comp_param[i].mutex);
388         qemu_cond_init(&comp_param[i].cond);
389         qemu_thread_create(compress_threads + i, "compress",
390                            do_data_compress, comp_param + i,
391                            QEMU_THREAD_JOINABLE);
392     }
393 }
394
395 /**
396  * save_page_header: Write page header to the wire
397  *
398  * If this is the first page sent from this block, it also writes the block identification
399  *
400  * Returns: Number of bytes written
401  *
402  * @f: QEMUFile where to send the data
403  * @block: block that contains the page we want to send
404  * @offset: offset inside the block for the page
405  *          in the lower bits, it contains flags
406  */
407 static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
408 {
409     size_t size, len;
410
411     qemu_put_be64(f, offset);
412     size = 8;
413
414     if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
415         len = strlen(block->idstr);
416         qemu_put_byte(f, len);
417         qemu_put_buffer(f, (uint8_t *)block->idstr, len);
418         size += 1 + len;
419     }
420     return size;
421 }
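/* Wire-format sketch of the header written above (derived from the code, not
 * a normative description):
 *
 *     be64: page offset with the RAM_SAVE_FLAG_* bits OR-ed into the low bits
 *     u8:   length of the RAMBlock idstr      (only if FLAG_CONTINUE is clear)
 *     ...:  idstr bytes, not NUL-terminated   (only if FLAG_CONTINUE is clear)
 */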
422
423 /* Reduce the amount of guest CPU execution to hopefully slow down memory writes.
424  * If guest dirty memory rate is reduced below the rate at which we can
425  * transfer pages to the destination then we should be able to complete
426  * migration. Some workloads dirty memory way too fast and will not effectively
427  * converge, even with auto-converge.
428  */
429 static void mig_throttle_guest_down(void)
430 {
431     MigrationState *s = migrate_get_current();
432     uint64_t pct_initial = s->parameters.cpu_throttle_initial;
433     uint64_t pct_increment = s->parameters.cpu_throttle_increment;
434
435     /* We have not started throttling yet. Let's start it. */
436     if (!cpu_throttle_active()) {
437         cpu_throttle_set(pct_initial);
438     } else {
439         /* Throttling already on, just increase the rate */
440         cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
441     }
442 }
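/* Worked example (sketch with assumed parameter values, not defaults taken
 * from the source): with cpu_throttle_initial = 20 and
 * cpu_throttle_increment = 10, successive calls set the CPU throttle to
 * 20%, 30%, 40%, ... until the dirty rate drops below the transfer rate or
 * the migration completes.
 */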
443
444 /* Update the xbzrle cache to reflect a page that's been sent as all 0.
445  * The important thing is that a stale (not-yet-0'd) page be replaced
446  * by the new data.
447  * As a bonus, if the page wasn't in the cache it gets added so that
448  * when a small write is made into the 0'd page it gets XBZRLE sent
449  */
450 static void xbzrle_cache_zero_page(ram_addr_t current_addr)
451 {
452     if (ram_bulk_stage || !migrate_use_xbzrle()) {
453         return;
454     }
455
456     /* We don't care if this fails to allocate a new cache page
457      * as long as it updated an old one */
458     cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
459                  bitmap_sync_count);
460 }
461
462 #define ENCODING_FLAG_XBZRLE 0x1
463
464 /**
465  * save_xbzrle_page: compress and send current page
466  *
467  * Returns: 1 means that we wrote the page
468  *          0 means that page is identical to the one already sent
469  *          -1 means that xbzrle would be longer than normal
470  *
471  * @f: QEMUFile where to send the data
472  * @current_data: pointer to the page data; may be updated to point into the cache
473  * @current_addr: RAM address of the page being sent
474  * @block: block that contains the page we want to send
475  * @offset: offset inside the block for the page
476  * @last_stage: if we are at the completion stage
477  * @bytes_transferred: increase it with the number of transferred bytes
478  */
479 static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
480                             ram_addr_t current_addr, RAMBlock *block,
481                             ram_addr_t offset, bool last_stage,
482                             uint64_t *bytes_transferred)
483 {
484     int encoded_len = 0, bytes_xbzrle;
485     uint8_t *prev_cached_page;
486
487     if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
488         acct_info.xbzrle_cache_miss++;
489         if (!last_stage) {
490             if (cache_insert(XBZRLE.cache, current_addr, *current_data,
491                              bitmap_sync_count) == -1) {
492                 return -1;
493             } else {
494                 /* update *current_data when the page has been
495                    inserted into cache */
496                 *current_data = get_cached_data(XBZRLE.cache, current_addr);
497             }
498         }
499         return -1;
500     }
501
502     prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
503
504     /* save current buffer into memory */
505     memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
506
507     /* XBZRLE encoding (if there is no overflow) */
508     encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
509                                        TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
510                                        TARGET_PAGE_SIZE);
511     if (encoded_len == 0) {
512         DPRINTF("Skipping unmodified page\n");
513         return 0;
514     } else if (encoded_len == -1) {
515         DPRINTF("Overflow\n");
516         acct_info.xbzrle_overflows++;
517         /* update data in the cache */
518         if (!last_stage) {
519             memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
520             *current_data = prev_cached_page;
521         }
522         return -1;
523     }
524
525     /* Update the data in the cache so it matches what the destination now has */
526     if (!last_stage) {
527         memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
528     }
529
530     /* Send XBZRLE based compressed page */
531     bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
532     qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
533     qemu_put_be16(f, encoded_len);
534     qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
535     bytes_xbzrle += encoded_len + 1 + 2;
536     acct_info.xbzrle_pages++;
537     acct_info.xbzrle_bytes += bytes_xbzrle;
538     *bytes_transferred += bytes_xbzrle;
539
540     return 1;
541 }
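/* On-wire sketch of an XBZRLE page record as written above (derived from the
 * code): the usual page header with RAM_SAVE_FLAG_XBZRLE set, then a one-byte
 * ENCODING_FLAG_XBZRLE, a big-endian 16-bit encoded length, and encoded_len
 * bytes of XBZRLE-encoded data; bytes_xbzrle therefore counts
 * header + 1 + 2 + encoded_len bytes.
 */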
542
543 /* Called with rcu_read_lock() to protect migration_bitmap
544  * rb: The RAMBlock  to search for dirty pages in
545  * start: Start address (typically so we can continue from previous page)
546  * ram_addr_abs: Pointer into which to store the address of the dirty page
547  *               within the global ram_addr space
548  *
549  * Returns: byte offset within memory region of the start of a dirty page
550  */
551 static inline
552 ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
553                                        ram_addr_t start,
554                                        ram_addr_t *ram_addr_abs)
555 {
556     unsigned long base = rb->offset >> TARGET_PAGE_BITS;
557     unsigned long nr = base + (start >> TARGET_PAGE_BITS);
558     uint64_t rb_size = rb->used_length;
559     unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
560     unsigned long *bitmap;
561
562     unsigned long next;
563
564     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
565     if (ram_bulk_stage && nr > base) {
566         next = nr + 1;
567     } else {
568         next = find_next_bit(bitmap, size, nr);
569     }
570
571     *ram_addr_abs = next << TARGET_PAGE_BITS;
572     return (next - base) << TARGET_PAGE_BITS;
573 }
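/* Worked example (sketch, assuming 4 KiB target pages): for a RAMBlock at
 * offset 0x40000000 with start == 0x2000, base is 0x40000 and nr is 0x40002;
 * the function returns (next - base) << 12, the byte offset of the next dirty
 * page within the block, and stores the absolute ram_addr_t of that page in
 * *ram_addr_abs.
 */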
574
575 static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
576 {
577     bool ret;
578     int nr = addr >> TARGET_PAGE_BITS;
579     unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
580
581     ret = test_and_clear_bit(nr, bitmap);
582
583     if (ret) {
584         migration_dirty_pages--;
585     }
586     return ret;
587 }
588
589 static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
590 {
591     unsigned long *bitmap;
592     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
593     migration_dirty_pages +=
594         cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
595 }
596
597 /* FIXME: there are too many global variables used in the migration process. */
598 static int64_t start_time;
599 static int64_t bytes_xfer_prev;
600 static int64_t num_dirty_pages_period;
601 static uint64_t xbzrle_cache_miss_prev;
602 static uint64_t iterations_prev;
603
604 static void migration_bitmap_sync_init(void)
605 {
606     start_time = 0;
607     bytes_xfer_prev = 0;
608     num_dirty_pages_period = 0;
609     xbzrle_cache_miss_prev = 0;
610     iterations_prev = 0;
611 }
612
613 static void migration_bitmap_sync(void)
614 {
615     RAMBlock *block;
616     uint64_t num_dirty_pages_init = migration_dirty_pages;
617     MigrationState *s = migrate_get_current();
618     int64_t end_time;
619     int64_t bytes_xfer_now;
620
621     bitmap_sync_count++;
622
623     if (!bytes_xfer_prev) {
624         bytes_xfer_prev = ram_bytes_transferred();
625     }
626
627     if (!start_time) {
628         start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
629     }
630
631     trace_migration_bitmap_sync_start();
632     address_space_sync_dirty_bitmap(&address_space_memory);
633
634     qemu_mutex_lock(&migration_bitmap_mutex);
635     rcu_read_lock();
636     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
637         migration_bitmap_sync_range(block->offset, block->used_length);
638     }
639     rcu_read_unlock();
640     qemu_mutex_unlock(&migration_bitmap_mutex);
641
642     trace_migration_bitmap_sync_end(migration_dirty_pages
643                                     - num_dirty_pages_init);
644     num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
645     end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
646
647     /* more than 1 second = 1000 milliseconds */
648     if (end_time > start_time + 1000) {
649         if (migrate_auto_converge()) {
650             /* The following detection logic can be refined later. For now:
651                check whether the bytes dirtied in this period exceed half of
652                the approximate number of bytes that just got transferred since
653                the last time we were in this routine. If that happens twice,
654                start or increase throttling. */
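            /* Worked example (sketch, assuming 4 KiB target pages): if
             * 150000 pages (~600 MB) were dirtied in the period while about
             * 1000 MB were transferred, 600 MB > 1000 MB / 2 holds, so
             * dirty_rate_high_cnt is bumped; once the counter passes the
             * check below, throttling starts or is increased. */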
655             bytes_xfer_now = ram_bytes_transferred();
656
657             if (s->dirty_pages_rate &&
658                (num_dirty_pages_period * TARGET_PAGE_SIZE >
659                    (bytes_xfer_now - bytes_xfer_prev)/2) &&
660                (dirty_rate_high_cnt++ >= 2)) {
661                     trace_migration_throttle();
662                     dirty_rate_high_cnt = 0;
663                     mig_throttle_guest_down();
664              }
665              bytes_xfer_prev = bytes_xfer_now;
666         }
667
668         if (migrate_use_xbzrle()) {
669             if (iterations_prev != acct_info.iterations) {
670                 acct_info.xbzrle_cache_miss_rate =
671                    (double)(acct_info.xbzrle_cache_miss -
672                             xbzrle_cache_miss_prev) /
673                    (acct_info.iterations - iterations_prev);
674             }
675             iterations_prev = acct_info.iterations;
676             xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
677         }
678         s->dirty_pages_rate = num_dirty_pages_period * 1000
679             / (end_time - start_time);
680         s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
681         start_time = end_time;
682         num_dirty_pages_period = 0;
683     }
684     s->dirty_sync_count = bitmap_sync_count;
685     if (migrate_use_events()) {
686         qapi_event_send_migration_pass(bitmap_sync_count, NULL);
687     }
688 }
689
690 /**
691  * save_zero_page: Send the zero page to the stream
692  *
693  * Returns: Number of pages written.
694  *
695  * @f: QEMUFile where to send the data
696  * @block: block that contains the page we want to send
697  * @offset: offset inside the block for the page
698  * @p: pointer to the page
699  * @bytes_transferred: increase it with the number of transferred bytes
700  */
701 static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
702                           uint8_t *p, uint64_t *bytes_transferred)
703 {
704     int pages = -1;
705
706     if (is_zero_range(p, TARGET_PAGE_SIZE)) {
707         acct_info.dup_pages++;
708         *bytes_transferred += save_page_header(f, block,
709                                                offset | RAM_SAVE_FLAG_COMPRESS);
710         qemu_put_byte(f, 0);
711         *bytes_transferred += 1;
712         pages = 1;
713     }
714
715     return pages;
716 }
717
718 /**
719  * ram_save_page: Send the given page to the stream
720  *
721  * Returns: Number of pages written.
722  *          < 0 - error
723  *          >=0 - Number of pages written - this might legally be 0
724  *                if xbzrle noticed the page was the same.
725  *
726  * @f: QEMUFile where to send the data
727  * @pss: data about the page we want to send (the block that contains the
728  *       page and the offset of the page inside the block)
729  * @last_stage: if we are at the completion stage
730  * @bytes_transferred: increase it with the number of transferred bytes
731  */
732 static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
733                          bool last_stage, uint64_t *bytes_transferred)
734 {
735     int pages = -1;
736     uint64_t bytes_xmit;
737     ram_addr_t current_addr;
738     uint8_t *p;
739     int ret;
740     bool send_async = true;
741     RAMBlock *block = pss->block;
742     ram_addr_t offset = pss->offset;
743
744     p = block->host + offset;
745
746     /* When in doubt, send the page as normal */
747     bytes_xmit = 0;
748     ret = ram_control_save_page(f, block->offset,
749                            offset, TARGET_PAGE_SIZE, &bytes_xmit);
750     if (bytes_xmit) {
751         *bytes_transferred += bytes_xmit;
752         pages = 1;
753     }
754
755     XBZRLE_cache_lock();
756
757     current_addr = block->offset + offset;
758
759     if (block == last_sent_block) {
760         offset |= RAM_SAVE_FLAG_CONTINUE;
761     }
762     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
763         if (ret != RAM_SAVE_CONTROL_DELAYED) {
764             if (bytes_xmit > 0) {
765                 acct_info.norm_pages++;
766             } else if (bytes_xmit == 0) {
767                 acct_info.dup_pages++;
768             }
769         }
770     } else {
771         pages = save_zero_page(f, block, offset, p, bytes_transferred);
772         if (pages > 0) {
773             /* Must let xbzrle know, otherwise a previous (now 0'd) cached
774              * page would be stale
775              */
776             xbzrle_cache_zero_page(current_addr);
777         } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
778             pages = save_xbzrle_page(f, &p, current_addr, block,
779                                      offset, last_stage, bytes_transferred);
780             if (!last_stage) {
781                 /* Can't send this cached data async, since the cache page
782                  * might get updated before it gets to the wire
783                  */
784                 send_async = false;
785             }
786         }
787     }
788
789     /* XBZRLE overflow or normal page */
790     if (pages == -1) {
791         *bytes_transferred += save_page_header(f, block,
792                                                offset | RAM_SAVE_FLAG_PAGE);
793         if (send_async) {
794             qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
795         } else {
796             qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
797         }
798         *bytes_transferred += TARGET_PAGE_SIZE;
799         pages = 1;
800         acct_info.norm_pages++;
801     }
802
803     XBZRLE_cache_unlock();
804
805     return pages;
806 }
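/* Summary of the decision flow above (descriptive comment only): the
 * ram_control_save_page() hook is tried first; otherwise the page is tested
 * as a zero page, then (outside the bulk stage) XBZRLE is attempted, and
 * finally the page is sent as a full RAM_SAVE_FLAG_PAGE, asynchronously
 * unless the data might still change under us (the cached-data case).
 */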
807
808 static int do_compress_ram_page(CompressParam *param)
809 {
810     int bytes_sent, blen;
811     uint8_t *p;
812     RAMBlock *block = param->block;
813     ram_addr_t offset = param->offset;
814
815     p = block->host + (offset & TARGET_PAGE_MASK);
816
817     bytes_sent = save_page_header(param->file, block, offset |
818                                   RAM_SAVE_FLAG_COMPRESS_PAGE);
819     blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
820                                      migrate_compress_level());
821     bytes_sent += blen;
822
823     return bytes_sent;
824 }
825
826 static inline void start_compression(CompressParam *param)
827 {
828     param->done = false;
829     qemu_mutex_lock(&param->mutex);
830     param->start = true;
831     qemu_cond_signal(&param->cond);
832     qemu_mutex_unlock(&param->mutex);
833 }
834
835 static inline void start_decompression(DecompressParam *param)
836 {
837     qemu_mutex_lock(&param->mutex);
838     param->start = true;
839     qemu_cond_signal(&param->cond);
840     qemu_mutex_unlock(&param->mutex);
841 }
842
843 static uint64_t bytes_transferred;
844
845 static void flush_compressed_data(QEMUFile *f)
846 {
847     int idx, len, thread_count;
848
849     if (!migrate_use_compression()) {
850         return;
851     }
852     thread_count = migrate_compress_threads();
853     for (idx = 0; idx < thread_count; idx++) {
854         if (!comp_param[idx].done) {
855             qemu_mutex_lock(comp_done_lock);
856             while (!comp_param[idx].done && !quit_comp_thread) {
857                 qemu_cond_wait(comp_done_cond, comp_done_lock);
858             }
859             qemu_mutex_unlock(comp_done_lock);
860         }
861         if (!quit_comp_thread) {
862             len = qemu_put_qemu_file(f, comp_param[idx].file);
863             bytes_transferred += len;
864         }
865     }
866 }
867
868 static inline void set_compress_params(CompressParam *param, RAMBlock *block,
869                                        ram_addr_t offset)
870 {
871     param->block = block;
872     param->offset = offset;
873 }
874
875 static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
876                                            ram_addr_t offset,
877                                            uint64_t *bytes_transferred)
878 {
879     int idx, thread_count, bytes_xmit = -1, pages = -1;
880
881     thread_count = migrate_compress_threads();
882     qemu_mutex_lock(comp_done_lock);
883     while (true) {
884         for (idx = 0; idx < thread_count; idx++) {
885             if (comp_param[idx].done) {
886                 bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
887                 set_compress_params(&comp_param[idx], block, offset);
888                 start_compression(&comp_param[idx]);
889                 pages = 1;
890                 acct_info.norm_pages++;
891                 *bytes_transferred += bytes_xmit;
892                 break;
893             }
894         }
895         if (pages > 0) {
896             break;
897         } else {
898             qemu_cond_wait(comp_done_cond, comp_done_lock);
899         }
900     }
901     qemu_mutex_unlock(comp_done_lock);
902
903     return pages;
904 }
905
906 /**
907  * ram_save_compressed_page: compress the given page and send it to the stream
908  *
909  * Returns: Number of pages written.
910  *
911  * @f: QEMUFile where to send the data
912  * @pss: data about the page we want to send (the block that contains the
913  *       page and the offset of the page inside the block)
914  * @last_stage: if we are at the completion stage
915  * @bytes_transferred: increase it with the number of transferred bytes
916  */
917 static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
918                                     bool last_stage,
919                                     uint64_t *bytes_transferred)
920 {
921     int pages = -1;
922     uint64_t bytes_xmit;
923     uint8_t *p;
924     int ret;
925     RAMBlock *block = pss->block;
926     ram_addr_t offset = pss->offset;
927
928     p = block->host + offset;
929
930     bytes_xmit = 0;
931     ret = ram_control_save_page(f, block->offset,
932                                 offset, TARGET_PAGE_SIZE, &bytes_xmit);
933     if (bytes_xmit) {
934         *bytes_transferred += bytes_xmit;
935         pages = 1;
936     }
937     if (block == last_sent_block) {
938         offset |= RAM_SAVE_FLAG_CONTINUE;
939     }
940     if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
941         if (ret != RAM_SAVE_CONTROL_DELAYED) {
942             if (bytes_xmit > 0) {
943                 acct_info.norm_pages++;
944             } else if (bytes_xmit == 0) {
945                 acct_info.dup_pages++;
946             }
947         }
948     } else {
949         /* When starting the process of a new block, the first page of
950          * the block should be sent out before other pages in the same
951          * block, and all the pages in the last block should have been sent
952          * out. Keeping this order is important because the 'cont' flag
953          * is used to avoid resending the block name.
954          */
955         if (block != last_sent_block) {
956             flush_compressed_data(f);
957             pages = save_zero_page(f, block, offset, p, bytes_transferred);
958             if (pages == -1) {
959                 set_compress_params(&comp_param[0], block, offset);
960                 /* Compress the data in the migration thread itself to make
961                  * sure the first page is sent out before other pages
962                  */
963                 bytes_xmit = do_compress_ram_page(&comp_param[0]);
964                 acct_info.norm_pages++;
965                 qemu_put_qemu_file(f, comp_param[0].file);
966                 *bytes_transferred += bytes_xmit;
967                 pages = 1;
968             }
969         } else {
970             pages = save_zero_page(f, block, offset, p, bytes_transferred);
971             if (pages == -1) {
972                 pages = compress_page_with_multi_thread(f, block, offset,
973                                                         bytes_transferred);
974             }
975         }
976     }
977
978     return pages;
979 }
980
981 /*
982  * Find the next dirty page and update any state associated with
983  * the search process.
984  *
985  * Returns: True if a page is found
986  *
987  * @f: Current migration stream.
988  * @pss: Data about the state of the current dirty page scan.
989  * @*again: Set to false if the search has scanned the whole of RAM
990  * *ram_addr_abs: Pointer into which to store the address of the dirty page
991  *               within the global ram_addr space
992  */
993 static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
994                              bool *again, ram_addr_t *ram_addr_abs)
995 {
996     pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
997                                               ram_addr_abs);
998     if (pss->complete_round && pss->block == last_seen_block &&
999         pss->offset >= last_offset) {
1000         /*
1001          * We've been once around the RAM and haven't found anything.
1002          * Give up.
1003          */
1004         *again = false;
1005         return false;
1006     }
1007     if (pss->offset >= pss->block->used_length) {
1008         /* Didn't find anything in this RAM Block */
1009         pss->offset = 0;
1010         pss->block = QLIST_NEXT_RCU(pss->block, next);
1011         if (!pss->block) {
1012             /* Hit the end of the list */
1013             pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1014             /* Flag that we've looped */
1015             pss->complete_round = true;
1016             ram_bulk_stage = false;
1017             if (migrate_use_xbzrle()) {
1018                 /* If xbzrle is on, stop using the data compression at this
1019                  * point. In theory, xbzrle can do better than compression.
1020                  */
1021                 flush_compressed_data(f);
1022                 compression_switch = false;
1023             }
1024         }
1025         /* Didn't find anything this time, but try again on the new block */
1026         *again = true;
1027         return false;
1028     } else {
1029         /* Can go around again, but... */
1030         *again = true;
1031         /* We've found something so probably don't need to */
1032         return true;
1033     }
1034 }
1035
1036 /*
1037  * Helper for 'get_queued_page' - gets a page off the queue
1038  *      ms:      MigrationState in
1039  * *offset:      Used to return the offset within the RAMBlock
1040  * ram_addr_abs: global offset in the dirty/sent bitmaps
1041  *
1042  * Returns:      block (or NULL if none available)
1043  */
1044 static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
1045                               ram_addr_t *ram_addr_abs)
1046 {
1047     RAMBlock *block = NULL;
1048
1049     qemu_mutex_lock(&ms->src_page_req_mutex);
1050     if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
1051         struct MigrationSrcPageRequest *entry =
1052                                 QSIMPLEQ_FIRST(&ms->src_page_requests);
1053         block = entry->rb;
1054         *offset = entry->offset;
1055         *ram_addr_abs = (entry->offset + entry->rb->offset) &
1056                         TARGET_PAGE_MASK;
1057
1058         if (entry->len > TARGET_PAGE_SIZE) {
1059             entry->len -= TARGET_PAGE_SIZE;
1060             entry->offset += TARGET_PAGE_SIZE;
1061         } else {
1062             memory_region_unref(block->mr);
1063             QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1064             g_free(entry);
1065         }
1066     }
1067     qemu_mutex_unlock(&ms->src_page_req_mutex);
1068
1069     return block;
1070 }
1071
1072 /*
1073  * Unqueue a page from the queue fed by postcopy page requests; skips pages
1074  * that are already sent (!dirty)
1075  *
1076  *      ms:      MigrationState in
1077  *     pss:      PageSearchStatus structure updated with found block/offset
1078  * ram_addr_abs: global offset in the dirty/sent bitmaps
1079  *
1080  * Returns:      true if a queued page is found
1081  */
1082 static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1083                             ram_addr_t *ram_addr_abs)
1084 {
1085     RAMBlock  *block;
1086     ram_addr_t offset;
1087     bool dirty;
1088
1089     do {
1090         block = unqueue_page(ms, &offset, ram_addr_abs);
1091         /*
1092          * We're sending this page, and since it's postcopy nothing else
1093          * will dirty it, and we must make sure it doesn't get sent again
1094          * even if this queue request was received after the background
1095          * search already sent it.
1096          */
1097         if (block) {
1098             unsigned long *bitmap;
1099             bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1100             dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1101             if (!dirty) {
1102                 trace_get_queued_page_not_dirty(
1103                     block->idstr, (uint64_t)offset,
1104                     (uint64_t)*ram_addr_abs,
1105                     test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1106                          atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1107             } else {
1108                 trace_get_queued_page(block->idstr,
1109                                       (uint64_t)offset,
1110                                       (uint64_t)*ram_addr_abs);
1111             }
1112         }
1113
1114     } while (block && !dirty);
1115
1116     if (block) {
1117         /*
1118          * As soon as we start servicing pages out of order, we have
1119          * to kill the bulk stage, since the bulk stage assumes
1120          * (in migration_bitmap_find_dirty) that every page is
1121          * dirty, and that's no longer true.
1122          */
1123         ram_bulk_stage = false;
1124
1125         /*
1126          * We want the background search to continue from the queued page
1127          * since the guest is likely to want other pages near to the page
1128          * it just requested.
1129          */
1130         pss->block = block;
1131         pss->offset = offset;
1132     }
1133
1134     return !!block;
1135 }
1136
1137 /**
1138  * flush_page_queue: Flush any remaining pages in the ram request queue;
1139  *    it should be empty at the end anyway, but in error cases there may be
1140  *    some left.
1141  *
1142  * ms: MigrationState
1143  */
1144 void flush_page_queue(MigrationState *ms)
1145 {
1146     struct MigrationSrcPageRequest *mspr, *next_mspr;
1147     /* This queue should generally be empty - but in the case of a failed
1148      * migration it might have some droppings left in it.
1149      */
1150     rcu_read_lock();
1151     QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1152         memory_region_unref(mspr->rb->mr);
1153         QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1154         g_free(mspr);
1155     }
1156     rcu_read_unlock();
1157 }
1158
1159 /**
1160  * Queue the pages for transmission, e.g. a request from postcopy destination
1161  *   ms: MigrationState in which the queue is held
1162  *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
1163  *   start: Offset from the start of the RAMBlock
1164  *   len: Length (in bytes) to send
1165  *   Return: 0 on success
1166  */
1167 int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1168                          ram_addr_t start, ram_addr_t len)
1169 {
1170     RAMBlock *ramblock;
1171
1172     ms->postcopy_requests++;
1173     rcu_read_lock();
1174     if (!rbname) {
1175         /* Reuse last RAMBlock */
1176         ramblock = ms->last_req_rb;
1177
1178         if (!ramblock) {
1179             /*
1180              * Shouldn't happen, we can't reuse the last RAMBlock if
1181              * it's the 1st request.
1182              */
1183             error_report("ram_save_queue_pages no previous block");
1184             goto err;
1185         }
1186     } else {
1187         ramblock = qemu_ram_block_by_name(rbname);
1188
1189         if (!ramblock) {
1190             /* We shouldn't be asked for a non-existent RAMBlock */
1191             error_report("ram_save_queue_pages no block '%s'", rbname);
1192             goto err;
1193         }
1194         ms->last_req_rb = ramblock;
1195     }
1196     trace_ram_save_queue_pages(ramblock->idstr, start, len);
1197     if (start + len > ramblock->used_length) {
1198         error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1199                      RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
1200                      __func__, start, len, ramblock->used_length);
1201         goto err;
1202     }
1203
1204     struct MigrationSrcPageRequest *new_entry =
1205         g_malloc0(sizeof(struct MigrationSrcPageRequest));
1206     new_entry->rb = ramblock;
1207     new_entry->offset = start;
1208     new_entry->len = len;
1209
1210     memory_region_ref(ramblock->mr);
1211     qemu_mutex_lock(&ms->src_page_req_mutex);
1212     QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1213     qemu_mutex_unlock(&ms->src_page_req_mutex);
1214     rcu_read_unlock();
1215
1216     return 0;
1217
1218 err:
1219     rcu_read_unlock();
1220     return -1;
1221 }
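/* Illustrative call (sketch; the block name and offsets are made up): a
 * postcopy request for one 4 KiB page at offset 0x10000 of the "pc.ram"
 * block would be queued with:
 *
 *     ram_save_queue_pages(ms, "pc.ram", 0x10000, 4096);
 *
 * Passing rbname == NULL reuses the RAMBlock of the previous request.
 */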
1222
1223 /**
1224  * ram_save_target_page: Save one target page
1225  *
1226  *
1227  * @f: QEMUFile where to send the data
1228  * @pss: data about the page we want to send (the block that contains the
1229  *       page and the offset of the page inside the block)
1230  * @last_stage: if we are at the completion stage
1231  * @bytes_transferred: increase it with the number of transferred bytes
1232  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1233  *
1234  * Returns: Number of pages written.
1235  */
1236 static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
1237                                 PageSearchStatus *pss,
1238                                 bool last_stage,
1239                                 uint64_t *bytes_transferred,
1240                                 ram_addr_t dirty_ram_abs)
1241 {
1242     int res = 0;
1243
1244     /* Check if the page is dirty and, if it is, send it */
1245     if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1246         unsigned long *unsentmap;
1247         if (compression_switch && migrate_use_compression()) {
1248             res = ram_save_compressed_page(f, pss,
1249                                            last_stage,
1250                                            bytes_transferred);
1251         } else {
1252             res = ram_save_page(f, pss, last_stage,
1253                                 bytes_transferred);
1254         }
1255
1256         if (res < 0) {
1257             return res;
1258         }
1259         unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1260         if (unsentmap) {
1261             clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1262         }
1263         /* Only update last_sent_block if a block was actually sent; xbzrle
1264          * might have decided the page was identical so didn't bother writing
1265          * to the stream.
1266          */
1267         if (res > 0) {
1268             last_sent_block = pss->block;
1269         }
1270     }
1271
1272     return res;
1273 }
1274
1275 /**
1276  * ram_save_host_page: Starting at *offset send pages up to the end
1277  *                     of the current host page.  It's valid for the initial
1278  *                     offset to point into the middle of a host page
1279  *                     offset to point into the middle of a host page,
1280  *                     in which case the remainder of the host page is sent.
1281  *
1282  * Returns: Number of pages written.
1283  *
1284  * @f: QEMUFile where to send the data
1285  * @pss: data about the page we want to send (the block that contains the
1286  *       page and the offset of the page inside the block); pss->offset is
1287  *       updated to the last target page sent
1288  * @last_stage: if we are at the completion stage
1289  * @bytes_transferred: increase it with the number of transferred bytes
1290  * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1291  */
1292 static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
1293                               PageSearchStatus *pss,
1294                               bool last_stage,
1295                               uint64_t *bytes_transferred,
1296                               ram_addr_t dirty_ram_abs)
1297 {
1298     int tmppages, pages = 0;
1299     do {
1300         tmppages = ram_save_target_page(ms, f, pss, last_stage,
1301                                         bytes_transferred, dirty_ram_abs);
1302         if (tmppages < 0) {
1303             return tmppages;
1304         }
1305
1306         pages += tmppages;
1307         pss->offset += TARGET_PAGE_SIZE;
1308         dirty_ram_abs += TARGET_PAGE_SIZE;
1309     } while (pss->offset & (qemu_host_page_size - 1));
1310
1311     /* The offset we leave with is the last one we looked at */
1312     pss->offset -= TARGET_PAGE_SIZE;
1313     return pages;
1314 }
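/* Worked example (sketch): with a 64 KiB host page size and 4 KiB target
 * pages, a single call starting at a host-page-aligned offset walks up to 16
 * target pages, sending only the dirty ones, and leaves pss->offset at the
 * last target page it looked at.
 */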
1315
1316 /**
1317  * ram_find_and_save_block: Finds a dirty page and sends it to f
1318  *
1319  * Called within an RCU critical section.
1320  *
1321  * Returns:  The number of pages written
1322  *           0 means no dirty pages
1323  *
1324  * @f: QEMUFile where to send the data
1325  * @last_stage: if we are at the completion stage
1326  * @bytes_transferred: increase it with the number of transferred bytes
1327  *
1328  * On systems where host-page-size > target-page-size it will send all the
1329  * pages in a host page that are dirty.
1330  */
1331
1332 static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
1333                                    uint64_t *bytes_transferred)
1334 {
1335     PageSearchStatus pss;
1336     MigrationState *ms = migrate_get_current();
1337     int pages = 0;
1338     bool again, found;
1339     ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1340                                  ram_addr_t space */
1341
1342     pss.block = last_seen_block;
1343     pss.offset = last_offset;
1344     pss.complete_round = false;
1345
1346     if (!pss.block) {
1347         pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1348     }
1349
1350     do {
1351         again = true;
1352         found = get_queued_page(ms, &pss, &dirty_ram_abs);
1353
1354         if (!found) {
1355             /* priority queue empty, so just search for something dirty */
1356             found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
1357         }
1358
1359         if (found) {
1360             pages = ram_save_host_page(ms, f, &pss,
1361                                        last_stage, bytes_transferred,
1362                                        dirty_ram_abs);
1363         }
1364     } while (!pages && again);
1365
1366     last_seen_block = pss.block;
1367     last_offset = pss.offset;
1368
1369     return pages;
1370 }
1371
1372 void acct_update_position(QEMUFile *f, size_t size, bool zero)
1373 {
1374     uint64_t pages = size / TARGET_PAGE_SIZE;
1375     if (zero) {
1376         acct_info.dup_pages += pages;
1377     } else {
1378         acct_info.norm_pages += pages;
1379         bytes_transferred += size;
1380         qemu_update_position(f, size);
1381     }
1382 }
1383
1384 static ram_addr_t ram_save_remaining(void)
1385 {
1386     return migration_dirty_pages;
1387 }
1388
1389 uint64_t ram_bytes_remaining(void)
1390 {
1391     return ram_save_remaining() * TARGET_PAGE_SIZE;
1392 }
1393
1394 uint64_t ram_bytes_transferred(void)
1395 {
1396     return bytes_transferred;
1397 }
1398
1399 uint64_t ram_bytes_total(void)
1400 {
1401     RAMBlock *block;
1402     uint64_t total = 0;
1403
1404     rcu_read_lock();
1405     QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1406         total += block->used_length;
1407     rcu_read_unlock();
1408     return total;
1409 }
1410
1411 void free_xbzrle_decoded_buf(void)
1412 {
1413     g_free(xbzrle_decoded_buf);
1414     xbzrle_decoded_buf = NULL;
1415 }
1416
1417 static void migration_bitmap_free(struct BitmapRcu *bmap)
1418 {
1419     g_free(bmap->bmap);
1420     g_free(bmap->unsentmap);
1421     g_free(bmap);
1422 }
1423
1424 static void ram_migration_cleanup(void *opaque)
1425 {
1426     /* The caller must hold the iothread lock or be in a bottom half, so
1427      * there is no write race against this migration_bitmap.
1428      */
1429     struct BitmapRcu *bitmap = migration_bitmap_rcu;
1430     atomic_rcu_set(&migration_bitmap_rcu, NULL);
1431     if (bitmap) {
1432         memory_global_dirty_log_stop();
1433         call_rcu(bitmap, migration_bitmap_free, rcu);
1434     }
1435
1436     XBZRLE_cache_lock();
1437     if (XBZRLE.cache) {
1438         cache_fini(XBZRLE.cache);
1439         g_free(XBZRLE.encoded_buf);
1440         g_free(XBZRLE.current_buf);
1441         XBZRLE.cache = NULL;
1442         XBZRLE.encoded_buf = NULL;
1443         XBZRLE.current_buf = NULL;
1444     }
1445     XBZRLE_cache_unlock();
1446 }
1447
1448 static void reset_ram_globals(void)
1449 {
1450     last_seen_block = NULL;
1451     last_sent_block = NULL;
1452     last_offset = 0;
1453     last_version = ram_list.version;
1454     ram_bulk_stage = true;
1455 }
1456
1457 #define MAX_WAIT 50 /* ms, half buffered_file limit */
1458
1459 void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1460 {
1461     /* Called from the QEMU main thread, so there is
1462      * no write race against this migration_bitmap.
1463      */
1464     if (migration_bitmap_rcu) {
1465         struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1466         bitmap = g_new(struct BitmapRcu, 1);
1467         bitmap->bmap = bitmap_new(new);
1468
1469         /* Prevent bits in migration_bitmap from being set
1470          * by migration_bitmap_sync_range() at the same time.
1471          * It is safe for migration if a migration_bitmap bit is cleared
1472          * at the same time.
1473          */
1474         qemu_mutex_lock(&migration_bitmap_mutex);
1475         bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1476         bitmap_set(bitmap->bmap, old, new - old);
1477
1478         /* We don't have a way to safely extend the unsentmap
1479          * with RCU, so mark it as missing; entry to postcopy
1480          * will fail.
1481          */
1482         bitmap->unsentmap = NULL;
1483
1484         atomic_rcu_set(&migration_bitmap_rcu, bitmap);
1485         qemu_mutex_unlock(&migration_bitmap_mutex);
1486         migration_dirty_pages += new - old;
1487         call_rcu(old_bitmap, migration_bitmap_free, rcu);
1488     }
1489 }
1490
1491 /*
1492  * 'expected' is the value you expect the bitmap mostly to be full
1493  * of; it won't bother printing lines that are all this value.
1494  * If 'todump' is null the migration bitmap is dumped.
1495  */
1496 void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1497 {
1498     int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1499
1500     int64_t cur;
1501     int64_t linelen = 128;
1502     char linebuf[129];
1503
1504     if (!todump) {
1505         todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1506     }
1507
1508     for (cur = 0; cur < ram_pages; cur += linelen) {
1509         int64_t curb;
1510         bool found = false;
1511         /*
1512          * Last line; catch the case where the line length
1513          * is longer than remaining ram
1514          */
1515         if (cur + linelen > ram_pages) {
1516             linelen = ram_pages - cur;
1517         }
1518         for (curb = 0; curb < linelen; curb++) {
1519             bool thisbit = test_bit(cur + curb, todump);
1520             linebuf[curb] = thisbit ? '1' : '.';
1521             found = found || (thisbit != expected);
1522         }
1523         if (found) {
1524             linebuf[curb] = '\0';
1525             fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
1526         }
1527     }
1528 }
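/* Example of the output format produced above (illustrative values): each
 * printed line starts with the page index of the line in hex, followed by one
 * character per page, '1' for a set bit and '.' for a clear one, e.g.
 *
 *     0x00000080 : 1111............................1...
 *
 * Lines that consist entirely of the 'expected' value are not printed.
 */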
1529
1530 /* **** functions for postcopy ***** */
1531
1532 /*
1533  * Callback from postcopy_each_ram_send_discard for each RAMBlock
1534  * Note: At this point the 'unsentmap' is the processed bitmap combined
1535  *       with the dirtymap; so a '1' means it's either dirty or unsent.
1536  * start: Index into the bitmap of the first bit representing the named block
1537  * length: Length of the named block in target pages
1538  */
1539 static int postcopy_send_discard_bm_ram(MigrationState *ms,
1540                                         PostcopyDiscardState *pds,
1541                                         unsigned long start,
1542                                         unsigned long length)
1543 {
1544     unsigned long end = start + length; /* one after the end */
1545     unsigned long current;
1546     unsigned long *unsentmap;
1547
1548     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1549     for (current = start; current < end; ) {
1550         unsigned long one = find_next_bit(unsentmap, end, current);
1551
1552         if (one <= end) {
1553             unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1554             unsigned long discard_length;
1555
1556             if (zero >= end) {
1557                 discard_length = end - one;
1558             } else {
1559                 discard_length = zero - one;
1560             }
1561             if (discard_length) {
1562                 postcopy_discard_send_range(ms, pds, one, discard_length);
1563             }
1564             current = one + discard_length;
1565         } else {
1566             current = one;
1567         }
1568     }
1569
1570     return 0;
1571 }
1572
1573 /*
1574  * Utility for the outgoing postcopy code.
1575  *   Calls postcopy_send_discard_bm_ram for each RAMBlock
1576  *   passing it bitmap indexes and name.
1577  * Returns: 0 on success
1578  * (qemu_ram_foreach_block ends up passing unscaled lengths
1579  *  which would mean the postcopy code would have to deal with target page sizes)
1580  */
1581 static int postcopy_each_ram_send_discard(MigrationState *ms)
1582 {
1583     struct RAMBlock *block;
1584     int ret;
1585
1586     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1587         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1588         PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1589                                                                first,
1590                                                                block->idstr);
1591
1592         /*
1593          * Postcopy sends chunks of bitmap over the wire, but it
1594          * just needs indexes at this point; this avoids it needing
1595          * target-page-specific code.
1596          */
1597         ret = postcopy_send_discard_bm_ram(ms, pds, first,
1598                                     block->used_length >> TARGET_PAGE_BITS);
1599         postcopy_discard_send_finish(ms, pds);
1600         if (ret) {
1601             return ret;
1602         }
1603     }
1604
1605     return 0;
1606 }
1607
1608 /*
1609  * Helper for postcopy_chunk_hostpages; it's called twice to clean up
1610  *   the two bitmaps, which are similar but one is inverted.
1611  *
1612  * We search for runs of target-pages that don't start or end on a
1613  * host page boundary;
1614  * unsent_pass=true: Cleans up partially unsent host pages by searching
1615  *                 the unsentmap
1616  * unsent_pass=false: Cleans up partially dirty host pages by searching
1617  *                 the main migration bitmap
1618  *
1619  */
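     /*
      * Worked example (assuming, purely for illustration, a 16KB host page and
      * 4KB target pages, so host_ratio == 4): if a dirty run starts at target
      * page 10, host_offset == 2, so the run is pulled back to page 8 and the
      * whole host page covering pages 8..11 is discarded and remarked below as
      * both unsent and dirty.
      */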
1620 static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1621                                           RAMBlock *block,
1622                                           PostcopyDiscardState *pds)
1623 {
1624     unsigned long *bitmap;
1625     unsigned long *unsentmap;
1626     unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
1627     unsigned long first = block->offset >> TARGET_PAGE_BITS;
1628     unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1629     unsigned long last = first + (len - 1);
1630     unsigned long run_start;
1631
1632     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1633     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1634
1635     if (unsent_pass) {
1636         /* Find a sent page */
1637         run_start = find_next_zero_bit(unsentmap, last + 1, first);
1638     } else {
1639         /* Find a dirty page */
1640         run_start = find_next_bit(bitmap, last + 1, first);
1641     }
1642
1643     while (run_start <= last) {
1644         bool do_fixup = false;
1645         unsigned long fixup_start_addr;
1646         unsigned long host_offset;
1647
1648         /*
1649          * If the start of this run of pages is in the middle of a host
1650          * page, then we need to fixup this host page.
1651          */
1652         host_offset = run_start % host_ratio;
1653         if (host_offset) {
1654             do_fixup = true;
1655             run_start -= host_offset;
1656             fixup_start_addr = run_start;
1657             /* For the next pass */
1658             run_start = run_start + host_ratio;
1659         } else {
1660             /* Find the end of this run */
1661             unsigned long run_end;
1662             if (unsent_pass) {
1663                 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1664             } else {
1665                 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1666             }
1667             /*
1668              * If the end isn't at the start of a host page, then the
1669              * run doesn't finish at the end of a host page
1670              * and we need to discard.
1671              */
1672             host_offset = run_end % host_ratio;
1673             if (host_offset) {
1674                 do_fixup = true;
1675                 fixup_start_addr = run_end - host_offset;
1676                 /*
1677                  * This host page has gone, the next loop iteration starts
1678                  * from after the fixup
1679                  */
1680                 run_start = fixup_start_addr + host_ratio;
1681             } else {
1682                 /*
1683                  * No discards on this iteration, next loop starts from
1684                  * next sent/dirty page
1685                  */
1686                 run_start = run_end + 1;
1687             }
1688         }
1689
1690         if (do_fixup) {
1691             unsigned long page;
1692
1693             /* Tell the destination to discard this page */
1694             if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1695                 /* For the unsent_pass we:
1696                  *     discard partially sent pages
1697                  * For the !unsent_pass (dirty) we:
1698                  *     discard partially dirty pages that were sent
1699                  *     (any partially sent pages were already discarded
1700                  *     by the previous unsent_pass)
1701                  */
1702                 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1703                                             host_ratio);
1704             }
1705
1706             /* Clean up the bitmap */
1707             for (page = fixup_start_addr;
1708                  page < fixup_start_addr + host_ratio; page++) {
1709                 /* All pages in this host page are now not sent */
1710                 set_bit(page, unsentmap);
1711
1712                 /*
1713                  * Remark them as dirty, updating the count for any pages
1714                  * that weren't previously dirty.
1715                  */
1716                 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1717             }
1718         }
1719
1720         if (unsent_pass) {
1721             /* Find the next sent page for the next iteration */
1722             run_start = find_next_zero_bit(unsentmap, last + 1,
1723                                            run_start);
1724         } else {
1725             /* Find the next dirty page for the next iteration */
1726             run_start = find_next_bit(bitmap, last + 1, run_start);
1727         }
1728     }
1729 }
1730
1731 /*
1732  * Utility for the outgoing postcopy code.
1733  *
1734  * Discard any partially sent host-page size chunks, mark any partially
1735  * dirty host-page size chunks as all dirty.
1736  *
1737  * Returns: 0 on success
1738  */
1739 static int postcopy_chunk_hostpages(MigrationState *ms)
1740 {
1741     struct RAMBlock *block;
1742
1743     if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1744         /* Easy case - TPS==HPS - nothing to be done */
1745         return 0;
1746     }
1747
1748     /* Easiest way to make sure we don't resume in the middle of a host-page */
1749     last_seen_block = NULL;
1750     last_sent_block = NULL;
1751     last_offset     = 0;
1752
1753     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1754         unsigned long first = block->offset >> TARGET_PAGE_BITS;
1755
1756         PostcopyDiscardState *pds =
1757                          postcopy_discard_send_init(ms, first, block->idstr);
1758
1759         /* First pass: Discard all partially sent host pages */
1760         postcopy_chunk_hostpages_pass(ms, true, block, pds);
1761         /*
1762          * Second pass: Ensure that all partially dirty host pages are made
1763          * fully dirty.
1764          */
1765         postcopy_chunk_hostpages_pass(ms, false, block, pds);
1766
1767         postcopy_discard_send_finish(ms, pds);
1768     } /* ram_list loop */
1769
1770     return 0;
1771 }
1772
1773 /*
1774  * Transmit the set of pages to be discarded after precopy to the target;
1775  * these are pages that:
1776  *     a) have been previously transmitted but are now dirty again
1777  *     b) have never been transmitted; this ensures that any pages on the
1778  *        destination that have been mapped by background tasks get
1779  *        discarded (transparent huge pages are the specific concern)
1780  * Hopefully this is pretty sparse.
1781  */
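     /*
      * The sequence below is: a final bitmap sync while the source is paused,
      * postcopy_chunk_hostpages() to round partially sent/dirty runs out to
      * host-page boundaries, then unsentmap |= dirty so that a single bitmap
      * describes everything to discard, and finally one discard pass per
      * RAMBlock via postcopy_each_ram_send_discard().
      */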
1782 int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1783 {
1784     int ret;
1785     unsigned long *bitmap, *unsentmap;
1786
1787     rcu_read_lock();
1788
1789     /* This should be our last sync, the src is now paused */
1790     migration_bitmap_sync();
1791
1792     unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1793     if (!unsentmap) {
1794         /* We don't have a safe way to resize the unsentmap, so
1795          * if the bitmap was resized it will be NULL at this
1796          * point.
1797          */
1798         error_report("migration ram resized during precopy phase");
1799         rcu_read_unlock();
1800         return -EINVAL;
1801     }
1802
1803     /* Deal with TPS != HPS */
1804     ret = postcopy_chunk_hostpages(ms);
1805     if (ret) {
1806         rcu_read_unlock();
1807         return ret;
1808     }
1809
1810     /*
1811      * Update the unsentmap to be unsentmap = unsentmap | dirty
1812      */
1813     bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1814     bitmap_or(unsentmap, unsentmap, bitmap,
1815                last_ram_offset() >> TARGET_PAGE_BITS);
1816
1817
1818     trace_ram_postcopy_send_discard_bitmap();
1819 #ifdef DEBUG_POSTCOPY
1820     ram_debug_dump_bitmap(unsentmap, true);
1821 #endif
1822
1823     ret = postcopy_each_ram_send_discard(ms);
1824     rcu_read_unlock();
1825
1826     return ret;
1827 }
1828
1829 /*
1830  * At the start of the postcopy phase of migration, any now-dirty
1831  * precopied pages are discarded.
1832  *
1833  * start, length describe a byte address range within the RAMBlock
1834  *
1835  * Returns 0 on success.
1836  */
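     /*
      * Note that both the start and the end of the range must fall on a
      * host-page boundary; unaligned requests are rejected below rather than
      * silently rounded.
      */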
1837 int ram_discard_range(MigrationIncomingState *mis,
1838                       const char *block_name,
1839                       uint64_t start, size_t length)
1840 {
1841     int ret = -1;
1842
1843     rcu_read_lock();
1844     RAMBlock *rb = qemu_ram_block_by_name(block_name);
1845
1846     if (!rb) {
1847         error_report("ram_discard_range: Failed to find block '%s'",
1848                      block_name);
1849         goto err;
1850     }
1851
1852     uint8_t *host_startaddr = rb->host + start;
1853
1854     if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1855         error_report("ram_discard_range: Unaligned start address: %p",
1856                      host_startaddr);
1857         goto err;
1858     }
1859
1860     if ((start + length) <= rb->used_length) {
1861         uint8_t *host_endaddr = host_startaddr + length;
1862         if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1863             error_report("ram_discard_range: Unaligned end address: %p",
1864                          host_endaddr);
1865             goto err;
1866         }
1867         ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1868     } else {
1869         error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1870                      "/%zx/" RAM_ADDR_FMT")",
1871                      block_name, start, length, rb->used_length);
1872     }
1873
1874 err:
1875     rcu_read_unlock();
1876
1877     return ret;
1878 }
1879
1880
1881 /* Each of ram_save_setup, ram_save_iterate and ram_save_complete has a
1882  * long-running RCU critical section.  When RCU reclaims in the code
1883  * start to become numerous, it will be necessary to reduce the
1884  * granularity of these critical sections.
1885  */
1886
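     /*
      * The setup stage below emits: a be64 of the total RAM size tagged with
      * RAM_SAVE_FLAG_MEM_SIZE, then for each RAMBlock its idstr (a length byte
      * followed by the name bytes) and used_length, and finally
      * RAM_SAVE_FLAG_EOS.
      */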
1887 static int ram_save_setup(QEMUFile *f, void *opaque)
1888 {
1889     RAMBlock *block;
1890     int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1891
1892     dirty_rate_high_cnt = 0;
1893     bitmap_sync_count = 0;
1894     migration_bitmap_sync_init();
1895     qemu_mutex_init(&migration_bitmap_mutex);
1896
1897     if (migrate_use_xbzrle()) {
1898         XBZRLE_cache_lock();
1899         XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1900                                   TARGET_PAGE_SIZE,
1901                                   TARGET_PAGE_SIZE);
1902         if (!XBZRLE.cache) {
1903             XBZRLE_cache_unlock();
1904             error_report("Error creating cache");
1905             return -1;
1906         }
1907         XBZRLE_cache_unlock();
1908
1909         /* We prefer not to abort if there is no memory */
1910         XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1911         if (!XBZRLE.encoded_buf) {
1912             error_report("Error allocating encoded_buf");
1913             return -1;
1914         }
1915
1916         XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1917         if (!XBZRLE.current_buf) {
1918             error_report("Error allocating current_buf");
1919             g_free(XBZRLE.encoded_buf);
1920             XBZRLE.encoded_buf = NULL;
1921             return -1;
1922         }
1923
1924         acct_clear();
1925     }
1926
1927     /* For memory_global_dirty_log_start below.  */
1928     qemu_mutex_lock_iothread();
1929
1930     qemu_mutex_lock_ramlist();
1931     rcu_read_lock();
1932     bytes_transferred = 0;
1933     reset_ram_globals();
1934
1935     ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1936     migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
1937     migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
1938     bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
1939
1940     if (migrate_postcopy_ram()) {
1941         migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
1942         bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
1943     }
1944
1945     /*
1946      * Count the total number of pages used by ram blocks not including any
1947      * gaps due to alignment or unplugs.
1948      */
1949     migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
1950
1951     memory_global_dirty_log_start();
1952     migration_bitmap_sync();
1953     qemu_mutex_unlock_ramlist();
1954     qemu_mutex_unlock_iothread();
1955
1956     qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1957
1958     QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1959         qemu_put_byte(f, strlen(block->idstr));
1960         qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1961         qemu_put_be64(f, block->used_length);
1962     }
1963
1964     rcu_read_unlock();
1965
1966     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1967     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1968
1969     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1970
1971     return 0;
1972 }
1973
1974 static int ram_save_iterate(QEMUFile *f, void *opaque)
1975 {
1976     int ret;
1977     int i;
1978     int64_t t0;
1979     int pages_sent = 0;
1980
1981     rcu_read_lock();
1982     if (ram_list.version != last_version) {
1983         reset_ram_globals();
1984     }
1985
1986     /* Read version before ram_list.blocks */
1987     smp_rmb();
1988
1989     ram_control_before_iterate(f, RAM_CONTROL_ROUND);
1990
1991     t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
1992     i = 0;
1993     while ((ret = qemu_file_rate_limit(f)) == 0) {
1994         int pages;
1995
1996         pages = ram_find_and_save_block(f, false, &bytes_transferred);
1997         /* no more pages to send */
1998         if (pages == 0) {
1999             break;
2000         }
2001         pages_sent += pages;
2002         acct_info.iterations++;
2003
2004         /* We want to check in the 1st loop, just in case it was the 1st time
2005            and we had to sync the dirty bitmap.
2006            qemu_clock_get_ns() is a bit expensive, so we only check once every
2007            few iterations.
2008         */
2009         if ((i & 63) == 0) {
2010             uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2011             if (t1 > MAX_WAIT) {
2012                 DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
2013                         t1, i);
2014                 break;
2015             }
2016         }
2017         i++;
2018     }
2019     flush_compressed_data(f);
2020     rcu_read_unlock();
2021
2022     /*
2023      * Must occur before EOS (or any QEMUFile operation)
2024      * because of RDMA protocol.
2025      */
2026     ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2027
2028     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2029     bytes_transferred += 8;
2030
2031     ret = qemu_file_get_error(f);
2032     if (ret < 0) {
2033         return ret;
2034     }
2035
2036     return pages_sent;
2037 }
2038
2039 /* Called with iothread lock */
2040 static int ram_save_complete(QEMUFile *f, void *opaque)
2041 {
2042     rcu_read_lock();
2043
2044     if (!migration_in_postcopy(migrate_get_current())) {
2045         migration_bitmap_sync();
2046     }
2047
2048     ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2049
2050     /* try transferring iterative blocks of memory */
2051
2052     /* flush all remaining blocks regardless of rate limiting */
2053     while (true) {
2054         int pages;
2055
2056         pages = ram_find_and_save_block(f, true, &bytes_transferred);
2057         /* no more blocks to send */
2058         if (pages == 0) {
2059             break;
2060         }
2061     }
2062
2063     flush_compressed_data(f);
2064     ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2065
2066     rcu_read_unlock();
2067
2068     qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2069
2070     return 0;
2071 }
2072
2073 static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2074                              uint64_t *non_postcopiable_pending,
2075                              uint64_t *postcopiable_pending)
2076 {
2077     uint64_t remaining_size;
2078
2079     remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2080
2081     if (!migration_in_postcopy(migrate_get_current()) &&
2082         remaining_size < max_size) {
2083         qemu_mutex_lock_iothread();
2084         rcu_read_lock();
2085         migration_bitmap_sync();
2086         rcu_read_unlock();
2087         qemu_mutex_unlock_iothread();
2088         remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2089     }
2090
2091     /* We can do postcopy, and all the data is postcopiable */
2092     *postcopiable_pending += remaining_size;
2093 }
2094
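     /*
      * On the wire an XBZRLE page is: one flags byte (which must be
      * ENCODING_FLAG_XBZRLE), a be16 encoded length, then that many bytes of
      * encoded data, which is decoded against the existing contents of 'host'.
      */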
2095 static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2096 {
2097     unsigned int xh_len;
2098     int xh_flags;
2099     uint8_t *loaded_data;
2100
2101     if (!xbzrle_decoded_buf) {
2102         xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2103     }
2104     loaded_data = xbzrle_decoded_buf;
2105
2106     /* extract RLE header */
2107     xh_flags = qemu_get_byte(f);
2108     xh_len = qemu_get_be16(f);
2109
2110     if (xh_flags != ENCODING_FLAG_XBZRLE) {
2111         error_report("Failed to load XBZRLE page - wrong compression!");
2112         return -1;
2113     }
2114
2115     if (xh_len > TARGET_PAGE_SIZE) {
2116         error_report("Failed to load XBZRLE page - len overflow!");
2117         return -1;
2118     }
2119     /* load data and decode */
2120     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2121
2122     /* decode RLE */
2123     if (xbzrle_decode_buffer(loaded_data, xh_len, host,
2124                              TARGET_PAGE_SIZE) == -1) {
2125         error_report("Failed to load XBZRLE page - decode error!");
2126         return -1;
2127     }
2128
2129     return 0;
2130 }
2131
2132 /*
2133  * Read a RAMBlock ID from the stream f.
2134  *
2135  * Must be called from within an RCU critical section; returns a pointer
2136  * from within the RCU-protected ram_list.
2137  *
2138  * f: Stream to read from
2139  * flags: Page flags (mostly to see if it's a continuation of a previous block)
2140  */
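     /*
      * When RAM_SAVE_FLAG_CONTINUE is not set, the stream carries a length
      * byte followed by that many bytes of block name; the block found is
      * cached in a function-static pointer so that later CONTINUE pages can
      * reuse it without re-sending the name.
      */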
2141 static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
2142                                               int flags)
2143 {
2144     static RAMBlock *block = NULL;
2145     char id[256];
2146     uint8_t len;
2147
2148     if (flags & RAM_SAVE_FLAG_CONTINUE) {
2149         if (!block) {
2150             error_report("Got RAM_SAVE_FLAG_CONTINUE with no current block - bad migration stream!");
2151             return NULL;
2152         }
2153         return block;
2154     }
2155
2156     len = qemu_get_byte(f);
2157     qemu_get_buffer(f, (uint8_t *)id, len);
2158     id[len] = 0;
2159
2160     block = qemu_ram_block_by_name(id);
2161     if (!block) {
2162         error_report("Can't find block %s", id);
2163         return NULL;
2164     }
2165
2166     return block;
2167 }
2168
2169 static inline void *host_from_ram_block_offset(RAMBlock *block,
2170                                                ram_addr_t offset)
2171 {
2172     if (!offset_in_ramblock(block, offset)) {
2173         return NULL;
2174     }
2175
2176     return block->host + offset;
2177 }
2178
2179 /*
2180  * If a page (or a whole RDMA chunk) has been
2181  * determined to be zero, then zap it.
2182  */
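     /*
      * Writing zeros to a page that already reads as zero would needlessly
      * dirty it (and can force not-yet-touched memory to be allocated), so
      * the memset below is skipped in that case.
      */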
2183 void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2184 {
2185     if (ch != 0 || !is_zero_range(host, size)) {
2186         memset(host, ch, size);
2187     }
2188 }
2189
2190 static void *do_data_decompress(void *opaque)
2191 {
2192     DecompressParam *param = opaque;
2193     unsigned long pagesize;
2194
2195     while (!quit_decomp_thread) {
2196         qemu_mutex_lock(&param->mutex);
2197         while (!param->start && !quit_decomp_thread) {
2198             qemu_cond_wait(&param->cond, &param->mutex);
2199             pagesize = TARGET_PAGE_SIZE;
2200             if (!quit_decomp_thread) {
2201                 /* uncompress() can fail in some cases, especially when the
2202                  * page was dirtied while it was being compressed.  That's not
2203                  * a problem because the dirty page will be retransferred
2204                  * and uncompress() won't break the data in other pages.
2205                  */
2206                 uncompress((Bytef *)param->des, &pagesize,
2207                            (const Bytef *)param->compbuf, param->len);
2208             }
2209             param->start = false;
2210         }
2211         qemu_mutex_unlock(&param->mutex);
2212     }
2213
2214     return NULL;
2215 }
2216
2217 void migrate_decompress_threads_create(void)
2218 {
2219     int i, thread_count;
2220
2221     thread_count = migrate_decompress_threads();
2222     decompress_threads = g_new0(QemuThread, thread_count);
2223     decomp_param = g_new0(DecompressParam, thread_count);
2224     quit_decomp_thread = false;
2225     for (i = 0; i < thread_count; i++) {
2226         qemu_mutex_init(&decomp_param[i].mutex);
2227         qemu_cond_init(&decomp_param[i].cond);
2228         decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2229         qemu_thread_create(decompress_threads + i, "decompress",
2230                            do_data_decompress, decomp_param + i,
2231                            QEMU_THREAD_JOINABLE);
2232     }
2233 }
2234
2235 void migrate_decompress_threads_join(void)
2236 {
2237     int i, thread_count;
2238
2239     quit_decomp_thread = true;
2240     thread_count = migrate_decompress_threads();
2241     for (i = 0; i < thread_count; i++) {
2242         qemu_mutex_lock(&decomp_param[i].mutex);
2243         qemu_cond_signal(&decomp_param[i].cond);
2244         qemu_mutex_unlock(&decomp_param[i].mutex);
2245     }
2246     for (i = 0; i < thread_count; i++) {
2247         qemu_thread_join(decompress_threads + i);
2248         qemu_mutex_destroy(&decomp_param[i].mutex);
2249         qemu_cond_destroy(&decomp_param[i].cond);
2250         g_free(decomp_param[i].compbuf);
2251     }
2252     g_free(decompress_threads);
2253     g_free(decomp_param);
2254     decompress_threads = NULL;
2255     decomp_param = NULL;
2256 }
2257
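     /*
      * Note that the loop below busy-waits: it keeps scanning the worker
      * slots until one with start == false is found, then hands it the
      * compressed buffer and returns.
      */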
2258 static void decompress_data_with_multi_threads(QEMUFile *f,
2259                                                void *host, int len)
2260 {
2261     int idx, thread_count;
2262
2263     thread_count = migrate_decompress_threads();
2264     while (true) {
2265         for (idx = 0; idx < thread_count; idx++) {
2266             if (!decomp_param[idx].start) {
2267                 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
2268                 decomp_param[idx].des = host;
2269                 decomp_param[idx].len = len;
2270                 start_decompression(&decomp_param[idx]);
2271                 break;
2272             }
2273         }
2274         if (idx < thread_count) {
2275             break;
2276         }
2277     }
2278 }
2279
2280 /*
2281  * Allocate data structures etc. needed by incoming migration with postcopy-ram;
2282  * postcopy-ram's similarly named postcopy_ram_incoming_init does the work.
2283  */
2284 int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2285 {
2286     size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2287
2288     return postcopy_ram_incoming_init(mis, ram_pages);
2289 }
2290
2291 /*
2292  * Called in postcopy mode by ram_load().
2293  * rcu_read_lock is taken prior to this being called.
2294  */
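     /*
      * Incoming target pages are accumulated in a temporary host-sized page
      * (postcopy_host_page) and only 'placed' into guest memory once the last
      * target page of that host page has arrived, so the placement is atomic
      * from the guest's point of view.
      */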
2295 static int ram_load_postcopy(QEMUFile *f)
2296 {
2297     int flags = 0, ret = 0;
2298     bool place_needed = false;
2299     bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
2300     MigrationIncomingState *mis = migration_incoming_get_current();
2301     /* Temporary page that is later 'placed' */
2302     void *postcopy_host_page = postcopy_get_tmp_page(mis);
2303     void *last_host = NULL;
2304     bool all_zero = false;
2305
2306     while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2307         ram_addr_t addr;
2308         void *host = NULL;
2309         void *page_buffer = NULL;
2310         void *place_source = NULL;
2311         uint8_t ch;
2312
2313         addr = qemu_get_be64(f);
2314         flags = addr & ~TARGET_PAGE_MASK;
2315         addr &= TARGET_PAGE_MASK;
2316
2317         trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2318         place_needed = false;
2319         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
2320             RAMBlock *block = ram_block_from_stream(f, flags);
2321
2322             host = host_from_ram_block_offset(block, addr);
2323             if (!host) {
2324                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2325                 ret = -EINVAL;
2326                 break;
2327             }
2328             page_buffer = host;
2329             /*
2330              * Postcopy requires that we place whole host pages atomically.
2331              * To make it atomic, the data is read into a temporary page
2332              * that's moved into place later.
2333              * The migration protocol uses (possibly smaller) target pages;
2334              * however, the source ensures it always sends all the components
2335              * of a host page in order.
2336              */
2337             page_buffer = postcopy_host_page +
2338                           ((uintptr_t)host & ~qemu_host_page_mask);
2339             /* 1st TP within the HP: if all TP stay zero we can optimise the place */
2340             if (!((uintptr_t)host & ~qemu_host_page_mask)) {
2341                 all_zero = true;
2342             } else {
2343                 /* not the 1st TP within the HP */
2344                 if (host != (last_host + TARGET_PAGE_SIZE)) {
2345                     error_report("Non-sequential target page %p/%p",
2346                                   host, last_host);
2347                     ret = -EINVAL;
2348                     break;
2349                 }
2350             }
2351
2352
2353             /*
2354              * If it's the last part of a host page then we place the host
2355              * page
2356              */
2357             place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
2358                                      ~qemu_host_page_mask) == 0;
2359             place_source = postcopy_host_page;
2360         }
2361         last_host = host;
2362
2363         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2364         case RAM_SAVE_FLAG_COMPRESS:
2365             ch = qemu_get_byte(f);
2366             memset(page_buffer, ch, TARGET_PAGE_SIZE);
2367             if (ch) {
2368                 all_zero = false;
2369             }
2370             break;
2371
2372         case RAM_SAVE_FLAG_PAGE:
2373             all_zero = false;
2374             if (!place_needed || !matching_page_sizes) {
2375                 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2376             } else {
2377                 /* Avoids the qemu_file copy during postcopy; the data will
2378                  * be copied again at place time anyway.  We can only do
2379                  * this when the read is done in one go (matching page sizes)
2380                  */
2381                 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2382                                          TARGET_PAGE_SIZE);
2383             }
2384             break;
2385         case RAM_SAVE_FLAG_EOS:
2386             /* normal exit */
2387             break;
2388         default:
2389             error_report("Unknown combination of migration flags: %#x"
2390                          " (postcopy mode)", flags);
2391             ret = -EINVAL;
2392         }
2393
2394         if (place_needed) {
2395             /* This gets called at the last target page in the host page */
2396             if (all_zero) {
2397                 ret = postcopy_place_page_zero(mis,
2398                                                host + TARGET_PAGE_SIZE -
2399                                                qemu_host_page_size);
2400             } else {
2401                 ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
2402                                                qemu_host_page_size,
2403                                                place_source);
2404             }
2405         }
2406         if (!ret) {
2407             ret = qemu_file_get_error(f);
2408         }
2409     }
2410
2411     return ret;
2412 }
2413
2414 static int ram_load(QEMUFile *f, void *opaque, int version_id)
2415 {
2416     int flags = 0, ret = 0;
2417     static uint64_t seq_iter;
2418     int len = 0;
2419     /*
2420      * If the system is running in postcopy mode, page inserts into host
2421      * memory must be atomic.
2422      */
2423     bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
2424
2425     seq_iter++;
2426
2427     if (version_id != 4) {
2428         ret = -EINVAL;
2429     }
2430
2431     /* This RCU critical section can be very long running.
2432      * When RCU reclaims in the code start to become numerous,
2433      * it will be necessary to reduce the granularity of this
2434      * critical section.
2435      */
2436     rcu_read_lock();
2437
2438     if (postcopy_running) {
2439         ret = ram_load_postcopy(f);
2440     }
2441
2442     while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2443         ram_addr_t addr, total_ram_bytes;
2444         void *host = NULL;
2445         uint8_t ch;
2446
2447         addr = qemu_get_be64(f);
2448         flags = addr & ~TARGET_PAGE_MASK;
2449         addr &= TARGET_PAGE_MASK;
2450
2451         if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
2452                      RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
2453             RAMBlock *block = ram_block_from_stream(f, flags);
2454
2455             host = host_from_ram_block_offset(block, addr);
2456             if (!host) {
2457                 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2458                 ret = -EINVAL;
2459                 break;
2460             }
2461         }
2462
2463         switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2464         case RAM_SAVE_FLAG_MEM_SIZE:
2465             /* Synchronize RAM block list */
2466             total_ram_bytes = addr;
2467             while (!ret && total_ram_bytes) {
2468                 RAMBlock *block;
2469                 char id[256];
2470                 ram_addr_t length;
2471
2472                 len = qemu_get_byte(f);
2473                 qemu_get_buffer(f, (uint8_t *)id, len);
2474                 id[len] = 0;
2475                 length = qemu_get_be64(f);
2476
2477                 block = qemu_ram_block_by_name(id);
2478                 if (block) {
2479                     if (length != block->used_length) {
2480                         Error *local_err = NULL;
2481
2482                         ret = qemu_ram_resize(block, length,
2483                                               &local_err);
2484                         if (local_err) {
2485                             error_report_err(local_err);
2486                         }
2487                     }
2488                     ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2489                                           block->idstr);
2490                 } else {
2491                     error_report("Unknown ramblock \"%s\", cannot "
2492                                  "accept migration", id);
2493                     ret = -EINVAL;
2494                 }
2495
2496                 total_ram_bytes -= length;
2497             }
2498             break;
2499
2500         case RAM_SAVE_FLAG_COMPRESS:
2501             ch = qemu_get_byte(f);
2502             ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2503             break;
2504
2505         case RAM_SAVE_FLAG_PAGE:
2506             qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2507             break;
2508
2509         case RAM_SAVE_FLAG_COMPRESS_PAGE:
2510             len = qemu_get_be32(f);
2511             if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2512                 error_report("Invalid compressed data length: %d", len);
2513                 ret = -EINVAL;
2514                 break;
2515             }
2516             decompress_data_with_multi_threads(f, host, len);
2517             break;
2518
2519         case RAM_SAVE_FLAG_XBZRLE:
2520             if (load_xbzrle(f, addr, host) < 0) {
2521                 error_report("Failed to decompress XBZRLE page at "
2522                              RAM_ADDR_FMT, addr);
2523                 ret = -EINVAL;
2524                 break;
2525             }
2526             break;
2527         case RAM_SAVE_FLAG_EOS:
2528             /* normal exit */
2529             break;
2530         default:
2531             if (flags & RAM_SAVE_FLAG_HOOK) {
2532                 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
2533             } else {
2534                 error_report("Unknown combination of migration flags: %#x",
2535                              flags);
2536                 ret = -EINVAL;
2537             }
2538         }
2539         if (!ret) {
2540             ret = qemu_file_get_error(f);
2541         }
2542     }
2543
2544     rcu_read_unlock();
2545     DPRINTF("Completed load of VM with exit code %d seq iteration "
2546             "%" PRIu64 "\n", ret, seq_iter);
2547     return ret;
2548 }
2549
2550 static SaveVMHandlers savevm_ram_handlers = {
2551     .save_live_setup = ram_save_setup,
2552     .save_live_iterate = ram_save_iterate,
2553     .save_live_complete_postcopy = ram_save_complete,
2554     .save_live_complete_precopy = ram_save_complete,
2555     .save_live_pending = ram_save_pending,
2556     .load_state = ram_load,
2557     .cleanup = ram_migration_cleanup,
2558 };
2559
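     /*
      * The section is registered as "ram" with version 4, matching the
      * version_id check in ram_load() above.
      */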
2560 void ram_mig_init(void)
2561 {
2562     qemu_mutex_init(&XBZRLE.lock);
2563     register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
2564 }